#
# The Python Imaging Library
# $Id$
#
# map CSS3-style colour description strings to RGB
#
# History:
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-15 fl Added RGBA support
# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2
# 2004-07-19 fl Fixed gray/grey spelling issues
# 2009-03-05 fl Fixed rounding error in grayscale calculation
#
# Copyright (c) 2002-2004 by Secret Labs AB
# Copyright (c) 2002-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
import re
def getrgb(color):
"""
Convert a color string to an RGB tuple. If the string cannot be parsed,
this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(red, green, blue[, alpha])``
"""
color = color.lower()
rgb = colormap.get(color, None)
if rgb:
if isinstance(rgb, tuple):
return rgb
colormap[color] = rgb = getrgb(rgb)
return rgb
# check for known string formats
if re.match('#[a-f0-9]{3}$', color):  # 3-digit hex: #rgb
return (
int(color[1]*2, 16),
int(color[2]*2, 16),
int(color[3]*2, 16),
)
if re.match('#[a-f0-9]{4}$', color):  # 4-digit hex: #rgba
return (
int(color[1]*2, 16),
int(color[2]*2, 16),
int(color[3]*2, 16),
int(color[4]*2, 16),
)
if re.match('#[a-f0-9]{6}$', color):  # 6-digit hex: #rrggbb
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16),
)
if re.match('#[a-f0-9]{8}$', color):  # 8-digit hex: #rrggbbaa
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16),
int(color[7:9], 16),
)
m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3))
)
m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
return (
int((int(m.group(1)) * 255) / 100.0 + 0.5),
int((int(m.group(2)) * 255) / 100.0 + 0.5),
int((int(m.group(3)) * 255) / 100.0 + 0.5)
)
m = re.match(r"hsl\(\s*(\d+)\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
from colorsys import hls_to_rgb
rgb = hls_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(3)) / 100.0,
float(m.group(2)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5)
)
m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$",
color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3)),
int(m.group(4))
)
raise ValueError("unknown color specifier: %r" % color)
def getcolor(color, mode):
"""
Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a
greyscale value if the mode is not color or a palette image. If the string
cannot be parsed, this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(graylevel [, alpha]) or (red, green, blue[, alpha])``
"""
# same as getrgb, but converts the result to the given mode
color, alpha = getrgb(color), 255
if len(color) == 4:
color, alpha = color[0:3], color[3]
if Image.getmodebase(mode) == "L":
r, g, b = color
color = (r*299 + g*587 + b*114)//1000
if mode[-1] == 'A':
return (color, alpha)
else:
if mode[-1] == 'A':
return color + (alpha,)
return color
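# Note: the integer weights in getcolor() above implement the ITU-R 601-2
# luma transform, L = R * 299/1000 + G * 587/1000 + B * 114/1000, so for
# example getcolor("#ff0000", "L") == 76.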
colormap = {
# X11 colour table from https://drafts.csswg.org/css-color-4/, with
# gray/grey spelling issues fixed. This is a superset of HTML 4.0
# colour names used in CSS 1.
"aliceblue": "#f0f8ff",
"antiquewhite": "#faebd7",
"aqua": "#00ffff",
"aquamarine": "#7fffd4",
"azure": "#f0ffff",
"beige": "#f5f5dc",
"bisque": "#ffe4c4",
"black": "#000000",
"blanchedalmond": "#ffebcd",
"blue": "#0000ff",
"blueviolet": "#8a2be2",
"brown": "#a52a2a",
"burlywood": "#deb887",
"cadetblue": "#5f9ea0",
"chartreuse": "#7fff00",
"chocolate": "#d2691e",
"coral": "#ff7f50",
"cornflowerblue": "#6495ed",
"cornsilk": "#fff8dc",
"crimson": "#dc143c",
"cyan": "#00ffff",
"darkblue": "#00008b",
"darkcyan": "#008b8b",
"darkgoldenrod": "#b8860b",
"darkgray": "#a9a9a9",
"darkgrey": "#a9a9a9",
"darkgreen": "#006400",
"darkkhaki": "#bdb76b",
"darkmagenta": "#8b008b",
"darkolivegreen": "#556b2f",
"darkorange": "#ff8c00",
"darkorchid": "#9932cc",
"darkred": "#8b0000",
"darksalmon": "#e9967a",
"darkseagreen": "#8fbc8f",
"darkslateblue": "#483d8b",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"darkturquoise": "#00ced1",
"darkviolet": "#9400d3",
"deeppink": "#ff1493",
"deepskyblue": "#00bfff",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1e90ff",
"firebrick": "#b22222",
"floralwhite": "#fffaf0",
"forestgreen": "#228b22",
"fuchsia": "#ff00ff",
"gainsboro": "#dcdcdc",
"ghostwhite": "#f8f8ff",
"gold": "#ffd700",
"goldenrod": "#daa520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#adff2f",
"honeydew": "#f0fff0",
"hotpink": "#ff69b4",
"indianred": "#cd5c5c",
"indigo": "#4b0082",
"ivory": "#fffff0",
"khaki": "#f0e68c",
"lavender": "#e6e6fa",
"lavenderblush": "#fff0f5",
"lawngreen": "#7cfc00",
"lemonchiffon": "#fffacd",
"lightblue": "#add8e6",
"lightcoral": "#f08080",
"lightcyan": "#e0ffff",
"lightgoldenrodyellow": "#fafad2",
"lightgreen": "#90ee90",
"lightgray": "#d3d3d3",
"lightgrey": "#d3d3d3",
"lightpink": "#ffb6c1",
"lightsalmon": "#ffa07a",
"lightseagreen": "#20b2aa",
"lightskyblue": "#87cefa",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#b0c4de",
"lightyellow": "#ffffe0",
"lime": "#00ff00",
"limegreen": "#32cd32",
"linen": "#faf0e6",
"magenta": "#ff00ff",
"maroon": "#800000",
"mediumaquamarine": "#66cdaa",
"mediumblue": "#0000cd",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"mediumseagreen": "#3cb371",
"mediumslateblue": "#7b68ee",
"mediumspringgreen": "#00fa9a",
"mediumturquoise": "#48d1cc",
"mediumvioletred": "#c71585",
"midnightblue": "#191970",
"mintcream": "#f5fffa",
"mistyrose": "#ffe4e1",
"moccasin": "#ffe4b5",
"navajowhite": "#ffdead",
"navy": "#000080",
"oldlace": "#fdf5e6",
"olive": "#808000",
"olivedrab": "#6b8e23",
"orange": "#ffa500",
"orangered": "#ff4500",
"orchid": "#da70d6",
"palegoldenrod": "#eee8aa",
"palegreen": "#98fb98",
"paleturquoise": "#afeeee",
"palevioletred": "#db7093",
"papayawhip": "#ffefd5",
"peachpuff": "#ffdab9",
"peru": "#cd853f",
"pink": "#ffc0cb",
"plum": "#dda0dd",
"powderblue": "#b0e0e6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#ff0000",
"rosybrown": "#bc8f8f",
"royalblue": "#4169e1",
"saddlebrown": "#8b4513",
"salmon": "#fa8072",
"sandybrown": "#f4a460",
"seagreen": "#2e8b57",
"seashell": "#fff5ee",
"sienna": "#a0522d",
"silver": "#c0c0c0",
"skyblue": "#87ceeb",
"slateblue": "#6a5acd",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#fffafa",
"springgreen": "#00ff7f",
"steelblue": "#4682b4",
"tan": "#d2b48c",
"teal": "#008080",
"thistle": "#d8bfd8",
"tomato": "#ff6347",
"turquoise": "#40e0d0",
"violet": "#ee82ee",
"wheat": "#f5deb3",
"white": "#ffffff",
"whitesmoke": "#f5f5f5",
"yellow": "#ffff00",
"yellowgreen": "#9acd32",
}
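if __name__ == "__main__":
    # Minimal sanity checks, as a sketch. Because of the relative import at
    # the top of this file they only run in package context, e.g. from an
    # installed Pillow tree via `python -m PIL.ImageColor`.
    assert getrgb("#0080ff") == (0, 128, 255)
    assert getrgb("skyblue") == (135, 206, 235)
    assert getcolor("white", "L") == 255
    assert getcolor("white", "LA") == (255, 255)
    print("ImageColor sanity checks passed")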
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from hurry.filesize import size
from clint.textui import progress
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.template.loader import render_to_string
from django.contrib.humanize.templatetags.humanize import naturaltime
from calaccess_raw.management.commands import CalAccessCommand
from calaccess_raw import (
get_download_directory,
get_test_download_directory,
get_model_list
)
from calaccess_raw.models.tracking import RawDataVersion
class Command(CalAccessCommand):
help = "Download, unzip, clean and load the latest CAL-ACCESS database ZIP"
def add_arguments(self, parser):
"""
Adds custom arguments specific to this command.
"""
super(Command, self).add_arguments(parser)
parser.add_argument(
"--skip-download",
action="store_false",
dest="download",
default=True,
help="Skip downloading of the ZIP archive"
)
parser.add_argument(
"--skip-clean",
action="store_false",
dest="clean",
default=True,
help="Skip cleaning up the raw data files"
)
parser.add_argument(
"--skip-load",
action="store_false",
dest="load",
default=True,
help="Skip loading up the raw data files"
)
parser.add_argument(
"--keep-files",
action="store_true",
dest="keep_files",
default=False,
help="Keep zip, unzipped, TSV and CSV files"
)
parser.add_argument(
"--no-archive",
action="store_true",
dest="no_archive",
default=False,
help="Store an archive the downloaded zip file on the version model"
)
parser.add_argument(
"--noinput",
action="store_true",
dest="noinput",
default=False,
help="Download the ZIP archive without asking permission"
)
parser.add_argument(
"--test",
"--use-test-data",
action="store_true",
dest="test_data",
default=False,
help="Use sampled test data (skips download, clean a load)"
)
parser.add_argument(
"-a",
"--app-name",
dest="app_name",
default="calaccess_raw",
help="Name of Django app with models into which data will "
"be imported (if other not calaccess_raw)"
)
def handle(self, *args, **options):
super(Command, self).handle(*args, **options)
# set / compute any attributes that multiple class methods need
self.app_name = options["app_name"]
self.keep_files = options["keep_files"]
self.no_archive = options["no_archive"]
self.test_mode = options['test_data']
self.downloading = options['download']
self.cleaning = options['clean']
self.loading = options['load']
if self.test_mode:
# if using test data, we don't need to download
self.downloading = False
# and always keep files when running test data
self.keep_files = True
self.data_dir = get_test_download_directory()
# need to set this app-wide because cleancalaccessrawfile
# also calls get_download_directory
settings.CALACCESS_DOWNLOAD_DIR = self.data_dir
else:
self.data_dir = get_download_directory()
os.path.exists(self.data_dir) or os.makedirs(self.data_dir)
self.zip_path = os.path.join(self.data_dir, 'calaccess.zip')
self.tsv_dir = os.path.join(self.data_dir, "tsv/")
# When using test data, check up front that the tsv directory exists
# so we can fail fast.
if self.test_mode:
if not os.path.exists(self.tsv_dir):
raise CommandError("Data tsv directory does not exist "
"at %s" % self.tsv_dir)
elif self.verbosity:
self.log("Using test data")
self.csv_dir = os.path.join(self.data_dir, "csv/")
os.path.exists(self.csv_dir) or os.makedirs(self.csv_dir)
download_metadata = self.get_download_metadata()
current_release_datetime = download_metadata['last-modified']
last_started_update = self.get_last_log()
try:
last_download = self.command_logs.filter(
command='downloadcalaccessrawdata'
).order_by('-start_datetime')[0]
except IndexError:
last_download = None
up_to_date = False
can_resume = False
# if there's a previously started update
if last_started_update:
# if current release datetime matches version of last started update
if current_release_datetime == last_started_update.version.release_datetime:
# if the last update finished
if last_started_update.finish_datetime:
up_to_date = True
else:
# if the last update didn't finish
# (but is still for the current version)
can_resume = True
# if the last started update didn't finish
elif not last_started_update.finish_datetime:
# can resume update of old version as long as skipping download
if not self.downloading:
can_resume = True
# or if there is a last download
elif last_download:
# and last download's version matches the outstanding update version
if last_download.version == last_started_update.version:
# and last download completed
if last_download.finish_datetime:
can_resume = True
if options['noinput']:
# if not taking input and can resume, automatically go into resume mode
self.resume_mode = can_resume
else:
prompt_context = dict(
current_release_datetime=current_release_datetime,
expected_size=size(download_metadata['content-length']),
up_to_date=up_to_date,
can_resume=can_resume,
)
last_finished_update = self.get_last_log(finished=True)
if last_finished_update:
loaded_v = last_finished_update.version
prompt_context['since_loaded_version'] = naturaltime(loaded_v.release_datetime)
else:
prompt_context['since_loaded_version'] = None
prompt = render_to_string(
'calaccess_raw/updatecalaccessrawdata.txt',
prompt_context,
)
if can_resume:
if self.confirm_proceed(prompt):
self.resume_mode = True
else:
self.resume_mode = False
if not self.confirm_proceed('Do you want to re-start your update?\n'):
raise CommandError("Update cancelled")
else:
self.resume_mode = False
if not self.confirm_proceed(prompt):
raise CommandError("Update cancelled")
if not self.test_mode:
if self.resume_mode:
self.log_record = last_started_update
else:
# get or create a version
# .get_or_create() throws IntegrityError
try:
version = self.raw_data_versions.get(
release_datetime=current_release_datetime
)
except RawDataVersion.DoesNotExist:
version = self.raw_data_versions.create(
release_datetime=current_release_datetime,
size=download_metadata['content-length']
)
# create a new log record
self.log_record = self.command_logs.create(
version=version,
command=self,
called_by=self.get_caller_log()
)
# if the user could have resumed but didn't
force_restart_download = can_resume and not self.resume_mode
# if not skipping download, and there's a previous download
if self.downloading and last_download:
# if not forcing a restart
if not force_restart_download:
# check if version we are updating is last one being downloaded
if self.log_record.version == last_download.version:
# if it finished
if last_download.finish_datetime:
self.log('Already downloaded.')
self.downloading = False
if self.downloading:
call_command(
"downloadcalaccessrawdata",
keep_files=self.keep_files,
no_archive=self.no_archive,
verbosity=self.verbosity,
noinput=True,
restart=force_restart_download,
)
if self.verbosity:
self.duration()
# execute the other steps that haven't been skipped
if options['clean']:
self.clean()
if self.verbosity:
self.duration()
if options['load']:
self.load()
if self.verbosity:
self.duration()
if self.verbosity:
self.success("Done!")
if not self.test_mode:
self.log_record.finish_datetime = datetime.now()
self.log_record.save()
def clean(self):
"""
Clean up the raw data files from the state so they are
ready to get loaded into the database.
"""
if self.verbosity:
self.header("Cleaning data files")
tsv_list = os.listdir(self.tsv_dir)
if self.resume_mode:
# get finished clean command logs of last update
prev_cleaned = [
x.file_name + '.TSV'
for x in self.log_record.called.filter(
command='cleancalaccessrawfile',
finish_datetime__isnull=False
)
]
self.log("{} files already cleaned.".format(len(prev_cleaned)))
# remove these from tsv_list
tsv_list = [x for x in tsv_list if x not in prev_cleaned]
# Loop through all the files in the source directory
if self.verbosity:
tsv_list = progress.bar(tsv_list)
for name in tsv_list:
call_command(
"cleancalaccessrawfile",
name,
verbosity=self.verbosity,
keep_files=self.keep_files,
)
def load(self):
"""
Loads the cleaned up csv files into the database
"""
if self.verbosity:
self.header("Loading data files")
model_list = [
x for x in get_model_list() if os.path.exists(x.objects.get_csv_path())
]
if self.resume_mode:
# get finished load command logs of last update
prev_loaded = [
x.file_name
for x in self.log_record.called.filter(
command='loadcalaccessrawfile',
finish_datetime__isnull=False
)
]
self.log("{} models already loaded.".format(len(prev_loaded)))
# remove these from model_list
model_list = [x for x in model_list if x._meta.db_table not in prev_loaded]
if self.verbosity:
model_list = progress.bar(model_list)
for model in model_list:
call_command(
"loadcalaccessrawfile",
model.__name__,
verbosity=self.verbosity,
keep_files=self.keep_files,
app_name=self.app_name,
)
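# A usage sketch (hypothetical project setup; assumes calaccess_raw is in
# INSTALLED_APPS and a database is configured):
#
#   $ python manage.py updatecalaccessrawdata --noinput --keep-files
#
# or, equivalently, from Python:
#
#   from django.core.management import call_command
#   call_command("updatecalaccessrawdata", noinput=True, keep_files=True)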
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Acceptance tests for flocker-plugin which can be run against the same
acceptance testing infrastructure (Vagrant, etc) as Flocker itself.
Eventually flocker-plugin should have unit tests, but starting with integration
tests is a reasonable first pass, since unit tests depend on having a big stack
of (ideally verified) fakes for Docker, flocker API etc.
Run these tests first time with:
$ vagrant box add \
http://build.clusterhq.com/results/vagrant/master/flocker-tutorial.json
$ admin/run-powerstrip-acceptance-tests \
--keep --distribution=fedora-20 flockerdockerplugin.test.test_acceptance
After that, you can do quick test runs with the following.
If you haven't changed the server-side component of flocker-plugin (ie, if
you've only changed the acceptance test):
$ ./quick.sh --no-build
If you have changed flocker-plugin itself (and not just the acceptance
test):
$ ./quick.sh
These tests have a propensity to fail unless you also raise the "MaxClients"
setting above 10 (e.g. to 100) in /etc/sshd_config on the nodes you're
testing against.
"""
import sys, os, json
BASE_PATH = os.path.dirname(os.path.realpath(__file__ + "/../../"))
FLOCKER_PATH = BASE_PATH + "/flocker"
DOCKER_PATH = BASE_PATH + "/docker"
PLUGIN_DIR = "/usr/share/docker/plugins"
sys.path.insert(0, FLOCKER_PATH)
from twisted.internet import defer, reactor
from twisted.trial.unittest import TestCase
from twisted.web.client import Agent
import socket
import treq
from treq.client import HTTPClient
from flocker.acceptance.test_api import get_test_cluster
from pipes import quote as shell_quote
from subprocess import PIPE, Popen
def run_SSH(port, user, node, command, input, key=None,
background=False):
"""
Run a command via SSH.
:param int port: Port to connect to.
:param bytes user: User to run the command as.
:param bytes node: Node to run command on.
:param command: Command to run.
:type command: ``list`` of ``bytes``.
:param bytes input: Input to send to command.
:param FilePath key: If not None, the path to a private key to use.
:param background: If ``True``, don't block waiting for SSH process to
end or read its stdout. I.e. it will run "in the background".
Also ensures remote process has pseudo-tty so killing the local SSH
process will kill the remote one.
:return: stdout as ``bytes`` if ``background`` is false, otherwise
return the ``subprocess.Process`` object.
"""
quotedCommand = ' '.join(map(shell_quote, command))
command = [
b'ssh',
b'-p', b'%d' % (port,),
b'-o', b'StrictHostKeyChecking=no',
b'-o', b'UserKnownHostsFile=/dev/null',
]
if key is not None:
command.extend([
b"-i",
key.path])
if background:
# Force pseudo-tty so that remote process exits when the ssh
# client does:
command.extend([b"-t", b"-t"])
command.extend([
b'@'.join([user, node]),
quotedCommand
])
if background:
process = Popen(command, stdin=PIPE)
process.stdin.write(input)
return process
else:
process = Popen(command, stdout=PIPE, stdin=PIPE, stderr=PIPE)
result = process.communicate(input)
if process.returncode != 0:
raise Exception('Command Failed', command, process.returncode, result)
return result[0]
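# A usage sketch for run_SSH (hypothetical node address): run `uname -a` as
# root on a node, blocking until the command completes:
#
#   output = run_SSH(22, b"root", b"172.16.255.250", [b"uname", b"-a"],
#                    input=b"", key=FilePath("/root/.ssh/id_rsa_flocker"))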
from flocker.testtools import loop_until
from twisted.python.filepath import FilePath
from signal import SIGINT
from os import kill, path, system
from characteristic import attributes
# This refers to where to fetch the latest version of flocker-plugin from.
# If you want faster development cycle than Docker automated builds allow you
# can change it from "clusterhq" to your personal repo, and create a repo on
# Docker hub called "flocker-plugin". Then modify $DOCKER_PULL_REPO in
# quick.sh accordingly and use that script.
DOCKER_PULL_REPO = "lmarsden"
PF_VERSION = "testing_combined_volume_plugin"
# hacks hacks hacks
BUILD_ONCE = []
INJECT_ONCE = {}
KEY = FilePath(os.path.expanduser("~") + "/.ssh/id_rsa_flocker")
class PowerstripFlockerTests(TestCase):
"""
Real flocker-plugin tests against two nodes using the flocker
acceptance testing framework.
"""
# Slow builds because initial runs involve pulling some docker images
# (flocker-plugin).
timeout = 1200
def _buildDockerOnce(self):
"""
Using blocking APIs, build docker once per test run.
"""
if len(BUILD_ONCE):
return
if path.exists(DOCKER_PATH):
dockerCmd = ("cd %(dockerDir)s;"
"docker build -t custom-docker .;"
"docker run --privileged --rm "
"-e DOCKER_EXPERIMENTAL=1 "
"-e DOCKER_GITCOMMIT=`git log -1 --format=%%h` "
"-v %(dockerDir)s:/go/src/github.com/docker/docker "
"custom-docker hack/make.sh binary" % dict(
dockerDir=DOCKER_PATH))
print "Running docker command:", dockerCmd
exit = system(dockerCmd)
if exit > 0:
raise Exception("failed to build docker")
BUILD_ONCE.append(1)
def _injectDockerOnce(self, ip):
"""
Using blocking APIs, copy the docker binary from whence it was built in
_buildDockerOnce to the given ip.
"""
if ip not in INJECT_ONCE:
INJECT_ONCE[ip] = []
if len(INJECT_ONCE[ip]):
return
if path.exists(DOCKER_PATH):
# e.g. 1.5.0-plugins
dockerVersion = "1.7.0-dev-experimental" # XXX Docker need to update their VERSION file open("%s/VERSION" % (DOCKER_PATH,)).read().strip()
binaryPath = "%(dockerDir)s/bundles/%(dockerVersion)s/binary/docker-%(dockerVersion)s" % dict(
dockerDir=DOCKER_PATH, dockerVersion=dockerVersion)
hostBinaryPath = "/usr/bin/docker"
key = "/home/buildslave/.ssh/id_rsa_flocker"
exit = system("scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "
"-i %(key)s %(binaryPath)s root@%(ip)s:%(hostBinaryPath)s" % dict(
key=key, hostBinaryPath=hostBinaryPath, binaryPath=binaryPath, ip=ip))
if exit > 0:
raise Exception("failed to inject docker into %(ip)s" % dict(ip=ip))
INJECT_ONCE[ip].append(1)
def setUp(self):
"""
Ready the environment for tests which actually run docker
with flocker-plugin enabled.
* Log into each node in turn:
* Load flocker-plugin into docker
"""
self.agent = Agent(reactor) # no connectionpool
self.client = HTTPClient(self.agent)
d = get_test_cluster(self, 2)
def got_cluster(cluster):
self.cluster = cluster
self.plugins = {}
daemonReadyDeferreds = []
self.ips = [node.address for node in cluster.nodes]
# Build docker if necessary (if there's a docker submodule)
self._buildDockerOnce()
for ip in self.ips:
# cleanup after previous test runs
#run(ip, ["pkill", "-f", "flocker"])
shell(ip, "sleep 5 && initctl stop docker || true")
# Copy docker into the respective node
self._injectDockerOnce(ip)
# workaround https://github.com/calavera/docker/pull/4#issuecomment-100046383
shell(ip, "mkdir -p %s" % (PLUGIN_DIR,))
# cleanup stale sockets
shell(ip, "rm -f %s/*" % (PLUGIN_DIR,))
#shell(ip, "supervisorctl stop flocker-agent")
#shell(ip, "supervisorctl start flocker-agent")
"""
for container in ("flocker",):
try:
run(ip, ["docker", "rm", "-f", container])
except Exception:
print container, "was not running, not killed, OK."
# start flocker-plugin
FLOCKER_PLUGIN = "%s/flocker-plugin:%s" % (DOCKER_PULL_REPO, PF_VERSION)
run(ip, ["docker", "pull", FLOCKER_PLUGIN])
"""
# TODO - come up with cleaner/nicer way of flocker-plugin
# being able to establish its own host uuid (or volume
# mountpoints), such as API calls.
# See https://github.com/ClusterHQ/flocker-plugin/issues/2
# for how to do this now.
"""
self.plugins[ip] = remote_service_for_test(self, ip,
["docker", "run", "--name=flocker",
"-v", "%s:%s" % (PLUGIN_DIR, PLUGIN_DIR),
"-e", "FLOCKER_CONTROL_SERVICE_BASE_URL=%s" % (self.cluster.base_url,),
"-e", "MY_NETWORK_IDENTITY=%s" % (ip,),
"-e", "MY_HOST_UUID=%s" % (host_uuid,),
FLOCKER_PLUGIN])
"""
host_uuid = run(ip, ["python", "-c", "import json; "
"print json.load(open('/etc/flocker/volume.json'))['uuid']"]).strip()
cmd = ("cd /root && if [ ! -e powerstrip-flocker ]; then "
"git clone https://github.com/clusterhq/powerstrip-flocker && "
"cd powerstrip-flocker && "
"git checkout %s && cd /root;" % (PF_VERSION,)
+ "fi && cd /root/powerstrip-flocker && "
+ "FLOCKER_CONTROL_SERVICE_BASE_URL=%s" % (self.cluster.base_url,)
+ " MY_NETWORK_IDENTITY=%s" % (ip,)
+ " MY_HOST_UUID=%s" % (host_uuid,)
+ " twistd -noy flockerdockerplugin.tac")
print "CMD >>", cmd
self.plugins[ip] = remote_service_for_test(self, ip,
["bash", "-c", cmd])
# XXX Better not to have sleep 5 in here but hey
shell(ip, "sleep 5 && initctl start docker")
print "Waiting for flocker-plugin to show up on", ip, "..."
# XXX This will only work for the first test, need to restart
# docker in tearDown.
daemonReadyDeferreds.append(wait_for_plugin(ip))
d = defer.gatherResults(daemonReadyDeferreds)
# def debug():
# services
# import pdb; pdb.set_trace()
# d.addCallback(lambda ignored: deferLater(reactor, 1, debug))
return d
d.addCallback(got_cluster)
return d
def test_create_a_dataset(self):
"""
Running a docker container specifying a dataset name which has never
been created before creates it in the API.
"""
node1, node2 = sorted(self.ips)
fsName = "test001"
print "About to run docker run..."
shell(node1, "docker run "
"-v %s:/data --volume-driver=flocker busybox "
"sh -c 'echo 1 > /data/file'" % (fsName,))
url = self.cluster.base_url + "/configuration/datasets"
d = self.client.get(url)
d.addCallback(treq.json_content)
def verify(result):
self.assertTrue(len(result) > 0)
self.assertEqual(result[0]["metadata"], {"name": fsName})
#self.assertEqual(result[0]["primary"], node1)
d.addCallback(verify)
return d
def test_create_a_dataset_manifests(self):
"""
Running a docker container specifying a dataset name which has never
been created before creates the actual filesystem and mounts it in
place in time for the container to start.
We can verify this by asking Docker for the information about which
volumes are *actually* mounted in the container, then going and
checking that the real volume path on the host contains the '1' written
to the 'file' file specified in the docker run command...
"""
node1, node2 = sorted(self.ips)
fsName = "test001"
shell(node1, "docker run -d "
"-v %s:/data --volume-driver=flocker busybox "
"sh -c 'echo fish > /data/file'" % (fsName,)).strip()
# The volume that Docker now has mounted exists as a ZFS volume...
zfs_volumes = shell(node1, "zfs list -t snapshot,filesystem -r flocker "
"|grep flocker/ |wc -l").strip()
self.assertEqual(int(zfs_volumes), 1)
# ... and contains a file which contains the characters "fish".
catFileOutput = shell(node1, "docker run "
"-v %s:/data --volume-driver=flocker busybox "
"cat /data/file" % (fsName,)).strip()
self.assertEqual(catFileOutput, "fish")
def test_create_two_datasets_same_name(self):
"""
The metadata stored about a dataset name is checked to make sure that
no two volumes with the same name are created. (In fact, if two
volumes are created with the same name on the same host, it's a shared
volume.)
"""
node1, node2 = sorted(self.ips)
fsName = "test001"
# First volume...
container_id_1 = shell(node1, "docker run -d "
"-v %s:/data --volume-driver=flocker busybox "
"sh -c 'echo fish > /data/file'" % (fsName,)).strip()
docker_inspect = json.loads(run(node1, ["docker", "inspect", container_id_1]))
volume_1 = docker_inspect[0]["Volumes"].values()[0]
# Second volume...
container_id_2 = shell(node1, "docker run -d "
"-v %s:/data --volume-driver=flocker busybox "
"sh -c 'echo fish > /data/file'" % (fsName,)).strip()
docker_inspect = json.loads(run(node1, ["docker", "inspect", container_id_2]))
volume_2 = docker_inspect[0]["Volumes"].values()[0]
# ... have the same flocker UUID.
self.assertEqual(volume_1, volume_2)
def test_move_a_dataset(self):
"""
Running a docker container specifying a dataset name which has been
created before but which is no longer running moves the dataset before
starting the container.
"""
node1, node2 = sorted(self.ips)
fsName = "test001"
# Write some bytes to a volume on one host...
shell(node1, "docker run "
"-v %s:/data --volume-driver=flocker busybox "
"sh -c 'echo chicken > /data/file'" % (fsName,))
# ... and read them from the same named volume on another...
container_id = shell(node2, "docker run -d "
"-v %s:/data --volume-driver=flocker busybox "
"sh -c 'cat /data/file'" % (fsName,)).strip()
output = run(node2, ["docker", "logs", container_id])
self.assertEqual(output.strip(), "chicken")
def test_move_a_dataset_check_persistence(self):
"""
The data in the dataset between the initial instantiation of it and the
second instantiation of it persists.
"""
pass
test_move_a_dataset_check_persistence.skip = "not implemented yet"
def test_dataset_is_not_moved_when_being_used(self):
"""
If a container (*any* container) is currently running with a dataset
mounted, an error is reported rather than ripping it out from
underneath a running container.
"""
pass
test_dataset_is_not_moved_when_being_used.skip = "not implemented yet"
def test_two_datasets_one_move_one_create(self):
"""
When a docker run command mentions two datasets, one which is currently
not running on another host, and another which is new, the new one gets
created and the extant one gets moved. Both operations complete before
the container is started.
"""
pass
test_two_datasets_one_move_one_create.skip = "not implemented yet"
def shell(node, command, input=""):
"""
Run a command (byte string) in a shell on a remote host. Useful for
defining env vars, pipelines and such. With optional input (bytes).
"""
command = ["sh", "-c", command]
result = run(node, command, input)
return result
def run(node, command, input=""):
"""
Synchronously run a command (list of bytes) on a node's address (bytes)
with optional input (bytes).
"""
#print "Running", command, "on", node
result = run_SSH(22, "root", node, command, input, key=KEY)
#print "Output from", node + ":", result, "(%s)" % (command,)
return result
def wait_for_plugin(hostname):
"""
Wait until a non-zero number of plugins are loaded.
"""
return loop_until(lambda:
"flocker.sock" in shell(hostname, "ls -alh %s" % (PLUGIN_DIR,)))
def wait_for_socket(hostname, port):
# TODO: upstream this modified version into flocker (it was copied from
# flocker.acceptance.test_api)
"""
Wait until remote TCP socket is available.
:param str hostname: The host where the remote service is running.
:return Deferred: Fires when socket is available.
"""
def api_available():
try:
s = socket.socket()
s.connect((hostname, port))
return True
except socket.error:
return False
return loop_until(api_available)
@attributes(['address', 'process'])
class RemoteService(object):
"""
A record of a background SSH process and the node that it's running on.
:ivar bytes address: The IPv4 address on which the service is running.
:ivar Subprocess.Popen process: The running ``SSH`` process that is running
the remote process.
"""
def close(process):
"""
Kill a process.
:param subprocess.Popen process: The process to be killed.
"""
process.stdin.close()
kill(process.pid, SIGINT)
def remote_service_for_test(test_case, address, command):
"""
Start a remote process (via SSH) for a test and register a cleanup function
to stop it when the test finishes.
:param TestCase test_case: The test case instance on which to register
cleanup operations.
:param bytes address: The IPv4 address of the node on which to run
``command``.
:param list command: The command line arguments to run remotely via SSH.
:returns: A ``RemoteService`` instance.
"""
service = RemoteService(
address=address,
process=run_SSH(
port=22,
user='root',
node=address,
command=command,
input=b"",
key=KEY,
background=True
)
)
test_case.addCleanup(close, service.process)
return service
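# A usage sketch (hypothetical address), from inside a test method: start a
# long-running remote process and rely on the registered cleanup to SIGINT
# it when the test finishes:
#
#   service = remote_service_for_test(
#       self, b"172.16.255.250",
#       [b"twistd", b"-noy", b"flockerdockerplugin.tac"])
#   d = wait_for_socket("172.16.255.250", 4523)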
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is also a cloud agnostic Hadoop as a Service API that abstracts provisioning and eases management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary sizes on any of the supported cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers' APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ClusterViewResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'name': 'str',
'description': 'str',
'status': 'str',
'secure': 'bool',
'ambari_server_ip': 'str',
'blueprint': 'BlueprintViewResponse',
'host_groups': 'list[HostGroupViewResponse]',
'shared_service_response': 'SharedServiceResponse'
}
attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'status': 'status',
'secure': 'secure',
'ambari_server_ip': 'ambariServerIp',
'blueprint': 'blueprint',
'host_groups': 'hostGroups',
'shared_service_response': 'sharedServiceResponse'
}
def __init__(self, id=None, name=None, description=None, status=None, secure=False, ambari_server_ip=None, blueprint=None, host_groups=None, shared_service_response=None):
"""
ClusterViewResponse - a model defined in Swagger
"""
self._id = None
self._name = None
self._description = None
self._status = None
self._secure = None
self._ambari_server_ip = None
self._blueprint = None
self._host_groups = None
self._shared_service_response = None
if id is not None:
self.id = id
self.name = name
if description is not None:
self.description = description
if status is not None:
self.status = status
if secure is not None:
self.secure = secure
if ambari_server_ip is not None:
self.ambari_server_ip = ambari_server_ip
if blueprint is not None:
self.blueprint = blueprint
if host_groups is not None:
self.host_groups = host_groups
if shared_service_response is not None:
self.shared_service_response = shared_service_response
@property
def id(self):
"""
Gets the id of this ClusterViewResponse.
id of the resource
:return: The id of this ClusterViewResponse.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ClusterViewResponse.
id of the resource
:param id: The id of this ClusterViewResponse.
:type: int
"""
self._id = id
@property
def name(self):
"""
Gets the name of this ClusterViewResponse.
name of the resource
:return: The name of this ClusterViewResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ClusterViewResponse.
name of the resource
:param name: The name of this ClusterViewResponse.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
if name is not None and len(name) > 100:
raise ValueError("Invalid value for `name`, length must be less than or equal to `100`")
if name is not None and len(name) < 5:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `5`")
if name is not None and not re.search('(^[a-z][-a-z0-9]*[a-z0-9]$)', name):
raise ValueError("Invalid value for `name`, must be a follow pattern or equal to `/(^[a-z][-a-z0-9]*[a-z0-9]$)/`")
self._name = name
@property
def description(self):
"""
Gets the description of this ClusterViewResponse.
description of the resource
:return: The description of this ClusterViewResponse.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this ClusterViewResponse.
description of the resource
:param description: The description of this ClusterViewResponse.
:type: str
"""
if description is not None and len(description) > 1000:
raise ValueError("Invalid value for `description`, length must be less than or equal to `1000`")
if description is not None and len(description) < 0:
raise ValueError("Invalid value for `description`, length must be greater than or equal to `0`")
self._description = description
@property
def status(self):
"""
Gets the status of this ClusterViewResponse.
status of the cluster
:return: The status of this ClusterViewResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this ClusterViewResponse.
status of the cluster
:param status: The status of this ClusterViewResponse.
:type: str
"""
allowed_values = ["REQUESTED", "CREATE_IN_PROGRESS", "AVAILABLE", "UPDATE_IN_PROGRESS", "UPDATE_REQUESTED", "UPDATE_FAILED", "CREATE_FAILED", "ENABLE_SECURITY_FAILED", "PRE_DELETE_IN_PROGRESS", "DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETED", "STOPPED", "STOP_REQUESTED", "START_REQUESTED", "STOP_IN_PROGRESS", "START_IN_PROGRESS", "START_FAILED", "STOP_FAILED", "WAIT_FOR_SYNC", "MAINTENANCE_MODE_ENABLED"]
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}"
.format(status, allowed_values)
)
self._status = status
@property
def secure(self):
"""
Gets the secure of this ClusterViewResponse.
tells whether the cluster is secured or not
:return: The secure of this ClusterViewResponse.
:rtype: bool
"""
return self._secure
@secure.setter
def secure(self, secure):
"""
Sets the secure of this ClusterViewResponse.
tells whether the cluster is secured or not
:param secure: The secure of this ClusterViewResponse.
:type: bool
"""
self._secure = secure
@property
def ambari_server_ip(self):
"""
Gets the ambari_server_ip of this ClusterViewResponse.
public ambari ip of the stack
:return: The ambari_server_ip of this ClusterViewResponse.
:rtype: str
"""
return self._ambari_server_ip
@ambari_server_ip.setter
def ambari_server_ip(self, ambari_server_ip):
"""
Sets the ambari_server_ip of this ClusterViewResponse.
public ambari ip of the stack
:param ambari_server_ip: The ambari_server_ip of this ClusterViewResponse.
:type: str
"""
self._ambari_server_ip = ambari_server_ip
@property
def blueprint(self):
"""
Gets the blueprint of this ClusterViewResponse.
blueprint for the cluster
:return: The blueprint of this ClusterViewResponse.
:rtype: BlueprintViewResponse
"""
return self._blueprint
@blueprint.setter
def blueprint(self, blueprint):
"""
Sets the blueprint of this ClusterViewResponse.
blueprint for the cluster
:param blueprint: The blueprint of this ClusterViewResponse.
:type: BlueprintViewResponse
"""
self._blueprint = blueprint
@property
def host_groups(self):
"""
Gets the host_groups of this ClusterViewResponse.
collection of hostgroups
:return: The host_groups of this ClusterViewResponse.
:rtype: list[HostGroupViewResponse]
"""
return self._host_groups
@host_groups.setter
def host_groups(self, host_groups):
"""
Sets the host_groups of this ClusterViewResponse.
collection of hostgroups
:param host_groups: The host_groups of this ClusterViewResponse.
:type: list[HostGroupViewResponse]
"""
self._host_groups = host_groups
@property
def shared_service_response(self):
"""
Gets the shared_service_response of this ClusterViewResponse.
shared service for a specific stack
:return: The shared_service_response of this ClusterViewResponse.
:rtype: SharedServiceResponse
"""
return self._shared_service_response
@shared_service_response.setter
def shared_service_response(self, shared_service_response):
"""
Sets the shared_service_response of this ClusterViewResponse.
shared service for a specific stack
:param shared_service_response: The shared_service_response of this ClusterViewResponse.
:type: SharedServiceResponse
"""
self._shared_service_response = shared_service_response
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ClusterViewResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
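if __name__ == "__main__":
    # A minimal sketch exercising the generated model. Only scalar fields
    # are set here, since BlueprintViewResponse and friends are defined
    # elsewhere in the generated client.
    cluster = ClusterViewResponse(id=1, name="demo-cluster",
                                  status="AVAILABLE", secure=False)
    print(cluster.to_dict())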
from __future__ import unicode_literals
import difflib
import errno
import json
import os
import posixpath
import re
import socket
import sys
import threading
import unittest
import warnings
from collections import Counter
from copy import copy
from functools import wraps
from unittest.util import safe_repr
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer
from django.core.urlresolvers import clear_url_caches, set_urlconf
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils import six
from django.utils.decorators import classproperty
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import (
unquote, urlparse, urlsplit, urlunsplit,
)
from django.utils.six.moves.urllib.request import url2pathname
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
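# A quick sketch of to_list() behaviour:
#   to_list(None)        -> []
#   to_list("error")     -> ["error"]
#   to_list(["a", "b"])  -> ["a", "b"]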
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e.msg)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super(_AssertNumQueriesContext, self).__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
query['sql'] for query in self.captured_queries
)
)
)
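# _AssertNumQueriesContext backs assertNumQueries(), defined further down in
# this module on TransactionTestCase; a typical use in a test (Book is a
# hypothetical model):
#
#   with self.assertNumQueries(2):
#       Book.objects.count()
#       Book.objects.create(title="x")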
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += ' No template was rendered.'
else:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
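# These two contexts back the context-manager form of assertTemplateUsed()
# and assertTemplateNotUsed(), e.g. (hypothetical template name):
#
#   with self.assertTemplateUsed('index.html'):
#       render_to_string('index.html')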
class _CursorFailure(object):
def __init__(self, cls_name, wrapped):
self.cls_name = cls_name
self.wrapped = wrapped
def __call__(self):
raise AssertionError(
"Database queries aren't allowed in SimpleTestCase. "
"Either use TestCase or TransactionTestCase to ensure proper test isolation or "
"set %s.allow_database_queries to True to silence this failure." % self.cls_name
)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
_overridden_settings = None
_modified_settings = None
# Tests shouldn't be allowed to query the database since
# this base class doesn't enforce any isolation.
allow_database_queries = False
@classmethod
def setUpClass(cls):
super(SimpleTestCase, cls).setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = _CursorFailure(cls.__name__, connection.cursor)
@classmethod
def tearDownClass(cls):
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = connection.cursor.wrapped
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super(SimpleTestCase, cls).tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False))
if not skipped:
try:
self._pre_setup()
except Exception:
result.addError(self, sys.exc_info())
return
super(SimpleTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Creating a test client.
* If the class has a 'urls' attribute, replace ROOT_URLCONF with it.
* Clearing the mail test outbox.
"""
self.client = self.client_class()
self._urlconf_setup()
mail.outbox = []
def _urlconf_setup(self):
if hasattr(self, 'urls'):
warnings.warn(
"SimpleTestCase.urls is deprecated and will be removed in "
"Django 1.10. Use @override_settings(ROOT_URLCONF=...) "
"in %s instead." % self.__class__.__name__,
RemovedInDjango110Warning, stacklevel=2)
set_urlconf(None)
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
"""
self._urlconf_teardown()
def _urlconf_teardown(self):
if hasattr(self, '_old_root_urlconf'):
set_urlconf(None)
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
A context manager that temporarily applies changes to a list setting and
reverts to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix='',
fetch_redirect_response=True):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request (use fetch_redirect_response=False to check
such links without fetching them).
"""
if host is not None:
warnings.warn(
"The host argument is deprecated and no longer used by assertRedirects",
RemovedInDjango20Warning, stacklevel=2
)
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
self.assertEqual(response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected:"
" Response code was %d (expected %d)" %
(response.redirect_chain[0][1], status_code))
url, status_code = response.redirect_chain[-1]
scheme, netloc, path, query, fragment = urlsplit(url)
self.assertEqual(response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final"
" Response code was %d (expected %d)" %
(response.status_code, target_status_code))
else:
# Not a followed redirect
self.assertEqual(response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
if fetch_redirect_response:
redirect_response = response.client.get(path, QueryDict(query),
secure=(scheme == 'https'))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s':"
" response code was %d (expected %d)" %
(path, redirect_response.status_code, target_status_code))
if url != expected_url:
# For temporary backwards compatibility, try to compare with a relative url
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
relative_url = urlunsplit(('', '', e_path, e_query, e_fragment))
if url == relative_url:
warnings.warn(
"assertRedirects had to strip the scheme and domain from the "
"expected URL, as it was always added automatically to URLs "
"before Django 1.9. Please update your expected URLs by "
"removing the scheme and domain.",
RemovedInDjango20Warning, stacklevel=2)
expected_url = relative_url
self.assertEqual(url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" %
(url, expected_url))
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (hasattr(response, 'render') and callable(response.render)
and not response.is_rendered):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response.charset)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None,
"Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None,
"Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200,
msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected), and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of %s in response"
" (expected %d)" % (real_count, text_repr, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200,
msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected), and that
``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0,
msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to "
"render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors)))
elif field in context[form].fields:
self.fail(msg_prefix + "The field '%s' on form '%s'"
" in context %d contains no errors" %
(field, form, i))
else:
self.fail(msg_prefix + "The form '%s' in context %d"
" does not contain the field '%s'" %
(form, i, field))
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors))
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the"
" response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Asserts that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err,
repr(field_errors)))
elif field in context[formset].forms[form_index].fields:
self.fail(msg_prefix + "The field '%s' "
"on formset '%s', form %d in "
"context %d contains no errors" %
(field, formset, form_index, i))
else:
self.fail(msg_prefix + "The formset '%s', form %d in "
"context %d does not contain the field '%s'" %
(formset, form_index, i, field))
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(len(non_field_errors) == 0,
msg_prefix + "The formset '%s', form %d in "
"context %d does not contain any non-field "
"errors." % (formset, form_index, i))
self.assertTrue(err in non_field_errors,
msg_prefix + "The formset '%s', form %d "
"in context %d does not contain the "
"non-field error '%s' "
"(actual errors: %s)" %
(formset, form_index, i, err,
repr(non_field_errors)))
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(len(non_form_errors) == 0,
msg_prefix + "The formset '%s' in "
"context %d does not contain any "
"non-form errors." % (formset, i))
self.assertTrue(err in non_form_errors,
msg_prefix + "The formset '%s' in context "
"%d does not contain the "
"non-form error '%s' (actual errors: %s)" %
(formset, i, err, repr(non_form_errors)))
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render "
"the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
# use this template with context manager
return template_name, None, msg_prefix
        template_names = [t.name for t in response.templates
                          if t.name is not None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s" %
(template_name, ', '.join(template_names)))
if count is not None:
self.assertEqual(template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)." %
(template_name, count, template_names.count(template_name)))
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name)
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
Asserts that the message in a raised exception matches the passed
value.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
# callable_obj was a documented kwarg in Django 1.8 and older.
callable_obj = kwargs.pop('callable_obj', None)
if callable_obj:
args = (callable_obj,) + args
return six.assertRaisesRegex(self, expected_exception,
re.escape(expected_message), *args, **kwargs)
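    # Minimal sketch: the message is matched with assertRaisesRegex semantics
    # after re.escape(), so a substring of the real message suffices:
    #
    #     with self.assertRaisesMessage(ValueError, 'invalid literal'):
    #         int('not a number')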
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args,
**dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_text(required.error_messages['required'])]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages,
error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs),
fieldclass)
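    # A hedged example using django.forms.EmailField (the exact error text
    # depends on the Django version):
    #
    #     self.assertFieldOutput(
    #         EmailField,
    #         valid={'a@a.com': 'a@a.com'},
    #         invalid={'aaa': ['Enter a valid email address.']},
    #     )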
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
six.text_type(dom1).splitlines(),
six.text_type(dom2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
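    # Semantic-equality sketch: attribute order and most whitespace are
    # ignored, so the following comparison passes:
    #
    #     self.assertHTMLEqual('<p class="x" id="y">hi</p>',
    #                          '<p id="y" class="x">hi</p>')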
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None,
'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None,
'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of '%s' in response"
" (expected %d)" % (real_count, needle, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply, as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
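    # Sketch: raw is always a string, while expected_data may be a string or
    # an already-decoded object:
    #
    #     self.assertJSONEqual('{"a": 1, "b": 2}', {'b': 2, 'a': 1})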
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply, as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
    # If transactions aren't available, Django will serialize the database
    # contents into a fixture during setup, then flush and reload them
    # during teardown (as flush does not restore data from migrations).
    # This can be slow; this flag allows enabling the serialized rollback
    # on a per-case basis.
serialized_rollback = False
# Since tests will be wrapped in a transaction, or serialized if they
# are not available, we allow queries to be run.
allow_database_queries = True
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* If the class has an 'available_apps' attribute, restricting the app
registry to these applications, then firing post_migrate -- it must
run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, installing these fixtures.
"""
super(TransactionTestCase, self)._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
raise
@classmethod
def _databases_names(cls, include_mirrors=True):
        # If the test case has a multi_db=True flag, act on all databases,
        # including mirrors only when include_mirrors=True. Otherwise, act
        # only on the default database.
if getattr(cls, 'multi_db', False):
return [alias for alias in connections
if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']]
else:
return [DEFAULT_DB_ALIAS]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# If we need to provide replica initial data from migrated apps,
# then do so.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
                # We have to use this slightly awkward syntax because we're
                # passing *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Flushing the contents of the database, to leave a clean slate. If
the class has an 'available_apps' attribute, post_migrate isn't fired.
* Force-closing the connection, so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super(TransactionTestCase, self)._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=self.available_apps is not None)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
items = six.moves.map(transform, qs)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
values = list(values)
        # For example, qs.iterator() could be passed as qs, but it does not
        # have an 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered values")
return self.assertEqual(list(items), values, msg=msg)
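    # Usage sketch (the Poll model is hypothetical). With the default
    # transform=repr, expected values are repr() strings:
    #
    #     self.assertQuerysetEqual(Poll.objects.order_by('name'),
    #                              ['<Poll: apple>', '<Poll: banana>'])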
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
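    # Both calling conventions, sketched with a hypothetical Poll model:
    #
    #     self.assertNumQueries(1, Poll.objects.count)  # callable form
    #     with self.assertNumQueries(2):                # context-manager form
    #         Poll.objects.create(name='x')
    #         Poll.objects.count()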
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but uses `transaction.atomic()` to achieve
test isolation.
    In most situations, TestCase should be preferred to TransactionTestCase, as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Helper method to open atomic blocks for multiple databases"""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened through the previous method"""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
if not connections_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{
'verbosity': 0,
'commit': False,
'database': db_name,
})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
cls.setUpTestData()
@classmethod
def tearDownClass(cls):
if connections_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super(TestCase, cls).tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase"""
pass
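    # Sketch of the intended override (data created here is shared by all
    # tests in the class and rolled back by the class-wide atomic block;
    # the Poll model is hypothetical):
    #
    #     @classmethod
    #     def setUpTestData(cls):
    #         cls.poll = Poll.objects.create(name='example')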
def _should_reload_connections(self):
if connections_support_transactions():
return False
return super(TestCase, self)._should_reload_connections()
def _fixture_setup(self):
if not connections_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super(TestCase, self)._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
self._rollback_atomics(self.atomics)
class CheckCondition(object):
"""Descriptor class for deferred condition checking"""
def __init__(self, cond_func):
self.cond_func = cond_func
def __get__(self, obj, objtype):
return self.cond_func()
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
test_item.__unittest_skip__ = CheckCondition(condition)
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(*features):
"""
Skip a test if a database has at least one of the named features.
"""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features)
)
def skipUnlessDBFeature(*features):
"""
Skip a test unless a database has all the named features.
"""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features)
)
def skipUnlessAnyDBFeature(*features):
"""
Skip a test unless a database has any of the named features.
"""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features)
)
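# Decorator usage sketch (the feature name is a real connection.features
# flag; the test body is illustrative):
#
#     @skipUnlessDBFeature('supports_transactions')
#     def test_rollback_behavior(self):
#         ...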
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
    A regular WSGIRequestHandler, except that it doesn't log received
    requests to standard output, so test output isn't cluttered.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(FSFilesHandler, self).__init__()
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super(FSFilesHandler, self).get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(FSFilesHandler, self).__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""
Thread for running a live http server while the tests are running.
"""
def __init__(self, host, possible_ports, static_handler, connections_override=None):
self.host = host
self.port = None
self.possible_ports = possible_ports
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super(LiveServerThread, self).__init__()
def run(self):
"""
Sets up the live server and databases, and then loops over handling
http requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
# Go through the list of possible ports, hoping that we can find
# one that is free to use for the WSGI server.
for index, port in enumerate(self.possible_ports):
try:
self.httpd = WSGIServer(
(self.host, port), QuietWSGIRequestHandler)
except socket.error as e:
if (index + 1 < len(self.possible_ports) and
e.errno == errno.EADDRINUSE):
# This port is already in use, so we go on and try with
# the next one in the list.
continue
else:
                        # Either none of the given ports are free or the error
                        # is something other than "Address already in use". So
                        # we let that error bubble up to the main thread.
raise
else:
# A free port was found.
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
class LiveServerTestCase(TransactionTestCase):
"""
    Does basically the same as TransactionTestCase but also launches a live
    http server in a separate thread so that the tests may use another testing
    framework, such as Selenium, instead of the built-in dummy client.
    Note that it inherits from TransactionTestCase instead of TestCase because
    the threads do not share the same transactions (unless using in-memory
    sqlite) and each thread needs to commit all its transactions so that the
    other thread can see the changes.
"""
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (
cls.server_thread.host, cls.server_thread.port)
@classmethod
def setUpClass(cls):
super(LiveServerTestCase, cls).setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
# Launch the live server's thread
specified_address = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081')
# The specified ports may be of the form '8000-8010,8080,9200-9300'
# i.e. a comma-separated list of ports or ranges of ports, so we break
# it down into a detailed list of all possible ports.
possible_ports = []
try:
host, port_ranges = specified_address.split(':')
for port_range in port_ranges.split(','):
# A port range can be of either form: '8000' or '8000-8010'.
extremes = list(map(int, port_range.split('-')))
assert len(extremes) in [1, 2]
if len(extremes) == 1:
# Port range of the form '8000'
possible_ports.append(extremes[0])
else:
# Port range of the form '8000-8010'
for port in range(extremes[0], extremes[1] + 1):
possible_ports.append(port)
except Exception:
msg = 'Invalid address ("%s") for live server.' % specified_address
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2])
cls.server_thread = LiveServerThread(host, possible_ports,
cls.static_handler,
connections_override=connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _tearDownClassInternal(cls):
        # There may not be a 'server_thread' attribute if setUpClass() raised
        # an exception for some reason.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
cls.server_thread.join()
# Restore sqlite in-memory database connections' non-shareability
for conn in connections.all():
if conn.vendor == 'sqlite' and conn.is_in_memory_db(conn.settings_dict['NAME']):
conn.allow_thread_sharing = False
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
super(LiveServerTestCase, cls).tearDownClass()
|
|
"""
FROWNS LICENSE
Copyright (c) 2001-2003, Brian Kelley
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Brian Kelley nor the names of frowns
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Build a simple Molecule object given the events from the Smiles
# tokenizer.
import string
from pinky.smiles import handler
import weakref
from ..mol import Atom, Bond, Molecule
# BONDLOOKUP maps a bond's text symbol to a tuple of the form:
#   (bondsymbol, bondorder, bondtype, equiv class, stereo)
STEREO_NONE = None
STEREO_UP = "UP"
STEREO_DOWN = "DOWN"
BONDLOOKUP = {'-': ('-', 1, 1, 1, STEREO_NONE),
              '=': ('=', 2, 2, 2, STEREO_NONE),
              '#': ('#', 3, 3, 3, STEREO_NONE),
              '\\': ('\\', 1, 1, 1, STEREO_DOWN),
              '/': ('/', 1, 1, 1, STEREO_UP),
              ':': (':', 1.5, 4, 4, STEREO_NONE),
              }
def get_symbol_aromatic(text):
if text[0] in "cnosp":
return text.upper(), 1
return text, 0
def normalize_closure(text):
if text[:1] == "%":
return int(text[1:])
return int(text)
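# For example, normalize_closure("%12") and normalize_closure("12") both
# return 12; two-digit ring closures are written with a leading '%'.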
implicit_bond = -123
class DummyVFGraph:
def __init__(self):
self.atoms = -1
def InsertNode(self, node):
self.atoms += 1
return self.atoms
def InsertEdge(self, index1, index2, bond):
pass
class BuildMol(handler.TokenHandler):
def begin(self):
self.closures = {}
self.atoms = []
self.bonds = []
self._atom = None
self._prev_atoms = []
# None occurs after a '.'
# implicit_bond means implicit single bond
self._pending_bond = None
def end(self):
if len(self._prev_atoms) >= 2:
raise AssertionError("Missing ')'")
if self._pending_bond not in [implicit_bond, None]:
raise AssertionError("Missing an atom after the bond")
if self.closures:
raise AssertionError("Missing closures for %s" %
(self.closures.keys(),))
self.mol = Molecule(self.atoms, self.bonds)
def add_token(self, field, pos, text):
getattr(self, "do_" + field)(text)
def add_atom(self, atom):
atoms = self.atoms
atom.index = len(atoms)
atoms.append(atom)
if self._pending_bond == implicit_bond:
# Implicit single or aromatic bond
self._pending_bond = Bond()
if self._pending_bond is not None:
bond = self._pending_bond
prev_atom = self._prev_atoms[-1]
bond.atoms[:] = [prev_atom, atom]
##self.mol.add_bond(bond, prev_atom, atom)
bond.atoms = [prev_atom, atom]
atom.bonds.append(bond)
prev_atom.bonds.append(bond)
atom.oatoms.append(prev_atom)
prev_atom.oatoms.append(atom)
self.bonds.append(bond)
self._pending_bond = implicit_bond
if not self._prev_atoms:
self._prev_atoms.append(atom)
else:
self._prev_atoms[-1] = atom
#self.mol.atoms.append(atom)
def do_raw_atom(self, text):
atom = Atom()
symbol, atom.aromatic = get_symbol_aromatic(text)
atom.set_symbol(symbol)
self.add_atom(atom)
def do_open_bracket(self, text):
self._atom = Atom()
self._atom.has_explicit_hcount = True
def do_weight(self, text):
self._atom.weight = int(text)
def do_element(self, text):
symbol, self._atom.aromatic = get_symbol_aromatic(text)
self._atom.set_symbol(symbol)
def do_chiral_count(self, text):
#print "setting chirality", self._atom, int(text[1:])
self._atom.chirality = int(text[1:])
def do_chiral_named(self, text):
self._atom.chiral_class = text[1:3]
self._atom.chirality = int(text[3:])
def do_chiral_symbols(self, text):
self._atom.chiral_class = len(text)
def do_hcount(self, text):
if text == "H":
self._atom.explicit_hcount = 1
else:
self._atom.explicit_hcount = int(text[1:])
def do_positive_count(self, text):
self._atom.charge = int(text[1:])
def do_positive_symbols(self, text):
self._atom.charge = len(text)
def do_negative_count(self, text):
self._atom.charge = -int(text[1:])
def do_negative_symbols(self, text):
self._atom.charge = -len(text)
def do_close_bracket(self, text):
self.add_atom(self._atom)
self._atom = None
def do_bond(self, text):
assert self._pending_bond in (implicit_bond, None)
symbol, bondorder, bondtype, equiv_class, stereo = BONDLOOKUP[text]
        # The bond can't legitimately come in as aromatic, so guard against
        # an explicit ':' symbol reaching us.
        if bondtype == 4:
            assert 0, "Bonds shouldn't come in as ':'"
fixed = 0
else:
fixed = 1
bond = Bond(text, bondorder, bondtype, fixed, stereo)
bond.equiv_class = equiv_class
self._pending_bond = bond
def do_dot(self, text):
assert self._pending_bond in (implicit_bond, None)
self._pending_bond = None
def do_closure(self, text):
num = normalize_closure(text)
if num in self.closures:
prev_atom, bond = self.closures[num]
del self.closures[num]
assert self._pending_bond is not None, "Can't happen"
if self._pending_bond is not implicit_bond and \
bond is not implicit_bond and \
                self._pending_bond.symbol != "-":  # the toolkit treats an explicit '-' as compatible
# need to verify they are compatible
prev_symbol = bond.symbol
symbol = self._pending_bond.symbol
if (prev_symbol == symbol) or \
(prev_symbol == "/" and symbol == "\\") or \
(prev_symbol == "\\" and symbol == "/"):
pass
else:
raise AssertionError("bond types don't match")
elif bond is implicit_bond and self._pending_bond is not implicit_bond:
# see if one of the bonds is not implicit and keep it
bond = self._pending_bond
elif bond is implicit_bond:
# both are implicit so make a new one
bond = Bond()
bond._closure = 1
atom = self._prev_atoms[-1]
if prev_atom is atom:
raise AssertionError("cannot close a ring with itself")
bond.atoms[:] = [prev_atom, atom]
prev_atom._closure = 1
atom._closure = 1
##self.mol.add_bond(bond, prev_atom, atom)
bond.atoms = [prev_atom, atom]
atom.bonds.append(bond)
prev_atom.bonds.append(bond)
atom.oatoms.append(prev_atom)
prev_atom.oatoms.append(atom)
self.bonds.append(bond)
else:
self.closures[num] = (self._prev_atoms[-1], self._pending_bond)
self._pending_bond = implicit_bond
def do_open_branch(self, text):
self._prev_atoms.append(self._prev_atoms[-1])
def do_close_branch(self, text):
self._prev_atoms.pop()
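# A hedged driving sketch: BuildMol is a handler.TokenHandler, so a tokenizer
# is expected to call begin()/add_token()/end(). Building C=O by hand (Atom,
# Bond and Molecule come from ..mol):
#
#     h = BuildMol()
#     h.begin()
#     h.add_token('raw_atom', 0, 'C')
#     h.add_token('bond', 1, '=')
#     h.add_token('raw_atom', 2, 'O')
#     h.end()
#     mol = h.mol  # Molecule with two atoms joined by a double bond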
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
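# Sketch of the two byte-order helpers on a little-endian machine:
# bufreverse() byte-swaps each 32-bit word, wordreverse() reverses the
# order of the words:
#
#     bufreverse('abcdefgh')   # -> 'dcbahgfe'
#     wordreverse('abcdefgh')  # -> 'efghabcd'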
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
        # the first 76 bytes of the 80-byte block header do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
        # pre-hash the first 76 bytes of the block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
            # hash the final 4 bytes, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 4892
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
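    # A minimal config file sketch for the key=value parser above (all
    # values illustrative):
    #
    #     host=127.0.0.1
    #     port=8332
    #     rpcuser=someuser
    #     rpcpass=somepass
    #     threads=2
    #     scantime=30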
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
from collections import defaultdict
import hashlib
from couchdbkit import ResourceConflict
from casexml.apps.stock.consumption import compute_consumption_or_default
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.parsing import json_format_datetime
from casexml.apps.case.exceptions import BadStateException, RestoreException
from casexml.apps.phone.models import SyncLog, CaseState
import logging
from dimagi.utils.couch.database import get_db, get_safe_write_kwargs
from casexml.apps.phone import xml
from datetime import datetime
from casexml.apps.stock.const import COMMTRACK_REPORT_XMLNS
from casexml.apps.stock.models import StockTransaction
from dimagi.utils.couch.cache.cache_core import get_redis_default_cache
from couchforms.xml import (
ResponseNature,
get_response_element,
get_simple_response_xml,
)
from casexml.apps.case.xml import check_version, V1
from casexml.apps.phone.fixtures import generator
from django.http import HttpResponse, Http404
from casexml.apps.phone.checksum import CaseStateHash
from no_exceptions.exceptions import HttpException
class StockSettings(object):
def __init__(self, section_to_consumption_types=None, consumption_config=None,
default_product_list=None, force_consumption_case_filter=None):
"""
        section_to_consumption_types should be a dict mapping stock section-ids
        to corresponding consumption section-ids. Any stock section not found in
        the dict will not have consumption data set in the restore.
"""
self.section_to_consumption_types = section_to_consumption_types or {}
self.consumption_config = consumption_config
self.default_product_list = default_product_list or []
self.force_consumption_case_filter = force_consumption_case_filter or (lambda case: False)
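# A hedged construction sketch (section ids and product ids illustrative):
#
#     stock_settings = StockSettings(
#         section_to_consumption_types={'stock': 'consumption'},
#         default_product_list=['p1', 'p2'],
#     )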
class RestoreConfig(object):
"""
A collection of attributes associated with an OTA restore
"""
def __init__(self, user, restore_id="", version=V1, state_hash="",
caching_enabled=False, items=False, stock_settings=None):
self.user = user
self.restore_id = restore_id
self.version = version
self.state_hash = state_hash
self.caching_enabled = caching_enabled
self.cache = get_redis_default_cache()
self.items = items
self.stock_settings = stock_settings or StockSettings()
@property
@memoized
def sync_log(self):
if self.restore_id:
sync_log = SyncLog.get(self.restore_id)
if sync_log.user_id == self.user.user_id \
and sync_log.doc_type == 'SyncLog':
return sync_log
else:
raise HttpException(412)
else:
return None
def validate(self):
# runs validation checks, raises exceptions if anything is amiss
check_version(self.version)
if self.sync_log and self.state_hash:
parsed_hash = CaseStateHash.parse(self.state_hash)
if self.sync_log.get_state_hash() != parsed_hash:
raise BadStateException(expected=self.sync_log.get_state_hash(),
actual=parsed_hash,
case_ids=self.sync_log.get_footprint_of_cases_on_phone())
def get_stock_payload(self, syncop):
cases = [e.case for e in syncop.actual_cases_to_sync]
from lxml.builder import ElementMaker
E = ElementMaker(namespace=COMMTRACK_REPORT_XMLNS)
def entry_xml(id, quantity):
return E.entry(
id=id,
quantity=str(int(quantity)),
)
def transaction_to_xml(trans):
return entry_xml(trans.product_id, trans.stock_on_hand)
def consumption_entry(case_id, product_id, section_id):
consumption_value = compute_consumption_or_default(
case_id,
product_id,
datetime.utcnow(),
section_id,
self.stock_settings.consumption_config
)
if consumption_value is not None:
return entry_xml(product_id, consumption_value)
def _unique_products(stock_transaction_queryset):
return sorted(stock_transaction_queryset.values_list('product_id', flat=True).distinct())
for commtrack_case in cases:
relevant_sections = sorted(StockTransaction.objects.filter(
case_id=commtrack_case._id).values_list('section_id', flat=True).distinct())
section_product_map = defaultdict(lambda: [])
section_timestamp_map = defaultdict(lambda: json_format_datetime(datetime.utcnow()))
for section_id in relevant_sections:
relevant_reports = StockTransaction.objects.filter(case_id=commtrack_case._id, section_id=section_id)
product_ids = _unique_products(relevant_reports)
transactions = [StockTransaction.latest(commtrack_case._id, section_id, p) for p in product_ids]
as_of = json_format_datetime(max(txn.report.date for txn in transactions))
section_product_map[section_id] = product_ids
section_timestamp_map[section_id] = as_of
yield E.balance(*(transaction_to_xml(e) for e in transactions),
**{'entity-id': commtrack_case._id, 'date': as_of, 'section-id': section_id})
for section_id, consumption_section_id in self.stock_settings.section_to_consumption_types.items():
if (section_id in relevant_sections or
self.stock_settings.force_consumption_case_filter(commtrack_case)):
consumption_product_ids = self.stock_settings.default_product_list \
if self.stock_settings.default_product_list \
else section_product_map[section_id]
consumption_entries = filter(lambda e: e is not None, [
consumption_entry(commtrack_case._id, p, section_id)
for p in consumption_product_ids
])
if consumption_entries:
yield E.balance(
*consumption_entries,
**{
'entity-id': commtrack_case._id,
'date': section_timestamp_map[section_id],
'section-id': consumption_section_id,
}
)
def get_payload(self):
user = self.user
last_sync = self.sync_log
self.validate()
cached_payload = self.get_cached_payload()
if cached_payload:
return cached_payload
sync_operation = user.get_case_updates(last_sync)
case_xml_elements = [xml.get_case_element(op.case, op.required_updates, self.version)
for op in sync_operation.actual_cases_to_sync]
commtrack_elements = self.get_stock_payload(sync_operation)
last_seq = str(get_db().info()["update_seq"])
# create a sync log for this
previous_log_id = last_sync.get_id if last_sync else None
synclog = SyncLog(user_id=user.user_id, last_seq=last_seq,
owner_ids_on_phone=user.get_owner_ids(),
date=datetime.utcnow(), previous_log_id=previous_log_id,
cases_on_phone=[CaseState.from_case(c) for c in \
sync_operation.actual_owned_cases],
dependent_cases_on_phone=[CaseState.from_case(c) for c in \
sync_operation.actual_extended_cases])
synclog.save(**get_safe_write_kwargs())
# start with standard response
response = get_response_element(
"Successfully restored account %s!" % user.username,
ResponseNature.OTA_RESTORE_SUCCESS)
# add sync token info
response.append(xml.get_sync_element(synclog.get_id))
# registration block
response.append(xml.get_registration_element(user))
# fixture block
for fixture in generator.get_fixtures(user, self.version, last_sync):
response.append(fixture)
# case blocks
for case_elem in case_xml_elements:
response.append(case_elem)
for ct_elem in commtrack_elements:
response.append(ct_elem)
if self.items:
response.attrib['items'] = '%d' % len(response.getchildren())
resp = xml.tostring(response)
self.set_cached_payload_if_enabled(resp)
return resp
def get_response(self):
try:
return HttpResponse(self.get_payload(), mimetype="text/xml")
except RestoreException, e:
logging.exception("%s error during restore submitted by %s: %s" %
(type(e).__name__, self.user.username, str(e)))
response = get_simple_response_xml(
e.message,
ResponseNature.OTA_RESTORE_ERROR
)
return HttpResponse(response, mimetype="text/xml",
status=412) # precondition failed
def _initial_cache_key(self):
return hashlib.md5('ota-restore-{user}-{version}'.format(
user=self.user.user_id,
version=self.version,
)).hexdigest()
def get_cached_payload(self):
if self.caching_enabled:
if self.sync_log:
return self.sync_log.get_cached_payload(self.version)
else:
return self.cache.get(self._initial_cache_key())
def set_cached_payload_if_enabled(self, resp):
if self.caching_enabled:
if self.sync_log:
try:
self.sync_log.set_cached_payload(resp, self.version)
except ResourceConflict:
                    # If one sync takes a long time and another one updates
                    # the sync log, this can fail. In that event, don't fail
                    # to respond, since it's just a caching optimization.
pass
else:
self.cache.set(self._initial_cache_key(), resp, 60*60)
def generate_restore_payload(user, restore_id="", version=V1, state_hash="",
items=False):
"""
    Gets an XML payload suitable for OTA restore. If you need to do something
    other than find all cases matching user_id = user.user_id, pass in a user
    object that overrides the get_case_updates() method; it should match the
    signature of models.user.get_case_updates().
    user: who the payload is for; must implement get_case_updates
    restore_id: sync token
    version: the CommCare version
    returns: the XML payload of the sync operation
"""
config = RestoreConfig(user, restore_id, version, state_hash, items=items)
return config.get_payload()
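# Sketch: given a CommCare user object implementing get_case_updates(),
# an initial restore payload could be generated as:
#
#     payload = generate_restore_payload(user)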
def generate_restore_response(user, restore_id="", version=V1, state_hash="",
items=False):
config = RestoreConfig(user, restore_id, version, state_hash, items=items)
return config.get_response()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DenseTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testDenseProperties(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(dense.units, 2)
self.assertEqual(dense.activation, nn_ops.relu)
self.assertEqual(dense.kernel_regularizer, None)
self.assertEqual(dense.bias_regularizer, None)
self.assertEqual(dense.activity_regularizer, None)
self.assertEqual(dense.use_bias, True)
# Test auto-naming
dense = core_layers.Dense(2, activation=nn_ops.relu)
dense.apply(random_ops.random_uniform((5, 2)))
self.assertEqual(dense.name, 'dense_1')
dense = core_layers.Dense(2, activation=nn_ops.relu)
dense.apply(random_ops.random_uniform((5, 2)))
self.assertEqual(dense.name, 'dense_2')
def testVariableInput(self):
with self.cached_session():
v = variable_scope.get_variable(
'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
x = core_layers.Dense(1)(v)
variables.global_variables_initializer().run()
self.assertAllEqual(x.eval(), [[0.0]])
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testCall(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 4), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 2], outputs.get_shape().as_list())
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables,
[dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias.name, 'my_dense/bias:0')
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNoEagerLeak(self):
# Tests that repeatedly constructing and building a Layer does not leak
# Python objects.
inputs = random_ops.random_uniform((5, 4), seed=1)
core_layers.Dense(5)(inputs)
core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')(inputs)
@test_util.run_in_graph_and_eager_modes
def testCallTensorDot(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 4, 3), seed=1)
outputs = dense(inputs)
self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())
@test_util.run_in_graph_and_eager_modes
def testNoBias(self):
dense = core_layers.Dense(2, use_bias=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel])
self.assertListEqual(dense.trainable_variables, [dense.kernel])
self.assertListEqual(dense.non_trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias, None)
@test_util.run_in_graph_and_eager_modes
def testNonTrainable(self):
dense = core_layers.Dense(2, trainable=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables,
[dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables, [])
if not context.executing_eagerly():
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)
@test_util.run_in_graph_and_eager_modes
def testOutputShape(self):
dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 7])
inputs = random_ops.random_uniform((5, 2, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])
inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
def testCallOnPlaceHolder(self):
inputs = array_ops.placeholder(dtype=dtypes.float32)
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
@test_util.run_in_graph_and_eager_modes
def testActivation(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
if not context.executing_eagerly():
self.assertEqual(outputs.op.name, 'dense1/Relu')
dense = core_layers.Dense(2, name='dense2')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
if not context.executing_eagerly():
self.assertEqual(outputs.op.name, 'dense2/BiasAdd')
def testActivityRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', activity_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
def testKernelRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', kernel_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in dense.variables])
self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))
def testKernelRegularizerWithReuse(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testBiasRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.evaluate([v.initializer for v in dense.variables])
self.assertAllEqual(self.evaluate(dense.losses), self.evaluate(loss_keys))
def testFunctionalDense(self):
with self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = core_layers.dense(
inputs, 2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(outputs.op.name, 'my_dense/Relu')
def testFunctionalDenseTwice(self):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
vars1 = _get_variable_dict_from_varstore().values()
core_layers.dense(inputs, 2)
vars2 = _get_variable_dict_from_varstore().values()
self.assertEqual(len(vars1), 2)
self.assertEqual(len(vars2), 4)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuse(self):
with self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = variables.trainable_variables()
core_layers.dense(inputs, 2, name='my_dense', reuse=True)
vars2 = variables.trainable_variables()
self.assertEqual(vars1, vars2)
# TODO(alive): get this to work in eager mode.
def testFunctionalDenseTwiceReuseFromScope(self):
with self.cached_session():
with variable_scope.variable_scope('scope'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = variables.trainable_variables()
with variable_scope.variable_scope('scope', reuse=True):
core_layers.dense(inputs, 2, name='my_dense')
vars2 = variables.trainable_variables()
self.assertEqual(vars1, vars2)
def testFunctionalDenseInitializerFromScope(self):
with variable_scope.variable_scope(
'scope',
initializer=init_ops.ones_initializer()), self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
variables.global_variables_initializer().run()
weights = _get_variable_dict_from_varstore()
self.assertEqual(len(weights), 2)
# Check that the matrix weights got initialized to ones (from scope).
self.assertAllClose(weights['scope/dense/kernel'].read_value().eval(),
np.ones((3, 2)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights['scope/dense/bias'].read_value().eval(),
np.zeros((2)))
def testEagerExecution(self):
with context.eager_mode():
container = variable_scope.EagerVariableStore()
x = constant_op.constant([[2.0]])
with container.as_default():
y = core_layers.dense(
x, 1, name='my_dense',
kernel_initializer=init_ops.ones_initializer())
self.assertAllEqual(y, [[2.0]])
self.assertEqual(len(container.variables()), 2)
# Recreate the layer to test reuse.
with container.as_default():
core_layers.dense(
x, 1, name='my_dense',
kernel_initializer=init_ops.ones_initializer())
self.assertEqual(len(container.variables()), 2)
def testFunctionalDenseWithCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope('test', custom_getter=custom_getter):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
self.assertEqual(called[0], 2)
def testFunctionalDenseInScope(self):
with self.cached_session():
with variable_scope.variable_scope('test'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
var_dict = _get_variable_dict_from_varstore()
var_key = 'test/my_dense/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
with variable_scope.variable_scope('test1') as scope:
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name=scope)
var_dict = _get_variable_dict_from_varstore()
var_key = 'test1/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
with variable_scope.variable_scope('test2'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
var_dict = _get_variable_dict_from_varstore()
var_key = 'test2/dense/kernel'
self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
@test_util.run_in_graph_and_eager_modes
def testComputeOutputShape(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
ts = tensor_shape.TensorShape
# pylint: disable=protected-access
with self.assertRaises(ValueError):
dense.compute_output_shape(ts(None))
with self.assertRaises(ValueError):
dense.compute_output_shape(ts([]))
with self.assertRaises(ValueError):
dense.compute_output_shape(ts([1]))
self.assertEqual(
[None, 2],
dense.compute_output_shape((None, 3)).as_list())
self.assertEqual(
[None, 2],
dense.compute_output_shape(ts([None, 3])).as_list())
self.assertEqual(
[None, 4, 2],
dense.compute_output_shape(ts([None, 4, 3])).as_list())
# pylint: enable=protected-access
@test_util.run_in_graph_and_eager_modes
def testConstraints(self):
k_constraint = lambda x: x / math_ops.reduce_sum(x)
b_constraint = lambda x: x / math_ops.reduce_max(x)
dense = core_layers.Dense(2,
kernel_constraint=k_constraint,
bias_constraint=b_constraint)
inputs = random_ops.random_uniform((5, 3), seed=1)
dense(inputs)
self.assertEqual(dense.kernel_constraint, k_constraint)
self.assertEqual(dense.bias_constraint, b_constraint)
def _get_variable_dict_from_varstore():
var_dict = variable_scope._get_default_variable_store()._vars # pylint: disable=protected-access
sorted_var_dict = collections.OrderedDict(
sorted(var_dict.items(), key=lambda t: t[0]))
return sorted_var_dict
class DropoutTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testDropoutProperties(self):
dp = core_layers.Dropout(0.5, name='dropout')
self.assertEqual(dp.rate, 0.5)
self.assertEqual(dp.noise_shape, None)
dp.apply(array_ops.ones(()))
self.assertEqual(dp.name, 'dropout')
@test_util.run_in_graph_and_eager_modes
def testBooleanLearningPhase(self):
dp = core_layers.Dropout(0.5)
inputs = array_ops.ones((5, 3))
dropped = dp.apply(inputs, training=True)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
dropped = dp.apply(inputs, training=False)
np_output = self.evaluate(dropped)
self.assertAllClose(np.ones((5, 3)), np_output)
def testDynamicLearningPhase(self):
with self.cached_session() as sess:
dp = core_layers.Dropout(0.5, seed=1)
inputs = array_ops.ones((5, 5))
training = array_ops.placeholder(dtype='bool')
dropped = dp.apply(inputs, training=training)
self.evaluate(variables.global_variables_initializer())
np_output = sess.run(dropped, feed_dict={training: True})
self.assertAlmostEqual(0., np_output.min())
np_output = sess.run(dropped, feed_dict={training: False})
self.assertAllClose(np.ones((5, 5)), np_output)
@test_util.run_in_graph_and_eager_modes
def testDynamicNoiseShape(self):
inputs = array_ops.ones((5, 3, 2))
noise_shape = [None, 1, None]
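    # None entries in noise_shape fall back to the input's dynamic dimensions;
    # the 1 broadcasts the dropout mask along axis 1, so every step along that
    # axis is kept or dropped identically (verified below).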
dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
dropped = dp.apply(inputs, training=True)
self.evaluate(variables.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])
def testCustomNoiseShape(self):
inputs = array_ops.ones((5, 3, 2))
noise_shape = [5, 1, 2]
dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
dropped = dp.apply(inputs, training=True)
self.evaluate(variables.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])
def testFunctionalDropout(self):
with self.cached_session():
inputs = array_ops.ones((5, 5))
dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1)
variables.global_variables_initializer().run()
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1)
np_output = self.evaluate(dropped)
self.assertAllClose(np.ones((5, 5)), np_output)
def testDynamicRate(self):
with self.cached_session() as sess:
rate = array_ops.placeholder(dtype='float32', name='rate')
dp = core_layers.Dropout(rate, name='dropout')
inputs = array_ops.ones((5, 5))
dropped = dp.apply(inputs, training=True)
sess.run(variables.global_variables_initializer())
np_output = sess.run(dropped, feed_dict={rate: 0.5})
self.assertAlmostEqual(0., np_output.min())
np_output = sess.run(dropped, feed_dict={rate: 0.0})
self.assertAllClose(np.ones((5, 5)), np_output)
class FlattenTest(test.TestCase):
def testCreateFlatten(self):
with self.cached_session() as sess:
x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((3, 2, 3))})
self.assertEqual(list(np_output.shape), [3, 6])
self.assertEqual(y.get_shape().as_list(), [None, 6])
x = array_ops.placeholder(shape=(1, 2, 3, 2), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((1, 2, 3, 2))})
self.assertEqual(list(np_output.shape), [1, 12])
self.assertEqual(y.get_shape().as_list(), [1, 12])
def testComputeShape(self):
shape = core_layers.Flatten().compute_output_shape((1, 2, 3, 2))
self.assertEqual(shape.as_list(), [1, 12])
shape = core_layers.Flatten().compute_output_shape((None, 3, 2))
self.assertEqual(shape.as_list(), [None, 6])
shape = core_layers.Flatten().compute_output_shape((None, 3, None))
self.assertEqual(shape.as_list(), [None, None])
def testFunctionalFlatten(self):
x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
y = core_layers.flatten(x, name='flatten')
self.assertEqual(y.get_shape().as_list(), [None, 6])
def testFlattenValueError(self):
x = array_ops.placeholder(shape=(None,), dtype='float32')
with self.assertRaises(ValueError):
core_layers.Flatten()(x)
def testFlattenUnknownAxes(self):
with self.cached_session() as sess:
x = array_ops.placeholder(shape=(5, None, None), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((5, 2, 3))})
self.assertEqual(list(np_output.shape), [5, 6])
self.assertEqual(y.get_shape().as_list(), [5, None])
x = array_ops.placeholder(shape=(5, None, 2), dtype='float32')
y = core_layers.Flatten()(x)
np_output = sess.run(y, feed_dict={x: np.zeros((5, 3, 2))})
self.assertEqual(list(np_output.shape), [5, 6])
self.assertEqual(y.get_shape().as_list(), [5, None])
if __name__ == '__main__':
test.main()
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
from io import BytesIO
from codecs import encode
import http.client
import urllib.parse
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
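# Illustrative sketch (not exercised by the test run): deser_uint256 rebuilds a
# 256-bit little-endian integer from eight 32-bit words, so
#   deser_uint256(BytesIO(b'\x01' + b'\x00' * 31)) == 1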
# perform a simple http get call; optionally returns the raw response object
def http_get_call(host, port, path, response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read().decode('utf-8')
# perform a simple http post call with a request body; optionally returns the raw response object
def http_post_call(host, port, path, requestdata = '', response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('POST', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
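# Hypothetical usage of the helpers above (host/port values are illustrative):
#   body = http_get_call('127.0.0.1', 18332, '/rest/chaininfo.json')
#   resp = http_post_call('127.0.0.1', 18332, '/rest/getutxos.json', '{}', True)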
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
        return  # TODO: remainder of this test is currently disabled
url = urllib.parse.urlparse(self.nodes[0].url)
print("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
        #######################################
        # GETUTXOS: query an unspent outpoint #
        #######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
        #################################################
        # GETUTXOS: now query an already spent outpoint #
        #################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
        #make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
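        # binary getutxos request body: a 1-byte check-mempool flag (\x01),
        # a compact-size outpoint count (\x02), then per outpoint a 32-byte
        # txid followed by a 4-byte little-endian output index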
binaryRequest = b'\x01\x02'
binaryRequest += hex_str_to_bytes(txid)
binaryRequest += pack("i", n)
binaryRequest += hex_str_to_bytes(vintx)
binaryRequest += pack("i", 0)
bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = BytesIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)
        assert_equal(bb_hash, hashFromBinResponse) #check that getutxos reported the expected chain tip
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #no outpoint should be found: the tx is only in the mempool and /checkmempool was not used
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #the outpoint should be found because the tx is in the mempool and /checkmempool was used
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
        assert_equal(response.status, 500) #must be a 500 because we send an invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
        assert_equal(response.status, 500) #must be a 500 because we send an invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
        assert_equal(response.status, 500) #must be a 500 because we send an invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 500) #must be a 500 because we exceed the outpoint limit
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 200) #must be a 200 because 15 outpoints are within the limit
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str, parse_float=Decimal)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
        # check that our submitted transactions are in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj, True)
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
            if 'coinbase' not in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
        #test /rest/chaininfo.json (check bestblockhash against RPC)
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
RESTTest ().main ()
# orm/interfaces.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines some key base classes prominent within the internals,
as well as the now-deprecated ORM extension classes.
Other than the deprecated extensions, this module and the
classes within are mostly private, though some attributes
are exposed when inspecting mappings.
"""
from __future__ import absolute_import
from .. import util
from ..sql import operators
from .base import (ONETOMANY, MANYTOONE, MANYTOMANY,
EXT_CONTINUE, EXT_STOP, NOT_EXTENSION)
from .base import (InspectionAttr, InspectionAttrInfo,
                   _MappedAttribute)
import collections
from .. import inspect
# imported later
MapperExtension = SessionExtension = AttributeExtension = None
__all__ = (
'AttributeExtension',
'EXT_CONTINUE',
'EXT_STOP',
'ONETOMANY',
'MANYTOMANY',
'MANYTOONE',
'NOT_EXTENSION',
'LoaderStrategy',
'MapperExtension',
'MapperOption',
'MapperProperty',
'PropComparator',
'SessionExtension',
'StrategizedProperty',
)
class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots):
"""Represent a particular class attribute mapped by :class:`.Mapper`.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
__slots__ = (
'_configure_started', '_configure_finished', 'parent', 'key',
'info'
)
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
The collection typically only applies to a RelationshipProperty.
"""
is_property = True
"""Part of the InspectionAttr interface; states this object is a
mapper property.
"""
def _memoized_attr_info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`.relationship`, or :func:`.composite`
functions.
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
def setup(self, context, entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
def create_row_processor(self, context, path,
mapper, result, adapter, populators):
"""Produce row processing functions and append to the given
set of populators lists.
"""
def cascade_iterator(self, type_, state, visited_instances=None,
halt_on=None):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
        Return an iterator of 3-tuples: (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
This method typically only applies to RelationshipProperty.
"""
return iter(())
def set_parent(self, parent, init):
"""Set the parent mapper that references this MapperProperty.
This method is overridden by some subclasses to perform extra
setup when the mapper is first known.
"""
self.parent = parent
def instrument_class(self, mapper):
"""Hook called by the Mapper to the property to initiate
instrumentation of the class attribute managed by this
MapperProperty.
The MapperProperty here will typically call out to the
attributes module to set up an InstrumentedAttribute.
This step is the first of two steps to set up an InstrumentedAttribute,
and is called early in the mapper setup process.
The second step is typically the init_class_attribute step,
called from StrategizedProperty via the post_instrument_class()
hook. This step assigns additional state to the InstrumentedAttribute
(specifically the "impl") which has been determined after the
MapperProperty has determined what kind of persistence
management it needs to do (e.g. scalar, object, collection, etc).
"""
def __init__(self):
self._configure_started = False
self._configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
The given Mapper is the Mapper invoking the operation, which
may not be the same Mapper as self.parent in an inheritance
scenario; however, Mapper will always at least be a sub-mapper of
self.parent.
This method is typically used by StrategizedProperty, which delegates
it to LoaderStrategy.init_class_attribute() to perform final setup
on the class-bound InstrumentedAttribute.
"""
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive, _resolve_conflict_map):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object.
"""
def __repr__(self):
return '<%s at 0x%x; %s>' % (
self.__class__.__name__,
id(self), getattr(self, 'key', 'no key'))
class PropComparator(operators.ColumnOperators):
"""Defines SQL operators for :class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \\
ColumnProperty,\\
CompositeProperty,\\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
See also:
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = 'prop', 'property', '_parententity', '_adapt_to_entity'
def __init__(self, prop, parentmapper, adapt_to_entity=None):
self.prop = self.property = prop
self._parententity = adapt_to_entity or parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def _query_clause_element(self):
return self.__clause_element__()
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parententity, adapt_to_entity)
@property
def _parentmapper(self):
"""legacy; this is renamed to _parententity to be
compatible with QueryableAttribute."""
return inspect(self._parententity).mapper
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
"""Redefine this object in terms of a polymorphic subclass.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
"""
return self.operate(PropComparator.of_type_op, class_)
def any(self, criterion=None, **kwargs):
"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
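    # Hedged usage sketch for any()/has(); a typical User/Address mapping is
    # assumed here and not defined in this module:
    #   session.query(User).filter(User.addresses.any(Address.email == 'bar'))
    #   session.query(Address).filter(Address.user.has(name='ed'))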
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
The mechanics of StrategizedProperty are used for every Query
invocation for every mapped attribute participating in that Query,
to determine first how the attribute will be rendered in SQL
and secondly how the attribute will retrieve a value from a result
row and apply it to a mapped object. The routines here are very
performance-critical.
"""
__slots__ = '_strategies', 'strategy'
strategy_wildcard_key = None
def _get_context_loader(self, context, path):
load = None
# use EntityRegistry.__getitem__()->PropRegistry here so
# that the path is stated in terms of our base
search_path = dict.__getitem__(path, self)
# search among: exact match, "attr.*", "default" strategy
# if any.
for path_key in (
search_path._loader_key,
search_path._wildcard_path_loader_key,
search_path._default_path_loader_key
):
if path_key in context.attributes:
load = context.attributes[path_key]
break
return load
def _get_strategy(self, key):
try:
return self._strategies[key]
except KeyError:
cls = self._strategy_lookup(*key)
self._strategies[key] = self._strategies[
cls] = strategy = cls(self)
return strategy
def _get_strategy_by_cls(self, cls):
return self._get_strategy(cls._strategy_keys[0])
def setup(
self, context, entity, path, adapter, **kwargs):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.setup_query(context, entity, path, loader, adapter, **kwargs)
def create_row_processor(
self, context, path, mapper,
result, adapter, populators):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.create_row_processor(
context, path, loader,
mapper, result, adapter, populators)
def do_init(self):
self._strategies = {}
self.strategy = self._get_strategy_by_cls(self.strategy_class)
def post_instrument_class(self, mapper):
if not self.parent.non_primary and \
not mapper.class_manager._attr_has_impl(self.key):
self.strategy.init_class_attribute(mapper)
_all_strategies = collections.defaultdict(dict)
@classmethod
def strategy_for(cls, **kw):
def decorate(dec_cls):
# ensure each subclass of the strategy has its
# own _strategy_keys collection
if '_strategy_keys' not in dec_cls.__dict__:
dec_cls._strategy_keys = []
key = tuple(sorted(kw.items()))
cls._all_strategies[cls][key] = dec_cls
dec_cls._strategy_keys.append(key)
return dec_cls
return decorate
@classmethod
def _strategy_lookup(cls, *key):
for prop_cls in cls.__mro__:
if prop_cls in cls._all_strategies:
strategies = cls._all_strategies[prop_cls]
try:
return strategies[key]
except KeyError:
pass
raise Exception("can't locate strategy for %s %s" % (cls, key))
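# Hedged sketch of how StrategizedProperty.strategy_for is used elsewhere in
# the ORM to register loader strategies (names below are illustrative only):
#   @SomeProperty.strategy_for(lazy="select")
#   class SelectLoader(LoaderStrategy):
#       ...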
class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def process_query(self, query):
"""Apply a modification to the given :class:`.Query`."""
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
This is typically used during a lazy load or scalar refresh
operation to propagate options stated in the original Query to the
new Query being used for the load. It occurs for those options that
specify propagate_to_loaders=True.
"""
self.process_query(query)
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
For example, simple column attributes will add their represented
column to the list of selected columns, a joined eager loader
may establish join clauses to add to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
__slots__ = 'parent_property', 'is_class_level', 'parent', 'key'
def __init__(self, parent):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
def init_class_attribute(self, mapper):
pass
def setup_query(self, context, entity, path, loadopt, adapter, **kwargs):
"""Establish column and other state for a given QueryContext.
This method fulfills the contract specified by MapperProperty.setup().
StrategizedProperty delegates its setup() method
directly to this method.
"""
def create_row_processor(self, context, path, loadopt, mapper,
result, adapter, populators):
"""Establish row processing functions for a given QueryContext.
This method fulfills the contract specified by
MapperProperty.create_row_processor().
StrategizedProperty delegates its create_row_processor() method
directly to this method.
"""
def __str__(self):
return str(self.parent_property)
#!/usr/bin/env python
from __future__ import print_function
################################################################################
#
# graph.py
#
#
# Copyright (c) 10/9/2009 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
"""
graph.py
provides support for directed acyclic graphs (DAGs)
with topological_sort
"""
import sys, re, os
# use simplejson in place of json for python < 2.6
try:
import json
except ImportError:
import simplejson
json = simplejson
from collections import defaultdict
from itertools import chain
from .print_dependencies import *
import tempfile
import subprocess
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# class node
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
class graph_error(Exception):
def __init__(self, message):
self.message = message
def __str__ (self):
return self.message
class error_duplicate_node_name(graph_error):
pass
class node (object):
"""
node
    designed for directed acyclic graphs but can hold anything
contains lists of nodes and
dictionary to look up node name from node
"""
_all_nodes = list()
_name_to_node = dict()
_index_to_node = dict()
_global_node_index = 0
_one_to_one = 0
_many_to_many = 1
_one_to_many = 2
_many_to_one = 3
@staticmethod
def _get_leaves ():
for n in node._all_nodes:
if len(n._inward) == 0:
yield n
@staticmethod
def _get_roots ():
for n in node._all_nodes:
if len(n._outward) == 0:
yield n
@staticmethod
def _count_nodes ():
        return len(node._all_nodes)
@staticmethod
def _dump_tree_as_str ():
"""
dumps entire tree
"""
return ("%d nodes " % node._count_nodes()) + "\n" + \
"\n".join([x._fullstr() for x in node._all_nodes])
@staticmethod
def _lookup_node_from_name (name):
return node._name_to_node[name]
@staticmethod
def _lookup_node_from_index (index):
return node._index_to_node[index]
@staticmethod
def _is_node (name):
return name in node._name_to_node
#_____________________________________________________________________________________
# init
#_____________________________________________________________________________________
def __init__ (self, name, **args):
"""
each node has
_name
_inward : lists of incoming edges
_outward: lists of outgoing edges
"""
#
# make sure node name is unique
#
#if name in node._name_to_node:
# raise error_duplicate_node_name("[%s] has already been added" % name)
self.__dict__.update(args)
self._inward = list()
self._outward= list()
self.args = args
self._name = name
self._signal = False
self._node_index = node._global_node_index
node._global_node_index += 1
#
# for looking up node for name
#
node._all_nodes.append(self)
node._name_to_node[self._name] = self
node._index_to_node[self._node_index] = self
#_____________________________________________________________________________________
# _add_child
#_____________________________________________________________________________________
def _add_child(self, child):
"""
connect edges
"""
# do not add duplicates
if child in self._outward:
return child
self._outward.append(child)
child._inward.append(self)
return child
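    #   e.g. a._add_child(b) adds the edge a -> b: b is appended to a._outward
    #   and a to b._inward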
#_____________________________________________________________________________________
# _remove_child
#_____________________________________________________________________________________
def _remove_child(self, child):
"""
disconnect edges
"""
if child in self._outward:
self._outward.remove(child)
if self in child._inward:
child._inward.remove(self)
return child
#_____________________________________________________________________________________
# _add_parent
#_____________________________________________________________________________________
def _add_parent(self, parent):
"""
connect edges
"""
# do not add duplicates
if parent in self._inward:
return parent
self._inward.append(parent)
parent._outward.append(self)
return parent
#_____________________________________________________________________________________
# _remove_all_parents
#_____________________________________________________________________________________
def _remove_all_parents(self):
"""
disconnect edges
"""
# remove self from parent
for parent in self._inward:
if self in parent._outward:
parent._outward.remove(self)
# clear self
self._inward = []
return self
#_____________________________________________________________________________________
# _remove_parent
#_____________________________________________________________________________________
def _remove_parent(self, parent):
"""
disconnect edges
"""
if parent in self._inward:
self._inward.remove(parent)
if self in parent._outward:
parent._outward.remove(self)
return parent
#_____________________________________________________________________________________
# _get_inward/_get_outward
#_____________________________________________________________________________________
    def _get_outward (self):
        """
        accessor for outgoing edges
        (kept as a method so reversed graphs can swap inward/outward)
        """
        return self._outward
    def _get_inward (self):
        """
        accessor for incoming edges
        (kept as a method so reversed graphs can swap inward/outward)
        """
        return self._inward
#_____________________________________________________________________________________
# _fullstr
#_____________________________________________________________________________________
def _fullstr(self):
"""
Full dump. Normally edges are not printed out
Everything is indented except name
"""
self_desc = list()
        # "_name" sorts first; keys are unique, so the key alone suffices for sorting
        for k, v in sorted(self.__dict__.items(), key=lambda kv: (kv[0] != "_name", kv[0])):
            indent = " " if k != "_name" else ""
            if k in ("_inward", "_outward"):
                v = ",".join(x._name for x in v)
            self_desc.append(indent + str(k) + "=" + str(v))
return "\n".join(self_desc)
#_____________________________________________________________________________________
# __str__
#_____________________________________________________________________________________
def __str__ (self):
"""
        Print only the user-supplied attributes (keys not starting with '_')
        Useful for debugging
"""
self_desc = list()
for k,v in sorted(self.__dict__.items(), reverse=True):
indent = " " if k != "_name" else ""
if k[0] == '_':
continue
else:
self_desc.append(indent + str(k) + "=" + str(v))
return " Task = " + "\n".join(self_desc)
#_____________________________________________________________________________________
# _signalled
#
#_____________________________________________________________________________________
def _signalled (self, extra_data_for_signal = None):
"""
        Return True if depth first search should end before reaching this node
"""
return self._signal
#_____________________________________________________________________________________
# node_to_json
#
#
#_____________________________________________________________________________________
class node_to_json(json.JSONEncoder):
"""
output node using json
"""
def default(self, obj):
print(str(obj))
if isinstance(obj, node):
return obj._name, {
"index": obj._node_index,
"_signal": obj._signal,
"_get_inward": [n._name for n in obj._inward],
"_get_outward": [n._name for n in obj._outward],
}
return json.JSONEncoder.default(self, obj)
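# Usage sketch: hand the encoder above to json.dumps via ``cls``, e.g.
#   json.dumps(some_node, cls=node_to_json)
# where some_node is any previously constructed node instance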
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# topological_sort_visitor
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
def default_signalled (node, extra_data):
"""
    Depth first search stops when node._signalled returns True
"""
return node._signalled(extra_data)
class topological_sort_visitor (object):
"""
topological sort
used with DFS to find all nodes in topologically sorted order
    Also finds all DAG-breaking cycles
"""
IGNORE_NODE_SIGNAL = 0
NOTE_NODE_SIGNAL = 1
END_ON_SIGNAL = 2
#_____________________________________________________________________________________
# init
#_____________________________________________________________________________________
def __init__ (self, forced_dfs_nodes,
node_termination = END_ON_SIGNAL,
extra_data_for_signal = None,
signal_callback = None):
"""
list of saved results
"""
self._forced_dfs_nodes = set(forced_dfs_nodes)
self._node_termination = node_termination
self._start_nodes = set()
self._back_edges = set()
self._back_nodes = set()
self._signalling_nodes = set()
self.signal_callback = signal_callback
# keep order for tree traversal later
self._examined_edges = list()
# keep order for topological sorted results
self._finished_nodes = list()
self._extra_data_for_signal = extra_data_for_signal
def combine_with (self, other):
"""
combine the results of two visitors
(add other to self)
"""
self._back_edges .update(other._back_edges)
self._back_nodes .update(other._back_nodes)
extra_finished_nodes = set(other._finished_nodes) - set(self._finished_nodes)
self._finished_nodes .extend(extra_finished_nodes)
#_____________________________________________________________________________________
# __str__
#_____________________________________________________________________________________
def __str__ (self):
"""
for diagnostics
"""
signalling_str = get_nodes_str ("Signalling", self._signalling_nodes)
finished_str = get_nodes_str ("Finished", self._finished_nodes)
forced_str = get_nodes_str ("Forced to run", self._forced_dfs_nodes)
start_str = get_nodes_str ("Start", self._start_nodes)
back_edges_str = get_edges_str ("back", self._back_edges)
return (""
+ finished_str
+ start_str
+ back_edges_str
+ signalling_str
+ finished_str
)
#_____________________________________________________________________________________
# not_dag
#_____________________________________________________________________________________
def not_dag (self):
"""
back edges add circularity
"""
return len(self._back_edges)
#_____________________________________________________________________________________
# dag_violating_edges
#_____________________________________________________________________________________
def dag_violating_edges (self):
"""
back edges add circularity
"""
return self._back_edges
#_____________________________________________________________________________________
# dag_violating_nodes
#_____________________________________________________________________________________
def dag_violating_nodes (self):
"""
        all nodes involved in cycles
"""
return self._back_nodes
#_____________________________________________________________________________________
# identify_dag_violating_nodes_and_edges
#
#_____________________________________________________________________________________
def identify_dag_violating_nodes_and_edges (self):
"""
find all nodes and edges in any cycles
All dag violating cycles are defined by the back edge identified in DFS.
All paths which go the other way: start at the to_node and end up at the from_node
are therefore also part of the cycle
"""
if not len(self._back_edges):
return
cnt_examined_edges = len(self._examined_edges)
# add this to _back_edges at the end
cycle_edges = set()
#
# each cycle
# starts from the to_node of each back_edge and
# ends with the from_node of each back_edge
#
for cycle_to_node, cycle_from_node in self._back_edges:
start_search_from = 0
while 1:
#
# find start of cycle
for i, (f,t,n) in enumerate(self._examined_edges[start_search_from:]):
if f == cycle_from_node:
break
# no more cycles for this cycle_from_node/cycle_to_node pair
else:
break
#
# cycle end might be within the same pair
# if so, don't search the current (not the next) edge for the cycle end
#
# Otherwise incrementing search position avoids infinite loop
#
start_search_from = cycle_start = start_search_from + i
if self._examined_edges[cycle_start][1] != cycle_to_node:
start_search_from += 1
for i, (f,t,n) in enumerate(self._examined_edges[start_search_from:]):
#
# found end of cycle
#
if t == cycle_to_node:
cycle_end = start_search_from + i + 1
#
# ignore backtracked nodes which will not be part of the cycle
# we are essentially doing tree traversal here
#
backtracked_nodes = set()
for f,t,n in self._examined_edges[cycle_start:cycle_end]:
if t is None:
backtracked_nodes.add(n)
for f,t,n in self._examined_edges[cycle_start:cycle_end]:
if f is None or f in backtracked_nodes or t in backtracked_nodes:
continue
cycle_edges.add((f,t))
self._back_nodes.add(f)
self._back_nodes.add(t)
start_search_from = cycle_end
break
# if cycle_from_node comes around again, this is not a cycle
if cycle_from_node == f:
if not i:
i += 1
start_search_from = start_search_from + i
break
continue
# no more cycles for this cycle_from_node/cycle_to_node pair
else:
break
self._back_edges.update(cycle_edges)
#_____________________________________________________________________________________
# not_dag
#_____________________________________________________________________________________
def topological_sorted (self):
"""
_finished_nodes
"""
return self._finished_nodes
#_____________________________________________________________________________________
# terminate_before
#_____________________________________________________________________________________
def terminate_before(self, node):
"""
Allow node to terminate this path in DFS without including itself
(see terminate_at)
        If the node is in _forced_dfs_nodes, that overrides what the node wants
"""
#
# If _node_termination = IGNORE_NODE_TERMINATION
# always go through whole tree
#
if self._node_termination == self.IGNORE_NODE_SIGNAL:
return False
#
# If _node_termination = NOTE_NODE_TERMINATION
# always go through whole tree but remember
# which nodes want to terminate
#
# Note that _forced_dfs_nodes is ignored
#
if self._node_termination == self.NOTE_NODE_SIGNAL:
if self.signal_callback(node, self._extra_data_for_signal):
self._signalling_nodes.add(node)
return False
#
# _forced_dfs_nodes always overrides node preferences
# but let us save what the node says anyway for posterity
#
if node in self._forced_dfs_nodes:
## Commented out code lets us save self_terminating_nodes even when
## they have been overridden by _forced_dfs_nodes
#if self.signal_callback(node, self._extra_data_for_signal):
# self._signalling_nodes.add(node)
return False
#
# OK. Go by what the node wants then
#
if self.signal_callback(node, self._extra_data_for_signal):
self._signalling_nodes.add(node)
return True
return False
#_____________________________________________________________________________________
# call_backs
#_____________________________________________________________________________________
def discover_vertex(self, node):
pass
def start_vertex(self, node):
self._start_nodes.add(node)
def finish_vertex(self, node):
"""
        Save
        1) the node, in topologically sorted order
        2) a (None, None, node) marker edge, which allows _examined_edges
           to be traversed like a tree
"""
self._examined_edges.append((None, None, node))
self._finished_nodes.append(node)
def examine_edge(self, node_from, node_to):
"""
        Save edges as we encounter them so we can look for loops
"""
self._examined_edges.append((node_from, node_to, None))
def back_edge(self, node_from, node_to):
self._back_edges.add((node_from, node_to))
def tree_edge(self, node_from, node_to):
pass
def forward_or_cross_edge(self, node_from, node_to):
pass
def terminate_at (self, node):
"""
Terminate this line of DFS but include myself
"""
return False
#
#_________________________________________________________________________________________
# debug_print_visitor
#_________________________________________________________________________________________
class debug_print_visitor (object):
"""
log progress through DFS: for debugging
"""
def terminate_before(self, node):
return False
def terminate_at (self, node):
return False
def start_vertex(self, node):
print("s start vertex %s" % (node._name))
def finish_vertex(self, node):
print(" v finish vertex %s" % (node._name))
def discover_vertex(self, node):
print(" | discover vertex %s" % (node._name))
def examine_edge(self, node_from, node_to):
print(" -- examine edge %s -> %s" % (node_from._name, node_to._name))
def back_edge(self, node_from, node_to):
print(" back edge %s -> %s" % (node_from._name, node_to._name))
def tree_edge(self, node_from, node_to):
print(" - tree edge %s -> %s" % (node_from._name, node_to._name))
def forward_or_cross_edge(self, node_from, node_to):
print(" - forward/cross edge %s -> %s" % (node_from._name, node_to._name))
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#_________________________________________________________________________________________
# depth first search
#_________________________________________________________________________________________
#
#
#
WHITE = 0 # virgin
GRAY = 1 # processing
BLACK = 2 # finished
def depth_first_visit(u, visitor, colours, outedges_func):
"""
depth_first_visit
unused callbacks are commented out
"""
# start processing this node: so gray
colours[u] = GRAY
stack = list()
#
# unused callback
#
#visitor.discover_vertex(u)
curr_edges = outedges_func(u)
if visitor.terminate_before(u):
colours[u] = BLACK
return
    # If this vertex terminates the search, push an empty edge range
if visitor.terminate_at(u):
stack.append((u, curr_edges, len(curr_edges)))
else:
stack.append((u, curr_edges, 0))
while len(stack):
u, curr_edges, curr_edge_pos = stack.pop()
while curr_edge_pos < len(curr_edges):
v = curr_edges[curr_edge_pos]
visitor.examine_edge(u, v)
v_colour = colours[v]
if visitor.terminate_before(v):
colours[v] = BLACK
curr_edge_pos += 1
continue
if v_colour == WHITE:
#
# unused callback
#
#visitor.tree_edge(u, v)
curr_edge_pos += 1
stack.append((u, curr_edges, curr_edge_pos))
u = v
colours[u] = GRAY
#
# unused callback
#
#visitor.discover_vertex(u)
curr_edges = outedges_func(u)
curr_edge_pos = 0
if visitor.terminate_at(u):
break
elif v_colour == GRAY:
visitor.back_edge(u, v)
curr_edge_pos += 1
else:
#
# unused callback
#
#visitor.forward_or_cross_edge(u, v)
curr_edge_pos += 1
colours[u] = BLACK
visitor.finish_vertex(u)
def depth_first_search(starting_nodes, visitor, outedges_func = node._get_inward):
"""
depth_first_search
go through all starting points and DFV on each of them
if they haven't been seen before
"""
colours = defaultdict(int) # defaults to WHITE
if len(starting_nodes):
for start in starting_nodes:
if colours[start] == WHITE:
visitor.start_vertex(start)
depth_first_visit(start, visitor, colours, outedges_func)
else:
#
# go through all nodes, maintaining order
#
for start in node._all_nodes:
if colours[start] == WHITE:
visitor.start_vertex(start)
depth_first_visit(start, visitor, colours, outedges_func)
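#
# A self-contained sketch (not part of the original module) of how
# depth_first_visit() drives the visitor callbacks above. The toy graph and
# the _demo_dfs_visitor class below are hypothetical names used purely for
# illustration.
#
def _demo_depth_first_visit():
    from collections import defaultdict
    edges = {"a": ["b", "c"], "b": ["c"], "c": ["a"], "d": []}

    class _demo_dfs_visitor(object):
        def terminate_before(self, n): return False
        def terminate_at(self, n):     return False
        def examine_edge(self, f, t):  print("examine %s -> %s" % (f, t))
        def back_edge(self, f, t):     print("back edge %s -> %s (cycle)" % (f, t))
        def finish_vertex(self, n):    print("finish %s" % n)

    colours = defaultdict(int)                       # every node starts WHITE
    depth_first_visit("a", _demo_dfs_visitor(), colours, lambda n: edges[n])
    # back_edge fires for c -> a because "a" is still GRAY (i.e. on the stack);
    # this is exactly how topological_sort_visitor detects DAG violations.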
#_________________________________________________________________________________________
# topologically_sorted_nodes
#_________________________________________________________________________________________
def topologically_sorted_nodes( to_leaves,
force_start_from = [],
gather_all_non_signalled = True,
test_all_signals = False,
extra_data_for_signal = None,
signal_callback = None):
"""
    Get all nodes which are children of to_leaves
    in topologically sorted order
Defaults to including all nodes which are non-signalled and their dependents (via include_any_children())
i.e. includes the *last* non-signalling node on each branch and all the way up the tree
Otherwise stops at each branch just before signalling node
i.e. includes the last non-signalling *run* on each branch
force_start_from
Optionally specify all the child nodes which *have* to
be included in the list at least
This will override any node signals
force_start_from = True to get the whole tree irrespective of signalling
Rewritten to minimise calls to node._signalled()
"""
#
    # go through the entire tree, looking for signalling nodes,
# usually for debugging or for printing
#
if test_all_signals:
v = topological_sort_visitor([],
topological_sort_visitor.NOTE_NODE_SIGNAL,
extra_data_for_signal,
signal_callback)
depth_first_search(to_leaves, v, node._get_inward)
signalling_nodes = v._signalling_nodes
else:
signalling_nodes = set()
if gather_all_non_signalled:
#
# get whole tree, ignoring signalling
#
v = topological_sort_visitor([], topological_sort_visitor.IGNORE_NODE_SIGNAL)
depth_first_search(to_leaves, v, node._get_inward)
#
# not dag: no further processing
#
if v.not_dag():
v.identify_dag_violating_nodes_and_edges ()
return (v.topological_sorted(), v._signalling_nodes, v.dag_violating_edges(),
v.dag_violating_nodes())
#
# if force_start_from == True
#
# return entire tree
#
if force_start_from == True:
return (v.topological_sorted(), v._signalling_nodes, v.dag_violating_edges(),
v.dag_violating_nodes())
#
# Set of all nodes we are going to return
# We will use v.topological_sorted to return them in the right (sorted) order
#
nodes_to_include = set()
#
# If force start from is a list of nodes,
# include these and all of its dependents (via include_any_children)
#
# We don't need to bother to check if they signal (_signalled)
# This saves calling the expensive _signalled
#
if len(force_start_from):
nodes_to_include.update(include_any_children(force_start_from))
# This should not be necessary because include_any_children also returns self.
#for n in force_start_from:
# if n in nodes_to_include:
# continue
# nodes_to_include.add(n)
# nodes_to_include.update(include_any_children([n]))
#
# Now select all nodes from ancestor -> descendant which do not signal (signal_callback() == false)
# and select their descendants (via include_any_children())
#
# Nodes which signal are added to signalling_nodes
#
reversed_nodes = v.topological_sorted()
for n in reversed_nodes:
if n in nodes_to_include:
continue
if not signal_callback(n, extra_data_for_signal):
#nodes_to_include.add(n)
nodes_to_include.update(include_any_children([n]))
else:
signalling_nodes.add(n)
#sys.stderr.write(json.dumps(n, cls=node_to_json, sort_keys=1) + "\n")
return ([n for n in v.topological_sorted() if n in nodes_to_include],
signalling_nodes,
[],[])
#
# gather_all_non_signalled = False
# stop at first signalled
#
else:
if force_start_from == True:
#
# get whole tree, ignoring signalling
#
v = topological_sort_visitor([],
topological_sort_visitor.IGNORE_NODE_SIGNAL)
else:
#
# End at each branch without including signalling node
# but ignore signalling for forced_nodes_and_dependencies
#
# Get forced nodes and all descendants via include_any_children
#
forced_nodes_and_dependencies = []
if len(force_start_from):
forced_nodes_and_dependencies = include_any_children(force_start_from)
v = topological_sort_visitor( forced_nodes_and_dependencies,
topological_sort_visitor.END_ON_SIGNAL,
extra_data_for_signal,
signal_callback)
#
# Forward graph iteration
#
depth_first_search(to_leaves, v, node._get_inward)
if v.not_dag():
v.identify_dag_violating_nodes_and_edges ()
signalling_nodes.update(v._signalling_nodes)
return (v.topological_sorted(), signalling_nodes, v.dag_violating_edges(), v.dag_violating_nodes())
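#
# A hypothetical sketch (not part of the original module) of the
# signal_callback expected by topologically_sorted_nodes() above: it receives
# a node plus the opaque extra_data_for_signal and returns True when the node
# "signals", i.e. is up to date and need not be re-run.
#
def _example_signal_callback(node, extra_data_for_signal):
    # "_is_up_to_date" is an illustrative attribute name, not part of the
    # real node class
    return getattr(node, "_is_up_to_date", False)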
#
def debug_print_nodes(to_leaves):
v = debug_print_visitor()
depth_first_search(to_leaves, v, node._get_inward)
#_________________________________________________________________________________________
# graph_printout
#_________________________________________________________________________________________
def graph_colour_demo_printout (stream,
output_format,
size = '11,8',
dpi = '120'):
"""
Demo of the different colour schemes
"""
if output_format == 'dot':
write_colour_scheme_demo_in_dot_format(stream)
return
# print to dot file
#temp_dot_file = tempfile.NamedTemporaryFile(suffix='.dot', delete=False)
fh, temp_dot_file_name = tempfile.mkstemp(suffix='.dot')
temp_dot_file = os.fdopen(fh, "w")
write_colour_scheme_demo_in_dot_format(temp_dot_file)
temp_dot_file.close()
print_dpi = ("-Gdpi='%s'" % dpi) if output_format != "svg" else ""
run_dot = os.popen("dot -Gsize='%s' %s -T%s < %s" % (size, print_dpi, output_format, temp_dot_file_name))
#
    # weird bug fix for firefox and svg
#
result_str = run_dot.read()
err = run_dot.close()
if err:
raise RuntimeError("dot failed to run with exit code %d" % err)
if output_format == "svg":
result_str = result_str.replace("0.12", "0.0px")
stream.write(result_str)
#_________________________________________________________________________________________
# graph_printout_in_dot_format
#_________________________________________________________________________________________
def graph_printout_in_dot_format ( stream,
to_leaves,
force_start_from = [],
draw_vertically = True,
ignore_upstream_of_target = False,
skip_signalling_nodes = False,
gather_all_non_signalled = True,
test_all_signals = True,
no_key_legend = False,
minimal_key_legend = True,
user_colour_scheme = None,
pipeline_name = "Pipeline:",
extra_data_for_signal = None,
signal_callback = None):
"""
print out pipeline dependencies in dot formatting
"""
(topological_sorted, # tasks_to_run
signalling_nodes, # up to date
dag_violating_edges,
dag_violating_nodes) = topologically_sorted_nodes(to_leaves, force_start_from,
gather_all_non_signalled,
test_all_signals,
extra_data_for_signal,
signal_callback)
#
# N.B. For graph:
# upstream = parent
# dependents/downstream
# = children
#
#
nodes_to_display = get_reachable_nodes(to_leaves, not ignore_upstream_of_target)
#
# print out dependencies in dot format
#
write_flowchart_in_dot_format(topological_sorted, # tasks_to_run
signalling_nodes, # up to date
dag_violating_edges,
dag_violating_nodes,
stream,
to_leaves,
force_start_from,
nodes_to_display,
draw_vertically,
skip_signalling_nodes,
no_key_legend,
minimal_key_legend,
user_colour_scheme,
pipeline_name)
#_________________________________________________________________________________________
# graph_printout
#_________________________________________________________________________________________
def graph_printout (stream,
output_format,
to_leaves,
force_start_from = [],
draw_vertically = True,
ignore_upstream_of_target = False,
skip_signalling_nodes = False,
gather_all_non_signalled = True,
test_all_signals = True,
no_key_legend = False,
minimal_key_legend = True,
user_colour_scheme = None,
pipeline_name = "Pipeline:",
size = (11,8),
dpi = 120,
extra_data_for_signal = None,
signal_callback = None):
"""
    print out pipeline dependencies in a variety of formats,
    using the programme "dot" as an intermediary
"""
if output_format == 'dot':
graph_printout_in_dot_format ( stream,
to_leaves,
force_start_from,
draw_vertically,
ignore_upstream_of_target,
skip_signalling_nodes,
gather_all_non_signalled,
test_all_signals,
no_key_legend,
minimal_key_legend,
user_colour_scheme,
pipeline_name,
extra_data_for_signal,
signal_callback)
return
# print to dot file
#temp_dot_file = tempfile.NamedTemporaryFile(suffix='.dot', delete=False)
fh, temp_dot_file_name = tempfile.mkstemp(suffix='.dot')
temp_dot_file = os.fdopen(fh, "wb")
graph_printout_in_dot_format ( temp_dot_file,
to_leaves,
force_start_from,
draw_vertically,
ignore_upstream_of_target,
skip_signalling_nodes,
gather_all_non_signalled,
test_all_signals,
no_key_legend,
minimal_key_legend,
user_colour_scheme,
pipeline_name,
extra_data_for_signal,
signal_callback)
temp_dot_file.close()
if isinstance(size, tuple):
print_size = "(%d,%d)" % size
    elif isinstance(size, str):
print_size = size
else:
raise Exception("Flowchart print size [%s] should be specified as a tuple of X,Y in inches" % str(size))
#
# N.B. Resolution doesn't seem to play nice with SVG and is ignored
#
print_dpi = ("-Gdpi='%s'" % dpi) if output_format != "svg" else ""
cmd = "dot -Gsize='%s' %s -T%s < %s" % (print_size, print_dpi, output_format, temp_dot_file_name)
proc = subprocess.Popen(cmd, shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result_str, error_str = proc.communicate()
retcode = proc.returncode
if retcode:
raise subprocess.CalledProcessError(retcode, cmd + "\n" + "\n".join([str(result_str), str(error_str)]))
#run_dot = os.popen(cmd)
#result_str = run_dot.read()
#err = run_dot.close()
#
#if err:
# raise RuntimeError("dot failed to run with exit code %d" % err)
#
    # weird workaround for a bug / bad interaction between firefox and svg:
# Font sizes have "px" appended.
#
if output_format == "svg":
# result str is a binary string. I.e. could be .jpg
# must turn it into string before we can replace, and then turn it back into binary
result_str = result_str.decode()
result_str = result_str.replace("0.12", "0.0px")
result_str = result_str.encode()
stream.write(result_str)
#_________________________________________________________________________________________
# include_any_children
#_________________________________________________________________________________________
def include_any_children (nodes):
"""
Get all children nodes by DFS in the inward direction,
Ignores signals
Also includes original nodes in the results
"""
children_visitor = topological_sort_visitor([], topological_sort_visitor.IGNORE_NODE_SIGNAL)
depth_first_search(nodes, children_visitor, node._get_outward)
return children_visitor.topological_sorted()
#_________________________________________________________________________________________
# get_reachable_nodes
#_________________________________________________________________________________________
def get_reachable_nodes(nodes, children_as_well = True):
"""
Get all nodes which are parents and children of nodes
recursing through the entire tree
i.e. go up *and* down tree starting from node
1) specify parents_as_well = False
to only get children and not parents of nodes
"""
    # first gather all children of nodes, then search upwards (for parents) from them
if children_as_well:
nodes = include_any_children (nodes)
parent_visitor = topological_sort_visitor([], topological_sort_visitor.IGNORE_NODE_SIGNAL)
depth_first_search(nodes, parent_visitor, node._get_inward)
return parent_visitor.topological_sorted()
#_________________________________________________________________________________________
# Helper functions to dump edges and nodes
#_________________________________________________________________________________________
def get_edges_str (name, edges):
"""
helper function to dump edges as a list of names
"""
edges_str = " %d %s edges\n" % (len(edges), name)
edges_str += " " + ", ".join([x_y[0]._name + "->" + x_y[1]._name for x_y in edges]) + "\n"
return edges_str
def get_nodes_str (name, nodes):
"""
helper function to dump nodes as a list of names
"""
nodes_str = " %s nodes = %d\n" % (name, len(nodes))
nodes_str += " " + ", ".join([x._name for x in nodes]) + "\n"
return nodes_str
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient checker for functions.
The gradient checker verifies numerically that an function properly
computes the gradients
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
def _product(t):
if isinstance(t, int):
return t
else:
y = 1
for x in t:
y *= x
return y
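# e.g. _product((2, 3, 4)) == 24, while a bare int passes straight through: _product(5) == 5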
def _eval_indexed_slices(a):
"""Converts IndexedSlices to IndexedSlicesValue with numpy indices/values.
When eager execution is enabled, converts IndexedSlices
to IndexedSlicesValue with numpy indices/values.
Args:
a: any value.
Returns:
If a is IndexedSlices and eager execution is enabled, calls numpy() on a's
fields. Otherwise returns a unchanged.
"""
if isinstance(a, ops.IndexedSlices) and context.executing_eagerly():
return ops.IndexedSlicesValue(
indices=[x.numpy() for x in a.indices],
values=[x.numpy() for x in a.values],
dense_shape=a.dense_shape)
return a
def _to_numpy(a):
"""Converts Tensors, EagerTensors, and IndexedSlicesValue to numpy arrays.
Args:
a: any value.
Returns:
If a is EagerTensor or Tensor, returns the evaluation of a by calling
numpy() or run(). If a is IndexedSlicesValue, constructs the corresponding
dense numpy array. Otherwise returns a unchanged.
"""
if isinstance(a, ops.EagerTensor):
return a.numpy()
if isinstance(a, ops.Tensor):
sess = ops.get_default_session()
return sess.run(a)
if isinstance(a, ops.IndexedSlicesValue):
arr = np.zeros(a.dense_shape)
assert len(a.values) == len(a.indices), (
"IndexedSlicesValue has %s value slices but %s indices\n%s" %
(a.values, a.indices, a))
for values_slice, index in zip(a.values, a.indices):
assert 0 <= index < len(arr), (
"IndexedSlicesValue has invalid index %s\n%s" % (index, a))
arr[index] += values_slice
return arr
return a
def _prepare(f, xs_dtypes):
"""Return a function that executes 'f'.
In TF 2.x, this is the same as `f`.
In TF 1.x, returns a Python function that executes the graph defined by `f`
in a Session.
Args:
f: the function.
xs_dtypes: dtypes of f's arguments.
Returns:
a function that will be evaluated in both graph and eager mode
"""
if context.executing_eagerly():
def decorated_eager(*xs_data):
return f(*map(ops.convert_to_tensor, xs_data))
return decorated_eager
xs = [array_ops.placeholder(x_dtype) for x_dtype in xs_dtypes]
y = f(*xs)
sess = ops.get_default_session()
def decorated_graph(*xs_data):
xs_data = [_to_numpy(a) for a in xs_data]
return sess.run(y, feed_dict=dict(zip(xs, xs_data)))
return decorated_graph
def _compute_theoretical_jacobian(f, y_shape, y_dtype, xs, param):
"""Computes the theoretical Jacobian for f regarding xs[param].
One can think of the relation among f, xs and y as y = f(xs).
Args:
f: the function.
y_shape: the shape of the result.
y_dtype: the dtype of the result.
xs: a list of tensors.
param: the index of the target parameter.
Returns:
A 2-d numpy array representing the Jacobian. It has "x_size" rows
and "y_size" columns where "x_size" is the number of elements in xs[param]
and "y_size" is the number of elements in the result.
Raises:
ValueError: If result is empty but the gradient is nonzero.
"""
x = xs[param]
# Complex vectors are treated as vectors of twice as many reals.
x_shape = tuple(x.shape) + (2,) if x.dtype.is_complex else x.shape
y_factor = 2 if y_dtype.is_complex else 1
# To compute the jacobian, we treat x and y as one-dimensional vectors.
x_size = _product(x_shape)
x_val_size = _product(x_shape[1:]) # This is used for sparse gradients
y_size = _product(y_shape) * y_factor
# Allocate 2-D Jacobian, with x dimensions smashed into the first
# dimension and y dimensions smashed into the second.
jacobian = np.zeros((x_size, y_size), dtype=x.dtype.real_dtype.as_numpy_dtype)
  # For each entry of dy, we set it to 1 and everything else to 0, then
  # compute the gradients -- this gives us one column of the Jacobian matrix.
dy_data = np.zeros(y_shape, dtype=y_dtype.as_numpy_dtype)
dy_data_flat = dy_data.ravel().view(y_dtype.real_dtype.as_numpy_dtype)
grad_fn_unprep = backprop.gradients_function(f, [param])
grad_fn = _prepare(lambda dy, *xs: grad_fn_unprep(*xs, dy=dy),
[y_dtype] + [x.dtype for x in xs])
for col in range(y_size):
dy_data_flat[col] = 1
grad = _to_numpy(grad_fn(dy_data, *xs)[0])
grad = _eval_indexed_slices(grad)
dy_data_flat[col] = 0
if isinstance(grad, ops.IndexedSlicesValue):
for i, v in zip(grad.indices, grad.values):
r_begin = i * x_val_size
r_end = r_begin + x_val_size
jacobian[r_begin:r_end, col] += v.flat
else:
jacobian[:, col] = grad.ravel().view(jacobian.dtype)
# If the output is empty, run the gradients at least once and make sure
# they produce zeros.
if y_size == 0: # don't use 'not y_size', because y_size may not be an int
grad = _to_numpy(grad_fn(dy_data, *xs)[0])
if grad.shape != x.shape:
raise ValueError("Empty gradient has wrong shape: expected %s, got %s" %
(x.shape, grad.shape))
if np.any(grad):
raise ValueError("Empty tensor with nonzero gradients")
logging.vlog(1, "Theoretical Jacobian =\n%s", jacobian)
return jacobian
def _compute_numeric_jacobian(f, y_size, y_dtype, xs, param,
delta):
"""Computes the numeric Jacobian for f regarding xs[param].
One can think of the relation among f, xs and y as y = f(xs).
Args:
f: the function.
y_size: the number of elements of the result.
y_dtype: the dtype of the result.
xs: a list of tensors.
param: the index of the target parameter.
delta: the amount of perturbation we give to the input.
Returns:
A 2-d numpy array representing the Jacobian. It has "x_size" rows
and "y_size" columns where "x_size" is the number of elements in xs[param]
and "y_size" is the number of elements in the result.
"""
# bfloat16 doesn't have enough bits to represent high precision numbers such
# as delta. Convert to float32 here. Since numeric_jacobian is expected to
# be the groundtruth to compare against, it shouldn't lose any information.
x_shape = xs[param].shape
x_dtype = xs[param].dtype
  if y_dtype == dtypes.bfloat16:
    # Bind the original f first; closing over the rebound name in the lambda
    # would otherwise make the lambda call itself recursively.
    f_orig = f
    f = lambda *xs: math_ops.cast(f_orig(*xs), dtypes.float32)
    y_dtype = dtypes.float32
# To compute the jacobian, we treat x and y as one-dimensional vectors
x_size = _product(x_shape) * (2 if x_dtype.is_complex else 1)
y_size = y_size * (2 if y_dtype.is_complex else 1)
x_dtype = x_dtype.real_dtype.as_numpy_dtype
y_dtype = y_dtype.real_dtype.as_numpy_dtype
xs_dtypes = [x.dtype for x in xs]
# Converts xs to numpy arrays to do in-place perturbation.
# Calls asarray() to avoid copying in ravel() later.
xs = [np.asarray(_to_numpy(x)) for x in xs]
x = xs[param]
# Make sure we have the right types
scale = np.asarray(2 * delta, dtype=y_dtype)[()]
jacobian = np.zeros((x_size, y_size), dtype=x_dtype)
  # For each entry of x, we slightly perturb it by adding and subtracting
  # delta, then compute the difference between the outputs. This gives us one
  # row of the Jacobian matrix.
f = _prepare(f, xs_dtypes)
for row in range(x_size):
original = x.ravel().view(x_dtype)[row]
x.ravel().view(x_dtype)[row] += delta
y_pos = _to_numpy(f(*xs))
x.ravel().view(x_dtype)[row] = original
x.ravel().view(x_dtype)[row] -= delta
y_neg = _to_numpy(f(*xs))
x.ravel().view(x_dtype)[row] = original
diff = (y_pos - y_neg) / scale
jacobian[row, :] = diff.ravel().view(y_dtype)
logging.vlog(1, "Numeric Jacobian =\n%s", jacobian)
return jacobian
def _compute_gradient(f,
y_shape,
y_dtype,
xs,
param,
delta):
"""Computes the theoretical and numerical jacobian."""
x = xs[param]
t = x.dtype
allowed_types = [dtypes.float16, dtypes.bfloat16, dtypes.float32,
dtypes.float64, dtypes.complex64, dtypes.complex128]
  assert t.base_dtype in allowed_types, ("Cannot compute gradient for "
                                         "unsupported type %s of argument %s" %
                                         (t.name, param))
t2 = y_dtype
  assert t2.base_dtype in allowed_types, ("Cannot compute gradient for "
                                          "unsupported type %s of y" % t2.name)
y_size = _product(y_shape)
jacob_t = _compute_theoretical_jacobian(f, y_shape, y_dtype,
xs, param)
jacob_n = _compute_numeric_jacobian(f, y_size, y_dtype, xs,
param, delta)
return jacob_t, jacob_n
def _compute_gradient_list(f, xs, delta):
"""Compute gradients for a list of x values."""
# convert xs to tensors so that dtype and shape have uniform types
xs = list(map(ops.convert_to_tensor, xs))
# run the function to get info of the result
xs_dtypes = [x.dtype for x in xs]
f_temp = _prepare(f, xs_dtypes)
y = f_temp(*xs)
return zip(*[_compute_gradient(f, y.shape, dtypes.as_dtype(y.dtype),
xs, i, delta) for i in range(len(xs))])
@tf_export("test.compute_gradient", v1=[])
def compute_gradient(f, x, delta=1e-3):
"""Computes the theoretical and numeric Jacobian of f.
With y = f(x), computes the theoretical and numeric Jacobian dy/dx.
Args:
f: the function.
x: a list of tensors.
delta: (optional) perturbation used to compute numeric Jacobian.
Returns:
A pair of lists, where the first is a list of 2-d numpy arrays representing
the theoretical Jacobians for each argument, and the second list is the
numerical ones. Each 2-d array has "x_size" rows
and "y_size" columns where "x_size" is the number of elements in the
corresponding argument and "y_size" is the number of elements in f(x).
Raises:
ValueError: If result is empty but the gradient is nonzero.
"""
if not isinstance(x, list):
raise ValueError(
"`x` must be a list of Tensors (arguments to `f`), not a %s" % type(x))
return _compute_gradient_list(f, x, delta)
def max_error(grad1, grad2):
"""Computes maximum elementwise gap.
Computes the maximum elementwise gap between two lists of tensors of the same
shape.
Args:
    grad1: a list of tensors.
    grad2: a list of tensors with the same shape as grad1.
Returns:
The maximum elementwise gap between the two.
"""
error = 0
for j_t, j_n in zip(grad1, grad2):
if j_t.size or j_n.size: # Handle zero size tensors correctly
error = np.maximum(error, np.fabs(j_t - j_n).max())
return error
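if __name__ == "__main__":
  # Minimal usage sketch (not part of the original module): check d(t*t)/dt
  # numerically against autodiff, assuming eager execution (TF 2.x). The
  # test values are illustrative only.
  from tensorflow.python.framework import constant_op
  x = constant_op.constant([1.0, 2.0, 3.0])
  theoretical, numeric = compute_gradient(lambda t: t * t, [x])
  print("max error: %g" % max_error(theoretical, numeric))  # ~0 when they agree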
|
|
from __future__ import unicode_literals
from datetime import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.test import TestCase, skipUnlessDBFeature
from .models import Author, Article, Tag, Game, Season, Player
class LookupTests(TestCase):
def setUp(self):
# Create a few Authors.
self.au1 = Author(name='Author 1')
self.au1.save()
self.au2 = Author(name='Author 2')
self.au2.save()
# Create a couple of Articles.
self.a1 = Article(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1)
self.a1.save()
self.a2 = Article(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a2.save()
self.a3 = Article(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a3.save()
self.a4 = Article(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1)
self.a4.save()
self.a5 = Article(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2)
self.a5.save()
self.a6 = Article(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2)
self.a6.save()
self.a7 = Article(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2)
self.a7.save()
# Create a few Tags.
self.t1 = Tag(name='Tag 1')
self.t1.save()
self.t1.articles.add(self.a1, self.a2, self.a3)
self.t2 = Tag(name='Tag 2')
self.t2.save()
self.t2.articles.add(self.a3, self.a4, self.a5)
self.t3 = Tag(name='Tag 3')
self.t3.save()
self.t3.articles.add(self.a5, self.a6, self.a7)
def test_exists(self):
        # We can use .exists() to check that there are some Articles in the database.
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertQuerysetEqual(Article.objects.filter(pub_date__startswith='2005'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertQuerysetEqual(Article.objects.iterator(),
[
'Article 5',
'Article 6',
'Article 4',
'Article 2',
'Article 3',
'Article 7',
'Article 1',
],
transform=attrgetter('headline'))
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith='4').iterator(),
['Article 4'],
transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(set([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
self.assertEqual(Article.objects.in_bulk(iter([])), {})
self.assertRaises(TypeError, Article.objects.in_bulk)
self.assertRaises(TypeError, Article.objects.in_bulk, headline__startswith='Blah')
def test_values(self):
# values() returns a list of dictionaries instead of object instances --
# and you can specify which fields you want to retrieve.
identity = lambda x: x
self.assertQuerysetEqual(Article.objects.values('headline'),
[
{'headline': 'Article 5'},
{'headline': 'Article 6'},
{'headline': 'Article 4'},
{'headline': 'Article 2'},
{'headline': 'Article 3'},
{'headline': 'Article 7'},
{'headline': 'Article 1'},
],
transform=identity)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
[{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
transform=identity)
self.assertQuerysetEqual(Article.objects.values('id', 'headline'),
[
{'id': self.a5.id, 'headline': 'Article 5'},
{'id': self.a6.id, 'headline': 'Article 6'},
{'id': self.a4.id, 'headline': 'Article 4'},
{'id': self.a2.id, 'headline': 'Article 2'},
{'id': self.a3.id, 'headline': 'Article 3'},
{'id': self.a7.id, 'headline': 'Article 7'},
{'id': self.a1.id, 'headline': 'Article 1'},
],
transform=identity)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertQuerysetEqual(Article.objects.values('id', 'headline').iterator(),
[
{'headline': 'Article 5', 'id': self.a5.id},
{'headline': 'Article 6', 'id': self.a6.id},
{'headline': 'Article 4', 'id': self.a4.id},
{'headline': 'Article 2', 'id': self.a2.id},
{'headline': 'Article 3', 'id': self.a3.id},
{'headline': 'Article 7', 'id': self.a7.id},
{'headline': 'Article 1', 'id': self.a1.id},
],
transform=identity)
# The values() method works with "extra" fields specified in extra(select).
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
[
{'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
{'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
{'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
{'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
{'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
{'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
{'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
],
transform=identity)
data = {
'id_plus_one': 'id+1',
'id_plus_two': 'id+2',
'id_plus_three': 'id+3',
'id_plus_four': 'id+4',
'id_plus_five': 'id+5',
'id_plus_six': 'id+6',
'id_plus_seven': 'id+7',
'id_plus_eight': 'id+8',
}
self.assertQuerysetEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()),
[{
'id_plus_one': self.a1.id + 1,
'id_plus_two': self.a1.id + 2,
'id_plus_three': self.a1.id + 3,
'id_plus_four': self.a1.id + 4,
'id_plus_five': self.a1.id + 5,
'id_plus_six': self.a1.id + 6,
'id_plus_seven': self.a1.id + 7,
'id_plus_eight': self.a1.id + 8,
}], transform=identity)
# You can specify fields from forward and reverse relations, just like filter().
self.assertQuerysetEqual(
Article.objects.values('headline', 'author__name'),
[
{'headline': self.a5.headline, 'author__name': self.au2.name},
{'headline': self.a6.headline, 'author__name': self.au2.name},
{'headline': self.a4.headline, 'author__name': self.au1.name},
{'headline': self.a2.headline, 'author__name': self.au1.name},
{'headline': self.a3.headline, 'author__name': self.au1.name},
{'headline': self.a7.headline, 'author__name': self.au2.name},
{'headline': self.a1.headline, 'author__name': self.au1.name},
], transform=identity)
self.assertQuerysetEqual(
Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline},
{'name': self.au1.name, 'article__headline': self.a2.headline},
{'name': self.au1.name, 'article__headline': self.a3.headline},
{'name': self.au1.name, 'article__headline': self.a4.headline},
{'name': self.au2.name, 'article__headline': self.a5.headline},
{'name': self.au2.name, 'article__headline': self.a6.headline},
{'name': self.au2.name, 'article__headline': self.a7.headline},
], transform=identity)
self.assertQuerysetEqual(
Author.objects.values('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
{'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
], transform=identity)
        # However, a FieldError will be raised if you specify a non-existent
        # field name in values() (a field that is neither in the model nor
        # in extra(select)).
self.assertRaises(FieldError,
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values,
'id', 'id_plus_two')
# If you don't specify field names to values(), all are returned.
self.assertQuerysetEqual(Article.objects.filter(id=self.a5.id).values(),
[{
'id': self.a5.id,
'author_id': self.au2.id,
'headline': 'Article 5',
'pub_date': datetime(2005, 8, 1, 9, 0)
}], transform=identity)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
identity = lambda x: x
self.assertQuerysetEqual(Article.objects.values_list('headline'),
[
('Article 5',),
('Article 6',),
('Article 4',),
('Article 2',),
('Article 3',),
('Article 7',),
('Article 1',),
], transform=identity)
self.assertQuerysetEqual(Article.objects.values_list('id').order_by('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity)
self.assertQuerysetEqual(
Article.objects.values_list('id', flat=True).order_by('id'),
[self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id_plus_one', 'id'),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id)
],
transform=identity)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'})
.order_by('id').values_list('id', 'id_plus_one'),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1)
],
transform=identity)
self.assertQuerysetEqual(
Author.objects.values_list('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
], transform=identity)
self.assertRaises(TypeError, Article.objects.values_list, 'id', 'headline', flat=True)
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()),
'<Article: Article 2>')
self.assertEqual(repr(self.a2.get_next_by_pub_date()),
'<Article: Article 3>')
self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')),
'<Article: Article 6>')
self.assertEqual(repr(self.a3.get_next_by_pub_date()),
'<Article: Article 7>')
self.assertEqual(repr(self.a4.get_next_by_pub_date()),
'<Article: Article 6>')
self.assertRaises(Article.DoesNotExist, self.a5.get_next_by_pub_date)
self.assertEqual(repr(self.a6.get_next_by_pub_date()),
'<Article: Article 5>')
self.assertEqual(repr(self.a7.get_next_by_pub_date()),
'<Article: Article 4>')
self.assertEqual(repr(self.a7.get_previous_by_pub_date()),
'<Article: Article 3>')
self.assertEqual(repr(self.a6.get_previous_by_pub_date()),
'<Article: Article 4>')
self.assertEqual(repr(self.a5.get_previous_by_pub_date()),
'<Article: Article 6>')
self.assertEqual(repr(self.a4.get_previous_by_pub_date()),
'<Article: Article 7>')
self.assertEqual(repr(self.a3.get_previous_by_pub_date()),
'<Article: Article 2>')
self.assertEqual(repr(self.a2.get_previous_by_pub_date()),
'<Article: Article 1>')
def test_escaping(self):
# Underscores, percent signs and backslashes have special meaning in the
# underlying SQL code, but Django handles the quoting of them automatically.
a8 = Article(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
a8.save()
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article_'),
['<Article: Article_ with underscore>'])
a9 = Article(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
a9.save()
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article%'),
['<Article: Article% with percent sign>'])
a10 = Article(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
a10.save()
self.assertQuerysetEqual(Article.objects.filter(headline__contains='\\'),
                                 ['<Article: Article with \\ backslash>'])
def test_exclude(self):
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
# exclude() is the opposite of filter() when doing lookups:
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.exclude(headline__startswith="Article_"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
self.assertQuerysetEqual(Article.objects.exclude(headline="Article 7"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 1>',
])
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(
Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(
Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual(
[article for article in Article.objects.none().iterator()],
[])
def test_in(self):
# using __in with an empty list should return an empty query set
self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
self.assertQuerysetEqual(Article.objects.exclude(id__in=[]),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
])
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
try:
Article.objects.filter(pub_date_year='2005').count()
self.fail('FieldError not raised')
except FieldError as ex:
self.assertEqual(str(ex), "Cannot resolve keyword 'pub_date_year' "
"into field. Choices are: author, author_id, headline, "
"id, pub_date, tag")
try:
Article.objects.filter(headline__starts='Article')
self.fail('FieldError not raised')
except FieldError as ex:
self.assertEqual(str(ex), "Join on field 'headline' not permitted. "
"Did you misspell 'starts' for the lookup type?")
def test_regex(self):
# Create some articles with a bit more interesting headlines for testing field lookups:
for a in Article.objects.all():
a.delete()
now = datetime.now()
a1 = Article(pub_date=now, headline='f')
a1.save()
a2 = Article(pub_date=now, headline='fo')
a2.save()
a3 = Article(pub_date=now, headline='foo')
a3.save()
a4 = Article(pub_date=now, headline='fooo')
a4.save()
a5 = Article(pub_date=now, headline='hey-Foo')
a5.save()
a6 = Article(pub_date=now, headline='bar')
a6.save()
a7 = Article(pub_date=now, headline='AbBa')
a7.save()
a8 = Article(pub_date=now, headline='baz')
a8.save()
a9 = Article(pub_date=now, headline='baxZ')
a9.save()
# zero-or-more
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo*'),
['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'fo*'),
[
'<Article: f>',
'<Article: fo>',
'<Article: foo>',
'<Article: fooo>',
'<Article: hey-Foo>',
])
# one-or-more
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo+'),
['<Article: fo>', '<Article: foo>', '<Article: fooo>'])
# wildcard
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fooo?'),
['<Article: foo>', '<Article: fooo>'])
# leading anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^b'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'),
['<Article: AbBa>'])
# trailing anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'),
['<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'z$'),
['<Article: baxZ>', '<Article: baz>'])
# character sets
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba[rz]'),
['<Article: bar>', '<Article: baz>'])
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'),
['<Article: baxZ>'])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'ba[RxZ]'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
# and more articles:
a10 = Article(pub_date=now, headline='foobar')
a10.save()
a11 = Article(pub_date=now, headline='foobaz')
a11.save()
a12 = Article(pub_date=now, headline='ooF')
a12.save()
a13 = Article(pub_date=now, headline='foobarbaz')
a13.save()
a14 = Article(pub_date=now, headline='zoocarfaz')
a14.save()
a15 = Article(pub_date=now, headline='barfoobaz')
a15.save()
a16 = Article(pub_date=now, headline='bazbaRFOO')
a16.save()
# alternation
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
'<Article: ooF>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^foo(f|b)'),
['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>'])
# greedy matching
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b.*az'),
[
'<Article: barfoobaz>',
'<Article: baz>',
'<Article: bazbaRFOO>',
'<Article: foobarbaz>',
'<Article: foobaz>',
])
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'b.*ar'),
[
'<Article: bar>',
'<Article: barfoobaz>',
'<Article: bazbaRFOO>',
'<Article: foobar>',
'<Article: foobarbaz>',
])
@skipUnlessDBFeature('supports_regex_backreferencing')
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
a10 = Article(pub_date=now, headline='foobar')
a10.save()
a11 = Article(pub_date=now, headline='foobaz')
a11.save()
a12 = Article(pub_date=now, headline='ooF')
a12.save()
a13 = Article(pub_date=now, headline='foobarbaz')
a13.save()
a14 = Article(pub_date=now, headline='zoocarfaz')
a14.save()
a15 = Article(pub_date=now, headline='barfoobaz')
a15.save()
a16 = Article(pub_date=now, headline='bazbaRFOO')
a16.save()
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b(.).*b\1'),
['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>'])
def test_regex_null(self):
"""
Ensure that a regex lookup does not fail on null/None values
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
def test_regex_non_string(self):
"""
Ensure that a regex lookup does not fail on non-string fields
"""
Season.objects.create(year=2013, gt=444)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'),
['<Season: 2013>'])
def test_regex_non_ascii(self):
"""
Ensure that a regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name='\u2660')
Player.objects.get(name__regex='\u2660')
def test_nonfield_lookups(self):
"""
Ensure that a lookup query containing non-fields raises the proper
exception.
"""
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah=99)
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah__exact=99)
with self.assertRaises(FieldError):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Ensure that genuine field names don't collide with built-in lookup
types ('year', 'gt', 'range', 'in' etc.).
Refs #11670.
"""
# Here we're using 'gt' as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games = Game.objects.filter(season__year__in=[2009, 2010])
pudge = Player.objects.create(name="Ivan Rodriquez")
pudge.games = Game.objects.filter(season__year=2009)
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games = Game.objects.filter(season__year__in=[2011])
johnson = Player.objects.create(name="Johnson")
johnson.games = Game.objects.filter(season__year__in=[2011])
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
# Players who played in 2010
self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
# Players who played in 2011
self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
|
|
from __future__ import with_statement
import os, time, socket
from mparts.manager import Task
from mparts.host import CAPTURE, DISCARD, STDERR
from mparts.util import *
__all__ = ["InitDB", "Postgres", "PGVal", "PGOptsProvider"]
USE_DEBUG_LIBC = False
class InitDB(Task):
__info__ = ["host", "dbdir"]
def __init__(self, host, pg):
dbdir = pg.dbdir
Task.__init__(self, host = host, dbdir = dbdir)
self.host = host
self.dbdir = dbdir
self.pg = pg
self.trust = []
def addTrust(self, host):
route = self.host.routeToHost(host)
# Postgres requires a CIDR address
ai = socket.getaddrinfo(route, None, socket.AF_INET, socket.SOCK_STREAM)
self.trust.append(ai[0][4][0] + "/32")
return self
def start(self):
dbdir = self.dbdir
# Is there already a database here?
try:
self.host.r.readFile(os.path.join(dbdir, "PG_VERSION"))
# Yep. All set.
self.log("Database already exists")
return
except EnvironmentError:
pass
# Create the directory
self.host.r.run(["mkdir", "-p", dbdir])
# Initialize the database
self.pg.initdb(dbdir)
# Set up trust
for t in self.trust:
hba = "\nhost all all %s trust\n" % t
self.host.r.writeFile(os.path.join(dbdir, "pg_hba.conf"),
hba, noCheck = True, append = True)
class Postgres(Task):
__info__ = ["host", "pgPath", "pgBuild", "dbdir", "malloc", "*dynOpts"]
def __init__(self, host, pgPath, pgBuild, dbdir, malloc = "glibc", **opts):
Task.__init__(self, host = host)
self.host = host
self.pgPath = pgPath
self.pgBuild = pgBuild
self.dbdir = dbdir
self.malloc = malloc
# For custom Postgres builds, we have to point it to its
# libraries or it will try to use the system libraries. At
# best, it will probably have the wrong default socket path.
self.__addEnv = {"LD_LIBRARY_PATH" : os.path.join(pgPath, pgBuild, "lib")}
if USE_DEBUG_LIBC:
self.__addEnv["LD_LIBRARY_PATH"] += ":/usr/lib/debug"
if malloc == "glibc":
pass
elif malloc == "tcmalloc":
self.__addEnv["LD_PRELOAD"] = "/usr/lib/libtcmalloc_minimal.so"
else:
raise ValueError("Unknown malloc %s" % malloc)
self.__userOpts = opts
self.dynOpts = {}
def conninfo(self, fromHost, dbname = None):
if fromHost == self.host:
ci = []
else:
ci = ["host=%s" % (fromHost.routeToHost(self.host))]
if dbname:
ci.append("dbname=%s" % dbname)
return " ".join(ci)
def connargs(self, fromHost, dbname = None):
if fromHost == self.host:
ca = []
else:
ca = ["-h", fromHost.routeToHost(self.host)]
if dbname:
ca.extend(["-d", dbname])
return ca
def __bin(self, name):
return os.path.join(self.pgPath, self.pgBuild, "bin", name)
def initdb(self, dbdir):
self.host.r.run([self.__bin("initdb"), dbdir], addEnv = self.__addEnv)
def psql(self, sql, dbname = None, discard = False, args = ["-A", "-t"]):
p = self.host.r.run([self.__bin("psql")] +
self.connargs(self.host, dbname = dbname) +
args + ["-c", sql],
stdout = DISCARD if discard else CAPTURE,
addEnv = self.__addEnv)
if discard:
return None
return p.stdoutRead()
def dbExists(self, name):
res = self.psql("select count(1) from pg_catalog.pg_database "
"where datname = '%s'" % name,
dbname = "template1")
return bool(int(res))
def createDB(self, name):
self.host.r.run([self.__bin("createdb"), name],
addEnv = self.__addEnv)
def tableExists(self, name, dbname = None):
res = self.psql("select count(1) from pg_tables "
"where schemaname = 'public' "
"and tablename = '%s'" % name,
dbname = dbname)
return bool(int(res))
def __getPostmaster(self):
# Find the postmaster
ps = self.host.r.procList()
running = False
for pid, info in ps.iteritems():
if info["status"]["Name"] == "postgres":
running = True
if len(info["cmdline"]) and info["cmdline"][0].startswith("/"):
return pid
if running:
raise ValueError("Postgres is running, but failed to find postmaster")
def __getAllOpts(self, m):
# Set some defaults
opts = {"datestyle": "iso, mdy",
"lc_messages": "C",
"lc_monetary": "C",
"lc_numeric": "C",
"lc_time": "C"}
# Gather configuration from the manager
providers = {}
for p in m.find(cls = PGOptsProvider):
for k, v in p.getPGOpts(self).iteritems():
if k in providers and opts[k] != v:
raise ValueError("%s and %s differ on Postgres setting %s" %
(providers[k], p, k))
opts[k] = v
providers[k] = p
# Local configuration overrides
opts.update(self.__userOpts)
return opts
def __optsToConffile(self, config):
lines = []
for k, v in config.iteritems():
if isinstance(v, bool) or isinstance(v, int) or isinstance(v, float):
vs = str(v)
elif isinstance(v, str):
vs = "'%s'" % v.replace("'", "''")
elif isinstance(v, PGVal):
vs = str(v)
else:
raise ValueError("Setting %s value %s has unknown %s",
(k, v, type(v)))
lines.append("%s = %s" % (k, vs))
return "\n".join(lines)
def __queryDynConfig(self):
# Query the running configuration
q = "select name, setting, unit, vartype from pg_settings"
res = self.psql(q, dbname = "template1",
args = ["-A", "-P", "fieldsep=\t", "-P", "t"])
config = {}
for l in res.splitlines():
# Parse row
parts = l.split("\t")
if len(parts) != 4:
raise ValueError("Failed to parse pg_settings row %r" % l)
name, setting, unit, vartype = parts
# Parse value
if vartype == "bool":
value = {"off":False, "on":True}[setting]
elif vartype == "string":
value = setting
elif vartype == "integer":
value = int(setting)
elif vartype == "real":
value = float(setting)
else:
raise ValueError("Unknown vartype in pg_settings row %r" % l)
# Parse unit
if unit and (vartype == "integer" or vartype == "real"):
                # Strip a leading numeric multiplier such as "8kB" or "16MB"
                mult = ""
                while unit and unit[0].isdigit():
                    mult, unit = mult + unit[0], unit[1:]
                if mult:
                    value *= int(mult)
value = PGVal(value, unit)
elif unit:
raise ValueError("Not expecting unit on pg_settings row %r" % l)
config[name] = value
# Get compilation options
for configOpt in ["LDFLAGS", "CFLAGS"]:
p = self.host.r.run([self.__bin("pg_config"),
"--" + configOpt.lower()],
stdout = CAPTURE, addEnv = self.__addEnv)
config[configOpt] = p.stdoutRead().rstrip("\n")
return config
def start(self, m):
# Stop any running postgres
self.reset()
# Write configuration file
configPath = os.path.join(self.host.outDir(), "%s.conf" % self.name)
config = self.__getAllOpts(m)
self.host.r.writeFile(configPath, self.__optsToConffile(config))
# Start postgres
logPath = self.host.getLogPath(self)
for mayFail in [True, False]:
self.host.r.run([self.__bin("pg_ctl"),
"-D", self.dbdir,
"-o", "--config_file=%s" % configPath,
"start"],
stdout = logPath, addEnv = self.__addEnv)
# Monitor log file
# XXX Use standard monitor function
done = False
for retry in range(20):
time.sleep(0.5)
log = self.host.r.readFile(logPath)
if "database system is ready" in log:
done = True
break
if "FATAL" in log:
break
else:
raise RuntimeError("Timeout waiting for postgres to start")
if done:
break
# Start up failed. Make sure postmaster has exited.
for retry in range(10):
time.sleep(0.5)
if not self.__getPostmaster():
break
else:
raise ValueError("Postgres failed to log start, but hasn't exited")
# Is it a shmmax problem?
if mayFail:
log = self.host.r.readFile(logPath)
if "could not create shared memory segment" in log:
req = int(log.split("size=", 1)[1].split(",", 1)[0])
with Progress("Raising SHMMAX to %d" % req):
self.host.sysctl("kernel.shmmax", req)
# Clear the log so we don't get tripped up by
# errors in it
self.host.r.writeFile(logPath, "")
continue
# Give up
raise ValueError("Failed to start postgres")
def stop(self):
# Get full postgres configuration
self.dynOpts = self.__queryDynConfig()
# We don't actively stop the server. It's not doing (much)
# harm, and this way we might be able to reuse it
# XXX Stop the server.
def reset(self):
self.dynOpts = {}
# Find the postmaster
pid = self.__getPostmaster()
if not pid:
return
# Signal the postmaster to do a fast exit
with Progress("Shutting down postmaster %d" % pid):
import signal
self.host.r.kill(pid, signal.SIGINT)
for retry in range(10):
time.sleep(0.5)
ps = self.host.r.procList()
if pid not in ps:
return
raise ValueError("Failed to cleanly shut down postmaster")
class PGVal(object):
def __init__(self, val, unit):
if unit not in ["kB", "MB", "GB", "ms", "s", "min", "h", "d"]:
raise ValueError("Illegal Postgres unit %r" % unit)
self.val = val
self.unit = unit
def __repr__(self):
return "PGVal(%r, %r)" % (self.val, self.unit)
def __str__(self):
return "%d%s" % (self.val, self.unit)
def __eq__(self, other):
return (isinstance(other, PGVal) and
self.val == other.val and self.unit == other.unit)
def toInfoValue(self):
return (self.val, self.unit)
class PGOptsProvider(object):
def getPGOpts(self, pg):
raise NotImplementedError("getPGOpts is abstract")
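# A minimal sketch (not part of the harness) of a concrete provider: the
# manager's m.find(cls = PGOptsProvider) call in Postgres.__getAllOpts picks
# up objects like this and merges their settings into the generated conffile.
class _ExampleBuffersProvider(PGOptsProvider):
    def getPGOpts(self, pg):
        # PGVal keeps the unit attached, so __optsToConffile and the
        # pg_settings round-trip in __queryDynConfig stay consistent.
        return {"shared_buffers": PGVal(2, "GB")}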
|
|
"""
Internal shared-state variables such as config settings and host lists.
"""
import os
import sys
from optparse import make_option
from fabric.network import HostConnectionCache
from fabric.version import get_version
#
# Win32 flag
#
# Impacts a handful of platform specific behaviors. Note that Cygwin's Python
# is actually close enough to "real" UNIXes that it doesn't need (or want!) to
# use PyWin32 -- so we only test for literal Win32 setups (vanilla Python,
# ActiveState etc) here.
win32 = (sys.platform == 'win32')
#
# Environment dictionary - support structures
#
class _AttributeDict(dict):
"""
Dictionary subclass enabling attribute lookup/assignment of keys/values.
For example::
>>> m = _AttributeDict({'foo': 'bar'})
>>> m.foo
'bar'
>>> m.foo = 'not bar'
>>> m['foo']
'not bar'
``_AttributeDict`` objects also provide ``.first()`` which acts like
``.get()`` but accepts multiple keys as arguments, and returns the value of
the first hit, e.g.::
>>> m = _AttributeDict({'foo': 'bar', 'biz': 'baz'})
>>> m.first('wrong', 'incorrect', 'foo', 'biz')
'bar'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError:
# to conform with __getattr__ spec
raise AttributeError(key)
def __setattr__(self, key, value):
self[key] = value
def first(self, *names):
for name in names:
value = self.get(name)
if value:
return value
# By default, if the user (including code using Fabric as a library) doesn't
# set the username, we obtain the currently running username and use that.
def _get_system_username():
"""
    Obtain the name of the current system user, which will be the default connection user.
"""
if not win32:
import pwd
try:
username = pwd.getpwuid(os.getuid())[0]
# getpwuid raises KeyError if it cannot find a username for the given
# UID, e.g. on ep.io and similar "non VPS" style services. Rather than
# error out, just set the 'default' username to None. Can check for
# this value later if required.
except KeyError:
username = None
return username
else:
import win32api
import win32security
import win32profile
return win32api.GetUserName()
def _rc_path():
"""
Return platform-specific default file path for $HOME/.fabricrc.
"""
rc_file = '.fabricrc'
if not win32:
return os.path.expanduser("~/" + rc_file)
else:
from win32com.shell.shell import SHGetSpecialFolderPath
from win32com.shell.shellcon import CSIDL_PROFILE
return "%s/%s" % (
SHGetSpecialFolderPath(0, CSIDL_PROFILE),
rc_file
)
# Options/settings which exist both as environment keys and which can be set on
# the command line, are defined here. When used via `fab` they will be added to
# the optparse parser, and either way they are added to `env` below (i.e. the
# 'dest' value becomes the environment key and the value, the env value).
#
# Keep in mind that optparse changes hyphens to underscores when automatically
# deriving the `dest` name, e.g. `--reject-unknown-hosts` becomes
# `reject_unknown_hosts`.
#
# Furthermore, *always* specify some sort of default to avoid ending up with
# optparse.NO_DEFAULT (currently a two-tuple)! In general, None is a better
# default than ''.
#
# User-facing documentation for these are kept in docs/env.rst.
env_options = [
make_option('-r', '--reject-unknown-hosts',
action='store_true',
default=False,
help="reject unknown hosts"
),
make_option('-D', '--disable-known-hosts',
action='store_true',
default=False,
help="do not load user known_hosts file"
),
make_option('-u', '--user',
default=_get_system_username(),
help="username to use when connecting to remote hosts"
),
make_option('-p', '--password',
default=None,
help="password for use with authentication and/or sudo"
),
make_option('-H', '--hosts',
default=[],
help="comma-separated list of hosts to operate on"
),
make_option('-R', '--roles',
default=[],
help="comma-separated list of roles to operate on"
),
make_option('-x', '--exclude-hosts',
default=[],
help="comma-separated list of hosts to exclude"
),
make_option('-i',
action='append',
dest='key_filename',
default=None,
help="path to SSH private key file. May be repeated."
),
# Use -a here to mirror ssh(1) options.
# Note, much later on: well, no. ssh -a concerns agent *forwarding*. Sigh.
make_option('-a', '--no_agent',
action='store_true',
default=False,
help="don't use the running SSH agent"
),
# Another minor departure from SSH, due to above mixup: use -A to allow
# disabling of agent forwarding (which is on by default.)
make_option('-A', '--no-agent-forward',
action='store_true',
default=False,
help="don't forward local agent to remote end"
),
# No matching option for ssh(1) so just picked something appropriate.
make_option('-k', '--no-keys',
action='store_true',
default=False,
help="don't load private key files from ~/.ssh/"
),
make_option('-f', '--fabfile',
default='fabfile',
help="Python module file to import, e.g. '../other.py'"
),
make_option('-w', '--warn-only',
action='store_true',
default=False,
help="warn, instead of abort, when commands fail"
),
make_option('-s', '--shell',
default='/bin/bash -l -c',
help="specify a new shell, defaults to '/bin/bash -l -c'"
),
make_option('-c', '--config',
dest='rcfile',
default=_rc_path(),
help="specify location of config file to use"
),
# Verbosity controls, analogous to context_managers.(hide|show)
make_option('--hide',
metavar='LEVELS',
help="comma-separated list of output levels to hide"
),
make_option('--show',
metavar='LEVELS',
help="comma-separated list of output levels to show"
),
# Global PTY flag for run/sudo
make_option('--no-pty',
dest='always_use_pty',
action='store_false',
default=True,
help="do not use pseudo-terminal in run/sudo"
),
# Parallel execution model flag
make_option('-P', '--parallel',
dest='parallel',
action='store_true',
default=False,
help="Default to parallel execution method"
),
# Limits the number of forks the parallel option uses
make_option('-z', '--pool-size',
dest='pool_size',
type='int',
metavar='NUM_FORKS',
default=0,
help="Number of concurrent processes to use when running in parallel",
),
# Abort on prompting flag
make_option('--abort-on-prompts',
action='store_true',
default=False,
help="Abort instead of prompting (for password, host, etc)"
),
# Keepalive
make_option('--keepalive',
dest='keepalive',
type=int,
default=0,
help="enables a keepalive every n seconds"
),
# Linewise output
make_option('--linewise',
action='store_true',
default=False,
help="Print stdout/stderr line-by-line instead of byte-by-byte"
),
]
#
# Environment dictionary - actual dictionary object
#
# Global environment dict. Currently a catchall for everything: config settings
# such as global deep/broad mode, host lists, username etc.
# Most default values are specified in `env_options` above, in the interests of
# preserving DRY: anything in here is generally not settable via the command
# line.
env = _AttributeDict({
'again_prompt': 'Sorry, try again.',
'all_hosts': [],
'combine_stderr': True,
'command': None,
'command_prefixes': [],
'cwd': '', # Must be empty string, not None, for concatenation purposes
'echo_stdin': True,
'exclude_hosts': [],
'host': None,
'host_string': None,
'lcwd': '', # Must be empty string, not None, for concatenation purposes
'local_user': _get_system_username(),
'output_prefix': True,
'passwords': {},
'path': '',
'path_behavior': 'append',
'port': None,
'real_fabfile': None,
'roles': [],
'roledefs': {},
# -S so sudo accepts passwd via stdin, -p with our known-value prompt for
# later detection (thus %s -- gets filled with env.sudo_prompt at runtime)
'sudo_prefix': "sudo -S -p '%s' ",
'sudo_prompt': 'sudo password:',
'use_shell': True,
'user': None,
'version': get_version('short')
})
# Add in option defaults
for option in env_options:
env[option.dest] = option.default
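# For example, the '-w'/'--warn-only' option above yields env['warn_only'] =
# False, and '-s'/'--shell' yields env['shell'] = '/bin/bash -l -c'.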
#
# Command dictionary
#
# Keys are the command/function names, values are the callables themselves.
# This is filled in when main() runs.
commands = {}
#
# Host connection dict/cache
#
connections = HostConnectionCache()
def default_channel():
"""
Return a channel object based on ``env.host_string``.
"""
chan = connections[env.host_string].get_transport().open_session()
chan.input_enabled = True
return chan
#
# Output controls
#
class _AliasDict(_AttributeDict):
"""
`_AttributeDict` subclass that allows for "aliasing" of keys to other keys.
Upon creation, takes an ``aliases`` mapping, which should map alias names
to lists of key names. Aliases do not store their own value, but instead
set (override) all mapped keys' values. For example, in the following
`_AliasDict`, calling ``mydict['foo'] = True`` will set the values of
``mydict['bar']``, ``mydict['biz']`` and ``mydict['baz']`` all to True::
mydict = _AliasDict(
{'biz': True, 'baz': False},
aliases={'foo': ['bar', 'biz', 'baz']}
)
    Because it is possible for the aliased values to be in a heterogeneous
state, reading aliases is not supported -- only writing to them is allowed.
This also means they will not show up in e.g. ``dict.keys()``.
    .. note::
Aliases are recursive, so you may refer to an alias within the key list
of another alias. Naturally, this means that you can end up with
infinite loops if you're not careful.
`_AliasDict` provides a special function, `expand_aliases`, which will take
a list of keys as an argument and will return that list of keys with any
aliases expanded. This function will **not** dedupe, so any aliases which
overlap will result in duplicate keys in the resulting list.
"""
def __init__(self, arg=None, aliases=None):
init = super(_AliasDict, self).__init__
if arg is not None:
init(arg)
else:
init()
# Can't use super() here because of _AttributeDict's setattr override
dict.__setattr__(self, 'aliases', aliases)
def __setitem__(self, key, value):
if key in self.aliases:
for aliased in self.aliases[key]:
self[aliased] = value
else:
return super(_AliasDict, self).__setitem__(key, value)
def expand_aliases(self, keys):
ret = []
for key in keys:
if key in self.aliases:
ret.extend(self.expand_aliases(self.aliases[key]))
else:
ret.append(key)
return ret
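    # Example: with aliases={'output': ['stdout', 'stderr']} (as in the
    # `output` dict below), d['output'] = False writes False into both
    # 'stdout' and 'stderr', and d.expand_aliases(['output']) returns
    # ['stdout', 'stderr'].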
# Keys are "levels" or "groups" of output, values are always boolean,
# determining whether output falling into the given group is printed or not
# printed.
#
# By default, everything except 'debug' is printed, as this is what the average
# user, and new users, are most likely to expect.
#
# See docs/usage.rst for details on what these levels mean.
output = _AliasDict({
'status': True,
'aborts': True,
'warnings': True,
'running': True,
'stdout': True,
'stderr': True,
'debug': False,
'user': True
}, aliases={
'everything': ['warnings', 'running', 'user', 'output'],
'output': ['stdout', 'stderr'],
'commands': ['stdout', 'running']
})
#
# I/O loop sleep parameter (in seconds)
#
io_sleep = 0.01
|
|
import numpy as np
import shutil, time, math, itertools, os
import h5netcdf as h5py
import netCDF4 as nc
from tqdm import tqdm
import tensorflow as tf
import threading
import random
from colorama import Fore, Back, Style
from config import get_config
import sys
from folderDefs import *
import glob
class DataLoader:
def __init__(self, folderPath, config, rawFileBase=''):
self.config = config
self.nSampleFetching = 1024
self.fileReader = []
self.lock = threading.Lock()
self.inputNames = self.config.input_names.split(',')
self.outputNames = config.output_names.split(',')
self.varAllList = self.inputNames + self.outputNames
#if 'SPDT' in self.varAllList:
# if 'TPHYSTND' in self.varAllList:
# # tendency due to everything but convection
# self.inputNames += ['dTdt_nonSP']
#if 'SPDQ' in self.varAllList:
# if 'PHQ' in self.varAllList:
# # tendency due to everything but convection
# self.inputNames += ['dQdt_nonSP']
print('self.varAllList', self.varAllList)
self.varNameSplit = -len(self.outputNames)
self.rawFileBase = rawFileBase
self.reload()
def reload(self):
raw_data_train_path = trainingDataDirRaw+'*.nc'
print(Fore.YELLOW, 'raw_data_train_path', raw_data_train_path, Style.RESET_ALL)
# read addresses and labels from the 'train' folder
self.rawFiles = {}
self.rawDates = []
self.varDim = {}
for fn in sorted(glob.glob(raw_data_train_path)):
date = fn.split('.')[-2]
self.rawDates += [date]
self.rawFiles[date] = fn
print(self.rawDates[:3])
print('last raw file:', fn)
with nc.Dataset(fn, mode='r') as aqua_rg:
self.n_tim = aqua_rg.dimensions['time'].size
if self.config.nlevs_imposed==0:
self.n_lev = aqua_rg.dimensions['lev'].size
else:
self.n_lev = self.config.nlevs_imposed
self.n_lat = aqua_rg.dimensions['lat'].size
self.n_lon = aqua_rg.dimensions['lon'].size
print(aqua_rg)
for k in aqua_rg.variables.keys():
self.varDim[k] = len(aqua_rg[k].shape)
if len(aqua_rg[k].shape) > 2:
print(Fore.YELLOW)
print(fn+': ', k, aqua_rg[k].shape, Style.RESET_ALL)
print('n_tim =', self.n_tim, " = ", aqua_rg.variables['time'][:3],"...",aqua_rg.variables['time'][-3:])
print('n_lev =', self.n_lev, " = ", aqua_rg.variables['lev'][:3],"...",aqua_rg.variables['lev'][-3:])
print('n_lat =', self.n_lat, " = ", aqua_rg.variables['lat'][:3],"...",aqua_rg.variables['lat'][-3:])
print('n_lon =', self.n_lon, " = ", aqua_rg.variables['lon'][:3],"...",aqua_rg.variables['lon'][-3:])
# if flattened, the split is not the index of the first output name, but the index of the first output once flattened
            if False:  # disabled: recompute the split index for flattened data
self.varNameSplit = self.accessTimeData(aqua_rg, self.inputNames, 0, doLog=True).shape[0]
print('self.varNameSplit', self.varNameSplit)
sampX, sampY = self.prepareData(aqua_rg, 0, doLog=True)
print('sampX =', sampX.shape)
print('sampY =', sampY.shape)
try:
for i in range(len(self.fileReader)):
self.fileReader[i].close()
        except Exception:
            pass
print("batchSize = ", self.config.batch_size)
self.Nsamples = len(self.rawDates) * self.n_tim * self.n_lon * self.n_lat
self.NumBatch = self.Nsamples // self.config.batch_size
self.Xshape = list(sampX.shape)
self.Yshape = list(sampY.shape)
print('Xshape', self.Xshape)
print('Yshape', self.Yshape)
## this deals with tf records and using them by date for training/validation
if not self.rawFileBase:
tfRecordsFolderName = '/'.join(self.recordFileName(trainingDataDirTFRecords+date+'/t{0:02d}').split('/')[:-1])
folders = sorted(glob.glob(trainingDataDirTFRecords+'*'))
foldersSplit = int(len(folders) * self.config.frac_train + 0.5)
print(folders[:3], len(folders), folders[-3:])
folders = folders[:foldersSplit] if self.config.is_train else folders[foldersSplit:]
print(Fore.RED, 'days', folders[0], '-->', folders[-1], Style.RESET_ALL)#[fn.split('/')[-1] for fn in folders], Style.RESET_ALL)
self.tfRecordsFiles = []
for fn in folders:
self.tfRecordsFiles += glob.glob(fn+"/*" + '_c' + ".tfrecords")
self.tfRecordsFiles = sorted(self.tfRecordsFiles)
print("tfRecordsFiles", len(self.tfRecordsFiles))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
for i in range(len(self.fileReader)):
self.fileReader[i].close()
        except Exception:
            pass
def convertUnits(self, varname, arr):
"""Make sure SPDQ and SPDT have comparable units"""
if varname == "SPDQ" or varname == "PHQ":
return arr*2.5e6/1000.
return arr
def accessTimeData(self, fileReader, names, iTim, doLog=False):
inputs = []
levmax = 0
for k in names:
#if k =='dTdt_nonSP':
# # tendency due to everything but convection
# arr = fileReader['TPHYSTND'][iTim] - fileReader['SPDT'][iTim]
#elif k=='dQdt_nonSP':
# # tendency due to everything but convection
# arr = fileReader['PHQ'][iTim] - fileReader['SPDQ'][iTim]
#else:
if self.varDim[k] == 4:
arr = fileReader[k][iTim]
# arr = arr[-self.n_lev:,:,:] # select just n levels
elif self.varDim[k] == 3:
arr = fileReader[k][iTim][None]
elif self.varDim[k] == 2:
arr = fileReader[k][None]
elif self.varDim[k] == 1: # latitude only
                # need to convert to single precision, as latitude is stored as double
arr = fileReader[k]
                arr = np.swapaxes(np.tile(arr, (1,self.n_lon,1)),1,2)  # repeat lat along lon to form a 2-D field
                arr = arr.astype('float32')  # impose float32 like the other variables
if self.config.convert_units:
arr = self.convertUnits(k, arr)
#print(k, arr.shape)
levmax = max(levmax, arr.shape[0])
inputs += [arr]
inputs = [np.tile(a, (levmax,1,1)) if a.shape[0] == 1 else a for a in inputs]
        if doLog:
            for k, a in zip(names, inputs):
                print('accessTimeData', k, a.shape)
inX = np.stack(inputs, axis=0)
if doLog:
print('accessTimeData ', names, inX.shape)
return inX
def prepareData(self, fileReader, iTim, doLog=False):
samp = self.accessTimeData(fileReader, self.varAllList, iTim, doLog)
return np.split(samp, [self.varNameSplit])
def get_inputs(self):
return self.get_record_inputs(self.config.is_train, self.config.batch_size, self.config.epoch)
def recordFileName(self, filename):
return filename + '_c' + '.tfrecords' # address to save the TFRecords file into
def makeTfRecordsDate(self, date):
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _floats_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
filenameBase = self.recordFileName(trainingDataDirTFRecords+date+'/t{0:02d}')
folderName = '/'.join(filenameBase.split('/')[:-1])
#print(filenameBase)
#print(folderName)
if not os.path.exists(folderName):
os.makedirs(folderName)
shards = self.n_tim
sampBar = tqdm(range(shards), leave=False)
with nc.Dataset(self.rawFiles[date], mode='r') as aqua_rg:
for iTim in sampBar:
# open the TFRecords file
filename = filenameBase.format(iTim)
#print('opening the TFRecords file', filename)
writer = tf.python_io.TFRecordWriter(filename)
sampBar.set_description(folderName)
sX, sY = self.prepareData(aqua_rg, iTim)
# Create a feature
#print(sX.shape, sX.dtype)
#print(sY.shape, sY.dtype)
                feature = {'X': _bytes_feature(tf.compat.as_bytes(sX.tostring())),
                           'Y': _bytes_feature(tf.compat.as_bytes(sY.tostring()))
                           }
                # Per-gridpoint alternative (disabled): write one example per
                # (lat, lon) column instead of one example per time step.
                # for iLat in range(sX.shape[-2]):
                #     for iLon in range(sX.shape[-1]):
                #         feature = {'X': _bytes_feature(tf.compat.as_bytes(sX[:,:,iLat,iLon].tostring())),
                #                    'Y': _bytes_feature(tf.compat.as_bytes(sY[:,:,iLat,iLon].tostring()))
                #                    }
                # Create an example protocol buffer
                example = tf.train.Example(features=tf.train.Features(feature=feature))
                # Serialize to string and write on the file
                writer.write(example.SerializeToString())
                writer.close()
sys.stdout.flush()
def makeTfRecords(self, n_threads=4):
""" Start background threads to feed queue """
threads = []
print("starting %d data threads for making records" % n_threads)
# for k in range(len(self.rawDates)):
# t = threading.Thread(target=self.makeTfRecordsDate, args=(self.rawDates[k]))
# t.daemon = True # thread will close when parent quits
# t.start()
# threads.append(t)
daysBar = tqdm(range(len(self.rawDates)))
for k in daysBar:
date = self.rawDates[k]
self.makeTfRecordsDate(date)
def read_and_decode(self, filename_queue):
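        # Each serialized example stores X and Y as raw float32 bytes (see
        # makeTfRecordsDate above); decode them and restore the shapes that
        # reload() recorded in self.Xshape / self.Yshape.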
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example,
features={
'X': tf.FixedLenFeature([], tf.string),
'Y': tf.FixedLenFeature([], tf.string)
})
X = tf.decode_raw(features['X'], tf.float32)
Y = tf.decode_raw(features['Y'], tf.float32)
#print('read_and_decode X', X)
X.set_shape(np.prod(self.Xshape))
Y.set_shape(np.prod(self.Yshape))
#print('read_and_decode X', X)
X = tf.reshape(X, self.Xshape)
Y = tf.reshape(Y, self.Yshape)
print('read_and_decode X', X)
print('read_and_decode Y', Y)
return X, Y
def get_record_inputs(self, train, batch_size, num_epochs):
"""Reads input data num_epochs times.
Args:
train: Selects between the training (True) and validation (False) data.
batch_size: Number of examples per returned batch.
num_epochs: Number of times to read the input data, or 0/None to
train forever.
        Note that a tf.train.QueueRunner is added to the graph, which
must be run using e.g. tf.train.start_queue_runners().
"""
if not num_epochs: num_epochs = None
with tf.name_scope('dequeue'):
filename_queue = tf.train.string_input_producer(self.tfRecordsFiles, num_epochs=num_epochs, shuffle=self.config.randomize)
print('filename_queue', filename_queue)
X, Y = self.read_and_decode(filename_queue)
X = tf.transpose(tf.reshape(X, self.Xshape[:2]+[-1]), [2,0,1])
Y = tf.transpose(tf.reshape(Y, self.Yshape[:2]+[-1]), [2,0,1])
X = tf.expand_dims(X, -1)
Y = tf.expand_dims(Y, -1)
X = X[:,:,-self.n_lev:,:][:,:,::-1]
Y = Y[:,:,-self.n_lev:,:][:,:,::-1]
# Shuffle the examples and collect them into batch_size batches.
# (Internally uses a RandomShuffleQueue.)
# We run this in two threads to avoid being a bottleneck.
self.capacityTrain = 8192 * 32
if self.config.randomize:
b_X, b_Y = tf.train.shuffle_batch([X, Y], batch_size=batch_size, num_threads=2,
enqueue_many=True,
capacity=self.capacityTrain,
min_after_dequeue=self.capacityTrain // 2)
else:
b_X, b_Y = tf.train.batch([X, Y], batch_size=batch_size, num_threads=2,
enqueue_many=True,
capacity=self.capacityTrain)
print('self.capacityTrain', self.capacityTrain)
return b_X, b_Y
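# A minimal consumption sketch (assumes the TF1-style queue runners used
# above; paths and config come from folderDefs and get_config):
# loader = DataLoader(trainingDataDir, config)
# b_X, b_Y = loader.get_inputs()
# with tf.Session() as sess:
#     sess.run([tf.global_variables_initializer(),
#               tf.local_variables_initializer()])
#     coord = tf.train.Coordinator()
#     threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#     x_batch, y_batch = sess.run([b_X, b_Y])
#     coord.request_stop()
#     coord.join(threads)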
if __name__ == "__main__":
config, unparsed = get_config()
print(Fore.GREEN, 'config\n', config)
print(Fore.RED, 'unparsed\n', unparsed)
print(Style.RESET_ALL)
    if unparsed:
        raise SystemExit('unparsed command-line arguments: %s' % unparsed)
dh = DataLoader(trainingDataDir, config, raw_file_base)
dh.makeTfRecords()
|
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
from sqlalchemy import BigInteger
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy.ext import declarative
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import Numeric
from sqlalchemy.orm import backref
from sqlalchemy.orm import composite
from sqlalchemy.orm import relationship
from sqlalchemy import String
from sqlalchemy import Text
import glance.artifacts as ga
from glance.common import semver_db
from glance import i18n
from oslo_log import log as os_logging
BASE = declarative.declarative_base()
LOG = os_logging.getLogger(__name__)
_LW = i18n._LW
class ArtifactBase(models.ModelBase, models.TimestampMixin):
"""Base class for Artifact Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
__table_initialized__ = False
__protected_attributes__ = set([
"created_at", "updated_at"])
created_at = Column(DateTime, default=lambda: timeutils.utcnow(),
nullable=False)
updated_at = Column(DateTime, default=lambda: timeutils.utcnow(),
nullable=False, onupdate=lambda: timeutils.utcnow())
def save(self, session=None):
from glance.db.sqlalchemy import api as db_api
super(ArtifactBase, self).save(session or db_api.get_session())
def keys(self):
return self.__dict__.keys()
def values(self):
return self.__dict__.values()
def items(self):
return self.__dict__.items()
def to_dict(self):
d = {}
for c in self.__table__.columns:
d[c.name] = self[c.name]
return d
def _parse_property_type_value(prop, show_text_properties=True):
columns = [
'int_value',
'string_value',
'bool_value',
'numeric_value']
if show_text_properties:
columns.append('text_value')
for prop_type in columns:
if getattr(prop, prop_type) is not None:
return prop_type.rpartition('_')[0], getattr(prop, prop_type)
return None, None
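# Example: an ArtifactProperty row with int_value=5 and the other value
# columns NULL yields ('int', 5); a row with no value set yields (None, None).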
class Artifact(BASE, ArtifactBase):
__tablename__ = 'artifacts'
__table_args__ = (
Index('ix_artifact_name_and_version', 'name', 'version_prefix',
'version_suffix'),
Index('ix_artifact_type', 'type_name', 'type_version_prefix',
'type_version_suffix'),
Index('ix_artifact_state', 'state'),
Index('ix_artifact_owner', 'owner'),
Index('ix_artifact_visibility', 'visibility'),
{'mysql_engine': 'InnoDB'})
__protected_attributes__ = ArtifactBase.__protected_attributes__.union(
set(['published_at', 'deleted_at']))
id = Column(String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
name = Column(String(255), nullable=False)
type_name = Column(String(255), nullable=False)
type_version_prefix = Column(BigInteger, nullable=False)
type_version_suffix = Column(String(255))
type_version_meta = Column(String(255))
type_version = composite(semver_db.DBVersion, type_version_prefix,
type_version_suffix, type_version_meta)
version_prefix = Column(BigInteger, nullable=False)
version_suffix = Column(String(255))
version_meta = Column(String(255))
version = composite(semver_db.DBVersion, version_prefix,
version_suffix, version_meta)
description = Column(Text)
visibility = Column(String(32), nullable=False)
state = Column(String(32), nullable=False)
owner = Column(String(255), nullable=False)
published_at = Column(DateTime)
deleted_at = Column(DateTime)
def to_dict(self, show_level=ga.Showlevel.BASIC,
show_text_properties=True):
d = super(Artifact, self).to_dict()
d.pop('type_version_prefix')
d.pop('type_version_suffix')
d.pop('type_version_meta')
d.pop('version_prefix')
d.pop('version_suffix')
d.pop('version_meta')
d['type_version'] = str(self.type_version)
d['version'] = str(self.version)
tags = []
for tag in self.tags:
tags.append(tag.value)
d['tags'] = tags
if show_level == ga.Showlevel.NONE:
return d
properties = {}
# sort properties
self.properties.sort(key=lambda elem: (elem.name, elem.position))
for prop in self.properties:
proptype, propvalue = _parse_property_type_value(
prop, show_text_properties)
if proptype is None:
continue
if prop.position is not None:
# make array
for p in properties.keys():
if p == prop.name:
# add value to array
properties[p]['value'].append(dict(type=proptype,
value=propvalue))
break
else:
# create new array
p = dict(type='array',
value=[])
p['value'].append(dict(type=proptype,
value=propvalue))
properties[prop.name] = p
else:
# make scalar
properties[prop.name] = dict(type=proptype,
value=propvalue)
d['properties'] = properties
blobs = {}
# sort blobs
self.blobs.sort(key=lambda elem: elem.position)
for blob in self.blobs:
locations = []
# sort locations
blob.locations.sort(key=lambda elem: elem.position)
for loc in blob.locations:
locations.append(dict(value=loc.value,
status=loc.status))
if blob.name in blobs:
blobs[blob.name].append(dict(size=blob.size,
checksum=blob.checksum,
locations=locations,
item_key=blob.item_key))
else:
blobs[blob.name] = []
blobs[blob.name].append(dict(size=blob.size,
checksum=blob.checksum,
locations=locations,
item_key=blob.item_key))
d['blobs'] = blobs
return d
class ArtifactDependency(BASE, ArtifactBase):
__tablename__ = 'artifact_dependencies'
__table_args__ = (Index('ix_artifact_dependencies_source_id',
'artifact_source'),
Index('ix_artifact_dependencies_origin_id',
'artifact_origin'),
Index('ix_artifact_dependencies_dest_id',
'artifact_dest'),
Index('ix_artifact_dependencies_direct_dependencies',
'artifact_source', 'is_direct'),
{'mysql_engine': 'InnoDB'})
id = Column(String(36), primary_key=True, nullable=False,
default=lambda: str(uuid.uuid4()))
artifact_source = Column(String(36), ForeignKey('artifacts.id'),
nullable=False)
artifact_dest = Column(String(36), ForeignKey('artifacts.id'),
nullable=False)
artifact_origin = Column(String(36), ForeignKey('artifacts.id'),
nullable=False)
is_direct = Column(Boolean, nullable=False)
position = Column(Integer)
name = Column(String(36))
source = relationship('Artifact',
backref=backref('dependencies', cascade="all, "
"delete"),
foreign_keys="ArtifactDependency.artifact_source")
dest = relationship('Artifact',
foreign_keys="ArtifactDependency.artifact_dest")
origin = relationship('Artifact',
foreign_keys="ArtifactDependency.artifact_origin")
class ArtifactTag(BASE, ArtifactBase):
__tablename__ = 'artifact_tags'
__table_args__ = (Index('ix_artifact_tags_artifact_id', 'artifact_id'),
Index('ix_artifact_tags_artifact_id_tag_value',
'artifact_id', 'value'),
{'mysql_engine': 'InnoDB'},)
id = Column(String(36), primary_key=True, nullable=False,
default=lambda: str(uuid.uuid4()))
artifact_id = Column(String(36), ForeignKey('artifacts.id'),
nullable=False)
artifact = relationship(Artifact,
backref=backref('tags',
cascade="all, delete-orphan"))
value = Column(String(255), nullable=False)
class ArtifactProperty(BASE, ArtifactBase):
__tablename__ = 'artifact_properties'
__table_args__ = (
Index('ix_artifact_properties_artifact_id', 'artifact_id'),
Index('ix_artifact_properties_name', 'name'),
{'mysql_engine': 'InnoDB'},)
id = Column(String(36), primary_key=True, nullable=False,
default=lambda: str(uuid.uuid4()))
artifact_id = Column(String(36), ForeignKey('artifacts.id'),
nullable=False)
artifact = relationship(Artifact,
backref=backref('properties',
cascade="all, delete-orphan"))
name = Column(String(255), nullable=False)
string_value = Column(String(255))
int_value = Column(Integer)
numeric_value = Column(Numeric)
bool_value = Column(Boolean)
text_value = Column(Text)
position = Column(Integer)
class ArtifactBlob(BASE, ArtifactBase):
__tablename__ = 'artifact_blobs'
__table_args__ = (
Index('ix_artifact_blobs_artifact_id', 'artifact_id'),
Index('ix_artifact_blobs_name', 'name'),
{'mysql_engine': 'InnoDB'},)
id = Column(String(36), primary_key=True, nullable=False,
default=lambda: str(uuid.uuid4()))
artifact_id = Column(String(36), ForeignKey('artifacts.id'),
nullable=False)
name = Column(String(255), nullable=False)
item_key = Column(String(329))
size = Column(BigInteger(), nullable=False)
checksum = Column(String(32))
position = Column(Integer)
artifact = relationship(Artifact,
backref=backref('blobs',
cascade="all, delete-orphan"))
class ArtifactBlobLocation(BASE, ArtifactBase):
__tablename__ = 'artifact_blob_locations'
__table_args__ = (Index('ix_artifact_blob_locations_blob_id',
'blob_id'),
{'mysql_engine': 'InnoDB'})
id = Column(String(36), primary_key=True, nullable=False,
default=lambda: str(uuid.uuid4()))
blob_id = Column(String(36), ForeignKey('artifact_blobs.id'),
nullable=False)
value = Column(Text, nullable=False)
position = Column(Integer)
status = Column(String(36), default='active', nullable=True)
blob = relationship(ArtifactBlob,
backref=backref('locations',
cascade="all, delete-orphan"))
def register_models(engine):
"""Create database tables for all models with the given engine."""
models = (Artifact, ArtifactTag, ArtifactProperty,
ArtifactBlob, ArtifactBlobLocation, ArtifactDependency)
for model in models:
model.metadata.create_all(engine)
def unregister_models(engine):
"""Drop database tables for all models with the given engine."""
models = (ArtifactDependency, ArtifactBlobLocation, ArtifactBlob,
ArtifactProperty, ArtifactTag, Artifact)
for model in models:
model.metadata.drop_all(engine)
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Dylan Curley
"""This module provides interfaces to the various supported api queries.
Specifically, there are functions to manage a request for more detail on a
locale (HandleLocaleQuery), on a metric for some specific region and date
(HandleMetricQuery), or on the nearest defined locales to a set of latitude
and longitude coordinates (HandleNearestNeighborQuery).
"""
import logging
from common import metrics
from datetime import date
class Error(Exception):
"""Common exception that all other exceptions in this module inherit from.
"""
pass
class LookupError(Error):
"""An error occurred during a lookup query.
"""
pass
class SyntaxError(Error):
"""There was a syntax error in the request.
"""
pass
def HandleLocaleQuery(locales_manager, locale):
"""Verifies passed arguments and issues a lookup of locale data.
Args:
locales_manager (LocalesManager object): Locale manager.
locale (string): Name of the locale to be queried.
Raises:
LookupError: If an error occurred during lookup, e.g. the requested
        locale is unknown.
Returns:
(dict) Data about the requested locale.
"""
try:
locale = locales_manager.Locale(locale)
except KeyError as e:
raise LookupError(e)
return {'locale': {'name': locale.name,
'long_name': locale.long_name,
'latitude': locale.latitude,
'longitude': locale.longitude
},
'parent': locale.parent,
'children': locale.children
}
def HandleMetricQuery(metrics_manager, metric, locale, year, month):
"""Verifies passed arguments and issues a lookup of metric data.
Args:
metrics_manager (MetricsManager object): Metrics manager.
metric (string): Name of the metric to be queried.
locale (string): Locale of interest.
year (int): Year of interest.
month (int): Month of interest.
Raises:
LookupError: If an error occurred during lookup, e.g. the requested
        locale is unknown.
SyntaxError: If expected parameters are not provided (are None).
Returns:
(dict) Data about the requested metric at the given year & month for
the given locale.
"""
# Anticipate non-standard locale= requests for world data.
if locale in ('', '""', "''", 'world', 'global'):
locale = 'world'
# Validate query parameters.
if metric is None:
raise SyntaxError('Must provide a parameter "name" identifying the'
' metric you wish to query.')
if year is None or month is None:
raise SyntaxError('Must provide parameters "year" and "month"'
' identifying the date you wish to query.')
if locale is None:
raise SyntaxError('Must provide a parameter "locale" identifying the'
' locale you wish to query. For example, "", "100",'
' "100_az", or "100_az_tucson".')
# Lookup & return the data.
try:
logging.debug('getting ' + metric + ' for ' + str(year) + '-' +
str(month))
data = metrics_manager.LookupResult(metric, year, month, locale)
except metrics.Error as e:
raise LookupError(e)
return data
def HandleMultiMetricQuery(metrics_manager, metric, locale,
startyear, startmonth, endyear, endmonth):
"""Verifies passed arguments and issues a lookup of metric data.
Args:
metrics_manager (MetricsManager object): Metrics manager.
metric (string): Name of the metric to be queried.
locale (string): Locale of interest.
startyear (int): Start year of interest.
startmonth (int): Start month of interest.
endyear (int): End year of interest.
endmonth (int): End month of interest.
Raises:
        LookupError: If an error occurred during lookup, e.g. the requested
locale is unknown.
SyntaxError: If expected parameters are not provided (are None).
Returns:
(dict) Data about the requested metric for all years and months between
startyear-startmonth and endyear-endmonth inclusive.
"""
# Anticipate non-standard locale= requests for world data.
if locale in ('', '""', "''", 'world', 'global'):
locale = 'world'
# Validate query parameters.
if metric is None:
raise SyntaxError('Must provide a parameter "name" identifying the'
' metric you wish to query.')
if startyear is None or startmonth is None:
raise SyntaxError('Must provide parameters "startyear" and "startmonth"'
' identifying the date you wish to query.')
if endyear is None or endmonth is None:
raise SyntaxError('Must provide parameters "endyear" and "endmonth"'
' identifying the date you wish to query.')
if locale is None:
raise SyntaxError('Must provide a parameter "locale" identifying the'
' locale you wish to query. For example, "", "100",'
' "100_az", or "100_az_tucson".')
# Lookup & return the data.
results = {}
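    # Result keys are unpadded 'YYYY-M' strings (e.g. '2013-7'), matching the
    # str() formatting used when the key is built below.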
current_date = date(startyear, startmonth, 1)
end_date = date(endyear, endmonth, 1)
if end_date < current_date:
raise SyntaxError('"endyear"-"endmonth" must be after'
' "startyear"-"startmonth"')
logging.debug('getting multiple from ' + str(current_date) + ' to ' +
str(end_date))
while current_date <= end_date:
try:
logging.debug('getting ' + metric + ' for ' + str(current_date))
key = str(current_date.year) + '-' + str(current_date.month)
results[key] = metrics_manager.LookupResult(
metric, current_date.year, current_date.month, locale)
except metrics.Error as e:
raise LookupError(e)
new_month = current_date.month + 1
new_year = current_date.year
if new_month == 13:
new_month = 1
new_year = new_year + 1
current_date = date(new_year, new_month, 1)
return results
def HandleNearestNeighborQuery(locale_finder, lat, lon):
"""Verifies passed arguments and issues a nearest neighbor lookup.
    Args:
        locale_finder: Locale finder used for the nearest-neighbor lookup.
        lat (float): Latitude of interest.
        lon (float): Longitude of interest.
Raises:
SyntaxError: If expected parameters are not provided (are None).
Returns:
(dict) The nearest city, region, and country to the provided latitude
and longitude coordinates.
"""
if lat is None or lon is None:
raise SyntaxError('Must provide parameters "lat" and "lon" identifying'
                          ' the latitude and longitude of interest.')
return locale_finder.FindNearestNeighbors(lat, lon)
|
|
__author__ = 'croxis'
from datetime import datetime
from io import BytesIO
import os
from queue import Empty
import random
from subprocess import PIPE
import time
import urllib.parse
import zipfile
import eventlet
from eventlet import Queue
from eventlet.green.subprocess import Popen
from flask import make_response, redirect, render_template, send_file, session, \
url_for
from flask.ext.socketio import join_room, leave_room
import lib.cardlib as cardlib
import lib.utils as utils
from reportlab.lib.pagesizes import landscape, letter
from reportlab.lib.utils import ImageReader
from . import main
from ..card_visual import create_card_img
from .. import app, session_manager, socketio
from .forms import GenerateCardsForm, MoreOptionsForm, SubmitCardsForm, \
get_checkpoints_options
thread_pool = {}  # CSRF token: thread
def enqueue_output(process, queue):
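    # Runs as an eventlet green thread: drain the sampler's stdout into a
    # Queue so the socketio loop in card_generate() can poll it without
    # blocking on readline(). A line with at least nine '|' separators is
    # treated as a complete card encoding.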
while process.poll() is None:
try:
            line = process.stdout.readline().decode()  # decode() is unneeded if Popen is given universal_newlines=True
if line.count('|') >= 9:
app.logger.debug("Line: " + line)
queue.put(line)
time.sleep(0.1)
except ValueError:
time.sleep(0.1)
@main.route('/')
def index():
return redirect(url_for('.index_mtgai'))
@main.route('/mtgai', methods=['GET', 'POST'])
def index_mtgai():
random_form = GenerateCardsForm()
random_form.checkpoint.choices = get_checkpoints_options()
form = SubmitCardsForm()
if random_form.validate_on_submit():
session['render_mode'] = random_form.render_mode.data
session['checkpoint_path'] = random_form.checkpoint.data
session['seed'] = random_form.seed.data
session['primetext'] = random_form.primetext.data
session['length'] = random_form.length.data
session['temperature'] = float(random_form.temperature.data)
session['name'] = random_form.name.data.lower() # Uppercase crashes nn
session['supertypes'] = random_form.supertypes.data
session['types'] = random_form.types.data
session['subtypes'] = random_form.subtypes.data
session['rarity'] = random_form.rarity.data
session['bodytext_prepend'] = random_form.bodytext_prepend.data
session['bodytext_append'] = random_form.bodytext_append.data
return redirect(url_for('.card_select'))
if form.validate_on_submit():
session['cardtext'] = form.body.data
session['cardsep'] = '\r\n\r\n'
session['mode'] = "existing"
session['render_mode'] = form.render_mode.data
session['checkpoint_path'] = ''
return redirect(url_for('.card_select'))
random_form.seed.data = random.randint(0, 255)
return render_template('index.html',
current_time=datetime.utcnow(),
form=form,
random_form=random_form,
name='name',
title='MTG Automatic Inventor (MTGAI)')
@socketio.on('generate')
def card_generate():
# Filebased systems need the session cleaned up manually
session_manager.cleanup_sessions()
checkpoint_option = session['checkpoint_path']
do_nn = checkpoint_option != "None"
if do_nn:
checkpoint_path = os.path.join(
os.path.expanduser(app.config['SNAPSHOTS_PATH']),
checkpoint_option)
length = int(session['length'])
if length > app.config['LENGTH_LIMIT']:
length = app.config['LENGTH_LIMIT']
socketio.emit('set max char', {'data': length})
length += 140 # Small fudge factor to be a little more accurate with
# the amount of text actually generated
use_render_mode(session["render_mode"])
if do_nn:
command = ['th', 'sample_hs_v3.1.lua', checkpoint_path, '-gpuid',
str(app.config['GPU'])]
else:
command = ['-gpuid', str(app.config['GPU'])]
if session['seed']:
command += ['-seed', str(session['seed'])]
if session['primetext']:
command += ['-primetext', session['primetext']]
if session['length']:
command += ['-length', str(length)]
if session['temperature']:
command += ['-temperature', str(session['temperature'])]
if session['name']:
command += ['-name', session['name']]
if session['types']:
command += ['-types', session['types']]
if session['supertypes']:
command += ['-supertypes', session['supertypes']]
if session['subtypes']:
command += ['-subtypes', session['subtypes']]
if session['rarity']:
command += ['-rarity', session['rarity']]
if session['bodytext_prepend']:
command += ['-bodytext_prepend', session['bodytext_prepend']]
if session['bodytext_append']:
command += ['-bodytext_append', session['bodytext_append']]
if do_nn:
room = session['csrf_token']
join_room(room)
session['mode'] = "nn"
session['cardtext'] = ''
app.logger.debug("Card generation initiated: " + ' '.join(command))
pipe = PIPE
with Popen(command,
cwd=os.path.expanduser(
app.config['GENERATOR_PATH']),
shell=False,
stdout=pipe) as process:
queue = Queue()
thread = eventlet.spawn(enqueue_output, process, queue)
while process.poll() is None:
try:
time.sleep(0.01)
line = queue.get_nowait()
except Empty:
pass
else:
socketio.emit('raw card', {'data': line}, room=room)
if session["do_text"]:
card = convert_to_card(line)
if card:
socketio.emit('text card', {
'data': card.format().replace('@',
card.name.title()).split(
'\n')},
room=room)
if session["do_images"]:
socketio.emit('image card', {
'data': urllib.parse.quote(line, safe='') +
session[
"image_extra_params"]},
room=room)
                    session['cardtext'] += line + '\n'  # Recreate the output from the sampler
app.logger.debug("Card generated: " + line.rstrip('\n\n'))
session['cardsep'] = '\n\n'
app.logger.debug("Card generation complete.")
else:
session['mode'] = "dummy"
session['command'] = " ".join(command)
session.modified = True
app.save_session(session, make_response('dummy'))
socketio.emit('finished generation', {'data': ''}, room=room)
leave_room(room)
@main.route('/mtgai/card-select', methods=['GET', 'POST'])
def card_select():
generate = False
if session['checkpoint_path']:
generate = True
checkpoint_option = session['checkpoint_path']
do_nn = checkpoint_option != "None"
if do_nn:
session['mode'] = "nn"
else:
session['mode'] = "dummy"
session['command'] = "This needs some fixing and reorganization"
if session['mode'] == "dummy":
return render_template('nn_dummy.html', command=session['command'])
else:
use_render_mode(session["render_mode"])
extra_template_data = {'generate': generate}
extra_template_data['form'] = MoreOptionsForm(
can_print=session["can_print"], can_mse_set=session["can_mse_set"])
if session["can_print"]:
if extra_template_data['form'].validate_on_submit():
if extra_template_data['form'].print_button.data:
return redirect(url_for('.print_cards'))
if session["can_mse_set"]:
if extra_template_data['form'].validate_on_submit():
if extra_template_data['form'].mse_set_button.data:
return redirect(url_for('.download_mse_set'))
if session["do_images"] and not generate:
extra_template_data['urls'] = convert_to_urls(session['cardtext'], cardsep=session['cardsep'])
if session["do_text"] and not generate:
extra_template_data['text'] = convert_to_text(session['cardtext'], cardsep=session['cardsep'])
app.logger.debug("Render template: " + session['render_template'])
return render_template(session["render_template"],
**extra_template_data)
def use_render_mode(render_mode):
session["do_images"] = False
session["do_text"] = False
session["can_print"] = False
session["can_mse_set"] = True
if render_mode == "image":
session["do_images"] = True
session["do_google"] = True
session["can_print"] = True
session["image_extra_params"] = ""
session["render_template"] = 'card_select_image.html'
elif render_mode == "image_searchless":
session["do_images"] = True
session["do_google"] = False
session["can_print"] = True
session["image_extra_params"] = "?no-google=True"
session["render_template"] = 'card_select_image_no_google.html'
elif render_mode == "text":
session["do_text"] = True
session["render_template"] = 'card_select_text.html'
else:
session["render_template"] = 'card_select_raw_only.html'
@main.route('/mtgai/download-mse-set', methods=['GET', 'POST'])
def download_mse_set():
app.logger.debug("Set Session: " + str(session))
set_text = b''
cards = convert_to_cards(session['cardtext'])
zipped_bytes = BytesIO()
set_text += (utils.mse_prepend.encode())
for card in cards:
set_text += card.to_mse().encode('utf-8')
set_text += b'\n'
set_text += b'version control:\n\ttype: none\napprentice code: '
zipped = zipfile.ZipFile(zipped_bytes, mode='w')
zipped.writestr('set', set_text)
zipped.close()
zipped_bytes.seek(0)
return send_file(zipped_bytes, mimetype='application/zip',
as_attachment=True, attachment_filename="nn-set.mse-set")
@main.route('/mtgai/print', methods=['GET', 'POST'])
def print_cards():
# LETTER = (8.5, 11)
LETTER = (11, 8.5)
DPI = 72
# Set print margins
MARGIN = 0.5
x_offset = int(MARGIN * DPI)
y_offset = int(MARGIN * DPI)
CARDSIZE = (int(2.49 * DPI), int(3.48 * DPI))
# scale = CARDSIZE[0] / 375.0 # Default cardsize in px
cards = convert_to_cards(session['cardtext'])
byte_io = BytesIO()
from reportlab.pdfgen import canvas
canvas = canvas.Canvas(byte_io, pagesize=landscape(letter))
WIDTH, HEIGHT = landscape(letter)
# draw = ImageDraw.Draw(sheet)
for card in cards:
image = create_card_img(card, session["do_google"])
image_reader = ImageReader(image)
canvas.drawImage(image_reader,
x_offset,
y_offset,
width=CARDSIZE[0],
height=CARDSIZE[1])
x_offset += CARDSIZE[0] + 5 # 5 px border around cards
if x_offset + CARDSIZE[0] > LETTER[0] * DPI:
x_offset = int(MARGIN * DPI)
y_offset += CARDSIZE[1] + 5
if y_offset + CARDSIZE[1] > LETTER[1] * DPI:
x_offset = int(MARGIN * DPI)
y_offset = int(MARGIN * DPI)
canvas.showPage()
canvas.save()
byte_io.seek(0)
return send_file(byte_io, mimetype='application/pdf')
def convert_to_urls(card_text, cardsep='\r\n\r\n'):
urls = []
for card_src in card_text.split(cardsep):
if card_src:
card = cardlib.Card(card_src)
if card.valid:
urls.append(urllib.parse.quote(card_src, safe='') + session[
"image_extra_params"])
return urls
def convert_to_card(card_src):
"""Convert a single line of text to a Card. Returns none if invalid."""
card = cardlib.Card(card_src)
if card.valid:
return card
def convert_to_cards(text):
"""Card separation is \r\n\r\n when submitted by form and \n\n by text
file."""
cards = []
for card_src in text.split(session['cardsep']):
card = convert_to_card(card_src)
if card:
cards.append(card)
return cards
def convert_to_text(text, cardsep):
cards = convert_to_cards(text)
text = []
for card in cards:
text.extend(card.format().replace('@', card.name.title()).split('\n'))
text.append("------------------------")
return text[:-1]
|
|
import numba
import numpy as np
def initialize():
pass
@numba.njit
def make_rbinom():
rrange = np.arange(9.0)
rbinom = np.zeros((9, 9))
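    # rbinom[n, m] is the binomial coefficient C(n, m) = n!/(m!(n-m)!),
    # computed below as prod(m+1..n) / prod(1..n-m); entries with m > n
    # are filled with 1.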
for n in range(9):
for m in range(9):
if m > n:
rbinom[n, m] = 1
else:
rbinom[n, m] = np.prod(rrange[m + 1: n + 1]) / np.prod(
rrange[1 : n - m + 1]
)
return rbinom
RBINOM = make_rbinom()
AC = np.zeros(9)
BC = np.zeros(9)
AC1 = np.zeros(9)
BC1 = np.zeros(9)
NTERMS = 8
# Coefficients of Table 1
AC[0] = -0.500004211065677e0
BC[0] = 0.115956920789028e0
AC[1] = -0.124989431448700e0
BC[1] = 0.278919134951974e0
AC[2] = -0.781685695640975e-2
BC[2] = 0.252752008621167e-1
AC[3] = -0.216324415010288e-3
BC[3] = 0.841879407543506e-3
AC[4] = -0.344525393452639e-5
BC[4] = 0.152425102734818e-4
AC[5] = -0.315133836828774e-7
BC[5] = 0.148292488399579e-6
AC[6] = -0.296636186265427e-9
BC[6] = 0.157622547107156e-8
AC[7] = -0.313689942474032e-12
BC[7] = 0.117975437124933e-11
AC[8] = -0.112031912249579e-13
BC[8] = 0.655345107753534e-13
# Coefficients of K1
AC1[0] = 0.250000197208863e0
BC1[0] = -0.307966963840932e0
AC1[1] = 0.312495047266388e-1
BC1[1] = -0.853676915840295e-1
AC1[2] = 0.130228768540005e-2
BC1[2] = -0.464343185899275e-2
AC1[3] = 0.270943632576982e-4
BC1[3] = -0.112338260301993e-3
AC1[4] = 0.341642640876988e-6
BC1[4] = -0.157491472277549e-5
AC1[5] = 0.271286480571077e-8
BC1[5] = -0.133414321160211e-7
AC1[6] = 0.197096143802491e-10
BC1[6] = -0.106342159633141e-9
AC1[7] = 0.329351103353054e-13
BC1[7] = -0.159531523882074e-12
AC1[8] = 0.573031034976631e-15
BC1[8] = -0.340195779923156e-14
@numba.njit
def prepare_z(x, y, z1, z2):
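    # Map the physical point (x, y) into the element's local coordinate,
    # where z1 maps to -1 and z2 to +1 and Lin is the element length; points
    # that land (numerically) on an endpoint are nudged to avoid the log
    # singularities in the potentials below.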
zin = np.complex(x, y)
z1in = z1
z2in = z2
Lin = abs(z2in - z1in)
z = (2.0 * zin - (z1in + z2in)) / (z2in - z1in)
zplus1 = z + 1.0
zmin1 = z - 1.0
# If at cornerpoint, move slightly
if abs(zplus1) < 1.0e-8 * 2.0 / Lin:
zplus1 = zplus1 + 1.0e-8
if abs(zmin1) < 1.0e-8 * 2.0 / Lin:
zmin1 = zmin1 + 1.0e-8
return zin, z1in, z2in, Lin, z, zplus1, zmin1
@numba.njit
def potbeslsho(x, y, z1, z2, labda, order, ilap, naq):
"""
Parameters
----------
x,y: Point where potential is computed
z1: Complex begin point of line-sink
z2: Complex end point of line-sink
labda(naq): labda's (zero for first labda if Laplace)
order: Order of the line-sink
ilap: equals 1 when first value is Laplace line-sink and first labda equals zero
naq: Number of aquifers
    Returns
    -------
    rv(naq): Potentials. First spot is Laplace value if ilap=1
"""
rv = np.zeros(naq)
# lstype = 1 means line-sink
lstype = 1
# Radius of convergence
Rconv = 7.0
# if (ilap==1) :
# istart = 1
# else:
# istart = 0
zin, z1in, z2in, Lin, z, zplus1, zmin1 = prepare_z(x, y, z1, z2)
# Laplace linesink
if ilap == 1:
power = order + 1
pcor = np.complex(0.0, 0.0)
for n in range(1, int((power + 1) / 2) + 1):
pcor = pcor + z ** (power - 2 * n + 1) / (2 * n - 1)
pcor = 2.0 * pcor
comega = (
z ** power * np.log((zmin1) / (zplus1))
+ pcor
- np.log(zmin1)
+ (-1.0) ** power * np.log(zplus1)
)
comega = -comega * Lin / (4.0 * np.pi * power)
rv[0] = np.real(comega)
# N-1 leakage factors
for i in range(ilap, naq):
# Check whether entire linesink is outside radius of convergence
# Outside if |z-zc|>L/2+7lab, and thus |Z|>1+7lab*2/L, or |zeta|>1/biglab+7 (zeta is called z here)
biglab = 2.0 * labda[i] / Lin
z = (2.0 * zin - (z1in + z2in)) / (z2in - z1in) / biglab
if abs(z) < (Rconv + 1.0 / biglab):
pot = IntegralF(zin, z1in, z2in, Lin, labda[i], order, Rconv, lstype)
rv[i] = -Lin / 2.0 * pot
else:
rv[i] = 0.0
return rv
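# A minimal usage sketch (hedged; the labda values are illustrative only):
# lab = np.array([0.0, 10.0])  # leading zero -> Laplace term, so ilap=1
# rv = potbeslsho(50.0, 20.0, complex(0, 0), complex(100, 0), lab, 0, 1, 2)
# rv[0] is the Laplace potential, rv[1] the modified-Helmholtz contribution.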
@numba.njit
def potbeslsv(x, y, z1, z2, lab, order, ilap, naq):
# Check if endpoints need to be adjusted using the largest labda (the first one)
pot = np.zeros((order + 1, naq))
for n in range(0, order + 1):
pot[n, 0 : naq + 1] = potbeslsho(x, y, z1, z2, lab, n, ilap, naq)
return pot
@numba.njit
def disbeslsho(x, y, z1, z2, labda, order, ilap, naq):
# Input:
# x,y: Point where discharge is computed
# z1: Complex begin point of line-sink
# z2: Complex end point of line-sink
# labdain(Naquifers): Array with zero in first spot and labda's in remaining spots
# order: Order of the line-sink
# Naquifers: Number of aquifers
# rvx(Naquifers),rvy(Naquifers): Arrays to store return values
# Output:
# rvx(Naquifers),rvy(Naquifers): Values of Qx and Qy with Laplace value in first spot
# and mod.Helmholtz potentials in remaining spots
rv = np.zeros((2, naq))
# Radius of convergence
Rconv = 7.0
# lstype = 1 means line-sink
lstype = 1
zin, z1in, z2in, Lin, z, zplus1, zmin1 = prepare_z(x, y, z1, z2)
pcor = 0.0
# Laplace linesink
if ilap == 1:
        pcor = complex(0.0, 0.0)
for n in range(1, int((order + 1) / 2) + 1):
pcor = pcor + float(order - 2 * n + 2) * z ** (order + 1 - 2 * n) / float(
2 * n - 1
)
pcor = 2.0 * pcor
cdum = 1.0 / (
order + 1
) # Without this intermediate statement it didn't seem to work
wdis = float(order + 1) * z ** order * np.log((zmin1) / (zplus1)) + pcor
wdis = (
wdis
+ (z ** (order + 1) - 1.0) / zmin1
- (z ** (order + 1) - (-1.0) ** (order + 1)) / zplus1
)
wdis = wdis * Lin / 2.0 / (z2in - z1in) / np.pi * cdum
rv[0, 0] = np.real(wdis)
rv[1, 0] = -np.imag(wdis)
for i in range(ilap, naq):
        wdis = complex(0.0, 0.0)
# Check whether entire linesink is outside radius of convergence
# Outside if |z-zc|>L/2+7lab, and thus |Z|>1+7lab*2/L, or |zeta|>1/biglab+7 (zeta is called z here)
biglab = 2.0 * labda[i] / Lin
z = (2.0 * zin - (z1in + z2in)) / (z2in - z1in) / biglab
if abs(z) < (Rconv + 1.0 / biglab):
wdis = IntegralG(zin, z1in, z2in, Lin, labda[i], order, Rconv, lstype)
wdis = 2.0 * Lin / (z2in - z1in) / biglab * wdis
rv[0, i] = np.real(wdis)
rv[1, i] = -np.imag(wdis)
else:
rv[0, i] = 0.0
rv[1, i] = 0.0
return rv
@numba.njit
def disbeslsv(x, y, z1, z2, lab, order, ilap, naq):
# locals
qxqy = np.zeros((2 * (order + 1), naq))
# Check if endpoints need to be adjusted using the largest labda (the first one)
for n in range(0, order + 1):
rv = disbeslsho(x, y, z1, z2, lab, n, ilap, naq)
        qxqy[n, 0:naq] = rv[0, 0:naq]
        qxqy[n + order + 1, 0:naq] = rv[1, 0:naq]
return qxqy
@numba.njit
def potbesldho(x, y, z1, z2, labda, order, ilap, naq):
# Input:
# x,y: Point where potential is computed
# z1: Complex begin point of line-doublet
# z2: Complex end point of line-doublet
# labda(naq): labda's (zero for first labda if Laplace)
# order: Order of the line-doublet
# ilap: equals 1 when first value is Laplace line-doublet and first labda equals zero
# naq: Number of aquifers
    # Output:
    # rv(naq): Potentials. First spot is Laplace value if ilap=1
rv = np.zeros(naq)
# Radius of convergence
Rconv = 7.0
# lstype=2 means line-doublet
lstype = 2
zin, z1in, z2in, Lin, z, zplus1, zmin1 = prepare_z(x, y, z1, z2)
# Laplace line-doublet
if ilap == 1:
comega = z ** order * np.log(zmin1 / zplus1)
        qm = complex(0.0, 0.0)
for n in range(1, int((order + 1) / 2) + 1):
qm = qm + z ** (order - 2.0 * float(n) + 1.0) / (2.0 * float(n) - 1.0)
        comega = 1.0 / (2.0 * np.pi * complex(0.0, 1.0)) * (comega + 2.0 * qm)
rv[0] = np.real(comega)
# N-1 leakage factors
for i in range(ilap, naq):
pot = 0.0
# Check whether entire linedoublet is outside radius of convergence
# Outside if |z-zc|>L/2+7lab, and thus |Z|>1+7lab*2/L, or |zeta|>1/biglab+7 (zeta is called z here)
biglab = 2.0 * labda[i] / Lin
z = (2.0 * zin - (z1in + z2in)) / (z2in - z1in) / biglab
if abs(z) < (Rconv + 1.0 / biglab):
m1, m2, NLS = findm1m2(zin, z1in, z2in, Lin, labda[i], Rconv)
            comega = complex(0.0, 0.0)
if m1 > 0: # Otherwise outside radius of convergence
z1new = z1in + float(m1 - 1) / float(NLS) * (z2in - z1in)
z2new = z1in + float(m2) / float(NLS) * (z2in - z1in)
del0 = float(1 - m1 - m2 + NLS) / float(1 - m1 + m2)
ra = float(NLS) / float(1 + m2 - m1)
comega = IntegralLapLineDipole(zin, z1new, z2new, del0, ra, order)
pot = IntegralF(zin, z1in, z2in, Lin, labda[i], order, Rconv, lstype)
            rv[i] = (
                np.real(comega / complex(0.0, 1.0)) + np.imag(z) / biglab * pot
            )  # Note that z is really zeta in analysis
else:
rv[i] = 0.0
return rv
@numba.njit
def potbesldv(x, y, z1, z2, lab, order, ilap, naq):
pot = np.zeros((order + 1, naq))
# Check if endpoints need to be adjusted using the largest labda (the first one)
for n in range(0, order + 1):
        pot[n, 0:naq] = potbesldho(x, y, z1, z2, lab, n, ilap, naq)
return pot
@numba.njit
def disbesldho(x, y, z1, z2, labda, order, ilap, naq):
# Input:
# x,y: Point where discharge is computed
    # z1: Complex begin point of line-doublet
    # z2: Complex end point of line-doublet
    # labda(naq): Array with zero in first spot (if ilap=1) and labda's in remaining spots
    # order: Order of the line-doublet
    # naq: Number of aquifers
    # Output:
    # rv(2, naq): Values of Qx and Qy with Laplace value in first spot
    #             and mod.Helmholtz potentials in remaining spots
rv = np.zeros((2, naq))
# Radius of convergence
Rconv = 7.0
# lstype=2 means line-doublet
lstype = 2
zin, z1in, z2in, Lin, z, zplus1, zmin1 = prepare_z(x, y, z1, z2)
# Laplace line-doublet
    qm = complex(0.0, 0.0)
if ilap == 1:
if order == 0:
            wdis = -(1.0 / zmin1 - 1.0 / zplus1) / (
                np.pi * complex(0.0, 1.0) * (z2in - z1in)
            )
else:
wdis = float(order) * z ** (order - 1) * np.log(zmin1 / zplus1)
wdis = wdis + z ** order * (1.0 / zmin1 - 1.0 / zplus1)
            qm = complex(0.0, 0.0)
if order > 1: # To avoid a possible problem of 0 * 0^(-1)
for n in range(1, int(order / 2) + 1):
qm = qm + float(order - 2 * n + 1) * z ** (order - 2 * n) / float(
2 * n - 1
)
            wdis = -(wdis + 2.0 * qm) / (np.pi * complex(0.0, 1.0) * (z2in - z1in))
rv[0, 0] = np.real(wdis)
rv[1, 0] = -np.imag(wdis)
# N-1 or N leakage factors
for i in range(ilap, naq):
        wdis = complex(0.0, 0.0)
# Check whether entire line-doublet is outside radius of convergence
# Outside if |z-zc|>L/2+7lab, and thus |Z|>1+7lab*2/L, or |zeta|>1/biglab+7 (zeta is called z here)
biglab = 2.0 * labda[i] / Lin
z = (2.0 * zin - (z1in + z2in)) / (z2in - z1in) / biglab
if abs(z) < (Rconv + 1.0 / biglab):
m1, m2, NLS = findm1m2(zin, z1in, z2in, Lin, labda[i], Rconv)
            wdis1 = complex(0.0, 0.0)
if m1 > 0:
z1new = z1in + float(m1 - 1) / float(NLS) * (z2in - z1in)
z2new = z1in + float(m2) / float(NLS) * (z2in - z1in)
del0 = float(1 - m1 - m2 + NLS) / float(1 - m1 + m2)
ra = float(NLS) / float(1 + m2 - m1)
                wdis1 = IntegralLapLineDipoleDis(zin, z1new, z2new, del0, ra, order)
                wdis1 = -2.0 * wdis1 / (complex(0.0, 1.0) * (z2new - z1new))
pot = IntegralF(zin, z1in, z2in, Lin, labda[i], order, Rconv, lstype)
wdis2 = IntegralG(zin, z1in, z2in, Lin, labda[i], order, Rconv, lstype)
            wdis3 = pot / (2.0 * complex(0.0, 1.0)) + wdis2 * np.imag(z)
wdis = wdis1 - 4.0 * wdis3 / (biglab ** 2 * (z2in - z1in))
rv[0, i] = np.real(wdis)
rv[1, i] = -1.0 * np.imag(wdis)
return rv
@numba.njit
def disbesldv(x, y, z1, z2, lab, order, ilap, naq):
qxqy = np.zeros((2 * (order + 1), naq))
# Check if endpoints need to be adjusted using the largest labda (the first one)
for n in range(0, order + 1):
rv = disbesldho(x, y, z1, z2, lab, n, ilap, naq)
        qxqy[n, 0:naq] = rv[0, 0:naq]
        qxqy[n + order + 1, 0:naq] = rv[1, 0:naq]
return qxqy
@numba.njit
def IntegralF(zin, z1in, z2in, Lin, labda, order, Rconv, lstype):
    czmzbarp = np.full(NTERMS + 1, complex(0.0, 0.0))
    cgamma = np.full((NTERMS + 1, NTERMS + 1), complex(0.0, 0.0))
    calphat = np.full(2 * NTERMS + 1, complex(0.0, 0.0))
    cbetat = np.full(2 * NTERMS + 1, complex(0.0, 0.0))
    cc = np.full(order + 2, complex(0.0, 0.0))
    calpha = np.full(2 * NTERMS + order + 1, complex(0.0, 0.0))
    cbeta = np.full(2 * NTERMS + order + 1, complex(0.0, 0.0))
m1, m2, NLS = findm1m2(zin, z1in, z2in, Lin, labda, Rconv)
if m1 == 0:
# pot = 0.0
return 0.0
# Compute zeta (called z here). This is the regular value of the entire element
L = abs(z2in - z1in)
biglab = 2.0 * labda / L
z = (2.0 * zin - (z1in + z2in)) / (z2in - z1in) / biglab
zbar = np.conj(z)
# Coefficients gamma(n,m), Eq. 21
    # Store coefficients in matrix.
for n in range(0, NTERMS + 1):
czmzbarp[n] = (z - zbar) ** n
for n in range(0, NTERMS + 1):
for m in range(0, n + 1):
cgamma[n, m] = RBINOM[n, m] * czmzbarp[n - m]
# Eq. 23 These coefficients should be modified for a higher order linesink
for n in range(0, 2 * NTERMS + 1):
        calphat[n] = complex(0.0, 0.0)
        cbetat[n] = complex(0.0, 0.0)
for m in range(max(0, n - NTERMS), int(n / 2) + 1):
if lstype == 1:
calphat[n] = calphat[n] + AC[n - m] * cgamma[n - m, m]
cbetat[n] = cbetat[n] + BC[n - m] * cgamma[n - m, m]
else:
calphat[n] = calphat[n] + AC1[n - m] * cgamma[n - m, m]
cbetat[n] = cbetat[n] + BC1[n - m] * cgamma[n - m, m]
# Compute coefficients of delta^p
for m in range(0, order + 1):
cc[m] = RBINOM[order, m] * z ** (order - m) * biglab ** order
if order > 0:
for n in range(0, 2 * NTERMS + order + 1):
            calpha[n] = complex(0.0, 0.0)
            cbeta[n] = complex(0.0, 0.0)
for m in range(max(0, n - 2 * NTERMS), min(n, order) + 1):
calpha[n] = calpha[n] + cc[m] * calphat[n - m]
cbeta[n] = cbeta[n] + cc[m] * cbetat[n - m]
else:
calpha = calphat
cbeta = cbetat
# Evaluation of integral, Eq. 25
    cInt = complex(0.0, 0.0)
del1 = -1.0 + 2.0 * (float(m1) - 1.0) / float(NLS)
del2 = -1.0 + 2.0 * float(m2) / float(NLS)
cd1minz = del1 / biglab - z
cd2minz = del2 / biglab - z
if abs(cd1minz) < 1.0e-8 / labda:
cd1minz = cd1minz + 1.0e-8
if abs(cd2minz) < 1.0e-8 / labda:
cd2minz = cd2minz + 1.0e-8
cln1 = np.log(cd1minz)
cln2 = np.log(cd2minz)
for n in range(0, 2 * NTERMS + order + 1):
cInt = cInt + (
2.0 * calpha[n] * cln2 - 2.0 * calpha[n] / (n + 1) + cbeta[n]
) * (cd2minz) ** (n + 1) / float(n + 1)
cInt = cInt - (
2.0 * calpha[n] * cln1 - 2.0 * calpha[n] / (n + 1) + cbeta[n]
) * (cd1minz) ** (n + 1) / float(n + 1)
pot = np.real(cInt) * biglab / (2.0 * np.pi)
return pot
@numba.njit
def IntegralG(zin, z1in, z2in, Lin, labda, order, Rconv, lstype):
    czmzbarp = np.full(NTERMS + 1, complex(0.0, 0.0))
    cgamma = np.full((NTERMS + 1, NTERMS + 1), complex(0.0, 0.0))
    cahat = np.full(2 * (NTERMS - 1) + 1, complex(0.0, 0.0))
    cbhat = np.full(2 * (NTERMS - 1) + 1, complex(0.0, 0.0))
    calphat = np.full(2 * NTERMS + 1, complex(0.0, 0.0))
    cbetat = np.full(2 * NTERMS + 1, complex(0.0, 0.0))
    cc = np.full(order + 2, complex(0.0, 0.0))
    calpha = np.full(2 * NTERMS + order + 1, complex(0.0, 0.0))
    cbeta = np.full(2 * NTERMS + order + 1, complex(0.0, 0.0))
biglabin = 2.0 * labda / Lin
m1, m2, NLS = findm1m2(zin, z1in, z2in, Lin, labda, Rconv)
if m1 == 0:
# wdis = np.complex(0.0, 0.0)
        return complex(0.0, 0.0)
# Compute zeta (called z here). This is the regular value of the entire element
L = abs(z2in - z1in)
biglab = 2.0 * labda / L
z = (2.0 * zin - (z1in + z2in)) / (z2in - z1in) / biglab
zbar = np.conj(z)
# Coefficients gamma(n,m), Eq. 21
    # Store coefficients in matrix.
for n in range(0, NTERMS + 1):
czmzbarp[n] = (z - zbar) ** n
for n in range(0, NTERMS + 1):
for m in range(0, n + 1):
cgamma[n, m] = RBINOM[n, m] * czmzbarp[n - m]
# Integral g1
# Implemented with different z, rather than Delta1 and Delta2
z1 = z1in + float(m1 - 1) / float(NLS) * (z2in - z1in)
z2 = z1in + float(m2) / float(NLS) * (z2in - z1in)
del0 = float(1 - m1 - m2 + NLS) / float(1 - m1 + m2)
ra = float(NLS) / float(1 + m2 - m1)
# comega = np.complex(0.0, 0.0)
comega = IntegralLapLineDipole(zin, z1, z2, del0, ra, order)
if lstype == 1:
g1 = -AC[0] * biglabin * comega
# Integral g2
# Compute hat coefficients
for n in range(0, NTERMS):
cahat[n] = float(n + 1) * AC[n + 1]
cbhat[n] = AC[n + 1] + float(n + 1) * BC[n + 1]
else:
g1 = -AC1[0] * biglabin * comega
# Integral g2
# Compute hat coefficients
for n in range(0, NTERMS):
cahat[n] = float(n + 1) * AC1[n + 1]
cbhat[n] = AC1[n + 1] + float(n + 1) * BC1[n + 1]
# Eq. 23
for n in range(0, 2 * NTERMS):
        calphat[n] = complex(0.0, 0.0)
        cbetat[n] = complex(0.0, 0.0)
for m in range(max(0, n - NTERMS + 1), int((n + 1) / 2) + 1):
calphat[n] = calphat[n] + cahat[n - m] * cgamma[n - m + 1, m]
cbetat[n] = cbetat[n] + cbhat[n - m] * cgamma[n - m + 1, m]
# Compute coefficients of delta^p
for m in range(0, order + 1):
cc[m] = RBINOM[order, m] * z ** (order - m) * biglab ** order
if order > 0:
for n in range(0, 2 * NTERMS + order):
            calpha[n] = complex(0.0, 0.0)
            cbeta[n] = complex(0.0, 0.0)
for m in range(max(0, n - 2 * NTERMS + 1), min(n, order) + 1):
calpha[n] = calpha[n] + cc[m] * calphat[n - m]
cbeta[n] = cbeta[n] + cc[m] * cbetat[n - m]
else:
calpha = calphat
cbeta = cbetat
# Computation of integral
    g2 = complex(0.0, 0.0)
del1 = -1.0 + 2.0 * (float(m1) - 1.0) / float(NLS)
del2 = -1.0 + 2.0 * float(m2) / float(NLS)
cd1minz = del1 / biglab - z
cd2minz = del2 / biglab - z
if abs(cd1minz) < 1.0e-8 / labda:
cd1minz = cd1minz + 1.0e-8
if abs(cd2minz) < 1.0e-8 / labda:
cd2minz = cd2minz + 1.0e-8
cln1 = np.log(cd1minz)
cln2 = np.log(cd2minz)
for n in range(0, 2 * NTERMS - 1 + order + 1):
g2 = g2 - (calpha[n] * cln2 - calpha[n] / (n + 1) + cbeta[n]) * (cd2minz) ** (
n + 1
) / float(n + 1)
g2 = g2 + (calpha[n] * cln1 - calpha[n] / (n + 1) + cbeta[n]) * (cd1minz) ** (
n + 1
) / float(n + 1)
g2 = biglabin * g2 / (2 * np.pi)
# Integral g3
# Eq. 23
    calphat[0] = complex(0.0, 0.0)
    for n in range(1, 2 * NTERMS):  # Loop starts at 1 because of a bug in Digital Fortran
        calphat[n] = complex(0.0, 0.0)
for m in range(max(0, n - NTERMS), int((n - 1) / 2) + 1):
calphat[n] = calphat[n] + cahat[n - m - 1] * cgamma[n - m - 1, m] * (
-1.0
) ** (n - 1 - 2 * m)
# Compute coefficients of delta^p
for m in range(0, order + 1):
cc[m] = RBINOM[order, m] * zbar ** (order - m) * biglab ** order
if order > 0:
for n in range(0, 2 * NTERMS + order):
            calpha[n] = complex(0.0, 0.0)
for m in range(max(0, n - 2 * NTERMS + 1), min(n, order) + 1):
calpha[n] = calpha[n] + cc[m] * calphat[n - m]
else:
calpha = calphat
# Computation of integral
    g3 = complex(0.0, 0.0)
    # cd1minz = del1 / biglab - zbar
    # cd2minz = del2 / biglab - zbar
    # if ( abs(cd1minz) < 1.0e-8) cd1minz = cd1minz + 1.0e-8
    # if ( abs(cd2minz) < 1.0e-8) cd2minz = cd2minz + 1.0e-8
    # By definition each log is the conjugate of the previous log; this avoids
    # problems with signs along the line (and saves log evaluations).
cd1minz = np.conj(cd1minz)
cd2minz = np.conj(cd2minz)
cln1 = np.conj(cln1)
cln2 = np.conj(cln2)
for n in range(0, 2 * NTERMS + order):
g3 = g3 - (calpha[n] * cln2 - calpha[n] / (n + 1)) * (cd2minz) ** (
n + 1
) / float(n + 1)
g3 = g3 + (calpha[n] * cln1 - calpha[n] / (n + 1)) * (cd1minz) ** (
n + 1
) / float(n + 1)
g3 = biglabin * g3 / (2.0 * np.pi)
wdis = g1 + g2 + g3
return wdis
@numba.njit
def IntegralLapLineDipole(zin, z1, z2, del0, ra, order):
    cg = np.full(order + 2, complex(0.0, 0.0))
z = (2.0 * zin - (z1 + z2)) / (z2 - z1)
zplus1 = z + 1.0
zmin1 = z - 1.0
# We don't always have to do this, so maybe put in condition?
# Determine coefficients of powers of Delta
for m in range(0, order + 1):
cg[m] = RBINOM[order, m] * (-del0) ** (order - m) / ra ** order
    zterm = complex(0.0, 0.0)
for n in range(0, order + 1):
zterm = zterm + cg[n] * z ** n
    qmtot = complex(0.0, 0.0)
for m in range(1, order + 1):
        qm = complex(0.0, 0.0)
for n in range(1, int((m + 1) / 2) + 1):
qm = qm + z ** (m - 2 * n + 1) / float(2 * n - 1)
qmtot = qmtot + 2.0 * cg[m] * qm
comega = (zterm * np.log(zmin1 / zplus1) + qmtot) / (2.0 * np.pi)
return comega
@numba.njit
def IntegralLapLineDipoleDis(zin, z1, z2, del0, ra, order):
    cg = np.full(order + 2, complex(0.0, 0.0))
z = (2.0 * zin - (z1 + z2)) / (z2 - z1)
zplus1 = z + 1.0
zmin1 = z - 1.0
# Determine coefficients of powers of Delta for [ (Delta-Delta_0)/a ] ^ p
for m in range(0, order + 1):
cg[m] = RBINOM[order, m] * (-del0) ** (order - m) / ra ** order
    zterm1 = complex(0.0, 0.0)
    zterm2 = complex(0.0, 0.0)
for n in range(1, order + 1):
zterm1 = zterm1 + cg[n] * float(n) * z ** (n - 1)
for n in range(0, order + 1):
zterm2 = zterm2 + cg[n] * z ** n
    qmtot = complex(0.0, 0.0)
for m in range(2, order + 1):
        qm = complex(0.0, 0.0)
for n in range(1, int(m / 2) + 1):
qm = qm + float(m - 2 * n + 1) * z ** (m - 2 * n) / float(2 * n - 1)
qmtot = qmtot + 2.0 * cg[m] * qm
wdis = (
zterm1 * np.log(zmin1 / zplus1) + zterm2 * (1.0 / zmin1 - 1.0 / zplus1) + qmtot
) / (2.0 * np.pi)
return wdis
@numba.njit
def findm1m2(zin, z1in, z2in, Lin, labda, Rconv):
    # Break the integral up into sections of at most one labda long
    # and find the first (m1) and last (m2) section within the radius of convergence
if labda == 0.0:
NLS = 0
else:
NLS = int(np.ceil(Lin / labda))
m1 = 0
m2 = 0
for j in range(1, NLS + 1):
z1 = z1in + float(j - 1) / NLS * (z2in - z1in)
z2 = z1 + (z2in - z1in) / NLS
L = abs(z2 - z1)
biglab = 2.0 * labda / L
z = (2.0 * zin - (z1 + z2)) / (z2 - z1) / biglab
if m1 == 0:
if abs(z) < Rconv:
m1 = j
else:
if abs(z) > Rconv:
m2 = j - 1
break
if m2 == 0:
m2 = NLS
return m1, m2, NLS
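# Minimal usage sketch with illustrative values (assumes the module-level
# tables NTERMS, RBINOM, AC, BC and AC1, BC1 defined above): a zeroth-order
# line-sink from 0 to 100 with a Laplace part (ilap=1) and one leakage factor.
if __name__ == "__main__":
    lab = np.array([0.0, 10.0])  # first labda is zero because ilap = 1
    phi = potbeslsho(50.0, 5.0, complex(0.0, 0.0), complex(100.0, 0.0), lab, 0, 1, 2)
    print(phi)  # naq potentials: Laplace value first, mod. Helmholtz second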
|
|
#!/usr/bin/env python
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for checking JS files for common style guide violations.
These style guide violations should only apply to JavaScript and not all Ecma
scripting languages.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
from closure_linter import ecmalintrules
from closure_linter import error_check
from closure_linter import errors
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
# Shorthand
Error = error.Error
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
"""JavaScript lint rules that catch JavaScript specific style errors."""
def __init__(self, namespaces_info):
"""Initializes a JavaScriptLintRules instance."""
ecmalintrules.EcmaScriptLintRules.__init__(self)
self._namespaces_info = namespaces_info
self._declared_private_member_tokens = {}
self._declared_private_members = set()
self._used_private_members = set()
# A stack of dictionaries, one for each function scope entered. Each
# dictionary is keyed by an identifier that defines a local variable and has
# a token as its value.
self._unused_local_variables_by_scope = []
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a param tag."""
self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
'Missing docs for parameter: "%s"' % param_name, token)
def __ContainsRecordType(self, token):
"""Check whether the given token contains a record type.
Args:
token: The token being checked
Returns:
True if the token contains a record type, False otherwise.
"""
# If we see more than one left-brace in the string of an annotation token,
# then there's a record type in there.
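    # e.g. '@param {{a: number, b: string}} x' contains more than one '{',
    # while a simple '@param {string} x' contains only one.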
return (
token and token.type == Type.DOC_FLAG and
token.attached_object.type is not None and
token.attached_object.type.find('{') != token.string.rfind('{'))
# pylint: disable=too-many-statements
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
# For @param don't ignore record type.
if (self.__ContainsRecordType(token) and
token.attached_object.flag_type != 'param'):
# We should bail out and not emit any warnings for this annotation.
# TODO(nicksantos): Support record types for real.
state.GetDocComment().Invalidate()
return
# Call the base class's CheckToken function.
super(JavaScriptLintRules, self).CheckToken(token, state)
# Store some convenience variables
namespaces_info = self._namespaces_info
if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES):
self._CheckUnusedLocalVariables(token, state)
if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
# Find all assignments to private members.
if token.type == Type.SIMPLE_LVALUE:
identifier = token.string
if identifier.endswith('_') and not identifier.endswith('__'):
doc_comment = state.GetDocComment()
suppressed = (doc_comment and doc_comment.HasFlag('suppress') and
(doc_comment.GetFlag('suppress').type == 'underscore' or
doc_comment.GetFlag('suppress').type ==
'unusedPrivateMembers'))
if not suppressed:
# Look for static members defined on a provided namespace.
if namespaces_info:
namespace = namespaces_info.GetClosurizedNamespace(identifier)
provided_namespaces = namespaces_info.GetProvidedNamespaces()
else:
namespace = None
provided_namespaces = set()
# Skip cases of this.something_.somethingElse_.
regex = re.compile(r'^this\.[a-zA-Z_]+$')
if namespace in provided_namespaces or regex.match(identifier):
variable = identifier.split('.')[-1]
self._declared_private_member_tokens[variable] = token
self._declared_private_members.add(variable)
elif not identifier.endswith('__'):
# Consider setting public members of private members to be a usage.
for piece in identifier.split('.'):
if piece.endswith('_'):
self._used_private_members.add(piece)
# Find all usages of private members.
if token.type == Type.IDENTIFIER:
for piece in token.string.split('.'):
if piece.endswith('_'):
self._used_private_members.add(piece)
if token.type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'param' and flag.name_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.name_token)
if flag.type is not None and flag.name is not None:
if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER):
# Check for variable arguments marker in type.
if (flag.type.startswith('...') and
flag.name != 'var_args'):
self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME,
'Variable length argument %s must be renamed '
'to var_args.' % flag.name,
token)
elif (not flag.type.startswith('...') and
flag.name == 'var_args'):
self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE,
'Variable length argument %s type must start '
'with \'...\'.' % flag.name,
token)
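          # e.g. a rest parameter documented as '@param {...number} var_args'
          # passes both of the checks above.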
if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER):
# Check for optional marker in type.
if (flag.type.endswith('=') and
not flag.name.startswith('opt_')):
self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
'Optional parameter name %s must be prefixed '
'with opt_.' % flag.name,
token)
elif (not flag.type.endswith('=') and
flag.name.startswith('opt_')):
self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
'Optional parameter %s type must end with =.' %
flag.name,
token)
if flag.flag_type in state.GetDocFlag().HAS_TYPE:
# Check for both missing type token and empty type braces '{}'
# Missing suppress types are reported separately and we allow enums,
# const, private, public and protected without types.
allowed_flags = set(['suppress']).union(
state.GetDocFlag().CAN_OMIT_TYPE)
if (flag.flag_type not in allowed_flags and
(not flag.type or flag.type.isspace())):
self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
'Missing type in %s tag' % token.string, token)
elif flag.name_token and flag.type_end_token and tokenutil.Compare(
flag.type_end_token, flag.name_token) > 0:
self._HandleError(
errors.OUT_OF_ORDER_JSDOC_TAG_TYPE,
'Type should be immediately after %s tag' % token.string,
token)
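          # e.g. '@param {string} name' is correct; '@param name {string}' puts
          # the type after the name and triggers OUT_OF_ORDER_JSDOC_TAG_TYPE.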
elif token.type == Type.DOUBLE_QUOTE_STRING_START:
next_token = token.next
while next_token.type == Type.STRING_TEXT:
if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
next_token.string):
break
next_token = next_token.next
else:
self._HandleError(
errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
'Single-quoted string preferred over double-quoted string.',
token,
position=Position.All(token.string))
elif token.type == Type.END_DOC_COMMENT:
doc_comment = state.GetDocComment()
# When @externs appears in a @fileoverview comment, it should trigger
# the same limited doc checks as a special filename like externs.js.
if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
self._SetLimitedDocChecks(True)
if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
not self._is_html and
state.InTopLevel() and
not state.InNonScopeBlock()):
# Check if we're in a fileoverview or constructor JsDoc.
is_constructor = (
doc_comment.HasFlag('constructor') or
doc_comment.HasFlag('interface'))
        # @fileoverview is an optional tag so if the docstring is the first
# token in the file treat it as a file level docstring.
is_file_level_comment = (
doc_comment.HasFlag('fileoverview') or
not doc_comment.start_token.previous)
# If the comment is not a file overview, and it does not immediately
# precede some code, skip it.
# NOTE: The tokenutil methods are not used here because of their
# behavior at the top of a file.
next_token = token.next
if (not next_token or
(not is_file_level_comment and
next_token.type in Type.NON_CODE_TYPES)):
return
# Don't require extra blank lines around suppression of extra
# goog.require errors.
if (doc_comment.SuppressionOnly() and
next_token.type == Type.IDENTIFIER and
next_token.string in ['goog.provide', 'goog.require']):
return
# Find the start of this block (include comments above the block, unless
# this is a file overview).
block_start = doc_comment.start_token
if not is_file_level_comment:
token = block_start.previous
while token and token.type in Type.COMMENT_TYPES:
block_start = token
token = token.previous
# Count the number of blank lines before this block.
blank_lines = 0
token = block_start.previous
while token and token.type in [Type.WHITESPACE, Type.BLANK_LINE]:
if token.type == Type.BLANK_LINE:
# A blank line.
blank_lines += 1
elif token.type == Type.WHITESPACE and not token.line.strip():
# A line with only whitespace on it.
blank_lines += 1
token = token.previous
# Log errors.
error_message = False
expected_blank_lines = 0
# Only need blank line before file overview if it is not the beginning
# of the file, e.g. copyright is first.
if is_file_level_comment and blank_lines == 0 and block_start.previous:
error_message = 'Should have a blank line before a file overview.'
expected_blank_lines = 1
elif is_constructor and blank_lines != 3:
error_message = (
'Should have 3 blank lines before a constructor/interface.')
expected_blank_lines = 3
elif (not is_file_level_comment and not is_constructor and
blank_lines != 2):
error_message = 'Should have 2 blank lines between top-level blocks.'
expected_blank_lines = 2
if error_message:
self._HandleError(
errors.WRONG_BLANK_LINE_COUNT, error_message,
block_start, position=Position.AtBeginning(),
fix_data=expected_blank_lines - blank_lines)
elif token.type == Type.END_BLOCK:
if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and
token.next.type == Type.START_PAREN)
function = state.GetFunction()
if not self._limited_doc_checks:
if (function.has_return and function.doc and
not is_immediately_called and
not function.doc.HasFlag('return') and
not function.doc.InheritsDocumentation() and
not function.doc.HasFlag('constructor')):
# Check for proper documentation of return value.
self._HandleError(
errors.MISSING_RETURN_DOCUMENTATION,
'Missing @return JsDoc in function with non-trivial return',
function.doc.end_token, position=Position.AtBeginning())
elif (not function.has_return and
not function.has_throw and
function.doc and
function.doc.HasFlag('return') and
not state.InInterfaceMethod()):
return_flag = function.doc.GetFlag('return')
if (return_flag.type is None or (
'undefined' not in return_flag.type and
'void' not in return_flag.type and
'*' not in return_flag.type)):
self._HandleError(
errors.UNNECESSARY_RETURN_DOCUMENTATION,
'Found @return JsDoc on function that returns nothing',
return_flag.flag_token, position=Position.AtBeginning())
# b/4073735. Method in object literal definition of prototype can
# safely reference 'this'.
prototype_object_literal = False
block_start = None
previous_code = None
previous_previous_code = None
# Search for cases where prototype is defined as object literal.
# previous_previous_code
# | previous_code
# | | block_start
# | | |
# a.b.prototype = {
# c : function() {
# this.d = 1;
# }
# }
      # If in an object literal, find the first token of the block so the
      # previous tokens can be checked against the condition above.
if state.InObjectLiteral():
block_start = state.GetCurrentBlockStart()
# If an object literal then get previous token (code type). For above
# case it should be '='.
if block_start:
previous_code = tokenutil.SearchExcept(block_start,
Type.NON_CODE_TYPES,
reverse=True)
# If previous token to block is '=' then get its previous token.
if previous_code and previous_code.IsOperator('='):
previous_previous_code = tokenutil.SearchExcept(previous_code,
Type.NON_CODE_TYPES,
reverse=True)
          # If the variable/token before '=' ends with '.prototype' then it's
          # the above case of a prototype defined with an object literal.
prototype_object_literal = (previous_previous_code and
previous_previous_code.string.endswith(
'.prototype'))
if (function.has_this and function.doc and
not function.doc.HasFlag('this') and
not function.is_constructor and
not function.is_interface and
'.prototype.' not in function.name and
not prototype_object_literal):
self._HandleError(
errors.MISSING_JSDOC_TAG_THIS,
'Missing @this JsDoc in function referencing "this". ('
'this usually means you are trying to reference "this" in '
'a static function, or you have forgotten to mark a '
'constructor with @constructor)',
function.doc.end_token, position=Position.AtBeginning())
elif token.type == Type.IDENTIFIER:
if token.string == 'goog.inherits' and not state.InFunction():
if state.GetLastNonSpaceToken().line_number == token.line_number:
self._HandleError(
errors.MISSING_LINE,
'Missing newline between constructor and goog.inherits',
token,
position=Position.AtBeginning())
extra_space = state.GetLastNonSpaceToken().next
while extra_space != token:
if extra_space.type == Type.BLANK_LINE:
self._HandleError(
errors.EXTRA_LINE,
'Extra line between constructor and goog.inherits',
extra_space)
extra_space = extra_space.next
# TODO(robbyw): Test the last function was a constructor.
# TODO(robbyw): Test correct @extends and @implements documentation.
elif (token.string == 'goog.provide' and
not state.InFunction() and
namespaces_info is not None):
namespace = tokenutil.GetStringAfterToken(token)
# Report extra goog.provide statement.
if not namespace or namespaces_info.IsExtraProvide(token):
if not namespace:
msg = 'Empty namespace in goog.provide'
else:
msg = 'Unnecessary goog.provide: ' + namespace
# Hint to user if this is a Test namespace.
          if namespace and namespace.endswith('Test'):
msg += (' *Test namespaces must be mentioned in the '
'goog.setTestOnly() call')
self._HandleError(
errors.EXTRA_GOOG_PROVIDE,
msg,
token, position=Position.AtBeginning())
if namespaces_info.IsLastProvide(token):
# Report missing provide statements after the last existing provide.
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides,
tokenutil.GetLastTokenInSameLine(token).next,
False)
# If there are no require statements, missing requires should be
# reported after the last provide.
if not namespaces_info.GetRequiredNamespaces():
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires,
tokenutil.GetLastTokenInSameLine(token).next,
True)
elif (token.string == 'goog.require' and
not state.InFunction() and
namespaces_info is not None):
namespace = tokenutil.GetStringAfterToken(token)
# If there are no provide statements, missing provides should be
# reported before the first require.
if (namespaces_info.IsFirstRequire(token) and
not namespaces_info.GetProvidedNamespaces()):
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides,
tokenutil.GetFirstTokenInSameLine(token),
True)
# Report extra goog.require statement.
if not namespace or namespaces_info.IsExtraRequire(token):
if not namespace:
msg = 'Empty namespace in goog.require'
else:
msg = 'Unnecessary goog.require: ' + namespace
self._HandleError(
errors.EXTRA_GOOG_REQUIRE,
msg,
token, position=Position.AtBeginning())
# Report missing goog.require statements.
if namespaces_info.IsLastRequire(token):
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires,
tokenutil.GetLastTokenInSameLine(token).next,
False)
elif token.type == Type.OPERATOR:
last_in_line = token.IsLastInLine()
# If the token is unary and appears to be used in a unary context
# it's ok. Otherwise, if it's at the end of the line or immediately
# before a comment, it's ok.
# Don't report an error before a start bracket - it will be reported
# by that token's space checks.
if (not token.metadata.IsUnaryOperator() and not last_in_line
and not token.next.IsComment()
and not token.next.IsOperator(',')
and token.next.type not in (Type.WHITESPACE, Type.END_PAREN,
Type.END_BRACKET, Type.SEMICOLON,
Type.START_BRACKET)):
self._HandleError(
errors.MISSING_SPACE,
'Missing space after "%s"' % token.string,
token,
position=Position.AtEnd(token.string))
elif token.type == Type.WHITESPACE:
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if not last_in_line and not first_in_line and not token.next.IsComment():
# Ensure there is no space after opening parentheses.
if (token.previous.type in (Type.START_PAREN, Type.START_BRACKET,
Type.FUNCTION_NAME)
or token.next.type == Type.START_PARAMETERS):
self._HandleError(
errors.EXTRA_SPACE,
'Extra space after "%s"' % token.previous.string,
token,
position=Position.All(token.string))
elif token.type == Type.SEMICOLON:
previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES,
reverse=True)
if not previous_token:
self._HandleError(
errors.REDUNDANT_SEMICOLON,
'Semicolon without any statement',
token,
position=Position.AtEnd(token.string))
elif (previous_token.type == Type.KEYWORD and
previous_token.string not in ['break', 'continue', 'return']):
self._HandleError(
errors.REDUNDANT_SEMICOLON,
('Semicolon after \'%s\' without any statement.'
' Looks like an error.' % previous_token.string),
token,
position=Position.AtEnd(token.string))
def _CheckUnusedLocalVariables(self, token, state):
"""Checks for unused local variables in function blocks.
Args:
token: The token to check.
state: The state tracker.
"""
# We don't use state.InFunction because that disregards scope functions.
in_function = state.FunctionDepth() > 0
if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER:
if in_function:
identifier = token.string
# Check whether the previous token was var.
previous_code_token = tokenutil.CustomSearch(
token,
lambda t: t.type not in Type.NON_CODE_TYPES,
reverse=True)
if previous_code_token and previous_code_token.IsKeyword('var'):
# Add local variable declaration to the top of the unused locals
# stack.
self._unused_local_variables_by_scope[-1][identifier] = token
elif token.type == Type.IDENTIFIER:
# This covers most cases where the variable is used as an identifier.
self._MarkLocalVariableUsed(token)
elif token.type == Type.SIMPLE_LVALUE and '.' in identifier:
# This covers cases where a value is assigned to a property of the
# variable.
self._MarkLocalVariableUsed(token)
elif token.type == Type.START_BLOCK:
if in_function and state.IsFunctionOpen():
# Push a new map onto the stack
self._unused_local_variables_by_scope.append({})
elif token.type == Type.END_BLOCK:
if state.IsFunctionClose():
# Pop the stack and report any remaining locals as unused.
unused_local_variables = self._unused_local_variables_by_scope.pop()
for unused_token in unused_local_variables.values():
self._HandleError(
errors.UNUSED_LOCAL_VARIABLE,
'Unused local variable: %s.' % unused_token.string,
unused_token)
def _MarkLocalVariableUsed(self, token):
"""Marks the local variable as used in the relevant scope.
Marks the local variable as used in the scope nearest to the current
scope that matches the given token.
Args:
token: The token representing the potential usage of a local variable.
"""
identifier = token.string.split('.')[0]
# Find the first instance of the identifier in the stack of function scopes
# and mark it used.
for unused_local_variables in reversed(
self._unused_local_variables_by_scope):
if identifier in unused_local_variables:
del unused_local_variables[identifier]
break
def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
"""Reports missing provide statements to the error handler.
Args:
missing_provides: A dictionary of string(key) and integer(value) where
each string(key) is a namespace that should be provided, but is not
and integer(value) is first line number where it's required.
      token: The token where the error was detected (also where the new provides
          will be inserted).
need_blank_line: Whether a blank line needs to be inserted after the new
provides are inserted. May be True, False, or None, where None
indicates that the insert location is unknown.
"""
missing_provides_msg = 'Missing the following goog.provide statements:\n'
missing_provides_msg += '\n'.join(['goog.provide(\'%s\');' % x for x in
sorted(missing_provides)])
missing_provides_msg += '\n'
missing_provides_msg += '\nFirst line where provided: \n'
missing_provides_msg += '\n'.join(
[' %s : line %d' % (x, missing_provides[x]) for x in
sorted(missing_provides)])
missing_provides_msg += '\n'
self._HandleError(
errors.MISSING_GOOG_PROVIDE,
missing_provides_msg,
token, position=Position.AtBeginning(),
fix_data=(missing_provides.keys(), need_blank_line))
def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
"""Reports missing require statements to the error handler.
Args:
missing_requires: A dictionary of string(key) and integer(value) where
each string(key) is a namespace that should be required, but is not
and integer(value) is first line number where it's required.
      token: The token where the error was detected (also where the new requires
          will be inserted).
need_blank_line: Whether a blank line needs to be inserted before the new
requires are inserted. May be True, False, or None, where None
indicates that the insert location is unknown.
"""
missing_requires_msg = 'Missing the following goog.require statements:\n'
missing_requires_msg += '\n'.join(['goog.require(\'%s\');' % x for x in
sorted(missing_requires)])
missing_requires_msg += '\n'
missing_requires_msg += '\nFirst line where required: \n'
missing_requires_msg += '\n'.join(
[' %s : line %d' % (x, missing_requires[x]) for x in
sorted(missing_requires)])
missing_requires_msg += '\n'
self._HandleError(
errors.MISSING_GOOG_REQUIRE,
missing_requires_msg,
token, position=Position.AtBeginning(),
fix_data=(missing_requires.keys(), need_blank_line))
def Finalize(self, state):
"""Perform all checks that need to occur after all lines are processed."""
# Call the base class's Finalize function.
super(JavaScriptLintRules, self).Finalize(state)
if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
# Report an error for any declared private member that was never used.
unused_private_members = (self._declared_private_members -
self._used_private_members)
for variable in unused_private_members:
token = self._declared_private_member_tokens[variable]
self._HandleError(errors.UNUSED_PRIVATE_MEMBER,
'Unused private member: %s.' % token.string,
token)
# Clear state to prepare for the next file.
self._declared_private_member_tokens = {}
self._declared_private_members = set()
self._used_private_members = set()
namespaces_info = self._namespaces_info
if namespaces_info is not None:
# If there are no provide or require statements, missing provides and
# requires should be reported on line 1.
if (not namespaces_info.GetProvidedNamespaces() and
not namespaces_info.GetRequiredNamespaces()):
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides, state.GetFirstToken(), None)
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires, state.GetFirstToken(), None)
self._CheckSortedRequiresProvides(state.GetFirstToken())
def _CheckSortedRequiresProvides(self, token):
"""Checks that all goog.require and goog.provide statements are sorted.
Note that this method needs to be run after missing statements are added to
preserve alphabetical order.
Args:
token: The first token in the token stream.
"""
sorter = requireprovidesorter.RequireProvideSorter()
first_provide_token = sorter.CheckProvides(token)
if first_provide_token:
new_order = sorter.GetFixedProvideString(first_provide_token)
self._HandleError(
errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
'goog.provide classes must be alphabetized. The correct code is:\n' +
new_order,
first_provide_token,
position=Position.AtBeginning(),
fix_data=first_provide_token)
first_require_token = sorter.CheckRequires(token)
if first_require_token:
new_order = sorter.GetFixedRequireString(first_require_token)
self._HandleError(
errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
'goog.require classes must be alphabetized. The correct code is:\n' +
new_order,
first_require_token,
position=Position.AtBeginning(),
fix_data=first_require_token)
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit.
Returns:
A list of regexps, used as matches (rather than searches).
"""
return [
re.compile(r'goog\.require\(.+\);?\s*$'),
re.compile(r'goog\.provide\(.+\);?\s*$'),
re.compile(r'goog\.setTestOnly\(.+\);?\s*$'),
re.compile(r'[\s/*]*@visibility\s*{.*}[\s*/]*$'),
]
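# Illustrative check (an assumption, not part of closure_linter): the
# exceptions above are applied with re.match, so a line is exempt from the
# long-line error only when it matches from its first character.
if __name__ == '__main__':
  _demo = re.compile(r'goog\.require\(.+\);?\s*$')
  assert _demo.match("goog.require('a.very.long.namespace.path.ClassName');")
  assert not _demo.match("var x = goog.require('a.b');")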
|
|
"""Platform that supports scanning iCloud."""
import logging
import os
import random
from pyicloud import PyiCloudService
from pyicloud.exceptions import (
PyiCloudException,
PyiCloudFailedLoginException,
PyiCloudNoDevicesException,
)
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.components.device_tracker.const import (
ATTR_ATTRIBUTES,
ENTITY_ID_FORMAT,
)
from homeassistant.components.device_tracker.legacy import DeviceScanner
from homeassistant.components.zone import async_active_zone
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.util import slugify
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.util.location import distance
from .const import (
DOMAIN,
SERVICE_LOST_IPHONE,
SERVICE_RESET_ACCOUNT,
SERVICE_SET_INTERVAL,
SERVICE_UPDATE,
)
_LOGGER = logging.getLogger(__name__)
CONF_ACCOUNTNAME = "account_name"
CONF_MAX_INTERVAL = "max_interval"
CONF_GPS_ACCURACY_THRESHOLD = "gps_accuracy_threshold"
# entity attributes
ATTR_ACCOUNTNAME = "account_name"
ATTR_INTERVAL = "interval"
ATTR_DEVICENAME = "device_name"
ATTR_BATTERY = "battery"
ATTR_DISTANCE = "distance"
ATTR_DEVICESTATUS = "device_status"
ATTR_LOWPOWERMODE = "low_power_mode"
ATTR_BATTERYSTATUS = "battery_status"
ICLOUDTRACKERS = {}
_CONFIGURING = {}
DEVICESTATUSSET = [
"features",
"maxMsgChar",
"darkWake",
"fmlyShare",
"deviceStatus",
"remoteLock",
"activationLocked",
"deviceClass",
"id",
"deviceModel",
"rawDeviceModel",
"passcodeLength",
"canWipeAfterLock",
"trackingInfo",
"location",
"msg",
"batteryLevel",
"remoteWipe",
"thisDevice",
"snd",
"prsId",
"wipeInProgress",
"lowPowerMode",
"lostModeEnabled",
"isLocating",
"lostModeCapable",
"mesg",
"name",
"batteryStatus",
"lockedTimestamp",
"lostTimestamp",
"locationCapable",
"deviceDisplayName",
"lostDevice",
"deviceColor",
"wipedTimestamp",
"modelDisplayName",
"locationEnabled",
"isMac",
"locFoundEnabled",
]
DEVICESTATUSCODES = {
"200": "online",
"201": "offline",
"203": "pending",
"204": "unregistered",
}
SERVICE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ACCOUNTNAME): vol.All(cv.ensure_list, [cv.slugify]),
vol.Optional(ATTR_DEVICENAME): cv.slugify,
vol.Optional(ATTR_INTERVAL): cv.positive_int,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(ATTR_ACCOUNTNAME): cv.slugify,
vol.Optional(CONF_MAX_INTERVAL, default=30): cv.positive_int,
vol.Optional(CONF_GPS_ACCURACY_THRESHOLD, default=1000): cv.positive_int,
}
)
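# Example configuration.yaml entry (illustrative; account_name, max_interval
# and gps_accuracy_threshold are optional and default as in the schema above):
#
# device_tracker:
#   - platform: icloud
#     username: YOUR_APPLE_ID
#     password: YOUR_PASSWORD
#     max_interval: 30
#     gps_accuracy_threshold: 1000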
def setup_scanner(hass, config: dict, see, discovery_info=None):
"""Set up the iCloud Scanner."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
account = config.get(CONF_ACCOUNTNAME, slugify(username.partition("@")[0]))
max_interval = config.get(CONF_MAX_INTERVAL)
gps_accuracy_threshold = config.get(CONF_GPS_ACCURACY_THRESHOLD)
icloudaccount = Icloud(
hass, username, password, account, max_interval, gps_accuracy_threshold, see
)
if icloudaccount.api is not None:
ICLOUDTRACKERS[account] = icloudaccount
else:
_LOGGER.error("No ICLOUDTRACKERS added")
return False
def lost_iphone(call):
"""Call the lost iPhone function if the device is found."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
devicename = call.data.get(ATTR_DEVICENAME)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].lost_iphone(devicename)
hass.services.register(
DOMAIN, SERVICE_LOST_IPHONE, lost_iphone, schema=SERVICE_SCHEMA
)
def update_icloud(call):
"""Call the update function of an iCloud account."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
devicename = call.data.get(ATTR_DEVICENAME)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].update_icloud(devicename)
hass.services.register(DOMAIN, SERVICE_UPDATE, update_icloud, schema=SERVICE_SCHEMA)
def reset_account_icloud(call):
"""Reset an iCloud account."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].reset_account_icloud()
hass.services.register(
DOMAIN, SERVICE_RESET_ACCOUNT, reset_account_icloud, schema=SERVICE_SCHEMA
)
def setinterval(call):
"""Call the update function of an iCloud account."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
interval = call.data.get(ATTR_INTERVAL)
devicename = call.data.get(ATTR_DEVICENAME)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].setinterval(interval, devicename)
hass.services.register(
DOMAIN, SERVICE_SET_INTERVAL, setinterval, schema=SERVICE_SCHEMA
)
# Tells the bootstrapper that the component was successfully initialized
return True
class Icloud(DeviceScanner):
"""Representation of an iCloud account."""
def __init__(
self, hass, username, password, name, max_interval, gps_accuracy_threshold, see
):
"""Initialize an iCloud account."""
self.hass = hass
self.username = username
self.password = password
self.api = None
self.accountname = name
self.devices = {}
self.seen_devices = {}
self._overridestates = {}
self._intervals = {}
self._max_interval = max_interval
self._gps_accuracy_threshold = gps_accuracy_threshold
self.see = see
self._trusted_device = None
self._verification_code = None
self._attrs = {}
self._attrs[ATTR_ACCOUNTNAME] = name
self.reset_account_icloud()
randomseconds = random.randint(10, 59)
track_utc_time_change(self.hass, self.keep_alive, second=randomseconds)
def reset_account_icloud(self):
"""Reset an iCloud account."""
icloud_dir = self.hass.config.path("icloud")
if not os.path.exists(icloud_dir):
os.makedirs(icloud_dir)
try:
self.api = PyiCloudService(
self.username, self.password, cookie_directory=icloud_dir, verify=True
)
except PyiCloudFailedLoginException as error:
self.api = None
_LOGGER.error("Error logging into iCloud Service: %s", error)
return
try:
self.devices = {}
self._overridestates = {}
self._intervals = {}
for device in self.api.devices:
status = device.status(DEVICESTATUSSET)
_LOGGER.debug("Device Status is %s", status)
devicename = slugify(status["name"].replace(" ", "", 99))
_LOGGER.info("Adding icloud device: %s", devicename)
if devicename in self.devices:
_LOGGER.error("Multiple devices with name: %s", devicename)
continue
self.devices[devicename] = device
self._intervals[devicename] = 1
self._overridestates[devicename] = None
except PyiCloudNoDevicesException:
_LOGGER.error("No iCloud Devices found!")
def icloud_trusted_device_callback(self, callback_data):
"""Handle chosen trusted devices."""
self._trusted_device = int(callback_data.get("trusted_device"))
self._trusted_device = self.api.trusted_devices[self._trusted_device]
if not self.api.send_verification_code(self._trusted_device):
_LOGGER.error("Failed to send verification code")
self._trusted_device = None
return
if self.accountname in _CONFIGURING:
request_id = _CONFIGURING.pop(self.accountname)
configurator = self.hass.components.configurator
configurator.request_done(request_id)
# Trigger the next step immediately
self.icloud_need_verification_code()
def icloud_need_trusted_device(self):
"""We need a trusted device."""
configurator = self.hass.components.configurator
if self.accountname in _CONFIGURING:
return
devicesstring = ""
devices = self.api.trusted_devices
for i, device in enumerate(devices):
devicename = device.get(
"deviceName", "SMS to %s" % device.get("phoneNumber")
)
devicesstring += f"{i}: {devicename};"
_CONFIGURING[self.accountname] = configurator.request_config(
f"iCloud {self.accountname}",
self.icloud_trusted_device_callback,
description=(
"Please choose your trusted device by entering"
" the index from this list: " + devicesstring
),
entity_picture="/static/images/config_icloud.png",
submit_caption="Confirm",
fields=[{"id": "trusted_device", "name": "Trusted Device"}],
)
def icloud_verification_callback(self, callback_data):
"""Handle the chosen trusted device."""
self._verification_code = callback_data.get("code")
try:
if not self.api.validate_verification_code(
self._trusted_device, self._verification_code
):
raise PyiCloudException("Unknown failure")
except PyiCloudException as error:
# Reset to the initial 2FA state to allow the user to retry
_LOGGER.error("Failed to verify verification code: %s", error)
self._trusted_device = None
self._verification_code = None
# Trigger the next step immediately
self.icloud_need_trusted_device()
if self.accountname in _CONFIGURING:
request_id = _CONFIGURING.pop(self.accountname)
configurator = self.hass.components.configurator
configurator.request_done(request_id)
def icloud_need_verification_code(self):
"""Return the verification code."""
configurator = self.hass.components.configurator
if self.accountname in _CONFIGURING:
return
_CONFIGURING[self.accountname] = configurator.request_config(
f"iCloud {self.accountname}",
self.icloud_verification_callback,
description=("Please enter the validation code:"),
entity_picture="/static/images/config_icloud.png",
submit_caption="Confirm",
fields=[{"id": "code", "name": "code"}],
)
def keep_alive(self, now):
"""Keep the API alive."""
if self.api is None:
self.reset_account_icloud()
if self.api is None:
return
if self.api.requires_2fa:
try:
if self._trusted_device is None:
self.icloud_need_trusted_device()
return
if self._verification_code is None:
self.icloud_need_verification_code()
return
self.api.authenticate()
if self.api.requires_2fa:
raise Exception("Unknown failure")
self._trusted_device = None
self._verification_code = None
except PyiCloudException as error:
_LOGGER.error("Error setting up 2FA: %s", error)
else:
self.api.authenticate()
currentminutes = dt_util.now().hour * 60 + dt_util.now().minute
try:
for devicename in self.devices:
interval = self._intervals.get(devicename, 1)
if (currentminutes % interval == 0) or (
interval > 10 and currentminutes % interval in [2, 4]
):
self.update_device(devicename)
except ValueError:
_LOGGER.debug("iCloud API returned an error")
def determine_interval(self, devicename, latitude, longitude, battery):
"""Calculate new interval."""
currentzone = run_callback_threadsafe(
self.hass.loop, async_active_zone, self.hass, latitude, longitude
).result()
if (
currentzone is not None
and currentzone == self._overridestates.get(devicename)
) or (currentzone is None and self._overridestates.get(devicename) == "away"):
return
zones = (
self.hass.states.get(entity_id)
for entity_id in sorted(self.hass.states.entity_ids("zone"))
)
distances = []
for zone_state in zones:
zone_state_lat = zone_state.attributes["latitude"]
zone_state_long = zone_state.attributes["longitude"]
zone_distance = distance(
latitude, longitude, zone_state_lat, zone_state_long
)
distances.append(round(zone_distance / 1000, 1))
if distances:
mindistance = min(distances)
else:
mindistance = None
self._overridestates[devicename] = None
if currentzone is not None:
self._intervals[devicename] = self._max_interval
return
if mindistance is None:
return
        # Calculate how long it would take for the device to drive to the
        # nearest zone at 120 km/h: mindistance km / 120 km/h = mindistance / 2
        # minutes, which becomes the new polling interval.
        interval = round(mindistance / 2, 0)
# Never poll more than once per minute
interval = max(interval, 1)
if interval > 180:
# Three hour drive? This is far enough that they might be flying
interval = 30
if battery is not None and battery <= 33 and mindistance > 3:
# Low battery - let's check half as often
interval = interval * 2
self._intervals[devicename] = interval
def update_device(self, devicename):
"""Update the device_tracker entity."""
# An entity will not be created by see() when track=false in
# 'known_devices.yaml', but we need to see() it at least once
entity = self.hass.states.get(ENTITY_ID_FORMAT.format(devicename))
if entity is None and devicename in self.seen_devices:
return
attrs = {}
kwargs = {}
if self.api is None:
return
try:
for device in self.api.devices:
if str(device) != str(self.devices[devicename]):
continue
status = device.status(DEVICESTATUSSET)
_LOGGER.debug("Device Status is %s", status)
dev_id = status["name"].replace(" ", "", 99)
dev_id = slugify(dev_id)
attrs[ATTR_DEVICESTATUS] = DEVICESTATUSCODES.get(
status["deviceStatus"], "error"
)
attrs[ATTR_LOWPOWERMODE] = status["lowPowerMode"]
attrs[ATTR_BATTERYSTATUS] = status["batteryStatus"]
attrs[ATTR_ACCOUNTNAME] = self.accountname
                battery = status.get("batteryLevel", 0) * 100
location = status["location"]
if location and location["horizontalAccuracy"]:
horizontal_accuracy = int(location["horizontalAccuracy"])
if horizontal_accuracy < self._gps_accuracy_threshold:
self.determine_interval(
devicename,
location["latitude"],
location["longitude"],
battery,
)
interval = self._intervals.get(devicename, 1)
attrs[ATTR_INTERVAL] = interval
accuracy = location["horizontalAccuracy"]
kwargs["dev_id"] = dev_id
kwargs["host_name"] = status["name"]
kwargs["gps"] = (location["latitude"], location["longitude"])
kwargs["battery"] = battery
kwargs["gps_accuracy"] = accuracy
kwargs[ATTR_ATTRIBUTES] = attrs
self.see(**kwargs)
self.seen_devices[devicename] = True
except PyiCloudNoDevicesException:
_LOGGER.error("No iCloud Devices found")
def lost_iphone(self, devicename):
"""Call the lost iPhone function if the device is found."""
if self.api is None:
return
self.api.authenticate()
for device in self.api.devices:
if str(device) == str(self.devices[devicename]):
_LOGGER.info("Playing Lost iPhone sound for %s", devicename)
device.play_sound()
def update_icloud(self, devicename=None):
"""Request device information from iCloud and update device_tracker."""
if self.api is None:
return
try:
if devicename is not None:
if devicename in self.devices:
self.update_device(devicename)
else:
_LOGGER.error(
"devicename %s unknown for account %s",
devicename,
self._attrs[ATTR_ACCOUNTNAME],
)
else:
for device in self.devices:
self.update_device(device)
except PyiCloudNoDevicesException:
_LOGGER.error("No iCloud Devices found")
def setinterval(self, interval=None, devicename=None):
"""Set the interval of the given devices."""
devs = [devicename] if devicename else self.devices
for device in devs:
devid = f"{DOMAIN}.{device}"
devicestate = self.hass.states.get(devid)
if interval is not None:
if devicestate is not None:
self._overridestates[device] = run_callback_threadsafe(
self.hass.loop,
async_active_zone,
self.hass,
float(devicestate.attributes.get("latitude", 0)),
float(devicestate.attributes.get("longitude", 0)),
).result()
if self._overridestates[device] is None:
self._overridestates[device] = "away"
self._intervals[device] = interval
else:
self._overridestates[device] = None
self.update_device(device)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import six
from tensorflow.contrib import losses as losses_lib
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import training
# TODO(zakaria): add functions that creates a head and returns ModelOpFn
def _regression_head(label_name=None,
weight_column_name=None,
label_dimension=1,
enable_centered_bias=False,
head_name=None):
"""Creates a _Head for linear regression.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Integer, number of label columns per example (the
dimension of the regression target).
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
Returns:
An instance of _Head
"""
return _RegressionHead(
label_name=label_name,
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias,
head_name=head_name)
# TODO(zakaria): Add logistic_regression_head
def _multi_class_head(n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,
metric_class_ids=None):
"""Creates a _Head for multi class single label classification.
The Head uses softmax cross entropy loss.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
thresholds: thresholds for eval metrics, defaults to [.5]
metric_class_ids: List of class IDs for which we should report per-class
metrics. Must all be in the range `[0, n_classes)`. Invalid if
`n_classes` is 2.
Returns:
An instance of _MultiClassHead.
Raises:
ValueError: if `n_classes` is < 2, or `metric_class_ids` is provided when
`n_classes` is 2.
"""
if (n_classes is None) or (n_classes < 2):
raise ValueError("n_classes must be > 1 for classification: %s." %
n_classes)
if n_classes == 2:
if metric_class_ids:
raise ValueError("metric_class_ids invalid for n_classes==2.")
return _BinaryLogisticHead(
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds)
return _MultiClassHead(
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
metric_class_ids=metric_class_ids)
def _binary_svm_head(
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,):
"""Creates a `_Head` for binary classification with SVMs.
The head uses binary hinge loss.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
thresholds: thresholds for eval metrics, defaults to [.5]
Returns:
An instance of `_Head`.
"""
return _BinarySvmHead(
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds)
def _multi_label_head(n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,
metric_class_ids=None):
"""Creates a _Head for multi label classification.
The Head uses sigmoid cross entropy loss.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
thresholds: thresholds for eval metrics, defaults to [.5]
metric_class_ids: List of class IDs for which we should report per-class
metrics. Must all be in the range `[0, n_classes)`.
Returns:
An instance of _MultiLabelHead.
Raises:
ValueError: if n_classes is < 2
"""
if n_classes < 2:
raise ValueError("n_classes must be > 1 for classification.")
return _MultiLabelHead(
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
metric_class_ids=metric_class_ids)
def _multi_head(heads, loss_weights=None):
"""Creates a MultiHead stemming from same logits/hidden layer.
Args:
heads: list of _Head objects.
loss_weights: optional list of weights to be used to combine losses from
each head. All losses are weighted equally if not provided.
Returns:
A _Head instance that combines multiple heads.
Raises:
ValueError: if heads and loss_weights have different size.
"""
if loss_weights:
if len(loss_weights) != len(heads):
raise ValueError("heads and loss_weights must have same size")
def _weighted_loss_combiner(losses):
if loss_weights:
if len(losses) != len(loss_weights):
raise ValueError("losses and loss_weights must have same size")
weighted_losses = []
for loss, weight in zip(losses, loss_weights):
weighted_losses.append(math_ops.multiply(loss, weight))
return math_ops.add_n(weighted_losses)
else:
return math_ops.add_n(losses)
return _MultiHead(heads, loss_combiner=_weighted_loss_combiner)
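# A minimal construction sketch (hypothetical head names and weights): two
# heads share one logits tensor, and the combiner above yields
# 1.0 * regression_loss + 0.5 * classification_loss.
#
#   head = _multi_head(
#       [_regression_head(head_name="score"),
#        _multi_class_head(n_classes=3, head_name="genre")],
#       loss_weights=[1.0, 0.5])
#   assert head.logits_dimension == 1 + 3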
# TODO(zakaria): Make the classes public once we are ready for users to subclass
# them.
class _Head(object):
"""Interface for the head/top of a model.
Given logits or output of a hidden layer, a Head knows how to compute
predictions, loss, default metric and export signature.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, head_name):
self.head_name = head_name
@abc.abstractproperty
def logits_dimension(self):
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def head_ops(self,
features,
labels,
mode,
train_op_fn,
logits=None,
logits_input=None,
scope=None):
"""Returns ops for a model_fn.
Args:
features: input dict.
labels: labels dict or tensor.
mode: estimator's ModeKeys
train_op_fn: function that takes a scalar loss and returns an op to
optimize with the loss.
logits: logits to be used for the head.
logits_input: tensor to build logits from.
scope: Optional scope for variable_scope.
Returns:
`ModelFnOps`.
Raises:
ValueError: if mode is not recognized.
"""
raise NotImplementedError("Calling an abstract method.")
def _create_output_alternatives(self, predictions):
"""Creates output alternative for the Head.
Args:
predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a
symbolic name for an output Tensor possibly but not necessarily taken
from `PredictionKey`, and 'Tensor' is the corresponding output Tensor
itself.
Returns:
`dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where
'submodel_name' is a submodel identifier that should be consistent across
the pipeline (here likely taken from the head_name),
'problem_type' is a `ProblemType`,
'tensor_name' is a symbolic name for an output Tensor possibly but not
necessarily taken from `PredictionKey`, and
'Tensor' is the corresponding output Tensor itself.
"""
return {self.head_name: (self._problem_type, predictions)}
# TODO(zakaria): use contrib losses.
def _mean_squared_loss(logits, labels):
with ops.name_scope(None, "mean_squared_loss", (logits, labels)) as name:
# To prevent broadcasting inside "-".
if len(labels.get_shape()) == 1:
labels = array_ops.expand_dims(labels, dim=(1,))
# TODO(zakaria): make sure it does not recreate the broadcast bug.
if len(logits.get_shape()) == 1:
logits = array_ops.expand_dims(logits, dim=(1,))
logits.get_shape().assert_is_compatible_with(labels.get_shape())
return math_ops.square(logits - math_ops.to_float(labels), name=name)
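# Worked example (values assumed for illustration): rank-1 labels are expanded
# to [batch_size, 1] before the subtraction, so for
#   logits = [[2.], [0.]] and labels = [1., 1.]
# the returned elementwise loss is square([[2.] - [1.], [0.] - [1.]])
# = [[1.], [1.]].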
class _RegressionHead(_Head):
"""_Head for regression."""
def __init__(self,
label_name,
weight_column_name,
label_dimension,
enable_centered_bias,
head_name,
loss_fn=_mean_squared_loss):
"""Base type for all single heads.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Integer, number of label columns.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
loss_fn: Loss function.
"""
super(_RegressionHead, self).__init__(head_name=head_name)
self._loss_fn = loss_fn
self._logits_dimension = label_dimension
self._label_name = label_name
self._weight_column_name = weight_column_name
self._enable_centered_bias = enable_centered_bias
self._problem_type = constants.ProblemType.LINEAR_REGRESSION
@property
def logits_dimension(self):
return self._logits_dimension
def head_ops(self,
features,
labels,
mode,
train_op_fn,
logits=None,
logits_input=None,
scope=None):
"""See `_Head`."""
_check_mode_valid(mode)
_check_logits_input_not_supported(logits, logits_input)
centered_bias = None
if self._enable_centered_bias:
centered_bias = _centered_bias(self._logits_dimension, self.head_name)
logits = nn.bias_add(logits, centered_bias)
predictions = self._logits_to_predictions(logits)
loss = None
train_op = None
eval_metric_ops = None
if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
labels_tensor = _to_labels_tensor(labels, self._label_name)
loss = _training_loss(
features,
labels_tensor,
logits,
loss_fn=self._loss_fn,
weight_column_name=self._weight_column_name,
head_name=self.head_name)
if (mode == model_fn.ModeKeys.TRAIN) and (train_op_fn is not None):
train_op = _train_op(loss, labels_tensor, train_op_fn, centered_bias,
self.logits_dimension, self._loss_fn)
eval_metric_ops = _eval_metric_ops(self._default_metrics(), features,
labels, predictions)
return model_fn.ModelFnOps(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
output_alternatives=self._create_output_alternatives(predictions))
def _logits_to_predictions(self, logits):
"""Returns a dict of predictions.
Args:
logits: logits `Tensor` after applying possible centered bias.
Returns:
Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
key = prediction_key.PredictionKey.SCORES
with ops.name_scope(None, "predictions", (logits,)):
if self.logits_dimension == 1:
logits = array_ops.squeeze(logits, squeeze_dims=(1,), name=key)
return {key: logits}
def _default_metrics(self):
"""Returns a dict of `MetricSpec` keyed by `MetricKey`."""
return {
_summary_key(self.head_name, metric_key.MetricKey.LOSS):
_weighted_average_loss_metric_spec(
self._loss_fn, prediction_key.PredictionKey.SCORES,
self._label_name, self._weight_column_name)
}
def _log_loss_with_two_classes(logits, labels):
with ops.name_scope(None, "log_loss_with_two_classes",
(logits, labels)) as name:
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
if len(labels.get_shape()) == 1:
labels = array_ops.expand_dims(labels, dim=(1,))
return nn.sigmoid_cross_entropy_with_logits(
labels=math_ops.to_float(labels), logits=logits, name=name)
def _one_class_to_two_class_logits(logits):
return array_ops.concat((array_ops.zeros_like(logits), logits), 1)
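# Example: _one_class_to_two_class_logits([[2.0]]) yields [[0.0, 2.0]];
# softmax over [0, z] recovers sigmoid(z) for the positive class, which is why
# a single logit column suffices for the PROBABILITIES prediction below.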
class _BinaryLogisticHead(_Head):
"""_Head for binary logistic classifciation."""
def __init__(self,
label_name,
weight_column_name,
enable_centered_bias,
head_name,
loss_fn=_log_loss_with_two_classes,
thresholds=None):
"""Base type for all single heads.
Args:
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
loss_fn: Loss function.
thresholds: thresholds for eval.
Raises:
ValueError: if n_classes is invalid.
"""
super(_BinaryLogisticHead, self).__init__(head_name=head_name)
self._thresholds = thresholds if thresholds else (.5,)
self._label_name = label_name
self._weight_column_name = weight_column_name
self._loss_fn = loss_fn
self._enable_centered_bias = enable_centered_bias
self._problem_type = constants.ProblemType.LOGISTIC_REGRESSION
@property
def logits_dimension(self):
return 1
def head_ops(self,
features,
labels,
mode,
train_op_fn,
logits=None,
logits_input=None,
scope=None):
"""See `_Head`."""
_check_mode_valid(mode)
_check_logits_input_not_supported(logits, logits_input)
centered_bias = None
if self._enable_centered_bias:
centered_bias = _centered_bias(1, self.head_name)
logits = nn.bias_add(logits, centered_bias)
predictions = self._logits_to_predictions(logits)
loss = None
train_op = None
eval_metric_ops = None
if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
labels_tensor = _to_labels_tensor(labels, self._label_name)
loss = _training_loss(
features,
labels_tensor,
logits,
loss_fn=self._loss_fn,
weight_column_name=self._weight_column_name,
head_name=self.head_name)
if (mode == model_fn.ModeKeys.TRAIN) and (train_op_fn is not None):
train_op = _train_op(loss, labels_tensor, train_op_fn, centered_bias,
self.logits_dimension, self._loss_fn)
eval_metric_ops = _eval_metric_ops(self._default_metrics(), features,
labels, predictions)
return model_fn.ModelFnOps(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
output_alternatives=self._create_output_alternatives(predictions))
def _logits_to_predictions(self, logits):
"""Returns a dict of predictions.
Args:
logits: logits `Tensor` after applying possible centered bias.
Returns:
Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
with ops.name_scope(None, "predictions", (logits,)):
two_class_logits = _one_class_to_two_class_logits(logits)
return {
prediction_key.PredictionKey.LOGITS:
logits,
prediction_key.PredictionKey.LOGISTIC:
math_ops.sigmoid(
logits, name=prediction_key.PredictionKey.LOGISTIC),
prediction_key.PredictionKey.PROBABILITIES:
nn.softmax(
two_class_logits,
name=prediction_key.PredictionKey.PROBABILITIES),
prediction_key.PredictionKey.CLASSES:
math_ops.argmax(
two_class_logits,
1,
name=prediction_key.PredictionKey.CLASSES)
}
def _default_metrics(self):
"""Returns a dict of `MetricSpec` objects keyed by name."""
metrics = {
_summary_key(self.head_name, metric_key.MetricKey.LOSS):
_weighted_average_loss_metric_spec(
self._loss_fn, prediction_key.PredictionKey.LOGITS,
self._label_name, self._weight_column_name)
}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics[_summary_key(self.head_name, metric_key.MetricKey.ACCURACY)] = (
metric_spec.MetricSpec(metrics_lib.streaming_accuracy,
prediction_key.PredictionKey.CLASSES,
self._label_name, self._weight_column_name))
def _add_binary_metric(key, metric_fn):
metrics[_summary_key(self.head_name, key)] = metric_spec.MetricSpec(
metric_fn, prediction_key.PredictionKey.LOGISTIC, self._label_name,
self._weight_column_name)
_add_binary_metric(metric_key.MetricKey.PREDICTION_MEAN,
_predictions_streaming_mean)
_add_binary_metric(metric_key.MetricKey.LABEL_MEAN,
_indicator_labels_streaming_mean)
# Also include the streaming mean of the label as an accuracy baseline, as
# a reminder to users.
_add_binary_metric(metric_key.MetricKey.ACCURACY_BASELINE,
_indicator_labels_streaming_mean)
_add_binary_metric(metric_key.MetricKey.AUC, _streaming_auc)
for threshold in self._thresholds:
_add_binary_metric(metric_key.MetricKey.ACCURACY_MEAN % threshold,
_accuracy_at_threshold(threshold))
# Precision for positive examples.
_add_binary_metric(
metric_key.MetricKey.PRECISION_MEAN % threshold,
_streaming_at_threshold(metrics_lib.streaming_precision_at_thresholds,
threshold),)
# Recall for positive examples.
_add_binary_metric(metric_key.MetricKey.RECALL_MEAN % threshold,
_streaming_at_threshold(
metrics_lib.streaming_recall_at_thresholds,
threshold))
return metrics
def _softmax_cross_entropy_loss(logits, labels):
with ops.name_scope(None, "softmax_cross_entropy_loss", (
logits,
labels,)) as name:
# Check that we got integer for classification.
if not labels.dtype.is_integer:
raise ValueError("Labels dtype should be integer "
"Instead got %s." % labels.dtype)
# sparse_softmax_cross_entropy_with_logits requires [batch_size] labels.
if len(labels.get_shape()) == 2:
labels = array_ops.squeeze(labels, squeeze_dims=(1,))
return nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name=name)
class _MultiClassHead(_Head):
"""_Head for classification."""
def __init__(self,
n_classes,
label_name,
weight_column_name,
enable_centered_bias,
head_name,
loss_fn=_softmax_cross_entropy_loss,
thresholds=None,
metric_class_ids=None):
"""_Head for classification.
Args:
n_classes: Number of classes, must be greater than 2 (for 2 classes, use
`_BinaryLogisticHead`).
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be prefixed by the head_name and an underscore.
loss_fn: Loss function.
thresholds: thresholds for eval.
metric_class_ids: List of class IDs for which we should report per-class
metrics. Must all be in the range `[0, n_classes)`.
Raises:
ValueError: if `n_classes` or `metric_class_ids` is invalid.
"""
super(_MultiClassHead, self).__init__(head_name=head_name)
if (n_classes is None) or (n_classes <= 2):
raise ValueError("n_classes must be > 2: %s." % n_classes)
self._thresholds = thresholds if thresholds else (.5,)
self._logits_dimension = n_classes
self._label_name = label_name
self._weight_column_name = weight_column_name
self._loss_fn = loss_fn
self._enable_centered_bias = enable_centered_bias
self._problem_type = constants.ProblemType.CLASSIFICATION
self._metric_class_ids = tuple([] if metric_class_ids is None else
metric_class_ids)
for class_id in self._metric_class_ids:
if (class_id < 0) or (class_id >= n_classes):
raise ValueError("Class ID %s not in [0, %s)." % (class_id, n_classes))
@property
def logits_dimension(self):
return self._logits_dimension
def head_ops(self,
features,
labels,
mode,
train_op_fn,
logits=None,
logits_input=None,
scope=None):
"""See `_Head`."""
_check_mode_valid(mode)
_check_logits_input_not_supported(logits, logits_input)
centered_bias = None
if self._enable_centered_bias:
centered_bias = _centered_bias(self._logits_dimension, self.head_name)
logits = nn.bias_add(logits, centered_bias)
predictions = self._logits_to_predictions(logits)
loss = None
train_op = None
eval_metric_ops = None
if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
labels_tensor = _to_labels_tensor(labels, self._label_name)
loss = _training_loss(
features,
labels_tensor,
logits,
loss_fn=self._loss_fn,
weight_column_name=self._weight_column_name,
head_name=self.head_name)
if (mode == model_fn.ModeKeys.TRAIN) and (train_op_fn is not None):
train_op = _train_op(loss, labels_tensor, train_op_fn, centered_bias,
self._logits_dimension, self._loss_fn)
eval_metric_ops = _eval_metric_ops(self._default_metrics(), features,
labels, predictions)
return model_fn.ModelFnOps(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
output_alternatives=self._create_output_alternatives(predictions))
def _logits_to_predictions(self, logits):
"""Returns a dict of predictions.
Args:
logits: logits `Tensor` after applying possible centered bias.
Returns:
Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
with ops.name_scope(None, "predictions", (logits,)):
return {
prediction_key.PredictionKey.LOGITS:
logits,
prediction_key.PredictionKey.PROBABILITIES:
nn.softmax(
logits, name=prediction_key.PredictionKey.PROBABILITIES),
prediction_key.PredictionKey.CLASSES:
math_ops.argmax(
logits, 1, name=prediction_key.PredictionKey.CLASSES)
}
def _metric_spec(self, metric_fn, prediction_name):
return metric_spec.MetricSpec(metric_fn, prediction_name, self._label_name,
self._weight_column_name)
def _default_metrics(self):
"""Returns a dict of `MetricSpec` objects keyed by name."""
def _streaming_auc_with_class_id_label(predictions, labels, weights=None):
indicator_labels = _class_id_labels_to_indicator(
labels, num_classes=self.logits_dimension)
return _streaming_auc(predictions, indicator_labels, weights)
loss_key = _summary_key(self.head_name, metric_key.MetricKey.LOSS)
accuracy_key = _summary_key(self.head_name, metric_key.MetricKey.ACCURACY)
auc_key = _summary_key(self.head_name, metric_key.MetricKey.AUC)
metrics = {
loss_key:
_weighted_average_loss_metric_spec(
self._loss_fn, prediction_key.PredictionKey.LOGITS,
self._label_name, self._weight_column_name),
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
accuracy_key:
self._metric_spec(metrics_lib.streaming_accuracy,
prediction_key.PredictionKey.CLASSES),
auc_key:
self._metric_spec(_streaming_auc_with_class_id_label,
prediction_key.PredictionKey.PROBABILITIES)
}
def _class_predictions_streaming_mean(predictions,
labels,
weights=None,
class_id=None):
del labels
return metrics_lib.streaming_mean(
array_ops.where(
math_ops.equal(
math_ops.to_int32(class_id), math_ops.to_int32(predictions)),
array_ops.ones_like(predictions),
array_ops.zeros_like(predictions)),
weights=weights)
def _class_labels_streaming_mean(predictions,
labels,
weights=None,
class_id=None):
del predictions
assert class_id is not None
return metrics_lib.streaming_mean(
array_ops.where(
math_ops.equal(
math_ops.to_int32(class_id), math_ops.to_int32(labels)),
array_ops.ones_like(labels), array_ops.zeros_like(labels)),
weights=weights)
def _class_streaming_auc(predictions, labels, weights=None, class_id=None):
assert class_id is not None
indicator_labels = _class_id_labels_to_indicator(
labels, num_classes=self.logits_dimension)
return _streaming_auc(
predictions, indicator_labels, weights=weights, class_id=class_id)
for class_id in self._metric_class_ids:
# TODO(ptucker): Add per-class accuracy, precision, recall.
prediction_mean_key = _summary_key(
self.head_name, metric_key.MetricKey.CLASS_PREDICTION_MEAN % class_id)
label_mean_key = _summary_key(self.head_name,
metric_key.MetricKey.CLASS_LABEL_MEAN %
class_id)
probability_mean_key = _summary_key(
self.head_name,
metric_key.MetricKey.CLASS_PROBABILITY_MEAN % class_id)
logits_mean_key = _summary_key(self.head_name,
metric_key.MetricKey.CLASS_LOGITS_MEAN %
class_id)
auc_key = _summary_key(self.head_name,
metric_key.MetricKey.CLASS_AUC % class_id)
metrics[prediction_mean_key] = self._metric_spec(
functools.partial(
_class_predictions_streaming_mean, class_id=class_id),
prediction_key.PredictionKey.CLASSES)
metrics[label_mean_key] = self._metric_spec(
functools.partial(
_class_labels_streaming_mean, class_id=class_id),
prediction_key.PredictionKey.PROBABILITIES)
metrics[probability_mean_key] = self._metric_spec(
functools.partial(
_predictions_streaming_mean, class_id=class_id),
prediction_key.PredictionKey.PROBABILITIES)
metrics[logits_mean_key] = self._metric_spec(
functools.partial(
_predictions_streaming_mean, class_id=class_id),
prediction_key.PredictionKey.LOGITS)
metrics[auc_key] = self._metric_spec(
functools.partial(
_class_streaming_auc, class_id=class_id),
prediction_key.PredictionKey.LOGITS)
return metrics
def _to_labels_tensor(labels, label_name):
labels = labels[label_name] if isinstance(labels, dict) else labels
if isinstance(labels, sparse_tensor.SparseTensor):
raise ValueError("SparseTensor is not supported as labels.")
return labels
def _assert_labels_rank(labels):
return control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(labels), 2),
("labels shape should be either [batch_size, 1] or [batch_size]",))
class _BinarySvmHead(_BinaryLogisticHead):
"""_Head for binary classification using SVMs."""
def __init__(self, label_name, weight_column_name, enable_centered_bias,
head_name, thresholds):
def _loss_fn(logits, labels):
with ops.name_scope(None, "hinge_loss", (logits, labels)) as name:
with ops.control_dependencies((_assert_labels_rank(labels),)):
labels = array_ops.reshape(labels, shape=(-1, 1))
return losses_lib.hinge_loss(logits, labels, scope=name)
super(_BinarySvmHead, self).__init__(
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
loss_fn=_loss_fn,
thresholds=thresholds)
def _logits_to_predictions(self, logits):
"""See `_MultiClassHead`."""
with ops.name_scope(None, "predictions", (logits,)):
return {
prediction_key.PredictionKey.LOGITS:
logits,
prediction_key.PredictionKey.CLASSES:
math_ops.argmax(
_one_class_to_two_class_logits(logits),
1,
name=prediction_key.PredictionKey.CLASSES)
}
def _default_metrics(self):
"""See `_MultiClassHead`."""
metrics = {
_summary_key(self.head_name, metric_key.MetricKey.LOSS):
_weighted_average_loss_metric_spec(
self._loss_fn, prediction_key.PredictionKey.LOGITS,
self._label_name, self._weight_column_name)
}
metrics[_summary_key(self.head_name, metric_key.MetricKey.ACCURACY)] = (
metric_spec.MetricSpec(metrics_lib.streaming_accuracy,
prediction_key.PredictionKey.CLASSES,
self._label_name, self._weight_column_name))
# TODO(sibyl-vie3Poto): add more metrics relevant for svms.
return metrics
class _MultiLabelHead(_MultiClassHead):
"""_Head for multlabel classification."""
# TODO(zakaria): add signature and metric for multilabel.
def __init__(self,
n_classes,
label_name,
weight_column_name,
enable_centered_bias,
head_name,
thresholds,
metric_class_ids=None):
super(_MultiLabelHead, self).__init__(
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
loss_fn=_sigmoid_cross_entropy_loss,
thresholds=thresholds,
metric_class_ids=metric_class_ids)
def _logits_to_predictions(self, logits):
"""See `_MultiClassHead`."""
with ops.name_scope(None, "predictions", (logits,)):
return {
prediction_key.PredictionKey.LOGITS:
logits,
prediction_key.PredictionKey.PROBABILITIES:
math_ops.sigmoid(
logits, name=prediction_key.PredictionKey.PROBABILITIES),
prediction_key.PredictionKey.CLASSES:
math_ops.to_int64(
math_ops.greater(logits, 0),
name=prediction_key.PredictionKey.CLASSES)
}
def _metric_spec(self, metric_fn, prediction_name):
return metric_spec.MetricSpec(metric_fn, prediction_name, self._label_name,
self._weight_column_name)
def _default_metrics(self):
"""Returns a dict of `MetricSpec` objects keyed by name."""
loss_key = _summary_key(self.head_name, metric_key.MetricKey.LOSS)
accuracy_key = _summary_key(self.head_name, metric_key.MetricKey.ACCURACY)
auc_key = _summary_key(self.head_name, metric_key.MetricKey.AUC)
metrics = {
loss_key:
_weighted_average_loss_metric_spec(
self._loss_fn, prediction_key.PredictionKey.LOGITS,
self._label_name, self._weight_column_name),
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
accuracy_key:
self._metric_spec(metrics_lib.streaming_accuracy,
prediction_key.PredictionKey.CLASSES),
auc_key:
self._metric_spec(_streaming_auc,
prediction_key.PredictionKey.PROBABILITIES),
}
for class_id in self._metric_class_ids:
# TODO(ptucker): Add per-class accuracy, precision, recall.
prediction_mean_key = _summary_key(
self.head_name, metric_key.MetricKey.CLASS_PREDICTION_MEAN % class_id)
label_mean_key = _summary_key(self.head_name,
metric_key.MetricKey.CLASS_LABEL_MEAN %
class_id)
probability_mean_key = _summary_key(
self.head_name,
metric_key.MetricKey.CLASS_PROBABILITY_MEAN % class_id)
logits_mean_key = _summary_key(self.head_name,
metric_key.MetricKey.CLASS_LOGITS_MEAN %
class_id)
auc_key = _summary_key(self.head_name,
metric_key.MetricKey.CLASS_AUC % class_id)
metrics[prediction_mean_key] = self._metric_spec(
functools.partial(
_predictions_streaming_mean, class_id=class_id),
prediction_key.PredictionKey.CLASSES)
metrics[label_mean_key] = self._metric_spec(
functools.partial(
_indicator_labels_streaming_mean, class_id=class_id),
prediction_key.PredictionKey.CLASSES)
metrics[probability_mean_key] = self._metric_spec(
functools.partial(
_predictions_streaming_mean, class_id=class_id),
prediction_key.PredictionKey.PROBABILITIES)
metrics[logits_mean_key] = self._metric_spec(
functools.partial(
_predictions_streaming_mean, class_id=class_id),
prediction_key.PredictionKey.LOGITS)
metrics[auc_key] = self._metric_spec(
functools.partial(
_streaming_auc, class_id=class_id),
prediction_key.PredictionKey.LOGITS)
return metrics
class _MultiHead(_Head):
"""_Head to combine multiple _Head objects.
All heads stem from the same logits/logit_input tensor.
For training, combines the losses of all heads according to a function
provided by the user.
For eval, adds a /head_name suffix to the keys in eval metrics.
For inference, updates the keys of the prediction dict to 2-tuples of
(head_name, prediction_key).
"""
def __init__(self, heads, loss_combiner):
"""_Head to combine multiple _Head objects.
Args:
heads: list of _Head objects.
loss_combiner: function that takes a list of loss tensors for the heads
and returns the final loss tensor for the multi head.
Raises:
ValueError: if any head does not have a name.
"""
# TODO(zakaria): Keep _Head a pure interface.
super(_MultiHead, self).__init__(head_name=None)
self._logits_dimension = 0
for head in heads:
if not head.head_name:
raise ValueError("Head must have a name.")
self._logits_dimension += head.logits_dimension
self._heads = heads
self._loss_combiner = loss_combiner
@property
def logits_dimension(self):
return self._logits_dimension
def head_ops(self,
features,
target,
mode,
train_op_fn,
logits=None,
logits_input=None,
scope=None):
"""See _Head.head_ops.
Args:
features: input dict.
target: labels dict.
mode: estimator's ModeKeys
train_op_fn: function that takes a scalar loss and returns an op to
optimize with the loss.
logits: Concatenated logits of shape `(batch_size, x)`, where x is the sum of
logits_dimension of all the heads, i.e., same as logits_dimension of
this class. This function will split the logits tensor and pass logits
of proper size to each head.
logits_input: tensor to build logits from.
scope: Optional scope for variable_scope.
Returns:
`ModelFnOps`.
Raises:
ValueError: if mode is not recognized or both logits and logits_input is
provided.
"""
def _noop(unused_loss):
return control_flow_ops.no_op()
if logits is not None and logits_input is not None:
raise ValueError("only one of logits and logits_input must be provided.")
all_model_fn_ops = []
if logits is not None:
all_logits = self._split_logits(logits)
for head, logits in zip(self._heads, all_logits):
all_model_fn_ops.append(
head.head_ops(
features, target, mode, _noop, logits=logits, scope=scope))
else:
# Uses logits_input
for head in self._heads:
all_model_fn_ops.append(
head.head_ops(
features,
target,
mode,
_noop,
logits_input=logits_input,
scope=scope))
if mode == model_fn.ModeKeys.TRAIN:
return self._combine_train(all_model_fn_ops, train_op_fn)
if mode == model_fn.ModeKeys.INFER:
return self._combine_infer(all_model_fn_ops)
if mode == model_fn.ModeKeys.EVAL:
return self._combine_eval(all_model_fn_ops)
raise ValueError("mode=%s unrecognized" % str(mode))
def _split_logits(self, logits):
"""Splits logits for heads.
Args:
logits: the logits tensor.
Returns:
A list of logits for the individual heads.
"""
all_logits = []
begin = 0
for head in self._heads:
current_logits_size = head.logits_dimension
current_logits = array_ops.slice(logits, [0, begin],
[-1, current_logits_size])
all_logits.append(current_logits)
begin += current_logits_size
return all_logits
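# Sketch of the split (hypothetical dimensions): for heads with
# logits_dimension 1 and 3, a [batch_size, 4] logits tensor is sliced into a
# [batch_size, 1] piece (columns [0, 1)) and a [batch_size, 3] piece
# (columns [1, 4)), in head order.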
def _combine_train(self, all_model_fn_ops, train_op_fn):
"""Combines list of ModelFnOps for training.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
train_op_fn: Function to create train op. See head_ops documentation for
more details.
Returns:
ModelFnOps that combines all the heads.
"""
losses = []
additional_train_ops = []
for m in all_model_fn_ops:
losses.append(m.loss)
additional_train_ops.append(m.train_op)
loss = self._loss_combiner(losses)
train_op = train_op_fn(loss)
train_op = control_flow_ops.group(train_op, *additional_train_ops)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.TRAIN,
loss=loss,
train_op=train_op)
def _combine_infer(self, all_model_fn_ops):
"""Combines list of ModelFnOps for inference.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
Returns:
ModelFnOps that combines all the heads.
"""
predictions = {}
output_alternatives = {}
for head, m in zip(self._heads, all_model_fn_ops):
head_name = head.head_name
output_alternatives[head_name] = m.output_alternatives[head_name]
for k, v in m.predictions.items():
predictions[(head_name, k)] = v
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.INFER,
predictions=predictions,
output_alternatives=output_alternatives)
def _combine_eval(self, all_model_fn_ops):
"""Combines list of ModelFnOps for eval.
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
Returns:
ModelFnOps that combines all the heads.
"""
predictions = {}
metrics = {}
losses = []
for head, m in zip(self._heads, all_model_fn_ops):
losses.append(m.loss)
head_name = head.head_name
for k, v in m.predictions.items():
predictions[(head_name, k)] = v
for k, v in m.eval_metric_ops.items():
# metrics["%s/%s" % (k, head_name)] = v
metrics[k] = v
loss = self._loss_combiner(losses)
return model_fn.ModelFnOps(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=loss,
eval_metric_ops=metrics)
def _weighted_loss(loss, weight):
"""Returns cumulative weighted loss as 1d `Tensor`."""
with ops.name_scope(None, "weighted_loss", (loss, weight)) as name:
return math_ops.multiply(
array_ops.reshape(
loss, shape=(-1,)),
array_ops.reshape(
weight, shape=(-1,)),
name=name)
def _weight_tensor(features, weight_column_name):
"""Returns weights as 1d `Tensor`."""
if not weight_column_name:
return None
with ops.name_scope(None, "weight_tensor",
tuple(six.itervalues(features))) as name:
return array_ops.reshape(
math_ops.to_float(features[weight_column_name]), shape=(-1,), name=name)
def _loss(loss_unweighted, weight, name):
"""Returns a tuple of (loss, weighted_average_loss)."""
with ops.name_scope(name, values=(loss_unweighted, weight)) as name_scope:
if weight is None:
loss = math_ops.reduce_mean(loss_unweighted, name=name_scope)
return loss, loss
loss_weighted = _weighted_loss(loss_unweighted, weight)
weighted_average_loss = math_ops.div(
math_ops.reduce_sum(loss_weighted),
math_ops.to_float(math_ops.reduce_sum(weight)),
name="weighted_average_loss")
loss = math_ops.reduce_mean(loss_weighted, name=name_scope)
return loss, weighted_average_loss
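# Worked example (values assumed for illustration): with
#   loss_unweighted = [1., 3.] and weight = [1., 2.]
# the weighted losses are [1., 6.], so the returned training loss is
# mean([1., 6.]) = 3.5 while weighted_average_loss = 7. / 3. ~= 2.33, the
# quantity that _training_loss below reports to TensorBoard.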
def _check_logits_input_not_supported(logits, logits_input):
if logits_input is not None or logits is None:
raise NotImplementedError("logits_input is not supported yet, "
"must pass logits")
def _check_mode_valid(mode):
"""Raises ValueError if the given mode is invalid."""
if (mode != model_fn.ModeKeys.TRAIN and mode != model_fn.ModeKeys.INFER and
mode != model_fn.ModeKeys.EVAL):
raise ValueError("mode=%s unrecognized." % str(mode))
def _centered_bias(logits_dimension, head_name=None):
"""Returns `logits`, optionally with centered bias applied.
Args:
logits_dimension: Last dimension of `logits`. Must be >= 1.
head_name: Optional name of the head.
Returns:
Centered bias `Variable`.
Raises:
ValueError: if `logits_dimension` is invalid.
"""
if (logits_dimension is None) or (logits_dimension < 1):
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
centered_bias = variable_scope.get_variable(
name="centered_bias_weight",
shape=(logits_dimension,),
initializer=init_ops.zeros_initializer(),
trainable=True)
for dim in range(logits_dimension):
if head_name:
summary.scalar("centered_bias/bias_%d/%s" % (dim, head_name),
centered_bias[dim])
else:
summary.scalar("centered_bias/bias_%d" % dim, centered_bias[dim])
return centered_bias
def _centered_bias_step(centered_bias, logits_dimension, labels, loss_fn):
"""Creates and returns training op for centered bias."""
if (logits_dimension is None) or (logits_dimension < 1):
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
with ops.name_scope(None, "centered_bias_step", (labels,)) as name:
batch_size = array_ops.shape(labels)[0]
logits = array_ops.reshape(
array_ops.tile(centered_bias, (batch_size,)),
(batch_size, logits_dimension))
with ops.name_scope(None, "centered_bias", (labels, logits)):
centered_bias_loss = math_ops.reduce_mean(
loss_fn(logits, labels), name="training_loss")
# Learn the centered bias via an optimizer. 0.1 is a conservative lr for a
# single variable.
return training.AdagradOptimizer(0.1).minimize(
centered_bias_loss, var_list=(centered_bias,), name=name)
def _summary_key(head_name, val):
return "%s/%s" % (val, head_name) if head_name else val
def _training_loss(features,
labels,
logits,
loss_fn,
weight_column_name=None,
head_name=None):
"""Returns training loss tensor.
Training loss is different from the loss reported on TensorBoard because we
must respect the example weights when computing the gradient:
L = sum_{i} w_{i} * l_{i} / B
where B is the number of examples in the batch, and l_{i} and w_{i} are the
loss and weight of the i-th example, respectively.
Args:
features: Features `dict`.
labels: Either a `Tensor` for labels or in multihead case, a `dict` of
string to `Tensor`.
logits: logits, a float `Tensor`. Shape is `(batch_size, logits_dimension)`.
loss_fn: Function taking `logits` and `labels`, and returning the raw
unweighted loss.
weight_column_name: Key for weights `Tensor` in `features`, if applicable.
head_name: Head name, used for summary.
Returns:
A loss `Tensor`.
"""
with ops.name_scope(None, "training_loss",
tuple(six.itervalues(features)) +
(labels, logits)) as name:
loss, weighted_average_loss = _loss(
loss_fn(logits, labels),
_weight_tensor(features, weight_column_name),
name=name)
# The tag must be same as the tag for eval loss, so the losses will show up
# in the same graph in tensorboard.
logging_ops.scalar_summary(
_summary_key(head_name, "loss"), weighted_average_loss)
return loss
def _train_op(loss,
labels,
train_op_fn,
centered_bias=None,
logits_dimension=None,
loss_fn=None):
"""Returns op for the training step."""
if centered_bias is not None:
centered_bias_step = _centered_bias_step(centered_bias, logits_dimension,
labels, loss_fn)
else:
centered_bias_step = None
with ops.name_scope(None, "train_op", (loss, labels)):
train_op = train_op_fn(loss)
if centered_bias_step is not None:
train_op = control_flow_ops.group(train_op, centered_bias_step)
return train_op
def _eval_metric_ops(metrics, features, labels, predictions):
with ops.name_scope(None, "metrics",
(tuple(six.itervalues(features)) +
(labels,) + tuple(six.itervalues(predictions)))):
# pylint: disable=protected-access
return estimator._make_metrics_ops(metrics, features, labels, predictions)
# pylint: enable=protected-access
def _sigmoid_cross_entropy_loss(logits, labels):
with ops.name_scope(None, "sigmoid_cross_entropy_loss",
(logits, labels)) as name:
# sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] labels.
return nn.sigmoid_cross_entropy_with_logits(
labels=math_ops.to_float(labels), logits=logits, name=name)
def _float_weights_or_none(weights):
if weights is None:
return None
with ops.name_scope(None, "float_weights", (weights,)) as name:
return math_ops.to_float(weights, name=name)
def _weighted_average_loss_metric_spec(loss_fn, pred_key, label_key,
weight_key):
def _streaming_weighted_average_loss(predictions, labels, weights=None):
loss_unweighted = loss_fn(predictions, labels)
if weights is not None:
weights = math_ops.to_float(weights)
_, weighted_average_loss = _loss(loss_unweighted, weights, name="eval_loss")
return metrics_lib.streaming_mean(weighted_average_loss)
return metric_spec.MetricSpec(_streaming_weighted_average_loss, pred_key,
label_key, weight_key)
def _indicator_labels_streaming_mean(predictions,
labels,
weights=None,
class_id=None):
del predictions
if class_id is not None:
labels = labels[:, class_id]
return metrics_lib.streaming_mean(labels, weights=weights)
def _predictions_streaming_mean(predictions,
labels,
weights=None,
class_id=None):
del labels
if class_id is not None:
predictions = predictions[:, class_id]
return metrics_lib.streaming_mean(predictions, weights=weights)
# TODO(ptucker): Add support for SparseTensor labels.
def _class_id_labels_to_indicator(labels, num_classes):
if (num_classes is None) or (num_classes < 2):
raise ValueError("Invalid num_classes %s." % num_classes)
with ops.control_dependencies((_assert_labels_rank(labels),)):
labels = array_ops.reshape(labels, (-1,))
return array_ops.one_hot(labels, depth=num_classes, axis=-1)
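# Example: for labels [0, 2] and num_classes=3 this returns the indicator
# matrix [[1, 0, 0], [0, 0, 1]]; [batch_size, 1]-shaped labels are first
# flattened by the reshape above.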
def _streaming_auc(predictions, labels, weights=None, class_id=None):
if class_id is not None:
predictions = predictions[:, class_id]
labels = labels[:, class_id]
return metrics_lib.streaming_auc(
predictions,
math_ops.cast(labels, dtypes.bool),
weights=_float_weights_or_none(weights))
def _assert_class_id(class_id, num_classes=None):
"""Average label value for class `class_id`."""
if (class_id is None) or (class_id < 0):
raise ValueError("Invalid class_id %s." % class_id)
if num_classes is not None:
if num_classes < 2:
raise ValueError("Invalid num_classes %s." % num_classes)
if class_id >= num_classes:
raise ValueError("Invalid class_id %s." % class_id)
def _accuracy_at_threshold(threshold):
def _accuracy_metric(predictions, labels, weights=None):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metrics_lib.streaming_accuracy(
predictions=threshold_predictions, labels=labels, weights=weights)
return _accuracy_metric
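# Example (values assumed for illustration): with threshold=0.7, predictions
# [0.4, 0.9] are thresholded to [0., 1.], so against labels [0, 1] the
# streaming accuracy is 1.0.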
def _streaming_at_threshold(streaming_metrics_fn, threshold):
def _streaming_metrics(predictions, labels, weights=None):
precision_tensor, update_op = streaming_metrics_fn(
predictions,
labels=labels,
thresholds=(threshold,),
weights=_float_weights_or_none(weights))
return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
return _streaming_metrics
|
|
# Copyright (c) 2013-2016 Christian Geier et al.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
The SQLite backend implementation.
note on naming:
* every variable named vevent should be of type icalendar.Event
* every variable named event should be of type khal.khalendar.Events
* variables named vevents/events (plural) should be iterables of their
respective types
"""
# TODO remove creating Events from SQLiteDb
# we currently expect str/CALENDAR objects but return Event(), we should
# accept and return the same kind of events
import contextlib
from datetime import datetime, timedelta
from os import makedirs, path
import sqlite3
from dateutil import parser
import icalendar
import pytz
import xdg.BaseDirectory
from .event import Event, EventStandIn
from . import aux
from .. import log
from .exceptions import CouldNotCreateDbDir, OutdatedDbVersionError, UpdateFailed
logger = log.logger
DB_VERSION = 5 # The current db layout version
RECURRENCE_ID = 'RECURRENCE-ID'
THISANDFUTURE = 'THISANDFUTURE'
THISANDPRIOR = 'THISANDPRIOR'
DATE = 0
DATETIME = 1
PROTO = 'PROTO'
def sort_key(vevent):
# insert the (sub) events in the right order, e.g. recurrence-id events
# after the corresponding rrule event
assert isinstance(vevent, icalendar.Event) # REMOVE ME
uid = str(vevent['UID'])
rec_id = vevent.get(RECURRENCE_ID)
if rec_id is None:
return uid, 0
rrange = rec_id.params.get('RANGE')
if rrange == THISANDFUTURE:
return uid, aux.to_unix_time(rec_id.dt)
else:
return uid, 1
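# Ordering sketch (hypothetical UID): for UID 'abc', the proto event (no
# RECURRENCE-ID) sorts as ('abc', 0), a plain RECURRENCE-ID instance as
# ('abc', 1), and a RANGE=THISANDFUTURE instance as ('abc', <unix time>), so
# the rrule event always precedes its overrides.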
class SQLiteDb(object):
"""
This class provides a caching database for calendars, keeping raw
vevents in one table while allowing events to be retrieved by date (with
the help of some auxiliary tables)
:param calendars: the names of the calendars to be cached; if the same
*calendars* and *db_path* are given on the next creation
of an SQLiteDb object, the same tables will be used
:type calendars: iterable of str
:param db_path: path where this sqlite database will be saved, if this is
None, a place according to the XDG specifications will be
chosen
:type db_path: str or None
"""
def __init__(self, calendars, db_path, locale):
if db_path is None:
db_path = xdg.BaseDirectory.save_data_path('khal') + '/khal.db'
self.calendars = calendars
self.db_path = path.expanduser(db_path)
self._create_dbdir()
self.locale = locale
self._at_once = False
self.conn = sqlite3.connect(self.db_path)
self.cursor = self.conn.cursor()
self._create_default_tables()
self._check_calendars_exists()
self._check_table_version()
@property
def _select_calendars(self):
return ', '.join(['\'' + cal + '\'' for cal in self.calendars])
@contextlib.contextmanager
def at_once(self):
assert not self._at_once
self._at_once = True
try:
yield self
except:
raise
else:
self.conn.commit()
finally:
self._at_once = False
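# Usage sketch (hypothetical hrefs/calendar): batch several updates into a
# single commit; sql_ex() skips its per-statement commit while _at_once is
# set, and the context manager commits once on successful exit.
#
#   with db.at_once():
#       db.update(ical_one, href='a.ics', etag='"1"', calendar='home')
#       db.update(ical_two, href='b.ics', etag='"2"', calendar='home')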
def _create_dbdir(self):
"""create the dbdir if it doesn't exist"""
if self.db_path == ':memory:':
return None
dbdir = self.db_path.rsplit('/', 1)[0]
if not path.isdir(dbdir):
try:
logger.debug('trying to create the directory for the db')
makedirs(dbdir, mode=0o770)
logger.debug('success')
except OSError as error:
logger.fatal('failed to create {0}: {1}'.format(dbdir, error))
raise CouldNotCreateDbDir()
def _check_table_version(self):
"""tests for curent db Version
if the table is still empty, insert db_version
"""
self.cursor.execute('SELECT version FROM version')
result = self.cursor.fetchone()
if result is None:
self.cursor.execute('INSERT INTO version (version) VALUES (?)',
(DB_VERSION, ))
self.conn.commit()
elif not result[0] == DB_VERSION:
raise OutdatedDbVersionError(
str(self.db_path) +
" is probably an invalid or outdated database.\n"
"You should consider removing it and running khal again.")
def _create_default_tables(self):
"""creates version and calendar tables and inserts table version number
"""
self.cursor.execute('CREATE TABLE IF NOT EXISTS '
'version (version INTEGER)')
logger.debug(u"created version table")
self.cursor.execute('''CREATE TABLE IF NOT EXISTS calendars (
calendar TEXT NOT NULL UNIQUE,
resource TEXT NOT NULL,
ctag TEXT
)''')
self.cursor.execute('''CREATE TABLE IF NOT EXISTS events (
href TEXT NOT NULL,
calendar TEXT NOT NULL,
sequence INT,
etag TEXT,
item TEXT,
primary key (href, calendar)
);''')
self.cursor.execute('''CREATE TABLE IF NOT EXISTS recs_loc (
dtstart INT NOT NULL,
dtend INT NOT NULL,
href TEXT NOT NULL REFERENCES events( href ),
rec_inst TEXT NOT NULL,
ref TEXT NOT NULL,
dtype INT NOT NULL,
calendar TEXT NOT NULL,
primary key (href, rec_inst, calendar)
);''')
self.cursor.execute('''CREATE TABLE IF NOT EXISTS recs_float (
dtstart INT NOT NULL,
dtend INT NOT NULL,
href TEXT NOT NULL REFERENCES events( href ),
rec_inst TEXT NOT NULL,
ref TEXT NOT NULL,
dtype INT NOT NULL,
calendar TEXT NOT NULL,
primary key (href, rec_inst, calendar)
);''')
self.conn.commit()
def _check_calendars_exists(self):
"""make sure an entry for the current calendar exists in `calendar`
table
"""
for cal in self.calendars:
self.cursor.execute('''SELECT count(*) FROM calendars
WHERE calendar = ?;''', (cal,))
result = self.cursor.fetchone()
if result[0] != 0:
logger.debug(u"tables for calendar {0} exist".format(cal))
else:
sql_s = 'INSERT INTO calendars (calendar, resource) VALUES (?, ?);'
stuple = (cal, '')
self.sql_ex(sql_s, stuple)
def sql_ex(self, statement, stuple=''):
"""wrapper for sql statements, does a "fetchall" """
self.cursor.execute(statement, stuple)
result = self.cursor.fetchall()
if not self._at_once:
self.conn.commit()
return result
def update(self, vevent_str, href, etag='', calendar=None):
"""insert a new or update an existing card in the db
This is mostly a wrapper around two SQL statements, doing some cleanup
before.
:param vevent_str: event to be inserted or updated.
We assume that even if it contains more than one
VEVENT, that they are all part of the same event and
all have the same UID
:type vevent: unicode
:param href: href of the card on the server, if this href already
exists in the db the card gets updated. If no href is
given, a random href is chosen and it is implied that this
card does not yet exist on the server, but will be
uploaded there on next sync.
:type href: str()
:param etag: the etag of the vcard, if this etag does not match the
remote etag on next sync, this card will be updated from
the server. For locally created vcards this should not be
set
:type etag: str()
"""
assert calendar is not None
if href is None:
raise ValueError('href may not be None')
ical = icalendar.Event.from_ical(vevent_str)
vevents = (aux.sanitize(c, self.locale['default_timezone'], href, calendar) for
c in ical.walk() if c.name == 'VEVENT')
# Need to delete the whole event in case we are updating a
# recurring event with an event which is either not recurring any
# more or has EXDATEs, as those would be left in the recursion
# tables. There are obviously better ways to achieve the same
# result.
self.delete(href, calendar=calendar)
for vevent in sorted(vevents, key=sort_key):
check_support(vevent, href, calendar)
self._update_impl(vevent, href, calendar)
sql_s = ('INSERT INTO events '
'(item, etag, href, calendar) '
'VALUES (?, ?, ?, ?);')
stuple = (vevent_str, etag, href, calendar)
self.sql_ex(sql_s, stuple)
def update_birthday(self, vevent, href, etag='', calendar=None):
"""
XXX write docstring
"""
assert calendar is not None
if href is None:
raise ValueError('href may not be None')
ical = icalendar.Event.from_ical(vevent)
vcard = ical.walk()[0]
if 'BDAY' in vcard.keys():
bday = vcard['BDAY']
if isinstance(bday, list):
logger.warn('Vcard {0} in collection {1} has more than one '
'BIRTHDAY, will be skipped and will not be available '
'in khal.'.format(href, calendar))
return
try:
if bday[0:2] == '--' and bday[3] != '-':
bday = '1900' + bday[2:]
orig_bday = False
else:
orig_bday = True
bday = parser.parse(bday).date()
except ValueError:
logger.warn('cannot parse BIRTHDAY in {0} in collection '
'{1}'.format(href, calendar))
return
if 'FN' in vcard:
name = vcard['FN']
else:
n = vcard['N'].split(';')
name = ' '.join([n[1], n[2], n[0]])
event = icalendar.Event()
event.add('dtstart', bday)
event.add('dtend', bday + timedelta(days=1))
event.add('summary', '{0}\'s birthday'.format(name))
event.add('rrule', {'freq': 'YEARLY'})
if orig_bday:
event.add('x-birthday',
'{:04}{:02}{:02}'.format(bday.year, bday.month, bday.day))
event.add('x-fname', name)
event.add('uid', href)
event_str = event.to_ical().decode('utf-8')
self._update_impl(event, href, calendar)
sql_s = ('INSERT INTO events (item, etag, href, calendar) VALUES (?, ?, ?, ?);')
stuple = (event_str, etag, href, calendar)
self.sql_ex(sql_s, stuple)
def _update_impl(self, vevent, href, calendar):
"""insert `vevent` into the database
expand `vevent`'s recurrence rules (if needed) and insert all instances
into the respective tables,
then insert non-recurring and original recurring (those with an RRULE
property) events into table `events`
"""
# TODO FIXME this function is a steaming pile of shit
rec_id = vevent.get(RECURRENCE_ID)
if rec_id is None:
rrange = None
else:
rrange = rec_id.params.get('RANGE')
# testing on datetime.date won't work as datetime is a child of date
if not isinstance(vevent['DTSTART'].dt, datetime):
dtype = DATE
else:
dtype = DATETIME
if ('TZID' in vevent['DTSTART'].params and dtype == DATETIME) or \
getattr(vevent['DTSTART'].dt, 'tzinfo', None):
recs_table = 'recs_loc'
else:
recs_table = 'recs_float'
thisandfuture = (rrange == THISANDFUTURE)
if thisandfuture:
start_shift, duration = calc_shift_deltas(vevent)
start_shift = start_shift.days * 3600 * 24 + start_shift.seconds
duration = duration.days * 3600 * 24 + duration.seconds
dtstartend = aux.expand(vevent, href)
if not dtstartend:
# Does this event even have dates? Technically it is possible for
# events to be empty/non-existent by deleting all their recurrences
# through EXDATE.
return
for dtstart, dtend in dtstartend:
if dtype == DATE:
dbstart = aux.to_unix_time(dtstart)
dbend = aux.to_unix_time(dtend)
if rec_id is not None:
rec_inst = aux.to_unix_time(rec_id.dt)
ref = rec_inst
else:
rec_inst = dbstart
ref = PROTO
else:
dbstart = aux.to_unix_time(dtstart)
dbend = aux.to_unix_time(dtend)
if rec_id is not None:
ref = rec_inst = str(aux.to_unix_time(rec_id.dt))
else:
rec_inst = dbstart
ref = PROTO
if thisandfuture:
recs_sql_s = (
'UPDATE {0} SET dtstart = rec_inst + ?, dtend = rec_inst + ?, ref = ? '
'WHERE rec_inst >= ? AND href = ? AND calendar = ?;'.format(recs_table))
stuple = (start_shift, start_shift + duration, ref, rec_inst, href, calendar)
else:
recs_sql_s = (
'INSERT OR REPLACE INTO {0} '
'(dtstart, dtend, href, ref, dtype, rec_inst, calendar) '
'VALUES (?, ?, ?, ?, ?, ?, ?);'.format(recs_table))
stuple = (dbstart, dbend, href, ref, dtype, rec_inst, calendar)
self.sql_ex(recs_sql_s, stuple)
# end of loop
def get_ctag(self, calendar):
stuple = (calendar, )
sql_s = 'SELECT ctag FROM calendars WHERE calendar = ?;'
try:
ctag = self.sql_ex(sql_s, stuple)[0][0]
return ctag
except IndexError:
return None
def set_ctag(self, ctag, calendar):
stuple = (ctag, calendar, )
sql_s = 'UPDATE calendars SET ctag = ? WHERE calendar = ?;'
self.sql_ex(sql_s, stuple)
self.conn.commit()
def get_etag(self, href, calendar):
"""get etag for href
type href: str()
return: etag
rtype: str()
"""
sql_s = 'SELECT etag FROM events WHERE href = ? AND calendar = ?;'
try:
etag = self.sql_ex(sql_s, (href, calendar))[0][0]
return etag
except IndexError:
return None
def delete(self, href, etag=None, calendar=None):
"""
remove the event with `href` from the db

:param etag: only present for compatibility with vdirsyncer's Storage,
we always delete
:returns: None
"""
assert calendar is not None
for table in ['recs_loc', 'recs_float']:
sql_s = 'DELETE FROM {0} WHERE href = ? AND calendar = ?;'.format(table)
self.sql_ex(sql_s, (href, calendar))
sql_s = 'DELETE FROM events WHERE href = ? AND calendar = ?;'
self.sql_ex(sql_s, (href, calendar))
def list(self, calendar):
""" list all events in `calendar`
used for testing
:returns: list of (href, etag)
"""
sql_s = 'SELECT href, etag FROM events WHERE calendar = ?;'
return list(set(self.sql_ex(sql_s, (calendar, ))))
def get_localized(self, start, end, minimal=False):
"""returns
:type start: datetime.datetime
:type end: datetime.datetime
:param minimal: if set, we do not return an event but a minimal stand in
:type minimal: bool
"""
assert start.tzinfo is not None
assert end.tzinfo is not None
start = aux.to_unix_time(start)
end = aux.to_unix_time(end)
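# an instance matches if it starts within the range, ends within it, or
# spans it entirely; the three OR clauses below cover these cases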
if minimal:
sql_s = (
'SELECT events.calendar FROM '
'recs_loc JOIN events ON '
'recs_loc.href = events.href AND '
'recs_loc.calendar = events.calendar WHERE '
'(dtstart >= ? AND dtstart <= ? OR '
'dtend > ? AND dtend <= ? OR '
'dtstart <= ? AND dtend >= ?) AND events.calendar in ({0}) '
'ORDER BY dtstart')
else:
sql_s = (
'SELECT item, recs_loc.href, dtstart, dtend, ref, etag, dtype, events.calendar '
'FROM recs_loc JOIN events ON '
'recs_loc.href = events.href AND '
'recs_loc.calendar = events.calendar WHERE '
'(dtstart >= ? AND dtstart <= ? OR '
'dtend > ? AND dtend <= ? OR '
'dtstart <= ? AND dtend >= ?) AND events.calendar in ({0}) '
'ORDER BY dtstart')
stuple = (start, end, start, end, start, end)
result = self.sql_ex(sql_s.format(self._select_calendars), stuple)
if minimal:
for calendar in result:
yield EventStandIn(calendar[0])
else:
for item, href, start, end, ref, etag, dtype, calendar in result:
start = pytz.UTC.localize(datetime.utcfromtimestamp(start))
end = pytz.UTC.localize(datetime.utcfromtimestamp(end))
yield self.construct_event(item, href, start, end, ref, etag, calendar, dtype)
def get_floating(self, start, end, minimal=False):
"""return floating events between `start` and `end`
:type start: datetime.datetime
:type end: datetime.datetime
:param minimal: if set, do not return full events but minimal stand-ins
:type minimal: bool
"""
assert start.tzinfo is None
assert end.tzinfo is None
strstart = aux.to_unix_time(start)
strend = aux.to_unix_time(end)
if minimal:
sql_s = (
'SELECT events.calendar FROM '
'recs_float JOIN events ON '
'recs_float.href = events.href AND '
'recs_float.calendar = events.calendar WHERE '
'(dtstart >= ? AND dtstart < ? OR '
'dtend > ? AND dtend <= ? OR '
'dtstart <= ? AND dtend > ? ) AND events.calendar in ({0}) '
'ORDER BY dtstart')
else:
sql_s = (
'SELECT item, recs_float.href, dtstart, dtend, ref, etag, dtype, events.calendar '
'FROM recs_float JOIN events ON '
'recs_float.href = events.href AND '
'recs_float.calendar = events.calendar WHERE '
'(dtstart >= ? AND dtstart < ? OR '
'dtend > ? AND dtend <= ? OR '
'dtstart <= ? AND dtend > ? ) AND events.calendar in ({0}) '
'ORDER BY dtstart')
stuple = (strstart, strend, strstart, strend, strstart, strend)
result = self.sql_ex(sql_s.format(self._select_calendars), stuple)
if minimal:
for calendar in result:
yield EventStandIn(calendar[0])
else:
for item, href, start, end, ref, etag, dtype, calendar in result:
start = datetime.utcfromtimestamp(start)
end = datetime.utcfromtimestamp(end)
yield self.construct_event(item, href, start, end, ref, etag, calendar, dtype)
def get_localized_at(self, dtime):
"""return localized events which are scheduled at `dtime`
:type dtime: datetime.datetime
"""
assert dtime.tzinfo is not None
dtime = aux.to_unix_time(dtime)
sql_s = (
'SELECT item, recs_loc.href, dtstart, dtend, ref, etag, dtype, events.calendar FROM '
'recs_loc JOIN events ON '
'recs_loc.href = events.href AND '
'recs_loc.calendar = events.calendar WHERE '
'(dtstart <= ? AND dtend >= ? ) '
'AND events.calendar in ({0});')
stuple = (dtime, dtime)
result = self.sql_ex(sql_s.format(self._select_calendars), stuple)
for item, href, start, end, ref, etag, dtype, calendar in result:
start = pytz.UTC.localize(datetime.utcfromtimestamp(start))
end = pytz.UTC.localize(datetime.utcfromtimestamp(end))
yield self.construct_event(item, href, start, end, ref, etag, calendar, dtype)
def get_floating_at(self, dtime):
"""return allday events which are scheduled at `dtime`
:type dtime: datetime.datetime
"""
assert dtime.tzinfo is None
dtime = aux.to_unix_time(dtime)
sql_s = (
'SELECT item, recs_float.href, dtstart, dtend, ref, etag, dtype, events.calendar FROM '
'recs_float JOIN events ON '
'recs_float.href = events.href AND '
'recs_float.calendar = events.calendar WHERE '
'(dtstart <= ? AND dtend >= ? ) '
'AND events.calendar in ({0});')
stuple = (dtime, dtime)
result = self.sql_ex(sql_s.format(self._select_calendars), stuple)
for item, href, start, end, ref, etag, dtype, calendar in result:
start = datetime.utcfromtimestamp(start)
end = datetime.utcfromtimestamp(end)
yield self.construct_event(item, href, start, end, ref, etag, calendar, dtype)
def get(self, href, start=None, end=None, ref=None, dtype=None, calendar=None):
"""returns the Event matching href
if start and end are given, a specific Event from a Recursion set is
returned, otherwise the Event returned exactly as saved in the db
"""
assert calendar is not None
sql_s = 'SELECT href, etag, item FROM events WHERE href = ? AND calendar = ?;'
result = self.sql_ex(sql_s, (href, calendar))
href, etag, item = result[0]
if dtype == DATE:
start = start.date()
end = end.date()
return Event.fromString(item,
locale=self.locale,
href=href,
calendar=calendar,
etag=etag,
start=start,
end=end,
ref=ref,
)
def construct_event(self, item, href, start, end, ref, etag, calendar, dtype=None):
if dtype == DATE:
start = start.date()
end = end.date()
return Event.fromString(item,
locale=self.locale,
href=href,
calendar=calendar,
etag=etag,
start=start,
end=end,
ref=ref,
)
def search(self, search_string):
"""search for events matching `search_string`"""
sql_s = ('SELECT href, calendar FROM events '
'WHERE item LIKE (?) and calendar in ({0});')
stuple = ('%{0}%'.format(search_string), )
result = self.sql_ex(sql_s.format(self._select_calendars), stuple)
for href, calendar in result:
event = self.get(href, calendar=calendar)
yield event
def check_support(vevent, href, calendar):
"""test if all icalendar features used in this event are supported,
raise `UpdateFailed` otherwise.
:param vevent: event to test
:type vevent: icalendar.cal.Event
:param href: href of this event, only used for logging
:type href: str
:param calendar: name of the calendar, only used for logging
:type calendar: str
"""
rec_id = vevent.get(RECURRENCE_ID)
if rec_id is not None and rec_id.params.get('RANGE') == THISANDPRIOR:
raise UpdateFailed(
'The parameter `THISANDPRIOR` is not (and will not be) '
'supported by khal (as applications supporting the latest '
'standard MUST NOT create those). Therefore event {0} from '
'calendar {1} will not be shown in khal.'
.format(href, calendar)
)
rdate = vevent.get('RDATE')
if rdate is not None and rdate.params.get('VALUE') == 'PERIOD':
raise UpdateFailed(
'`RDATE;VALUE=PERIOD` is currently not supported by khal. '
'Therefore event {0} from calendar {1} will not be shown in khal.\n'
'Please post exemplary events (please remove any private data) '
'to https://github.com/geier/khal/issues/152 .'
.format(href, calendar)
)
def calc_shift_deltas(vevent):
"""calculate an event's duration and by how much its start time has shifted
versus its recurrence-id time
:param event: an event with an RECURRENCE-ID property
:type event: icalendar.Event
:returns: time shift and duration
:rtype: (datetime.timedelta, datetime.timedelta)
"""
assert isinstance(vevent, icalendar.Event) # REMOVE ME
start_shift = vevent['DTSTART'].dt - vevent['RECURRENCE-ID'].dt
try:
duration = vevent['DTEND'].dt - vevent['DTSTART'].dt
except KeyError:
duration = vevent['DURATION'].dt
return start_shift, duration
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numbers
import mock
import operator
import time
import unittest
import socket
import os
import errno
import itertools
import random
from collections import defaultdict
from datetime import datetime
from six.moves import urllib
from swift.container import reconciler
from swift.container.server import gen_resp_headers
from swift.common.direct_client import ClientException
from swift.common import swob
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import split_path, Timestamp, encode_timestamps
from test.unit import debug_logger, FakeRing, fake_http_connect
from test.unit.common.middleware.helpers import FakeSwift
def timestamp_to_last_modified(timestamp):
return datetime.utcfromtimestamp(
float(Timestamp(timestamp))).strftime('%Y-%m-%dT%H:%M:%S.%f')
def container_resp_headers(**kwargs):
return HeaderKeyDict(gen_resp_headers(kwargs))
class FakeStoragePolicySwift(object):
def __init__(self):
self.storage_policy = defaultdict(FakeSwift)
self._mock_oldest_spi_map = {}
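# attribute lookups that are not found on this object fall through to
# the default (policy None) FakeSwift, so call recording keeps working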
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.storage_policy[None], name)
def __call__(self, env, start_response):
method = env['REQUEST_METHOD']
path = env['PATH_INFO']
_, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4,
rest_with_last=True)
if not obj:
policy_index = None
else:
policy_index = self._mock_oldest_spi_map.get(cont, 0)
# allow backend policy override
if 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX' in env:
policy_index = int(env['HTTP_X_BACKEND_STORAGE_POLICY_INDEX'])
try:
return self.storage_policy[policy_index].__call__(
env, start_response)
except KeyError:
pass
if method == 'PUT':
resp_class = swob.HTTPCreated
else:
resp_class = swob.HTTPNotFound
self.storage_policy[policy_index].register(
method, path, resp_class, {}, '')
return self.storage_policy[policy_index].__call__(
env, start_response)
class FakeInternalClient(reconciler.InternalClient):
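"""InternalClient stub that, given a mapping of
(storage_policy_index, path) -> timestamp or (timestamp, content_type),
registers fake account/container listings and per-policy object
responses
"""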
def __init__(self, listings):
self.app = FakeStoragePolicySwift()
self.user_agent = 'fake-internal-client'
self.request_tries = 1
self.parse(listings)
def parse(self, listings):
self.accounts = defaultdict(lambda: defaultdict(list))
for item, timestamp in listings.items():
# XXX this interface is stupid
if isinstance(timestamp, tuple):
timestamp, content_type = timestamp
else:
timestamp, content_type = timestamp, 'application/x-put'
storage_policy_index, path = item
account, container_name, obj_name = split_path(
path.encode('utf-8'), 0, 3, rest_with_last=True)
self.accounts[account][container_name].append(
(obj_name, storage_policy_index, timestamp, content_type))
for account_name, containers in self.accounts.items():
for con in containers:
self.accounts[account_name][con].sort(key=lambda t: t[0])
for account, containers in self.accounts.items():
account_listing_data = []
account_path = '/v1/%s' % account
for container, objects in containers.items():
container_path = account_path + '/' + container
container_listing_data = []
for entry in objects:
(obj_name, storage_policy_index,
timestamp, content_type) = entry
if storage_policy_index is None and not obj_name:
# empty container
continue
obj_path = container_path + '/' + obj_name
ts = Timestamp(timestamp)
headers = {'X-Timestamp': ts.normal,
'X-Backend-Timestamp': ts.internal}
# register object response
self.app.storage_policy[storage_policy_index].register(
'GET', obj_path, swob.HTTPOk, headers)
self.app.storage_policy[storage_policy_index].register(
'DELETE', obj_path, swob.HTTPNoContent, {})
# container listing entry
last_modified = timestamp_to_last_modified(timestamp)
# some tests setup mock listings using floats, some use
# strings, so normalize here
if isinstance(timestamp, numbers.Number):
timestamp = '%f' % timestamp
obj_data = {
'bytes': 0,
# listing data is unicode
'name': obj_name.decode('utf-8'),
'last_modified': last_modified,
'hash': timestamp.decode('utf-8'),
'content_type': content_type,
}
container_listing_data.append(obj_data)
container_listing_data.sort(key=operator.itemgetter('name'))
# register container listing response
container_headers = {}
container_qry_string = '?format=json&marker=&end_marker='
self.app.register('GET', container_path + container_qry_string,
swob.HTTPOk, container_headers,
json.dumps(container_listing_data))
if container_listing_data:
obj_name = container_listing_data[-1]['name']
# client should quote and encode marker
end_qry_string = '?format=json&marker=%s&end_marker=' % (
urllib.parse.quote(obj_name.encode('utf-8')))
self.app.register('GET', container_path + end_qry_string,
swob.HTTPOk, container_headers,
json.dumps([]))
self.app.register('DELETE', container_path,
swob.HTTPConflict, {}, '')
# simple account listing entry
container_data = {'name': container}
account_listing_data.append(container_data)
# register account response
account_listing_data.sort(key=operator.itemgetter('name'))
account_headers = {}
account_qry_string = '?format=json&marker=&end_marker='
self.app.register('GET', account_path + account_qry_string,
swob.HTTPOk, account_headers,
json.dumps(account_listing_data))
end_qry_string = '?format=json&marker=%s&end_marker=' % (
urllib.parse.quote(account_listing_data[-1]['name']))
self.app.register('GET', account_path + end_qry_string,
swob.HTTPOk, account_headers,
json.dumps([]))
class TestReconcilerUtils(unittest.TestCase):
def setUp(self):
self.fake_ring = FakeRing()
reconciler.direct_get_container_policy_index.reset()
def test_parse_raw_obj(self):
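# queue entry names have the form "<policy_index>:/<account>/<container>/<obj>"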
got = reconciler.parse_raw_obj({
'name': "2:/AUTH_bob/con/obj",
'hash': Timestamp(2017551.49350).internal,
'last_modified': timestamp_to_last_modified(2017551.49352),
'content_type': 'application/x-delete',
})
self.assertEqual(got['q_policy_index'], 2)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 2017551.49350)
self.assertEqual(got['q_record'], 2017551.49352)
self.assertEqual(got['q_op'], 'DELETE')
got = reconciler.parse_raw_obj({
'name': "1:/AUTH_bob/con/obj",
'hash': Timestamp(1234.20190).internal,
'last_modified': timestamp_to_last_modified(1234.20192),
'content_type': 'application/x-put',
})
self.assertEqual(got['q_policy_index'], 1)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 1234.20190)
self.assertEqual(got['q_record'], 1234.20192)
self.assertEqual(got['q_op'], 'PUT')
# the 'hash' field in object listing has the raw 'created_at' value
# which could be a composite of timestamps
timestamp_str = encode_timestamps(Timestamp(1234.20190),
Timestamp(1245.20190),
Timestamp(1256.20190),
explicit=True)
got = reconciler.parse_raw_obj({
'name': "1:/AUTH_bob/con/obj",
'hash': timestamp_str,
'last_modified': timestamp_to_last_modified(1234.20192),
'content_type': 'application/x-put',
})
self.assertEqual(got['q_policy_index'], 1)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 1234.20190)
self.assertEqual(got['q_record'], 1234.20192)
self.assertEqual(got['q_op'], 'PUT')
# negative test
obj_info = {
'name': "1:/AUTH_bob/con/obj",
'hash': Timestamp(1234.20190).internal,
'last_modified': timestamp_to_last_modified(1234.20192),
}
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
obj_info['content_type'] = 'foo'
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
obj_info['content_type'] = 'application/x-post'
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': 'bogus'})
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': '-1:/AUTH_test/container'})
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': 'asdf:/AUTH_test/c/obj'})
self.assertRaises(KeyError, reconciler.parse_raw_obj,
{'name': '0:/AUTH_test/c/obj',
'content_type': 'application/x-put'})
def test_get_container_policy_index(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
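# whichever order the responses arrive in, the policy index of the
# replica with the oldest status_changed_at (here 0) must win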
for permutation in itertools.permutations((0, 1, 2)):
reconciler.direct_get_container_policy_index.reset()
resp_headers = [stub_resp_headers[i] for i in permutation]
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
test_values = [(info['x-storage-policy-index'],
info['x-backend-status-changed-at']) for
info in resp_headers]
self.assertEqual(oldest_spi, 0,
"oldest policy index wrong "
"for permutation %r" % test_values)
def test_get_container_policy_index_with_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=next(ts),
storage_policy_index=2,
),
container_resp_headers(
status_changed_at=next(ts),
storage_policy_index=1,
),
# old timestamp, but 500 should be ignored...
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(0).internal,
storage_policy_index=0,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_with_socket_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_with_too_many_errors(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, None)
def test_get_container_policy_index_for_deleted(self):
mock_path = 'swift.container.reconciler.direct_head_container'
headers = container_resp_headers(
status_changed_at=Timestamp(time.time()).internal,
storage_policy_index=1,
)
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_for_recently_deleted(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_recreated(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# old put, no recreate
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# recently deleted
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
# recently recreated
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_split_brain(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# oldest put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# old recreate
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
# recently put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_cache(self):
now = time.time()
ts = itertools.count(int(now))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 0)
# re-mock with errors
stub_resp_headers = [
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
with mock.patch('time.time', new=lambda: now):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# still cached
self.assertEqual(oldest_spi, 0)
# propel time forward
the_future = now + 31
with mock.patch('time.time', new=lambda: the_future):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# expired
self.assertEqual(oldest_spi, None)
def test_direct_delete_container_entry(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
x_timestamp = Timestamp(time.time())
headers = {'x-timestamp': x_timestamp.internal}
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o', headers=headers)
self.assertEqual(len(connect_args), 3)
for args in connect_args:
self.assertEqual(args['method'], 'DELETE')
self.assertEqual(args['path'], '/a/c/o')
self.assertEqual(args['headers'].get('x-timestamp'),
headers['x-timestamp'])
def test_direct_delete_container_entry_with_errors(self):
# setup mock direct_delete
mock_path = \
'swift.container.reconciler.direct_delete_container_object'
stub_resp = [
None,
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
'10.0.0.12', 6201, 'sdj', 404, 'Not Found'
),
]
mock_direct_delete = mock.MagicMock()
mock_direct_delete.side_effect = stub_resp
with mock.patch(mock_path, mock_direct_delete), \
mock.patch('eventlet.greenpool.DEBUG', False):
rv = reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o')
self.assertEqual(rv, None)
self.assertEqual(len(mock_direct_delete.mock_calls), 3)
def test_add_to_reconciler_queue(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
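# queue entries are filed into hourly bucket containers:
# int(5948918.63946 // 3600 * 3600) == 5947200, matching the path below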
self.assertEqual(len(connect_args), 3)
connect_args.sort(key=lambda a: (a['ipaddr'], a['port']))
required_headers = ('x-content-type', 'x-etag')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
self.assertEqual(args['headers']['X-Content-Type'],
'application/x-delete')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_force(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
now = time.time()
with mock.patch(mock_path, fake_hc), \
mock.patch('swift.container.reconciler.time.time',
lambda: now):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT',
force=True)
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
connect_args.sort(key=lambda a: (a['ipaddr'], a['port']))
required_headers = ('x-size', 'x-content-type')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'],
Timestamp(now).internal)
self.assertEqual(args['headers']['X-Etag'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_fails(self):
mock_path = 'swift.common.direct_client.http_connect'
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(507)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT')
self.assertFalse(ret)
def test_add_to_reconciler_queue_socket_error(self):
mock_path = 'swift.common.direct_client.http_connect'
exc = socket.error(errno.ECONNREFUSED,
os.strerror(errno.ECONNREFUSED))
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(500, raise_exc=exc)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertFalse(ret)
def listing_qs(marker):
return "?format=json&marker=%s&end_marker=" % \
urllib.parse.quote(marker.encode('utf-8'))
class TestReconciler(unittest.TestCase):
maxDiff = None
def setUp(self):
self.logger = debug_logger()
conf = {}
with mock.patch('swift.container.reconciler.InternalClient'):
self.reconciler = reconciler.ContainerReconciler(conf)
self.reconciler.logger = self.logger
self.start_interval = int(time.time() // 3600 * 3600)
self.current_container_path = '/v1/.misplaced_objects/%d' % (
self.start_interval) + listing_qs('')
def _mock_listing(self, objects):
self.reconciler.swift = FakeInternalClient(objects)
self.fake_swift = self.reconciler.swift.app
def _mock_oldest_spi(self, container_oldest_spi_map):
self.fake_swift._mock_oldest_spi_map = container_oldest_spi_map
def _run_once(self):
"""
Helper method to run the reconciler once with appropriate direct-client
mocks in place.
Returns the list of direct-deleted container entries in the format
[(acc1, con1, obj1), ...]
"""
def mock_oldest_spi(ring, account, container_name):
return self.fake_swift._mock_oldest_spi_map.get(container_name, 0)
items = {
'direct_get_container_policy_index': mock_oldest_spi,
'direct_delete_container_entry': mock.DEFAULT,
}
mock_time_iter = itertools.count(self.start_interval)
with mock.patch.multiple(reconciler, **items) as mocks:
self.mock_delete_container_entry = \
mocks['direct_delete_container_entry']
with mock.patch('time.time', mock_time_iter.next):
self.reconciler.run_once()
return [c[1][1:4] for c in
mocks['direct_delete_container_entry'].mock_calls]
def test_invalid_queue_name(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/bogus"): 3618.84187,
})
deleted_container_entries = self._run_once()
# we try to find something useful
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('bogus'))])
# but only get the bogus record
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# and just leave it on the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertFalse(deleted_container_entries)
def test_invalid_queue_name_marches_onward(self):
# there's something useful there on the queue
self._mock_listing({
(None, "/.misplaced_objects/3600/00000bogus"): 3600.0000,
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# we get all the queue entries we can
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and one is garbage
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# but the other is workable
self.assertEqual(self.reconciler.stats['noop_object'], 1)
# so pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_queue_name_with_policy_index_delimiter_in_name(self):
q_path = '.misplaced_objects/3600'
obj_path = "AUTH_bob/c:sneaky/o1:sneaky"
# there's something useful there on the queue
self._mock_listing({
(None, "/%s/1:/%s" % (q_path, obj_path)): 3618.84187,
(1, '/%s' % obj_path): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we find the misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_path))])
# move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path),
('DELETE', '/v1/%s' % obj_path)])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path),
('PUT', '/v1/%s' % obj_path)])
# clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries, [(
'.misplaced_objects', '3600', '1:/%s' % obj_path)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_unable_to_direct_get_oldest_storage_policy(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
})
# the reconciler gets "None" if we can't quorum the container
self._mock_oldest_spi({'c': None})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but can't really say where to go looking
self.assertEqual(self.reconciler.stats['unavailable_container'], 1)
# we don't clean up anything
self.assertEqual(self.reconciler.stats['cleanup_object'], 0)
# and we definitely should not pop_queue
self.assertFalse(deleted_container_entries)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('PUT', '/v1/AUTH_bob/c/o1')])
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2))
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_the_other_direction(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/0:/AUTH_bob/c/o1"): 3618.84187,
(0, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('0:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[1].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '0:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_with_unicode_and_spaces(self):
# the "name" in listings and the unicode string passed to all
# functions that are called with (account, container, obj)
obj_name = u"AUTH_bob/c \u062a/o1 \u062a"
# anytime we talk about a call made to swift for a path
obj_path = obj_name.encode('utf-8')
# this mock expects unquoted unicode because it handles container
# listings as well as paths
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/%s" % obj_name): 3618.84187,
(1, "/%s" % obj_name): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
# listing_qs encodes and quotes - so give it name
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_name))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
# these calls are to the real path
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path), # 2
('DELETE', '/v1/%s' % obj_path)]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path), # 1
('PUT', '/v1/%s' % obj_path)]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
self.assertEqual(
delete_headers.get('X-Backend-Storage-Policy-Index'), '1')
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
# this mock received the name, it's encoded down in buffered_http
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/%s' % obj_name)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_delete(self):
q_ts = time.time()
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): (
Timestamp(q_ts).internal, 'application/x-delete'),
# object exists in "correct" storage policy - slightly older
(0, "/AUTH_bob/c/o1"): Timestamp(q_ts - 1).internal,
})
self._mock_oldest_spi({'c': 0})
# the tombstone exists in the enqueued storage policy
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# delete it
self.assertEqual(self.reconciler.stats['delete_attempt'], 1)
self.assertEqual(self.reconciler.stats['delete_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
reconcile_headers = self.fake_swift.storage_policy[0].headers[1]
# we DELETE the object in the right place with q_ts + offset 2
self.assertEqual(reconcile_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_enqueued_for_the_correct_dest_noop(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# nothing to see here
self.assertEqual(self.reconciler.stats['noop_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# so we just pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_newer_than_queue_entry(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.234567, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.234567, offset=2))
# src object is cleaned up
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.123456, offset=1))
# and queue is popped
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_older_than_queue_entry(self):
# should be some sort of retry case
q_ts = time.time()
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
(1, '/AUTH_bob/c/o1'): q_ts - 0.00001, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_src_object_unavailable_with_slightly_newer_tombstone(self):
# should be some sort of retry case
q_ts = float(Timestamp(time.time()))
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
})
self._mock_oldest_spi({'c': 0})
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts, offset=2).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_src_object_unavailable_server_error(self):
# should be some sort of retry case
q_ts = float(Timestamp(time.time()))
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
})
self._mock_oldest_spi({'c': 0})
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_fails_cleanup(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.123457, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# make the DELETE blow up
self.fake_swift.storage_policy[1].register(
'DELETE', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.123457, offset=2))
# we try to cleanup
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.12346, offset=1))
# but cleanup fails!
self.assertEqual(self.reconciler.stats['cleanup_failed'], 1)
# so the queue is not popped
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
# and we'll have to retry
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_src_object_is_forever_gone(self):
# oh boy, hate to be here - this is an oldy
q_ts = self.start_interval - self.reconciler.reclaim_age - 1
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): q_ts,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but it's gone :\
self.assertEqual(self.reconciler.stats['lost_source'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# gah, look, even if it was out there somewhere - we've been at this
# two weeks and haven't found it. We can't just keep looking forever,
# so... we're done
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
# dunno if this is helpful, but FWIW we don't throw tombstones?
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
self.assertEqual(self.reconciler.stats['success'], 1) # lol
def test_object_move_dest_already_moved(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
(1, "/AUTH_bob/c/o1"): 3679.2019,
(0, "/AUTH_bob/c/o1"): 3679.2019,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but we found it already in the right place!
self.assertEqual(self.reconciler.stats['found_object'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# so no attempt to read the source is made, but we do cleanup
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[0]
# rather we just clean up the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3679.2019, offset=1))
# and wipe our hands of it
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_dest_object_newer_than_queue_entry(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
(1, "/AUTH_bob/c/o1"): 3679.2019,
(0, "/AUTH_bob/c/o1"): 3679.2019 + 0.00001, # slightly newer
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we look for misplaced objects...
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but we found it already in the right place!
self.assertEqual(self.reconciler.stats['found_object'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# so no attempt to read the source is made, but we do cleanup
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[0]
# rather we just clean up the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3679.2019, offset=1))
# and since we cleaned up the old object, this counts as done
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_dest_object_older_than_queue_entry(self):
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.38393,
(1, "/AUTH_bob/c/o1"): 36123.38393,
(0, "/AUTH_bob/c/o1"): 36123.38393 - 0.00001, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and since our version is *newer*, we overwrite
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# ... with a q_ts + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.38393, offset=2))
# then clean the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with a q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(36123.38393, offset=1))
# and pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '36000', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_put_fails(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
(1, "/AUTH_bob/c/o1"): 36123.383925,
})
self._mock_oldest_spi({'c': 0})
# make the put to dest fail!
self.fake_swift.storage_policy[0].register(
'PUT', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# we find a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and try to move it, but it fails
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')]) # 2
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# ...with q_ts + offset 2 (20-microseconds)
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.383925, offset=2))
# but it failed
self.assertEqual(self.reconciler.stats['copy_success'], 0)
self.assertEqual(self.reconciler.stats['copy_failed'], 1)
# ... so we don't clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# and we don't pop the queue
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['unhandled_errors'], 0)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_put_blows_up_crazy_town(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
(1, "/AUTH_bob/c/o1"): 36123.383925,
})
self._mock_oldest_spi({'c': 0})
# make the put to dest blow up crazy town
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
self.fake_swift.storage_policy[0].register(
'PUT', '/v1/AUTH_bob/c/o1', blow_up, {})
# turn the crank
deleted_container_entries = self._run_once()
# we find a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and attempt to move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')]) # 2
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# ...with q_ts + offset 2 (20-microseconds)
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.383925, offset=2))
# but it blows up hard
self.assertEqual(self.reconciler.stats['unhandled_error'], 1)
# so we don't cleanup
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# and we don't pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_no_such_object_no_tombstone_recent(self):
q_ts = float(Timestamp(time.time()))
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_jeb/c/o1" % q_path): q_ts
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('1:/AUTH_jeb/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_jeb/c/o1')],
)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_jeb/c/o1')],
)
# the queue entry is recent enough that there could easily be
# tombstones on offline nodes or something, so we'll just leave it
# here and try again later
self.assertEqual(deleted_container_entries, [])
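    # The queue container name above is just the entry's timestamp
    # rounded down to the hour; a sketch of the bucketing math, using a
    # value borrowed from the neighbouring tests:
    #
    #     q_ts = 3679.2019
    #     container = str(int(q_ts // 3600 * 3600))  # -> '3600'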
def test_object_move_no_such_object_no_tombstone_ancient(self):
queue_ts = float(Timestamp(time.time())) - \
self.reconciler.reclaim_age * 1.1
container = str(int(queue_ts // 3600 * 3600))
self._mock_listing({
(
None, "/.misplaced_objects/%s/1:/AUTH_jeb/c/o1" % container
): queue_ts
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container)),
('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('1:/AUTH_jeb/c/o1'))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_jeb/c/o1')],
)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_jeb/c/o1')],
)
# the queue entry is old enough that the tombstones, if any, have
# probably been reaped, so we'll just give up
self.assertEqual(
deleted_container_entries,
[('.misplaced_objects', container, '1:/AUTH_jeb/c/o1')])
def test_delete_old_empty_queue_containers(self):
ts = time.time() - self.reconciler.reclaim_age * 1.1
container = str(int(ts // 3600 * 3600))
older_ts = ts - 3600
older_container = str(int(older_ts // 3600 * 3600))
self._mock_listing({
(None, "/.misplaced_objects/%s/" % container): 0,
(None, "/.misplaced_objects/%s/something" % older_container): 0,
})
deleted_container_entries = self._run_once()
self.assertEqual(deleted_container_entries, [])
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container)),
('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('DELETE', '/v1/.misplaced_objects/%s' % container),
('GET', '/v1/.misplaced_objects/%s' % older_container +
listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % older_container +
listing_qs('something'))])
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
def test_iter_over_old_containers_in_reverse(self):
step = reconciler.MISPLACED_OBJECTS_CONTAINER_DIVISOR
now = self.start_interval
containers = []
for i in range(10):
container_ts = int(now - step * i)
container_name = str(container_ts // 3600 * 3600)
containers.append(container_name)
# add some old containers too
now -= self.reconciler.reclaim_age
old_containers = []
for i in range(10):
container_ts = int(now - step * i)
container_name = str(container_ts // 3600 * 3600)
old_containers.append(container_name)
containers.sort()
old_containers.sort()
all_containers = old_containers + containers
self._mock_listing(dict((
(None, "/.misplaced_objects/%s/" % container), 0
) for container in all_containers))
deleted_container_entries = self._run_once()
self.assertEqual(deleted_container_entries, [])
last_container = all_containers[-1]
account_listing_calls = [
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(last_container)),
]
new_container_calls = [
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('')) for container in reversed(containers)
][1:]  # current_container gets skipped the second time around...
old_container_listings = [
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('')) for container in reversed(old_containers)
]
old_container_deletes = [
('DELETE', '/v1/.misplaced_objects/%s' % container)
for container in reversed(old_containers)
]
old_container_calls = list(itertools.chain(*zip(
old_container_listings, old_container_deletes)))
self.assertEqual(self.fake_swift.calls,
[('GET', self.current_container_path)] +
account_listing_calls + new_container_calls +
old_container_calls)
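    # For reference, the itertools.chain(*zip(a, b)) used above
    # interleaves two equal-length sequences, which is how each old
    # container's listing call pairs up with its delete call:
    #
    #     list(itertools.chain(*zip(['l1', 'l2'], ['d1', 'd2'])))
    #     # -> ['l1', 'd1', 'l2', 'd2']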
def test_error_in_iter_containers(self):
self._mock_listing({})
# make the listing return an error
self.fake_swift.storage_policy[None].register(
'GET', '/v1/.misplaced_objects' + listing_qs(''),
swob.HTTPServiceUnavailable, {})
self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs(''))])
self.assertEqual(self.reconciler.stats, {})
errors = self.reconciler.logger.get_lines_for_level('error')
self.assertEqual(errors, [
'Error listing containers in account '
'.misplaced_objects (Unexpected response: '
'503 Service Unavailable)'])
def test_unhandled_exception_in_reconcile(self):
self._mock_listing({})
# make the listing blow up
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
self.fake_swift.storage_policy[None].register(
'GET', '/v1/.misplaced_objects' + listing_qs(''),
blow_up, {})
self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs(''))])
self.assertEqual(self.reconciler.stats, {})
errors = self.reconciler.logger.get_lines_for_level('error')
self.assertEqual(errors,
['Unhandled Exception trying to reconcile: '])
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import DeploymentUpdateBase
class TestDeploymentUpdateMixedOperations(DeploymentUpdateBase):
def test_add_node_and_relationship(self):
"""
Base: site2 is connected_to site1
site2=====>site1
Modification:
1. site3 added
2. site3 connected_to site1.
3. site2 connected_to site3.
site2=====>site1
    \         ^
     \       /
      ->site3
:return:
"""
deployment, modified_bp_path = self._deploy_and_get_modified_bp_path(
'add_node_and_relationship')
node_mapping = {
'stagnant': 'site1',
'added_relationship': 'site2',
'new': 'site3'
}
base_nodes, base_node_instances = \
self._map_node_and_node_instances(deployment.id, node_mapping)
# check all operations have been executed
self.assertDictContainsSubset(
{'source_ops_counter': '3'},
base_node_instances['added_relationship'][0]
['runtime_properties']
)
dep_update = self.client.deployment_updates.update(deployment.id,
modified_bp_path)
# wait for 'update' workflow to finish
self._wait_for_execution_to_terminate(deployment.id, 'update')
self._wait_for_successful_state(dep_update.id)
# Get all related and affected nodes and node instances
modified_nodes, modified_node_instances = \
self._map_node_and_node_instances(deployment.id, node_mapping)
# assert all unaffected nodes and node instances remained intact
self._assert_equal_entity_dicts(
base_nodes,
modified_nodes,
keys=['stagnant', 'added_relationship', 'new'],
excluded_items=['runtime_properties',
'plugins',
'relationships']
)
self._assert_equal_entity_dicts(
base_node_instances,
modified_node_instances,
keys=['stagnant', 'added_relationship', 'new'],
excluded_items=['runtime_properties', 'relationships']
)
# Check that there is only 1 from each
self.assertEquals(1, len(modified_nodes['stagnant']))
self.assertEquals(1, len(modified_node_instances['stagnant']))
self.assertEquals(1, len(modified_nodes['added_relationship']))
self.assertEquals(1,
len(modified_node_instances['added_relationship']))
self.assertEquals(1, len(modified_nodes['new']))
self.assertEquals(1, len(modified_node_instances['new']))
# get the nodes and node instances
added_relationship_node_instance = \
modified_node_instances['added_relationship'][0]
new_node = modified_nodes['new'][0]
new_node_instance = modified_node_instances['new'][0]
# assert site3 has 1 relationship and site2 now has 2
self.assertEquals(1, len(new_node.relationships))
self.assertEquals(2,
len(added_relationship_node_instance.relationships))
# check the relationship between site2 and site1 is intact
self._assert_relationship(
added_relationship_node_instance.relationships,
target='site1',
expected_type='cloudify.relationships.connected_to')
# check new relationship between site2 and site3
self._assert_relationship(
added_relationship_node_instance.relationships,
target='site3',
expected_type='cloudify.relationships.connected_to')
# check the new relationship between site3 and site1 is in place
self._assert_relationship(
new_node.relationships,
target='site1',
expected_type='cloudify.relationships.connected_to')
# check all operations have been executed.
# source_ops_counter was increased once for each operation between site2
# and site1, and once more by the establish operation between site2 and
# site3
self.assertDictContainsSubset(
{'source_ops_counter': '4'},
added_relationship_node_instance['runtime_properties']
)
self.assertDictContainsSubset(
{'source_ops_counter': '3'},
new_node_instance['runtime_properties']
)
def test_add_remove_and_modify_relationship(self):
"""
site0 relationships:
i | base | modification | comment
-------------------------------------------------
0. | site1 | site6 | new site (and removed site1)
1. | site2 | site4 | moved site (and removed site2)
2. | site3 | site2B | new site
3. | site4 | site3 | moved site
4. | site5 | - | remove site5
:return:
"""
deployment, modified_bp_path = self._deploy_and_get_modified_bp_path(
'add_remove_and_modify_relationship')
dep_update = self.client.deployment_updates.update(deployment.id,
modified_bp_path)
self._wait_for_execution_to_terminate(deployment.id, 'update')
self._wait_for_successful_state(dep_update.id)
node_mapping = {'source': 'site0'}
modified_nodes, modified_node_instances = \
self._map_node_and_node_instances(deployment.id, node_mapping)
modified_node = modified_nodes['source'][0]
modified_node_instance = modified_node_instances['source'][0]
# Assert relationship order
rel_targets = ['site6', 'site4', 'site2B', 'site3']
for index, rel_target in enumerate(rel_targets):
self.assertEquals(
modified_node['relationships'][index]['target_id'],
rel_targets[index])
for index, rel_target in enumerate(rel_targets):
self.assertEquals(
modified_node_instance[
'relationships'][index]['target_name'],
rel_targets[index]
)
# Assert all operation were executed
# Pre update:
# 1. establish site0->site3: source_ops_counter=1
# 2. establish site0->site4: source_ops_counter=2
# 3. establish site0->site5: source_ops_counter=3
# Post update:
# 5. unlink site0->site1: source_ops_counter=4
# 6. unlink site0->site2: source_ops_counter=5
# 7. establish site0->site6: source_ops_counter=6
# 8. establish site0->site2B: source_ops_counter=7
self.assertDictContainsSubset(
{'source_ops_counter': '7'},
modified_node_instance['runtime_properties']
)
def test_add_relationships_between_added_nodes(self):
"""
Tests the creation of a deployment from scratch.
The original deployment contains only one node that will be removed.
The following diagram depicts the new deployment:
      e
     / \
    c   d
   / \
  a   b     f
All of the relationships are of the contained_in type and point upward,
i.e. a is contained in c and d is contained in e. f is the only node
with no relationships to or from it.
:return:
"""
deployment, modified_bp_path = self._deploy_and_get_modified_bp_path(
'add_relationships_between_added_nodes')
node_mapping = {
'a': 'site_a',
'b': 'site_b',
'c': 'site_c',
'd': 'site_d',
'e': 'site_e',
'f': 'site_f'
}
node_ids = set(node_mapping.keys())
root_node_ids = {'e', 'f'}
dep_update = self.client.deployment_updates.update(deployment.id,
modified_bp_path)
# wait for 'update' workflow to finish
self._wait_for_execution_to_terminate(deployment.id, 'update')
self._wait_for_successful_state(dep_update.id)
nodes, node_instances = \
self._map_node_and_node_instances(deployment.id, node_mapping)
node_instances = {k: v[0] for k, v in node_instances.iteritems()}
# Assert that f isn't connected to any node, and all of the install
# operation ran
for node in root_node_ids:
self.assertEquals(0, len(node_instances[node]['relationships']))
# Assert that each node instance had only 1 relationship
for node in node_ids - root_node_ids:
self.assertEquals(1, len(node_instances[node]['relationships']))
# Assert that node a, b, c and d have started correctly
for node in node_ids - root_node_ids:
self.assertDictContainsSubset(
{'{0}_ops_counter'.format(node): str(3)},
node_instances[node]['runtime_properties'])
# Assert that b and d established relationships successfully
# through source runtime properties
for node in {'b', 'd'}:
self.assertDictContainsSubset(
{'source_ops_counter_{0}'.format(node): str(1)},
node_instances[node]['runtime_properties'])
# Assert that a and c established relationships successfully
# through target runtime properties of node e and c (respectively)
for node in {'e', 'c'}:
self.assertDictContainsSubset(
{'target_ops_counter': str(1)},
node_instances[node]['runtime_properties'])
|
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import abc
import subprocess
import jinja2
import tempfile
import datetime
import logging
import typing
import os
import pyautogui
from AppKit import NSBundle # used to suppress macOS dock icon pop up/bounce
import utils
import browsers
def GetTemplateFileForBrowser(browser_driver: browsers.BrowserDriver,
template_file: str) -> str:
if browser_driver.name == "safari":
return f"safari_{template_file}"
else:
return template_file
class ScenarioOSADriver(abc.ABC):
"""Base Class encapsulating OSA script driving a scenario, with setup and tear
down.
"""
def __init__(self, scenario_name, duration: datetime.timedelta):
self.name = scenario_name
self.script_process = None
self.osa_script = None
self.duration = duration
def Launch(self):
"""Starts the driver script.
"""
app_info = NSBundle.mainBundle().infoDictionary()
# Suppress macOS dock icon pop up/bounce.
app_info["LSBackgroundOnly"] = "1"
# Disable aborting the sequence of movements by moving to the corner of
# the screen. This is fine because there isn't a sequence but a single move.
pyautogui.FAILSAFE = False
# Move the cursor out of the way so it's always in the same spot.
pyautogui.moveTo(0, 0)
assert self.osa_script is not None
logging.debug(f"Starting scenario {self.name}")
self.script_process = subprocess.Popen(['osascript', self.osa_script.name])
def Wait(self):
"""Waits for the script to complete.
"""
assert self.script_process is not None, "Driver wasn't launched."
logging.debug(f"Waiting for scenario {self.name}")
self.script_process.wait()
def TearDown(self):
"""Terminates the script if currently running and ensures related processes
are cleaned up.
"""
logging.debug(f"Tearing down scenario {self.name}")
if self.script_process:
utils.TerminateProcess(self.script_process)
self.osa_script.close()
def IsRunning(self) -> bool:
"""Returns true if the script is currently running.
"""
return self.script_process.poll() is None
def _CompileTemplate(self, template_file: str, extra_args: typing.Dict):
"""Compiles script `template_file`, feeding `extra_args` into a temporary
file.
"""
loader = jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), "driver_scripts_templates"))
env = jinja2.Environment(loader=loader)
template = env.get_template(template_file)
self.osa_script = tempfile.NamedTemporaryFile('w+t')
self.osa_script.write(template.render(**extra_args))
self.osa_script.flush()
self._args = extra_args
def Summary(self):
"""Returns a dictionary describing the scenarios parameters.
"""
return {'name': self.name, **self._args}
class ScenarioWithBrowserOSADriver(ScenarioOSADriver):
"""Specialisation for OSA script that runs with a browser.
"""
def __init__(self, scenario_name, browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta):
super().__init__(f"{browser_driver.name}_{scenario_name}", duration)
self.browser = browser_driver
def Launch(self):
self.browser.Launch()
super().Launch()
def TearDown(self):
super().TearDown()
self.browser.TearDown()
def Summary(self):
"""Returns a dictionary describing the scenarios parameters.
"""
return {**super().Summary(), 'browser': self.browser.Summary()}
def _CompileTemplate(self, template_file, extra_args: typing.Dict):
return super()._CompileTemplate(template_file, {
"browser": self.browser.process_name,
**extra_args
})
class IdleScenario(ScenarioOSADriver):
"""Scenario that lets the system idle.
"""
def __init__(self, duration: datetime.timedelta, scenario_name="idle"):
super().__init__(scenario_name, duration)
self._CompileTemplate("idle", {
"delay": duration.total_seconds(),
})
class IdleOnSiteScenario(ScenarioWithBrowserOSADriver):
"""Scenario that lets a browser idle on a web page.
"""
def __init__(self, browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta, site_url: str, scenario_name):
super().__init__(scenario_name, browser_driver, duration)
self._CompileTemplate(
GetTemplateFileForBrowser(browser_driver, "idle_on_site"), {
"idle_site": site_url,
"delay": duration.total_seconds(),
})
@staticmethod
def Wiki(browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta):
return IdleOnSiteScenario(browser_driver, duration,
"http://www.wikipedia.com/wiki/Alessandro_Volta",
"idle_on_wiki")
@staticmethod
def Youtube(browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta):
return IdleOnSiteScenario(
browser_driver, duration,
"https://www.youtube.com/watch?v=9EE_ICC_wFw?autoplay=1",
"idle_on_youtube")
class ZeroWindowScenario(ScenarioWithBrowserOSADriver):
"""Scenario that lets a browser idle with no window.
"""
def __init__(self,
browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta,
scenario_name="zero_window"):
super().__init__(scenario_name, browser_driver, duration)
self._CompileTemplate(
GetTemplateFileForBrowser(browser_driver, "zero_window"), {
"delay": duration.total_seconds(),
})
class NavigationScenario(ScenarioWithBrowserOSADriver):
"""Scenario that has a browser navigating on web pages in a loop.
"""
NAVIGATED_SITES = [
"https://amazon.com",
"https://www.amazon.com/s?k=computer&ref=nb_sb_noss_2",
"https://google.com", "https://www.google.com/search?q=computers",
"https://www.youtube.com",
"https://www.youtube.com/results?search_query=computers",
"https://docs.google.com/document/d/1Ll-8Nvo6JlhzKEttst8GHWCc7_A8Hluy2fX99cy4Sfg/edit?usp=sharing"
]
def __init__(self,
browser_driver: browsers.BrowserDriver,
navigation_duration: datetime.timedelta,
navigation_cycles: int,
sites=NAVIGATED_SITES,
scenario_name="navigation"):
super().__init__(scenario_name, browser_driver,
navigation_duration * navigation_cycles * len(sites))
self._CompileTemplate(
GetTemplateFileForBrowser(browser_driver, "navigation"), {
"per_navigation_delay": navigation_duration.total_seconds(),
"navigation_cycles": navigation_cycles,
"sites": ",".join([f'"{site}"' for site in sites])
})
class MeetScenario(ScenarioWithBrowserOSADriver):
"""Scenario that has the browser join a Google Meet room.
"""
def __init__(self,
browser_driver: browsers.BrowserDriver,
duration: datetime.timedelta,
meeting_id: int,
scenario_name="meet"):
super().__init__(scenario_name, browser_driver, duration)
self._CompileTemplate(GetTemplateFileForBrowser(browser_driver, "meet"), {
"delay": duration.total_seconds(),
"meeting_id": meeting_id
})
def MakeScenarioDriver(scenario_name,
browser_driver: browsers.BrowserDriver,
meet_meeting_id=None) -> ScenarioOSADriver:
"""Creates scenario driver by name.
Args:
scenario_name: Identifier for the scenario to create. Supported scenarios
are: meet, idle_on_wiki, idle_on_youtube, navigation, zero_window and
idle.
browser_driver: Browser the scenario is created with.
meet_meeting_id: Optional meeting id used for meet scenario.
"""
if "idle" == scenario_name:
return IdleScenario(datetime.timedelta(minutes=60))
if not browser_driver:
return None
if "meet" == scenario_name:
return MeetScenario(browser_driver,
datetime.timedelta(minutes=60),
meeting_id=meet_meeting_id)
if "idle_on_wiki" == scenario_name:
return IdleOnSiteScenario.Wiki(browser_driver,
datetime.timedelta(minutes=60))
if "idle_on_youtube" == scenario_name:
return IdleOnSiteScenario.Youtube(browser_driver,
datetime.timedelta(minutes=60))
if "navigation" == scenario_name:
return NavigationScenario(
browser_driver,
navigation_duration=datetime.timedelta(seconds=15),
navigation_cycles=70)
if "zero_window" == scenario_name:
return ZeroWindowScenario(browser_driver, datetime.timedelta(minutes=60))
return None
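# Minimal usage sketch (assumes a concrete browsers.BrowserDriver named
# `browser_driver`; illustrative only, not part of the original module):
#
#     driver = MakeScenarioDriver("idle_on_wiki", browser_driver)
#     if driver is not None:
#         driver.Launch()
#         driver.Wait()
#         driver.TearDown()
#         print(driver.Summary())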
|
|
import re
from clang.cindex import CursorKind
from util import trim
semicolon = ';'
open_paren = '('
close_paren = ')'
const_token = 'const'
open_brace = '{'
close_brace = '}'
open_bracket = '<'
close_bracket = '>'
comma = ','
brackets = [open_paren, close_paren, open_brace, close_brace]
def get_tokens(tu, cursor):
return [x for x in tu.get_tokens(extent=cursor.extent)]
def get_class_prefix(translation_unit, class_cursor):
retval = ''
tokens = get_tokens(translation_unit, class_cursor)
open_brackets = 0
for i in range(len(tokens)):
spelling = tokens[i].spelling
if spelling == open_brace or spelling == semicolon:
break
elif spelling == close_bracket:
open_brackets -= 1
if open_brackets == 0:
retval += spelling + '\n'
continue
elif i and i + 1 != len(tokens) and \
        retval[-1] not in (open_bracket, close_bracket, '\n'):
if spelling == open_bracket:
open_brackets += 1
retval += ' '
retval += spelling
return retval
def without_spaces(spelling):
    return spelling in ('::', '<', '>', ',')
def get_type_alias(translation_unit, cursor):
tokens = get_tokens(translation_unit, cursor)
type_alias = ''
for token in tokens:
spelling = token.spelling
if token == tokens[-1]:
type_alias += spelling + '\n'
elif without_spaces(spelling):
alias_start = (len(type_alias) > 0 and type_alias[:-1] or '')
type_alias = alias_start + spelling
elif token == tokens[-2]:
type_alias += spelling
else:
type_alias += spelling + ' '
return type_alias
def get_typedef(translation_unit, cursor):
tokens = get_tokens(translation_unit, cursor)
typedef = ''
for token in tokens:
spelling = token.spelling
if token == tokens[-1]:
typedef += spelling + '\n'
elif without_spaces(spelling):
typedef_start = (len(typedef) > 0 and typedef[:-1] or '')
typedef = typedef_start + spelling
elif token == tokens[-2]:
typedef += spelling
else:
typedef += spelling + ' '
return typedef
def get_type_alias_or_typedef(translation_unit, cursor):
if is_type_alias(cursor.kind):
return get_type_alias(translation_unit, cursor)
if is_typedef(cursor.kind):
return get_typedef(translation_unit, cursor)
def get_variable_declaration(translation_unit, cursor):
tokens = get_tokens(translation_unit, cursor)
variable_declaration = ''
for token in tokens:
spelling = token.spelling
if token == tokens[-1]:
variable_declaration += spelling + '\n'
elif token == tokens[-2]:
variable_declaration += spelling
else:
variable_declaration += spelling + ' '
return variable_declaration
def get_enum_definition(translation_unit, cursor):
tokens = get_tokens(translation_unit, cursor)
enum_definition = ''
for token in tokens:
enum_definition += token.spelling
if token == tokens[-1]:
enum_definition += '\n'
elif token != tokens[-2]:
enum_definition += ' '
return enum_definition
def format_enum_definition(enum_indent, base_indent, enum_definition):
formatted_definition = ''
enum_prefix = trim(enum_definition.split(open_brace)[0])
formatted_definition += enum_indent + enum_prefix + '\n'
formatted_definition += enum_indent + open_brace
enum_definition = trim(trim(trim(enum_definition)[len(enum_prefix):])[1:])
enum_definition = enum_definition.replace('};','')
entries = enum_definition.split(comma)
for entry in entries:
if entry != entries[0]:
formatted_definition += comma
formatted_definition += '\n' + enum_indent + base_indent + trim(entry)
formatted_definition += '\n' + enum_indent + '};\n'
return formatted_definition
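# Assuming util.trim() strips surrounding whitespace, the formatter above
# turns a flattened definition into one entry per line, e.g. (sketch):
#
#     format_enum_definition('', '    ', 'enum Color { RED, GREEN };')
#     # enum Color
#     # {
#     #     RED,
#     #     GREEN
#     # };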
def get_function(function_indent, base_indent, translation_unit, cursor, class_prefix=''):
tokens = get_tokens(translation_unit, cursor)
function = function_indent
open_braces = 0
indent = function_indent
for i in range(len(tokens)):
spelling = tokens[i].spelling
if spelling == cursor.spelling:
spelling = class_prefix + spelling
if spelling == open_brace:
function += '\n' + indent + spelling + '\n' + indent + base_indent
open_braces += 1
indent += base_indent
elif spelling == close_brace:
open_braces -= 1
indent = function_indent + open_braces * base_indent
function = function[:-len(base_indent)]
function += spelling + '\n' + indent
if open_braces == 0:
break
elif spelling == semicolon:
function = function[:-1]
function += spelling + '\n' + indent
else:
function += spelling + ' '
if function.endswith(indent):
function = function[:-len(indent)]
if function_indent == '':
function = function[1:]
return function
def get_function_declaration(translation_unit, cursor, class_prefix=''):
tokens = get_tokens(translation_unit, cursor)
function_declaration = ''
for token in tokens:
spelling = token.spelling
if spelling == cursor.spelling:
spelling = class_prefix + spelling
if spelling == open_brace or spelling == semicolon:
break
elif spelling == '*' or spelling == '&':
function_declaration = function_declaration[:-1]
function_declaration += spelling + ' '
elif spelling == '::':
function_declaration = function_declaration[:-1]
function_declaration += spelling
else:
function_declaration += spelling + ' '
return function_declaration[:-1] + ';\n'
def is_inclusion_directive(kind):
return kind == CursorKind.INCLUSION_DIRECTIVE
def is_class(kind):
return kind == CursorKind.CLASS_DECL or \
kind == CursorKind.CLASS_TEMPLATE or \
kind == CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION
def is_struct(kind):
return kind == CursorKind.STRUCT_DECL
def is_forward_declaration(translation_unit, cursor):
if not (is_class(cursor.kind) or is_struct(cursor.kind)):
return False
return get_class_prefix(translation_unit, cursor)[-1] == ';'
def is_namespace(kind):
return kind == CursorKind.NAMESPACE
def is_function(kind):
return kind in [CursorKind.CXX_METHOD,
CursorKind.FUNCTION_DECL,
CursorKind.CONVERSION_FUNCTION]
def is_function_template(kind):
return kind == CursorKind.FUNCTION_TEMPLATE
def is_template(kind):
return kind == CursorKind.CLASS_TEMPLATE or \
kind == CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION or \
kind == CursorKind.FUNCTION_TEMPLATE
def is_type_alias(kind):
return kind == CursorKind.TYPE_ALIAS_DECL
def is_typedef(kind):
return kind == CursorKind.TYPEDEF_DECL
def is_static_or_global_variable(kind):
return kind == CursorKind.VAR_DECL
def is_enum(kind):
return kind == CursorKind.ENUM_DECL
def is_access_specifier(kind):
return kind == CursorKind.CXX_ACCESS_SPEC_DECL
def is_variable(kind):
return kind == CursorKind.FIELD_DECL
def is_member_variable(kind):
return is_variable(kind)
def is_constructor(kind):
return kind == CursorKind.CONSTRUCTOR
def is_destructor(kind):
return kind == CursorKind.DESTRUCTOR
class SpecifierParser(object):
def __init__(self):
self.last = ''
def parse(self,function,spelling):
if spelling == '=':
self.last = '='
elif spelling == 'default' and self.last == '=':
function.is_default = True
self.last = ''
elif spelling == 'delete' and self.last == '=':
function.is_deleted = True
self.last = ''
elif spelling == 'noexcept':
function.is_noexcept = True
self.last = ''
return function
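# Usage sketch for SpecifierParser (the `fn` object is hypothetical; any
# object with is_default/is_deleted/is_noexcept attributes would do):
#
#     parser = SpecifierParser()
#     for spelling in ['=', 'default']:
#         fn = parser.parse(fn, spelling)
#     # fn.is_default is now True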
def make_type(spellings):
function_type = ''
for spelling in spellings:
function_type += spelling
if spelling == 'const' or spelling == 'typename':
function_type += ' '
return function_type
def parse_inclusion_directive(data,cursor):
tokens = get_tokens(data.tu, cursor)
directive = tokens[0].spelling + tokens[1].spelling + ' '
spelling = tokens[2].spelling
if (spelling.startswith('"') and spelling.endswith('"')) or (spelling.startswith('<') and spelling.endswith('>')):
return directive + spelling
closing = '"'
if tokens[2].spelling == '<':
closing = '>'
for i in range(2,len(tokens)):
spelling = tokens[i].spelling
directive += spelling
if spelling == closing:
return directive + '\n'
my_identifier_regex = re.compile(r'[_a-zA-Z][_a-zA-Z0-9]*')
regex_for_identifier = r'[_a-zA-Z][_a-zA-Z0-9]*'
regex_for_type = regex_for_identifier + r'\s'
def get_function_regex(name):
return re.compile(r'(?:constexpr)?\s*(?:virtual)?\s*(.*)' + name + r'\s*\(\s*.*(=*[_a-zA-Z][_a-zA-Z0-9]*)*\)\s*(const|noexcept|override|final|\&|\&\&)*\s*')
def read_function_specifiers(function,tokens,index):
last_was_equals = False
while True:
spelling = tokens[index].spelling
if spelling == 'noexcept':
function.is_noexcept = True
elif spelling == 'const':
function.is_const = True
elif spelling == 'override':
function.overrides_virtual = True
elif spelling == 'final':
function.is_final = True
elif spelling == open_brace or spelling == semicolon:
return function, index
elif spelling == '=':
last_was_equals = True
elif last_was_equals:
if spelling == 'default':
function.is_default = True
elif spelling == 'delete':
function.is_deleted = True
last_was_equals = False
index += 1
def same_tokens(tokens, other_tokens):
if len(tokens) != len(other_tokens):
return False
for i in range(len(tokens)):
if tokens[i].spelling != other_tokens[i].spelling:
return False
return True
class ScopeMonitor(object):
def __init__(self, open_parens = 0, open_brackets = 0, open_braces = 0):
self.initial_open_parens = open_parens
self.open_parens = self.initial_open_parens
self.initial_open_brackets = open_brackets
self.open_brackets = self.initial_open_brackets
self.initial_open_braces = open_braces
self.open_braces = self.initial_open_braces
self.opened_scope = False
def process(self,spelling):
if spelling == open_paren:
self.opened_scope = True
self.open_parens += 1
if spelling == close_paren:
self.open_parens -= 1
if spelling == open_bracket:
self.opened_scope = True
self.open_brackets += 1
if spelling == close_bracket:
self.open_brackets -= 1
if spelling == open_brace:
self.opened_scope = True
self.open_braces += 1
if spelling == close_brace:
self.open_braces -= 1
def before_scope(self):
return not self.opened_scope
def all_closed(self):
return self.open_parens == self.initial_open_parens and \
self.open_brackets == self.initial_open_brackets and \
self.open_braces == self.initial_open_braces
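# ScopeMonitor walkthrough (illustrative): feed it token spellings and it
# reports when every paren/bracket/brace opened since construction has
# been closed again:
#
#     monitor = ScopeMonitor()
#     for spelling in ['(', '{', '}', ')']:
#         monitor.process(spelling)
#     monitor.all_closed()  # -> True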
def get_end_of_member_initialization(index,tokens):
while tokens[index].spelling in [':', ',']:
index += 1
while tokens[index].spelling not in [open_paren, open_brace]:
index += 1
counter = ScopeMonitor()
counter.process(tokens[index].spelling)
while not counter.all_closed():
index += 1
counter.process(tokens[index].spelling)
return index
def get_all_variable_tokens(decl_tokens, tokens):
if len(decl_tokens) >= len(tokens):
return decl_tokens
index = -1
for i in range(len(tokens) - len(decl_tokens)):
if same_tokens(decl_tokens, tokens[i:len(decl_tokens) + i]):
index = i
break
if index == -1:
return decl_tokens
for i in range(index+len(decl_tokens),len(tokens)):
decl_tokens.append(tokens[i])
if tokens[i].spelling == semicolon:
return decl_tokens
def get_all_tokens(decl_tokens, tokens):
if len(decl_tokens) >= len(tokens) or decl_tokens[-1].spelling == close_brace:
return decl_tokens
index = -1
for i in range(len(tokens) - len(decl_tokens)):
if same_tokens(decl_tokens, tokens[i:len(decl_tokens)+i]):
index = i
break
if index == -1:
return decl_tokens
end_index = get_end_of_member_initialization(index+len(decl_tokens), tokens)
decl_tokens.extend(tokens[index+len(decl_tokens):end_index])
index = end_index
while tokens[index].spelling != open_brace:
decl_tokens.append(tokens[index])
index += 1
monitor = ScopeMonitor()
for i in range(index, len(tokens)):
monitor.process(tokens[i].spelling)
decl_tokens.append(tokens[i])
if not monitor.before_scope() and monitor.all_closed():
return decl_tokens
|
|
# Copyright 2016 Red Hat, Inc
# Copyright 2017 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
libvirt specific routines.
"""
import binascii
import os
import stat
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import units
from oslo_utils import uuidutils
import nova.privsep
LOG = logging.getLogger(__name__)
@nova.privsep.sys_admin_pctxt.entrypoint
def dmcrypt_create_volume(target, device, cipher, key_size, key):
"""Sets up a dmcrypt mapping
:param target: device mapper logical device name
:param device: underlying block device
:param cipher: encryption cipher string digestible by cryptsetup
:param key_size: encryption key size
:param key: encoded encryption key bytestring
"""
cmd = ('cryptsetup',
'create',
target,
device,
'--cipher=' + cipher,
'--key-size=' + str(key_size),
'--key-file=-')
key = binascii.hexlify(key).decode('utf-8')
processutils.execute(*cmd, process_input=key)
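# For reference, the tuple above expands to roughly this shell command,
# with the hex-encoded key supplied on stdin (the cipher and key size
# shown are illustrative values):
#
#     cryptsetup create <target> <device> \
#         --cipher=aes-xts-plain64 --key-size=256 --key-file=-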
@nova.privsep.sys_admin_pctxt.entrypoint
def dmcrypt_delete_volume(target):
"""Deletes a dmcrypt mapping
:param target: name of the mapped logical device
"""
processutils.execute('cryptsetup', 'remove', target)
@nova.privsep.sys_admin_pctxt.entrypoint
def ploop_init(size, disk_format, fs_type, disk_path):
"""Initialize ploop disk, make it readable for non-root user
:param disk_format: data allocation format (raw or expanded)
:param fs_type: filesystem (ext4, ext3, none)
:param disk_path: ploop image file
"""
processutils.execute('ploop', 'init', '-s', size, '-f', disk_format, '-t',
fs_type, disk_path, check_exit_code=True)
# Add read access for all users, because "ploop init" creates
# disk with rw rights only for root. OpenStack user should have access
# to the disk to request info via "qemu-img info"
# TODO(mikal): this is a faithful rendition of the pre-privsep code from
# the libvirt driver, but it seems undesirable to me. It would be good to
# create the loop file with the right owner or group such that we don't
# need to have it world readable. I don't have access to a system to test
# this on however.
st = os.stat(disk_path)
os.chmod(disk_path, st.st_mode | stat.S_IROTH)
@nova.privsep.sys_admin_pctxt.entrypoint
def ploop_resize(disk_path, size):
"""Resize ploop disk
:param disk_path: ploop image file
:param size: new size (in bytes)
"""
processutils.execute('prl_disk_tool', 'resize',
'--size', '%dM' % (size // units.Mi),
'--resize_partition',
'--hdd', disk_path,
check_exit_code=True)
@nova.privsep.sys_admin_pctxt.entrypoint
def ploop_restore_descriptor(image_dir, base_delta, fmt):
"""Restore ploop disk descriptor XML
:param image_dir: path to where descriptor XML is created
:param base_delta: ploop image file containing the data
:param fmt: ploop data allocation format (raw or expanded)
"""
processutils.execute('ploop', 'restore-descriptor', '-f', fmt,
image_dir, base_delta,
check_exit_code=True)
@nova.privsep.sys_admin_pctxt.entrypoint
def plug_infiniband_vif(vnic_mac, device_id, fabric, net_model, pci_slot):
processutils.execute('ebrctl', 'add-port', vnic_mac, device_id,
fabric, net_model, pci_slot)
@nova.privsep.sys_admin_pctxt.entrypoint
def unplug_infiniband_vif(fabric, vnic_mac):
processutils.execute('ebrctl', 'del-port', fabric, vnic_mac)
@nova.privsep.sys_admin_pctxt.entrypoint
def plug_midonet_vif(port_id, dev):
processutils.execute('mm-ctl', '--bind-port', port_id, dev)
@nova.privsep.sys_admin_pctxt.entrypoint
def unplug_midonet_vif(port_id):
processutils.execute('mm-ctl', '--unbind-port', port_id)
@nova.privsep.sys_admin_pctxt.entrypoint
def plug_plumgrid_vif(dev, iface_id, vif_address, net_id, tenant_id):
processutils.execute('ifc_ctl', 'gateway', 'add_port', dev)
processutils.execute('ifc_ctl', 'gateway', 'ifup', dev,
'access_vm', iface_id, vif_address,
'pgtag2=%s' % net_id, 'pgtag1=%s' % tenant_id)
@nova.privsep.sys_admin_pctxt.entrypoint
def unplug_plumgrid_vif(dev):
processutils.execute('ifc_ctl', 'gateway', 'ifdown', dev)
processutils.execute('ifc_ctl', 'gateway', 'del_port', dev)
@nova.privsep.sys_admin_pctxt.entrypoint
def readpty(path):
# TODO(mikal): I'm not a huge fan that we don't enforce a valid pty path
# here, but I haven't come up with a great way of doing that.
# NOTE(mikal): I am deliberately not catching the ImportError
# exception here... Some platforms (I'm looking at you Windows)
# don't have a fcntl and we may as well let them know that
# with an ImportError, not that they should be calling this at all.
import fcntl
try:
with open(path, 'r') as f:
current_flags = fcntl.fcntl(f.fileno(), fcntl.F_GETFL)
fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
current_flags | os.O_NONBLOCK)
return f.read()
except Exception as exc:
# NOTE(mikal): dear internet, I see you looking at me with your
# judging eyes. There's a story behind why we do this. You see, the
# previous implementation did this:
#
# out, err = utils.execute('dd',
# 'if=%s' % pty,
# 'iflag=nonblock',
# run_as_root=True,
# check_exit_code=False)
# return out
#
# So, it never checked stderr or the return code of the process it
# ran to read the pty. Doing something better than that has turned
# out to be unexpectedly hard because there are a surprisingly large
# variety of errors which appear to be thrown when doing this read.
#
# Therefore for now we log the errors, but keep on rolling. Volunteers
# to help clean this up are welcome and will receive free beverages.
LOG.info(
'Ignored error while reading from instance console pty: %s', exc
)
return ''
@nova.privsep.sys_admin_pctxt.entrypoint
def xend_probe():
processutils.execute('xend', 'status', check_exit_code=True)
@nova.privsep.sys_admin_pctxt.entrypoint
def create_mdev(physical_device, mdev_type, uuid=None):
"""Instantiate a mediated device."""
if uuid is None:
uuid = uuidutils.generate_uuid()
fpath = '/sys/class/mdev_bus/{0}/mdev_supported_types/{1}/create'
fpath = fpath.format(physical_device, mdev_type)
with open(fpath, 'w') as f:
f.write(uuid)
return uuid
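# The sysfs node written to looks like the following (the PCI address and
# mdev type here are purely illustrative):
#
#     /sys/class/mdev_bus/0000:84:00.0/mdev_supported_types/nvidia-35/create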
@nova.privsep.sys_admin_pctxt.entrypoint
def systemd_run_qb_mount(qb_vol, mnt_base, cfg_file=None):
"""Mount QB volume in separate CGROUP"""
# Note(kaisers): Details on why we run without --user at bug #1756823
sysdr_cmd = ['systemd-run', '--scope', 'mount.quobyte', '--disable-xattrs',
qb_vol, mnt_base]
if cfg_file:
sysdr_cmd.extend(['-c', cfg_file])
return processutils.execute(*sysdr_cmd)
# NOTE(kaisers): this method is deliberately not wrapped in a privsep entry.
def unprivileged_qb_mount(qb_vol, mnt_base, cfg_file=None):
"""Mount QB volume"""
mnt_cmd = ['mount.quobyte', '--disable-xattrs', qb_vol, mnt_base]
if cfg_file:
mnt_cmd.extend(['-c', cfg_file])
return processutils.execute(*mnt_cmd)
@nova.privsep.sys_admin_pctxt.entrypoint
def umount(mnt_base):
"""Unmount volume"""
unprivileged_umount(mnt_base)
# NOTE(kaisers): this method is deliberately not wrapped in a privsep entry.
def unprivileged_umount(mnt_base):
"""Unmount volume"""
umnt_cmd = ['umount', mnt_base]
return processutils.execute(*umnt_cmd)
@nova.privsep.sys_admin_pctxt.entrypoint
def get_pmem_namespaces():
ndctl_cmd = ['ndctl', 'list', '-X']
nss_info = processutils.execute(*ndctl_cmd)[0]
return nss_info
@nova.privsep.sys_admin_pctxt.entrypoint
def cleanup_vpmem(devpath):
daxio_cmd = ['daxio', '-z', '-o', '%s' % devpath]
processutils.execute(*daxio_cmd)
|
|
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
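# Small usage sketch (illustrative data; each group is passed as its own
# array with features as columns):
#
#     import numpy as np
#     g1 = np.array([[1.0], [2.0], [3.0]])
#     g2 = np.array([[4.0], [5.0], [6.0]])
#     F, p = f_oneway(g1, g2)  # one F-value and p-value per feature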
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
The set of regressors that will be tested sequentially.
y : array of shape(n_samples)
The target vector.
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
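# Usage sketch for chi2 (illustrative counts; X must be non-negative):
#
#     import numpy as np
#     X = np.array([[1, 0], [2, 1], [0, 3], [1, 2]])
#     y = np.array([0, 0, 1, 1])
#     scores, pvalues = chi2(X, y)  # one score/p-value per feature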
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Linear model for testing the individual effect of each of many regressors.
This is a scoring function to be used in a feature selection procedure, not
a free standing feature selection procedure.
This is done in 2 steps:
1. The correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : array of shape(n_samples)
    The target vector.
center : bool, optional (default=True)
    If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
mutual_info_regression: Mutual information for a continuous target.
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
SelectPercentile: Select features based on percentile of the highest
scores.
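Examples
--------
An illustrative call on toy data (outputs omitted):
>>> import numpy as np
>>> X = np.array([[1., 2.], [2., 3.], [3., 1.], [4., 5.]])
>>> y = np.array([1., 2., 3., 4.])
>>> F, pval = f_regression(X, y)  # doctest: +SKIP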
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
# compute centered values
# note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we
# need not center X
if center:
y = y - np.mean(y)
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
# compute the scaled standard deviations via moments
X_norms = np.sqrt(row_norms(X.T, squared=True) -
n_samples * X_means ** 2)
else:
X_norms = row_norms(X.T)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= np.linalg.norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], multi_output=True)
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
self.scores_, self.pvalues_ = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=bool)
scores = _clean_nans(self.scores_)
threshold = stats.scoreatpercentile(scores,
100 - self.percentile)
mask = scores > threshold
ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = int(len(scores) * self.percentile / 100)
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
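Examples
--------
An illustrative use with the chi2 score function (toy data, outputs omitted):
>>> import numpy as np
>>> X = np.array([[1, 0, 3], [0, 2, 1], [4, 0, 0], [1, 1, 1]])
>>> y = np.array([0, 1, 0, 1])
>>> X_new = SelectKBest(chi2, k=2).fit_transform(X, y)  # doctest: +SKIP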
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
References
----------
https://en.wikipedia.org/wiki/False_discovery_rate
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
selected = sv[sv <= float(self.alpha) / n_features *
np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues). For modes 'percentile' or 'kbest' it can return
a single array scores.
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned scores only.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
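Examples
--------
An illustrative use selecting the two best features by chi2 (outputs omitted):
>>> import numpy as np
>>> X = np.array([[1, 0, 3], [0, 2, 1], [4, 0, 0], [1, 1, 1]])
>>> y = np.array([0, 1, 0, 1])
>>> transformer = GenericUnivariateSelect(chi2, mode='k_best', param=2)
>>> X_new = transformer.fit_transform(X, y)  # doctest: +SKIP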
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
|
|
#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Simple script to be run inside the chroot. Used as a fast approximation of
emerge-$board autotest-all, by simply rsync'ing changes from trunk to sysroot.
"""
import argparse
import logging
import os
import re
import sys
from collections import namedtuple
from chromite.buildbot import constants
from chromite.buildbot import portage_utilities
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite.lib import osutils
if cros_build_lib.IsInsideChroot():
# Only import portage after we've checked that we're inside the chroot.
import portage
INCLUDE_PATTERNS_FILENAME = 'autotest-quickmerge-includepatterns'
AUTOTEST_PROJECT_NAME = 'chromiumos/third_party/autotest'
AUTOTEST_EBUILD = 'chromeos-base/autotest'
DOWNGRADE_EBUILDS = ['chromeos-base/autotest',
'chromeos-base/autotest-tests',
'chromeos-base/autotest-chrome',
'chromeos-base/autotest-factory',
'chromeos-base/autotest-telemetry',
'chromeos-base/autotest-tests-ltp',
'chromeos-base/autotest-tests-ownershipapi']
# Data structure describing a single rsync filesystem change.
#
# change_description: An 11 character string, the rsync change description
# for the particular file.
# absolute_path: The absolute path of the created or modified file.
ItemizedChange = namedtuple('ItemizedChange', ['change_description',
'absolute_path'])
# Data structure describing the rsync new/modified files or directories.
#
# new_files: A list of ItemizedChange objects for new files.
# modified_files: A list of ItemizedChange objects for modified files.
# new_directories: A list of ItemizedChange objects for new directories.
ItemizedChangeReport = namedtuple('ItemizedChangeReport',
['new_files', 'modified_files',
'new_directories'])
def GetStalePackageNames(change_list, autotest_sysroot):
"""Given a rsync change report, returns the names of stale test packages.
This function pulls out test package names for client-side tests, stored
within the client/site_tests directory tree, that had any files added or
modified and for whom any existing bzipped test packages may now be stale.
Arguments:
change_list: A list of ItemizedChange objects corresponding to changed
or modified files.
autotest_sysroot: Absolute path of autotest in the sysroot,
e.g. '/build/lumpy/usr/local/autotest'
Returns:
A list of test package names, eg ['factory_Leds', 'login_UserPolicyKeys'].
May contain duplicate entries if multiple files within a test directory
were modified.
"""
exp = os.path.abspath(autotest_sysroot) + r'/client/site_tests/(.*?)/.*'
matches = [re.match(exp, change.absolute_path) for change in change_list]
return [match.group(1) for match in matches if match]
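# Example (illustrative, hypothetical sysroot and change):
#   changes = [ItemizedChange('>f+++++++++',
#       '/build/lumpy/usr/local/autotest/client/site_tests/'
#       'factory_Leds/factory_Leds.py')]
#   GetStalePackageNames(changes, '/build/lumpy/usr/local/autotest')
#   => ['factory_Leds']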
def ItemizeChangesFromRsyncOutput(rsync_output, destination_path):
"""Convert the output of an rsync with `-i` to a ItemizedChangeReport object.
Arguments:
rsync_output: String stdout of rsync command that was run with `-i` option.
destination_path: String absolute path of the destination directory for the
rsync operations. This argument is necessary because
rsync's output only gives the relative path of
touched/added files.
Returns:
ItemizedChangeReport object giving the absolute paths of files that were
created or modified by rsync.
"""
modified_matches = re.findall(r'([.>]f[^+]{9}) (.*)', rsync_output)
new_matches = re.findall(r'(>f\+{9}) (.*)', rsync_output)
new_symlink_matches = re.findall(r'(cL\+{9}) (.*) -> .*', rsync_output)
new_dir_matches = re.findall(r'(cd\+{9}) (.*)', rsync_output)
absolute_modified = [ItemizedChange(c, os.path.join(destination_path, f))
for (c, f) in modified_matches]
# Note: new symlinks are treated as new files.
absolute_new = [ItemizedChange(c, os.path.join(destination_path, f))
for (c, f) in new_matches + new_symlink_matches]
absolute_new_dir = [ItemizedChange(c, os.path.join(destination_path, f))
for (c, f) in new_dir_matches]
return ItemizedChangeReport(new_files=absolute_new,
modified_files=absolute_modified,
new_directories=absolute_new_dir)
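# Example (illustrative): given rsync -i output lines such as
#   >f+++++++++ client/foo.py    (newly created file)
#   >f.st...... client/bar.py    (modified file)
#   cd+++++++++ client/baz/      (newly created directory)
# the report holds each path joined onto destination_path, sorted into
# new_files, modified_files and new_directories respectively.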
def GetPackageAPI(portage_root, package_cp):
"""Gets portage API handles for the given package.
Arguments:
portage_root: Root directory of portage tree. Eg '/' or '/build/lumpy'
package_cp: A string similar to 'chromeos-base/autotest-tests'.
Returns:
Returns (package, vartree) tuple, where
package is of type portage.dbapi.vartree.dblink
vartree is of type portage.dbapi.vartree.vartree
"""
if portage_root is None:
# pylint: disable-msg=E1101
portage_root = portage.root
# Ensure that portage_root ends with trailing slash.
portage_root = os.path.join(portage_root, '')
# Create a vartree object corresponding to portage_root.
trees = portage.create_trees(portage_root, portage_root)
vartree = trees[portage_root]['vartree']
# List the matching installed packages in cpv format.
matching_packages = vartree.dbapi.cp_list(package_cp)
if not matching_packages:
raise ValueError('No matching package for %s in portage_root %s' % (
package_cp, portage_root))
if len(matching_packages) > 1:
raise ValueError('Too many matching packages for %s in portage_root '
'%s' % (package_cp, portage_root))
# Convert string match to package dblink.
package_cpv = matching_packages[0]
package_split = portage_utilities.SplitCPV(package_cpv)
# pylint: disable-msg=E1101
package = portage.dblink(package_split.category,
package_split.pv, settings=vartree.settings,
vartree=vartree)
return package, vartree
def DowngradePackageVersion(portage_root, package_cp,
downgrade_to_version='0'):
"""Downgrade the specified portage package version.
Arguments:
portage_root: Root directory of portage tree. Eg '/' or '/build/lumpy'
package_cp: A string similar to 'chromeos-base/autotest-tests'.
downgrade_to_version: String version to downgrade to. Default: '0'
Returns:
True on success. False on failure (nonzero return code from `mv` command).
"""
package, _ = GetPackageAPI(portage_root, package_cp)
source_directory = package.dbdir
destination_path = os.path.join(
package.dbroot, package_cp + '-' + downgrade_to_version)
if os.path.abspath(source_directory) == os.path.abspath(destination_path):
return True
command = ['mv', source_directory, destination_path]
code = cros_build_lib.SudoRunCommand(command, error_code_ok=True).returncode
return code == 0
def UpdatePackageContents(change_report, package_cp, portage_root=None):
"""Add newly created files/directors to package contents.
Given an ItemizedChangeReport, add the newly created files and directories
to the CONTENTS of an installed portage package, such that these files are
considered owned by that package.
Arguments:
change_report: ItemizedChangeReport object for the changes to be
made to the package.
package_cp: A string similar to 'chromeos-base/autotest-tests' giving
the package category and name of the package to be altered.
portage_root: Portage root path, corresponding to the board that
we are working on. Defaults to '/'
"""
package, vartree = GetPackageAPI(portage_root, package_cp)
# Append new contents to package contents dictionary.
contents = package.getcontents().copy()
for _, filename in change_report.new_files:
contents.setdefault(filename, (u'obj', '0', '0'))
for _, dirname in change_report.new_directories:
# Strip trailing slashes if present.
contents.setdefault(dirname.rstrip('/'), (u'dir',))
# Write new contents dictionary to file.
vartree.dbapi.writeContentsToContentsFile(package, contents)
def RemoveTestPackages(stale_packages, autotest_sysroot):
"""Remove bzipped test packages from sysroot.
Arguments:
stale_packages: List of test packages names to be removed.
e.g. ['factory_Leds', 'login_UserPolicyKeys']
autotest_sysroot: Absolute path of autotest in the sysroot,
e.g. '/build/lumpy/usr/local/autotest'
"""
for package in set(stale_packages):
package_filename = 'test-' + package + '.tar.bz2'
package_file_fullpath = os.path.join(autotest_sysroot, 'packages',
package_filename)
if osutils.SafeUnlink(package_file_fullpath):
logging.info('Removed stale %s', package_file_fullpath)
def RsyncQuickmerge(source_path, sysroot_autotest_path,
include_pattern_file=None, pretend=False,
overwrite=False):
"""Run rsync quickmerge command, with specified arguments.
Command will take form `rsync -a [options] --exclude=**.pyc
--exclude=**.pyo
[optional --include-from argument]
--exclude=* [source_path] [sysroot_autotest_path]`
Arguments:
source_path: Directory to rsync from.
sysroot_autotest_path: Directory to rsync into.
include_pattern_file: Path of a file of rsync include patterns, or None.
pretend: True to use the '-n' option to rsync, to perform dry run.
overwrite: True to omit '-u' option, overwrite all files in sysroot,
not just older files.
Returns:
The cros_build_lib.CommandResult object resulting from the rsync command.
"""
command = ['rsync', '-a']
if pretend:
command += ['-n']
if not overwrite:
command += ['-u']
command += ['-i']
command += ['--exclude=**.pyc']
command += ['--exclude=**.pyo']
# Exclude files with a specific substring in their name, because
# they create an ambiguous itemized report. (see unit test file for details)
command += ['--exclude=** -> *']
if include_pattern_file:
command += ['--include-from=%s' % include_pattern_file]
command += ['--exclude=*']
command += [source_path, sysroot_autotest_path]
return cros_build_lib.SudoRunCommand(command, redirect_stdout=True)
def ParseArguments(argv):
"""Parse command line arguments
Returns: parsed arguments.
"""
parser = argparse.ArgumentParser(description='Perform a fast approximation '
'to emerge-$board autotest-all, by '
'rsyncing source tree to sysroot.')
parser.add_argument('--board', metavar='BOARD', default=None, required=True)
parser.add_argument('--pretend', action='store_true',
help='Dry run only, do not modify sysroot autotest.')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite existing files even if newer.')
parser.add_argument('--verbose', action='store_true',
help='Print detailed change report.')
return parser.parse_args(argv)
def main(argv):
cros_build_lib.AssertInsideChroot()
args = ParseArguments(argv)
if os.geteuid() != 0:
try:
cros_build_lib.SudoRunCommand([sys.executable] + sys.argv)
except cros_build_lib.RunCommandError:
return 1
return 0
if not args.board:
print 'No board specified. Aborting.'
return 1
manifest = git.ManifestCheckout.Cached(constants.SOURCE_ROOT)
source_path = manifest.GetProjectPath(AUTOTEST_PROJECT_NAME, absolute=True)
source_path = os.path.join(source_path, '')
script_path = os.path.dirname(__file__)
include_pattern_file = os.path.join(script_path, INCLUDE_PATTERNS_FILENAME)
# TODO: Determine the following string programmatically.
sysroot_path = os.path.join('/build', args.board, '')
sysroot_autotest_path = os.path.join(sysroot_path, 'usr', 'local',
'autotest', '')
rsync_output = RsyncQuickmerge(source_path, sysroot_autotest_path,
include_pattern_file, args.pretend,
args.overwrite)
if args.verbose:
logging.info(rsync_output.output)
change_report = ItemizeChangesFromRsyncOutput(rsync_output.output,
sysroot_autotest_path)
if not args.pretend:
UpdatePackageContents(change_report, AUTOTEST_EBUILD,
sysroot_path)
for ebuild in DOWNGRADE_EBUILDS:
if not DowngradePackageVersion(sysroot_path, ebuild):
logging.warning('Unable to downgrade package %s version number.',
ebuild)
stale_packages = GetStalePackageNames(
change_report.new_files + change_report.modified_files,
sysroot_autotest_path)
RemoveTestPackages(stale_packages, sysroot_autotest_path)
osutils.SafeUnlink(os.path.join(sysroot_autotest_path, 'packages.checksum'))
osutils.SafeUnlink(os.path.join(sysroot_autotest_path, 'packages',
'packages.checksum'))
if args.pretend:
logging.info('The following message is pretend only. No filesystem '
'changes made.')
logging.info('Quickmerge complete. Created or modified %s files.',
len(change_report.new_files) +
len(change_report.modified_files))
return 0
|
|
import inspect # This might get nasty.
# Hope my future me does not have to regret this xD
import copy
import itertools
from core.expression import BaseExpression, Sequence
import core.exceptions as myexc
from core.TOS import _TOS as TOS
class _Eval( object ):
def __init__( self, to_eval ):
self.to_eval = to_eval
def _eval( self, match_dict, _globals, _locals ):
# [TODO] CAREFUL!!! strings are deprecated
if isinstance( self.to_eval, str ):
if self.to_eval == "":
return True
#print( "Usage of strings as constraints/replacements is deprecated", self.to_eval )
env_locals = dict( _locals )
env_locals.update(match_dict)
env_globals = dict( _globals )
try:
return eval( self.to_eval, env_globals, env_locals )
# Comprehension lists and eval break things...
except NameError as e:
env_globals.update(match_dict)
#var = e.message.split("'")[1]
#var = str(e).split("'")[1]
#if var in TOS:
#env_locals[ var ] = TOS.get_operands()[ var ]
env_locals.update( dict([ (k,v[0]) for k,v in TOS.items() if k not in env_locals ]) )
return eval( self.to_eval, env_globals, env_locals)
except TypeError as e:
print(e)
print( self.to_eval )
raise e
elif isinstance( self.to_eval, BaseExpression ):
return self.to_eval
elif inspect.isfunction( self.to_eval ):
return self.to_eval( match_dict )
else:
print(self.to_eval.__class__)
raise TypeError( self.to_eval.__class__ )
class Constraint( _Eval ):
pass
class Replacement( _Eval ):
pass
class RewriteRule( object ):
def __init__( self, pattern, replacement ):
self.pattern = pattern
self.replacement = replacement
def __iter__( self ):
yield self.pattern
yield self.replacement
def __repr__( self ):
if isinstance( self.pattern, BaseExpression ):
pattern = self.pattern
constraint = ""
else:
pattern, constraint = self.pattern
constraint = constraint.to_eval
return "%s /; %s -> %s" % (pattern, constraint, self.replacement.to_eval)
# Pattern matching context
class PM_context( object ):
def __init__( self ):
self.stack_patt = []
self.stack_expr = []
self.match = {}
def __copy__( self ):
cls = self.__class__
new = cls.__new__(cls)
#new = PM_context()
new.stack_patt = self.stack_patt[:]
new.stack_expr = self.stack_expr[:]
new.match = self.match.copy()
return new
def match( expr, pattern, caller_globals=None, caller_locals=None ):
if isinstance( pattern, BaseExpression ):
patt_expr = pattern
#constraint = Constraint("True")
constraint = Constraint( lambda d: True )
else:
patt_expr, constraint = pattern
ctx = PM_context()
expr_seq = Sequence( [expr] )
patt_seq = Sequence( [patt_expr] )
#ctx.stack_expr.append( expr )
#patt.match( ctx )
ctx.stack_expr.append( expr_seq )
# needed for eval to succeed
if caller_globals is None:
caller_globals = inspect.stack()[1][0].f_globals
#caller_globals = {}
if caller_locals is None:
caller_locals = inspect.stack()[1][0].f_locals
#caller_locals = {}
for match in patt_seq.match( ctx ):
if constraint._eval( match, caller_globals, caller_locals ):
yield match
#else:
#print( "Match %s failed" % match )
def matchq( expr, patt ):
caller_globals = inspect.stack()[1][0].f_globals
caller_locals = inspect.stack()[1][0].f_locals
for m in match( expr, patt, caller_globals, caller_locals ):
return True
return False
# [TODO] Try to write replace_all in terms of replace
def replace( expr, rewrite_rules ):
# needed for eval to succeed
caller_globals = inspect.stack()[1][0].f_globals
caller_locals = inspect.stack()[1][0].f_locals
#caller_globals = {}
#caller_locals = {}
#
stack = [expr._postorder_stack()] # ( node, (parent,pos), ((node, ()), ...) )
#stack = [expr._postorder_stack()] # ( node, (node, ()) )
#positions = dict( [(node, pos) for node, pos in expr._preorder_position()] )
while len(stack) > 0:
#node, children = stack.pop()
node, (parent,pos), children = stack.pop()
any_rule_applied = False
for pattern, replacement in rewrite_rules:
for _m in match( node, pattern, caller_globals, caller_locals ):
#parent, pos = positions[ id(node) ]
evaled_repl = replacement._eval( _m, caller_globals, caller_locals )
if parent:
parent.set_children( pos, evaled_repl )
else:
expr = evaled_repl
any_rule_applied = True
#expr._cleanup()
break
if any_rule_applied:
break
if not any_rule_applied:
stack.extend( reversed(children) )
#print( stack )
expr = expr._cleanup()
return expr
class dummy():
def get_head(self):
return ""
def replace_all( expr, rewrite_rules ):
# needed for eval to succeed
caller_globals = inspect.stack()[1][0].f_globals
caller_locals = inspect.stack()[1][0].f_locals
#caller_globals = {}
#caller_locals = {}
# "Fixed point"
niters = 0
keep_replacing = True
# [TODO] Fixed point
#last_expr = dummy()
while keep_replacing:
#while expr != last_expr:
#last_expr = expr
#
niters += 1
if niters > myexc.MAX_REPLACEMENT_ITERATIONS:
raise myexc.MaxReplacementIterationsExceeded
keep_replacing = False
stack = [expr._postorder_stack()] # ( node, (parent,pos), ((node, ()), ...) )
#positions = dict( [(node, pos) for node, pos in expr._preorder_position()] )
while len(stack) > 0:
#node, children = stack.pop()
node, (parent, pos), children = stack.pop()
any_rule_applied = False
for pattern, replacement in rewrite_rules:
for _m in match( node, pattern, caller_globals, caller_locals ):
evaled_repl = replacement._eval( _m, caller_globals, caller_locals )
if parent:
parent.set_children( pos, evaled_repl )
else:
expr = evaled_repl
any_rule_applied = True
keep_replacing = True
#expr._cleanup()
break
if any_rule_applied:
break
if not any_rule_applied:
stack.extend( reversed(children) )
expr = expr._cleanup() # [CHECK] was unindented one level (for chol; after updates to move a minus in times to first argument)
return expr
# depth >= 0
def map_thread( f, iterables, depth ):
if depth == 0:
return f(Sequence(iterables))
else:
return [ map_thread(f, z, depth-1) for z in zip(*iterables) ]
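# Example (illustrative): with depth == 1,
#   map_thread(f, [[a1, a2], [b1, b2]], 1)
# zips the iterables and returns [f(Sequence((a1, b1))), f(Sequence((a2, b2)))].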
def contains( expr, subexpr ):
# needed for eval to succeed
#caller_globals = inspect.stack()[1][0].f_globals
#caller_locals = inspect.stack()[1][0].f_locals
caller_globals = {}
caller_locals = {}
for node in expr.iterate_preorder():
for _ in match( node, subexpr, caller_globals, caller_locals ):
return True
return False
|
|
#!/bin/py
#
# Integrate exodus II file in space
#
import sys
import glob
#
# need vtk environment to read EXOII
#
try:
import vtk
except ImportError:
sys.stderr.write("Error: Can't find the file 'vtk.py'...\n")
sys.stderr.write("Did you invoke with pvpython?\n")
sys.stderr.write("Is the paraview module loaded?\n")
sys.exit(1)
# -----------------------------------------------#
# Average Fields #
# -----------------------------------------------#
def average_fields(names, var):
""" given list of filenames and var, generate average field """
# open each file, and add the field data
# to our averaged array
for name in names:
d = vtk.vtkExodusIIReader()
d.SetFileName(name)
d.UpdateInformation()
d.SetPointResultArrayStatus(var,1)
d.Update()
blocks = d.GetOutput().GetNumberOfBlocks()
data = d.GetOutput()
#
# range to integrate over
#
height = 0.804380714893
thresh = 0.004
# open text file
f = open('slices/'+name+'.txt', 'w')
for j in xrange(blocks):
blk = data.GetBlock(0).GetBlock(j)
try:
pts = blk.GetNumberOfPoints()
except:
ss='no data in this block'
else:
pt_data = blk.GetPointData().GetArray(var)
if pt_data is None:
print 'Nada!'
print 'I cannot find the variable field: ', var
print 'Exiting...'
sys.exit(1)
for i in xrange(pts):
# gather x,y,z location
z,y,x = blk.GetPoint(i)
if(abs(z-height)<thresh):
# gather point scalar value
u = pt_data.GetValue(i)
f.write(str(x)+' , '+str(y)+' , '+str(u)+'\n')
# close file
f.close()
#
# steady as she goes
#
return 0
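# Example output line in slices/<name>.txt (illustrative values):
#   0.5 , 0.25 , 0.0123
# i.e. "x , y , u" for each point within thresh of the slice height.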
# -----------------------------------------------#
# Perform Radial Integration #
# -----------------------------------------------#
def integrate(name, var):
""" given filename and var, generate profile """
d = vtk.vtkExodusIIReader()
d.SetFileName(name)
d.UpdateInformation()
print var
d.SetPointResultArrayStatus(var,1)
d.Update()
blocks = d.GetOutput().GetNumberOfBlocks()
data = d.GetOutput()
#
# range to integrate over
#
height = 0.804380714893
thresh = 0.004
rmin = 0.0
rmax = 1.0
nr = 10
dr = (rmax-rmin)/nr
#
# print data.GetBlock(0).GetBlock(0).GetPointData().GetArray(var).GetValue(0)
#
for j in xrange(blocks):
blk = data.GetBlock(0).GetBlock(j)
try:
pts = blk.GetNumberOfPoints()
except:
ss='no data in this block'
else:
# grabbing vtkDataArray
pt_data = blk.GetPointData().GetArray(var)
if pt_data is None:
print 'Nada!'
print 'I cannot find the variable field: ', var
print 'Exiting...'
sys.exit(1)
for i in xrange(pts):
# gather x,y,z location
z,y,x = blk.GetPoint(i)
# gather point scalar value
#print dir(pt_data)
u = pt_data.GetValue(i)
print u
# -----------------------------------------------#
# Inform user precisely what we are capable of. #
# -----------------------------------------------#
def help_msg():
print 'usage: python int.py command list'
print 'commands:'
print ' -l: list of files'
print ' -f: single file'
print ' -a: all exodus files in pwd'
print ' --var={T,u,v,w,P}: set variable'
print ' --plot: plots results'
sys.exit(0)
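# Example invocation (illustrative):
#   python int.py -a --var=T --plot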
# -----------------------------------------------#
# Main Function #
# -----------------------------------------------#
if __name__ == "__main__":
""" When invoked from command line """
# -----------------------------------------------#
# Input Parsing #
# -----------------------------------------------#
#
# see if user needs help
#
if(len(sys.argv) <= 1):
help_msg()
else:
#
# find if --var is present in list
# then, save data and delete
#
if ('--var' in str(sys.argv)):
vart = [x for x in sys.argv if '--var=' in x]
var = str(vart[0])[6:]
print 'Setting parameter to: ', var
# now, find and delete all --var elements
# iterate over a copy: removing from a list while iterating skips items
for i in sys.argv[:]:
if '--var' in str(i):
sys.argv.remove(i)
else:
print 'No parameter given, defaulting to W'
var = "w"
#
# set plot?
#
plot=0
for i in sys.argv[:]:
if '--plot' in str(i):
plot=1
sys.argv.remove(i)
#
# provide help
#
if ('-h' in str(sys.argv[1]) or '--help' in str(sys.argv[1]) ):
help_msg()
#
# iterate over list of files
#
if ('-l' in str(sys.argv[1])):
names=[]
for i in xrange(2,len(sys.argv)):
names.append(sys.argv[i])
if not names:
print 'No files given!'
print 'Exiting...'
sys.exit(0)
#
# single file
#
if ('-f' in str(sys.argv[1])):
names=[str(sys.argv[2])]
#
# all files in pwd
#
if ('-a' in str(sys.argv[1])):
names = sorted(glob.glob("*.exo"))
if not names:
print 'No exodusII files detected!'
print 'Exiting...'
sys.exit(0)
# -----------------------------------------------#
# Finished Input Parsing #
# -----------------------------------------------#
#
# iterate + integrate over files
#
#for n in names:
# print 'integrating fields...'
# integrate(n,var)
#
# average fields
#
print 'slicing fields...'
average_fields(names,var)
#
# plot?
#
if plot == 1:
print 'plotting results...'
#
#
# nick
# 9/9/14
#
#
|
|
from typing import Tuple
from ..base import ParametrizedValue
from ..utils import listify
if False: # pragma: nocover
from .routing_modifiers import Modifier # noqa
class Socket(ParametrizedValue):
opt_key = ''
args_joiner = ','
def __init__(self, address, *, bound_workers=None, modifier=None):
"""
:param str|SocketShared address: Address ([host]:port or socket file) to bind socket to.
:param str|int|list bound_workers: Map socket to specific workers.
As you can bind a uWSGI instance to multiple sockets, you can use this option to map
specific workers to specific sockets to implement a sort of in-process Quality of Service scheme.
If you host multiple apps in the same uWSGI instance, you can easily dedicate resources to each of them.
:param Modifier modifier: Socket routing modifier.
"""
self.address = address
self.bound_workers = listify(bound_workers or [])
self._make_section_like()
if modifier:
self._set(f'{self.name}-modifier1', modifier.code)
submod = modifier.submod
if submod:
self._set(f'{self.name}-modifier2', submod)
super().__init__()
def __str__(self):
if self.address not in self.args:
self.args.insert(0, self.address)
result = super().__str__()
self.args.pop(0)
return result
class SocketDefault(Socket):
"""Bind using default protocol. See ``default_socket_type`` option."""
name = 'socket'
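# Example (illustrative): dedicate workers to each of two sockets:
#   SocketDefault(':8000', bound_workers=[1, 2])
#   SocketDefault(':8001', bound_workers=[3, 4])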
class SocketHttp(Socket):
"""Bind to the specified socket using HTTP"""
name = 'http-socket'
def __init__(self, address, *, http11=False, bound_workers=None, modifier=None):
"""
:param str|SocketShared address: Address ([host]:port or socket file) to bind socket to.
:param bool http11: Keep-Alive support. If set the server will try to maintain
the connection opened if a bunch of rules are respected.
This is not a smart http 1.1 parser (to avoid parsing the whole response)
but assumes the developer is generating the right headers.
This has been added to support RTSP protocol for video streaming.
:param str|int|list bound_workers: Map socket to specific workers.
As you can bind a uWSGI instance to multiple sockets, you can use this option to map
specific workers to specific sockets to implement a sort of in-process Quality of Service scheme.
If you host multiple apps in the same uWSGI instance, you can easily dedicate resources to each of them.
:param Modifier modifier: Socket routing modifier.
"""
if http11:
self.name = 'http11-socket'
super().__init__(address, bound_workers=bound_workers, modifier=modifier)
class SocketHttps(Socket):
"""Bind to the specified socket using HTTPS"""
name = 'https-socket'
def __init__(self, address, *, cert, key, ciphers=None, client_ca=None, bound_workers=None, modifier=None):
"""
:param str|SocketShared address: Address ([host]:port or socket file) to bind socket to.
:param str cert: Certificate file.
:param str key: Private key file.
:param str ciphers: Ciphers [alias] string.
Example:
* DEFAULT
* HIGH
* DHE, EDH
* https://www.openssl.org/docs/man1.1.0/apps/ciphers.html
:param str client_ca: Client CA file for client-based auth.
.. note:: You can prepend ! (exclamation mark) to make client certificate
authentication mandatory.
:param str|int|list bound_workers: Map socket to specific workers.
As you can bind a uWSGI instance to multiple sockets, you can use this option to map
specific workers to specific sockets to implement a sort of in-process Quality of Service scheme.
If you host multiple apps in the same uWSGI instance, you can easily dedicate resources to each of them.
:param Modifier modifier: Socket routing modifier.
"""
super().__init__(address, bound_workers=bound_workers, modifier=modifier)
args = [cert, key]
if ciphers or client_ca:
args.extend([ciphers or '', client_ca or ''])
self.args.extend(args)
@classmethod
def get_certbot_paths(cls, domain: str) -> Tuple[str, str]:
"""Returns a tuple of paths for files (certificates_chain, private_key)
from Certbot https://certbot.eff.org
Those paths can be used to pass into Socket initializer.
.. note:: If the files are not found, empty strings are returned.
:param domain: Domain name to get filepaths for.
"""
from pathlib import Path
certs_root = Path('/etc/letsencrypt/live/')
certs_chain = certs_root / domain / 'fullchain.pem'
certs_private = certs_root / domain / 'privkey.pem'
if certs_chain.exists() and certs_private.exists():
return str(certs_chain), str(certs_private)
return '', ''
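# Example (hypothetical domain):
#   chain, key = SocketHttps.get_certbot_paths('mydomain.org')
#   if chain:
#       https_socket = SocketHttps(':443', cert=chain, key=key)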
class SocketUwsgi(Socket):
"""uwSGI specific socket using ``uwsgi`` protocol."""
name = 'uwsgi-socket'
def __init__(self, address, *, persistent=False, bound_workers=None, modifier=None):
"""
:param str|SocketShared address: Address ([host]:port or socket file) to bind socket to.
:param bool persistent: Use persistent uwsgi protocol (puwsgi).
:param str|int|list bound_workers: Map socket to specific workers.
As you can bind a uWSGI instance to multiple sockets, you can use this option to map
specific workers to specific sockets to implement a sort of in-process Quality of Service scheme.
If you host multiple apps in the same uWSGI instance, you can easily dedicate resources to each of them.
:param Modifier modifier: Socket routing modifier.
"""
if persistent:
self.name = 'puwsgi-socket'
super().__init__(address, bound_workers=bound_workers, modifier=modifier)
class SocketUwsgis(SocketHttps):
"""uwSGI specific socket using ``uwsgi`` protocol over SSL."""
name = 'suwsgi-socket'
class SocketUdp(Socket):
"""Run the udp server on the specified address.
.. note:: Mainly useful for SNMP or shared UDP logging.
"""
name = 'udp'
class SocketFastcgi(Socket):
"""Bind to the specified socket using FastCGI."""
name = 'fastcgi-socket'
def __init__(self, address, *, nph=False, bound_workers=None, modifier=None):
"""
:param str|SocketShared address: Address ([host]:port or socket file) to bind socket to.
:param bool nph: Use NPH mode ("no-parsed-header" - bypass the server completely by sending
the complete HTTP header directly to the browser).
:param str|int|list bound_workers: Map socket to specific workers.
As you can bind a uWSGI instance to multiple sockets, you can use this option to map
specific workers to specific sockets to implement a sort of in-process Quality of Service scheme.
If you host multiple apps in the same uWSGI instance, you can easily dedicate resources to each of them.
:param Modifier modifier: Socket routing modifier.
"""
if nph:
self.name = 'fastcgi-nph-socket'
super().__init__(address, bound_workers=bound_workers, modifier=modifier)
class SocketScgi(Socket):
"""Bind to the specified UNIX/TCP socket using SCGI protocol."""
name = 'scgi-socket'
def __init__(self, address, *, nph=False, bound_workers=None, modifier=None):
"""
:param str|SocketShared address: Address ([host]:port or socket file) to bind socket to.
:param bool nph: Use NPH mode ("no-parsed-header" - bypass the server completely by sending
the complete HTTP header directly to the browser).
:param str|int|list bound_workers: Map socket to specific workers.
As you can bind a uWSGI instance to multiple sockets, you can use this option to map
specific workers to specific sockets to implement a sort of in-process Quality of Service scheme.
If you host multiple apps in the same uWSGI instance, you can easily dedicate resources to each of them.
:param Modifier modifier: Socket routing modifier.
"""
if nph:
self.name = 'scgi-nph-socket'
super().__init__(address, bound_workers=bound_workers, modifier=modifier)
class SocketRaw(Socket):
"""Bind to the specified UNIX/TCP socket using RAW protocol.
Raw mode allows you to directly parse the request in your application callable.
Instead of getting a list of CGI vars/headers in your callable you only get
the file descriptor soon after accept().
You can then read()/write() to that file descriptor in full freedom.
.. note:: Raw mode disables request logging.
.. warning:: Use it as a low-level socket wrapper.
"""
name = 'raw-socket'
class SocketShared(Socket):
"""Create a shared socket for advanced jailing or IPC purposes.
Allows you to create a socket early in the server's startup
and use it after privileges drop or jailing. This can be used
to bind to privileged (<1024) ports.
Shared sockets are a way to share sockets among various uWSGI components:
you can use that to share a socket between the fastrouter and uWSGI instance.
"""
name = 'shared-socket'
def __init__(self, address, *, undeferred=False, bound_workers=None, modifier=None):
"""
:param str address: Address ([host]:port or socket file) to bind socket to.
:param bool undeferred: Use shared socket undeferred mode.
:param str|int|list bound_workers: Map socket to specific workers.
As you can bind a uWSGI instance to multiple sockets, you can use this option to map
specific workers to specific sockets to implement a sort of in-process Quality of Service scheme.
If you host multiple apps in the same uWSGI instance, you can easily dedicate resources to each of them.
:param Modifier modifier: Socket routing modifier.
"""
if undeferred:
self.name = 'undeferred-shared-socket'
super().__init__(address, bound_workers=bound_workers, modifier=modifier)
class SocketZeromq(Socket):
"""Introduce zeromq pub/sub pair."""
name = 'zeromq-socket'
|
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
)
class LimelightBaseIE(InfoExtractor):
_PLAYLIST_SERVICE_URL = 'http://production-ps.lvp.llnw.net/r/PlaylistService/%s/%s/%s'
_API_URL = 'http://api.video.limelight.com/rest/organizations/%s/%s/%s/%s.json'
def _call_playlist_service(self, item_id, method, fatal=True):
return self._download_json(
self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method),
item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal)
def _call_api(self, organization_id, item_id, method):
return self._download_json(
self._API_URL % (organization_id, self._API_PATH, item_id, method),
item_id, 'Downloading API %s JSON' % method)
def _extract(self, item_id, pc_method, mobile_method, meta_method):
pc = self._call_playlist_service(item_id, pc_method)
metadata = self._call_api(pc['orgId'], item_id, meta_method)
mobile = self._call_playlist_service(item_id, mobile_method, fatal=False)
return pc, mobile, metadata
def _extract_info(self, streams, mobile_urls, properties):
video_id = properties['media_id']
formats = []
for stream in streams:
stream_url = stream.get('url')
if not stream_url:
continue
if '.f4m' in stream_url:
formats.extend(self._extract_f4m_formats(
stream_url, video_id, fatal=False))
else:
fmt = {
'url': stream_url,
'abr': float_or_none(stream.get('audioBitRate')),
'vbr': float_or_none(stream.get('videoBitRate')),
'fps': float_or_none(stream.get('videoFrameRate')),
'width': int_or_none(stream.get('videoWidthInPixels')),
'height': int_or_none(stream.get('videoHeightInPixels')),
'ext': determine_ext(stream_url)
}
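# RTMP URLs of the form rtmp(e)://host/app/mp4:playpath are split so the
# downloader gets separate url/app/play_path values.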
rtmp = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', stream_url)
if rtmp:
format_id = 'rtmp'
if stream.get('videoBitRate'):
format_id += '-%d' % int_or_none(stream['videoBitRate'])
fmt.update({
'url': rtmp.group('url'),
'play_path': rtmp.group('playpath'),
'app': rtmp.group('app'),
'ext': 'flv',
'format_id': format_id,
})
formats.append(fmt)
for mobile_url in mobile_urls:
media_url = mobile_url.get('mobileUrl')
if not media_url:
continue
format_id = mobile_url.get('targetMediaPlatform')
if determine_ext(media_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', 'm3u8_native',
m3u8_id=format_id, fatal=False))
else:
formats.append({
'url': media_url,
'format_id': format_id,
'preference': -1,
})
self._sort_formats(formats)
title = properties['title']
description = properties.get('description')
timestamp = int_or_none(properties.get('publish_date') or properties.get('create_date'))
duration = float_or_none(properties.get('duration_in_milliseconds'), 1000)
filesize = int_or_none(properties.get('total_storage_in_bytes'))
categories = [properties.get('category')]
tags = properties.get('tags', [])
thumbnails = [{
'url': thumbnail['url'],
'width': int_or_none(thumbnail.get('width')),
'height': int_or_none(thumbnail.get('height')),
} for thumbnail in properties.get('thumbnails', []) if thumbnail.get('url')]
subtitles = {}
for caption in properties.get('captions', []):
lang = caption.get('language_code')
subtitles_url = caption.get('url')
if lang and subtitles_url:
subtitles[lang] = [{
'url': subtitles_url,
}]
return {
'id': video_id,
'title': title,
'description': description,
'formats': formats,
'timestamp': timestamp,
'duration': duration,
'filesize': filesize,
'categories': categories,
'tags': tags,
'thumbnails': thumbnails,
'subtitles': subtitles,
}
class LimelightMediaIE(LimelightBaseIE):
IE_NAME = 'limelight'
_VALID_URL = r'(?:limelight:media:|https?://link\.videoplatform\.limelight\.com/media/\??\bmediaId=)(?P<id>[a-z0-9]{32})'
_TESTS = [{
'url': 'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86',
'info_dict': {
'id': '3ffd040b522b4485b6d84effc750cd86',
'ext': 'flv',
'title': 'HaP and the HB Prince Trailer',
'description': 'md5:8005b944181778e313d95c1237ddb640',
'thumbnail': 're:^https?://.*\.jpeg$',
'duration': 144.23,
'timestamp': 1244136834,
'upload_date': '20090604',
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
# video with subtitles
'url': 'limelight:media:a3e00274d4564ec4a9b29b9466432335',
'info_dict': {
'id': 'a3e00274d4564ec4a9b29b9466432335',
'ext': 'flv',
'title': '3Play Media Overview Video',
'description': '',
'thumbnail': 're:^https?://.*\.jpeg$',
'duration': 78.101,
'timestamp': 1338929955,
'upload_date': '20120605',
'subtitles': 'mincount:9',
},
'params': {
# rtmp download
'skip_download': True,
},
}]
_PLAYLIST_SERVICE_PATH = 'media'
_API_PATH = 'media'
def _real_extract(self, url):
video_id = self._match_id(url)
pc, mobile, metadata = self._extract(
video_id, 'getPlaylistByMediaId', 'getMobilePlaylistByMediaId', 'properties')
return self._extract_info(
pc['playlistItems'][0].get('streams', []),
mobile['mediaList'][0].get('mobileUrls', []) if mobile else [],
metadata)
class LimelightChannelIE(LimelightBaseIE):
IE_NAME = 'limelight:channel'
_VALID_URL = r'(?:limelight:channel:|https?://link\.videoplatform\.limelight\.com/media/\??\bchannelId=)(?P<id>[a-z0-9]{32})'
_TEST = {
'url': 'http://link.videoplatform.limelight.com/media/?channelId=ab6a524c379342f9b23642917020c082',
'info_dict': {
'id': 'ab6a524c379342f9b23642917020c082',
'title': 'Javascript Sample Code',
},
'playlist_mincount': 3,
}
_PLAYLIST_SERVICE_PATH = 'channel'
_API_PATH = 'channels'
def _real_extract(self, url):
channel_id = self._match_id(url)
pc, mobile, medias = self._extract(
channel_id, 'getPlaylistByChannelId',
'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1', 'media')
entries = [
self._extract_info(
pc['playlistItems'][i].get('streams', []),
mobile['mediaList'][i].get('mobileUrls', []) if mobile else [],
medias['media_list'][i])
for i in range(len(medias['media_list']))]
return self.playlist_result(entries, channel_id, pc['title'])
class LimelightChannelListIE(LimelightBaseIE):
IE_NAME = 'limelight:channel_list'
_VALID_URL = r'(?:limelight:channel_list:|https?://link\.videoplatform\.limelight\.com/media/\?.*?\bchannelListId=)(?P<id>[a-z0-9]{32})'
_TEST = {
'url': 'http://link.videoplatform.limelight.com/media/?channelListId=301b117890c4465c8179ede21fd92e2b',
'info_dict': {
'id': '301b117890c4465c8179ede21fd92e2b',
'title': 'Website - Hero Player',
},
'playlist_mincount': 2,
}
_PLAYLIST_SERVICE_PATH = 'channel_list'
def _real_extract(self, url):
channel_list_id = self._match_id(url)
channel_list = self._call_playlist_service(channel_list_id, 'getMobileChannelListById')
entries = [
self.url_result('limelight:channel:%s' % channel['id'], 'LimelightChannel')
for channel in channel_list['channelList']]
return self.playlist_result(entries, channel_list_id, channel_list['title'])
|
|
from __future__ import unicode_literals
import logging
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext as _
from djblets.siteconfig.models import SiteConfiguration
from paramiko.hostkeys import HostKeyEntry
import paramiko
from reviewboard.ssh.errors import UnsupportedSSHKeyError
logger = logging.getLogger(__name__)
class SSHHostKeys(paramiko.HostKeys):
"""Manages known lists of host keys.
This is a specialization of paramiko.HostKeys that interfaces with
a storage backend to get the list of host keys.
"""
def __init__(self, storage):
paramiko.HostKeys.__init__(self)
self.storage = storage
def load(self, filename):
"""Loads all known host keys from the storage backend."""
self._entries = []
lines = self.storage.read_host_keys()
for line in lines:
try:
entry = HostKeyEntry.from_line(line)
except paramiko.SSHException:
entry = None
if entry is not None:
self._entries.append(entry)
def save(self, filename):
pass
class SSHClient(paramiko.SSHClient):
"""A client for communicating with an SSH server.
SSHClient allows for communicating with an SSH server and managing
all known host and user keys.
This is a specialization of paramiko.SSHClient, and supports all the
same capabilities.
Key access goes through an SSHStorage backend. The storage backend knows
how to look up keys and write them.
The default backend works with the site directory's :file:`data/.ssh`
directory, and supports namespaced directories for LocalSites.
"""
DEFAULT_STORAGE = 'reviewboard.ssh.storage.FileSSHStorage'
SUPPORTED_KEY_TYPES = (paramiko.RSAKey, paramiko.DSSKey)
def __init__(self, namespace=None, storage_backend=None):
"""Initialize the client.
Version Changed:
3.0.18:
Renamed the old, unused ``storage`` parameter to a supported
``storage_backend`` parameter.
Args:
namespace (unicode, optional):
The namespace to use for any SSH-related data.
storage_backend (unicode, optional):
The class path to a storage backend to use.
"""
super(SSHClient, self).__init__()
self.namespace = namespace
self._load_storage(storage_backend)
self._host_keys = SSHHostKeys(self.storage)
self.load_host_keys('')
def _load_storage(self, storage_backend=None):
"""Load the storage backend.
If an explicit storage backend is provided, it will be used.
Otherwise, this will first check the site configuration for a
``rbssh_storage_backend`` key. It will then fall back to
``settings.RBSSH_STORAGE_BACKEND``, for compatibility. If that
doesn't work, it will default to the built-in local storage backend.
Raises:
ImproperlyConfigured:
The SSH backend could not be loaded.
"""
backend_paths = []
if storage_backend:
backend_paths.append(storage_backend)
else:
try:
siteconfig = SiteConfiguration.objects.get_current()
backend_paths.append(siteconfig.get('ssh_storage_backend'))
except Exception:
pass
try:
backend_paths.append(
getattr(settings, 'RBSSH_STORAGE_BACKEND'))
except (AttributeError, ImportError):
# We may not be running in the Django environment.
pass
backend_paths.append(self.DEFAULT_STORAGE)
self.storage = None
for backend_path in backend_paths:
if not backend_path:
continue
i = backend_path.rfind('.')
module, class_name = backend_path[:i], backend_path[i + 1:]
try:
mod = import_module(module)
storage_cls = getattr(mod, class_name)
except (AttributeError, ImportError) as e:
logger.exception('Error importing SSH storage backend %s: %s',
backend_path, e)
continue
try:
self.storage = storage_cls(namespace=self.namespace)
break
except Exception as e:
logger.exception('Error instantiating SSH storage backend '
'%s: %s',
backend_path, e)
if self.storage is None:
# Since we have a default storage backend, we should never actually
# reach this, but it's better to have some sort of error rather
# than just failing or asserting.
raise ImproperlyConfigured(
_('Unable to load a suitable SSH storage backend. See the '
'log for error details.'))
def get_user_key(self):
"""Returns the keypair of the user running Review Board.
This will be an instance of :py:mod:`paramiko.PKey`, representing
a DSS or RSA key, as long as one exists. Otherwise, it may return None.
"""
        key = None
        try:
            key = self.storage.read_user_key()
        except paramiko.PasswordRequiredException as e:
            # This must come before the SSHException handler below, since
            # PasswordRequiredException subclasses SSHException and would
            # otherwise never be reached.
            logger.error('SSH: Unable to access password protected '
                         'key file: %s' % e)
        except paramiko.SSHException as e:
            logger.error('SSH: Unknown error accessing user key: %s' % e)
        except IOError as e:
            logger.error('SSH: Error reading user key: %s' % e)
        return key
def delete_user_key(self):
"""Deletes the user key for this client.
If no key exists, this will do nothing.
"""
try:
self.storage.delete_user_key()
except Exception as e:
logger.error('Unable to delete SSH key file: %s' % e)
raise
def get_public_key(self, key):
"""Returns the public key portion of an SSH key.
This will be formatted for display.
"""
public_key = ''
if key:
base64 = key.get_base64()
# TODO: Move this wrapping logic into a common templatetag.
for i in range(0, len(base64), 64):
public_key += base64[i:i + 64] + '\n'
return public_key
def is_key_authorized(self, key):
"""Returns whether or not a public key is currently authorized."""
public_key = key.get_base64()
try:
lines = self.storage.read_authorized_keys()
for line in lines:
try:
authorized_key = line.split()[1]
except (ValueError, IndexError):
continue
if authorized_key == public_key:
return True
except IOError:
pass
return False
def generate_user_key(self, bits=2048):
"""Generates a new RSA keypair for the user running Review Board.
This will store the new key in the backend storage and return the
resulting key as an instance of :py:mod:`paramiko.RSAKey`.
If a key already exists, it's returned instead.
Callers are expected to handle any exceptions. This may raise
IOError for any problems in writing the key file, or
paramiko.SSHException for any other problems.
"""
key = self.get_user_key()
if not key:
key = paramiko.RSAKey.generate(bits)
self._write_user_key(key)
return key
def import_user_key(self, keyfile):
"""Imports an uploaded key file into Review Board.
``keyfile`` is expected to be an ``UploadedFile`` or a paramiko
``KeyFile``. If this is a valid key file, it will be saved in
the storage backend and the resulting key as an instance of
:py:mod:`paramiko.PKey` will be returned.
If a key of this name already exists, it will be overwritten.
Callers are expected to handle any exceptions. This may raise
IOError for any problems in writing the key file, or
paramiko.SSHException for any other problems.
This will raise UnsupportedSSHKeyError if the uploaded key is not
a supported type.
"""
# Try to find out what key this is.
for cls in self.SUPPORTED_KEY_TYPES:
key = None
try:
if not isinstance(keyfile, paramiko.PKey):
keyfile.seek(0)
key = cls.from_private_key(keyfile)
elif isinstance(keyfile, cls):
key = keyfile
except paramiko.SSHException:
# We don't have more detailed info than this, but most
# likely, it's not a valid key. Skip to the next.
continue
if key:
self._write_user_key(key)
return key
raise UnsupportedSSHKeyError()
def add_host_key(self, hostname, key):
"""Adds a host key to the known hosts file."""
self.storage.add_host_key(hostname, key)
def replace_host_key(self, hostname, old_key, new_key):
"""Replaces a host key in the known hosts file with another.
This is used for replacing host keys that have changed.
"""
self.storage.replace_host_key(hostname, old_key, new_key)
def _write_user_key(self, key):
"""Convenience function to write a user key and check for errors.
Any errors caused as a result of writing a user key will be logged.
"""
try:
self.storage.write_user_key(key)
except UnsupportedSSHKeyError as e:
logger.error('Failed to write unknown key type %s' % type(key))
raise
except IOError as e:
logger.error('Failed to write SSH user key: %s' % e)
raise
except Exception as e:
logger.error('Unknown error writing SSH user key: %s' % e,
exc_info=1)
raise
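# A minimal usage sketch (illustration only; not part of Review Board itself).
# It assumes the default FileSSHStorage backend is importable, as
# _load_storage requires, and the helper name below is hypothetical.
def _example_ensure_user_key():
    """Return a displayable public key, generating the keypair if missing."""
    client = SSHClient()
    key = client.generate_user_key(bits=2048)  # returns any existing key as-is
    return client.get_public_key(key)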
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.keystone import role_assignments
from heat.engine import support
class KeystoneUser(resource.Resource,
role_assignments.KeystoneRoleAssignmentMixin):
"""Heat Template Resource for Keystone User.
    Users represent an individual API consumer. A user must be owned by a
    specific domain, and hence user names are not globally unique, only
    unique within their domain.
"""
support_status = support.SupportStatus(
version='2015.1',
message=_('Supported versions: keystone v3'))
default_client_name = 'keystone'
entity = 'users'
PROPERTIES = (
NAME, DOMAIN, DESCRIPTION, ENABLED, EMAIL, PASSWORD,
DEFAULT_PROJECT, GROUPS
) = (
'name', 'domain', 'description', 'enabled', 'email', 'password',
'default_project', 'groups'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of keystone user.'),
update_allowed=True
),
DOMAIN: properties.Schema(
properties.Schema.STRING,
_('Name of keystone domain.'),
default='default',
update_allowed=True,
constraints=[constraints.CustomConstraint('keystone.domain')]
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of keystone user.'),
default='',
update_allowed=True
),
ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('Keystone user is enabled or disabled.'),
default=True,
update_allowed=True
),
EMAIL: properties.Schema(
properties.Schema.STRING,
_('Email address of keystone user.'),
update_allowed=True
),
PASSWORD: properties.Schema(
properties.Schema.STRING,
_('Password of keystone user.'),
update_allowed=True
),
DEFAULT_PROJECT: properties.Schema(
properties.Schema.STRING,
_('Default project of keystone user.'),
update_allowed=True,
constraints=[constraints.CustomConstraint('keystone.project')]
),
GROUPS: properties.Schema(
properties.Schema.LIST,
_('Keystone user groups.'),
update_allowed=True,
schema=properties.Schema(
properties.Schema.STRING,
_('Keystone user group.'),
constraints=[constraints.CustomConstraint('keystone.group')]
)
)
}
properties_schema.update(
role_assignments.KeystoneRoleAssignmentMixin.mixin_properties_schema)
def validate(self):
super(KeystoneUser, self).validate()
self.validate_assignment_properties()
def client(self):
return super(KeystoneUser, self).client().client
def _update_user(self,
user_id,
domain,
new_name=None,
new_description=None,
new_email=None,
new_password=None,
new_default_project=None,
enabled=None):
values = dict()
if new_name is not None:
values['name'] = new_name
if new_description is not None:
values['description'] = new_description
if new_email is not None:
values['email'] = new_email
if new_password is not None:
values['password'] = new_password
if new_default_project is not None:
values['default_project'] = new_default_project
if enabled is not None:
values['enabled'] = enabled
        # If none of the values above were set, keystone raises a BadRequest
        # error complaining that there is nothing to update, so return early
        # to avoid raising it.
if not values:
return
values['user'] = user_id
        domain = self.client_plugin().get_domain_id(domain)
values['domain'] = domain
return self.client().users.update(**values)
def _add_user_to_groups(self, user_id, groups):
if groups is not None:
group_ids = [self.client_plugin().get_group_id(group)
for group in groups]
for group_id in group_ids:
self.client().users.add_to_group(user_id,
group_id)
def _remove_user_from_groups(self, user_id, groups):
if groups is not None:
group_ids = [self.client_plugin().get_group_id(group)
for group in groups]
for group_id in group_ids:
self.client().users.remove_from_group(user_id,
group_id)
def _find_diff(self, updated_prps, stored_prps):
new_group_ids = [self.client_plugin().get_group_id(group)
for group in
(set(updated_prps or []) -
set(stored_prps or []))]
removed_group_ids = [self.client_plugin().get_group_id(group)
for group in
(set(stored_prps or []) -
set(updated_prps or []))]
return new_group_ids, removed_group_ids
def handle_create(self):
user_name = (self.properties[self.NAME] or
self.physical_resource_name())
description = self.properties[self.DESCRIPTION]
domain = self.client_plugin().get_domain_id(
self.properties[self.DOMAIN])
enabled = self.properties[self.ENABLED]
email = self.properties[self.EMAIL]
password = self.properties[self.PASSWORD]
default_project = self.client_plugin().get_project_id(
self.properties[self.DEFAULT_PROJECT])
groups = self.properties[self.GROUPS]
user = self.client().users.create(
name=user_name,
domain=domain,
description=description,
enabled=enabled,
email=email,
password=password,
default_project=default_project)
self.resource_id_set(user.id)
self._add_user_to_groups(user.id, groups)
self.create_assignment(user_id=user.id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
name = None
# Don't update the name if no change
if self.NAME in prop_diff:
name = prop_diff[self.NAME] or self.physical_resource_name()
description = prop_diff.get(self.DESCRIPTION)
enabled = prop_diff.get(self.ENABLED)
email = prop_diff.get(self.EMAIL)
password = prop_diff.get(self.PASSWORD)
domain = (prop_diff.get(self.DOMAIN)
or self.properties[self.DOMAIN])
default_project = prop_diff.get(self.DEFAULT_PROJECT)
self._update_user(
user_id=self.resource_id,
domain=domain,
new_name=name,
new_description=description,
enabled=enabled,
new_default_project=default_project,
new_email=email,
new_password=password
)
if self.GROUPS in prop_diff:
(new_group_ids, removed_group_ids) = self._find_diff(
prop_diff[self.GROUPS],
self.properties[self.GROUPS])
if new_group_ids:
self._add_user_to_groups(self.resource_id, new_group_ids)
if removed_group_ids:
self._remove_user_from_groups(self.resource_id,
removed_group_ids)
self.update_assignment(prop_diff=prop_diff,
user_id=self.resource_id)
def handle_delete(self):
if self.resource_id is not None:
with self.client_plugin().ignore_not_found:
if self.properties[self.GROUPS] is not None:
self._remove_user_from_groups(
self.resource_id,
[self.client_plugin().get_group_id(group)
for group in
self.properties[self.GROUPS]])
self.client().users.delete(self.resource_id)
def resource_mapping():
return {
'OS::Keystone::User': KeystoneUser
}
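# A minimal HOT snippet (illustration only; the resource name and property
# values are hypothetical) showing how this resource might be declared:
#
#   resources:
#     demo_user:
#       type: OS::Keystone::User
#       properties:
#         name: demo
#         domain: default
#         enabled: true
#         groups: [developers]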
|
|
"""
Utilities for generating random numbers, random sequences, and
random selections.
"""
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import random
import sys
import networkx as nx
__author__ = '\n'.join(['Aric Hagberg (hagberg@lanl.gov)',
'Dan Schult(dschult@colgate.edu)',
'Ben Edwards(bedwards@cs.unm.edu)'])
import warnings as _warnings
def create_degree_sequence(n, sfunction=None, max_tries=50, **kwds):
    """ Attempt to create a valid degree sequence of length n using
    specified function sfunction(n,**kwds).
specified function sfunction(n,**kwds).
Parameters
----------
n : int
Length of degree sequence = number of nodes
sfunction: function
Function which returns a list of n real or integer values.
Called as "sfunction(n,**kwds)".
max_tries: int
Max number of attempts at creating valid degree sequence.
Notes
-----
Repeatedly create a degree sequence by calling sfunction(n,**kwds)
until achieving a valid degree sequence. If unsuccessful after
max_tries attempts, raise an exception.
For examples of sfunctions that return sequences of random numbers,
see networkx.Utils.
Examples
--------
>>> from networkx.utils import uniform_sequence, create_degree_sequence
>>> seq=create_degree_sequence(10,uniform_sequence)
"""
tries=0
    max_deg = n
    while tries < max_tries:
        trialseq = sfunction(n, **kwds)
        # round to integer values in the range [0,max_deg]
        seq = [min(max_deg, max(int(round(s)), 0)) for s in trialseq]
        # if graphical return, else throw away and try again
        if nx.is_valid_degree_sequence(seq):
            return seq
        tries += 1
    raise nx.NetworkXError(
        "Exceeded max (%d) attempts at a valid sequence." % max_tries)
# The same helpers for choosing random sequences from distributions
# uses Python's random module
# http://www.python.org/doc/current/lib/module-random.html
def pareto_sequence(n,exponent=1.0):
"""
Return sample sequence of length n from a Pareto distribution.
"""
return [random.paretovariate(exponent) for i in range(n)]
def powerlaw_sequence(n,exponent=2.0):
"""
Return sample sequence of length n from a power law distribution.
"""
return [random.paretovariate(exponent-1) for i in range(n)]
def zipf_rv(alpha, xmin=1, seed=None):
r"""Return a random value chosen from the Zipf distribution.
The return value is an integer drawn from the probability distribution
    .. math::
        p(x) = \frac{x^{-\alpha}}{\zeta(\alpha, x_{min})},
    where :math:`\zeta(\alpha, x_{min})` is the Hurwitz zeta function.
Parameters
----------
alpha : float
Exponent value of the distribution
xmin : int
Minimum value
seed : int
Seed value for random number generator
Returns
-------
x : int
Random value from Zipf distribution
Raises
------
ValueError:
If xmin < 1 or
If alpha <= 1
Notes
-----
    The rejection algorithm generates random values for the power-law
    distribution in uniformly bounded expected time dependent on
    parameters. See [1] for details on its operation.
Examples
--------
>>> nx.zipf_rv(alpha=2, xmin=3, seed=42) # doctest: +SKIP
References
----------
    .. [1] Luc Devroye, Non-Uniform Random Variate Generation,
Springer-Verlag, New York, 1986.
"""
if xmin < 1:
raise ValueError("xmin < 1")
if alpha <= 1:
raise ValueError("a <= 1.0")
    if seed is not None:
random.seed(seed)
a1 = alpha - 1.0
b = 2**a1
while True:
u = 1.0 - random.random() # u in (0,1]
v = random.random() # v in [0,1)
x = int(xmin*u**-(1.0/a1))
t = (1.0+(1.0/x))**a1
if v*x*(t-1.0)/(b-1.0) <= t/b:
break
return x
def zipf_sequence(n, alpha=2.0, xmin=1):
"""Return a sample sequence of length n from a Zipf distribution with
exponent parameter alpha and minimum value xmin.
See Also
--------
zipf_rv
"""
return [ zipf_rv(alpha,xmin) for _ in range(n)]
def uniform_sequence(n):
"""
Return sample sequence of length n from a uniform distribution.
"""
return [ random.uniform(0,n) for i in range(n)]
def cumulative_distribution(distribution):
"""Return normalized cumulative distribution from discrete distribution."""
    cdf = [0.0]
    psum = float(sum(distribution))
    for i in range(len(distribution)):
        cdf.append(cdf[i] + distribution[i] / psum)
return cdf
def discrete_sequence(n, distribution=None, cdistribution=None):
"""
Return sample sequence of length n from a given discrete distribution
or discrete cumulative distribution.
One of the following must be specified.
distribution = histogram of values, will be normalized
cdistribution = normalized discrete cumulative distribution
"""
    import bisect
    if cdistribution is not None:
        cdf = cdistribution
    elif distribution is not None:
        cdf = cumulative_distribution(distribution)
    else:
        raise nx.NetworkXError(
            "discrete_sequence: distribution or cdistribution missing")
    # get a uniform random number
    inputseq = [random.random() for i in range(n)]
    # choose from CDF
    seq = [bisect.bisect_left(cdf, s) - 1 for s in inputseq]
    return seq
def random_weighted_sample(mapping, k):
"""Return k items without replacement from a weighted sample.
The input is a dictionary of items with weights as values.
"""
if k > len(mapping):
raise ValueError("sample larger than population")
sample = set()
while len(sample) < k:
sample.add(weighted_choice(mapping))
return list(sample)
def weighted_choice(mapping):
"""Return a single element from a weighted sample.
The input is a dictionary of items with weights as values.
"""
# use roulette method
rnd = random.random() * sum(mapping.values())
for k, w in mapping.items():
rnd -= w
if rnd < 0:
return k
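# A small sketch (illustration only) of the roulette-wheel helpers above: with
# weights {'a': 1, 'b': 3}, weighted_choice returns 'b' about 75% of the time,
# and random_weighted_sample draws k distinct keys without replacement.
def _example_weighted_draws():
    weights = {'a': 1, 'b': 3}
    single = weighted_choice(weights)          # one weighted draw
    pair = random_weighted_sample(weights, 2)  # here k equals the population size
    return single, pair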
|
|
"""The tests for the Google Wifi platform."""
from datetime import datetime, timedelta
from http import HTTPStatus
from unittest.mock import Mock, patch
import homeassistant.components.google_wifi.sensor as google_wifi
from homeassistant.const import STATE_UNKNOWN
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.common import assert_setup_component, async_fire_time_changed
NAME = "foo"
MOCK_DATA = (
'{"software": {"softwareVersion":"initial",'
'"updateNewVersion":"initial"},'
'"system": {"uptime":86400},'
'"wan": {"localIpAddress":"initial", "online":true,'
'"ipAddress":true}}'
)
MOCK_DATA_NEXT = (
'{"software": {"softwareVersion":"next",'
'"updateNewVersion":"0.0.0.0"},'
'"system": {"uptime":172800},'
'"wan": {"localIpAddress":"next", "online":false,'
'"ipAddress":false}}'
)
MOCK_DATA_MISSING = '{"software": {},' '"system": {},' '"wan": {}}'
async def test_setup_minimum(hass, requests_mock):
"""Test setup with minimum configuration."""
resource = f"http://{google_wifi.DEFAULT_HOST}{google_wifi.ENDPOINT}"
requests_mock.get(resource, status_code=HTTPStatus.OK)
assert await async_setup_component(
hass,
"sensor",
{"sensor": {"platform": "google_wifi", "monitored_conditions": ["uptime"]}},
)
assert_setup_component(1, "sensor")
async def test_setup_get(hass, requests_mock):
"""Test setup with full configuration."""
resource = f"http://localhost{google_wifi.ENDPOINT}"
requests_mock.get(resource, status_code=HTTPStatus.OK)
assert await async_setup_component(
hass,
"sensor",
{
"sensor": {
"platform": "google_wifi",
"host": "localhost",
"name": "Test Wifi",
"monitored_conditions": [
"current_version",
"new_version",
"uptime",
"last_restart",
"local_ip",
"status",
],
}
},
)
assert_setup_component(6, "sensor")
def setup_api(hass, data, requests_mock):
"""Set up API with fake data."""
resource = f"http://localhost{google_wifi.ENDPOINT}"
now = datetime(1970, month=1, day=1)
sensor_dict = {}
with patch("homeassistant.util.dt.now", return_value=now):
requests_mock.get(resource, text=data, status_code=HTTPStatus.OK)
conditions = google_wifi.SENSOR_KEYS
api = google_wifi.GoogleWifiAPI("localhost", conditions)
for desc in google_wifi.SENSOR_TYPES:
sensor_dict[desc.key] = {
"sensor": google_wifi.GoogleWifiSensor(api, NAME, desc),
"name": f"{NAME}_{desc.key}",
"units": desc.native_unit_of_measurement,
"icon": desc.icon,
}
for name in sensor_dict:
sensor = sensor_dict[name]["sensor"]
sensor.hass = hass
return api, sensor_dict
def fake_delay(hass, ha_delay):
"""Fake delay to prevent update throttle."""
hass_now = dt_util.utcnow()
shifted_time = hass_now + timedelta(seconds=ha_delay)
async_fire_time_changed(hass, shifted_time)
def test_name(requests_mock):
"""Test the name."""
api, sensor_dict = setup_api(None, MOCK_DATA, requests_mock)
for name in sensor_dict:
sensor = sensor_dict[name]["sensor"]
test_name = sensor_dict[name]["name"]
assert test_name == sensor.name
def test_unit_of_measurement(requests_mock):
"""Test the unit of measurement."""
api, sensor_dict = setup_api(None, MOCK_DATA, requests_mock)
for name in sensor_dict:
sensor = sensor_dict[name]["sensor"]
assert sensor_dict[name]["units"] == sensor.unit_of_measurement
def test_icon(requests_mock):
"""Test the icon."""
api, sensor_dict = setup_api(None, MOCK_DATA, requests_mock)
for name in sensor_dict:
sensor = sensor_dict[name]["sensor"]
assert sensor_dict[name]["icon"] == sensor.icon
def test_state(hass, requests_mock):
"""Test the initial state."""
api, sensor_dict = setup_api(hass, MOCK_DATA, requests_mock)
now = datetime(1970, month=1, day=1)
with patch("homeassistant.util.dt.now", return_value=now):
for name in sensor_dict:
sensor = sensor_dict[name]["sensor"]
fake_delay(hass, 2)
sensor.update()
if name == google_wifi.ATTR_LAST_RESTART:
assert sensor.state == "1969-12-31 00:00:00"
elif name == google_wifi.ATTR_UPTIME:
assert sensor.state == 1
elif name == google_wifi.ATTR_STATUS:
assert sensor.state == "Online"
else:
assert sensor.state == "initial"
def test_update_when_value_is_none(hass, requests_mock):
"""Test state gets updated to unknown when sensor returns no data."""
api, sensor_dict = setup_api(hass, None, requests_mock)
for name in sensor_dict:
sensor = sensor_dict[name]["sensor"]
fake_delay(hass, 2)
sensor.update()
assert sensor.state is None
def test_update_when_value_changed(hass, requests_mock):
"""Test state gets updated when sensor returns a new status."""
api, sensor_dict = setup_api(hass, MOCK_DATA_NEXT, requests_mock)
now = datetime(1970, month=1, day=1)
with patch("homeassistant.util.dt.now", return_value=now):
for name in sensor_dict:
sensor = sensor_dict[name]["sensor"]
fake_delay(hass, 2)
sensor.update()
if name == google_wifi.ATTR_LAST_RESTART:
assert sensor.state == "1969-12-30 00:00:00"
elif name == google_wifi.ATTR_UPTIME:
assert sensor.state == 2
elif name == google_wifi.ATTR_STATUS:
assert sensor.state == "Offline"
elif name == google_wifi.ATTR_NEW_VERSION:
assert sensor.state == "Latest"
elif name == google_wifi.ATTR_LOCAL_IP:
assert sensor.state == STATE_UNKNOWN
else:
assert sensor.state == "next"
def test_when_api_data_missing(hass, requests_mock):
"""Test state logs an error when data is missing."""
api, sensor_dict = setup_api(hass, MOCK_DATA_MISSING, requests_mock)
now = datetime(1970, month=1, day=1)
with patch("homeassistant.util.dt.now", return_value=now):
for name in sensor_dict:
sensor = sensor_dict[name]["sensor"]
fake_delay(hass, 2)
sensor.update()
assert sensor.state == STATE_UNKNOWN
def test_update_when_unavailable(hass, requests_mock):
"""Test state updates when Google Wifi unavailable."""
api, sensor_dict = setup_api(hass, None, requests_mock)
api.update = Mock(
"google_wifi.GoogleWifiAPI.update",
side_effect=update_side_effect(hass, requests_mock),
)
for name in sensor_dict:
sensor = sensor_dict[name]["sensor"]
sensor.update()
assert sensor.state is None
def update_side_effect(hass, requests_mock):
"""Mock representation of update function."""
api, sensor_dict = setup_api(hass, MOCK_DATA, requests_mock)
api.data = None
api.available = False
|
|
#!/usr/bin/python3
"""
Re-write "gps_win.pl" to "GPS.py"
by Qige <qigezhao@gmail.com>
2017.10.10-2017.10.13 compatible with u-blox6 GMOUSE GPS VK-162
2017.10.16 compatible with u-blox7, Stoton GPS+BDS GNSS100B
2017.10.17 final re-format
2017.10.17 read hex & ascii GPS Sensor data. not verified yet
Done:
* verify with real GPS Sensor (GMOUSE VK-162, USB Mouse style);
* Compatible with multi protocol.
"""
import re
import sys
import time
import serial
import serial.tools.list_ports
FLAG_RUN = 1
FLAG_DBG = 0
GPS_SENSOR = 'gps.txt'
# application
def appVersion():
    print('ARNPerf v7.1.4 (https://github.com/zhaoqige/arnperf.git)')
print('---- by Qige <qigezhao@gmail.com> v7.1.4.181017-py ----')
print('-------------------------------------------------------')
def appHelp():
    print('Usage: GPS.py com4 [gps.txt] # user defined GPS Sensor & output file')
    print('Usage: GPS.py # find GPS Sensor, then write to "gps.txt"')
def cliParams():
if sys and sys.argv and len(sys.argv) >= 3:
return sys.argv[1:3] # 3rd not included
if sys and sys.argv and len(sys.argv) >= 2:
return sys.argv[1], None
return None, None
def HexToAscii(data):
if data and len(data) > 0:
        result = []
        for c in data:
            try:
                cs = chr(c)  # bytes iterate as ints on Python 3
            except TypeError:
                cs = c       # already a one-character string
            if re.match(r'[0-9a-zA-Z,$*.\\]', cs):  # printable NMEA characters
                result.extend(cs)
        return ''.join(result)
return None
# serial port handler
def spOpen(serialName):
# 115200/8/N/1
try:
if serialName:
serialFd = serial.Serial(serialName, timeout = 3)
serialFd.baudrate = 115200
#serialFd.baudrate = 9600
serialFd.bytesize = 8
            serialFd.parity = serial.PARITY_NONE
serialFd.stopbits = 1
serialFd.timeout = 3
serialFd.writeTimeout = 1
if serialFd and serialFd.readable():
return serialFd
else:
return None
except serial.SerialException:
return None
def spRead(serialFd):
if serialFd and (serialFd.readable()):
buffer = serialFd.read(512)
#buffer = '\xc4\xe3\xba\xc3\xb0\xa1121A\xba\xc5'
#buffer = '00*1F$GPGGA,092204.999,4250.5589,S,14718.5084,E,1,04,24.4,19.7,M,,,,0000*1F'
if buffer:
#return buffer # v7.1.2
# v7.1.3 fix hex data from GPS Sensor
data = HexToAscii(buffer)
return data
return None
def spWrite(serialFd, data):
if serialFd and data:
serialFd.write(data)
def spClose(serialFd):
if serialFd:
serialFd.close()
# u-blox 6 chip: GPS, start with $GP*
def GPSUblox6(msg):
if msg and len(msg) >= 6:
flagUblox6 = re.search("GPRMC|GPGGA|GPGSA|GPGSV|GPVTG|GPGLL|GPTXT", \
str(msg))
if (flagUblox6):
return 1
return 0
# u-blox 7 chip: GPS+BSD, start with $GN*
def GPSUblox7(msg):
if msg and len(msg) >= 6:
flagUblox7 = re.search("GNRMC|GNGGA|GNGSA|GBGSV|GNGSV|GNVTG|GNGLL|GNTXT", \
str(msg))
if (flagUblox7):
return 1
return 0
# find first GPS Sensor, return fd
def GPSSensorFindFd(spDev):
if spDev:
spDesc = spDev[0]
#spDesc = 'com17' # TODO: DEBUG USE ONLY!
serialFd = spOpen(spDesc)
if serialFd:
spData = spRead(serialFd) # read GPS Sensor
# DEBUG USE ONLY!
#spData = ",,,,0000*1F$GPGGA,092204.999,4250.5589,S,14718.5084,E,1,04,24.4,19.7,M,,,,0000*1F"
spDataLength = 0
if spData:
spDataLength = len(spData)
if spDataLength >= 6:
if GPSUblox6(spData) or GPSUblox7(spData):
serialName = serialFd.name
print("-> GPS sensor found:", serialName, '|', spDataLength, 'bytes')
return serialFd
else:
if spData.isalnum(): print(spData)
spClose(serialFd)
else:
print('->', spDesc, '- Device Busy or Not GPS Sensor')
else:
print('error> Unknown Device')
return None
# protocol: NMEA-0183
# $GPRMC sample data
# "$GPRMC,024813.640,A,3158.4608,N,11848.3737,E,10.05,324.27,150706,,,A*50"
def ProtoNEMA0183FindGPRMC(data):
gprmcRaw = None
if data:
gpList = re.split(r'[\$\n\r\\\n\\\r]', str(data))
if gpList and len(gpList) >= 1:
for line in gpList:
if line and re.search('GPRMC|GNRMC', line):
if (FLAG_DBG > 0): print('dbg> line:', line)
gprmcRaw = line
break
return gprmcRaw
def ProtoNEMA0183DegreeConvert(degreeRaw, isSW):
vi = int(float(degreeRaw) / 100)
val = vi + ((float(degreeRaw) - vi * 100) / 60)
if (isSW == 'S') or (isSW == 'W'):
val = 0 - val
return val
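# Worked example (illustration only): the NMEA latitude field 4250.5589 with
# hemisphere 'S' means 42 degrees plus 50.5589 minutes, i.e.
# 42 + 50.5589/60 = 42.84265 (rounded), negated for the southern hemisphere:
#   ProtoNEMA0183DegreeConvert('4250.5589', 'S')  ->  -42.84264833...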
#return "A,39.0005,119.0005,0,0"
def ProtoNEMA0183ParseRecord(gprmc_raw):
if (FLAG_DBG > 0): print('dbg> $G*RMC:', gprmc_raw) # $GPRMC or $GNRMC
try:
gprmcList = gprmc_raw.split(',')
if gprmcList and len(gprmcList) >= 9:
gpsFlag = 'A' if re.search('A', gprmcList[2]) \
else 'V'
            gpsLat = ProtoNEMA0183DegreeConvert(gprmcList[3], gprmcList[4]) \
                if (gprmcList[3] != '' and (gprmcList[4] != '')) \
                else 0
            gpsLng = ProtoNEMA0183DegreeConvert(gprmcList[5], gprmcList[6]) \
                if (gprmcList[5] != '' and (gprmcList[6] != '')) \
                else 0
            # knots to km/h
            gpsSpeed = (float(gprmcList[7]) * 1.852) if (gprmcList[7] != '') \
                else 0
            gpsHdg = float(gprmcList[8]) if (gprmcList[8] != '') \
                else 0
gpsLatlng = '%s,%.8f,%.8f,%.2f,%.1f' \
% (gpsFlag, gpsLat, gpsLng, gpsSpeed, gpsHdg)
return gpsLatlng
except:
print('error> Bad G*RMC record')
return 'V,,,,'
# GPS sync
def GPSSensorSyncLatlng(serialFd, gpsFile):
outFile = GPS_SENSOR # default gps file
if gpsFile:
outFile = gpsFile
if serialFd:
print("-> reading GPS location from", serialFd.name, ">", outFile)
while FLAG_RUN > 0:
data = spRead(serialFd)
if data:
gprmc_raw = ProtoNEMA0183FindGPRMC(data)
data = ProtoNEMA0183ParseRecord(gprmc_raw)
GPSLatlngSave(outFile, data)
time.sleep(0.9)
else:
print("error> invalid GPS sensor > ", serialFd)
appHelp()
# save parsed data+ts to file
def GPSLatlngSave(gpsFile, data):
if gpsFile and data:
        ts = time.strftime("%Y-%m-%d %H:%M:%S")
        print('==> GCJ-02:', data)
        # open() raises on failure rather than returning a falsy handle, so
        # catch IOError instead of testing the file object.
        try:
            with open(gpsFile, 'w') as fd:
                fd.write(data + ',' + ts)
                fd.flush()
            print('---- saved at', ts)
        except IOError:
            print('error> failed to save & exchange GPS location')
else:
print('error> NO data to save')
"""
GPS Sensor Handler
------------------
Usage: "GPS.py com8 gps.txt"
------------------
by Qige <qigezhao@gmail.com>
2017.10.10
"""
def GPSRecorder():
appVersion()
print('> reading input ...')
gpsCom, gpsFile = cliParams()
serialFd = None
if gpsCom:
print('> opening GPS Sensor:', gpsCom)
serialFd = spOpen(gpsCom)
else:
print('> finding GPS Sensor ...')
spDevList = list(serial.tools.list_ports.comports())
if len(spDevList) <= 0:
print("error: NO GPS Sensor found!")
else:
for spDev in spDevList:
serialFd = GPSSensorFindFd(spDev)
if serialFd:
break
if serialFd:
print('> read & save GPS location ...')
GPSSensorSyncLatlng(serialFd, gpsFile)
else:
print('error> NO GPS Sensor valid!')
appHelp()
# start GPS Recorder
GPSRecorder()
|
|
import pytest
from codecs import decode, encode
from six import text_type
from springfield import fields, Entity
def test_slug():
"""
Assure that slugs are generated properly
"""
slugify = fields.SlugField().adapt
for input, expect in [
('01 HDR test', '01-hdr-test'),
('--&*$#(8$jjsdsd77-----test phrase12 123--', '8jjsdsd77-test-phrase12-123'),
('1234', '1234'),
('abcdEFG', 'abcdefg'),
]:
assert slugify(input) == expect
def test_float():
"""
Assure that float can adapt various types
"""
floatify = fields.FloatField().adapt
for input, expect in [
(1.1, 1.1),
(11, 11.0),
(int(5.7), 5)
]:
assert floatify(input) == expect
def test_url():
"""
Assure that url performs some basic validation
"""
urlify = fields.UrlField().adapt
# positive tests
for input, expect in [
('http://www.google.com/SOME/path', 'http://www.google.com/SOME/path'),
('http://www.google.com/Path?foo=bar&bar=fOO', 'http://www.google.com/Path?foo=bar&bar=fOO'),
('hTTp://www.Google.com', 'http://www.google.com'),
('ftp://www.google.com', 'ftp://www.google.com'),
('https://www.google.com', 'https://www.google.com'),
(None, None),
]:
assert urlify(input) == expect
# negative tests
for input in [
'http;//www.google.com',
'http:/www.google.com',
'http:www.google.com',
'<script></script>',
'<img src="http://foo.bar/badimage">'
]:
with pytest.raises(TypeError):
urlify(input)
def test_bytes():
"""
Check that bytes encode/decode to/from json without problems. And check
the support for the `encoding` option for BytesField.
"""
escaping_bytes_field = fields.BytesField(encoding=None)
hex_bytes_field = fields.BytesField(encoding='hex')
base64_bytes_field = fields.BytesField() # base64 is the default
# Basic check of adapt and jsonify on just bytes
for input in (b'abc', b'\x00\xA0\xFF'):
for f in (escaping_bytes_field, hex_bytes_field, base64_bytes_field):
# Adapt should reverse jsonify
assert f.adapt(f.jsonify(input)) == input
            # Since it's already bytes, adapt is a no-op
assert f.adapt(input) == input
assert escaping_bytes_field.jsonify(input) == decode(input, 'latin1')
assert hex_bytes_field.jsonify(input) == decode(encode(input, 'hex'), 'latin1')
assert base64_bytes_field.jsonify(input) == decode(encode(input, 'base64'), 'latin1')
# BytesField doesn't jsonify unicode values
for input in (u'abc', u'\u0100', u'\u0000'):
for f in (escaping_bytes_field, hex_bytes_field, base64_bytes_field):
with pytest.raises(ValueError):
f.jsonify(input)
# BytesField doesn't adapt unicode values with code points > 255
for f in (escaping_bytes_field, hex_bytes_field, base64_bytes_field):
with pytest.raises(ValueError):
f.jsonify(u'\u0100')
# Hex encoding doesn't accept non-hex inputs
with pytest.raises(TypeError):
hex_bytes_field.adapt(u'hijklmnopq')
# Should leave null alone
for f in (escaping_bytes_field, hex_bytes_field, base64_bytes_field):
assert f.adapt(None) == None
assert f.jsonify(None) == None
def test_dotted_named_entities():
"""
Assure that EntityField can be instantiated with dotted-named
classes.
"""
class TestEntity(Entity):
foo = fields.EntityField('tests.dottedname.foo.bar.baz.Zap')
e = TestEntity(foo={'name': 'baz'})
assert e.foo.name == 'baz' # noqa
# Avoid importing the class before the TestEntity above is instantiated
# so that we know the `EntityField` import worked as expected.
from tests.dottedname.foo.bar.baz import Zap
assert isinstance(e.foo, Zap)
def test_dotted_named_entities_circular_references():
"""
Assure that circular references in entity fields are handled when
using dotted-name EntityField types.
"""
from tests.dottedname.foo.bar.bop import Property
p = Property(
name='outer',
nested={
'properties': [
Property(name='inner')
]
}
)
assert p
assert isinstance(p.nested.properties, list)
assert p.nested.properties[0].name == 'inner'
def test_dotted_named_entities_not_callable():
"""
Assure that if a dotted-name reference is not callable, that an
expected error is raised.
"""
class TestEntity(Entity):
foo = fields.EntityField('tests.dottedname.foo.bar.baz.NotCallable')
with pytest.raises(ValueError):
TestEntity(foo={'name': 'baz'})
def test_dotted_named_entities_not_importable():
"""
Assure that if invalid references are used, an expected error is raised.
"""
class RandomStringTestEntity(Entity):
foo = fields.EntityField('a.string.with.dots')
with pytest.raises(ValueError):
RandomStringTestEntity(foo='anything')
def test_dotted_named_entities_not_dotted():
"""
Assure that byte string references are actually dotted-name references.
"""
class NonDottedNameEntity(Entity):
# `Property` is a real class, but this string is not a full
# reference, so it can't be resolved and is therefore considered
# invalid.
foo = fields.EntityField('Property')
with pytest.raises(ValueError):
NonDottedNameEntity(foo={})
class ExistingNonDottedNameEntity(Entity):
# `FlexEntity` is a real class and it's likely in the local
# import scope, but it's still not considered a supported
# dotted-name class reference.
foo = fields.EntityField('FlexEntity')
with pytest.raises(ValueError):
ExistingNonDottedNameEntity(foo={})
class SelfNonDottedNameEntity(Entity):
# 'self' is a special case and is the only non-dotted,
# dotted-name class reference that we support.
foo = fields.EntityField('self')
name = fields.StringField()
result = SelfNonDottedNameEntity(
name='outer',
foo={
'name': 'inner',
'foo': {
'name': 'deeper'
}
}
)
assert result
assert result.name == 'outer'
assert result.foo.name == 'inner'
assert result.foo.foo.name == 'deeper'
def test_stringfield_return_text_type():
"""
Assure that stringfield return type should be text_type
"""
stringify = fields.StringField().adapt
assert isinstance(stringify("Hello World"), text_type)
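def _example_entity_usage():
    """
    A minimal sketch of how the fields above combine on an Entity. The class
    and field names here are hypothetical, and it assumes (as the dotted-name
    tests above do) that values passed to the constructor are adapted.
    """
    class Page(Entity):
        title = fields.StringField()
        slug = fields.SlugField()
        ratio = fields.FloatField()
    page = Page(title='Hello World', slug='Hello World', ratio=1)
    assert page.slug == 'hello-world'
    assert page.ratio == 1.0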
|
|
#!/usr/bin/env python
"""
This is the top level script that runs "commands" for Wavefront.
In the longer term, the INSTALLED_COMMANDS constant should be dynamically
generated from the commands currently installed.
"""
from __future__ import print_function
import ConfigParser
import importlib
import logging
import logging.config
import sys
import threading
import traceback
import argparse
import daemon
import daemon.pidfile
from wavefront import utils
# List of available commands to run. This is currently hard-coded, but later
# could (and should) be auto-generated from the commands installed.
INSTALLED_COMMANDS = {
'appdynamics': (
'wavefront.appdynamics',
'AppDMetricRetrieverCommand'
),
'awsbilling': (
'wavefront.awsbilling',
'AwsBillingMetricsCommand'
),
'awscloudwatch': (
'wavefront.awscloudwatch',
'AwsCloudwatchMetricsCommand'
),
'newrelic': (
'wavefront.newrelic',
'NewRelicMetricRetrieverCommand'
),
'systemchecker': (
'wavefront.system_checker',
'SystemCheckerCommand'
)
}
def parse_args():
"""
Parse user arguments and return as parser object.
"""
# there are 2 ways to configure this:
# 1 - run a single command via the command line
# 2 - run one or more commands via a configuration file
parser = argparse.ArgumentParser(description='Wavefront command line tool')
parser.add_argument('-c', help='Specify a configuration file',
dest='config')
parser.add_argument('--daemon', action='store_true', default=False,
help='Run in background (default is false)')
parser.add_argument('--out',
help=('The path to the file where stdout/stderr '
'should be redirected when running --daemon'))
parser.add_argument('--pid',
help='The path to the PID file when running --daemon')
parser.add_argument('--verbose', action='store_true', default=False,
help='More output')
args, _ = parser.parse_known_args()
if args.config:
print('Reading configuration file %s ...' % (args.config))
return WavefrontConfiguration(args.config, args)
parser = argparse.ArgumentParser(description='Wavefront command line tool')
subparsers = parser.add_subparsers(
dest='command',
help=('Available commands. Use \'wavefront <command name> -h\' to '
'get help on an individual command'))
#pylint: disable=bare-except
for command_name, details in INSTALLED_COMMANDS.iteritems():
try:
module = importlib.import_module(details[0])
except:
if args.verbose:
print('failed loading %s: %s' %
(command_name, str(sys.exc_info())))
traceback.print_exc()
continue
class_name = details[1]
command = getattr(module, class_name)(name=command_name)
subparser = subparsers.add_parser(command_name,
help=command.get_help_text())
command.add_arguments(subparser)
parser.add_argument('--verbose', action='store_true', default=False,
help='More output')
parser.add_argument('--daemon', action='store_true', default=False,
help='Run in background (default is false)')
parser.add_argument('--out', default='./wavefront.out',
help=('The path to the file where stdout/stderr '
'should be redirected when running --daemon'))
parser.add_argument('--pid', default='./wavefront.pid',
help='The path to the PID file when running --daemon')
parser.add_argument('--delay', default='0', type=float,
help=('The number of seconds to delay between each '
'execution when running --daemon'))
return parser.parse_args()
#pylint: disable=too-few-public-methods
class WavefrontThreadConfiguration(object):
"""
Simple object to wrap the configuration items of a "thread" group in
the wavefront.conf file
"""
def __init__(self, config, config_group):
self.command = config.get(config_group, 'command', None)
args = config.getlist(config_group, 'args', '')
self.verbose = config.verbose
self.command_object = get_command_object(self.command)
parser = argparse.ArgumentParser()
self.command_object.add_arguments(parser)
self.args, _ = parser.parse_known_args(args=args)
self.args.verbose = self.verbose
self.delay = int(config.get(config_group, 'delay', 0))
self.args.delay = self.delay
self.enabled = config.getboolean(config_group, 'enabled', True)
class WavefrontConfiguration(utils.Configuration):
"""
Configuration class wrapping the wavefront configuration file
"""
def __init__(self, config_file_path, command_line_args):
super(WavefrontConfiguration, self).__init__(
config_file_path=config_file_path)
if command_line_args.daemon:
self.daemon = command_line_args.daemon
else:
self.daemon = self.getboolean('global', 'daemon', False)
self.verbose = self.getboolean('global', 'verbose', False)
if command_line_args.out:
self.out = command_line_args.out
else:
self.out = self.get('global', 'out', 'wavefront.out')
if command_line_args.pid:
self.pid = command_line_args.pid
else:
self.pid = self.get('global', 'pid', 'wavefront.pid')
self.debug = self.getboolean('global', 'debug', False)
names = self.getlist('global', 'threads', [])
self.thread_configs = []
for name in names:
print('Loading thread %s' % (name.strip(),))
name = 'thread-' + name.strip()
self.thread_configs.append(WavefrontThreadConfiguration(self, name))
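# A sketch of the configuration file WavefrontConfiguration expects. Section
# and option names come from the lookups above; the thread name and command
# values are hypothetical examples.
#
#   [global]
#   daemon = true
#   out = ./wavefront.out
#   pid = ./wavefront.pid
#   threads = checker
#
#   [thread-checker]
#   command = systemchecker
#   args =
#   delay = 60
#   enabled = true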
#pylint: disable=broad-except
def main():
"""
Main function
"""
logging.basicConfig(format='%(levelname)s: %(message)s',
level=logging.INFO)
args = parse_args()
if args.daemon:
stdout = open(args.out, 'w+')
        print('Running in background. stdout/stderr being redirected to %s' %
              (args.out))
with daemon.DaemonContext(stdout=stdout, stderr=stdout,
pidfile=daemon.pidfile.PIDLockFile(args.pid),
working_directory='.'):
execute_commands(args)
else:
execute_commands(args)
def execute_commands(args):
"""
Executes all commands specified in the configuration file and command line
Arguments:
args - argparse object or WavefrontConfiguration
"""
logger = logging.getLogger()
utils.setup_signal_handlers(logger)
if isinstance(args, WavefrontConfiguration):
try:
logging.config.fileConfig(args.config_file_path)
except ConfigParser.NoSectionError:
pass
threads = []
for conf in args.thread_configs:
if not conf.enabled:
logger.info('Skipping disabled command \'%s\'', conf.command)
continue
targs = (conf.command, conf.args,)
thread = threading.Thread(target=execute_command, args=targs,
name=conf.command)
thread.daemon = True
threads.append(thread)
thread.start()
threads_alive = threads[:]
while threads_alive and not utils.CANCEL_WORKERS_EVENT.is_set():
for thread in threads:
if thread.is_alive():
thread.join(1)
elif thread in threads_alive:
threads_alive.remove(thread)
else:
execute_command(args.command, args)
logger.info('Done.')
def execute_command(command_name, args):
"""
Executes a single command (could be in a separate thread or main thread)
Arguments:
args - argparse object or WavefrontConfiguration
"""
try:
command_object = get_command_object(command_name)
command_object.verbose = args.verbose
command_object.execute(args)
except Exception as command_err:
if args is not None and args.verbose:
raise
print('Failed to execute command "%s": %s' %
(command_name, str(command_err)))
def get_command_object(command_name):
"""
Gets the command object from the command name
Arguments:
command_name - the installed commands command key
"""
if command_name in INSTALLED_COMMANDS:
details = INSTALLED_COMMANDS[command_name]
command_module = importlib.import_module(details[0])
class_name = details[1]
return getattr(command_module, class_name)(name=command_name)
else:
raise ValueError('Command ' + str(command_name) + ' not found')
if __name__ == '__main__':
main()
|
|
import numpy as NP
from astropy.io import fits
from astropy.io import ascii
import progressbar as PGB
import interferometry as RI
import ipdb as PDB
rootdir = '/data3/t_nithyanandan/'
# rootdir = '/data3/MWA/lstbin_RA0/NT/'
filenaming_convention = 'new'
# filenaming_convention = 'old'
project_MWA = False
project_LSTbin = False
project_HERA = True
project_beams = False
project_drift_scan = False
project_global_EoR = False
project_dir = ''
if project_MWA: project_dir = 'project_MWA/'
if project_LSTbin:
if rootdir == '/data3/t_nithyanandan/':
project_dir = 'project_LSTbin/'
if project_HERA: project_dir = 'project_HERA/'
if project_beams: project_dir = 'project_beams/'
if project_drift_scan: project_dir = 'project_drift_scan/'
if project_global_EoR: project_dir = 'project_global_EoR/'
telescope_id = 'custom'
element_size = 14.0
element_shape = 'dish'
phased_array = False
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
ground_plane = None # height of antenna element above ground plane in m
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
delayerr = 0.0 # delay error rms in ns
if delayerr is None:
delayerr_str = ''
delayerr = 0.0
elif delayerr < 0.0:
raise ValueError('delayerr must be non-negative.')
else:
delayerr_str = 'derr_{0:.3f}ns'.format(delayerr)
delayerr *= 1e-9
gainerr = 0.0 # Gain error rms in dB
if gainerr is None:
gainerr_str = ''
gainerr = 0.0
elif gainerr < 0.0:
raise ValueError('gainerr must be non-negative.')
else:
gainerr_str = '_gerr_{0:.2f}dB'.format(gainerr)
nrand = 1 # Number of random realizations
if nrand is None:
nrandom_str = ''
nrand = 1
elif nrand < 1:
raise ValueError('nrandom must be positive')
else:
nrandom_str = '_nrand_{0:0d}_'.format(nrand)
if (delayerr_str == '') and (gainerr_str == ''):
nrand = 1
nrandom_str = ''
delaygain_err_str = delayerr_str + gainerr_str + nrandom_str
# array_layout = 'CIRC'
# array_layout = 'MWA-128T'
array_layout = 'HERA-331'
minR = 141.0
maxR = None
if array_layout == 'MWA-128T':
ant_info = NP.loadtxt('/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt', skiprows=6, comments='#', usecols=(0,1,2,3))
ant_id = ant_info[:,0].astype(int).astype(str)
ant_locs = ant_info[:,1:]
elif array_layout == 'HERA-7':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=7)
elif array_layout == 'HERA-19':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=19)
elif array_layout == 'HERA-37':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=37)
elif array_layout == 'HERA-61':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=61)
elif array_layout == 'HERA-91':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=91)
elif array_layout == 'HERA-127':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=127)
elif array_layout == 'HERA-169':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=169)
elif array_layout == 'HERA-217':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=217)
elif array_layout == 'HERA-271':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=271)
elif array_layout == 'HERA-331':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=331)
elif array_layout == 'CIRC':
ant_locs, ant_id = RI.circular_antenna_array(element_size, minR, maxR=maxR)
bl, bl_id = RI.baseline_generator(ant_locs, ant_id=ant_id, auto=False, conjugate=False)
bl, select_bl_ind, bl_count = RI.uniq_baselines(bl)
bl_id = bl_id[select_bl_ind]
bl_length = NP.sqrt(NP.sum(bl**2, axis=1))
sortind = NP.argsort(bl_length, kind='mergesort')
bl = bl[sortind,:]
bl_id = bl_id[sortind]
bl_length = bl_length[sortind]
total_baselines = bl_length.size
n_bl_chunks = 64
baseline_chunk_size = 10
nside = 256
use_GSM = False
use_DSM = False
use_CSM = False
use_NVSS = False
use_SUMSS = False
use_MSS = False
use_GLEAM = False
use_PS = False
use_USM = False
use_HI_monopole = False
use_HI_fluctuations = False
use_HI_cube = True
if use_GSM:
fg_str = 'asm'
elif use_DSM:
fg_str = 'dsm'
elif use_CSM:
fg_str = 'csm'
elif use_SUMSS:
fg_str = 'sumss'
elif use_GLEAM:
fg_str = 'gleam'
elif use_PS:
fg_str = 'point'
elif use_NVSS:
fg_str = 'nvss'
elif use_USM:
fg_str = 'usm'
elif use_HI_monopole:
fg_str = 'HI_monopole'
elif use_HI_fluctuations:
fg_str = 'HI_fluctuations'
elif use_HI_cube:
fg_str = 'HI_cube'
else:
fg_str = 'other'
if use_HI_monopole:
bllstr = map(str, bl_length)
uniq_bllstr, ind_uniq_bll = NP.unique(bllstr, return_index=True)
count_uniq_bll = [bllstr.count(ubll) for ubll in uniq_bllstr]
count_uniq_bll = NP.asarray(count_uniq_bll)
bl = bl[ind_uniq_bll,:]
bl_id = bl_id[ind_uniq_bll]
bl_length = bl_length[ind_uniq_bll]
sortind = NP.argsort(bl_length, kind='mergesort')
bl = bl[sortind,:]
bl_id = bl_id[sortind]
bl_length = bl_length[sortind]
count_uniq_bll = count_uniq_bll[sortind]
total_baselines = bl_length.size
baseline_bin_indices = range(0, int(NP.ceil(1.0*total_baselines/baseline_chunk_size)+1)*baseline_chunk_size, baseline_chunk_size)
n_bl_chunks = int(NP.ceil(1.0*total_baselines/baseline_chunk_size))
bl_chunk = range(len(baseline_bin_indices)-1)
bl_chunk = bl_chunk[:n_bl_chunks]
bl = bl[:min(baseline_bin_indices[n_bl_chunks], total_baselines),:]
bl_length = bl_length[:min(baseline_bin_indices[n_bl_chunks], total_baselines)]
bl_id = bl_id[:min(baseline_bin_indices[n_bl_chunks], total_baselines)]
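# Worked example (illustration only) of the chunking above: with
# total_baselines = 55 and baseline_chunk_size = 10, baseline_bin_indices is
# range(0, 70, 10) = [0, 10, 20, 30, 40, 50, 60], n_bl_chunks becomes
# ceil(55/10) = 6, and the last chunk holds the remaining 5 baselines.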
Tsys = 300.0 # System temperature in K
freq = 150.0 * 1e6 # foreground center frequency in Hz
freq_resolution = 160e3 # in Hz
bpass_shape = 'rect'
f_pad = 1.0
oversampling_factor = 1.0 + f_pad
n_channels = 128
nchan = n_channels
bandpass_str = '{0:0d}x{1:.1f}_kHz'.format(nchan, freq_resolution/1e3)
use_pfb = False
pfb_instr = ''
pfb_outstr = ''
if not use_pfb:
pfb_instr = 'no_pfb_'
pfb_outstr = '_no_pfb'
obs_mode = 'drift'
# obs_mode = 'custom'
# obs_mode = 'lstbin'
avg_drifts = False
beam_switch = False
snapshots_range = None
snapshot_type_str = ''
if avg_drifts:
snapshot_type_str = 'drift_averaged_'
if beam_switch:
snapshot_type_str = 'beam_switches_'
if snapshots_range is not None:
snapshot_type_str = 'snaps_{0[0]:0d}-{0[1]:0d}_'.format(snapshots_range)
duration_str = ''
if obs_mode in ['track', 'drift']:
t_snap = 1080.0 # in seconds
n_snaps = 80
if (t_snap is not None) and (n_snaps is not None):
duration_str = '_{0:0d}x{1:.1f}s'.format(n_snaps, t_snap)
n_sky_sectors = 1
spindex_rms = 0.0
spindex_seed = None
spindex_seed_str = ''
if spindex_rms > 0.0:
spindex_rms_str = '{0:.1f}'.format(spindex_rms)
else:
spindex_rms = 0.0
if spindex_seed is not None:
spindex_seed_str = '{0:0d}_'.format(spindex_seed)
for k in range(n_sky_sectors):
if n_sky_sectors == 1:
sky_sector_str = '_all_sky_'
else:
sky_sector_str = '_sky_sector_{0:0d}_'.format(k)
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=n_bl_chunks).start()
for i in range(0, n_bl_chunks):
if filenaming_convention == 'old':
infile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[bl_chunk[i]]],bl_length[min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz_'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+pfb_instr+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(i)
else:
infile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[bl_chunk[i]]],bl_length[min(baseline_bin_indices[bl_chunk[i]]+baseline_chunk_size-1,total_baselines-1)])+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+pfb_instr+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(i)
# infile = '/data3/t_nithyanandan/project_MWA/multi_baseline_visibilities_'+avg_drifts_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[i]],bl_length[min(baseline_bin_indices[i]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+'_{0:0d}_'.format(nside)+'{0:.1f}_MHz_'.format(nchan*freq_resolution/1e6)+bpass_shape+'{0:.1f}'.format(oversampling_factor)+'_part_{0:0d}'.format(i)
if i == 0:
ia = RI.InterferometerArray(None, None, None, init_file=infile+'.fits')
else:
ia_next = RI.InterferometerArray(None, None, None, init_file=infile+'.fits')
ia.concatenate(ia_next, axis=0)
progress.update(i+1)
progress.finish()
if filenaming_convention == 'old':
outfile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[0]],bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'gaussian_FG_model_'+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+pfb_outstr
else:
outfile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[0]],bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz'.format(Tsys, bandpass_str, freq/1e6)+pfb_outstr
ia.save(outfile, verbose=True, tabtype='BinTableHDU', overwrite=True)
|
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from nova import context
from nova.db import base
from nova import exception
from nova.i18n import _LE, _LI, _LW
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import quota
from nova import rpc
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
floating_opts = [
cfg.StrOpt('default_floating_pool',
default='nova',
help='Default pool for floating IPs'),
cfg.BoolOpt('auto_assign_floating_ip',
default=False,
help='Autoassigning floating IP to VM'),
cfg.StrOpt('floating_ip_dns_manager',
default='nova.network.noop_dns_driver.NoopDNSDriver',
help='Full class name for the DNS Manager for floating IPs'),
cfg.StrOpt('instance_dns_manager',
default='nova.network.noop_dns_driver.NoopDNSDriver',
help='Full class name for the DNS Manager for instance IPs'),
cfg.StrOpt('instance_dns_domain',
default='',
help='Full class name for the DNS Zone for instance IPs'),
]
CONF = cfg.CONF
CONF.register_opts(floating_opts)
CONF.import_opt('public_interface', 'nova.network.linux_net')
CONF.import_opt('network_topic', 'nova.network.rpcapi')
class FloatingIP(object):
"""Mixin class for adding floating IP functionality to a manager."""
servicegroup_api = None
def init_host_floating_ips(self):
"""Configures floating ips owned by host."""
admin_context = context.get_admin_context()
try:
floating_ips = objects.FloatingIPList.get_by_host(admin_context,
self.host)
except exception.NotFound:
return
for floating_ip in floating_ips:
if floating_ip.fixed_ip_id:
try:
fixed_ip = floating_ip.fixed_ip
except exception.FixedIpNotFound:
LOG.debug('Fixed ip %s not found', floating_ip.fixed_ip_id)
continue
interface = CONF.public_interface or floating_ip.interface
try:
self.l3driver.add_floating_ip(floating_ip.address,
fixed_ip.address,
interface,
fixed_ip.network)
except processutils.ProcessExecutionError:
LOG.debug('Interface %s not found', interface)
raise exception.NoFloatingIpInterface(interface=interface)
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the floating IP resources for an instance.
calls super class allocate_for_instance() as well
rpc.called by network_api
"""
instance_uuid = kwargs.get('instance_id')
if not uuidutils.is_uuid_like(instance_uuid):
instance_uuid = kwargs.get('instance_uuid')
project_id = kwargs.get('project_id')
# call the next inherited class's allocate_for_instance()
# which is currently the NetworkManager version
# do this first so fixed ip is already allocated
nw_info = super(FloatingIP, self).allocate_for_instance(context,
**kwargs)
if CONF.auto_assign_floating_ip:
context = context.elevated()
# allocate a floating ip
floating_address = self.allocate_floating_ip(context, project_id,
True)
LOG.debug("floating IP allocation for instance "
"|%s|", floating_address,
instance_uuid=instance_uuid, context=context)
# get the first fixed address belonging to the instance
fixed_ips = nw_info.fixed_ips()
fixed_address = fixed_ips[0]['address']
# associate the floating ip to fixed_ip
self.associate_floating_ip(context,
floating_address,
fixed_address,
affect_auto_assigned=True)
# create a fresh set of network info that contains the floating ip
nw_info = self.get_instance_nw_info(context, **kwargs)
return nw_info
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating floating IP resources for an instance.
calls super class deallocate_for_instance() as well.
rpc.called by network_api
"""
if 'instance' in kwargs:
instance_uuid = kwargs['instance'].uuid
else:
instance_uuid = kwargs['instance_id']
if not uuidutils.is_uuid_like(instance_uuid):
# NOTE(francois.charlier): in some cases the instance might be
# deleted before the IPs are released, so we need to get
# deleted instances too
instance = objects.Instance.get_by_id(
context.elevated(read_deleted='yes'), instance_uuid)
instance_uuid = instance.uuid
try:
fixed_ips = objects.FixedIPList.get_by_instance_uuid(
context, instance_uuid)
except exception.FixedIpNotFoundForInstance:
fixed_ips = []
# add to kwargs so we can pass to super to save a db lookup there
kwargs['fixed_ips'] = fixed_ips
for fixed_ip in fixed_ips:
fixed_id = fixed_ip.id
floating_ips = objects.FloatingIPList.get_by_fixed_ip_id(context,
fixed_id)
# disassociate floating ips related to fixed_ip
for floating_ip in floating_ips:
address = str(floating_ip.address)
try:
self.disassociate_floating_ip(context,
address,
affect_auto_assigned=True)
except exception.FloatingIpNotAssociated:
LOG.info(_LI("Floating IP %s is not associated. Ignore."),
address)
# deallocate if auto_assigned
if floating_ip.auto_assigned:
self.deallocate_floating_ip(context, address,
affect_auto_assigned=True)
# call the next inherited class's deallocate_for_instance()
# which is currently the NetworkManager version
# call this after so floating IPs are handled first
super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
def _floating_ip_owned_by_project(self, context, floating_ip):
"""Raises if floating ip does not belong to project."""
if context.is_admin:
return
if floating_ip.project_id != context.project_id:
if floating_ip.project_id is None:
LOG.warning(_LW('Address |%(address)s| is not allocated'),
{'address': floating_ip.address})
raise exception.Forbidden()
else:
LOG.warning(_LW('Address |%(address)s| is not allocated '
'to your project |%(project)s|'),
{'address': floating_ip.address,
'project': context.project_id})
raise exception.Forbidden()
def _floating_ip_pool_exists(self, context, name):
"""Returns true if the specified floating ip pool exists. Otherwise,
returns false.
"""
pools = [pool.get('name') for pool in
self.get_floating_ip_pools(context)]
        return name in pools
def allocate_floating_ip(self, context, project_id, auto_assigned=False,
pool=None):
"""Gets a floating ip from the pool."""
# NOTE(tr3buchet): all network hosts in zone now use the same pool
pool = pool or CONF.default_floating_pool
use_quota = not auto_assigned
if not self._floating_ip_pool_exists(context, pool):
raise exception.FloatingIpPoolNotFound()
        # Check the quota; can't put this in the API because we get
        # called from other places
try:
if use_quota:
reservations = QUOTAS.reserve(context, floating_ips=1,
project_id=project_id)
except exception.OverQuota:
LOG.warning(_LW("Quota exceeded for %s, tried to allocate "
"floating IP"), context.project_id)
raise exception.FloatingIpLimitExceeded()
try:
floating_ip = objects.FloatingIP.allocate_address(
context, project_id, pool, auto_assigned=auto_assigned)
payload = dict(project_id=project_id, floating_ip=floating_ip)
self.notifier.info(context,
'network.floating_ip.allocate', payload)
# Commit the reservations
if use_quota:
QUOTAS.commit(context, reservations, project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
if use_quota:
QUOTAS.rollback(context, reservations,
project_id=project_id)
return floating_ip
@messaging.expected_exceptions(exception.FloatingIpNotFoundForAddress)
def deallocate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Returns a floating ip to the pool."""
floating_ip = objects.FloatingIP.get_by_address(context, address)
# handle auto_assigned
if not affect_auto_assigned and floating_ip.auto_assigned:
return
use_quota = not floating_ip.auto_assigned
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# make sure floating ip is not associated
if floating_ip.fixed_ip_id:
floating_address = floating_ip.address
raise exception.FloatingIpAssociated(address=floating_address)
# clean up any associated DNS entries
self._delete_all_entries_for_ip(context,
floating_ip.address)
payload = dict(project_id=floating_ip.project_id,
floating_ip=str(floating_ip.address))
self.notifier.info(context, 'network.floating_ip.deallocate', payload)
project_id = floating_ip.project_id
# Get reservations...
try:
if use_quota:
reservations = QUOTAS.reserve(context,
project_id=project_id,
floating_ips=-1)
else:
reservations = None
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deallocating "
"floating IP"))
rows_updated = objects.FloatingIP.deallocate(context, address)
# number of updated rows will be 0 if concurrently another
# API call has also deallocated the same floating ip
if not rows_updated:
if reservations:
QUOTAS.rollback(context, reservations, project_id=project_id)
else:
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
@messaging.expected_exceptions(exception.FloatingIpNotFoundForAddress)
def associate_floating_ip(self, context, floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
Makes sure everything makes sense then calls _associate_floating_ip,
rpc'ing to correct host if i'm not it.
Access to the floating_address is verified but access to the
        fixed_address is not verified. This assumes that the calling
side has already verified that the fixed_address is legal by
checking access to the instance.
"""
floating_ip = objects.FloatingIP.get_by_address(context,
floating_address)
# handle auto_assigned
if not affect_auto_assigned and floating_ip.auto_assigned:
return
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# disassociate any already associated
orig_instance_uuid = None
if floating_ip.fixed_ip_id:
# find previously associated instance
fixed_ip = floating_ip.fixed_ip
if str(fixed_ip.address) == fixed_address:
# NOTE(vish): already associated to this address
return
orig_instance_uuid = fixed_ip.instance_uuid
self.disassociate_floating_ip(context, floating_address)
fixed_ip = objects.FixedIP.get_by_address(context, fixed_address)
# send to correct host, unless i'm the correct host
network = objects.Network.get_by_id(context.elevated(),
fixed_ip.network_id)
if network.multi_host:
instance = objects.Instance.get_by_uuid(
context, fixed_ip.instance_uuid)
host = instance.host
else:
host = network.host
interface = floating_ip.interface
if host == self.host:
# i'm the correct host
self._associate_floating_ip(context, floating_address,
fixed_address, interface,
fixed_ip.instance_uuid)
else:
# send to correct host
self.network_rpcapi._associate_floating_ip(context,
floating_address, fixed_address, interface, host,
fixed_ip.instance_uuid)
return orig_instance_uuid
def _associate_floating_ip(self, context, floating_address, fixed_address,
interface, instance_uuid):
"""Performs db and driver calls to associate floating ip & fixed ip."""
interface = CONF.public_interface or interface
        @utils.synchronized(six.text_type(floating_address))
def do_associate():
# associate floating ip
floating = objects.FloatingIP.associate(context, floating_address,
fixed_address, self.host)
fixed = floating.fixed_ip
if not fixed:
# NOTE(vish): ip was already associated
return
try:
# gogo driver time
self.l3driver.add_floating_ip(floating_address, fixed_address,
interface, fixed['network'])
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
try:
objects.FloatingIP.disassociate(context,
floating_address)
except Exception:
                        LOG.warning(_LW('Failed to disassociate floating '
                                        'address: %s'), floating_address)
if "Cannot find device" in six.text_type(e):
try:
LOG.error(_LE('Interface %s not found'), interface)
except Exception:
pass
raise exception.NoFloatingIpInterface(
interface=interface)
payload = dict(project_id=context.project_id,
instance_id=instance_uuid,
floating_ip=floating_address)
self.notifier.info(context,
'network.floating_ip.associate', payload)
do_associate()
@messaging.expected_exceptions(exception.FloatingIpNotFoundForAddress)
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from its fixed ip.
Makes sure everything makes sense then calls _disassociate_floating_ip,
rpc'ing to correct host if i'm not it.
"""
floating_ip = objects.FloatingIP.get_by_address(context, address)
# handle auto assigned
if not affect_auto_assigned and floating_ip.auto_assigned:
raise exception.CannotDisassociateAutoAssignedFloatingIP()
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# make sure floating ip is associated
if not floating_ip.fixed_ip_id:
floating_address = floating_ip.address
raise exception.FloatingIpNotAssociated(address=floating_address)
fixed_ip = objects.FixedIP.get_by_id(context, floating_ip.fixed_ip_id)
# send to correct host, unless i'm the correct host
network = objects.Network.get_by_id(context.elevated(),
fixed_ip.network_id)
interface = floating_ip.interface
if network.multi_host:
instance = objects.Instance.get_by_uuid(
context, fixed_ip.instance_uuid)
service = objects.Service.get_by_host_and_topic(
context.elevated(), instance.host, CONF.network_topic)
if service and self.servicegroup_api.service_is_up(service):
host = instance.host
else:
# NOTE(vish): if the service is down just deallocate the data
# locally. Set the host to local so the call will
# not go over rpc and set interface to None so the
# teardown in the driver does not happen.
host = self.host
interface = None
else:
host = network.host
if host == self.host:
# i'm the correct host
self._disassociate_floating_ip(context, address, interface,
fixed_ip.instance_uuid)
else:
# send to correct host
self.network_rpcapi._disassociate_floating_ip(context, address,
interface, host, fixed_ip.instance_uuid)
def _disassociate_floating_ip(self, context, address, interface,
instance_uuid):
"""Performs db and driver calls to disassociate floating ip."""
interface = CONF.public_interface or interface
        @utils.synchronized(six.text_type(address))
def do_disassociate():
# NOTE(vish): Note that we are disassociating in the db before we
# actually remove the ip address on the host. We are
# safe from races on this host due to the decorator,
# but another host might grab the ip right away. We
# don't worry about this case because the minuscule
# window where the ip is on both hosts shouldn't cause
# any problems.
floating = objects.FloatingIP.disassociate(context, address)
fixed = floating.fixed_ip
if not fixed:
# NOTE(vish): ip was already disassociated
return
if interface:
# go go driver time
self.l3driver.remove_floating_ip(address, fixed.address,
interface, fixed.network)
payload = dict(project_id=context.project_id,
instance_id=instance_uuid,
floating_ip=address)
self.notifier.info(context,
'network.floating_ip.disassociate', payload)
do_disassociate()
@messaging.expected_exceptions(exception.FloatingIpNotFound)
def get_floating_ip(self, context, id):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
return dict(objects.FloatingIP.get_by_id(context, id).iteritems())
def get_floating_pools(self, context):
"""Returns list of floating pools."""
# NOTE(maurosr) This method should be removed in future, replaced by
# get_floating_ip_pools. See bug #1091668
return self.get_floating_ip_pools(context)
def get_floating_ip_pools(self, context):
"""Returns list of floating ip pools."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
pools = objects.FloatingIP.get_pool_names(context)
return [dict(name=name) for name in pools]
def get_floating_ip_by_address(self, context, address):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
return objects.FloatingIP.get_by_address(context, address)
def get_floating_ips_by_project(self, context):
"""Returns the floating IPs allocated to a project."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
return objects.FloatingIPList.get_by_project(context,
context.project_id)
def get_floating_ips_by_fixed_address(self, context, fixed_address):
"""Returns the floating IPs associated with a fixed_address."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
floating_ips = objects.FloatingIPList.get_by_fixed_address(
context, fixed_address)
return [str(floating_ip.address) for floating_ip in floating_ips]
def _is_stale_floating_ip_address(self, context, floating_ip):
try:
self._floating_ip_owned_by_project(context, floating_ip)
except exception.Forbidden:
return True
return False if floating_ip.get('fixed_ip_id') else True
def migrate_instance_start(self, context, instance_uuid,
floating_addresses,
rxtx_factor=None, project_id=None,
source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
if not floating_addresses or (source and source == dest):
return
LOG.info(_LI("Starting migration network for instance %s"),
instance_uuid)
for address in floating_addresses:
floating_ip = objects.FloatingIP.get_by_address(context, address)
if self._is_stale_floating_ip_address(context, floating_ip):
LOG.warning(_LW("Floating ip address |%(address)s| no longer "
"belongs to instance %(instance_uuid)s. "
"Will not migrate it "),
{'address': address,
'instance_uuid': instance_uuid})
continue
interface = CONF.public_interface or floating_ip.interface
fixed_ip = floating_ip.fixed_ip
self.l3driver.remove_floating_ip(floating_ip.address,
fixed_ip.address,
interface,
fixed_ip.network)
            # NOTE(wenjianhn): Make sure this address will not be bound to the
            # public interface when nova-network restarts on the dest node
floating_ip.host = None
floating_ip.save()
def migrate_instance_finish(self, context, instance_uuid,
floating_addresses, host=None,
rxtx_factor=None, project_id=None,
source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
if host and not dest:
dest = host
if not floating_addresses or (source and source == dest):
return
LOG.info(_LI("Finishing migration network for instance %s"),
instance_uuid)
for address in floating_addresses:
floating_ip = objects.FloatingIP.get_by_address(context, address)
if self._is_stale_floating_ip_address(context, floating_ip):
LOG.warning(_LW("Floating ip address |%(address)s| no longer "
"belongs to instance %(instance_uuid)s. "
"Will not setup it."),
{'address': address,
'instance_uuid': instance_uuid})
continue
floating_ip.host = dest
floating_ip.save()
interface = CONF.public_interface or floating_ip.interface
fixed_ip = floating_ip.fixed_ip
self.l3driver.add_floating_ip(floating_ip.address,
fixed_ip.address,
interface,
fixed_ip.network)
def _prepare_domain_entry(self, context, domainref):
scope = domainref.scope
if scope == 'private':
this_domain = {'domain': domainref.domain,
'scope': scope,
'availability_zone': domainref.availability_zone}
else:
this_domain = {'domain': domainref.domain,
'scope': scope,
'project': domainref.project_id}
return this_domain
def get_dns_domains(self, context):
domains = []
domain_list = objects.DNSDomainList.get_all(context)
floating_driver_domain_list = self.floating_dns_manager.get_domains()
instance_driver_domain_list = self.instance_dns_manager.get_domains()
for dns_domain in domain_list:
if (dns_domain.domain in floating_driver_domain_list or
dns_domain.domain in instance_driver_domain_list):
domain_entry = self._prepare_domain_entry(context,
dns_domain)
if domain_entry:
domains.append(domain_entry)
else:
LOG.warning(_LW('Database inconsistency: DNS domain |%s| is '
'registered in the Nova db but not visible to '
'either the floating or instance DNS driver. '
'It will be ignored.'), dns_domain.domain)
return domains
def add_dns_entry(self, context, address, name, dns_type, domain):
self.floating_dns_manager.create_entry(name, address,
dns_type, domain)
def modify_dns_entry(self, context, address, name, domain):
self.floating_dns_manager.modify_address(name, address,
domain)
def delete_dns_entry(self, context, name, domain):
self.floating_dns_manager.delete_entry(name, domain)
def _delete_all_entries_for_ip(self, context, address):
domain_list = self.get_dns_domains(context)
for domain in domain_list:
names = self.get_dns_entries_by_address(context,
address,
domain['domain'])
for name in names:
self.delete_dns_entry(context, name, domain['domain'])
def get_dns_entries_by_address(self, context, address, domain):
return self.floating_dns_manager.get_entries_by_address(address,
domain)
def get_dns_entries_by_name(self, context, name, domain):
return self.floating_dns_manager.get_entries_by_name(name,
domain)
def create_private_dns_domain(self, context, domain, av_zone):
objects.DNSDomain.register_for_zone(context, domain, av_zone)
try:
self.instance_dns_manager.create_domain(domain)
except exception.FloatingIpDNSExists:
LOG.warning(_LW('Domain |%(domain)s| already exists, '
'changing zone to |%(av_zone)s|.'),
{'domain': domain, 'av_zone': av_zone})
def create_public_dns_domain(self, context, domain, project):
objects.DNSDomain.register_for_project(context, domain, project)
try:
self.floating_dns_manager.create_domain(domain)
except exception.FloatingIpDNSExists:
LOG.warning(_LW('Domain |%(domain)s| already exists, '
'changing project to |%(project)s|.'),
{'domain': domain, 'project': project})
def delete_dns_domain(self, context, domain):
objects.DNSDomain.delete_by_domain(context, domain)
self.floating_dns_manager.delete_domain(domain)
class LocalManager(base.Base, FloatingIP):
def __init__(self):
super(LocalManager, self).__init__()
# NOTE(vish): setting the host to none ensures that the actual
# l3driver commands for l3 are done via rpc.
self.host = None
self.servicegroup_api = servicegroup.API()
self.network_rpcapi = network_rpcapi.NetworkAPI()
self.floating_dns_manager = importutils.import_object(
CONF.floating_ip_dns_manager)
self.instance_dns_manager = importutils.import_object(
CONF.instance_dns_manager)
self.notifier = rpc.get_notifier('network', CONF.host)
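# A minimal sketch of exercising the mixin through LocalManager (illustrative
# only; the project id below is a hypothetical placeholder, and real
# deployments drive these methods over RPC rather than directly):
#
#     manager = LocalManager()
#     ctxt = context.get_admin_context()
#     addr = manager.allocate_floating_ip(ctxt, 'some-project-id')
#     manager.deallocate_floating_ip(ctxt, addr)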
|
|
#!/usr/bin/env python
"""
Parse training log
Evolved from parse_log.sh
"""
import os
import re
import extract_seconds
import argparse
import csv
from collections import OrderedDict
def parse_log(path_to_log):
"""Parse log file
Returns (train_dict_list, test_dict_list)
train_dict_list and test_dict_list are lists of dicts that define the table
rows
"""
    regex_iteration = re.compile(r'Iteration (\d+)')
    regex_train_output = re.compile(r'Train net output #(\d+): (\S+) = ([\.\deE+-]+)')
    regex_test_output = re.compile(r'Test net output #(\d+): (\S+) = ([\.\deE+-]+)')
    regex_learning_rate = re.compile(r'lr = ([-+]?[0-9]*\.?[0-9]+([eE]?[-+]?[0-9]+)?)')
# Pick out lines of interest
iteration = -1
learning_rate = float('NaN')
train_dict_list = []
test_dict_list = []
train_row = None
test_row = None
logfile_year = extract_seconds.get_log_created_year(path_to_log)
with open(path_to_log) as f:
start_time = extract_seconds.get_start_time(f, logfile_year)
last_time = start_time
for line in f:
iteration_match = regex_iteration.search(line)
if iteration_match:
iteration = float(iteration_match.group(1))
if iteration == -1:
# Only start parsing for other stuff if we've found the first
# iteration
continue
try:
time = extract_seconds.extract_datetime_from_line(line,
logfile_year)
except ValueError:
# Skip lines with bad formatting, for example when resuming solver
continue
# if it's another year
if time.month < last_time.month:
logfile_year += 1
time = extract_seconds.extract_datetime_from_line(line, logfile_year)
last_time = time
seconds = (time - start_time).total_seconds()
learning_rate_match = regex_learning_rate.search(line)
if learning_rate_match:
learning_rate = float(learning_rate_match.group(1))
train_dict_list, train_row = parse_line_for_net_output(
regex_train_output, train_row, train_dict_list,
line, iteration, seconds, learning_rate
)
test_dict_list, test_row = parse_line_for_net_output(
regex_test_output, test_row, test_dict_list,
line, iteration, seconds, learning_rate
)
fix_initial_nan_learning_rate(train_dict_list)
fix_initial_nan_learning_rate(test_dict_list)
return train_dict_list, test_dict_list
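# A minimal usage sketch (the log path is hypothetical; assumes a standard
# Caffe log containing "Iteration N" lines):
#
#     train_rows, test_rows = parse_log('/tmp/caffe.INFO')
#     # each row is an OrderedDict such as
#     # OrderedDict([('NumIters', 100.0), ('Seconds', 12.3),
#     #              ('LearningRate', 0.01), ('loss', 0.85)])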
def parse_line_for_net_output(regex_obj, row, row_dict_list,
line, iteration, seconds, learning_rate):
"""Parse a single line for training or test output
    Returns a tuple with (row_dict_list, row)
row: may be either a new row or an augmented version of the current row
row_dict_list: may be either the current row_dict_list or an augmented
version of the current row_dict_list
"""
output_match = regex_obj.search(line)
if output_match:
if not row or row['NumIters'] != iteration:
# Push the last row and start a new one
if row:
# If we're on a new iteration, push the last row
# This will probably only happen for the first row; otherwise
# the full row checking logic below will push and clear full
# rows
row_dict_list.append(row)
row = OrderedDict([
('NumIters', iteration),
('Seconds', seconds),
('LearningRate', learning_rate)
])
# output_num is not used; may be used in the future
# output_num = output_match.group(1)
output_name = output_match.group(2)
output_val = output_match.group(3)
row[output_name] = float(output_val)
if row and len(row_dict_list) >= 1 and len(row) == len(row_dict_list[0]):
# The row is full, based on the fact that it has the same number of
# columns as the first row; append it to the list
row_dict_list.append(row)
row = None
return row_dict_list, row
def fix_initial_nan_learning_rate(dict_list):
"""Correct initial value of learning rate
Learning rate is normally not printed until after the initial test and
training step, which means the initial testing and training rows have
LearningRate = NaN. Fix this by copying over the LearningRate from the
second row, if it exists.
"""
if len(dict_list) > 1:
dict_list[0]['LearningRate'] = dict_list[1]['LearningRate']
def save_csv_files(logfile_path, output_dir, train_dict_list, test_dict_list,
delimiter=',', verbose=False):
"""Save CSV files to output_dir
If the input log file is, e.g., caffe.INFO, the names will be
caffe.INFO.train and caffe.INFO.test
"""
log_basename = os.path.basename(logfile_path)
train_filename = os.path.join(output_dir, log_basename + '.train')
write_csv(train_filename, train_dict_list, delimiter, verbose)
test_filename = os.path.join(output_dir, log_basename + '.test')
write_csv(test_filename, test_dict_list, delimiter, verbose)
def write_csv(output_filename, dict_list, delimiter, verbose=False):
"""Write a CSV file
"""
if not dict_list:
if verbose:
print('Not writing %s; no lines to write' % output_filename)
return
    # pass the delimiter straight to DictWriter instead of mutating the
    # csv.excel class, which would leak the setting into other csv users
    with open(output_filename, 'w') as f:
        dict_writer = csv.DictWriter(f, fieldnames=dict_list[0].keys(),
                                     dialect=csv.excel, delimiter=delimiter)
dict_writer.writeheader()
dict_writer.writerows(dict_list)
if verbose:
        print('Wrote %s' % output_filename)
def parse_args():
description = ('Parse a Caffe training log into two CSV files '
'containing training and testing information')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('logfile_path',
help='Path to log file')
parser.add_argument('output_dir',
help='Directory in which to place output CSV files')
parser.add_argument('--verbose',
action='store_true',
help='Print some extra info (e.g., output filenames)')
parser.add_argument('--delimiter',
default=',',
help=('Column delimiter in output files '
'(default: \'%(default)s\')'))
args = parser.parse_args()
return args
def main():
args = parse_args()
train_dict_list, test_dict_list = parse_log(args.logfile_path)
save_csv_files(args.logfile_path, args.output_dir, train_dict_list,
test_dict_list, delimiter=args.delimiter)
if __name__ == '__main__':
main()
|
|
import json
import logging
import os
import re
from cerberus import Validator
from query_dsl import dsl as query_dsl
from schema import entity_types
logger = logging.getLogger('grafanizer.query')
# Dictionary to save queries. Should be updated with functions from
# this module.
_QUERIES = {}
class QueryResult(object):
"""
Models a query result
"""
def __init__(self, entity):
self.entity = entity
self.check_tuples = []
def add(self, check, metrics):
"""
Adds a check and metrics pair
@param check
@param metrics - List of metrics
"""
self.check_tuples.append((check, metrics))
def __iter__(self):
for c, metrics in self.check_tuples:
for m in metrics:
yield (self.entity, c, m)
def __nonzero__(self):
        total = sum(len(metrics) for check, metrics in self.check_tuples)
        return total > 0
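# Sketch of how a QueryResult flattens into (entity, check, metric) triples
# (entity/check/metric names are hypothetical placeholders):
#
#     result = QueryResult(entity)
#     result.add(check, [metric_a, metric_b])
#     list(result)  # -> [(entity, check, metric_a), (entity, check, metric_b)]
#     bool(result)  # -> True, since at least one metric matched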
class Query(object):
"""
Models a query.
"""
def __init__(self, string):
"""
Inits the query
@param string - String
"""
self.tokens = query_dsl.parseString(string, parseAll=True)
def query(self, entity):
"""
Returns a tuple with the entity, check, metric if the query is
        successful. Returns None otherwise
@param entity - Entity model instance
@returns - tuple | None
"""
        # Don't even bother with entities without checks
if not len(entity.checks):
return None
result = None
metric_qs = self.tokens[2] if len(self.tokens) >= 3 else []
check_qs = self.tokens[1] if len(self.tokens) >= 2 else []
entity_qs = self.tokens[0] if len(self.tokens) >= 1 else []
# Qualify the entity
for e_q in entity_qs:
# Empty entity expression, continue
if not e_q:
continue
# Check on type expression
if e_q[0] == 'type':
type_query = get_query(e_q[1])
if not type_query:
logger.warn("Couldnt find query for %s" % e_q[1])
return None
result = type_query.query(entity)
if not result:
return None
continue
# Should be an attribute expression if not empty
# and not type expression
attr, func, value = e_q
func = getattr(self, func)
if not func(value, getattr(entity, attr)):
return None
# Return if we only have entity type qualifying statements
# without check and metric qualifying statements.
if result and not check_qs and not metric_qs:
return result
else:
result = QueryResult(entity)
# Qualify the checks
checks = entity.checks
for c_q in check_qs:
if not c_q:
continue
attr, func, value = c_q
func = getattr(self, func)
checks = [c for c in checks if func(value, getattr(c, attr))]
if not checks:
return None
# Qualify the metrics
for c in checks:
metrics = c.metrics
for m_q in metric_qs:
if not m_q:
continue
attr, func, value = m_q
func = getattr(self, func)
metrics = [m for m in metrics if func(value, getattr(m, attr))]
if metrics:
result.add(c, metrics)
return result
def full(self, needle, haystack):
"""
Returns whether or not needle is equal to haystack
@param needle - String
@param haystack - String
@return - Boolean
"""
return needle.lower() == haystack.lower()
def startswith(self, needle, haystack):
"""
        Returns whether or not haystack starts with needle
@param needle - String
@param haystack - String
@return - Boolean
"""
return haystack.lower().startswith(needle.lower())
def endswith(self, needle, haystack):
"""
Returns whether or not haystack endswith needle
@param needle - String
@param haystack - String
@return - Boolean
"""
return haystack.lower().endswith(needle.lower())
def contains(self, needle, haystack):
"""
Returns whether or not haystack contains needle
@param needle - String
@param haystack - String
@return - Boolean
"""
return needle.lower() in haystack.lower()
def regex(self, needle, haystack):
"""
Returns whether or not the regex needle matches haystack
@param needle - String
@param haystack - String
@return - Boolean
"""
regex = re.compile(needle)
        return bool(regex.match(haystack.lower()))
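    # Note on the helpers above: the haystack is lowercased before testing,
    # but in regex() the needle is compiled as-is, so patterns should be
    # written in lowercase (or compiled with re.IGNORECASE) to behave like
    # the other comparison helpers.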
def save_query(name, query):
"""
Saves a query with key of name
@param name - String
@param query - Query instance
"""
_QUERIES[name] = query
def get_query(name):
"""
Returns a saved query.
@param name - String
@return - Query | None
"""
return _QUERIES.get(name)
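# Minimal sketch of the save/get round trip (the query string is
# hypothetical; the actual grammar is defined in query_dsl and not shown
# here):
#
#     save_query('linux', Query('<some entity expression>'))
#     q = get_query('linux')   # -> the saved Query instance
#     get_query('missing')     # -> None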
def load_entity_types(path):
"""
Entity types are basically queries.
    If a query on an entity returns a non-None result,
    that entity is considered that type.
    Types are saved as queries named by the type in the module variable
_QUERIES.
@param path - String - Path to look for entity_types or entity_types.json
"""
files = ['entity_types.json', 'entity_types']
files = [os.path.join(path, f) for f in files]
v = Validator(entity_types)
for f in files:
if os.path.isfile(f):
logger.debug("Checking for entity types in %s" % f)
try:
with open(f, 'r') as infile:
data = json.loads(infile.read())
if not v.validate(data):
logger.info("Invalid entity types:")
logger.info(v.errors)
continue
types = data['types']
for name, querystring in types.iteritems():
save_query(name, Query(querystring))
logger.debug("Created type: %s" % name)
except ValueError:
logger.info(
"Error loading entity types: File %s has invalid json" % f
)
except Exception:
logger.exception("Error loading entity types:")
|
|
# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
import neutron.plugins.ml2.drivers.openvswitch.agent.common.constants \
as ovs_const
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent.\
openflow.ovs_ofctl import ovs_bridge_test_base
call = mock.call  # shorthand
class OVSTunnelBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase,
ovs_bridge_test_base.OVSDVRProcessTestMixin):
dvr_process_table_id = ovs_const.DVR_PROCESS
dvr_process_next_table_id = ovs_const.PATCH_LV_TO_TUN
def setUp(self):
super(OVSTunnelBridgeTest, self).setUp()
self.setup_bridge_mock('br-tun', self.br_tun_cls)
self.stamp = self.br.default_cookie
def test_setup_default_table(self):
patch_int_ofport = 5555
mock_do_action_flows = mock.patch.object(self.br,
'do_action_flows').start()
self.mock.attach_mock(mock_do_action_flows, 'do_action_flows')
self.br.setup_default_table(patch_int_ofport=patch_int_ofport,
arp_responder_enabled=False)
flow_args = [{'priority': 1, 'in_port': patch_int_ofport,
'actions': 'resubmit(,2)'},
{'priority': 0, 'actions': 'drop'},
{'priority': 0, 'table': 2,
'dl_dst': '00:00:00:00:00:00/01:00:00:00:00:00',
'actions': 'resubmit(,20)'},
{'priority': 0, 'table': 2,
'dl_dst': '01:00:00:00:00:00/01:00:00:00:00:00',
'actions': 'resubmit(,22)'},
{'priority': 0, 'table': 3, 'actions': 'drop'},
{'priority': 0, 'table': 4, 'actions': 'drop'},
{'priority': 0, 'table': 6, 'actions': 'drop'},
{'priority': 1, 'table': 10,
'actions': 'learn(cookie=' + str(self.stamp) +
',table=20,priority=1,hard_timeout=300,'
'NXM_OF_VLAN_TCI[0..11],'
'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],'
'load:0->NXM_OF_VLAN_TCI[],'
'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],'
'output:NXM_OF_IN_PORT[]),'
'output:%s' % patch_int_ofport},
{'priority': 0, 'table': 20, 'actions': 'resubmit(,22)'}
]
expected = [call.do_action_flows('add', flow_args),
call.add_flow(priority=0, table=22, actions='drop')]
self.assertEqual(expected, self.mock.mock_calls)
def test_setup_default_table_arp_responder_enabled(self):
patch_int_ofport = 5555
mock_do_action_flows = mock.patch.object(self.br,
'do_action_flows').start()
self.mock.attach_mock(mock_do_action_flows, 'do_action_flows')
self.br.setup_default_table(patch_int_ofport=patch_int_ofport,
arp_responder_enabled=True)
flow_args = [{'priority': 1, 'in_port': patch_int_ofport,
'actions': 'resubmit(,2)'},
{'priority': 0, 'actions': 'drop'},
{'priority': 1, 'table': 2, 'dl_dst': 'ff:ff:ff:ff:ff:ff',
'actions': 'resubmit(,21)', 'proto': 'arp'},
{'priority': 0, 'table': 2,
'dl_dst': '00:00:00:00:00:00/01:00:00:00:00:00',
'actions': 'resubmit(,20)'},
{'priority': 0, 'table': 2,
'dl_dst': '01:00:00:00:00:00/01:00:00:00:00:00',
'actions': 'resubmit(,22)'},
{'priority': 0, 'table': 3, 'actions': 'drop'},
{'priority': 0, 'table': 4, 'actions': 'drop'},
{'priority': 0, 'table': 6, 'actions': 'drop'},
{'priority': 1, 'table': 10,
'actions': 'learn(cookie=' + str(self.stamp) +
',table=20,priority=1,hard_timeout=300,'
'NXM_OF_VLAN_TCI[0..11],'
'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],'
'load:0->NXM_OF_VLAN_TCI[],'
'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],'
'output:NXM_OF_IN_PORT[]),'
'output:%s' % patch_int_ofport},
{'priority': 0, 'table': 20, 'actions': 'resubmit(,22)'},
{'priority': 0, 'table': 21, 'actions': 'resubmit(,22)'}
]
expected = [call.do_action_flows('add', flow_args),
call.add_flow(priority=0, table=22, actions='drop')]
self.assertEqual(expected, self.mock.mock_calls)
def test_provision_local_vlan(self):
network_type = 'vxlan'
lvid = 888
segmentation_id = 777
distributed = False
self.br.provision_local_vlan(network_type=network_type, lvid=lvid,
segmentation_id=segmentation_id,
distributed=distributed)
expected = [
call.add_flow(priority=1, tun_id=segmentation_id,
actions='mod_vlan_vid:%s,resubmit(,10)' % lvid,
table=4),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_reclaim_local_vlan(self):
network_type = 'vxlan'
segmentation_id = 777
self.br.reclaim_local_vlan(network_type=network_type,
segmentation_id=segmentation_id)
expected = [
call.delete_flows(tun_id=segmentation_id, table=4),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_flood_to_tun(self):
vlan = 3333
tun_id = 2222
ports = [11, 44, 22, 33]
self.br.install_flood_to_tun(vlan=vlan,
tun_id=tun_id,
ports=ports)
expected = [
call.mod_flow(table=22, dl_vlan=vlan,
actions='strip_vlan,set_tunnel:%(tun)s,'
'output:%(ports)s' % {
'tun': tun_id,
'ports': ','.join(map(str, ports)),
}),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_flood_to_tun(self):
vlan = 3333
self.br.delete_flood_to_tun(vlan=vlan)
expected = [
call.delete_flows(table=22, dl_vlan=vlan),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_unicast_to_tun(self):
vlan = 3333
port = 55
mac = '08:60:6e:7f:74:e7'
tun_id = 2222
self.br.install_unicast_to_tun(vlan=vlan,
tun_id=tun_id,
port=port,
mac=mac)
expected = [
call.add_flow(priority=2, table=20, dl_dst=mac, dl_vlan=vlan,
actions='strip_vlan,set_tunnel:%(tun)s,'
'output:%(port)s' % {
'tun': tun_id,
'port': port,
}),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_unicast_to_tun(self):
vlan = 3333
mac = '08:60:6e:7f:74:e7'
self.br.delete_unicast_to_tun(vlan=vlan, mac=mac)
expected = [
call.delete_flows(table=20, dl_dst=mac, dl_vlan=vlan),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_unicast_to_tun_without_mac(self):
vlan = 3333
mac = None
self.br.delete_unicast_to_tun(vlan=vlan, mac=mac)
expected = [
call.delete_flows(table=20, dl_vlan=vlan),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_install_arp_responder(self):
vlan = 3333
ip = '192.0.2.1'
mac = '08:60:6e:7f:74:e7'
self.br.install_arp_responder(vlan=vlan, ip=ip, mac=mac)
expected = [
call.add_flow(proto='arp', nw_dst=ip,
actions='move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],'
'mod_dl_src:%(mac)s,load:0x2->NXM_OF_ARP_OP[],'
'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],'
'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],'
'load:%(mac)#x->NXM_NX_ARP_SHA[],'
'load:%(ip)#x->NXM_OF_ARP_SPA[],in_port' % {
'mac': netaddr.EUI(mac,
dialect=netaddr.mac_unix),
'ip': netaddr.IPAddress(ip),
},
priority=1, table=21, dl_vlan=vlan),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_arp_responder(self):
vlan = 3333
ip = '192.0.2.1'
self.br.delete_arp_responder(vlan=vlan, ip=ip)
expected = [
call.delete_flows(table=21, dl_vlan=vlan, proto='arp', nw_dst=ip),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_delete_arp_responder_without_ip(self):
vlan = 3333
ip = None
self.br.delete_arp_responder(vlan=vlan, ip=ip)
expected = [
call.delete_flows(table=21, dl_vlan=vlan, proto='arp'),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_setup_tunnel_port(self):
network_type = 'vxlan'
port = 11111
self.br.setup_tunnel_port(network_type=network_type, port=port)
expected = [
call.add_flow(priority=1, in_port=port, actions='resubmit(,4)'),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_cleanup_tunnel_port(self):
port = 11111
self.br.cleanup_tunnel_port(port=port)
expected = [
call.delete_flows(in_port=port),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_add_dvr_mac_tun(self):
mac = '00:02:b3:13:fe:3d'
port = 8888
self.br.add_dvr_mac_tun(mac=mac, port=port)
expected = [
call.add_flow(priority=1, table=9, dl_src=mac,
actions='output:%s' % port),
]
self.assertEqual(expected, self.mock.mock_calls)
def test_remove_dvr_mac_tun(self):
mac = '00:02:b3:13:fe:3d'
self.br.remove_dvr_mac_tun(mac=mac)
expected = [
call.delete_flows(dl_src=mac, table=9),
]
self.assertEqual(expected, self.mock.mock_calls)
def _mock_add_tunnel_port(self, deferred_br=False):
port_name = 'fake_port'
remote_ip = '192.168.1.3'
local_ip = '192.168.1.2'
tunnel_type = 'vxlan'
vxlan_udp_port = '4789'
dont_fragment = True
if deferred_br:
with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.add_port',
return_value=9999) as add_port, \
self.br.deferred() as deferred_br:
ofport = deferred_br.add_tunnel_port(port_name, remote_ip,
local_ip, tunnel_type,
vxlan_udp_port,
dont_fragment)
else:
with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.add_port',
return_value=9999) as add_port:
ofport = self.br.add_tunnel_port(port_name, remote_ip,
local_ip, tunnel_type,
vxlan_udp_port,
dont_fragment)
self.assertEqual(9999, ofport)
self.assertEqual(1, add_port.call_count)
self.assertEqual(port_name, add_port.call_args[0][0])
def _mock_delete_port(self, deferred_br=False):
port_name = 'fake_port'
if deferred_br:
with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
'delete_port') as delete_port, \
self.br.deferred() as deferred_br:
deferred_br.delete_port(port_name)
else:
with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
'delete_port') as delete_port:
self.br.delete_port(port_name)
self.assertEqual([call(port_name)], delete_port.mock_calls)
def test_add_tunnel_port(self):
self._mock_add_tunnel_port()
def test_delete_port(self):
self._mock_delete_port()
def test_deferred_br_add_tunnel_port(self):
self._mock_add_tunnel_port(True)
def test_deferred_br_delete_port(self):
self._mock_delete_port(True)
|
|
from collections.abc import Iterable
from multiprocessing import Pool
import warnings
import tempfile
import numpy as np
import scipy
from scipy import special
import scipy.signal
from astropy import log
from astropy.table import Table
import matplotlib.pyplot as plt
try:
from tqdm import tqdm as show_progress
except ImportError:
def show_progress(a, **kwargs):
return a
try:
import pyfftw
from pyfftw.interfaces.numpy_fft import fft, fftfreq, fftn, ifftn
HAS_PYFFTW = True
except ImportError:
warnings.warn("Using standard numpy fft")
from scipy.fft import fft, fftfreq, fftn, ifftn
HAS_PYFFTW = False
from stingray.pulse.overlapandsave.ols import ols
from ..utils import njit
from ..stats import pds_probability, pds_detection_level
from ..gti import create_gti_mask
def convolve_ols(a, b, memout=None):
"""Convolution using overlap-and-save.
The code for the convolution, as implemented by Ahmed Fasih, is under
stingray.pulse.overlapandsave
    Mimics scipy.signal.fftconvolve with mode='same'.
Examples
--------
>>> from scipy.signal import fftconvolve
>>> nx, nh = 21, 7
>>> x = (np.random.randint(-30, 30, size=(nx, nx)) + 1j *
... np.random.randint(-30, 30, size=(nx, nx)))
>>> h = (np.random.randint(-20, 20, size=(nh, nh)) + 1j *
... np.random.randint(-20, 20, size=(nh, nh)))
>>> ref = fftconvolve(x, h, mode='same')
    >>> y = convolve(x, h)  # doctest: +ELLIPSIS
...
>>> np.allclose(ref, y)
True
"""
if isinstance(a, str):
a = np.lib.format.open_memmap(a)
return ols(a, b,
size=[
max(4 * x, int(pow(100000, 1/len(b.shape))))
for x in b.shape],
rfftn=fftn, irfftn=ifftn, out=memout)
def convolve(a, b, mode='ols', memout=None):
    # compare numeric version components; comparing the raw strings would
    # misorder e.g. '1.9' and '1.15'
    if tuple(int(v) for v in np.version.version.split('.')[:2]) < (1, 16):
mode = 'scipy'
if mode == 'ols':
return convolve_ols(a, b, memout=memout)
return scipy.signal.fftconvolve(a, b, mode='same')
@njit()
def pds_from_fft(spectr, nph):
return (spectr * spectr.conj()).real * 2 / nph
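# Minimal sketch of pds_from_fft on toy data (not from the paper): for a
# pure-noise counts series, Leahy-normalized powers average ~2.
#
#     counts = np.random.poisson(10, size=1024)
#     powers = pds_from_fft(fft(counts), counts.sum())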
def _create_responses(range_z):
"""Create responses corresponding to different accelerations.
This is the implementation of Eq. 39 in Ransom, Eikenberry &
Middleditch 2002. See that paper for details
Parameters
----------
range_z : int
List of z values to be used for the calculation.
Returns
-------
responses : list
List of arrays describing the shape of the response function
corresponding to each value of ``range_z``.
"""
log.info("Creating responses")
responses = []
for j, z in enumerate(show_progress(range_z)):
# fdot = z / T**2
        if np.abs(z) < 0.01:
            responses.append(0)
            continue
        m = np.max([np.abs(int(2 * z)), 40])
        sign = z / np.abs(z)
        absz = np.abs(z)
        factor = sign / np.sqrt(2 * absz)
        q_ks = np.arange(-m / 2, m / 2 + 1)
        exponentials = np.exp(1j * np.pi * q_ks**2 / z)
        Yks = sign * np.sqrt(2 / absz) * q_ks
        Zks = sign * np.sqrt(2 / absz) * (q_ks + z)
[SZs, CZs] = special.fresnel(Zks)
[SYs, CYs] = special.fresnel(Yks)
weights = SZs - SYs + 1j * (CYs - CZs)
responses.append(weights * exponentials * factor)
return responses
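# Illustrative call (the z grid is chosen arbitrarily): each entry of the
# returned list is a complex response array, except a bare 0 wherever
# |z| < 0.01, which _convolve_with_response below treats as "no
# acceleration, use the FT as is".
#
#     responses = _create_responses(np.arange(-5, 5))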
def _convolve_with_response(A, detlev, freq_intv_to_search, response_and_j,
interbin=False, memout=None):
"""Accelerate the Fourier transform and find pulsations.
This function convolves the initial Fourier transform with the response
corresponding to a constant acceleration, and searches for signals
corresponding to candidate pulsations.
Parameters
----------
    A : complex array
        The initial FT, normalized so that || FT ||^2 are Leahy powers.
    detlev : float
        The power level considered good for detection
    freq_intv_to_search : bool array
        Mask for ``A``, showing all spectral bins that should be searched
        for pulsations. Note that we use the full array to calculate the
        convolution with the responses, but only these bins to search for
        pulsations. Had we filtered the frequencies before the convolution,
        we would be sure to introduce boundary effects in the "Good"
        frequency interval
    response_and_j : tuple
        Tuple containing the response matrix corresponding to a given
        acceleration and its position in the list of responses allocated
        at the start of the procedure in ``accelsearch``.
    Other parameters
    ----------------
    interbin : bool
        Calculate interbinning to improve sensitivity to frequencies close
        to the edge of PDS bins
    memout : array or None
        Optional memory-mapped array used as the output buffer for the
        convolution.
Returns
-------
result : list
List containing tuples of the kind (r, j, power) where
r is the frequency in units of 1/ T, j is the index of the
acceleration response used and power is the spectral power
"""
response, j = response_and_j
r_freqs = np.arange(A.size)
if np.asarray(response).size == 1:
accel = A
else:
accel = convolve(A, response, memout=memout)
    # convolve() returns a 'same'-sized array, so no trimming is needed here
rf = r_freqs[freq_intv_to_search]
accel = accel[freq_intv_to_search]
    if interbin:
        rf, accel = interbin_fft(rf, accel)
powers_to_search = (accel * accel.conj()).real
candidate = powers_to_search > detlev
rs = rf[candidate]
cand_powers = powers_to_search[candidate]
results = []
for i in range(len(rs)):
r = rs[i]
cand_power = cand_powers[i]
results.append([r, j, cand_power])
return results
def _calculate_all_convolutions(A, responses, freq_intv_to_search,
detlev, debug=False, interbin=False,
nproc=4):
"""Accelerate the initial Fourier transform and find pulsations.
This function convolves the initial Fourier transform with the responses
corresponding to different amounts of constant acceleration, and searches
for signals corresponding to candidate pulsations.
Parameters
----------
A : complex array
The initial FT, normalized so that || FT ||^2 are Leahy powers.
responses : list of complex arrays
List of response functions corresponding to different values of
constant acceleration.
freq_intv_to_search : bool array
Mask for ``A``, showing all spectral bins that should be searched
for pulsations. Note that we use the full array to calculate the
convolution with the responses, but only these bins to search for
pulsations. Had we filtered the frequencies before the convolution,
we would be sure to introduce boundary effects in the "Good"
frequency interval
detlev : float
The power level considered good for detection
Other parameters
----------------
debug : bool
Dump debugging information
interbin : bool
Calculate interbinning to improve sensitivity to frequencies close
to the edge of PDS bins
nproc : int
Number of processors to be used for parallel computation.
Returns
-------
candidate_rs: array of float
Frequency of candidates in units of r = 1 / T
candidate_js: array of float
Index of the response used
candidate_powers: array of float
Power of candidates
"""
log.info("Convolving FFT with responses...")
    # seed entries keep the lists non-empty; they are stripped by the [1:]
    # slices in the return statement below
    candidate_powers = [0.]
    candidate_rs = [1]
    candidate_js = [2]
len_responses = len(responses)
_, memmapfname = tempfile.mkstemp(suffix='.npy')
memout = np.lib.format.open_memmap(
memmapfname, mode='w+', dtype=A.dtype, shape=A.shape)
from functools import partial
func = partial(_convolve_with_response, A, detlev, freq_intv_to_search,
interbin=interbin, memout=memout)
if nproc == 1:
results = []
for j in show_progress(range(len_responses)):
results.append(func((responses[j], j)))
else:
with Pool(processes=nproc) as pool:
results = list(show_progress(pool.imap_unordered(
func, [(responses[j], j) for j in range(len_responses)]),
total=len_responses))
pool.close()
for res in results:
for subr in res:
candidate_powers.append(subr[2])
candidate_rs.append(subr[0])
candidate_js.append(subr[1])
return candidate_rs[1:], candidate_js[1:], candidate_powers[1:]
def accelsearch(times, signal, delta_z=1, fmin=1, fmax=1e32,
gti=None, zmax=100, candidate_file=None, ref_time=0,
debug=False, interbin=False, nproc=4, det_p_value=0.15,
fft_rescale=None):
"""Find pulsars with accelerated search.
The theory behind these methods is described in Ransom+02, AJ 124, 1788.
Parameters
----------
times : array of floats
An evenly spaced list of times
signal : array of floats
The light curve, in counts; same length as ``times``
Other parameters
----------------
delta_z : float
The spacing in ``z`` space (delta_z = 1 -> delta_fdot = 1/T**2)
fmin : float, default 1.
Minimum frequency to search
fmax : float, default 1e32
Maximum frequency to search
gti : ``[[gti00, gti01], [gti10, gti11], ...]``, default None
Good Time Intervals. If None, it assumes the full range
``[[time[0] - dt / 2 -- time[-1] + dt / 2]]``
zmax : int, default 100
Maximum frequency derivative to search (pos and neg), in bins.
It corresponds to ``fdot_max = zmax / T**2``, where ``T`` is the
length of the observation.
candidate_file : str, default None
Save the final candidate table to this file. If None, the table
is just returned and not saved.
ref_time : float, default 0
Reference time for the times
    det_p_value : float, default 0.15
Detection p-value (tail probability of noise powers, corrected for the
number of trials)
fft_rescale : function
Any function to apply to the initial FFT, normalized by the number of
photons as FT * np.sqrt(2/nph) so that || FT ||^2 are Leahy powers.
For example, a filter to flatten the spectrum in the presence of strong
red noise.
Returns
-------
candidate_table: :class:`Table`
Table containing the candidate frequencies and frequency derivatives,
the spectral power in Leahy normalization, the detection probability,
the time and the observation length.
"""
if not isinstance(times, np.ndarray):
times = np.asarray(times)
if not isinstance(signal, np.ndarray):
signal = np.asarray(signal)
dt = times[1] - times[0]
if gti is not None:
gti = np.asarray(gti)
# Fill in the data with a constant outside GTIs
gti_mask = create_gti_mask(times, gti)
expo_fraction = np.count_nonzero(gti_mask) / len(gti_mask)
bti_mask = ~gti_mask
mean_ops = np.mean
if np.mean(signal) > 10:
mean_ops = np.median
signal[bti_mask] = mean_ops(signal[gti_mask])
else:
expo_fraction = 1
gti = np.array(
            [[times[0] - dt / 2, times[-1] + dt / 2]])
n_photons = np.sum(signal)
spectr = fft(signal) * np.sqrt(2 / n_photons)
freq = fftfreq(len(spectr), dt)
if debug:
_good_f = freq > 0
fig = plt.figure(figsize=(12, 8))
plt.plot(freq[_good_f], (spectr * spectr.conj()).real[_good_f],
label='initial PDS')
plt.xlabel("Frequency (Hz)")
plt.ylabel("Power (Leahy)")
plt.loglog()
if fft_rescale is not None:
log.info("Applying initial filters...")
spectr = fft_rescale(spectr)
if debug:
plt.plot(freq[_good_f], (spectr * spectr.conj()).real[_good_f],
label='PDS after filtering (if any)')
fname = candidate_file + '_initial_spec.png' \
if candidate_file else 'initial_spec.png'
plt.legend(loc=2)
del _good_f
plt.savefig(fname)
plt.close(fig)
T = times[-1] - times[0] + dt
freq_intv_to_search = (freq >= fmin) & (freq < fmax)
log.info("Starting search over full plane...")
start_z = -zmax
end_z = zmax
    range_z = np.arange(start_z, end_z, delta_z)
log.info("min and max possible r_dot: {}--{}".format(delta_z/T**2,
np.max(range_z)/T**2))
freqs_to_search = freq[freq_intv_to_search]
candidate_table = Table(
names=['time', 'length', 'frac_exposure', 'power', 'prob', 'frequency',
'fdot', 'fddot', 'ntrial'],
dtype=[float] * 8 + [int])
detlev = pds_detection_level(ntrial=freqs_to_search.size,
epsilon=det_p_value)
responses = _create_responses(range_z)
candidate_rs, candidate_js, candidate_powers = \
_calculate_all_convolutions(spectr, responses,
freq_intv_to_search, detlev,
debug=debug, interbin=interbin,
nproc=nproc)
for r, j, cand_power in zip(candidate_rs, candidate_js, candidate_powers):
z = range_z[j]
cand_freq = r / T
fdot = z / T**2
prob = pds_probability(cand_power, freqs_to_search.size)
candidate_table.add_row(
[ref_time + gti[0, 0], T, expo_fraction, cand_power, prob,
cand_freq, fdot, 0, freqs_to_search.size])
if candidate_file is not None:
candidate_table.write(candidate_file + '.csv', overwrite=True)
return candidate_table
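# Minimal usage sketch (toy event series; all numbers are illustrative):
#
#     dt = 1 / 256
#     times = np.arange(0, 128, dt)
#     rate = 10 * (1 + 0.5 * np.sin(2 * np.pi * 13 * times))
#     signal = np.random.poisson(rate * dt)
#     cands = accelsearch(times, signal, zmax=10, nproc=1)
#     # -> astropy Table with one row per candidate (frequency, fdot, ...)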
def interbin_fft(freq, fft):
"""Interbinning, a la van der Klis 1989.
Allows to recover some sensitivity in a power density spectrum when
the pulsation frequency is close to a bin edge. Here we oversample
the Fourier transform that will be used to calculate the PDS, adding
intermediate bins with the following values:
A_{k+1/2} = \\pi /4 (A_k - A_{k + 1})
Please note: The new bins are not statistically independent from the
rest. Please use simulations to estimate the correct detection
levels.
Parameters
----------
freq : array of floats
The frequency array
fft : array of complex numbers
The Fourier Transform
    Returns
    -------
new_freqs : array of floats, twice the length of the original array
The new frequency array
new_fft : array of complex numbers
The interbinned Fourier Transform.
Examples
--------
>>> import numpy as np
>>> freq = [0, 0.5, 1, -1, -0.5]
>>> fft = np.array([1, 0, 1, 1, 0], dtype=float)
>>> f, F = interbin_fft(freq, fft)
>>> np.allclose(f, [0, 0.25, 0.5, 0.75, 1, -1, -0.75, -0.5, -0.25])
True
>>> pi_4 = np.pi / 4
>>> np.allclose(F, [1, -pi_4, 0, pi_4, 1, 1, -pi_4, 0, pi_4])
True
"""
freq = np.asarray(freq)
fft = np.asarray(fft)
neglast = freq[-1] < 0
if neglast:
order = np.argsort(freq)
freq = freq[order]
fft = fft[order]
N = freq.size
new_N = 2 * N - 1
new_freqs = np.linspace(freq[0], freq[-1], new_N)
new_fft = np.zeros(new_N, dtype=type(fft[0]))
new_fft[::2] = fft
new_fft[1::2] = (fft[1:] - fft[:-1]) * np.pi / 4
if neglast:
fneg = new_freqs < 0
fpos = ~fneg
new_freqs = np.concatenate((new_freqs[fpos], new_freqs[fneg]))
new_fft = np.concatenate((new_fft[fpos], new_fft[fneg]))
return new_freqs, new_fft
|
|
import csv
import datetime
import numpy as np
import os
import random
import time
from math import ceil, floor
from numpy.random import randint
from plot_data import data_plotter
from plot_lattice import lattice_plotter
from plot_plasmids import plasmid_plotter_wrapper
"""
TODO
-make conj rate nutrient dependent
-remove or augment the maturity module
-ICs that resemble the PDEs
-convert lattice to np array and ref with tuples instead of separate loc 0 and loc 1 (cleaner, maybe faster)
-plasmid stats, fix replicate_plasmids method for single plasmid copy control
SPEED
-instead of explicit class structure for the states, could just use dicts (should be faster)
-use location tuples instead of lists (faster assigning)
-faster and better probability modules
-all to numpy arrays
-store cell type as well as position for faster referencing?
PLOTTING SPEED
-could save more time by not storing or plotting empties?
BUGS
-should skip self when going through surroundings, but not nutrients (i.e. don't count your current position)
"""
# IO
# =================================================
runs_folder = "runs\\" # store timestamped runs here
current_time = datetime.datetime.now().strftime("%Y-%m-%d %I.%M.%S%p")
time_folder = current_time + "\\"
current_run_folder = runs_folder + time_folder
# subfolders in the timestamped run directory:
data_folder = current_run_folder + "data\\"
plot_lattice_folder = current_run_folder + "plot_lattice\\"
plot_data_folder = current_run_folder + "plot_data\\"
dir_list = [runs_folder, current_run_folder, data_folder, plot_lattice_folder, plot_data_folder]
for dirs in dir_list:
if not os.path.exists(dirs):
os.makedirs(dirs)
# Constants
# =================================================
# simulation dimensions
n = 100 # up to 1000 tested as feasible
# simulation lattice parameters
search_radius_bacteria = 1
max_search_radius_nutrient = 3
assert search_radius_bacteria < n / 2 and max_search_radius_nutrient < n / 2
# nutrient settings
nutrient_initial_condition = 1 # 10
# division and recovery times
div_time_staph = 0.5
div_time_ecoli = 0.5
expected_recipient_div_time = div_time_ecoli # avg time for 1 R cell to divide in h
expected_donor_div_time = div_time_ecoli # avg time for 1 D cell to divide in h
# conjugation rate
conj_super_rate = 1.0
conj_ecoli_rate = 10.0
conj_staph_rate = 10 ** 4.0
expected_conj_time = conj_super_rate # avg time for 1 cell to conjugate in h (Jama: 10h for e.coli, more for staph)
# general cell settings
PLASMID_UPPER_BOUND = 10
# simulation time settings
standard_run_time = 24.0 # typical simulation time in h
turn_rate = 2.0 # average turns between each division; simulation step size
time_per_turn = expected_recipient_div_time / turn_rate
plots_period_in_turns = 2 * turn_rate # 1 or 1000 or 2 * turn_rate
total_turns = int(ceil(standard_run_time / time_per_turn))
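# Worked example with the defaults above: time_per_turn = 0.5 / 2.0 = 0.25 h,
# so a standard 24 h run takes total_turns = ceil(24.0 / 0.25) = 96 turns and
# lattice plots are produced every 2 * 2.0 = 4 turns.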
# Classes
# =================================================
# represents the state of a lattice cell: empty, donor, recipient
class Cell(object):
def __init__(self, label, location, nutrients, plasmid_amensal=0, plasmid_target=0):
self.label = label # symbol; either "(_) Empty, (R)eceiver, (D)onor, (T)ransconjugant"
self.location = location # list [i,j]
self.nutrients = nutrients
self.plasmid_amensal = plasmid_amensal
self.plasmid_target = plasmid_target
self.plasmid_upper_bound = PLASMID_UPPER_BOUND # TODO constant global
def __str__(self):
return self.label
def deplete_nutrients(self):
assert self.nutrients > 0
self.nutrients -= 1
def get_surroundings_square(self, search_radius):
"""Specifies the location of the top left corner of the search square
Args:
search_radius: half-edge length of the square
Returns:
list of locations; length should be (2 * search_radius + 1) ** 2 (- 1 remove self?)
Notes:
- periodic BCs apply, so search boxes wrap around at boundaries
- note that we assert that search_radius be less than half the grid size
            - may have different search radius depending on context (neighbouring bacteria / empty cells / nutrient)
- currently DOES NOT remove the original location
"""
row = self.location[0]
col = self.location[1]
surroundings = [(row_to_search % n, col_to_search % n)
for row_to_search in xrange(row - search_radius, row + search_radius + 1)
for col_to_search in xrange(col - search_radius, col + search_radius + 1)]
return surroundings
def get_label_surroundings(self, cell_label, search_radius):
if cell_label not in ['_', 'R', 'D', 'T']:
raise Exception("Illegal cell label (_, R, D, or T)")
neighbours = self.get_surroundings_square(search_radius=search_radius)
neighbours_of_specified_type = []
for loc in neighbours: # TODO should skip self when going through (i.e. don't count your current position)
if cell_label == lattice[loc[0]][loc[1]].label:
neighbours_of_specified_type.append(loc)
return neighbours_of_specified_type
def get_nutrient_surroundings_ordered(self, max_nutrient_radius):
"""Gives a list of POSSIBLY remaining nutrient surroundings (square), ordered by increasing radii
Args:
max_nutrient_radius: int (e.g. 2)
Returns:
ordered list of groups of locations of nutrients which may or may not be available,
specifically: [[radius=0 nutrient locs], [radius=1 nutrient locs], ..., [radius=max nutrient locs]]
Notes:
- very VERY inefficient algorithm, should clean it up
"""
        # create initial surroundings (full squares), one per radius
        location_layers_nutrients = [None] * (max_nutrient_radius + 1)
        for radius in xrange(max_nutrient_radius + 1):
            location_layers_nutrients[radius] = self.get_surroundings_square(radius)  # TODO speed this up later
        # strip each smaller square out of the next larger one, leaving rings;
        # must run down to radius 1 so the centre is kept only in layer 0
        for radius in xrange(max_nutrient_radius, 0, -1):
            for loc in location_layers_nutrients[radius - 1]:
                location_layers_nutrients[radius].remove(loc)
return location_layers_nutrients
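    def get_nutrient_surroundings_ordered_sets(self, max_nutrient_radius):
        """Sketch of a faster equivalent of the method above (not called by the
        simulation): build each ring as the set difference of consecutive
        squares, avoiding the quadratic list.remove() passes. Ordering within a
        ring does not matter, since callers sample each ring at random.
        """
        squares = [set(self.get_surroundings_square(radius))
                   for radius in xrange(max_nutrient_radius + 1)]
        rings = [list(squares[0])]
        for radius in xrange(1, max_nutrient_radius + 1):
            rings.append(list(squares[radius] - squares[radius - 1]))
        return rings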
def is_nutrient_available(self):
"""Checks if any nutrients are available within the search radius
"""
nutrient_layers = self.get_nutrient_surroundings_ordered(max_search_radius_nutrient)
for nutrient_location_layer in nutrient_layers:
for loc in nutrient_location_layer:
if lattice[loc[0]][loc[1]].nutrients > 0:
return True
return False
def choose_and_exhaust_nutrient(self):
"""Chooses a random nutrient location to deplete by a value of 1
Notes:
- starts with locations closest to the cell (radius=0) and moves outwards
"""
nutrient_layers = self.get_nutrient_surroundings_ordered(max_search_radius_nutrient)
        for nutrient_location_layer in nutrient_layers:
            # stay on the closest ring until it is truly exhausted, discarding
            # depleted locations as they are found, before moving outwards
            while len(nutrient_location_layer) > 0:
                loc = random.choice(nutrient_location_layer)
                if lattice[loc[0]][loc[1]].nutrients > 0:
                    lattice[loc[0]][loc[1]].deplete_nutrients()
                    return
                nutrient_location_layer.remove(loc)
        print "WARNING - choosing to exhaust nutrients when none are available, continuing anyways"
        return
def replicate_plasmids(self):
assert self.plasmid_target == 0 # TODO NOT IMPLEMENTED
if self.plasmid_amensal == 0:
#print "Warning: updating amensal plasmid count in a cell with no amensal plasmids"
pass
elif self.plasmid_amensal == self.plasmid_upper_bound:
#print "Warning: updating amensal plasmid count in a cell when its already at max -- should this even be printed?"
pass
else:
            # halve the gap to the copy-number bound; divide by 2.0 so that
            # integer division cannot stall the count at upper_bound - 1
            self.plasmid_amensal += int(ceil((self.plasmid_upper_bound - self.plasmid_amensal) / 2.0))  # TODO make this sensible... or stochastic
return
class Empty(Cell):
def __init__(self, location, nutrients):
Cell.__init__(self, '_', location, nutrients)
class Receiver(Cell):
def __init__(self, location, nutrients, plasmid_target):
Cell.__init__(self, 'R', location, nutrients, plasmid_target=plasmid_target) # note plasmid_amensal defaults to 0
self.pause = 0 # 0 if cell is active, non-zero means turns until active
self.refractory_div = expected_donor_div_time / 4 / time_per_turn # refractory period after division in turns
assert self.plasmid_amensal == 0
assert self.plasmid_target == 0
class Donor(Cell):
def __init__(self, location, nutrients, plasmid_amensal):
Cell.__init__(self, 'D', location, nutrients, plasmid_amensal=plasmid_amensal) # note plasmid_target defaults to 0
self.pause = 0 # 0 if cell is active, non-zero means turns until active
#self.maturity = 0 # starting probability of conjugation
#self.maxmaturity = 50 # max probability of conjugation
self.refractory_conj = ceil(0.25 / time_per_turn) # OLD VERSION: expected_conj_time/16/time_per_turn # refractory period after conjugation in turns
self.refractory_div = ceil(0.50 / time_per_turn) # OLD VERSION: expected_donor_div_time/4/time_per_turn # refractory period after division in turns
assert self.plasmid_amensal > 0
assert self.plasmid_target == 0
class Transconjugant(Cell):
def __init__(self, location, nutrients, plasmid_amensal, plasmid_target):
Cell.__init__(self, 'T', location, nutrients, plasmid_amensal=plasmid_amensal, plasmid_target=plasmid_target)
self.pause = 0 # 0 if cell is active, non-zero means turns until active
#self.maturity = 0 # starting probability of conjugation
#self.maxmaturity = 50 # max probability of conjugation
self.refractory_conj = ceil(0.25 / time_per_turn) # OLD VERSION: expected_conj_time/16/time_per_turn # refractory period after conjugation in turns
self.refractory_div = ceil(0.50 / time_per_turn) # OLD VERSION: expected_donor_div_time/4/time_per_turn # refractory period after division in turns
assert self.plasmid_amensal > 0
assert self.plasmid_target == 0
# Initiate Cell Lattice and Data Directory
# =================================================
lattice = [[Empty([x, y], nutrient_initial_condition) for y in xrange(n)] for x in xrange(n)] # this can be made faster as np array
lattice_data = np.zeros((total_turns + 1, 7)) # sublists are [turn, time, E, R, D, T, N] # TODO 7 shouldn't be hardcoded
# Functions
# =================================================
def printer():
for i in xrange(n):
str_lst = [lattice[i][j].label for j in xrange(n)]
print " " + ' '.join(str_lst)
print
def build_lattice_opposites():
pivot = n/5
anti_pivot = n - pivot - 1
lattice[pivot][pivot] = Receiver([pivot, pivot], lattice[pivot][pivot].nutrients, 0)
lattice[anti_pivot][anti_pivot] = Donor([anti_pivot, anti_pivot], lattice[anti_pivot][anti_pivot].nutrients, PLASMID_UPPER_BOUND)
return lattice
def build_lattice_random(seed=5):
# seed: determines ratio of donors to recipients for random homogeneous conditions
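    # e.g. with seed=5 each site starts as a Receiver with probability 1/5,
    # a Donor with probability 1/5, and Empty with probability 3/5
    # (randint values 2..4 all map to Empty)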
random_lattice = randint(seed, size=(n, n))
for i in xrange(n):
for j in xrange(n):
m = random_lattice[i][j]
if m == 0:
lattice[i][j] = Receiver([i, j], nutrient_initial_condition, 0)
elif m == 1:
lattice[i][j] = Donor([i, j], nutrient_initial_condition, randint(PLASMID_UPPER_BOUND/2, PLASMID_UPPER_BOUND + 1))
elif m in range(2, seed):
lattice[i][j] = Empty([i, j], nutrient_initial_condition)
print random_lattice, "\n"
return
def is_empty(loc):
return '_' == lattice[loc[0]][loc[1]].label
def is_recipient(loc):
return 'R' == lattice[loc[0]][loc[1]].label
def is_donor(loc):
return 'D' == lattice[loc[0]][loc[1]].label
def is_transconjugant(loc):
return 'T' == lattice[loc[0]][loc[1]].label
def get_nutrients(loc):
return lattice[loc[0]][loc[1]].nutrients
def get_label(loc):
return lattice[loc[0]][loc[1]].label
def divide(cell, empty_neighbours, new_cell_locations, dict_counts):
# division assessment parameters
nutrients_are_available = cell.is_nutrient_available()
distr = randint(0, 100) # division probability is tied to the turn rate
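    # with turn_rate = 2.0 the threshold is 100.0 / 2.0 = 50, so an active cell
    # divides with probability ~0.5 per turn, i.e. one expected division per
    # turn_rate turns (one per division time)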
success = 0
if distr < 100.0 / turn_rate and nutrients_are_available and len(empty_neighbours) > 0:
# immediate division parameters
success = 1 # report division success to the simulation
daughter_loc = random.choice(empty_neighbours) # choose one of the empty neighbour cells to divide into
cell.choose_and_exhaust_nutrient() # exhaust nutrients using random search procedure
daughter_loc_nutrients = get_nutrients(daughter_loc) # store nutrients at daughter location
daughter_plasmid_target = 0 # TODO implement target/displacement module
# plasmid segregation module
if cell.plasmid_amensal != 0:
daughter_plasmid_amensal = np.random.binomial(cell.plasmid_amensal - 1, 0.5)
if daughter_plasmid_amensal == 0 or daughter_plasmid_amensal == cell.plasmid_amensal:
print "NOTE SEGREGATIVE LOSS EVENT"
cell.plasmid_amensal -= daughter_plasmid_amensal
else:
daughter_plasmid_amensal = 0
if cell.plasmid_target != 0:
raise Exception("Plasmid displacement not yet implemented") # TODO implement displacement dynamics
# division events by cell type
if 'R' == cell.label or daughter_plasmid_amensal == 0: # represents condition for segregative loss
daughter_label = 'R'
lattice[daughter_loc[0]][daughter_loc[1]] = Receiver(daughter_loc, daughter_loc_nutrients, daughter_plasmid_target)
elif 'D' == cell.label:
daughter_label = 'D'
lattice[daughter_loc[0]][daughter_loc[1]] = Donor(daughter_loc, daughter_loc_nutrients, daughter_plasmid_amensal)
#cell.maturity = floor(cell.maturity / 2)
elif 'T' == cell.label:
daughter_label = 'T'
lattice[daughter_loc[0]][daughter_loc[1]] = Transconjugant(daughter_loc, daughter_loc_nutrients, daughter_plasmid_amensal, daughter_plasmid_target)
#cell.maturity = floor(cell.maturity / 2)
else:
raise Exception("Illegal cell type")
# post-division events
cell.pause = cell.refractory_div
# update tracking variables
new_cell_locations.append(daughter_loc)
dict_counts[daughter_label] += 1
dict_counts['N'] -= 1
dict_counts['_'] -= 1
return success
def conjugate(cell, recipient_neighbours, dict_counts):
    distr = randint(0, 1000)  # uniform over [0, 1000)
success = 0 # note that successful conjugation = 1
conj_rate_rel_div_rate = expected_conj_time / expected_donor_div_time
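    # worked example with the defaults above: expected_conj_time = 1.0 and
    # expected_donor_div_time = 0.5 give conj_rate_rel_div_rate = 2.0, so the
    # threshold is (1000.0 / 2.0) / 2.0 = 250 and an eligible donor conjugates
    # with probability ~0.25 per turn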
assert cell.plasmid_amensal >= 1 # sanity check
if distr < (1000.0 / turn_rate) / conj_rate_rel_div_rate and len(recipient_neighbours) > 0:
# immediate conjugation parameters
success = 1
mate_loc = random.choice(recipient_neighbours)
mate = lattice[mate_loc[0]][mate_loc[1]]
# conjugation events
lattice[mate_loc[0]][mate_loc[1]] = Transconjugant(mate_loc, get_nutrients(mate_loc), 1, mate.plasmid_target) # TODO transfer more than 1 amensal plasmid?
# post-conjugation events
cell.pause = cell.refractory_conj
# update tracking variables
dict_counts['T'] += 1
dict_counts['R'] -= 1
return success
def count_cells(): # returns a dict of current cell counts: [# of empty, # of recipient, # of donor, # of nutrients]
keys = ['_', 'R', 'D', 'T', 'N']
counts = {key: 0 for key in keys}
for i in xrange(n):
for j in xrange(n):
loc = (i, j)
counts['N'] += get_nutrients(loc)
counts[get_label(loc)] += 1
return counts
def get_cell_locations():
cell_locations = []
for i in xrange(n):
for j in xrange(n):
loc = (i, j)
if not is_empty(loc):
cell_locations.append(loc)
return cell_locations
def run_sim():
# get stats for lattice initial condition before entering simulation loop, add to lattice data
print 'Turn ', 0, ' : Time Elapsed ', 0.0, "h"
dict_counts = count_cells()
lattice_data[0, :] = np.array([0, 0.0, dict_counts['_'], dict_counts['R'], dict_counts['D'], dict_counts['T'], dict_counts['N']])
# plot initial conditions
lattice_plotter(lattice, dict_counts, n, 0.0, plot_lattice_folder)
plasmid_plotter_wrapper(lattice, dict_counts, 0.0, plot_data_folder)
# simulation loop initialization
new_cell_locations = []
cell_locations = get_cell_locations()
for turn in xrange(1, total_turns + 1):
print '\nTurn ', turn, ' : Time Elapsed ', turn * time_per_turn, "h"
cell_locations = cell_locations + new_cell_locations
#random.shuffle(cell_locations) # TODO verify that we want to do this/runtime hit
new_cell_locations = []
# timestep profiling
t0_a = time.clock()
t0_b = time.time()
for loc in cell_locations:
cell = lattice[loc[0]][loc[1]]
cell.replicate_plasmids()
if 0 < cell.pause: # if paused, decrement pause timer
cell.pause -= 1
else:
empty_neighbours = cell.get_label_surroundings('_', search_radius_bacteria)
# recipient behaviour
if is_recipient(loc):
divide(cell, empty_neighbours, new_cell_locations, dict_counts)
# donor behaviour
elif is_donor(loc) or is_transconjugant(loc):
# if cell.maturity < cell.maxmaturity:
# cell.maturity += 10
recipient_neighbours = cell.get_label_surroundings('R', search_radius_bacteria)
no_division_flag = not empty_neighbours
no_conjugation_flag = not recipient_neighbours
if no_division_flag and no_conjugation_flag:
pass
elif no_division_flag:
conjugate(cell, recipient_neighbours, dict_counts)
elif no_conjugation_flag:
divide(cell, empty_neighbours, new_cell_locations, dict_counts)
else: # chance to either conjugate or divide, randomize the order of potential events
                        if 1 == randint(1, 3):  # try to divide first (50% of the time; randint(1, 3) returns 1 or 2)
if not divide(cell, empty_neighbours, new_cell_locations, dict_counts):
conjugate(cell, recipient_neighbours, dict_counts)
else: # try to conjugate first
if not conjugate(cell, recipient_neighbours, dict_counts):
divide(cell, empty_neighbours, new_cell_locations, dict_counts)
else:
print "WARNING - Cell not R or D or T"
raise Exception("Cell not R or D or T")
# get lattice stats for this timestep
        counts = count_cells()  # independent recount of the lattice, kept for the commented consistency check below
lattice_data[turn, :] = np.array([turn, turn * time_per_turn, dict_counts['_'], dict_counts['R'], dict_counts['D'], dict_counts['T'], dict_counts['N']])
# print "COUNTS actual", counts
# print "COUNTS dict", dict_counts
# timestep profiling
print "SIM process time:", time.clock() - t0_a
print "SIM wall time:", time.time() - t0_b
# periodically plot the lattice (it takes a while)
if turn % plots_period_in_turns == 0:
t0_a = time.clock()
t0_b = time.time()
timepoint = turn * time_per_turn
lattice_plotter(lattice, dict_counts, n, timepoint, plot_lattice_folder)
plasmid_plotter_wrapper(lattice, dict_counts, timepoint, plot_data_folder)
print "PLOT process time:", time.clock() - t0_a
print "PLOT wall time:", time.time() - t0_b
return lattice_data
# Main Function
# =================================================
def main():
build_lattice_random()
#build_lattice_opposites()
run_sim()
# write data to file
data_name = "lattice_data.csv"
data_file = data_folder + data_name
with open(data_file, "wb") as f:
writer = csv.writer(f)
writer.writerows(lattice_data)
# convert lattice_data to a dictionary and plot it
data_dict = {'iters': lattice_data[:, 0],
'time': lattice_data[:, 1],
'E': lattice_data[:, 2],
'R': lattice_data[:, 3],
'D': lattice_data[:, 4],
'T': lattice_data[:, 5],
'N': lattice_data[:, 6]}
data_plotter(data_dict, data_file, plot_data_folder)
print "\nDone!"
return
if __name__ == '__main__':
main()
|
|
import sys
import os
from math import sqrt
if len(sys.argv) != 2:
    print "python generateSummaryStat.py [simpl-stat file]"
    sys.exit(1)
def getAvg(values):
    total = 0
    for x in values:
        total += x
    return float(total) / float(len(values))
def getStdDev(values):
    avg = getAvg(values)
    deviation_list = [x - avg for x in values]
    deviation_squared_list = [x * x for x in deviation_list]
    avgdev = getAvg(deviation_squared_list)
    return sqrt(avgdev)
def printSummary(name, values):
    # five-number summary plus mean and standard deviation; quartiles are taken
    # by index at len/4, 2*len/4 and 3*len/4 of the sorted list
    svalues = sorted(values)
    firstQ = len(svalues) / 4  # integer division
    print "\t" + name + ":"
    print "\t\tmin:", svalues[0]
    print "\t\t1st Q:", svalues[firstQ]
    print "\t\tavg:", getAvg(svalues)
    print "\t\tmedian:", svalues[firstQ * 2]
    print "\t\t3rd Q:", svalues[firstQ * 3]
    print "\t\tmax:", svalues[-1]
    print "\t\tstd_dev:", getStdDev(svalues)
def printSumStats(label, blocks, cs, pre, non):
    assert getStdDev([3, 7, 7, 19]) == 6  # sanity check: mean 9, mean squared deviation 36
    print "For", label, ":"
    printSummary("block", blocks)
    printSummary("Context Switch", cs)
    printSummary("Preemptions", pre)
    printSummary("Nonpreemptive switches", non)
fout = open("py-summarystat.csv", "w")  # NOTE: opened for CSV output, but nothing is ever written to it
fin = open(sys.argv[1], "r").readlines()
run_count = 0
section_names = ["Start", "NAA", "NBB", "IAA", "IBB"]
stat_keys = ["blocks", "context switches", "preemptions", "non-preemptive"]
# stats[section][key] collects one value per run for that counter
stats = {sec: {key: [] for key in stat_keys} for sec in section_names}
while len(fin) > 0:
    item = fin.pop(0)
    if "**RUN" in item:
        for sec in section_names:
            assert sec in fin.pop(0)
            for key in stat_keys:
                item = fin.pop(0)
                assert key in item
                item = item.split(":")
                assert len(item) == 2
                stats[sec][key].append(int(item[1].strip()))
        run_count += 1
print "Number of runs:", run_count
for sec in section_names:
    label = "start" if sec == "Start" else sec
    printSumStats(label, stats[sec]["blocks"], stats[sec]["context switches"],
                  stats[sec]["preemptions"], stats[sec]["non-preemptive"])
|
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 20:16:47 2019
@author: sayala
"""
#from load import *
def _sensorupsampletocellsbyInterpolation(df, cellsy):
'''
    Function for when sensorsy in the results is less than the desired cellsy.
    Interpolates the dataframe.
    #2DO: improve interpolation with pandas. Right now it's row by row.
_sensorupsampletocellsbyInterpolation(df, cellsy)
'''
import pandas as pd
import numpy as np
sensorsy = len(df)
#2DO: Update this section to match bifacialvf
cellCenterPVM=[]
for i in range (0, cellsy):
cellCenterPVM.append((i*sensorsy/cellsy+(i+1)*sensorsy/cellsy)/2)
df2 = pd.DataFrame()
for j in range (0, len(df.keys())):
A = list(df[df.keys()[j]])
B= np.interp(cellCenterPVM, list(range(0,sensorsy)), A)
df2[df.keys()[j]]=B
return df2
def _sensorsdownsampletocellsbyAverage(df, cellsy):
'''
df = dataframe with rows indexed by number (i.e. 0 to sensorsy) where sensorsy > cellsy
cellsy = int. usually 8 or 12.
example:
F_centeraverages = _sensorsdownsampletocellsbyAverage(F, cellsy)
'''
import numpy as np
import pandas as pd
edges=len(df)-np.floor(len(df)/(cellsy))*(cellsy)
edge1=int(np.floor(edges/2))
edge2=int(edges-edge1)
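    # Worked example: len(df) = 100 and cellsy = 12 give a stride of
    # floor(100 / 12) = 8 rows per cell and edges = 100 - 96 = 4, so
    # edge1 = edge2 = 2 rows are trimmed from each end; A = [2, 10, ..., 90, 98]
    # then defines 12 slices of 8 rows, each averaged into one cell value.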
A = list(range(df.index[0]+edge1, df.index[-1]-edge2+2, int(np.floor(len(df)/(cellsy)))))
B = range(0,len(A)-1,1)
C = [df.iloc[A[x]:A[x+1]].mean(axis=0) for x in B]
df_centeraverages=pd.DataFrame(C)
return df_centeraverages
def _sensorsdownsampletocellbyCenter(df, cellsy):
'''
df = dataframe with rows indexed by number (i.e. 0 to sensorsy) where sensorsy > cellsy
cellsy = int. usually 8 or 12.
example:
F_centervalues = _sensorsdownsampletocellbyCenter(F, cellsy)
'''
import numpy as np
edges=len(df)-np.floor(len(df)/(cellsy))*(cellsy)
edge1=int(np.floor(edges/2))
edge2=int(edges-edge1)
A = list(range(df.index[0]+edge1, df.index[-1]-edge2+2, int(np.floor(len(df)/(cellsy)))))
A = [int(x+(A[1]-A[0])*0.5) for x in A]
A = A[:-1]
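    # e.g. with len(df) = 100 and cellsy = 12: boundaries [2, 10, ..., 98] are
    # shifted by half a stride to [6, 14, ..., 102]; dropping the last entry
    # leaves the 12 center rows [6, 14, ..., 94]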
df_centervalues=df.loc[A]
df_centervalues=df_centervalues.reset_index(drop=True)
return df_centervalues
def _setupforPVMismatch(portraitorlandscape, sensorsy, numcells=72):
    r''' Sets values for calling PVMismatch, for landscape or portrait module orientation.
    Example:
        stdpl, cellsx, cellsy = _setupforPVMismatch(portraitorlandscape='portrait', sensorsy=100)
'''
import numpy as np
# cell placement for 'portrait'.
if numcells == 72:
stdpl=np.array([[0, 23, 24, 47, 48, 71],
[1, 22, 25, 46, 49, 70],
[2, 21, 26, 45, 50, 69],
[3, 20, 27, 44, 51, 68],
[4, 19, 28, 43, 52, 67],
[5, 18, 29, 42, 53, 66],
[6, 17, 30, 41, 54, 65],
[7, 16, 31, 40, 55, 64],
[8, 15, 32, 39, 56, 63],
[9, 14, 33, 38, 57, 62],
[10, 13, 34, 37, 58, 61],
[11, 12, 35, 36, 59, 60]])
elif numcells == 96:
stdpl=np.array([[0, 23, 24, 47, 48, 71, 72, 95],
[1, 22, 25, 46, 49, 70, 73, 94],
[2, 21, 26, 45, 50, 69, 74, 93],
[3, 20, 27, 44, 51, 68, 75, 92],
[4, 19, 28, 43, 52, 67, 76, 91],
[5, 18, 29, 42, 53, 66, 77, 90],
[6, 17, 30, 41, 54, 65, 78, 89],
[7, 16, 31, 40, 55, 64, 79, 88],
[8, 15, 32, 39, 56, 63, 80, 87],
[9, 14, 33, 38, 57, 62, 81, 86],
[10, 13, 34, 37, 58, 61, 82, 85],
[11, 12, 35, 36, 59, 60, 83, 84]])
else:
print("Error. Only 72 and 96 cells modules supported at the moment. Change numcells to either of this options!")
return
if portraitorlandscape == 'landscape':
stdpl = stdpl.transpose()
elif portraitorlandscape != 'portrait':
print("Error. portraitorlandscape variable must either be 'landscape' or 'portrait'")
return
cellsx = len(stdpl[1]); cellsy = len(stdpl)
return stdpl, cellsx, cellsy
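# For example, _setupforPVMismatch('portrait', sensorsy=100) with the default
# numcells=72 returns a 12 x 6 placement array (cellsy=12, cellsx=6), while
# 'landscape' transposes it to 6 x 12.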
def calculatePVMismatch(pvsys, stdpl, cellsx, cellsy, Gpoat):
    r''' Calls PVMismatch with all the pre-generated values from bifacial_radiance.
    Example:
        PowerAveraged, PowerDetailed = calculatePVMismatch(pvsys, stdpl, cellsx, cellsy, Gpoat)
'''
import numpy as np
if np.mean(Gpoat) < 0.001:
PowerAveraged = 0
PowerDetailed = 0
else:
        # model the system as one module, in portrait mode
G=np.array([Gpoat]).transpose()
H = np.ones([1,cellsx])
array_det = np.dot(G,H)
array_avg = np.ones([cellsy,cellsx])*np.mean(Gpoat)
        # actually do the calculations
pvsys.setSuns({0: {0: [array_avg, stdpl]}})
PowerAveraged=pvsys.Pmp
pvsys.setSuns({0: {0: [array_det, stdpl]}})
PowerDetailed=pvsys.Pmp
return PowerAveraged, PowerDetailed
def mad_fn(data):
'''
    Mean absolute difference (MAD) calculation, relative to the average, for mismatch purposes.
Parameters
----------
data : np.ndarray
Gtotal irradiance measurements.
Returns
-------
scalar : return MAD / Average for a 1D array
Equation: 1/(n^2*Gavg)*Sum Sum (abs(G_i - G_j))
## Note: starting with Pandas 1.0.0 this function will not work on Series objects.
'''
import numpy as np
import pandas as pd
    # pandas raises NotImplementedError for np.subtract.outer on a Series,
    # so convert to a plain numpy array first
    if isinstance(data, pd.Series):
        data = data.to_numpy()
    return (np.abs(np.subtract.outer(data, data)).sum() / float(len(data)) ** 2 / np.mean(data)) * 100
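# Worked example: mad_fn(np.array([1.0, 2.0, 3.0])) sums all pairwise absolute
# differences via the outer subtraction (each unordered pair counted twice,
# total 8), divides by n**2 = 9 and by the mean 2.0, and scales by 100: ~44.4.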
def analysisIrradianceandPowerMismatch(testfolder, writefiletitle, portraitorlandscape, bififactor, numcells=72, downsamplingmethod='byCenter'):
r'''
Use this when sensorsy calculated with bifacial_radiance > cellsy
Reads and calculates power output and mismatch for each file in the
testfolder where all the bifacial_radiance irradiance results .csv are saved.
    First loads each file, cleans it and resamples it to the number of cells set in this function,
    and then calculates irradiance mismatch and PVMismatch power output for averaged, minimum,
    or detailed irradiances on each cell, for the cases where A) only 12 or 8 downsampled values
    are considered (at the center of each cell), and B) 12 or 8 values are obtained by averaging
    all the irradiances falling in the area of the cell (no edges or inter-cell spacing are
    considered at this moment). Then it saves all the A and B irradiances, as well as the
    cleaned/resampled front and rear irradiances.
Ideally sensorsy in the read data is >> 12 to give results for the irradiance mismatch in the cell.
Also ideally n
Parameters
----------
testfolder: folder containing output .csv files for bifacial_radiance
writefiletitle: .csv title where the output results will be saved.
portraitorlandscape: 'portrait' or 'landscape', for PVMismatch input
which defines the electrical interconnects inside the module.
bififactor: bifaciality factor of the module. Max 1.0. ALL Rear irradiance values saved include the bifi-factor.
    downsamplingmethod: 'byCenter' or 'byAverage'
Example:
# User information.
import bifacial_radiance
testfolder=r'C:\Users\sayala\Documents\HPC_Scratch\EUPVSEC\HPC Tracking Results\RICHMOND\Bifacial_Radiance Results\PVPMC_0\results'
writefiletitle= r'C:\Users\sayala\Documents\HPC_Scratch\EUPVSEC\HPC Tracking Results\RICHMOND\Bifacial_Radiance Results\PVPMC_0\test_df.csv'
sensorsy=100
portraitorlandscape = 'portrait'
analysis.analysisIrradianceandPowerMismatch(testfolder, writefiletitle, portraitorlandscape, bififactor=1.0, numcells=72)
'''
from bifacial_radiance import load
import os, glob
import pandas as pd
# Default variables
numpanels=1 # 1 at the moment, necessary for the cleaning routine.
automatic=True
#loadandclean
# testfolder = r'C:\Users\sayala\Documents\HPC_Scratch\EUPVSEC\PinPV_Bifacial_Radiance_Runs\HPCResults\df4_FixedTilt\FixedTilt_Cairo_C_0.15\results'
filelist = sorted(os.listdir(testfolder))
#filelist = sorted(glob.glob(os.path.join('testfolder','*.csv')))
    print('{} files in the directory'.format(len(filelist)))
# Check number of sensors on data.
temp = load.read1Result(os.path.join(testfolder,filelist[0]))
sensorsy = len(temp)
# Setup PVMismatch parameters
stdpl, cellsx, cellsy = _setupforPVMismatch(portraitorlandscape=portraitorlandscape, sensorsy=sensorsy, numcells=numcells)
F=pd.DataFrame()
B=pd.DataFrame()
    for z in range(len(filelist)):
data=load.read1Result(os.path.join(testfolder,filelist[z]))
[frontres, backres] = load.deepcleanResult(data, sensorsy=sensorsy, numpanels=numpanels, automatic=automatic)
F[filelist[z]]=frontres
B[filelist[z]]=backres
B = B*bififactor
# Downsample routines:
if sensorsy > cellsy:
if downsamplingmethod == 'byCenter':
print("Sensors y > cellsy; Downsampling data by finding CellCenter method")
F = _sensorsdownsampletocellbyCenter(F, cellsy)
B = _sensorsdownsampletocellbyCenter(B, cellsy)
elif downsamplingmethod == 'byAverage':
print("Sensors y > cellsy; Downsampling data by Averaging data into Cells method")
F = _sensorsdownsampletocellsbyAverage(F, cellsy)
B = _sensorsdownsampletocellsbyAverage(B, cellsy)
else:
print ("Sensors y > cellsy for your module. Select a proper downsampling method ('byCenter', or 'byAverage')")
return
elif sensorsy < cellsy:
print("Sensors y < cellsy; Upsampling data by Interpolation")
F = _sensorupsampletocellsbyInterpolation(F, cellsy)
B = _sensorupsampletocellsbyInterpolation(B, cellsy)
elif sensorsy == cellsy:
print ("Same number of sensorsy and cellsy for your module.")
F = F
B = B
# Calculate POATs
Poat = F+B
# Define arrays to fill in:
Pavg_all=[]; Pdet_all=[]
Pavg_front_all=[]; Pdet_front_all=[]
colkeys = F.keys()
import pvmismatch
if cellsx*cellsy == 72:
cell_pos = pvmismatch.pvmismatch_lib.pvmodule.STD72
elif cellsx*cellsy == 96:
cell_pos = pvmismatch.pvmismatch_lib.pvmodule.STD96
else:
print("Error. Only 72 and 96 cells modules supported at the moment. Change numcells to either of this options!")
return
pvmod=pvmismatch.pvmismatch_lib.pvmodule.PVmodule(cell_pos=cell_pos)
pvsys = pvmismatch.pvsystem.PVsystem(numberStrs=1, numberMods=1, pvmods=pvmod)
# Calculate powers for each hour:
for i in range(0,len(colkeys)):
Pavg, Pdet = calculatePVMismatch(pvsys = pvsys, stdpl=stdpl, cellsx=cellsx, cellsy=cellsy, Gpoat=list(Poat[colkeys[i]]/1000))
Pavg_front, Pdet_front = calculatePVMismatch(pvsys = pvsys, stdpl = stdpl, cellsx = cellsx, cellsy = cellsy, Gpoat= list(F[colkeys[i]]/1000))
Pavg_all.append(Pavg)
Pdet_all.append(Pdet)
Pavg_front_all.append(Pavg_front)
Pdet_front_all.append(Pdet_front)
## Rename Rows and save dataframe and outputs.
F.index='FrontIrradiance_cell_'+F.index.astype(str)
B.index='BackIrradiance_cell_'+B.index.astype(str)
Poat.index='POAT_Irradiance_cell_'+Poat.index.astype(str)
## Transpose
F = F.T
B = B.T
Poat = Poat.T
    # Statistics Calculations
dfst=pd.DataFrame()
dfst['MAD/G_Total'] = mad_fn(Poat.T)
dfst['Front_MAD/G_Total'] = mad_fn(F.T)
dfst['MAD/G_Total**2'] = dfst['MAD/G_Total']**2
dfst['Front_MAD/G_Total**2'] = dfst['Front_MAD/G_Total']**2
dfst['poat'] = Poat.mean(axis=1)
dfst['gfront'] = F.mean(axis=1)
dfst['grear'] = B.mean(axis=1)
dfst['bifi_ratio'] = dfst['grear']/dfst['gfront']
dfst['stdev'] = Poat.std(axis=1)/ dfst['poat']
dfst.index=Poat.index.astype(str)
# Power Calculations/Saving
Pout=pd.DataFrame()
Pout['Pavg']=Pavg_all
Pout['Pdet']=Pdet_all
Pout['Front_Pavg']=Pavg_front_all
Pout['Front_Pdet']=Pdet_front_all
Pout['Mismatch_rel'] = 100-(Pout['Pdet']*100/Pout['Pavg'])
Pout['Front_Mismatch_rel'] = 100-(Pout['Front_Pdet']*100/Pout['Front_Pavg'])
Pout.index=Poat.index.astype(str)
## Save CSV
df_all = pd.concat([Pout,dfst,Poat,F,B],axis=1)
df_all.to_csv(writefiletitle)
print("Saved Results to ", writefiletitle)
|
|
"""Package for learning complete games from data
The API of this individual module is still unstable and may change as
improvements or refinements are made.
There are two general game types in this module: learned games and deviation
games. Learned games vary by the method, but generally expose methods for
computing payoffs and many other features. Deviation games use learned games and
different functions to compute deviation payoffs via various methods.
"""
import warnings
import numpy as np
from numpy.lib import recfunctions
import sklearn
from sklearn import gaussian_process as gp
from gameanalysis import gamereader
from gameanalysis import paygame
from gameanalysis import restrict
from gameanalysis import rsgame
from gameanalysis import utils
class _DevRegressionGame(rsgame._CompleteGame): # pylint: disable=protected-access
"""A game regression model that learns deviation payoffs
This model functions as a game, but doesn't have a default way of computing
deviation payoffs. It must be wrapped with another game that uses payoff
data to compute deviation payoffs.
"""
def __init__( # pylint: disable=too-many-arguments
self, game, regressors, offset, scale, min_payoffs, max_payoffs,
rest):
super().__init__(game.role_names, game.strat_names,
game.num_role_players)
self._regressors = regressors
self._offset = offset
self._offset.setflags(write=False)
self._scale = scale
self._scale.setflags(write=False)
self._min_payoffs = min_payoffs
self._min_payoffs.setflags(write=False)
self._max_payoffs = max_payoffs
self._max_payoffs.setflags(write=False)
self._rest = rest
self._rest.setflags(write=False)
def deviation_payoffs(self, _, **_kw): # pylint: disable=arguments-differ
raise ValueError(
"regression games don't define deviation payoffs and must be "
'used as a model for a deviation game')
def get_payoffs(self, profiles):
utils.check(
self.is_profile(profiles).all(), 'must pass valid profiles')
payoffs = np.zeros(profiles.shape)
for i, (off, scale, reg) in enumerate(zip(
self._offset, self._scale, self._regressors)):
mask = profiles[..., i] > 0
profs = profiles[mask]
profs[:, i] -= 1
if profs.size:
payoffs[mask, i] = reg.predict(restrict.translate(
profs, self._rest)).ravel() * scale + off
return payoffs
def get_dev_payoffs(self, dev_profs):
"""Compute the payoff for deviating
This implementation is more efficient than the default since we don't
need to compute the payoff for non deviators."""
prof_view = np.rollaxis(restrict.translate(dev_profs.reshape(
(-1, self.num_roles, self.num_strats)), self._rest), 1, 0)
payoffs = np.empty(dev_profs.shape[:-2] + (self.num_strats,))
pay_view = payoffs.reshape((-1, self.num_strats)).T
for pays, profs, reg in zip(
pay_view, utils.repeat(prof_view, self.num_role_strats),
self._regressors):
np.copyto(pays, reg.predict(profs))
return payoffs * self._scale + self._offset
def max_strat_payoffs(self):
return self._max_payoffs.view()
def min_strat_payoffs(self):
return self._min_payoffs.view()
def restrict(self, restriction):
base = rsgame.empty_copy(self).restrict(restriction)
new_rest = self._rest.copy()
new_rest[new_rest] = restriction
regs = tuple(reg for reg, m in zip(self._regressors, restriction) if m)
return _DevRegressionGame(
base, regs, self._offset[restriction], self._scale[restriction],
self._min_payoffs[restriction], self._max_payoffs[restriction],
new_rest)
def _add_constant(self, constant):
off = np.broadcast_to(constant, self.num_roles).repeat(
self.num_role_strats)
return _DevRegressionGame(
self, self._regressors, self._offset + off, self._scale,
self._min_payoffs + off, self._max_payoffs + off, self._rest)
def _multiply_constant(self, constant):
mul = np.broadcast_to(constant, self.num_roles).repeat(
self.num_role_strats)
return _DevRegressionGame(
self, self._regressors, self._offset * mul, self._scale * mul,
self._min_payoffs * mul, self._max_payoffs * mul, self._rest)
def _add_game(self, _):
return NotImplemented
def __eq__(self, othr):
# pylint: disable-msg=protected-access
return (super().__eq__(othr) and
self._regressors == othr._regressors and
np.allclose(self._offset, othr._offset) and
np.allclose(self._scale, othr._scale) and
np.all(self._rest == othr._rest))
def __hash__(self):
return hash((super().__hash__(), self._rest.tobytes()))
def _dev_profpay(game):
"""Iterate over deviation profiles and payoffs"""
sgame = paygame.samplegame_copy(game)
profiles = sgame.flat_profiles()
payoffs = sgame.flat_payoffs()
for i, pays in enumerate(payoffs.T):
mask = (profiles[:, i] > 0) & ~np.isnan(pays)
utils.check(
mask.any(), "couldn't find deviation data for a strategy")
profs = profiles[mask]
profs[:, i] -= 1
yield i, profs, pays[mask]
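# For example, `for i, profs, pays in _dev_profpay(game)` yields, per strategy
# index i, the observed profiles with the deviator removed and the deviator's
# payoffs; the training routines below fit one regressor per strategy from this.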
def nngame_train( # pylint: disable=too-many-arguments,too-many-locals
game, epochs=100, layer_sizes=(32, 32), dropout=0.2, verbosity=0,
optimizer='sgd', loss='mean_squared_error'):
"""Train a neural network regression model
This mostly exists as a proof of concept, individual testing should be done
to make sure it is working sufficiently. This API will likely change to
support more general architectures and training.
"""
utils.check(layer_sizes, 'must have at least one layer')
utils.check(0 <= dropout < 1, 'dropout must be a valid probability')
    # This is for delayed importing of tensorflow
from keras import models, layers
model = models.Sequential()
lay_iter = iter(layer_sizes)
model.add(layers.Dense(
next(lay_iter), input_shape=[game.num_strats], activation='relu'))
for units in lay_iter:
model.add(layers.Dense(units, activation='relu'))
if dropout:
model.add(layers.Dropout(dropout))
model.add(layers.Dense(1, activation='sigmoid'))
regs = []
offsets = np.empty(game.num_strats)
scales = np.empty(game.num_strats)
for i, profs, pays in _dev_profpay(game):
# XXX Payoff normalization specific to sigmoid. If we accept alternate
# models, we need a way to compute how to potentially normalize
# payoffs.
min_pay = pays.min()
offsets[i] = min_pay
max_pay = pays.max()
scale = 1 if np.isclose(max_pay, min_pay) else max_pay - min_pay
scales[i] = scale
reg = models.clone_model(model)
reg.compile(optimizer=optimizer, loss=loss)
reg.fit(profs, (pays - min_pay) / scale, epochs=epochs,
verbose=verbosity)
regs.append(reg)
return _DevRegressionGame(
game, tuple(regs), offsets, scales, game.min_strat_payoffs(),
game.max_strat_payoffs(), np.ones(game.num_strats, bool))
def sklgame_train(game, estimator):
"""Create a regression game from an arbitrary sklearn estimator
Parameters
----------
game : RsGame
The game to learn, must have at least one payoff per strategy.
estimator : sklearn estimator
        An estimator that supports clone, fit, and predict via the standard
        scikit-learn estimator API.
"""
regs = []
for _, profs, pays in _dev_profpay(game):
reg = sklearn.base.clone(estimator)
reg.fit(profs, pays)
regs.append(reg)
return _DevRegressionGame(
game, tuple(regs), np.zeros(game.num_strats), np.ones(game.num_strats),
game.min_strat_payoffs(), game.max_strat_payoffs(),
np.ones(game.num_strats, bool))
class _RbfGpGame(rsgame._CompleteGame): # pylint: disable=too-many-instance-attributes,protected-access
"""A regression game using RBF Gaussian processes
    This regression game has a built-in deviation payoff based on a
    continuous approximation of the multinomial distribution.
"""
def __init__( # pylint: disable=too-many-locals,too-many-arguments
self, role_names, strat_names, num_role_players, offset, coefs,
lengths, sizes, profiles, alpha):
super().__init__(role_names, strat_names, num_role_players)
self._offset = offset
self._offset.setflags(write=False)
self._coefs = coefs
self._coefs.setflags(write=False)
self._lengths = lengths
self._lengths.setflags(write=False)
self._sizes = sizes
self._sizes.setflags(write=False)
self._size_starts = np.insert(self._sizes[:-1].cumsum(), 0, 0)
self._size_starts.setflags(write=False)
self._profiles = profiles
self._profiles.setflags(write=False)
self._alpha = alpha
self._alpha.setflags(write=False)
# Useful member
self._dev_players = np.repeat(
self.num_role_players - np.eye(self.num_roles, dtype=int),
self.num_role_strats, 0)
self._dev_players.setflags(write=False)
# Compute min and max payoffs
# TODO These are pretty conservative, and could maybe be made more
# accurate
sdp = self._dev_players.repeat(self.num_role_strats, 1)
max_rbf = np.einsum('ij,ij,ij->i', sdp, sdp, 1 / self._lengths)
minw = np.exp(-max_rbf / 2) # pylint: disable=invalid-unary-operand-type
mask = self._alpha > 0
pos = np.add.reduceat(self._alpha * mask, self._size_starts)
neg = np.add.reduceat(self._alpha * ~mask, self._size_starts)
self._min_payoffs = self._coefs * (pos * minw + neg) + self._offset
self._min_payoffs.setflags(write=False)
self._max_payoffs = self._coefs * (pos + neg * minw) + self._offset
self._max_payoffs.setflags(write=False)
def get_payoffs(self, profiles):
utils.check(
self.is_profile(profiles).all(), 'must pass valid profiles')
dev_profiles = np.repeat(
profiles[..., None, :] - np.eye(self.num_strats, dtype=int),
self._sizes, -2)
vec = ((dev_profiles - self._profiles) /
self._lengths.repeat(self._sizes, 0))
rbf = np.einsum('...ij,...ij->...i', vec, vec)
payoffs = self._offset + self._coefs * np.add.reduceat(
np.exp(-rbf / 2) * self._alpha, self._size_starts, -1) # pylint: disable=invalid-unary-operand-type
payoffs[profiles == 0] = 0
return payoffs
def get_dev_payoffs(self, dev_profs, *, jacobian=False): # pylint: disable=arguments-differ
dev_profiles = dev_profs.repeat(
np.add.reduceat(self._sizes, self.role_starts), -2)
vec = ((dev_profiles - self._profiles) /
self._lengths.repeat(self._sizes, 0))
rbf = np.einsum('...ij,...ij->...i', vec, vec)
exp = np.exp(-rbf / 2) * self._alpha # pylint: disable=invalid-unary-operand-type
payoffs = self._offset + self._coefs * np.add.reduceat(
exp, self._size_starts, -1)
if not jacobian:
return payoffs
jac = -(self._coefs[:, None] / self._lengths *
np.add.reduceat(exp[:, None] * vec, self._size_starts, 0))
return payoffs, jac
def max_strat_payoffs(self):
return self._max_payoffs.view()
def min_strat_payoffs(self):
return self._min_payoffs.view()
def deviation_payoffs(self, mixture, *, jacobian=False, **_): # pylint: disable=too-many-locals
players = self._dev_players.repeat(self.num_role_strats, 1)
avg_prof = players * mixture
diag = 1 / (self._lengths ** 2 + avg_prof)
diag_sizes = diag.repeat(self._sizes, 0)
diff = self._profiles - avg_prof.repeat(self._sizes, 0)
det = 1 / (1 - self._dev_players * np.add.reduceat(
mixture ** 2 * diag, self.role_starts, 1))
det_sizes = det.repeat(self._sizes, 0)
cov_diag = np.einsum('ij,ij,ij->i', diff, diff, diag_sizes)
cov_outer = np.add.reduceat(
mixture * diag_sizes * diff, self.role_starts, 1)
sec_term = np.einsum(
'ij,ij,ij,ij->i', self._dev_players.repeat(self._sizes, 0),
det_sizes, cov_outer, cov_outer)
exp = np.exp(-(cov_diag + sec_term) / 2)
coef = self._lengths.prod(1) * np.sqrt(diag.prod(1) * det.prod(1))
avg = np.add.reduceat(self._alpha * exp, self._size_starts)
payoffs = self._coefs * coef * avg + self._offset
if not jacobian:
return payoffs
beta = 1 - players * mixture * diag
jac_coef = (
((beta ** 2 - 1) * det.repeat(self.num_role_strats, 1) +
players * diag) * avg[:, None])
delta = np.repeat(cov_outer * det_sizes, self.num_role_strats, 1)
jac_exp = -self._alpha[:, None] * exp[:, None] * (
(delta * beta.repeat(self._sizes, 0) - diff * diag_sizes - 1) ** 2
- (delta - 1) ** 2)
jac_avg = (players * np.add.reduceat(jac_exp, self._size_starts, 0))
jac = -self._coefs[:, None] * coef[:, None] * (jac_coef + jac_avg) / 2
return payoffs, jac
# TODO Add function that creates sample game which draws payoffs from the
# gp distribution
def restrict(self, restriction):
restriction = np.asarray(restriction, bool)
base = rsgame.empty_copy(self).restrict(restriction)
size_mask = restriction.repeat(self._sizes)
sizes = self._sizes[restriction]
profiles = self._profiles[size_mask]
lengths = self._lengths[restriction]
zeros = (profiles[:, ~restriction] /
lengths[:, ~restriction].repeat(sizes, 0))
removed = np.exp(-np.einsum('ij,ij->i', zeros, zeros) / 2) # pylint: disable=invalid-unary-operand-type
uprofs, inds = np.unique(
recfunctions.merge_arrays([
np.arange(restriction.sum()).repeat(sizes).view([('s', int)]),
utils.axis_to_elem(profiles[:, restriction])], flatten=True),
return_inverse=True)
new_alpha = np.bincount(inds, removed * self._alpha[size_mask])
new_sizes = np.diff(np.concatenate([
[-1], np.flatnonzero(np.diff(uprofs['s'])),
[new_alpha.size - 1]]))
return _RbfGpGame(
base.role_names, base.strat_names, base.num_role_players,
self._offset[restriction], self._coefs[restriction],
lengths[:, restriction], new_sizes, uprofs['axis'], new_alpha)
def _add_constant(self, constant):
off = np.broadcast_to(constant, self.num_roles).repeat(
self.num_role_strats)
return _RbfGpGame(
self.role_names, self.strat_names, self.num_role_players,
self._offset + off, self._coefs, self._lengths, self._sizes,
self._profiles, self._alpha)
def _multiply_constant(self, constant):
mul = np.broadcast_to(constant, self.num_roles).repeat(
self.num_role_strats)
return _RbfGpGame(
self.role_names, self.strat_names, self.num_role_players,
self._offset * mul, self._coefs * mul, self._lengths, self._sizes,
self._profiles, self._alpha)
def _add_game(self, _):
return NotImplemented
def to_json(self):
base = super().to_json()
base['offsets'] = self.payoff_to_json(self._offset)
base['coefs'] = self.payoff_to_json(self._coefs)
lengths = {}
for role, strats, lens in zip(
self.role_names, self.strat_names,
np.split(self._lengths, self.role_starts[1:])):
lengths[role] = {s: self.payoff_to_json(l)
for s, l in zip(strats, lens)}
base['lengths'] = lengths
profs = {}
for role, strats, data in zip(
self.role_names, self.strat_names,
np.split(np.split(self._profiles, self._size_starts[1:]),
self.role_starts[1:])):
profs[role] = {strat: [self.profile_to_json(p) for p in dat]
for strat, dat in zip(strats, data)}
base['profiles'] = profs
alphas = {}
for role, strats, alphs in zip(
self.role_names, self.strat_names,
np.split(np.split(self._alpha, self._size_starts[1:]),
self.role_starts[1:])):
alphas[role] = {s: a.tolist() for s, a in zip(strats, alphs)}
base['alphas'] = alphas
base['type'] = 'rbf.1'
return base
def __eq__(self, othr):
# pylint: disable-msg=protected-access
return (super().__eq__(othr) and
np.allclose(self._offset, othr._offset) and
np.allclose(self._coefs, othr._coefs) and
np.allclose(self._lengths, othr._lengths) and
np.all(self._sizes == othr._sizes) and
utils.allclose_perm(
np.concatenate([
np.arange(self.num_strats).repeat(
self._sizes)[:, None],
self._profiles, self._alpha[:, None]], 1),
np.concatenate([
np.arange(othr.num_strats).repeat(
othr._sizes)[:, None],
othr._profiles, othr._alpha[:, None]], 1)))
@utils.memoize
def __hash__(self):
hprofs = np.sort(utils.axis_to_elem(np.concatenate([
np.arange(self.num_strats).repeat(self._sizes)[:, None],
self._profiles], 1))).tobytes()
return hash((super().__hash__(), hprofs))
def rbfgame_train(game, num_restarts=3): # pylint: disable=too-many-locals
"""Train a regression game with an RBF Gaussian process
    This model is somewhat well tested and has a few added benefits over
    standard regression models due to the nature of its functional form.
Parameters
----------
game : RsGame
The game to learn. Must have at least one payoff per strategy.
num_restarts : int, optional
The number of random restarts to make with the optimizer. Higher
numbers will give a better fit (in expectation), but will take
longer.
"""
dev_players = np.maximum(game.num_role_players - np.eye(
game.num_roles, dtype=int), 1).repeat(
game.num_role_strats, 0).repeat(game.num_role_strats, 1)
bounds = np.insert(dev_players[..., None], 0, 1, 2)
# TODO Add an alpha that is smaller for points near the edge of the
# simplex, accounting for the importance of minimizing error at the
# extrema.
means = np.empty(game.num_strats)
coefs = np.empty(game.num_strats)
lengths = np.empty((game.num_strats, game.num_strats))
profiles = []
alpha = []
sizes = []
for (strat, profs, pays), bound in zip(_dev_profpay(game), bounds):
pay_mean = pays.mean()
pays -= pay_mean
reg = gp.GaussianProcessRegressor(
1.0 * gp.kernels.RBF(bound.mean(1), bound) +
gp.kernels.WhiteKernel(1), n_restarts_optimizer=num_restarts,
copy_X_train=False)
reg.fit(profs, pays)
means[strat] = pay_mean
coefs[strat] = reg.kernel_.k1.k1.constant_value
lengths[strat] = reg.kernel_.k1.k2.length_scale
uprofs, inds = np.unique(
utils.axis_to_elem(profs), return_inverse=True)
profiles.append(utils.axis_from_elem(uprofs))
alpha.append(np.bincount(inds, reg.alpha_))
sizes.append(uprofs.size)
if np.any(lengths[..., None] == bounds):
warnings.warn(
'some lengths were at their bounds, this may indicate a poor '
'fit')
return _RbfGpGame(
game.role_names, game.strat_names, game.num_role_players, means, coefs,
lengths, np.array(sizes), np.concatenate(profiles),
np.concatenate(alpha))
def rbfgame_json(json):
"""Read an rbf game from json"""
utils.check(json['type'].split('.', 1)[0] == 'rbf', 'incorrect type')
base = rsgame.empty_json(json)
offsets = base.payoff_from_json(json['offsets'])
coefs = base.payoff_from_json(json['coefs'])
lengths = np.empty((base.num_strats,) * 2)
for role, strats in json['lengths'].items():
for strat, pay in strats.items():
ind = base.role_strat_index(role, strat)
base.payoff_from_json(pay, lengths[ind])
profiles = [None] * base.num_strats
for role, strats in json['profiles'].items():
for strat, profs in strats.items():
ind = base.role_strat_index(role, strat)
profiles[ind] = np.stack([
base.profile_from_json(p, verify=False) for p in profs])
alphas = [None] * base.num_strats
for role, strats in json['alphas'].items():
for strat, alph in strats.items():
ind = base.role_strat_index(role, strat)
alphas[ind] = np.array(alph)
sizes = np.fromiter( # pragma: no branch
(a.size for a in alphas), int, base.num_strats)
return _RbfGpGame(
base.role_names, base.strat_names, base.num_role_players, offsets,
coefs, lengths, sizes, np.concatenate(profiles),
np.concatenate(alphas))
class _DeviationGame(rsgame._CompleteGame): # pylint: disable=abstract-method,protected-access
"""A game that adds deviation payoffs"""
def __init__(self, model_game):
super().__init__(model_game.role_names, model_game.strat_names,
model_game.num_role_players)
utils.check(
model_game.is_complete(),
'deviation models must be complete games')
self.model = model_game
def get_payoffs(self, profiles):
return self.model.get_payoffs(profiles)
def profiles(self):
return self.model.profiles()
def payoffs(self):
return self.model.payoffs()
def max_strat_payoffs(self):
return self.model.max_strat_payoffs()
def min_strat_payoffs(self):
return self.model.min_strat_payoffs()
def to_json(self):
base = super().to_json()
base['model'] = self.model.to_json()
return base
def __eq__(self, othr):
return (super().__eq__(othr) and
self.model == othr.model)
@utils.memoize
def __hash__(self):
return hash((super().__hash__(), self.model))
class _SampleDeviationGame(_DeviationGame):
"""Deviation payoffs by sampling from mixture
This model produces unbiased deviation payoff estimates, but they're noisy
and random and take a while to compute. This is accurate in the limit as
`num_samples` goes to infinity.
Parameters
----------
model : DevRegressionGame
A payoff model
num_samples : int, optional
The number of samples to use for each deviation estimate. Higher means
lower variance but higher computation time.
"""
def __init__(self, model, num_samples=100):
super().__init__(model)
utils.check(num_samples > 0, 'num samples must be greater than 0')
# TODO It might be interesting to play with a sample schedule, i.e.
# change the number of samples based off of the query number to
# deviation payoffs (i.e. reduce variance as we get close to
# convergence)
self.num_samples = num_samples
def deviation_payoffs(self, mixture, *, jacobian=False, **_):
"""Compute the deivation payoffs
The method computes the jacobian as if we were importance sampling the
results, i.e. the function is really always sample according to mixture
m', but then importance sample to get the actual result."""
profs = self.random_role_deviation_profiles(self.num_samples, mixture)
payoffs = self.model.get_dev_payoffs(profs)
dev_pays = payoffs.mean(0)
if not jacobian:
return dev_pays
supp = mixture > 0
weights = np.zeros(profs.shape)
weights[..., supp] = profs[..., supp] / mixture[supp]
jac = np.einsum('ij,ijk->jk', payoffs, weights.repeat(
self.num_role_strats, 1)) / self.num_samples
return dev_pays, jac
def restrict(self, restriction):
return _SampleDeviationGame(
self.model.restrict(restriction), self.num_samples)
def _add_constant(self, constant):
return _SampleDeviationGame(self.model + constant, self.num_samples)
def _multiply_constant(self, constant):
return _SampleDeviationGame(self.model * constant, self.num_samples)
def _add_game(self, othr):
try:
assert self.num_samples == othr.num_samples
return _SampleDeviationGame(
self.model + othr.model, self.num_samples)
except (AttributeError, AssertionError):
return NotImplemented
def to_json(self):
base = super().to_json()
base['samples'] = self.num_samples
base['type'] = 'sample.1'
return base
def __eq__(self, othr):
return (super().__eq__(othr) and
self.num_samples == othr.num_samples)
@utils.memoize
def __hash__(self):
return hash((super().__hash__(), self.num_samples))
def sample(game, num_samples=100):
"""Create a sample game from a model
Parameters
----------
game : RsGame
If this is a payoff model it will be used to take samples, if this is
        an existing deviation game, then this will use its underlying model.
num_samples : int, optional
The number of samples to take.
"""
try:
return _SampleDeviationGame(game.model, num_samples=num_samples)
except AttributeError:
return _SampleDeviationGame(game, num_samples=num_samples)
def sample_json(json):
"""Read sample game from json"""
utils.check(
json['type'].split('.', 1)[0] == 'sample', 'incorrect type')
return _SampleDeviationGame(
gamereader.loadj(json['model']), num_samples=json['samples'])
class _PointDeviationGame(_DeviationGame):
"""Deviation payoffs by point approximation
This model computes payoffs by finding the deviation payoffs from the point
estimate of the mixture. It's fast but biased. This is accurate in the
limit as the number of players goes to infinity.
For this work, the underlying implementation of get_dev_payoffs must
support floating point profiles, which only really makes sense for
regression games. For deviation payoffs to have a jacobian, the underlying
model must also support a jacobian for get_dev_payoffs.
Parameters
----------
model : DevRegressionGame
A payoff model
"""
def __init__(self, model):
super().__init__(model)
self._dev_players = np.repeat(self.num_role_players - np.eye(
self.num_roles, dtype=int), self.num_role_strats, 1)
def deviation_payoffs(self, mixture, *, jacobian=False, **_):
if not jacobian:
return self.model.get_dev_payoffs(self._dev_players * mixture)
dev, jac = self.model.get_dev_payoffs(
self._dev_players * mixture, jacobian=True)
jac *= self._dev_players.repeat(self.num_role_strats, 0)
return dev, jac
def restrict(self, restriction):
return _PointDeviationGame(self.model.restrict(restriction))
def _add_constant(self, constant):
return _PointDeviationGame(self.model + constant)
def _multiply_constant(self, constant):
return _PointDeviationGame(self.model * constant)
def _add_game(self, othr):
try:
assert isinstance(othr, _PointDeviationGame)
return _PointDeviationGame(self.model + othr.model)
except (AttributeError, AssertionError):
return NotImplemented
def to_json(self):
base = super().to_json()
base['type'] = 'point.1'
return base
def point(game):
"""Create a point game from a model
Parameters
----------
game : RsGame
If this is a payoff model it will be used to take samples, if this is
        an existing deviation game, then this will use its underlying model.
"""
try:
return _PointDeviationGame(game.model)
except AttributeError:
return _PointDeviationGame(game)
def point_json(json):
"""Read point game from json"""
utils.check(
json['type'].split('.', 1)[0] == 'point', 'incorrect type')
return _PointDeviationGame(gamereader.loadj(json['model']))
class _NeighborDeviationGame(_DeviationGame):
"""Create a neighbor game from a model
This takes a normalized weighted estimate of the deviation payoffs by
finding all profiles within `num_neighbors` of the maximum probability
profile for the mixture and weighting them accordingly. This is biased, but
accurate in the limit as `num_neighbors` approaches `num_players`. It also
produces discontinuities every time the maximum probability profile
switches.
Parameters
----------
game : RsGame
If this is a payoff model it will be used to take samples, if this is
        an existing deviation game, then this will use its underlying model.
num_neighbors : int, optional
The number of deviations to take.
"""
def __init__(self, model, num_neighbors=2):
super().__init__(model)
utils.check(num_neighbors >= 0, 'num devs must be nonnegative')
self.num_neighbors = num_neighbors
def deviation_payoffs(self, mixture, *, jacobian=False, **_):
# TODO This is not smooth because there are discontinuities when the
# maximum probability profile jumps at the boundary. If we wanted to
# make it smooth, one option would be to compute the smoother
# interpolation between this and lower probability profiles. All we
# need to ensure smoothness is that the weight at profile
# discontinuities is 0.
profiles = self.nearby_profiles(
self.max_prob_prof(mixture), self.num_neighbors)
payoffs = self.get_payoffs(profiles)
game = paygame.game_replace(self, profiles, payoffs)
return game.deviation_payoffs(mixture, ignore_incomplete=True,
jacobian=jacobian)
def restrict(self, restriction):
return _NeighborDeviationGame(
self.model.restrict(restriction), self.num_neighbors)
def _add_constant(self, constant):
return _NeighborDeviationGame(self.model + constant, self.num_neighbors)
def _multiply_constant(self, constant):
return _NeighborDeviationGame(self.model * constant, self.num_neighbors)
def _add_game(self, othr):
try:
            assert isinstance(othr, _NeighborDeviationGame)
            assert self.num_neighbors == othr.num_neighbors
return _NeighborDeviationGame(
self.model + othr.model, self.num_neighbors)
except (AttributeError, AssertionError):
return NotImplemented
def to_json(self):
base = super().to_json()
base['neighbors'] = self.num_neighbors
base['type'] = 'neighbor.2'
return base
def __eq__(self, othr):
return super().__eq__(othr) and self.num_neighbors == othr.num_neighbors
@utils.memoize
def __hash__(self):
return hash((super().__hash__(), self.num_neighbors))
def neighbor(game, num_neighbors=2):
"""Create a neighbor game from a model
Parameters
----------
game : RsGame
        If this is a payoff model, it will be used as the underlying model;
        if it is an existing deviation game, its underlying model will be
        used.
num_neighbors : int, optional
        The number of deviations away from the maximum probability profile to
        explore.
"""
try:
return _NeighborDeviationGame(game.model, num_neighbors=num_neighbors)
except AttributeError:
return _NeighborDeviationGame(game, num_neighbors=num_neighbors)
def neighbor_json(json):
"""Read neighbor game from json"""
utils.check(
json['type'].split('.', 1)[0] == 'neighbor', 'incorrect type')
return _NeighborDeviationGame(
gamereader.loadj(json['model']),
num_neighbors=json.get('neighbors', json.get('devs', None)))
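# Example (a sketch; `full_game` is assumed to be an RsGame payoff model):
#
#     dev_game = neighbor(full_game, num_neighbors=2)
#     copy = neighbor_json(dev_game.to_json())  # an equivalent game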
|
|
"""`main` is the top level module for your Flask application."""
# Import the Flask Framework and modules
from flask import Flask
from flask.ext.dropbox import Dropbox, DropboxBlueprint
from flask import g, redirect, render_template, request, url_for
from flask import session as flask_session
# Google NDB
from google.appengine.ext import ndb
# Google Cloud Storage
import cloudstorage as gcs
from google.appengine.api import app_identity
# App engine specific urlfetch
from google.appengine.api import urlfetch
# utilities
import datetime
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import json
import base64
import os
import uuid
# our own Dropbox Client reference
from dropbox.client import DropboxClient
from dropbox.session import DropboxSession
# our app settings are stored here.
import settings
import logging
logger = logging.getLogger('sketchfab-upload')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
logger.addHandler(ch)
app = Flask(__name__)
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
app.config.from_object(settings)
dropbox = Dropbox(app)
dropbox.register_blueprint(url_prefix='/dropbox')
register_openers()
# Models
class User(ndb.Model):
"""Models an individual User entry with dropbox credentials and sketchfab API key."""
dropbox_uid = ndb.IntegerProperty()
dropbox_email = ndb.StringProperty()
dropbox_access_token_key = ndb.StringProperty()
dropbox_access_token_secret = ndb.StringProperty()
dropbox_cursor = ndb.StringProperty()
sketchfab_api_token = ndb.StringProperty()
created_date = ndb.DateTimeProperty(auto_now_add=True)
last_login_date = ndb.DateTimeProperty()
last_check_date = ndb.DateTimeProperty()
class Upload(ndb.Model):
"""Models an individual Upload entry."""
sketchfab_api_token = ndb.StringProperty()
sketchfab_model_id = ndb.StringProperty()
dropbox_path = ndb.StringProperty()
createdDate = ndb.DateTimeProperty(auto_now_add=True)
updatedDate = ndb.DateTimeProperty()
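# Example (hypothetical values, for illustration): look up a user by their
# Dropbox uid and record a new upload against their Sketchfab token.
#
#     user = User.query(User.dropbox_uid == 12345).get()
#     upload = Upload(dropbox_path='/model.obj',
#                     sketchfab_api_token=user.sketchfab_api_token)
#     upload.put()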
# routes
@app.route('/')
def home():
return render_template('home.html',
authenticated=dropbox.is_authenticated,
login_url = dropbox.login_url,
logout_url = dropbox.logout_url)
@app.route('/welcome')
def welcome():
# save user info
key, secret = flask_session['dropbox_access_token']
uid = dropbox.account_info['uid']
user = User.query(User.dropbox_uid == uid).get()
if user:
logger.info('Got returning user')
user.populate(
dropbox_email = dropbox.account_info['email'],
dropbox_access_token_key = key,
dropbox_access_token_secret = secret,
last_login_date = datetime.datetime.now())
else:
logger.info('New user')
user = User(dropbox_uid=dropbox.account_info['uid'],
dropbox_email = dropbox.account_info['email'],
dropbox_access_token_key = key,
dropbox_access_token_secret = secret,
last_login_date = datetime.datetime.now())
logger.info(u'user %s' % (user.dropbox_uid))
user.put()
return render_template('welcome.html')
@app.route('/sketchfabtoken', methods=('GET', 'POST'))
def sketchfabtoken():
if request.method == 'POST':
logger.info('got sketchfab API token')
token = request.form['sketchfabapi']
uid = dropbox.account_info['uid']
user = User.query(User.dropbox_uid == uid).get()
if user:
user.populate(sketchfab_api_token = token)
user.put()
return redirect(url_for('done'))
else:
logger.error(u'user %s not found' % uid)
return 'Sorry, user cannot be found', 500
return redirect(url_for('home'))
@app.route('/done')
def done():
return render_template('done.html')
# temporary endpoint to check for new models
@app.route('/checkdropbox')
def checkdropbox():
bucket_name = os.environ.get('BUCKET_NAME', app_identity.get_default_gcs_bucket_name())
bucket = '/' + bucket_name
users = User.query().order(-User.last_check_date).fetch(100)
for user in users:
logger.info(u'checking user %s' % user.dropbox_uid)
session = dropbox.session
session.set_token(user.dropbox_access_token_key, user.dropbox_access_token_secret)
client = DropboxClient(session)
        # the cursor tracks the state of the files; everything we get from
        # this API call is new and should be imported.
        # deltas = client.delta()  # without a cursor this would be a full resync
deltas = client.delta(user.dropbox_cursor)
logger.info('Got response from Dropbox, number of deltas: %s' % len(deltas["entries"]))
# store the cursor of the files states and update check date.
user.populate(dropbox_cursor = deltas["cursor"], last_check_date = datetime.datetime.now())
user.put()
for delta in deltas["entries"]:
#delta is ["path", "metadata"]
if delta[1] and not delta[1]["is_dir"]:
logger.info(u"New model from Dropbox: %s" % delta[0])
upload = Upload.query(Upload.dropbox_path == delta[0], Upload.sketchfab_api_token == user.sketchfab_api_token).get()
if upload:
logger.info(u"Existing Sketchfab model for this path and user, with id: %s" % upload.sketchfab_model_id)
else:
upload = Upload(dropbox_path = delta[0], sketchfab_api_token = user.sketchfab_api_token)
logger.info(u"No existing Sketchfab model found")
                name, extension = os.path.splitext(os.path.basename(delta[0]))
filename = uuid.uuid4().hex + extension
full_file_path = bucket + '/' + filename
logger.info(u"Storing file %s to Google Cloud Storage" % full_file_path)
gcs_file = gcs.open(full_file_path, 'w')
f = client.get_file(delta[0]).read()
gcs_file.write(f)
gcs_file.close()
logger.info(u"Uploading model to Sketchfab's API, using token %s" % user.sketchfab_api_token)
url="https://api.sketchfab.com/v1/models"
data = {
'title': name,
'description': 'uploaded from Sketchfab-Dropbox',
'fileModel': gcs.open(full_file_path),
'filenameModel': filename,
'token': user.sketchfab_api_token
}
# TODO: if existing model ID, we should update the model, using a PUT request?
datamulti, headers = multipart_encode(data)
# FIXME we have issue with request size limitation : request size 10 megabytes, see https://developers.google.com/appengine/docs/python/urlfetch/#Python_Quotas_and_limits
# we may need to use sockets, see https://developers.google.com/appengine/docs/python/sockets/#making_httplib_use_sockets
response = urlfetch.fetch(url=url,
payload="".join(datamulti),
method=urlfetch.POST,
headers=headers)
logger.info(u"response: code: %s content: %s" % (response.status_code, response.content))
if response.status_code == 200:
response_json = json.loads(response.content)
# TODO check result.success
logger.info(u"saving sketchfab model with id %s" % response_json['result']['id'])
upload.populate(sketchfab_model_id = response_json['result']['id'])
upload.populate(updatedDate = datetime.datetime.now())
upload.put()
return 'Aaaannnnnd done'
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.', 404
@app.errorhandler(500)
def server_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
|
|
# -*- coding: utf-8 -*-
"""
The Struct is a convenient way to access data in a hash.
Makes it possible to load data from redis as an object and access the fields.
Then store changes back into redis.
"""
from six import add_metaclass
from json.encoder import JSONEncoder
from functools import wraps
from .pipelines import autoexec
from .keyspaces import Hash
from .fields import TextField
from .exceptions import InvalidOperation
from .futures import Future, IS
__all__ = ['Struct']
class StructMeta(type):
"""
Data binding of a redpipe.Hash to the core of the Struct object.
Creates it dynamically on class construction.
    Uses the keyspace and connection fields.
Meta Classes are strange beasts.
"""
def __new__(mcs, name, bases, d):
if name in ['Struct'] and d.get('__module__', '') == 'redpipe.structs':
return type.__new__(mcs, name, bases, d)
class StructHash(Hash):
keyspace = d.get('keyspace', name)
connection = d.get('connection', None)
fields = d.get('fields', {})
keyparse = d.get('keyparse', TextField)
valueparse = d.get('valueparse', TextField)
memberparse = d.get('memberparse', TextField)
keyspace_template = d.get('keyspace_template', '%s{%s}')
d['core'] = StructHash
return type.__new__(mcs, name, bases, d)
@add_metaclass(StructMeta)
class Struct(object):
"""
load and store structured data in redis using OOP patterns.
If you pass in a dictionary-like object, redpipe will write all the
values you pass in to redis to the key you specify.
By default, the primary key name is `_key`.
But you should override this in your Struct with the `key_name`
property.
.. code-block:: python
class Beer(redpipe.Struct):
fields = {'name': redpipe.TextField}
key_name = 'beer_id'
beer = Beer({'beer_id': '1', 'name': 'Schlitz'})
This will store the data you pass into redis.
It will also load any additional fields to hydrate the object.
**RedPipe** does this in the same pipelined call.
    If you need a stub record that neither loads nor saves data, do:
.. code-block:: python
beer = Beer({'beer_id': '1'}, no_op=True)
    You can later load the fields you want using `load`.
If you pass in a string we assume it is the key of the record.
redpipe loads the data from redis:
.. code-block:: python
beer = Beer('1')
assert(beer['beer_id'] == '1')
assert(beer['name'] == 'Schlitz')
If you need to load a record but only specific fields, you can say so.
.. code-block:: python
beer = Beer('1', fields=['name'])
This will exclude all other fields.
**RedPipe** cares about pipelining and efficiency, so if you need to
bundle a bunch of reads or writes together, by all means do so!
.. code-block:: python
beer_ids = ['1', '2', '3']
with redpipe.pipeline() as pipe:
beers = [Beer(i, pipe=pipe) for i in beer_ids]
print(beers)
This will pipeline all 3 together and load them in a single pass
from redis.
The following methods all accept a pipe:
* __init__
* update
* incr
* decr
* pop
* remove
* clear
* delete
You can pass a pipeline into them to make sure that the network i/o is
combined with another pipeline operation.
The other methods on the object are about accessing the data
already loaded.
So you shouldn't need to pipeline them.
One more thing ... suppose you are storing temporary data and you want it
to expire after a few days. You can easily make that happen just by
changing the object definition:
.. code-block:: python
class Beer(redpipe.Struct):
fields = {'name': redpipe.TextField}
key_name = 'beer_id'
ttl = 24 * 60 * 60 * 3
This makes sure that any set operations on the Struct will set the expiry
at the same time. If the object isn't modified for more than the seconds
specified in the ttl (stands for time-to-live), then the object will be
expired from redis. This is useful for temporary objects.
"""
__slots__ = ['key', '_data']
keyspace = None
connection = None
key_name = '_key'
fields = {}
required = set()
    default_fields = 'all'  # set as 'defined', 'all', or ['a', 'b', 'c']
field_attr_on = False
ttl = None
def __init__(self, _key_or_data, pipe=None, fields=None, no_op=False,
nx=False):
"""
class constructor
:param _key_or_data:
:param pipe:
:param fields:
:param no_op: bool
:param nx: bool
"""
self._data = {}
with self._pipe(pipe=pipe) as pipe:
            # first we try to treat the first arg as a dictionary.
# this is if we are passing in data to be set into the redis hash.
# if that doesn't work, we assume it must be the name of the key.
try:
# force type to dict.
# this blows up if it's a string.
coerced = dict(_key_or_data)
# look for the primary key in the data
# won't work if we don't have this.
keyname = self.key_name
# track the primary key
# it's the name of the key only.
# the keyspace we defined will transform it into the full
# name of the key.
self.key = coerced[keyname]
# remove it from our data set.
# we don't write this value into redis.
del coerced[keyname]
# no op flag means don't write or read from the db.
# if so, we just set the dictionary.
# this is useful if we are cloning the object
# or rehydrating it somehow.
if no_op:
self._data = coerced
return
required_found = self.required.intersection(coerced.keys())
if len(required_found) != len(self.required):
raise InvalidOperation('missing required field(s): %s' %
list(self.required - required_found)
)
self.update(coerced, pipe=pipe, nx=nx)
# we wind up here if a dictionary was passed in, but it
# didn't contain the primary key
except KeyError:
# can't go any further, blow up.
raise InvalidOperation(
'must specify primary key when cloning a struct')
# this is a normal case, not really exceptional.
# If you pass in the name of the key, you wind up here.
except (ValueError, TypeError):
self.key = _key_or_data
# normally we ask redis for the data from redis.
# if the no_op flag was passed we skip it.
if not no_op:
self.load(fields=fields, pipe=pipe)
def load(self, fields=None, pipe=None):
"""
Load data from redis.
Allows you to specify what fields to load.
This method is also called implicitly from the constructor.
:param fields: 'all', 'defined', or array of field names
:param pipe: Pipeline(), NestedPipeline() or None
:return: None
"""
if fields is None:
fields = self.default_fields
if fields == 'all':
return self._load_all(pipe=pipe)
if fields == 'defined':
fields = [k for k in self.fields.keys()]
if not fields:
return
with self._pipe(pipe) as pipe:
# get the list of fields.
# it returns a numerically keyed array.
# when that happens we match up the results
# to the keys we requested.
ref = self.core(pipe=pipe).hmget(self.key, fields)
def cb():
"""
This callback fires when the root pipeline executes.
At that point, we hydrate the response into this object.
:return: None
"""
for i, v in enumerate(ref.result):
k = fields[i]
# redis will return all of the fields we requested
# regardless of whether or not they are set.
# if the value is None, it's not set in redis.
# Use that as a signal to remove that value from local.
if v is None:
self._data.pop(k, None)
# as long as the field is not the primary key,
                    # map it into the local data structure
elif k != self.key_name:
self._data[k] = v
# attach the callback to the pipeline.
pipe.on_execute(cb)
def _load_all(self, pipe=None):
"""
Load all data from the redis hash key into this local object.
:param pipe: optional pipeline
:return: None
"""
with self._pipe(pipe) as pipe:
ref = self.core(pipe=pipe).hgetall(self.key)
def cb():
if not ref.result:
return
for k, v in ref.result.items():
if k != self.key_name:
self._data[k] = v
pipe.on_execute(cb)
def incr(self, field, amount=1, pipe=None):
"""
        Increment a field by a given amount.
        Returns a Future with the new value, and updates the local field
        once the pipeline executes.
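        Example (a sketch; ``rating`` is a hypothetical integer field on the
        ``Beer`` struct from the class docstring):
        .. code-block:: python
            beer = Beer('1')
            new_rating = beer.incr('rating')  # a Future with the new value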
:param field:
:param amount:
:param pipe:
:return:
"""
with self._pipe(pipe) as pipe:
core = self.core(pipe=pipe)
# increment the key
new_amount = core.hincrby(self.key, field, amount)
self._expire(pipe=pipe)
# we also read the value of the field.
# this is a little redundant, but otherwise we don't know exactly
# how to format the field.
# I suppose we could pass the new_amount through the formatter?
ref = core.hget(self.key, field)
def cb():
"""
Once we hear back from redis, set the value locally
in the object.
:return:
"""
self._data[field] = ref.result
pipe.on_execute(cb)
return new_amount
def decr(self, field, amount=1, pipe=None):
"""
Inverse of incr function.
:param field:
:param amount:
:param pipe:
        :return: Future
"""
return self.incr(field, amount * -1, pipe=pipe)
def update(self, changes, pipe=None, nx=False):
"""
update the data in the Struct.
This will update the values in the underlying redis hash.
After the pipeline executes, the changes will be reflected here
in the local struct.
If any values in the changes dict are None, those fields will be
removed from redis and the instance.
The changes should be a dictionary representing the fields to change
and the values to change them to.
If you pass the nx flag, only sets the fields if they don't exist yet.
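        Example (a sketch, reusing the ``Beer`` struct from the class
        docstring; ``style`` is a hypothetical field):
        .. code-block:: python
            beer = Beer('1')
            beer.update({'name': 'Pabst', 'style': None})  # None removes it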
:param changes: dict
:param pipe: Pipeline, NestedPipeline, or None
:param nx: bool
:return: None
"""
if not changes:
return
# can't remove the primary key.
# maybe you meant to delete the object?
# look at delete method.
if self.key_name in changes:
raise InvalidOperation('cannot update the redis key')
# sort the change set into updates and deletes.
# the deletes are entries with None as the value.
# updates are everything else.
deletes = [k for k, v in changes.items() if IS(v, None)]
updates = {k: v for k, v in changes.items() if k not in deletes}
with self._pipe(pipe) as pipe:
core = self.core(pipe=pipe)
set_method = core.hsetnx if nx else core.hset
def build(k, v):
"""
Internal closure so we can set the field in redis and
set up a callback to write the data into the local instance
data once we hear back from redis.
:param k: the member of the hash key
:param v: the value we want to set
:return: None
"""
res = set_method(self.key, k, v)
def cb():
"""
Here's the callback.
Now that the data has been written to redis, we can
update the local state.
:return: None
"""
if not nx or res == 1:
self._data[k] = v
# attach the callback.
pipe.on_execute(cb)
# all the other stuff so far was just setup for this part
# iterate through the updates and set up the calls to redis
# along with the callbacks to update local state once the
# changes come back from redis.
for k, v in updates.items():
build(k, v)
# pass off all the delete operations to the remove call.
# happens in the same pipeline.
self.remove(deletes, pipe=pipe)
self._expire(pipe=pipe)
def remove(self, fields, pipe=None):
"""
remove some fields from the struct.
This will remove data from the underlying redis hash object.
After the pipe executes successfully, it will also remove it from
the current instance of Struct.
:param fields: list or iterable, names of the fields to remove.
:param pipe: Pipeline, NestedPipeline, or None
:return: None
"""
# no fields specified? It's a no op.
if not fields:
return
# can't remove the primary key.
# maybe you meant to call the delete method?
if self.key_name in fields:
raise InvalidOperation('cannot remove the redis key')
removed_required_fields = self.required.intersection(fields)
if len(removed_required_fields):
raise InvalidOperation('cannot remove required field(s): %s'
% list(removed_required_fields))
with self._pipe(pipe) as pipe:
# remove all the fields specified from redis.
core = self.core(pipe=pipe)
core.hdel(self.key, *fields)
self._expire(pipe=pipe)
# set up a callback to remove the fields from this local object.
def cb():
"""
once the data has been removed from redis,
Remove the data here.
:return:
"""
for k in fields:
self._data.pop(k, None)
# attach the callback.
pipe.on_execute(cb)
def copy(self):
"""
like the dictionary copy method.
:return:
"""
return self.__class__(dict(self))
@property
def persisted(self):
"""
Not certain I want to keep this around.
Is it useful?
:return:
"""
        return bool(self._data)
def clear(self, pipe=None):
"""
delete the current redis key.
:param pipe:
:return:
"""
with self._pipe(pipe) as pipe:
self.core(pipe=pipe).delete(self.key)
def cb():
self._data = {}
pipe.on_execute(cb)
def get(self, item, default=None):
"""
works like the dict get method.
:param item:
:param default:
:return:
"""
return self._data.get(item, default)
def pop(self, name, default=None, pipe=None):
"""
works like the dictionary pop method.
        IMPORTANT!
        This method removes the field from the underlying redis hash.
        If that is not the behavior you want, first convert your
        Struct data to a dict.
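        Example (sketch):
        .. code-block:: python
            name = beer.pop('name')  # a Future; the field is gone from redis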
:param name:
:param default:
:param pipe:
:return:
"""
f = Future()
with self._pipe(pipe) as pipe:
c = self.core(pipe)
ref = c.hget(self.key, name)
c.hdel(self.key, name)
self._expire(pipe=pipe)
def cb():
f.set(default if ref.result is None else ref.result)
                self._data.pop(name, None)
pipe.on_execute(cb)
return f
@classmethod
def delete(cls, keys, pipe=None):
"""
Delete one or more keys from the Struct namespace.
This is a class method and unlike the `clear` method,
can be invoked without instantiating a Struct.
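        Example (sketch):
        .. code-block:: python
            Beer.delete(['1', '2'])  # no instance required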
:param keys: the names of the keys to remove from the keyspace
:param pipe: Pipeline, NestedPipeline, or None
:return: None
"""
with cls._pipe(pipe) as pipe:
core = cls.core(pipe)
core.delete(*keys)
def _expire(self, pipe=None):
"""
delete the current redis key.
:param pipe:
:return:
"""
if self.ttl:
self.core(pipe=pipe).expire(self.key, self.ttl)
@classmethod
def _pipe(cls, pipe=None):
"""
Internal method for automatically wrapping a pipeline and
turning it into a nested pipeline with the correct connection
and one that automatically executes as it exits the context.
:param pipe: Pipeline, NestedPipeline or None
:return: Pipeline or NestedPipeline
"""
return autoexec(pipe, name=cls.connection)
def __getitem__(self, item):
"""
magic python method to make the object behave like a dictionary.
You can access data like so:
.. code-block:: python
user = User('1')
assert(user['name'] == 'bill')
assert(user['_key'] == '1')
The primary key is also included in this.
If you have defined the name of the primary key, you use that name.
Otherwise it defaults to `_key`.
If the data doesn't exist in redis, it will raise a KeyError.
I thought about making it return None, but if you want that
behavior, use the `get` method.
:param item: the name of the element in the dictionary
:return: the value of the element.
"""
if item == self.key_name:
return self.key
return self._data[item]
def __delitem__(self, key):
"""
Explicitly prevent deleting data from the object via the `del`
command.
.. code-block:: python
del user['name'] # raises InvalidOperation exception!
The reason is because I want you to use the `remove` method instead.
That way you can pipeline the removal of the redis field with
something else.
Also, you probably want to avoid a scenario where you accidentally
delete data from redis without meaning to.
:param key: the name of the element to remove from the dict.
:raise: InvalidOperation
"""
        tpl = 'cannot delete %s from %s indirectly. Use the remove method.'
raise InvalidOperation(tpl % (key, self))
def __setitem__(self, key, value):
"""
Explicitly prevent setting data into this dictionary-like object.
Example:
.. code-block:: python
user = User('1')
user['name'] = 'Bob' # raises InvalidOperation exception
RedPipe does not support this because you should be using the
`update` method to change properties on the object where you can
pipeline the operation with other calls to redis.
It also avoids the problem where you accidentally change data
if you were confused and thought you were just manipulating a
regular dictionary.
:param key: the name of the element in this pseudo dict.
:param value: the value to set it to
:raise: InvalidOperation
"""
tpl = 'cannot set %s key on %s indirectly. Use the update method.'
raise InvalidOperation(tpl % (key, self))
def __getattr__(self, item):
"""
magic python method -- returns fields as an attribute of the object.
This is off by default.
You can enable it by setting `field_attr_on = True` on
your struct.
Then you can access data like so:
.. code-block:: python
user = User('1')
assert(user.name == 'bill')
:param item: str
:return: mixed
"""
try:
if self.field_attr_on:
if item == self.key_name:
return self.key
return self._data[item]
except KeyError:
if item in self.fields:
return None
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, item))
def __setattr__(self, key, value):
"""
magic python method to control setting attributes on the object.
:param key:
:param value:
:return:
"""
if self.field_attr_on and key in self.fields:
tpl = 'cannot set %s.%s directly. Use the update method.'
raise InvalidOperation(tpl % (self, key))
if key in self.__slots__ or key in self.__dict__:
return super(Struct, self).__setattr__(key, value)
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, key))
def __iter__(self):
"""
Make the `Struct` iterable, like a dictionary.
When you iterate on a dict, it yields the keys of the dictionary.
Emulating the same behavior here.
:return: generator, a list of key names in the Struct
"""
for k in self.keys():
yield k
def __len__(self):
"""
How many elements in the Struct?
This includes all the fields returned from redis + the key.
:return: int
"""
return len(dict(self))
def __contains__(self, item):
if item == self.key_name:
return True
return item in self._data
def iteritems(self):
"""
Support for the python 2 iterator of key/value pairs.
This includes the primary key name and value.
Example:
.. code-block:: python
u = User('1')
data = {k: v for k, v in u.iteritems()}
Or:
.. code-block:: python
u = User('1')
for k, v in u.iteritems():
                print("%s: %s" % (k, v))
:return: generator, a list of key/value pair tuples
"""
yield self.key_name, self.key
for k, v in self._data.items():
yield k, v
def items(self):
"""
We return the list of key/value pair tuples.
Similar to iteritems but in list form instead of as
a generator.
The reason we do this is because python2 code probably expects this to
        be a list. Not sure it matters, but just covering my bases.
Example:
.. code-block:: python
u = User('1')
data = {k: v for k, v in u.items()}
Or:
.. code-block:: python
u = User('1')
for k, v in u.items():
                print("%s: %s" % (k, v))
:return: list, containing key/value pair tuples.
"""
return [row for row in self.iteritems()]
def __eq__(self, other):
"""
Test for equality with another python object.
Example:
        .. code-block:: python
u = User('1')
assert(u == {'_key': '1', 'name': 'Bob'})
assert(u == User('1'))
The object you pass in should be a dict or an object that can
be coerced into a dict, like another Struct.
Returns True if all the keys and values match up.
:param other: can be another dictionary, or a Struct.
:return: bool
"""
if self is other:
return True
try:
if dict(self) == dict(other):
return True
except (TypeError, ValueError):
pass
return False
def keys(self):
"""
Get a list of all the keys in the Struct.
This includes the primary key name, and all the elements
that are set into redis.
Note: even if you define fields on the Struct, those keys won't
be returned unless the fields are actually written into the redis
hash.
.. code-block:: python
u = User('1')
assert(u.keys() == ['_key', 'name'])
:return: list
"""
return [row[0] for row in self.items()]
def __str__(self):
"""
A simple string representation of the object.
        Contains the class name and the primary key.
Doesn't print out all the data.
The reason is because there could be some really
complex data types in there or some really big values.
Printing that out, especially in the context of an exception
seems like a bad idea.
:return: str
"""
return "<%s:%s>" % (self.__class__.__name__, self.key)
def __repr__(self):
"""
Emulate the behavior of a dict when it is passed to repr.
:return: str
"""
return repr(dict(self))
def __getstate__(self):
"""
Used for pickling the Struct.
:return: tuple of key, and internal `_data`
"""
return self.key, self._data,
def __setstate__(self, state):
"""
        used for unpickling the Struct.
:param state:
:return:
"""
self.key = state[0]
self._data = state[1]
@property
def _redpipe_struct_as_dict(self):
"""
A special namespaced property used for json encoding.
We use duck-typing and look for this property (which no other
type of object should have) so that we can try to json
serialize it by coercing it into a dict.
:return: dict
"""
return dict(self)
def _json_default_encoder(func):
"""
Monkey-Patch the core json encoder library.
This isn't as bad as it sounds.
We override the default method so that if an object
    falls through and can't be encoded normally, we see if it is
    a Struct and coerce it to a dict to be encoded.
I set a special attribute on the Struct object so I can tell
that's what it is.
If that doesn't work, I fall back to the earlier behavior.
The nice thing about patching the library this way is that it
    won't interfere with existing code and it can itself be wrapped
by other methods.
So it's very extensible.
:param func: the JSONEncoder.default method.
:return: an object that can be json serialized.
"""
@wraps(func)
def inner(self, o):
try:
return o._redpipe_struct_as_dict # noqa
except AttributeError:
pass
return func(self, o)
return inner
JSONEncoder.default = _json_default_encoder(JSONEncoder.default)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental library that exposes XLA operations directly in TensorFlow.
It is sometimes useful to be able to build HLO programs directly from
TensorFlow. This file provides TensorFlow operators that mirror the semantics of
HLO operators as closely as possible.
Note: There is no promise of backward or forward compatibility for operators
defined in this module. This is primarily because the underlying HLO operators
do not promise backward or forward compatibility.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tf2xla.ops import gen_xla_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# TODO(phawkins): provide wrappers for all XLA operators. Currently the missing
# ops include:
# infeed/outfeed (available via tf.contrib.tpu)
# collectives, e.g., cross-replica-sum (available via tf.contrib.tpu)
# conditional
# gather/scatter
# collapse
# This file reuses builtin names (following XLA's names, so we can call things
# like xla.max), so we capture the builtin versions here.
# pylint: disable=redefined-builtin
_max = max
_min = min
_slice = slice # pylint: disable=invalid-name
constant = constant_op.constant
# Unary operators.
# For most arithmetic operators there is a TensorFlow operator
# that exactly corresponds to each XLA operator. Rather than defining
# XLA-specific variants, we reuse the corresponding TensorFlow operator.
# TODO(phawkins): It would be even better to have TensorFlow operators that 1:1
# wrap every HLO operator, because that would allow us to be confident that the
# semantics match.
def _unary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def unary_op_wrapper(x, name=None):
return fn(x, name=name)
return unary_op_wrapper
abs = _unary_op(math_ops.abs)
# TODO(phawkins): implement clz.
conj = _unary_op(math_ops.conj)
cos = _unary_op(math_ops.cos)
ceil = _unary_op(math_ops.ceil)
digamma = _unary_op(math_ops.digamma)
erf = _unary_op(math_ops.erf)
erfc = _unary_op(math_ops.erfc)
# TODO(phawkins): implement erfinv
exp = _unary_op(math_ops.exp)
expm1 = _unary_op(math_ops.expm1)
floor = _unary_op(math_ops.floor)
imag = _unary_op(math_ops.imag)
is_finite = _unary_op(math_ops.is_finite)
lgamma = _unary_op(math_ops.lgamma)
log = _unary_op(math_ops.log)
log1p = _unary_op(math_ops.log1p)
logical_not = _unary_op(math_ops.logical_not)
neg = _unary_op(math_ops.neg)
real = _unary_op(math_ops.real)
# TODO(phawkins): unlike xla::Round, this rounds to even instead of zero for
# numbers halfway between two integers.
round = _unary_op(math_ops.round)
sin = _unary_op(math_ops.sin)
sign = _unary_op(math_ops.sign)
tanh = _unary_op(math_ops.tanh)
# Binary operators
# The main difference between TensorFlow and XLA binary ops is the broadcasting
# semantics. TensorFlow uses Numpy-style broadcasting semantics, whereas XLA
# requires an explicit specification of which dimensions to broadcast if the
# arguments have different ranks.
def _broadcasting_binary_op(fn):
"""Wraps a binary Tensorflow operator and performs XLA-style broadcasting."""
def broadcasting_binary_op_wrapper(x, y, broadcast_dims=None, name=None):
"""Inner wrapper function."""
broadcast_dims = broadcast_dims or []
broadcast_dims = ops.convert_to_tensor(broadcast_dims, dtypes.int64)
# Rather than relying on having static shape information in the TensorFlow
# graph, we use an XlaBroadcastHelper op that can compute the correct shapes
# at JIT compilation time.
x, y = gen_xla_ops.xla_broadcast_helper(x, y, broadcast_dims)
return fn(x, y, name=name)
return broadcasting_binary_op_wrapper
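# Example (a sketch of the XLA-style semantics): for x of shape [3, 4] and y
# of shape [3], `add(x, y, broadcast_dims=[0])` maps y's single dimension onto
# dimension 0 of x, rather than relying on Numpy broadcasting rules.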
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
dtypes.int8: dtypes.uint8,
dtypes.int16: dtypes.uint16,
dtypes.int32: dtypes.uint32,
dtypes.int64: dtypes.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {
dtypes.uint8: dtypes.int8,
dtypes.uint16: dtypes.int16,
dtypes.uint32: dtypes.int32,
dtypes.uint64: dtypes.int64,
}
def _shift_right_logical_helper(x, y, name=None):
"""Performs an integer right logical shift irrespective of input type."""
assert y.dtype == x.dtype
dtype = x.dtype
signed = dtype in _SIGNED_TO_UNSIGNED_TABLE
if signed:
unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[dtype]
x = math_ops.cast(x, unsigned_dtype)
y = math_ops.cast(y, unsigned_dtype)
output = bitwise_ops.right_shift(x, y, name=name)
if signed:
output = math_ops.cast(output, dtype)
return output
def _shift_right_arithmetic_helper(x, y, name=None):
"""Performs an integer right arithmetic shift irrespective of input type."""
assert y.dtype == x.dtype
dtype = x.dtype
unsigned = dtype in _UNSIGNED_TO_SIGNED_TABLE
if unsigned:
signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[dtype]
x = math_ops.cast(x, signed_dtype)
y = math_ops.cast(y, signed_dtype)
output = bitwise_ops.right_shift(x, y, name=name)
if unsigned:
output = math_ops.cast(output, dtype)
return output
add = _broadcasting_binary_op(math_ops.add)
sub = _broadcasting_binary_op(math_ops.sub)
mul = _broadcasting_binary_op(math_ops.mul)
div = _broadcasting_binary_op(math_ops.div)
rem = _broadcasting_binary_op(gen_math_ops.mod)
max = _broadcasting_binary_op(math_ops.maximum)
min = _broadcasting_binary_op(math_ops.minimum)
atan2 = _broadcasting_binary_op(math_ops.atan2)
complex = _broadcasting_binary_op(math_ops.complex)
logical_and = _broadcasting_binary_op(math_ops.logical_and)
logical_or = _broadcasting_binary_op(math_ops.logical_or)
logical_xor = _broadcasting_binary_op(math_ops.logical_xor)
eq = _broadcasting_binary_op(math_ops.equal)
ne = _broadcasting_binary_op(math_ops.not_equal)
ge = _broadcasting_binary_op(math_ops.greater_equal)
gt = _broadcasting_binary_op(math_ops.greater)
le = _broadcasting_binary_op(math_ops.less_equal)
lt = _broadcasting_binary_op(math_ops.less)
pow = _broadcasting_binary_op(math_ops.pow)
shift_left = _broadcasting_binary_op(bitwise_ops.left_shift)
shift_right_logical = _broadcasting_binary_op(_shift_right_logical_helper)
shift_right_arithmetic = _broadcasting_binary_op(_shift_right_arithmetic_helper)
def _binary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def binary_op_wrapper(x, y, name=None):
return fn(x, y, name=name)
return binary_op_wrapper
transpose = _binary_op(array_ops.transpose)
rev = _binary_op(array_ops.reverse)
bitcast_convert_type = array_ops.bitcast
def broadcast(x, dims, name=None):
x = ops.convert_to_tensor(x)
shape = array_ops.concat([constant_op.constant(dims),
array_ops.shape(x)],
axis=0)
return array_ops.broadcast_to(x, shape, name=name)
def clamp(a, x, b, name=None):
return min(max(a, x, name=name), b, name=name)
concatenate = array_ops.concat
def conv(lhs,
rhs,
window_strides,
padding,
lhs_dilation,
rhs_dilation,
dimension_numbers,
feature_group_count=1,
precision_config=None,
name=None):
"""Wraps the XLA ConvGeneralDilated operator.
ConvGeneralDilated is the most general form of XLA convolution and is
documented at
https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
Args:
lhs: the input tensor
rhs: the kernel tensor
window_strides: the inter-window strides
    padding: the padding to apply at the start and end of each input dimension
lhs_dilation: dilation to apply between input elements
rhs_dilation: dilation to apply between kernel elements
dimension_numbers: a `ConvolutionDimensionNumbers` proto.
feature_group_count: number of feature groups for grouped convolution.
precision_config: a `PrecisionConfigProto` proto.
name: an optional name for the operator
Returns:
A tensor representing the output of the convolution.
"""
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_conv(
lhs,
rhs,
window_strides=window_strides,
padding=padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
feature_group_count=feature_group_count,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
name=name)
convert_element_type = math_ops.cast
def dot(lhs, rhs, name=None):
return math_ops.tensordot(lhs, rhs, axes=1, name=name)
def dot_general(lhs, rhs, dimension_numbers, precision_config=None, name=None):
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_dot(
lhs,
rhs,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
name=name)
dynamic_slice = gen_xla_ops.xla_dynamic_slice
dynamic_update_slice = gen_xla_ops.xla_dynamic_update_slice
# TODO(phawkins): generalize tf.pad to support interior padding, and then remove
# the XLA-specific pad operator.
pad = gen_xla_ops.xla_pad
def random_normal(mu, sigma, dims, name=None):
mu = ops.convert_to_tensor(mu)
return random_ops.random_normal(
dims, mean=mu, stddev=sigma, dtype=mu.dtype, name=name)
def random_uniform(minval, maxval, dims, name=None):
minval = ops.convert_to_tensor(minval)
return random_ops.random_uniform(
dims, minval, maxval, dtype=minval.dtype, name=name)
recv = gen_xla_ops.xla_recv
reduce = gen_xla_ops.xla_reduce
def reduce_window(operand,
init,
reducer,
window_dimensions,
window_strides=None,
base_dilations=None,
window_dilations=None,
padding=None,
name=None):
"""Wraps the XLA ReduceWindow operator.
ReduceWindow is documented at
https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .
Args:
operand: the input tensor
init: a scalar tensor representing the initial value for the reduction
reducer: a reduction function that combines a pair of scalars.
window_dimensions: shape of the window, as a list of integers
    window_strides: inter-window strides, as a list of integers. Optional; if
      omitted, defaults to strides of 1.
    base_dilations: dilations to apply to the input, as a list of integers.
      Optional; if omitted, defaults to no dilation.
    window_dilations: dilations to apply to the window, as a list of integers.
      Optional; if omitted, defaults to no dilation.
    padding: padding to apply to 'operand'. List of (low, high) pairs of
      integers that specify the padding to apply before and after each
      dimension. Optional; if omitted, defaults to no padding.
name: the operator name, or None.
Returns:
A tensor that represents the output of the reduce_window operator.
"""
window_strides = window_strides or [1] * len(window_dimensions)
base_dilations = base_dilations or [1] * len(window_dimensions)
window_dilations = window_dilations or [1] * len(window_dimensions)
padding = padding or [(0, 0)] * len(window_dimensions)
return gen_xla_ops.xla_reduce_window(
input=operand,
init_value=init,
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=base_dilations,
window_dilations=window_dilations,
padding=padding,
computation=reducer,
name=name)
def reshape(x, new_sizes, dimensions=None, name=None):
if dimensions is not None:
x = array_ops.transpose(x, dimensions)
x = array_ops.reshape(x, new_sizes, name=name)
return x
def select(condition, x, y, name=None):
return array_ops.where(condition, x, y, name)
select_and_scatter = gen_xla_ops.xla_select_and_scatter
send = gen_xla_ops.xla_send
def slice(x, start_dims, limit_dims, strides):
spec = [
_slice(start, limit, stride)
for (start, limit, stride) in zip(start_dims, limit_dims, strides)
]
return x[tuple(spec)]
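# Example: slice(t, [0, 1], [4, 5], [1, 2]) builds the spec
# [slice(0, 4, 1), slice(1, 5, 2)] and is equivalent to t[0:4, 1:5:2].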
sort = gen_xla_ops.xla_sort
key_value_sort = gen_xla_ops.xla_key_value_sort
while_loop = gen_xla_ops.xla_while
|
|
# App.py
# Application stuff.
# The application is responsible for managing the main frame window.
#
# We also grab the FileOpen command, to invoke our Python editor
" The PythonWin application code. Manages most aspects of MDI, etc "
import win32con
import win32api
import win32ui
import sys
import string
import os
from pywin.mfc import window, dialog, afxres
from pywin.mfc.thread import WinApp
import traceback
import regutil
import scriptutils
## NOTE: App and AppBuilder should NOT be used - instead, you should construct
## your APP class manually whenever you like (just ensure you leave these 2
## params None!)
## Whoever wants the generic "Application" should get it via win32ui.GetApp()
# These are "legacy"
AppBuilder = None
App = None # default - if used, must end up a CApp derived class.
# Helpers that should one day be removed!
def AddIdleHandler(handler):
print "app.AddIdleHandler is deprecated - please use win32ui.GetApp().AddIdleHandler() instead."
return win32ui.GetApp().AddIdleHandler(handler)
def DeleteIdleHandler(handler):
print "app.DeleteIdleHandler is deprecated - please use win32ui.GetApp().DeleteIdleHandler() instead."
return win32ui.GetApp().DeleteIdleHandler(handler)
# Helper for writing a Window position by name, and later loading it.
def SaveWindowSize(section,rect,state=""):
""" Writes a rectangle to an INI file
Args: section = section name in the applications INI file
	      rect = a rectangle in a (left, top, right, bottom) tuple
	      (the same format returned by LoadWindowSize)."""
left, top, right, bottom = rect
if state: state = state + " "
win32ui.WriteProfileVal(section,state+"left",left)
win32ui.WriteProfileVal(section,state+"top",top)
win32ui.WriteProfileVal(section,state+"right",right)
win32ui.WriteProfileVal(section,state+"bottom",bottom)
def LoadWindowSize(section, state=""):
""" Loads a section from an INI file, and returns a rect in a tuple (see SaveWindowSize)"""
if state: state = state + " "
left = win32ui.GetProfileVal(section,state+"left",0)
top = win32ui.GetProfileVal(section,state+"top",0)
right = win32ui.GetProfileVal(section,state+"right",0)
bottom = win32ui.GetProfileVal(section,state+"bottom",0)
return (left, top, right, bottom)
def RectToCreateStructRect(rect):
return (rect[3]-rect[1], rect[2]-rect[0], rect[1], rect[0] )
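# Example: persist a window rectangle under a section name and read it back.
#
#   SaveWindowSize("Main Window", (0, 0, 800, 600))
#   rect = LoadWindowSize("Main Window")   # -> (0, 0, 800, 600)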
# Define FrameWindow and Application objects
#
# The Main Frame of the application.
class MainFrame(window.MDIFrameWnd):
sectionPos = "Main Window"
statusBarIndicators = ( afxres.ID_SEPARATOR, #// status line indicator
afxres.ID_INDICATOR_CAPS,
afxres.ID_INDICATOR_NUM,
afxres.ID_INDICATOR_SCRL,
win32ui.ID_INDICATOR_LINENUM,
win32ui.ID_INDICATOR_COLNUM )
def OnCreate(self, cs):
self._CreateStatusBar()
return 0
def _CreateStatusBar(self):
self.statusBar = win32ui.CreateStatusBar(self)
self.statusBar.SetIndicators(self.statusBarIndicators)
self.HookCommandUpdate(self.OnUpdatePosIndicator, win32ui.ID_INDICATOR_LINENUM)
self.HookCommandUpdate(self.OnUpdatePosIndicator, win32ui.ID_INDICATOR_COLNUM)
def OnUpdatePosIndicator(self, cmdui):
editControl = scriptutils.GetActiveEditControl()
value = " " * 5
if editControl is not None:
try:
startChar, endChar = editControl.GetSel()
lineNo = editControl.LineFromChar(startChar)
colNo = endChar - editControl.LineIndex(lineNo)
if cmdui.m_nID==win32ui.ID_INDICATOR_LINENUM:
value = "%0*d" % (5, lineNo + 1)
else:
value = "%0*d" % (3, colNo + 1)
except win32ui.error:
pass
cmdui.SetText(value)
cmdui.Enable()
def PreCreateWindow(self, cc):
cc = self._obj_.PreCreateWindow(cc)
pos = LoadWindowSize(self.sectionPos)
self.startRect = pos
if pos[2] - pos[0]:
rect = RectToCreateStructRect(pos)
cc = cc[0], cc[1], cc[2], cc[3], rect, cc[5], cc[6], cc[7], cc[8]
return cc
def OnDestroy(self, msg):
# use GetWindowPlacement(), as it works even when min'd or max'd
rectNow = self.GetWindowPlacement()[4]
if rectNow != self.startRect:
SaveWindowSize(self.sectionPos, rectNow)
return 0
class CApp(WinApp):
" A class for the application "
def __init__(self):
self.oldCallbackCaller = None
WinApp.__init__(self, win32ui.GetApp() )
self.idleHandlers = []
def InitInstance(self):
" Called to crank up the app "
numMRU = win32ui.GetProfileVal("Settings","Recent File List Size", 10)
win32ui.LoadStdProfileSettings(numMRU)
# self._obj_.InitMDIInstance()
if win32api.GetVersionEx()[0]<4:
win32ui.SetDialogBkColor()
win32ui.Enable3dControls()
# install a "callback caller" - a manager for the callbacks
# self.oldCallbackCaller = win32ui.InstallCallbackCaller(self.CallbackManager)
self.LoadMainFrame()
self.SetApplicationPaths()
def ExitInstance(self):
" Called as the app dies - too late to prevent it here! "
win32ui.OutputDebug("Application shutdown\n")
# Restore the callback manager, if any.
try:
win32ui.InstallCallbackCaller(self.oldCallbackCaller)
except AttributeError:
pass
if self.oldCallbackCaller:
del self.oldCallbackCaller
self.frame=None # clean Python references to the now destroyed window object.
self.idleHandlers = []
# Attempt cleanup if not already done!
if self._obj_: self._obj_.AttachObject(None)
self._obj_ = None
global App
global AppBuilder
App = None
AppBuilder = None
return 0
def HaveIdleHandler(self, handler):
return handler in self.idleHandlers
def AddIdleHandler(self, handler):
self.idleHandlers.append(handler)
def DeleteIdleHandler(self, handler):
self.idleHandlers.remove(handler)
def OnIdle(self, count):
try:
ret = 0
handlers = self.idleHandlers[:] # copy list, as may be modified during loop
for handler in handlers:
try:
thisRet = handler(handler, count)
except:
print "Idle handler %s failed" % (repr(handler))
traceback.print_exc()
print "Idle handler removed from list"
try:
self.DeleteIdleHandler(handler)
except ValueError: # Item not in list.
pass
thisRet = 0
ret = ret or thisRet
return ret
except KeyboardInterrupt:
pass
def CreateMainFrame(self):
return MainFrame()
def LoadMainFrame(self):
" Create the main applications frame "
self.frame = self.CreateMainFrame()
self.SetMainFrame(self.frame)
self.frame.LoadFrame(win32ui.IDR_MAINFRAME, win32con.WS_OVERLAPPEDWINDOW)
self.frame.DragAcceptFiles() # we can accept these.
self.frame.ShowWindow(win32ui.GetInitialStateRequest())
self.frame.UpdateWindow()
self.HookCommands()
def OnHelp(self,id, code):
try:
if id==win32ui.ID_HELP_GUI_REF:
helpFile = regutil.GetRegisteredHelpFile("Pythonwin Reference")
helpCmd = win32con.HELP_CONTENTS
else:
helpFile = regutil.GetRegisteredHelpFile("Main Python Documentation")
helpCmd = win32con.HELP_FINDER
if helpFile is None:
win32ui.MessageBox("The help file is not registered!")
else:
import help
help.OpenHelpFile(helpFile, helpCmd)
except:
t, v, tb = sys.exc_info()
win32ui.MessageBox("Internal error in help file processing\r\n%s: %s" % (t,v))
tb = None # Prevent a cycle
def DoLoadModules(self, modules):
# XXX - this should go, but the debugger uses it :-(
# dont do much checking!
for module in modules:
__import__(module)
def HookCommands(self):
self.frame.HookMessage(self.OnDropFiles,win32con.WM_DROPFILES)
self.HookCommand(self.HandleOnFileOpen,win32ui.ID_FILE_OPEN)
self.HookCommand(self.HandleOnFileNew,win32ui.ID_FILE_NEW)
self.HookCommand(self.OnFileMRU,win32ui.ID_FILE_MRU_FILE1)
self.HookCommand(self.OnHelpAbout,win32ui.ID_APP_ABOUT)
self.HookCommand(self.OnHelp, win32ui.ID_HELP_PYTHON)
self.HookCommand(self.OnHelp, win32ui.ID_HELP_GUI_REF)
# Hook for the right-click menu.
self.frame.GetWindow(win32con.GW_CHILD).HookMessage(self.OnRClick,win32con.WM_RBUTTONDOWN)
def SetApplicationPaths(self):
# Load the users/application paths
new_path = []
apppath=win32ui.GetProfileVal('Python','Application Path','').split(';')
for path in apppath:
if len(path)>0:
new_path.append(win32ui.FullPath(path))
for extra_num in range(1,11):
apppath=win32ui.GetProfileVal('Python','Application Path %d'%extra_num,'').split(';')
			if apppath == ['']:
break
for path in apppath:
if len(path)>0:
new_path.append(win32ui.FullPath(path))
sys.path = new_path + sys.path
def OnRClick(self,params):
" Handle right click message "
# put up the entire FILE menu!
menu = win32ui.LoadMenu(win32ui.IDR_TEXTTYPE).GetSubMenu(0)
menu.TrackPopupMenu(params[5]) # track at mouse position.
return 0
def OnDropFiles(self,msg):
" Handle a file being dropped from file manager "
hDropInfo = msg[2]
self.frame.SetActiveWindow() # active us
nFiles = win32api.DragQueryFile(hDropInfo)
try:
for iFile in range(0,nFiles):
fileName = win32api.DragQueryFile(hDropInfo, iFile)
win32ui.GetApp().OpenDocumentFile( fileName )
finally:
win32api.DragFinish(hDropInfo);
return 0
# No longer used by Pythonwin, as the C++ code has this same basic functionality
# but handles errors slightly better.
# It all still works, tho, so if you need similar functionality, you can use it.
# Therefore I haven't deleted this code completely!
# def CallbackManager( self, ob, args = () ):
# """Manage win32 callbacks. Trap exceptions, report on them, then return 'All OK'
# to the frame-work. """
# import traceback
# try:
# ret = apply(ob, args)
# return ret
# except:
# # take copies of the exception values, else other (handled) exceptions may get
# # copied over by the other fns called.
# win32ui.SetStatusText('An exception occured in a windows command handler.')
# t, v, tb = sys.exc_info()
# traceback.print_exception(t, v, tb.tb_next)
# try:
# sys.stdout.flush()
# except (NameError, AttributeError):
# pass
# Command handlers.
	def OnFileMRU( self, id, code ):
		" Called when a File 1-n message is received "
fileName = win32ui.GetRecentFileList()[id - win32ui.ID_FILE_MRU_FILE1]
win32ui.GetApp().OpenDocumentFile(fileName)
def HandleOnFileOpen( self, id, code ):
" Called when FileOpen message is received "
win32ui.GetApp().OnFileOpen()
def HandleOnFileNew( self, id, code ):
" Called when FileNew message is received "
win32ui.GetApp().OnFileNew()
def OnHelpAbout( self, id, code ):
" Called when HelpAbout message is received. Displays the About dialog. "
win32ui.InitRichEdit()
dlg=AboutBox()
dlg.DoModal()
def _GetRegistryValue(key, val, default = None):
# val is registry value - None for default val.
try:
hkey = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, key)
return win32api.RegQueryValueEx(hkey, val)[0]
except win32api.error:
try:
hkey = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, key)
return win32api.RegQueryValueEx(hkey, val)[0]
except win32api.error:
return default
scintilla = "Scintilla is Copyright 1998-2008 Neil Hodgson (http://www.scintilla.org)"
idle = "This program uses IDLE extensions by Guido van Rossum, Tim Peters and others."
contributors = "Thanks to the following people for making significant contributions: Roger Upole, Sidnei da Silva, Sam Rushing, Curt Hagenlocher, Dave Brennan, Roger Burnham, Gordon McMillan, Neil Hodgson, Laramie Leavitt. (let me know if I have forgotten you!)"
# The About Box
class AboutBox(dialog.Dialog):
def __init__(self, idd=win32ui.IDD_ABOUTBOX):
dialog.Dialog.__init__(self, idd)
def OnInitDialog(self):
text = "Pythonwin - Python IDE and GUI Framework for Windows.\n\n%s\n\nPython is %s\n\n%s\n\n%s\n\n%s" % (win32ui.copyright, sys.copyright, scintilla, idle, contributors)
self.SetDlgItemText(win32ui.IDC_EDIT1, text)
# Get the build number - written by installers.
# For distutils build, read pywin32.version.txt
import distutils.sysconfig
site_packages = distutils.sysconfig.get_python_lib(plat_specific=1)
try:
build_no = open(os.path.join(site_packages, "pywin32.version.txt")).read().strip()
ver = "pywin32 build %s" % build_no
except EnvironmentError:
ver = None
if ver is None:
# See if we are Part of Active Python
ver = _GetRegistryValue("SOFTWARE\\ActiveState\\ActivePython", "CurrentVersion")
if ver is not None:
ver = "ActivePython build %s" % (ver,)
if ver is None:
ver = ""
self.SetDlgItemText(win32ui.IDC_ABOUT_VERSION, ver)
self.HookCommand(self.OnButHomePage, win32ui.IDC_BUTTON1)
def OnButHomePage(self, id, code):
if code == win32con.BN_CLICKED:
win32api.ShellExecute(0, "open", "http://starship.python.net/crew/mhammond/win32", None, "", 1)
def Win32RawInput(prompt=None):
"Provide raw_input() for gui apps"
# flush stderr/out first.
try:
sys.stdout.flush()
sys.stderr.flush()
except:
pass
if prompt is None: prompt = ""
ret=dialog.GetSimpleInput(prompt)
if ret==None:
raise KeyboardInterrupt("operation cancelled")
return ret
def Win32Input(prompt=None):
"Provide input() for gui apps"
return eval(input(prompt))
try:
raw_input
# must be py2x...
sys.modules['__builtin__'].raw_input=Win32RawInput
except NameError:
# must be py3k
import code
code.InteractiveConsole.input=Win32Input
def HaveGoodGUI():
"""Returns true if we currently have a good gui available.
"""
return "pywin.framework.startup" in sys.modules
def CreateDefaultGUI( appClass = None):
"""Creates a default GUI environment
"""
if appClass is None:
import intpyapp # Bring in the default app - could be param'd later.
appClass = intpyapp.InteractivePythonApp
# Create and init the app.
appClass().InitInstance()
def CheckCreateDefaultGUI():
"""Checks and creates if necessary a default GUI environment.
"""
rc = HaveGoodGUI()
if not rc:
CreateDefaultGUI()
return rc
|
|
# :[diStorm64}: Python binding
# Copyright (c) 2009, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
info = (
":[diStorm64}: by Gil Dabah, http://ragestorm.net/distorm/\n"
"Python binding by Mario Vilas, http://breakingcode.wordpress.com/\n"
)
__revision__ = "$Id: __init__.py 376 2009-08-24 16:42:29Z QvasiModo $"
__all__ = [
'Decode',
'DecodeGenerator',
'Decode16Bits',
'Decode32Bits',
'Decode64Bits',
]
from ctypes import *
from exceptions import *
from os.path import split, join
#==============================================================================
# Load the diStorm library
SUPPORT_64BIT_OFFSET = True
_OffsetType = c_ulonglong
try:
_distorm_path = split(__file__)[0]
_distorm_file = join(_distorm_path, 'libdistorm64.dylib')
_distorm = cdll.LoadLibrary(_distorm_file)
except OSError:
raise ImportError, "Error loading diStorm: dynamic link library not found"
try:
distorm_decode = _distorm.distorm_decode64
except AttributeError:
raise ImportError, "Error loading diStorm: exported function not found"
#==============================================================================
# diStorm C interface
MAX_TEXT_SIZE = 60
MAX_INSTRUCTIONS = 1000
DECRES_NONE = 0
DECRES_SUCCESS = 1
DECRES_MEMORYERR = 2
DECRES_INPUTERR = 3
_DecodeType = c_uint
_DecodeResult = c_uint
class _WString (Structure):
_fields_ = [
('length', c_uint), # unused
('p', c_char * MAX_TEXT_SIZE),
]
class _DecodedInst (Structure):
_fields_ = [
('mnemonic', _WString),
('operands', _WString),
('instructionHex', _WString),
('size', c_uint),
('offset', _OffsetType),
]
distorm_decode.restype = _DecodeResult
distorm_decode.argtypes = [
_OffsetType, # codeOffset
c_void_p, # code
c_int, # codeLen
_DecodeType, # dt
POINTER(_DecodedInst), # result
c_uint, # maxInstructions
POINTER(c_uint) # usedInstructionsCount
]
#==============================================================================
# diStorm Python interface
Decode16Bits = 0 # 80286 decoding
Decode32Bits = 1 # IA-32 decoding
Decode64Bits = 2 # AMD64 decoding
OffsetTypeSize = sizeof(_OffsetType) * 8  # sizeof() counts bytes; multiply by 8 for the size in bits
def DecodeGenerator(codeOffset, code, dt = Decode32Bits):
"""
@type codeOffset: long
@param codeOffset: Memory address where the code is located.
This is B{not} an offset into the code!
It's the actual memory address where it was read from.
@type code: str
@param code: Code to disassemble.
@type dt: int
@param dt: Disassembly type. Can be one of the following:
* L{Decode16Bits}: 80286 decoding
* L{Decode32Bits}: IA-32 decoding
* L{Decode64Bits}: AMD64 decoding
@rtype: generator of tuple( long, int, str, str )
@return: Generator of tuples. Each tuple represents an assembly instruction
and contains:
- Memory address of instruction.
- Size of instruction in bytes.
- Disassembly line of instruction.
- Hexadecimal dump of instruction.
@raise ValueError: Invalid arguments.
"""
# Sanitize the code parameter.
code = str(code)
# Stop the iteration if there's no code to disassemble.
if code == '':
return
# Sanitize the codeOffset parameter.
if not codeOffset:
codeOffset = 0
# Check the validity of the decode type.
if dt not in (Decode16Bits, Decode32Bits, Decode64Bits):
raise ValueError, "Invalid decode type value: %r" % (dt,)
# Prepare input buffer.
codeLen = len(code) # total bytes to disassemble
code = create_string_buffer(code) # allocate code buffer
p_code = addressof(code) # pointer to code buffer
# Prepare output buffer.
l_result = MAX_INSTRUCTIONS # length of output array
result = (_DecodedInst * l_result)() # allocate output array
p_result = pointer(result) # pointer to output array
p_result = cast(p_result, POINTER(_DecodedInst))
# Prepare used instructions counter.
usedInstructionsCount = c_uint(0)
p_usedInstructionsCount = byref(usedInstructionsCount)
# Loop while we have code left to disassemble.
while codeLen > 0:
# Call the decode function.
status = distorm_decode(codeOffset, p_code, min(codeLen, l_result), dt,
p_result, l_result, p_usedInstructionsCount)
if status == DECRES_INPUTERR:
raise ValueError, "Invalid arguments passed to distorm_decode()"
if status == DECRES_MEMORYERR:
raise MemoryError, "Not enough memory to disassemble"
used = usedInstructionsCount.value
if not used:
break
## raise AssertionError, "Internal error while disassembling"
# Yield each decoded instruction but the last one.
for index in xrange(used - 1):
di = result[index]
asm = '%s %s' % (di.mnemonic.p, di.operands.p)
pydi = ( di.offset, di.size, asm, di.instructionHex.p )
yield pydi
# Continue decoding from the last instruction found.
# This prevents truncating the last instruction.
# If there are no more instructions to decode, yield
# the last one and stop the iteration.
di = result[used - 1]
delta = di.offset - codeOffset
if delta <= 0:
asm = '%s %s' % (di.mnemonic.p, di.operands.p)
pydi = ( di.offset, di.size, asm, di.instructionHex.p )
yield pydi
break
codeOffset = codeOffset + delta
p_code = p_code + delta
codeLen = codeLen - delta
# Reset the used instructions counter.
usedInstructionsCount.value = 0
def Decode(offset, code, type = Decode32Bits):
"""
@type offset: long
@param offset: Memory address where the code is located.
This is B{not} an offset into the code!
It's the actual memory address where it was read from.
@type code: str
@param code: Code to disassemble.
@type type: int
@param type: Disassembly type. Can be one of the following:
* L{Decode16Bits}: 80286 decoding
* L{Decode32Bits}: IA-32 decoding
* L{Decode64Bits}: AMD64 decoding
@rtype: list of tuple( long, int, str, str )
@return: List of tuples. Each tuple represents an assembly instruction
and contains:
- Memory address of instruction.
- Size of instruction in bytes.
- Disassembly line of instruction.
- Hexadecimal dump of instruction.
@raise ValueError: Invalid arguments.
"""
return list( DecodeGenerator(offset, code, type) )
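# Illustrative usage sketch (not part of the original binding; assumes the
# shared library loaded above and that this package is importable as
# `distorm`):
#
#     from distorm import Decode, Decode32Bits
#     code = "\x90\xc3"  # NOP; RET
#     for offset, size, asm, hexdump in Decode(0x1000, code, Decode32Bits):
#         print "0x%08x %-16s %s" % (offset, hexdump, asm)
#
# Each tuple carries the instruction's address, size, disassembly text and
# hex dump, in that order, as documented above.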
|
|
from __future__ import print_function
from datetime import datetime
import time
import json
def donotchange(fn):
return fn
class ComputeBaseType:
# users = {}
# tenants = {}
# : the dict for the flavors
flavors = {}
# : the dict for the images
images = {}
# : the dict for the servers
servers = {}
# : the dict for the security_groups
security_groups = {}
# : the dict for the stacks
stacks = {}
# : the dict for usage data
usage = {}
# : the dict for the set_credentials
credential = None
# : the unique string identifying this cloud
label = None
def __init__(self, label, cred=None):
self.credential = cred
self.label = label
def _clear(self):
# self.users = {}
# self.tenants = {}
self.flavors = {} # global var
self.images = {} # global var
self.servers = {} # global var
self.security_groups = {} # global var
self.stacks = {}
self.usage = {}
self.credential = None # global var
self.label = None # global var
self.type = None
self.user_id = None
self.auth_token = None
def info(self):
"""obtain some basic information about the cloud"""
print("Label:", self.label)
print("Type:", self.type)
print("Flavors:", len(self.flavors))
print("Servers:", len(self.servers))
print("Images:", len(self.images))
print("Security Groups:", len(self.security_groups))
print("Stacks:", len(self.stacks))
print("Usage:", self.usage)
# print "Users:", len(self.users)
# print "Tenants:", len(self.tenants)
def connect(self):
"""connect to the cloud"""
raise NotImplementedError()
def config(self, dict):
"""uses the dict to conduct some configuration with the parameters passed"""
raise NotImplementedError()
def find_user_id(self, force=False):
"""finds the user id of a user and caches it. If a chaced
value is ther it will use that. If you specify force, it will
regenerate it"""
self.user_id = "unkown"
raise NotImplementedError()
def dump(self, type="server", with_manager=False):
"""returns a string that contains information about the cloud. One can ste the type to 'images','flavors','servers'"""
selection = type.lower()[0]
if selection == 'i':
d = self.images.copy()
elif selection == 'f':
d = self.flavors.copy()
elif selection == 's':
d = self.servers.copy()
elif selection == 'e':
d = self.security_groups.copy()
elif selection == 't':
d = self.stacks.copy()
elif selection == 'u':
d = self.usage.copy()
elif type is not None:
print("refresh type not supported")
assert False
else:
d = {}
with_manager = True
if not with_manager:
for element in d.keys():
try:
del d[element]['manager']
except:
pass
return d
def get(self, type="server"):
"""returns information in a dict for 'servers','flavours','images'"""
selection = type.lower()[0]
d = {}
if selection == 'i':
d = self.images
elif selection == 'f':
d = self.flavors
elif selection == 's':
d = self.servers
elif selection == 'e':
d = self.security_groups
elif selection == 't':
d = self.stacks
elif selection == 'u':
d = self.usage
# elif selection == 'u':
# d = self.users
# elif selection == 't':
# d = self.tenants
elif type is not None:
print("refresh type not supported")
assert False
return d
# identity management moved to its dedicated class
"""
def _get_users_dict(self):
raise NotImplementedError()
def _get_tenants_dict(self):
raise NotImplementedError()
"""
def _get_images_dict(self):
raise NotImplementedError()
def _get_flavors_dict(self):
raise NotImplementedError()
def _get_servers_dict(self):
raise NotImplementedError()
def _get_security_groups_dict(self):
raise NotImplementedError()
def vm_create(self, name=None,
flavor_name=None,
image_id=None,
security_groups=None,
key_name=None,
meta=None):
"""create a virtual machine with the given parameters"""
raise NotImplementedError()
def vm_delete(self, id):
"""delete the virtual machine with the id"""
raise NotImplementedError()
def vms_project(self, refresh=False):
raise NotImplementedError()
def rename(self, old, new, id=None):
"""rename the firtual machine with the name old to the name new"""
raise NotImplementedError()
def usage(self, start, end, format='dict'):
"""returns the usage data between start and end date"""
raise NotImplementedError()
def limits(self):
"""returns a dict of limits that the cloud will maintain for a user and/or the project"""
raise NotImplementedError()
def get_limits(self):
"""returns a dict of limits that the cloud will maintain for a user and/or the project"""
raise NotImplementedError()
def get_absolute_limits(self):
"""returns a dict of absolute limits with current usage information"""
raise NotImplementedError()
def get_usage(self):
raise NotImplementedError()
def get_quota(self):
raise NotImplementedError()
def stack_create(self):
raise NotImplementedError()
def stack_delete(self):
raise NotImplementedError()
def wait(self, vm_id, vm_status, seconds=2):
"""waits a number of seconds and than refreshes information form the cloud"""
print('refersh', vm_id)
self.refresh()
new_status = self.status(vm_id)
print(new_status)
while str(new_status) != str(vm_status):
time.sleep(seconds)
self.refresh()
new_status = self.status(vm_id)
#
# print
#
def __str__(self):
"""
print everything but the set_credentials that is known about this
cloud in json format.
"""
information = {
'label': self.label,
'flavors': self.flavors,
'servers': self.servers,
'images': self.images,
'security groups': self.security_groups,
'stacks': self.stacks,
'usage': self.usage,
# 'users': self.users,
# 'users': len(self.users),
# 'tenants': self.tenants,
}
return json.dumps(information, indent=4)
#
# get methods
#
# TODO BUG REMOVE THIS METHOD and replace with .type
# def type():
# return self.type
def vms(self):
"""returns the dict of the servers. deprecated."""
return self.servers
def status(self, vm_id):
"""returns that status of a given virtual machine"""
return self.servers[vm_id]['status']
def set_credentials(self, cred):
"""sets the set_credentials to the dict cred"""
self.credential = cred
def refresh(self, type=None):
"""refreshes the information of the cache for a given type 'images', 'flavors', 'servers', or 'all' for all of them"""
time_stamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%SZ')
selection = ""
if type:
selection = type.lower()[0]
list_function = self._get_servers_dict
data = self.servers
if selection == 'a' or type is None:
self.refresh("images")
self.refresh("flavors")
self.refresh("servers")
return
elif selection == 'i':
list_function = self._get_images_dict
data = self.images
elif selection == 'f':
list_function = self._get_flavors_dict
data = self.flavors
elif selection == 's':
list_function = self._get_servers_dict
data = self.servers
elif selection == 'e':
list_function = self._get_security_groups_dict
data = self.security_groups
elif selection == 't':
list_function = self._get_stacks_dict
data = self.stacks
elif selection == 'u':
list_function = self._get_usage_dict
data = self.usage
# elif selection == 'u':
# list_function = self._get_users_dict
# d = self.users
# elif selection == 't':
# list_function = self._get_tenants_dict
# d = self.tenants
elif type is not None:
print("refresh type not supported")
assert False
list_func = list_function()
if len(list_func) == 0:
if selection == 'i':
self.images = {}
elif selection == 'f':
self.flavors = {}
elif selection == 's':
self.servers = {}
elif selection == 'e':
self.security_groups = {}
elif selection == 't':
self.stacks = {}
elif selection == 'u':
self.usage = {}
# elif selection == 'u':
# self.users = {}
# elif selection == 't':
# self.tenants = {}
else:
data_updated = {}
for key in list_func:
element = list_func[key]
# element is already a dictionary, so element[...] works directly;
# there is no need to look it up again in the list.
id = element['id']
data_updated[id] = element
data_updated[id]['cm_refresh'] = time_stamp
# update the cached dict in place so that the corresponding instance
# attribute (self.servers, self.images, ...) sees the refreshed data
data.clear()
data.update(data_updated)
def keypair_list(self):
raise NotImplementedError()
def keypair_add(self, keyname, keycontent):
raise NotImplementedError()
def keypair_remove(self, keyname):
raise NotImplementedError()
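# Illustrative sketch (not part of the original module): a concrete cloud
# provider only needs to supply the _get_*_dict hooks; refresh() then re-keys
# each returned element by its 'id' and stamps it with a 'cm_refresh' time.
#
#     class DummyCloud(ComputeBaseType):
#         type = "dummy"
#         def _get_servers_dict(self):
#             return {"vm-1": {"id": "vm-1", "status": "ACTIVE"}}
#         def _get_images_dict(self):
#             return {}
#         def _get_flavors_dict(self):
#             return {}
#
#     cloud = DummyCloud("test-cloud")
#     cloud.refresh("servers")
#     print(cloud.status("vm-1"))  # -> ACTIVE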
|
|
#!/usr/bin/env python
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
clparser.requirement( coverage_language_expression )
returns requirement object (either elementary or combined requirement)
corresponding to the coverage language expression.
"""
# tema libraries:
import tema.coverage.coverage as coverage
# python standard:
import re
# operators listed in the order of precedence
# (the first binds most loosely)
OPERATORS=["or","and","then"]
ACTIONSHORTHANDS=["action","actions"]
VALUESHORTHANDS=["value","values"]
RESERVED=OPERATORS+ACTIONSHORTHANDS+VALUESHORTHANDS
class ParseError(Exception):
pass
class ParseTreeNode:
def __init__(self,parent=None,children=None,string="",item=None):
self.parent=parent
self.children=children
self.string=string
self.item=item
# automatically add this node to the children of the parent
if parent!=None:
parent.children.append(self)
# The rest of the methods are for testing
def dumps(self,indent=""):
s=indent+self.string+'\n'
for c in self.children:
s+=c.dumps(indent+'\t')
return s
def __str__(self):
return self.dumps()
def equal_strings(self,other):
childcount=len(self.children)
return self.string==other.string and \
childcount==len(other.children) and \
[self.children[i].equal_strings(other.children[i])
for i in range(childcount)]==[True]*childcount
# Operators in parse tree are stored inside OperatorItem objects
class OperatorItem:
def __init__(self,name):
self.name=name.lower()
def __lt__(self,other):
if not isinstance(other,OperatorItem):
raise TypeError("Operator item compared to %s." % type(other))
return OPERATORS.index(self.name)<OPERATORS.index(other.name)
def __str__(self):
return self.name
def getOperator(self):
return OPERATORS.index(self.name)
class ERParser:
"""Parser for Elementary Requirements"""
def __init__(self,model):
if model:
self._actionstrings=[ a.toString() for a in model.getActions() ]
else:
self._actionstrings=None
def _expand_action_regexp(self,regexp):
"""If model was given in the constructor (that is,
_actionstrings is set), we expand action regexp to explicit
action names. Otherwise just return [regexp].
Effect: when expanded,
actions branch.*
requires that every action with branch prefix must be
executed. If not expanded, the same would require that an
action with branch prefix and an action with loop prefix must
be executed.
value 1,2,3
requires that at least one of the values 1, 2 and 3 is used.
"""
if self._actionstrings: # expand item regexp
regexplist=[]
for a in self._actionstrings:
if regexp.match(a):
regexplist.append(re.compile(re.escape(a)))
self.log(" %s" % a)
else:
regexplist=[regexp]
return regexplist
def parse(self,slist):
"""Parses first n strings in the string list slist. Returns
either (None,slist) if the list does not begin with an
elementary requirement; or (ElementaryRequirement-object,
rest_of_slist), if an elementary requirement was
found. rest_of_slist is the parameter list without elementary
requirement.
"""
# Elementary requirement consists of at least two elements: a
# short-hand notation and a regular expression (which should
# not be among the reserved words).
if len(slist)<2 or slist[1] in RESERVED:
return None,slist
# Should the items be compiled to regular expressions?
if slist[0] in ACTIONSHORTHANDS:
try:
itemregexp=re.compile(slist[1])
except:
raise ParseError("'%s' is not a valid regular expression" % slist[1])
# short-hand notation:
# action REGEXP <=> any value >= 1 for actions REGEXP
# actions REGEXP <=> every value >= 1 for actions REGEXP
if slist[0]==ACTIONSHORTHANDS[0]:
q=coverage.Query()
q.setItemType(coverage.eqiAction)
self.log("%s '%s' matches to:" % (ACTIONSHORTHANDS[0],slist[1]))
q.setItemRegExps(self._expand_action_regexp(itemregexp))
er=coverage.ElementaryRequirement()
er.setQuery(q)
er.setLowerBoundRequirement(coverage.eqqAny,1)
return er,slist[2:]
elif slist[0]==ACTIONSHORTHANDS[1]:
q=coverage.Query()
q.setItemType(coverage.eqiAction)
self.log("%s '%s' matches to:" % (ACTIONSHORTHANDS[0],slist[1]))
q.setItemRegExps(self._expand_action_regexp(itemregexp))
er=coverage.ElementaryRequirement()
er.setQuery(q)
er.setLowerBoundRequirement(coverage.eqqAll,1)
return er,slist[2:]
elif slist[0]==VALUESHORTHANDS[0]:
q=coverage.Query()
q.setItemType(coverage.eqiValue)
valuelist=slist[1].split(',')
self.log("use at least one of values: %s" % str(valuelist))
q.setItemRegExps(valuelist)
er=coverage.ElementaryRequirement()
er.setQuery(q)
er.setValueCoverageRequirement(coverage.eqqAny,valuelist)
return er,slist[2:]
elif slist[0]==VALUESHORTHANDS[1]:
q=coverage.Query()
q.setItemType(coverage.eqiValue)
valuelist=slist[1].split(',')
self.log("use all values: %s" % str(valuelist))
q.setItemRegExps(valuelist)
er=coverage.ElementaryRequirement()
er.setQuery(q)
er.setValueCoverageRequirement(coverage.eqqAll,valuelist)
return er,slist[2:]
else:
return None,slist
class CRParser:
"""Parser for Combined Requirements;
CR ::= ER | CR (and|or|then) CR | "(" CR ")"
"""
def _cleanup_tree(self,treenode):
"""Called after parsing. Checks that all parentheses have been
matched, removes parentheses nodes, raises parse error if
there are leafnodes without elementary criteria (for example,
caused by 'actions a and ()') ."""
# find the root
while treenode.parent: treenode=treenode.parent
rootnode=treenode
if len(rootnode.children)==0:
raise ParseError("Empty coverage requirement")
# check tree validity, remove parentheses
node_stack=[rootnode]
while node_stack:
node=node_stack.pop()
if node.string=="(":
raise ParseError("Unmatched '('.")
if isinstance(node.item,OperatorItem):
if len(node.children)<2:
raise ParseError("Too few parameters for operator '%s'" % node.item.name)
node_stack.extend(node.children)
if node.string=="()":
if len(node.children)==0:
raise ParseError("Empty parenthesis")
# remove node from tree, keep the order of node's parents children
newchildren=[]
for c in node.parent.children:
if c!=node:
newchildren.append(c)
else:
newchildren.extend(node.children)
node.parent.children=newchildren
node.children=[]
def parse(self,slist,treenode,erparser):
"""Parses the strings in the slist. Returns parse tree."""
if (hasattr(self,"original_string")):
self.log("Coverage requirement: %s" %
self.original_string)
if len(slist)==0:
self._cleanup_tree(treenode)
return
if slist[0]=="(": ### OPEN PARENTHESES
if not treenode.string in ["ROOT","("] + OPERATORS:
raise ParseError("Cannot open parentheses here: '%s'" % " ".join(slist[:5]))
self.parse(slist[1:],
ParseTreeNode(parent=treenode,children=[],string=slist[0]),
erparser)
elif slist[0]==")": ### CLOSE PARENTHESES
# search for the last open parentheses
# continue parsing from that treenode
while treenode.string!="(":
treenode=treenode.parent
if not treenode:
raise ParseError("Unmatched ')': '%s...'" % " ".join(slist[:5]))
treenode.string+=")"
self.parse(slist[1:],
treenode,
erparser)
elif slist[0].lower() in OPERATORS: ### OPERATOR
if not (isinstance(treenode.item,coverage.ElementaryRequirement)
or treenode.string=="()"):
raise ParseError("Operator not expected: '%s...'" % " ".join(slist[:5]))
operitem=OperatorItem(slist[0])
# We go towards the root in the parse tree until we find a
# node whose parent is 1) ROOT, 2) open parentheses or 3)
# an operator with equal or lower precedence. Replace that
# node with this operator.
while not treenode.parent.string in ["ROOT","("] \
and \
not (treenode.parent.string in OPERATORS \
and treenode.parent.item<operitem):
treenode=treenode.parent
newnode=ParseTreeNode(parent=treenode.parent,
children=[treenode],
string=str(operitem),
item=operitem)
treenode.parent=newnode
newnode.parent.children.remove(treenode)
self.parse(slist[1:],newnode,erparser)
else: ### ELEMENTARY REQUIREMENT
# try if slist starts with Elementary Requirement
# parent should be either root without other children or operator
if (not treenode.string in OPERATORS) and \
(not treenode.string in ["ROOT", "("]):
#(len(treenode.children)>0):
raise ParseError("Operator expected before: '%s...'" % " ".join(slist[:5]))
er,rest=erparser.parse(slist)
if not er:
raise ParseError("Syntax error: '%s...'" % " ".join(slist[:5]))
newnode=ParseTreeNode(parent=treenode,
children=[],
string=" ".join(slist[:len(slist)-len(rest)]),
item=er)
self.parse(rest,newnode,erparser)
def _split_to_slist(s):
"""Splits coverage language string to a list of strings. Here we
make sure that parentheses () will be separate items in list.
???Consider: it would be better to separate only those parentheses
which are not escaped with backslash; this would allow using
(escaped) parentheses in the regular expressions."""
return s.replace("("," ( ").replace(")"," ) ").split()
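# For example, _split_to_slist("(action a.*) and value 1,2") returns
# ['(', 'action', 'a.*', ')', 'and', 'value', '1,2'].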
def _Replace_Operators_with_CombinedRequirements(node):
"""Overwrite item-fields of Operator tree nodes with
corresponding CombinedRequirements"""
for child in node.children: # go to leaf nodes
_Replace_Operators_with_CombinedRequirements(child)
# Now every item of the children is either ElementaryRequirement
# or CombinedRequirement. If this node includes Operator, make it
# CombinedRequirement. Otherwise this node must be
# ElementaryRequirement.
if isinstance(node.item,OperatorItem):
cr=coverage.CombinedRequirement()
cr.setOperator(node.item.getOperator())
cr.setRequirements( [c.item for c in node.children] )
node.item=cr
elif not isinstance(node.item,coverage.ElementaryRequirement)\
and not node.string=="ROOT":
print "What the hexx is this?",node.item
raise "HEXXISH ERROR"
def parse(s,model=None):
"""This function is mostly for internal and testing use. To get
the requirement of the coverage language expression, use
the requirement function."""
crparser=CRParser()
erparser=ERParser(model)
# Make beautiful log entries (all parsers write 'Coverage:')
# log method is plugged to requirement function by main program
if hasattr(requirement,'log'):
class Coverage: pass
dummycoverage=Coverage()
logfunc=lambda msg: requirement.log(dummycoverage,msg)
crparser.log=logfunc
erparser.log=logfunc
else:
crparser.log = lambda msg: None
erparser.log = lambda msg: None
rootnode=ParseTreeNode(string="ROOT",children=[])
slist=_split_to_slist(s)
crparser.original_string=s
crparser.parse(slist,rootnode,erparser)
return rootnode
def requirement(s,model=None):
"""Returns requirement object (either elementary or combined
requirement) corresponding to the coverage language expression."""
rootnode=parse(s,model)
_Replace_Operators_with_CombinedRequirements(rootnode)
return rootnode.children[0].item
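# Illustrative usage sketch (assumes the tema.coverage module is importable):
#
#     req = requirement("action start.* and (value 1,2 or actions key.*)")
#
# This yields a CombinedRequirement for the 'and' operator whose children are
# the elementary requirement for 'action start.*' and the nested 'or'
# combination, following the grammar CR ::= ER | CR (and|or|then) CR | "(" CR ")".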
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import argparse
import logging
import random
import signal
import mxnet as mx
import numpy as np
import horovod.mxnet as hvd
from common import find_mxnet, dali, fit, data
from mlperf_logging.mllog import constants
from mlperf_log_utils import mx_resnet_print_event, mx_resnet_print_start, mx_resnet_print_end, mpiwrapper, mlperf_submission_log
def add_general_args(parser):
parser.add_argument('--verbose', type=int, default=0,
help='turn on reporting of chosen algos for convolution, etc.')
parser.add_argument('--seed', type=int, default=None,
help='set the seed for python, nd and mxnet rngs')
parser.add_argument('--custom-bn-off', type=int, default=0,
help='disable use of custom batchnorm kernel')
parser.add_argument('--fuse-bn-relu', type=int, default=0,
help='have batchnorm kernel perform activation relu')
parser.add_argument('--fuse-bn-add-relu', type=int, default=0,
help='have batchnorm kernel perform add followed by activation relu')
parser.add_argument('--input-layout', type=str, default='NCHW',
help='the layout of the input data (e.g. NCHW)')
parser.add_argument('--conv-layout', type=str, default='NCHW',
help='the layout of the data assumed by the conv operation (e.g. NCHW)')
parser.add_argument('--conv-algo', type=int, default=-1,
help='set the convolution algos (fwd, dgrad, wgrad)')
parser.add_argument('--force-tensor-core', type=int, default=0,
help='require conv algos to be tensor core')
parser.add_argument('--batchnorm-layout', type=str, default='NCHW',
help='the layout of the data assumed by the batchnorm operation (e.g. NCHW)')
parser.add_argument('--batchnorm-eps', type=float, default=2e-5,
help='the amount added to the batchnorm variance to prevent output explosion.')
parser.add_argument('--batchnorm-mom', type=float, default=0.9,
help='the leaky-integrator factor controlling the batchnorm mean and variance.')
parser.add_argument('--pooling-layout', type=str, default='NCHW',
help='the layout of the data assumed by the pooling operation (e.g. NCHW)')
parser.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
parser.add_argument('--bn-group', type=int, default=1, choices=[1, 2, 4],
help='Group of processes to collaborate on BatchNorm ops')
def _get_gpu(gpus):
idx = hvd.local_rank()
gpu = gpus.split(",")[idx]
return gpu
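# Custom initializers: fully-connected ("fc") weights are drawn from a
# zero-mean normal distribution with scale 0.01; every other weight falls
# back to the Xavier scheme configured at construction time. BNZeroInit
# additionally initializes the "bn3" batchnorm gammas to zero instead of one.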
class MLPerfInit(mx.init.Xavier):
def _init_weight(self, name, arg):
if name.startswith("fc"):
mx.ndarray.random.normal(0, 0.01, out=arg)
else:
return super()._init_weight(name, arg)
class BNZeroInit(mx.init.Xavier):
def _init_gamma(self, name, arg):
if name.endswith("bn3_gamma"):
arg[:] = 0.0
else:
arg[:] = 1.0
if __name__ == '__main__':
mx_resnet_print_start(key=constants.INIT_START, uniq=False)
# parse args
parser = argparse.ArgumentParser(description="MLPerf RN50v1.5 training script",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
add_general_args(parser)
fit.add_fit_args(parser)
dali.add_dali_args(parser)
parser.set_defaults(
# network
network = 'resnet-v1b',
num_layers = 50,
# data
resize = 256,
num_classes = 1000,
num_examples = 1281167,
image_shape = '3,224,224',
# train
num_epochs = 100,
lr_step_epochs = '30,60,80',
dtype = 'float32'
)
args = parser.parse_args()
args.local_rank = None
if 'horovod' in args.kv_store:
# initialize Horovod with mpi4py comm
hvd.init(mpiwrapper.get_comm())
args.gpus = _get_gpu(args.gpus)
kv = None
local_rank = hvd.local_rank()
args.local_rank = local_rank
# dummy Horovod ops to initialize resources
ctx=mx.gpu(local_rank)
tensor1 = mx.nd.zeros(shape=(1), dtype='float16', ctx=ctx)
tensor2 = mx.nd.zeros(shape=(1), dtype='float32', ctx=ctx)
summed1 = hvd.allreduce(tensor1, average=False)
summed2 = hvd.allreduce(tensor2, average=False)
framework = 'MxNet NGC {}'.format(os.environ["NVIDIA_MXNET_VERSION"])
# DISABLE FOR NOW. CAUSES CRASHES.
#mlperf_submission_log(
# benchmark=mlperf_constants.RESNET,
# framework=framework,
#)
# Load network
from importlib import import_module
net = import_module('symbols.'+args.network)
# Initialize seed + random number generators
if args.seed is None:
args.seed = int(random.SystemRandom().randint(0, 2**16 - 1))
mx_resnet_print_event(key=constants.SEED, val=args.seed)
if 'horovod' in args.kv_store:
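# Every rank seeds NumPy with the same base seed, draws the same array of
# per-rank seeds, and then keeps the entry matching its own rank, so the
# per-rank seeds are distinct but reproducible.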
np.random.seed(args.seed)
all_seeds = np.random.randint(2**16, size=(hvd.size()))
args.seed = int(all_seeds[hvd.rank()])
else:
kv = mx.kvstore.create(args.kv_store)
random.seed(args.seed)
np.random.seed(args.seed)
mx.random.seed(args.seed)
# Devices for training
devs = mx.cpu() if args.gpus is None or args.gpus == "" else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
# Load symbol definition and create model
sym = net.get_symbol(**vars(args))
model = mx.mod.Module(context=devs, symbol=sym)
# Weights init
initializer = MLPerfInit(
rnd_type='gaussian', factor_type="in", magnitude=2) if not args.bn_gamma_init0 else BNZeroInit(rnd_type='gaussian', factor_type="in", magnitude=2)
# Start DALI pipeline
if not args.use_dali:
lambda_fnc_dali_get_rec_iter=data.build_input_pipeline(args,kv)
else:
lambda_fnc_dali_get_rec_iter=dali.build_input_pipeline(args, kv)
arg_params, aux_params = None, None
# Model fetch and broadcast
if 'horovod' in args.kv_store:
# Create dummy data shapes and bind them to the model
data_shapes = [mx.io.DataDesc('data',(args.batch_size, 224, 224, 4),'float16')]
label_shapes = [mx.io.DataDesc('softmax_label',(args.batch_size,),'float32')]
model.bind(data_shapes=data_shapes, label_shapes=label_shapes)
# Horovod: fetch and broadcast parameters
model.init_params(initializer, arg_params=arg_params, aux_params=aux_params)
(arg_params, aux_params) = model.get_params()
if arg_params is not None:
hvd.broadcast_parameters(arg_params, root_rank=0)
if aux_params is not None:
hvd.broadcast_parameters(aux_params, root_rank=0)
model.set_params(arg_params=arg_params, aux_params=aux_params)
mx.ndarray.waitall()
mx_resnet_print_end(key=constants.INIT_STOP)
# Start training
fit.fit(args, kv, model, initializer, lambda_fnc_dali_get_rec_iter, devs, arg_params, aux_params)
# Timeout alarm for possible hangs at job end
# TODO: REMOVE THIS!
signal.alarm(90)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Simple HTTP client implementation based on the ``httplib`` module in the
standard library.
"""
from base64 import b64encode
from datetime import datetime
import errno
import socket
import time
import sys
try:
from threading import Lock
except ImportError:
from dummy_threading import Lock
try:
from http.client import BadStatusLine, HTTPConnection, HTTPSConnection
except ImportError:
from httplib import BadStatusLine, HTTPConnection, HTTPSConnection
try:
from email.Utils import parsedate
except ImportError:
from email.utils import parsedate
from couchdb import json
from couchdb import util
__all__ = ['HTTPError', 'PreconditionFailed', 'ResourceNotFound',
'ResourceConflict', 'ServerError', 'Unauthorized', 'RedirectLimit',
'Session', 'Resource']
__docformat__ = 'restructuredtext en'
if sys.version < '2.7':
from httplib import CannotSendHeader, _CS_REQ_STARTED, _CS_REQ_SENT
class NagleMixin:
"""
Mixin to upgrade httplib connection types so headers and body can be
sent at the same time to avoid triggering Nagle's algorithm.
Based on code originally copied from Python 2.7's httplib module.
"""
def endheaders(self, message_body=None):
if self.__dict__['_HTTPConnection__state'] == _CS_REQ_STARTED:
self.__dict__['_HTTPConnection__state'] = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body)
def _send_output(self, message_body=None):
self._buffer.extend(("", ""))
msg = "\r\n".join(self._buffer)
del self._buffer[:]
if isinstance(message_body, str):
msg += message_body
message_body = None
self.send(msg)
if message_body is not None:
self.send(message_body)
class HTTPConnection(NagleMixin, HTTPConnection):
pass
class HTTPSConnection(NagleMixin, HTTPSConnection):
pass
class HTTPError(Exception):
"""Base class for errors based on HTTP status codes >= 400."""
class PreconditionFailed(HTTPError):
"""Exception raised when a 412 HTTP error is received in response to a
request.
"""
class ResourceNotFound(HTTPError):
"""Exception raised when a 404 HTTP error is received in response to a
request.
"""
class ResourceConflict(HTTPError):
"""Exception raised when a 409 HTTP error is received in response to a
request.
"""
class ServerError(HTTPError):
"""Exception raised when an unexpected HTTP error is received in response
to a request.
"""
class Unauthorized(HTTPError):
"""Exception raised when the server requires authentication credentials
but either none are provided, or they are incorrect.
"""
class RedirectLimit(Exception):
"""Exception raised when a request is redirected more often than allowed
by the maximum number of redirections.
"""
CHUNK_SIZE = 1024 * 8
class ResponseBody(object):
def __init__(self, resp, conn_pool, url, conn):
self.resp = resp
self.chunked = self.resp.msg.get('transfer-encoding') == 'chunked'
self.conn_pool = conn_pool
self.url = url
self.conn = conn
def __del__(self):
if not self.chunked:
self.close()
else:
self.resp.close()
if self.conn:
# Since chunked responses can be infinite (i.e. for
# feed=continuous), and we want to avoid leaking sockets
# (even if just to prevent ResourceWarnings when running
# the test suite on Python 3), we'll close this connection
# eagerly. We can't get it into the clean state required to
# put it back into the ConnectionPool (since we don't know
# when it ends and we can only do blocking reads). Finding
# out whether it might in fact end would be relatively onerous
# and require a layering violation.
self.conn.close()
def read(self, size=None):
bytes = self.resp.read(size)
if size is None or len(bytes) < size:
self.close()
return bytes
def _release_conn(self):
self.conn_pool.release(self.url, self.conn)
self.conn_pool, self.url, self.conn = None, None, None
def close(self):
while not self.resp.isclosed():
self.resp.read(CHUNK_SIZE)
if self.conn:
self._release_conn()
def iterchunks(self):
assert self.chunked
while True:
if self.resp.isclosed():
break
chunksz = int(self.resp.fp.readline().strip(), 16)
if not chunksz:
self.resp.fp.read(2) #crlf
self.resp.close()
self._release_conn()
break
chunk = self.resp.fp.read(chunksz)
for ln in chunk.splitlines():
yield ln
self.resp.fp.read(2) #crlf
RETRYABLE_ERRORS = frozenset([
errno.EPIPE, errno.ETIMEDOUT,
errno.ECONNRESET, errno.ECONNREFUSED, errno.ECONNABORTED,
errno.EHOSTDOWN, errno.EHOSTUNREACH,
errno.ENETRESET, errno.ENETUNREACH, errno.ENETDOWN
])
class Session(object):
def __init__(self, cache=None, timeout=None, max_redirects=5,
retry_delays=[0], retryable_errors=RETRYABLE_ERRORS):
"""Initialize an HTTP client session.
:param cache: an instance with a dict-like interface or None to allow
Session to create a dict for caching.
:param timeout: socket timeout in number of seconds, or `None` for no
timeout (the default)
:param retry_delays: list of request retry delays.
"""
from couchdb import __version__ as VERSION
self.user_agent = 'CouchDB-Python/%s' % VERSION
# XXX We accept a `cache` dict arg, but the ref gets overwritten later
# during cache cleanup. Do we remove the cache arg (does using a shared
# Session instance cover the same use cases?) or fix the cache cleanup?
# For now, let's just assign the dict to the Cache instance to retain
# current behaviour.
if cache is not None:
cache_by_url = cache
cache = Cache()
cache.by_url = cache_by_url
else:
cache = Cache()
self.cache = cache
self.max_redirects = max_redirects
self.perm_redirects = {}
self.connection_pool = ConnectionPool(timeout)
self.retry_delays = list(retry_delays) # We don't want this changing on us.
self.retryable_errors = set(retryable_errors)
def request(self, method, url, body=None, headers=None, credentials=None,
num_redirects=0):
if url in self.perm_redirects:
url = self.perm_redirects[url]
method = method.upper()
if headers is None:
headers = {}
headers.setdefault('Accept', 'application/json')
headers['User-Agent'] = self.user_agent
cached_resp = None
if method in ('GET', 'HEAD'):
cached_resp = self.cache.get(url)
if cached_resp is not None:
etag = cached_resp[1].get('etag')
if etag:
headers['If-None-Match'] = etag
if (body is not None and not isinstance(body, util.strbase) and
not hasattr(body, 'read')):
body = json.encode(body).encode('utf-8')
headers.setdefault('Content-Type', 'application/json')
if body is None:
headers.setdefault('Content-Length', '0')
elif isinstance(body, util.strbase):
headers.setdefault('Content-Length', str(len(body)))
else:
headers['Transfer-Encoding'] = 'chunked'
authorization = basic_auth(credentials)
if authorization:
headers['Authorization'] = authorization
path_query = util.urlunsplit(('', '') + util.urlsplit(url)[2:4] + ('',))
conn = self.connection_pool.get(url)
def _try_request_with_retries(retries):
while True:
try:
return _try_request()
except socket.error as e:
ecode = e.args[0]
if ecode not in self.retryable_errors:
raise
try:
delay = next(retries)
except StopIteration:
# No more retries, raise last socket error.
raise e
# Sleep before retrying, then close the dead connection so that the
# next attempt reconnects cleanly.
time.sleep(delay)
conn.close()
def _try_request():
try:
conn.putrequest(method, path_query, skip_accept_encoding=True)
for header in headers:
conn.putheader(header, headers[header])
if body is None:
conn.endheaders()
else:
if isinstance(body, util.strbase):
if isinstance(body, util.utype):
conn.endheaders(body.encode('utf-8'))
else:
conn.endheaders(body)
else: # assume a file-like object and send in chunks
conn.endheaders()
while 1:
chunk = body.read(CHUNK_SIZE)
if not chunk:
break
if isinstance(chunk, util.utype):
chunk = chunk.encode('utf-8')
status = ('%x\r\n' % len(chunk)).encode('utf-8')
conn.send(status + chunk + b'\r\n')
conn.send(b'0\r\n\r\n')
return conn.getresponse()
except BadStatusLine as e:
# httplib raises a BadStatusLine when it cannot read the status
# line saying, "Presumably, the server closed the connection
# before sending a valid response."
# Raise as ECONNRESET to simplify retry logic.
if e.line == '' or e.line == "''":
raise socket.error(errno.ECONNRESET)
else:
raise
resp = _try_request_with_retries(iter(self.retry_delays))
status = resp.status
# Handle conditional response
if status == 304 and method in ('GET', 'HEAD'):
resp.read()
self.connection_pool.release(url, conn)
status, msg, data = cached_resp
if data is not None:
data = util.StringIO(data)
return status, msg, data
elif cached_resp:
self.cache.remove(url)
# Handle redirects
if status == 303 or \
method in ('GET', 'HEAD') and status in (301, 302, 307):
resp.read()
self.connection_pool.release(url, conn)
if num_redirects > self.max_redirects:
raise RedirectLimit('Redirection limit exceeded')
location = resp.getheader('location')
if status == 301:
self.perm_redirects[url] = location
elif status == 303:
method = 'GET'
return self.request(method, location, body, headers,
num_redirects=num_redirects + 1)
data = None
streamed = False
# Read the full response for empty responses so that the connection is
# in good state for the next request
if method == 'HEAD' or resp.getheader('content-length') == '0' or \
status < 200 or status in (204, 304):
resp.read()
self.connection_pool.release(url, conn)
# Buffer small non-JSON response bodies
elif int(resp.getheader('content-length', sys.maxsize)) < CHUNK_SIZE:
data = resp.read()
self.connection_pool.release(url, conn)
# For large or chunked response bodies, do not buffer the full body,
# and instead return a minimal file-like object
else:
data = ResponseBody(resp, self.connection_pool, url, conn)
streamed = True
# Handle errors
if status >= 400:
ctype = resp.getheader('content-type')
if data is not None and 'application/json' in ctype:
data = json.decode(data.decode('utf-8'))
error = data.get('error'), data.get('reason')
elif method != 'HEAD':
error = resp.read()
self.connection_pool.release(url, conn)
else:
error = ''
if status == 401:
raise Unauthorized(error)
elif status == 404:
raise ResourceNotFound(error)
elif status == 409:
raise ResourceConflict(error)
elif status == 412:
raise PreconditionFailed(error)
else:
raise ServerError((status, error))
# Store cachable responses
if not streamed and method == 'GET' and 'etag' in resp.msg:
self.cache.put(url, (status, resp.msg, data))
if not streamed and data is not None:
data = util.StringIO(data)
return status, resp.msg, data
def cache_sort(i):
return datetime.fromtimestamp(time.mktime(parsedate(i[1][1]['Date'])))
class Cache(object):
"""Content cache."""
# Some random values to limit memory use
keep_size, max_size = 10, 75
def __init__(self):
self.by_url = {}
def get(self, url):
return self.by_url.get(url)
def put(self, url, response):
self.by_url[url] = response
if len(self.by_url) > self.max_size:
self._clean()
def remove(self, url):
self.by_url.pop(url, None)
def _clean(self):
ls = sorted(self.by_url.items(), key=cache_sort)
self.by_url = dict(ls[-self.keep_size:])
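# _clean() keeps only the keep_size most recently dated responses, using
# the HTTP Date header (via cache_sort above) as the recency key.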
class ConnectionPool(object):
"""HTTP connection pool."""
def __init__(self, timeout):
self.timeout = timeout
self.conns = {} # HTTP connections keyed by (scheme, host)
self.lock = Lock()
def get(self, url):
scheme, host = util.urlsplit(url, 'http', False)[:2]
# Try to reuse an existing connection.
self.lock.acquire()
try:
conns = self.conns.setdefault((scheme, host), [])
if conns:
conn = conns.pop(-1)
else:
conn = None
finally:
self.lock.release()
# Create a new connection if nothing was available.
if conn is None:
if scheme == 'http':
cls = HTTPConnection
elif scheme == 'https':
cls = HTTPSConnection
else:
raise ValueError('%s is not a supported scheme' % scheme)
conn = cls(host, timeout=self.timeout)
conn.connect()
return conn
def release(self, url, conn):
scheme, host = util.urlsplit(url, 'http', False)[:2]
self.lock.acquire()
try:
self.conns.setdefault((scheme, host), []).append(conn)
finally:
self.lock.release()
def __del__(self):
for key, conns in list(self.conns.items()):
for conn in conns:
conn.close()
class Resource(object):
def __init__(self, url, session, headers=None):
if sys.version_info[0] == 2 and isinstance(url, util.utype):
url = url.encode('utf-8') # kind of an ugly hack for issue 235
self.url, self.credentials = extract_credentials(url)
if session is None:
session = Session()
self.session = session
self.headers = headers or {}
def __call__(self, *path):
obj = type(self)(urljoin(self.url, *path), self.session)
obj.credentials = self.credentials
obj.headers = self.headers.copy()
return obj
def delete(self, path=None, headers=None, **params):
return self._request('DELETE', path, headers=headers, **params)
def get(self, path=None, headers=None, **params):
return self._request('GET', path, headers=headers, **params)
def head(self, path=None, headers=None, **params):
return self._request('HEAD', path, headers=headers, **params)
def post(self, path=None, body=None, headers=None, **params):
return self._request('POST', path, body=body, headers=headers,
**params)
def put(self, path=None, body=None, headers=None, **params):
return self._request('PUT', path, body=body, headers=headers, **params)
def delete_json(self, path=None, headers=None, **params):
return self._request_json('DELETE', path, headers=headers, **params)
def get_json(self, path=None, headers=None, **params):
return self._request_json('GET', path, headers=headers, **params)
def post_json(self, path=None, body=None, headers=None, **params):
return self._request_json('POST', path, body=body, headers=headers,
**params)
def put_json(self, path=None, body=None, headers=None, **params):
return self._request_json('PUT', path, body=body, headers=headers,
**params)
def _request(self, method, path=None, body=None, headers=None, **params):
all_headers = self.headers.copy()
all_headers.update(headers or {})
if path is not None:
url = urljoin(self.url, path, **params)
else:
url = urljoin(self.url, **params)
return self.session.request(method, url, body=body,
headers=all_headers,
credentials=self.credentials)
def _request_json(self, method, path=None, body=None, headers=None, **params):
status, headers, data = self._request(method, path, body=body,
headers=headers, **params)
if 'application/json' in headers.get('content-type', ''):
data = json.decode(data.read().decode('utf-8'))
return status, headers, data
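# Illustrative usage sketch (hypothetical server URL; not part of the
# original module):
#
#     session = Session(retry_delays=[0, 1, 2])
#     res = Resource('http://localhost:5984', session)
#     status, headers, data = res.get_json('_all_dbs')
#
# get_json() decodes the body of application/json responses, so `data` is
# the parsed JSON value rather than a file-like object.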
def extract_credentials(url):
"""Extract authentication (user name and password) credentials from the
given URL.
>>> extract_credentials('http://localhost:5984/_config/')
('http://localhost:5984/_config/', None)
>>> extract_credentials('http://joe:secret@localhost:5984/_config/')
('http://localhost:5984/_config/', ('joe', 'secret'))
>>> extract_credentials('http://joe%40example.com:secret@localhost:5984/_config/')
('http://localhost:5984/_config/', ('joe@example.com', 'secret'))
"""
parts = util.urlsplit(url)
netloc = parts[1]
if '@' in netloc:
creds, netloc = netloc.split('@')
credentials = tuple(util.urlunquote(i) for i in creds.split(':'))
parts = list(parts)
parts[1] = netloc
else:
credentials = None
return util.urlunsplit(parts), credentials
def basic_auth(credentials):
"""Generates authorization header value for given credentials.
>>> basic_auth(('root', 'relax'))
b'Basic cm9vdDpyZWxheA=='
>>> basic_auth(None)
>>> basic_auth(())
"""
if credentials:
token = b64encode(('%s:%s' % credentials).encode('latin1'))
return ('Basic %s' % token.strip().decode('latin1')).encode('ascii')
def quote(string, safe=''):
if isinstance(string, util.utype):
string = string.encode('utf-8')
return util.urlquote(string, safe)
def urlencode(data):
if isinstance(data, dict):
data = data.items()
params = []
for name, value in data:
if isinstance(value, util.utype):
value = value.encode('utf-8')
params.append((name, value))
return util.urlencode(params)
def urljoin(base, *path, **query):
"""Assemble a uri based on a base, any number of path segments, and query
string parameters.
>>> urljoin('http://example.org', '_all_dbs')
'http://example.org/_all_dbs'
A trailing slash on the uri base is handled gracefully:
>>> urljoin('http://example.org/', '_all_dbs')
'http://example.org/_all_dbs'
And multiple positional arguments become path parts:
>>> urljoin('http://example.org/', 'foo', 'bar')
'http://example.org/foo/bar'
All slashes within a path part are escaped:
>>> urljoin('http://example.org/', 'foo/bar')
'http://example.org/foo%2Fbar'
>>> urljoin('http://example.org/', 'foo', '/bar/')
'http://example.org/foo/%2Fbar%2F'
>>> urljoin('http://example.org/', None) #doctest:+IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: argument 2 to map() must support iteration
"""
if base and base.endswith('/'):
base = base[:-1]
retval = [base]
# build the path
path = '/'.join([''] + [quote(s) for s in path])
if path:
retval.append(path)
# build the query string
params = []
for name, value in query.items():
if type(value) in (list, tuple):
params.extend([(name, i) for i in value if i is not None])
elif value is not None:
if value is True:
value = 'true'
elif value is False:
value = 'false'
params.append((name, value))
if params:
retval.extend(['?', urlencode(params)])
return ''.join(retval)
|
|
#! /usr/bin/env python
# coding: utf-8
"""Pocketwalk tool runner."""
# [ Imports ]
# [ -Python ]
import os
import pathlib
import pty
import selectors
import subprocess
import sys
from pprint import pprint
# [ -Third Party ]
import pytoml as toml
from runaway import signals
# [ API ]
def get_tool_runner():
"""Get the tool runner plugin."""
return ToolRunner()
# [ Internal ]
class ToolRunner:
"""Tool Runner Plugin."""
def __init__(self):
"""Init the state."""
self._running_tools = {}
self._return_codes = {}
self._reported_tools = {}
# [ API ]
async def get_tool_state(self):
"""Get the tool state."""
state = {}
for tool, return_code in self._return_codes.items():
state[tool] = {
'running': False,
'return code': return_code,
}
for tool in self._running_tools:
state[tool] = {
'running': True,
'return code': None,
}
return state
def all_tools_passed(self, tools):
"""Return whether all of the tools have passed."""
return all(self._return_codes.get(t, None) == 0 for t in tools)
def any_tools_not_done(self):
"""Return whether any of the tools are not done."""
return bool(self._running_tools)
async def return_codes(self, tools):
"""Return the return codes."""
return [self._return_codes[t] for t in tools if t in self._return_codes]
async def ensure_tools_running(self, contexts_for_tools, *, on_completion):
"""
Ensure the tools are running with their current contexts.
Calls the on_completion function with the tool and RC on completion of each tool.
Runs the tools concurrently.
"""
tools = contexts_for_tools.keys()
tools_to_start = [t for t in tools if t not in self._running_tools]
if tools_to_start:
print(f"Starting tools: {tools_to_start}")
for this_tool in tools_to_start:
if this_tool in self._return_codes:
del self._return_codes[this_tool]
self._running_tools[this_tool] = {
'context': contexts_for_tools[this_tool],
'process future': await signals.future(
self._run_tool,
this_tool,
context=contexts_for_tools[this_tool],
on_completion=on_completion,
),
}
for state in self._running_tools.values():
exc_info = state['process future'].exception
if exc_info:
raise exc_info[1].with_traceback(exc_info[2])
async def get_tools_failing_preconditions(self, contexts, *, tools_to_run):
"""Get the tools which are failing their preconditions."""
failing = {}
for tool, current_context in contexts['current_state'].items():
if not all(self._return_codes.get(t, None) == 0 for t in current_context['preconditions']):
failing[tool] = current_context
if any(t in current_context['preconditions'] for t in tools_to_run):
failing[tool] = current_context
return failing
async def filter_out_reported_tools(self, tools_with_contexts):
"""Filter out any previously reported tools."""
unreported_tools = {}
for tool, context in tools_with_contexts.items():
reported_context = self._reported_tools.get(tool, None)
context = context.copy()
if 'affected files' in context:
del context['affected files']
if reported_context != context:
unreported_tools[tool] = context
return unreported_tools
async def cleanup(self):
"""Ensure the tools are stopped."""
print("Cleaning up tools...")
tools_to_stop = list(self._running_tools.keys())
for this_tool in tools_to_stop:
await signals.cancel(self._running_tools[this_tool]['process future'])
del self._running_tools[this_tool]
self._return_codes[this_tool] = 130
if tools_to_stop:
print(f"Cancelled running tools: {tools_to_stop}")
print("Done.")
async def ensure_stale_tools_stopped(self, contexts_for_tools):
"""
Ensure the tools are stopped.
Stops the given tools if their contexts are stale.
"""
tools = contexts_for_tools.keys()
tools_to_stop = [t for t in tools if t in self._running_tools and (
self._running_tools[t]['context'] != contexts_for_tools[t]
)]
for this_tool in tools_to_stop:
await signals.cancel(self._running_tools[this_tool]['process future'])
del self._running_tools[this_tool]
if tools_to_stop:
print(f"Stopped stale tools: {tools_to_stop}")
async def ensure_tools_stopped(self, contexts_for_tools, *, reason):
"""Ensure the tools are stopped."""
tools_to_stop = [t for t in self._running_tools if t in contexts_for_tools]
for this_tool in tools_to_stop:
await signals.cancel(self._running_tools[this_tool]['process future'])
del self._running_tools[this_tool]
if tools_to_stop:
print(f"Stopped tools with {reason}: {tools_to_stop}")
async def ensure_removed_tools_stopped(self, config):
"""Ensure tools not in the tools list are stopped."""
tools_to_stop = [t for t in self._running_tools if t not in config['tools']]
for this_tool in tools_to_stop:
await signals.cancel(self._running_tools[this_tool]['process future'])
del self._running_tools[this_tool]
if tools_to_stop:
print(f"Stopped removed tools: {tools_to_stop}")
async def replay_previous_results_for(self, tools):
"""Replay the previous results for the given tools."""
previous_results = {t: {
'output': (
pathlib.Path.cwd() / '.pocketwalk.cache' / t
).with_suffix('.output').read_bytes(),
'return code': max(toml.loads((
pathlib.Path.cwd() / '.pocketwalk.cache' / t
).with_suffix('.return_codes').read_text()).values()),
} for t in tools.keys()}
return_codes = []
for this_tool, results in previous_results.items():
print(f"{this_tool} is unchanged. Last output:")
print(results['output'].decode('utf-8'))
self._report_tool_result(this_tool, return_code=results['return code'])
return_codes.append(results['return code'])
self._return_codes[this_tool] = results['return code']
for this_tool, context in tools.items():
context = context.copy()
if 'affected files' in context:
del context['affected files']
self._reported_tools[this_tool] = context
return return_codes
# [ Internal ]
@staticmethod
def _report_tool_result(tool, *, return_code):
"""Report the tool's result."""
if return_code != 0:
print(f"{tool} failed with RC {return_code}")
else:
print(f"{tool} passed")
@staticmethod
def _process_output(stdout):
"""Process the output."""
line = os.read(stdout, 1024)
if line:
sys.stdout.buffer.write(line)
sys.stdout.flush()
return line
async def _run_tool(self, tool, *, context, on_completion):
"""Run a single tool."""
if tool in self._return_codes:
del self._return_codes[tool]
previous_rcs = await self._load_rcs(tool, context=context)
targets_used = self._get_targets(previous_rcs=previous_rcs, context=context)
substituted = []
# substitute the {affected_targets} placeholder in the configured args
for this_arg in context['config']:
if this_arg == '{affected_targets}':
substituted += targets_used
else:
substituted.append(this_arg)
args = [tool] + substituted
if not targets_used:
targets_used = ["*"]
pprint(args)
output, process = await self._run_pty(args)
self._report_tool_result(tool, return_code=process.returncode)
await on_completion(tool, context=context)
(pathlib.Path.cwd() / '.pocketwalk.cache' / tool).with_suffix('.output').write_bytes(output)
await self._save_rcs(tool, targets_used=targets_used, return_code=process.returncode, previous_rcs=previous_rcs)
self._return_codes[tool] = process.returncode
context = context.copy()
if 'affected files' in context:
del context['affected files']
self._reported_tools[tool] = context
del self._running_tools[tool]
if not self._running_tools:
print("No tools running.")
async def _load_rcs(self, tool, *, context):
"""Load saved RC's."""
rc_path = (pathlib.Path.cwd() / '.pocketwalk.cache' / tool).with_suffix('.return_codes')
        # parse the saved TOML, keeping only RC's for files that are still targets
try:
previous_rcs = {path: rc for path, rc in toml.loads(rc_path.read_text()).items() if path in context['target files']}
except FileNotFoundError:
previous_rcs = {}
return previous_rcs
async def _save_rcs(self, tool, *, targets_used, return_code, previous_rcs):
"""Save RC's."""
rc_path = (pathlib.Path.cwd() / '.pocketwalk.cache' / tool).with_suffix('.return_codes')
# save old RC's for current targets
new_rcs = {}
for path in list(previous_rcs.keys()):
new_rcs[path] = previous_rcs[path]
# save the actual RC's for paths that were used
for path in targets_used:
new_rcs[path] = return_code
rc_path.write_text(toml.dumps(new_rcs))
@staticmethod
def _get_targets(*, context, previous_rcs):
"""Get targets for the tool."""
targets_used = []
# add previously failing targets that are still valid targets to the list to run
for path, return_code in list(previous_rcs.items()):
if return_code != 0:
targets_used.append(path)
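        # when the config uses the placeholder, everything that changed runs too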
if '{affected_targets}' in context['config']:
targets_used += context['affected files']
return list(set(targets_used))
async def _run_pty(self, args):
"""Run a PTY."""
        # make a pseudo terminal for the subprocess so we get colors and such
        output_side, input_side = pty.openpty()
        # create the selector up front too, so the finally clause can always
        # close both fds and the selector even if the subprocess fails to start
        selector = selectors.DefaultSelector()
        try:
            process = subprocess.Popen(args, stdout=input_side, stderr=subprocess.STDOUT, start_new_session=True)
            await signals.sleep(1)
            selector.register(output_side, selectors.EVENT_READ, self._process_output)
output = b''
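            # zero-timeout selects inside an awaited loop drain the pty
            # cooperatively, without blocking the event loop, until the child exits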
while selector.get_map():
await signals.sleep(0)
events = selector.select(0)
for key, _mask in events:
output += key.data(output_side)
if process.poll() is not None:
break
while process.poll() is None:
                await signals.sleep(0)
return output, process
except GeneratorExit:
print("TERMINATED")
process.terminate()
process.kill()
raise
finally:
os.close(input_side)
os.close(output_side)
selector.close()
# [ Vulture ]
assert all((
get_tool_runner,
))
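# [ Illustration ]
# A hedged, standalone sketch of the RC-cache round trip used above: each
# tool's return codes live in a TOML table mapping target paths to RC's, and
# targets whose last RC was non-zero are selected to run again. Only 'toml'
# and the file naming mirror the code above; the paths and values are made up.
def _rc_cache_round_trip_example():
    import pathlib
    import tempfile
    import toml
    with tempfile.TemporaryDirectory() as tmp:
        rc_path = (pathlib.Path(tmp) / 'mytool').with_suffix('.return_codes')
        # save: every target that was run records the process return code
        rc_path.write_text(toml.dumps({'src/a.py': 0, 'src/b.py': 1}))
        # load: keep only RC's for files that are still targets
        targets = {'src/b.py', 'src/c.py'}
        previous = {p: rc for p, rc in toml.loads(rc_path.read_text()).items() if p in targets}
        # previously failing targets get queued to run again
        return [p for p, rc in previous.items() if rc != 0]  # -> ['src/b.py']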
|
|
import sublime
from heapq import heapify, heappop, heapreplace
from lisp_highlight_configuration \
import ColorMode, RegionColor, Configuration
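# NOTE: 'types' here is presumably the plugin's local types module (defining
# Region, Scope and ColorableSpan), not the standard library 'types'.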
from types import Region, Scope, ColorableSpan
def color_scopes(scopes, config, cursors, supported_brackets):
"""Splits and transforms the scopes into colorable regions.
The result of this transform is a list of _visible_ regions,
exact appearance of which is controlled by the `config`.
Args:
[scopes] - a sorted list of scopes to get transformed
config - the Configuration to use
[cursors] - a list of current cursors
[supported_brackets]
- a list of (left, right) pairs of strings that denote
the valid kinds of brackets
Returns:
[colorable_regions] - a list of resulting colorable regions
"""
def color_type_of(scope):
if scope.is_not_consistent_with(supported_brackets):
return RegionColor.INCONSISTENT, None
if scope.is_primary_mainline():
return RegionColor.PRIMARY, None
if scope.is_secondary_mainline():
return RegionColor.SECONDARY, scope.outer_index
if scope.is_adjacent_to(cursors):
return RegionColor.ADJACENT, None
if scope.is_offside():
return RegionColor.OFFSIDE, scope.inner_index
def extents_of(scope, mode):
if mode is ColorMode.NONE:
return []
if mode is ColorMode.BRACKETS:
return list(scope.bracket_regions())
if mode is ColorMode.EXPRESSION:
return [scope.expression_region()]
    def suitable(scope, color_type):
        # tuple parameters are Python 2 only; unpack explicitly instead
        kind, index = color_type
if kind is RegionColor.OFFSIDE:
return index <= config.offside_limit
if kind is RegionColor.ADJACENT:
need_left = config.adjacent_left
need_right = config.adjacent_right
for cursor in cursors:
if (need_left and scope.left_bracket.contains(cursor)) or \
(need_right and scope.right_bracket.contains(cursor)):
return True
else:
return False
return True
def touching(left_scope, right_scope):
left_region = left_scope.expression_region()
right_region = right_scope.expression_region()
return left_region.touches(right_region)
result = []
bg_scope_stack = []
for scope in scopes:
kind, index = fg_color_type = color_type_of(scope)
if not suitable(scope, fg_color_type):
continue
mode = config.mode[kind]
if mode is ColorMode.NONE:
continue
while bg_scope_stack and not touching(bg_scope_stack[-1], scope):
bg_scope_stack.pop()
        # materialise the list: a lazy map() would be exhausted after the
        # first extent is coloured
        bg_color_stack = list(map(color_type_of, bg_scope_stack))
for extent in extents_of(scope, mode):
region = ColorableSpan(extent, fg_color_type, bg_color_stack)
result.append(region)
if mode is ColorMode.EXPRESSION:
bg_scope_stack.append(scope)
return result
def split_into_disjoint(spans, lines):
"""Splits a list of colorable spans into disjoint colorable spans.
This is necessary as Sublime Text does not handle region intersections
properly, so we have to split them up by ourselves. Splitting by line
boundaries is necessary to correctly handle bracket and expression
backgrounds for current lines.
Args:
[spans] - a list of colorable spans to be split
[lines] - a list of regions denoting the lines
Returns:
[spans] - a sorted list of disjoint colorable spans
"""
if not spans: return []
# Throwing in fake zero-length spans to denote the line boundaries. They
# will be used only for splitting and will get filtered out of the results.
#
# Lines are (begin, end) where begin is the point at the start of the line
# and end is the one at the newline character. The line boundaries are all
# the beginnings and the trailing end.
def make_linebreak(point):
return ColorableSpan(Region(point, point), None, None)
def linebreak(span):
return (span.foreground is None) and (span.background_stack is None)
    linebreaks = list(map(make_linebreak, [L.begin for L in lines] + [lines[-1].end]))
# We make use of the heap property to efficiently split the spans into
# disjoint parts with a sweeping line algorithm. The resulting span list
# also gets automagically sorted.
def heap_min(heap):
return heap[0]
def heap_min_next(heap):
return min(heap[1], heap[2]) if len(heap) > 2 else heap[1]
spans = spans[:]
spans.extend(linebreaks)
heapify(spans)
def overlap(left_span, right_span):
return left_span.extent.overlaps(right_span.extent)
def left_touch(span1, span2):
return span1.extent.begin == span2.extent.begin
def trim(inner_span, outer_span):
extent = Region(inner_span.extent.end, outer_span.extent.end)
foreground = outer_span.foreground
background_stack = outer_span.background_stack
return ColorableSpan(extent, foreground, background_stack)
def split(outer_span, inner_span):
extent1 = Region(outer_span.extent.begin, inner_span.extent.begin)
extent2 = Region(inner_span.extent.end, outer_span.extent.end)
foreground = outer_span.foreground
background_stack = outer_span.background_stack
return ColorableSpan(extent1, foreground, background_stack), \
ColorableSpan(extent2, foreground, background_stack)
result = []
while len(spans) > 1:
leftmost, next_one = heap_min(spans), heap_min_next(spans)
# Invariant: leftmost must be disjoint from all other spans
if overlap(leftmost, next_one):
if left_touch(leftmost, next_one):
# LL...... -> LL......
# NNNNN... ..FFF...
following = trim(leftmost, next_one)
heappop(spans)
heapreplace(spans, following)
else:
# LLLLLLL. -> LL...FF.
# ..NNN... ..NNN...
leftmost, following = split(leftmost, next_one)
heapreplace(spans, following)
else:
# LLL.....
# ....NNN.
heappop(spans)
if not linebreak(leftmost):
result.append(leftmost)
last_span = spans[0]
if not linebreak(last_span):
result.append(last_span)
return result
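# [ Illustration ] A hedged, standalone rendition of the sweep above on plain
# (begin, end) tuples instead of ColorableSpans, reusing the heapq imports at
# the top of this module. Like the bracket extents, spans are assumed to nest
# or be disjoint; zero-length leftovers from shared edges are filtered out.
# e.g. split_plain_intervals([(0, 10), (3, 6)]) == [(0, 3), (3, 6), (6, 10)]
def split_plain_intervals(intervals):
    if not intervals:
        return []
    heap = list(intervals)
    heapify(heap)
    result = []
    while len(heap) > 1:
        leftmost = heap[0]
        next_one = min(heap[1], heap[2]) if len(heap) > 2 else heap[1]
        if next_one[0] < leftmost[1]:        # they overlap
            if leftmost[0] == next_one[0]:   # shared left edge: keep the short one
                heappop(heap)
                heapreplace(heap, (leftmost[1], next_one[1]))
            else:                            # split the outer around the inner
                heapreplace(heap, (next_one[1], leftmost[1]))
                leftmost = (leftmost[0], next_one[0])
        else:
            heappop(heap)                    # leftmost is already disjoint
        result.append(leftmost)
    result.append(heap[0])
    return [span for span in result if span[0] < span[1]]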
def prepend_background(spans, line_extents):
"""Prepends proper terminating background to colorable spans.
It is necessary to ensure that each colorable span has at least something
in its background color stack, and that this something is not transparent.
This will be either a 'current line' background for spans that are located
in the same line as the cursor or a normal background for everything else.
Args:
[spans] - a list of colorable spans to update
[line_extents] - a list of regions denoting the cursors' lines
Returns:
[spans] - a list of updated colorable spans
"""
    def prepend_background_to(span):  # renamed to avoid shadowing the outer function
current_line_color = [(RegionColor.CURRENT_LINE, None)]
background_color = [(RegionColor.BACKGROUND, None)]
background_stack = span.background_stack
for line in line_extents:
if line.contains(span.extent):
background_stack = current_line_color + background_stack
break
else:
background_stack = background_color + background_stack
return ColorableSpan(span.extent, span.foreground, background_stack)
    return list(map(prepend_background_to, spans))
def compute_span_color(span, config):
"""Determines the exact color of a span, with transparency removed.
Args:
span - a colorable span to be colored
config - the Configuration to use for picking colors
Returns:
(fg, bg) - a tuple of resulting merged color
"""
    def color_of(color_key):
        # tuple parameters are Python 2 only; unpack explicitly instead
        kind, index = color_key
color = config.color[kind]
if isinstance(color, list):
color = color[(index - 1) % len(color)]
return color
foreground, background = color_of(span.foreground)
underlying_background = reversed(span.background_stack)
while background is None:
_, background = color_of(next(underlying_background))
return foreground, background
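# [ Illustration ] A hedged sketch of the transparency resolution above: the
# effective background is the first non-None entry found while walking the
# background stack from the innermost entry outwards. Plain strings stand in
# for resolved (foreground, background) colour pairs.
def _resolve_background_example():
    stack = [('white', 'black'), ('red', None)]  # outermost first
    background = None
    underlying = reversed(stack)
    while background is None:
        _, background = next(underlying)
    return background  # -> 'black'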
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
from heat.engine import translation
class VPNService(neutron.NeutronResource):
"""A resource for VPN service in Neutron.
    VPN service is a high-level object that associates a VPN with a specific
    subnet and router.
"""
required_service_extension = 'vpnaas'
PROPERTIES = (
NAME, DESCRIPTION, ADMIN_STATE_UP,
SUBNET_ID, SUBNET, ROUTER_ID, ROUTER
) = (
'name', 'description', 'admin_state_up',
'subnet_id', 'subnet', 'router_id', 'router'
)
ATTRIBUTES = (
ADMIN_STATE_UP_ATTR, DESCRIPTION_ATTR, NAME_ATTR, ROUTER_ID_ATTR,
STATUS, SUBNET_ID_ATTR, TENANT_ID,
) = (
'admin_state_up', 'description', 'name', 'router_id',
'status', 'subnet_id', 'tenant_id',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the vpn service.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for the vpn service.'),
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('Administrative state for the vpn service.'),
default=True,
update_allowed=True
),
SUBNET_ID: properties.Schema(
properties.Schema.STRING,
support_status=support.SupportStatus(
status=support.HIDDEN,
message=_('Use property %s.') % SUBNET,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2'
)
),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
SUBNET: properties.Schema(
properties.Schema.STRING,
_('Subnet in which the vpn service will be created.'),
support_status=support.SupportStatus(version='2014.2'),
required=True,
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
ROUTER_ID: properties.Schema(
properties.Schema.STRING,
_('Unique identifier for the router to which the vpn service '
'will be inserted.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='6.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s') % ROUTER,
version='2015.1',
previous_status=support.SupportStatus(version='2013.2'))
),
constraints=[
constraints.CustomConstraint('neutron.router')
]
),
ROUTER: properties.Schema(
properties.Schema.STRING,
_('The router to which the vpn service will be inserted.'),
support_status=support.SupportStatus(version='2015.1'),
required=True,
constraints=[
constraints.CustomConstraint('neutron.router')
]
)
}
attributes_schema = {
ADMIN_STATE_UP_ATTR: attributes.Schema(
_('The administrative state of the vpn service.'),
type=attributes.Schema.STRING
),
DESCRIPTION_ATTR: attributes.Schema(
_('The description of the vpn service.'),
type=attributes.Schema.STRING
),
NAME_ATTR: attributes.Schema(
_('The name of the vpn service.'),
type=attributes.Schema.STRING
),
ROUTER_ID_ATTR: attributes.Schema(
_('The unique identifier of the router to which the vpn service '
'was inserted.'),
type=attributes.Schema.STRING
),
STATUS: attributes.Schema(
_('The status of the vpn service.'),
type=attributes.Schema.STRING
),
SUBNET_ID_ATTR: attributes.Schema(
_('The unique identifier of the subnet in which the vpn service '
'was created.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('The unique identifier of the tenant owning the vpn service.'),
type=attributes.Schema.STRING
),
}
def translation_rules(self, props):
return [
translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
[self.SUBNET],
value_path=[self.SUBNET_ID]
),
translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
[self.ROUTER],
value_path=[self.ROUTER_ID]
)
]
def _show_resource(self):
return self.client().show_vpnservice(self.resource_id)['vpnservice']
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
self.client_plugin().resolve_subnet(props, self.SUBNET, 'subnet_id')
self.client_plugin().resolve_router(props, self.ROUTER, 'router_id')
vpnservice = self.client().create_vpnservice({'vpnservice': props})[
'vpnservice']
self.resource_id_set(vpnservice['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.prepare_update_properties(prop_diff)
self.client().update_vpnservice(self.resource_id,
{'vpnservice': prop_diff})
def handle_delete(self):
try:
self.client().delete_vpnservice(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
class IPsecSiteConnection(neutron.NeutronResource):
"""A resource for IPsec site connection in Neutron.
This resource has details for the site-to-site IPsec connection, including
the peer CIDRs, MTU, peer address, DPD settings and status.
"""
required_service_extension = 'vpnaas'
PROPERTIES = (
NAME, DESCRIPTION, PEER_ADDRESS, PEER_ID, PEER_CIDRS, MTU,
DPD, PSK, INITIATOR, ADMIN_STATE_UP, IKEPOLICY_ID,
IPSECPOLICY_ID, VPNSERVICE_ID,
) = (
'name', 'description', 'peer_address', 'peer_id', 'peer_cidrs', 'mtu',
'dpd', 'psk', 'initiator', 'admin_state_up', 'ikepolicy_id',
'ipsecpolicy_id', 'vpnservice_id',
)
_DPD_KEYS = (
DPD_ACTIONS, DPD_INTERVAL, DPD_TIMEOUT,
) = (
'actions', 'interval', 'timeout',
)
ATTRIBUTES = (
ADMIN_STATE_UP_ATTR, AUTH_MODE, DESCRIPTION_ATTR, DPD_ATTR,
IKEPOLICY_ID_ATTR, INITIATOR_ATTR, IPSECPOLICY_ID_ATTR, MTU_ATTR,
NAME_ATTR, PEER_ADDRESS_ATTR, PEER_CIDRS_ATTR, PEER_ID_ATTR, PSK_ATTR,
ROUTE_MODE, STATUS, TENANT_ID, VPNSERVICE_ID_ATTR,
) = (
'admin_state_up', 'auth_mode', 'description', 'dpd',
'ikepolicy_id', 'initiator', 'ipsecpolicy_id', 'mtu',
'name', 'peer_address', 'peer_cidrs', 'peer_id', 'psk',
'route_mode', 'status', 'tenant_id', 'vpnservice_id',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the ipsec site connection.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for the ipsec site connection.'),
update_allowed=True
),
PEER_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('Remote branch router public IPv4 address or IPv6 address or '
'FQDN.'),
required=True
),
PEER_ID: properties.Schema(
properties.Schema.STRING,
_('Remote branch router identity.'),
required=True
),
PEER_CIDRS: properties.Schema(
properties.Schema.LIST,
_('Remote subnet(s) in CIDR format.'),
required=True,
schema=properties.Schema(
properties.Schema.STRING,
constraints=[
constraints.CustomConstraint('net_cidr')
]
)
),
MTU: properties.Schema(
properties.Schema.INTEGER,
_('Maximum transmission unit size (in bytes) for the ipsec site '
'connection.'),
default=1500
),
DPD: properties.Schema(
properties.Schema.MAP,
_('Dead Peer Detection protocol configuration for the ipsec site '
'connection.'),
schema={
DPD_ACTIONS: properties.Schema(
properties.Schema.STRING,
_('Controls DPD protocol mode.'),
default='hold',
constraints=[
constraints.AllowedValues(['clear', 'disabled',
'hold', 'restart',
'restart-by-peer']),
]
),
DPD_INTERVAL: properties.Schema(
properties.Schema.INTEGER,
_('Number of seconds for the DPD delay.'),
default=30
),
DPD_TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('Number of seconds for the DPD timeout.'),
default=120
),
}
),
PSK: properties.Schema(
properties.Schema.STRING,
_('Pre-shared key string for the ipsec site connection.'),
required=True
),
INITIATOR: properties.Schema(
properties.Schema.STRING,
_('Initiator state in lowercase for the ipsec site connection.'),
default='bi-directional',
constraints=[
constraints.AllowedValues(['bi-directional', 'response-only']),
]
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('Administrative state for the ipsec site connection.'),
default=True,
update_allowed=True
),
IKEPOLICY_ID: properties.Schema(
properties.Schema.STRING,
_('Unique identifier for the ike policy associated with the '
'ipsec site connection.'),
required=True
),
IPSECPOLICY_ID: properties.Schema(
properties.Schema.STRING,
_('Unique identifier for the ipsec policy associated with the '
'ipsec site connection.'),
required=True
),
VPNSERVICE_ID: properties.Schema(
properties.Schema.STRING,
_('Unique identifier for the vpn service associated with the '
'ipsec site connection.'),
required=True
),
}
attributes_schema = {
ADMIN_STATE_UP_ATTR: attributes.Schema(
_('The administrative state of the ipsec site connection.'),
type=attributes.Schema.STRING
),
AUTH_MODE: attributes.Schema(
_('The authentication mode of the ipsec site connection.'),
type=attributes.Schema.STRING
),
DESCRIPTION_ATTR: attributes.Schema(
_('The description of the ipsec site connection.'),
type=attributes.Schema.STRING
),
DPD_ATTR: attributes.Schema(
_('The dead peer detection protocol configuration of the ipsec '
'site connection.'),
type=attributes.Schema.MAP
),
IKEPOLICY_ID_ATTR: attributes.Schema(
_('The unique identifier of ike policy associated with the ipsec '
'site connection.'),
type=attributes.Schema.STRING
),
INITIATOR_ATTR: attributes.Schema(
_('The initiator of the ipsec site connection.'),
type=attributes.Schema.STRING
),
IPSECPOLICY_ID_ATTR: attributes.Schema(
_('The unique identifier of ipsec policy associated with the '
'ipsec site connection.'),
type=attributes.Schema.STRING
),
MTU_ATTR: attributes.Schema(
_('The maximum transmission unit size (in bytes) of the ipsec '
'site connection.'),
type=attributes.Schema.STRING
),
NAME_ATTR: attributes.Schema(
_('The name of the ipsec site connection.'),
type=attributes.Schema.STRING
),
PEER_ADDRESS_ATTR: attributes.Schema(
_('The remote branch router public IPv4 address or IPv6 address '
'or FQDN.'),
type=attributes.Schema.STRING
),
PEER_CIDRS_ATTR: attributes.Schema(
_('The remote subnet(s) in CIDR format of the ipsec site '
'connection.'),
type=attributes.Schema.LIST
),
PEER_ID_ATTR: attributes.Schema(
_('The remote branch router identity of the ipsec site '
'connection.'),
type=attributes.Schema.STRING
),
PSK_ATTR: attributes.Schema(
_('The pre-shared key string of the ipsec site connection.'),
type=attributes.Schema.STRING
),
ROUTE_MODE: attributes.Schema(
_('The route mode of the ipsec site connection.'),
type=attributes.Schema.STRING
),
STATUS: attributes.Schema(
_('The status of the ipsec site connection.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('The unique identifier of the tenant owning the ipsec site '
'connection.'),
type=attributes.Schema.STRING
),
VPNSERVICE_ID_ATTR: attributes.Schema(
_('The unique identifier of vpn service associated with the ipsec '
'site connection.'),
type=attributes.Schema.STRING
),
}
def _show_resource(self):
return self.client().show_ipsec_site_connection(self.resource_id)[
'ipsec_site_connection']
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
ipsec_site_connection = self.client().create_ipsec_site_connection(
{'ipsec_site_connection': props})['ipsec_site_connection']
self.resource_id_set(ipsec_site_connection['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.client().update_ipsec_site_connection(
self.resource_id, {'ipsec_site_connection': prop_diff})
def handle_delete(self):
try:
self.client().delete_ipsec_site_connection(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
class IKEPolicy(neutron.NeutronResource):
"""A resource for IKE policy in Neutron.
    The Internet Key Exchange policy identifies the authentication and
encryption algorithm used during phase one and phase two negotiation of a
VPN connection.
"""
required_service_extension = 'vpnaas'
PROPERTIES = (
NAME, DESCRIPTION, AUTH_ALGORITHM, ENCRYPTION_ALGORITHM,
PHASE1_NEGOTIATION_MODE, LIFETIME, PFS, IKE_VERSION,
) = (
'name', 'description', 'auth_algorithm', 'encryption_algorithm',
'phase1_negotiation_mode', 'lifetime', 'pfs', 'ike_version',
)
_LIFETIME_KEYS = (
LIFETIME_UNITS, LIFETIME_VALUE,
) = (
'units', 'value',
)
ATTRIBUTES = (
AUTH_ALGORITHM_ATTR, DESCRIPTION_ATTR, ENCRYPTION_ALGORITHM_ATTR,
IKE_VERSION_ATTR, LIFETIME_ATTR, NAME_ATTR, PFS_ATTR,
PHASE1_NEGOTIATION_MODE_ATTR, TENANT_ID,
) = (
'auth_algorithm', 'description', 'encryption_algorithm',
'ike_version', 'lifetime', 'name', 'pfs',
'phase1_negotiation_mode', 'tenant_id',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the ike policy.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for the ike policy.'),
update_allowed=True
),
AUTH_ALGORITHM: properties.Schema(
properties.Schema.STRING,
_('Authentication hash algorithm for the ike policy.'),
default='sha1',
constraints=[
constraints.AllowedValues(['sha1']),
]
),
ENCRYPTION_ALGORITHM: properties.Schema(
properties.Schema.STRING,
_('Encryption algorithm for the ike policy.'),
default='aes-128',
constraints=[
constraints.AllowedValues(['3des', 'aes-128', 'aes-192',
'aes-256']),
]
),
PHASE1_NEGOTIATION_MODE: properties.Schema(
properties.Schema.STRING,
_('Negotiation mode for the ike policy.'),
default='main',
constraints=[
constraints.AllowedValues(['main']),
]
),
LIFETIME: properties.Schema(
properties.Schema.MAP,
_('Safety assessment lifetime configuration for the ike policy.'),
schema={
LIFETIME_UNITS: properties.Schema(
properties.Schema.STRING,
_('Safety assessment lifetime units.'),
default='seconds',
constraints=[
constraints.AllowedValues(['seconds', 'kilobytes']),
]
),
LIFETIME_VALUE: properties.Schema(
properties.Schema.INTEGER,
_('Safety assessment lifetime value in specified '
'units.'),
default=3600
),
}
),
PFS: properties.Schema(
properties.Schema.STRING,
_('Perfect forward secrecy in lowercase for the ike policy.'),
default='group5',
constraints=[
constraints.AllowedValues(['group2', 'group5', 'group14']),
]
),
IKE_VERSION: properties.Schema(
properties.Schema.STRING,
_('Version for the ike policy.'),
default='v1',
constraints=[
constraints.AllowedValues(['v1', 'v2']),
]
),
}
attributes_schema = {
AUTH_ALGORITHM_ATTR: attributes.Schema(
_('The authentication hash algorithm used by the ike policy.'),
type=attributes.Schema.STRING
),
DESCRIPTION_ATTR: attributes.Schema(
_('The description of the ike policy.'),
type=attributes.Schema.STRING
),
ENCRYPTION_ALGORITHM_ATTR: attributes.Schema(
_('The encryption algorithm used by the ike policy.'),
type=attributes.Schema.STRING
),
IKE_VERSION_ATTR: attributes.Schema(
_('The version of the ike policy.'),
type=attributes.Schema.STRING
),
LIFETIME_ATTR: attributes.Schema(
_('The safety assessment lifetime configuration for the ike '
'policy.'),
type=attributes.Schema.MAP
),
NAME_ATTR: attributes.Schema(
_('The name of the ike policy.'),
type=attributes.Schema.STRING
),
PFS_ATTR: attributes.Schema(
_('The perfect forward secrecy of the ike policy.'),
type=attributes.Schema.STRING
),
PHASE1_NEGOTIATION_MODE_ATTR: attributes.Schema(
_('The negotiation mode of the ike policy.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('The unique identifier of the tenant owning the ike policy.'),
type=attributes.Schema.STRING
),
}
def _show_resource(self):
return self.client().show_ikepolicy(self.resource_id)['ikepolicy']
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
ikepolicy = self.client().create_ikepolicy({'ikepolicy': props})[
'ikepolicy']
self.resource_id_set(ikepolicy['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.client().update_ikepolicy(self.resource_id,
{'ikepolicy': prop_diff})
def handle_delete(self):
try:
self.client().delete_ikepolicy(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
class IPsecPolicy(neutron.NeutronResource):
"""A resource for IPsec policy in Neutron.
    The IP security policy specifies the authentication and encryption
    algorithms and the encapsulation mode used for the established VPN
    connection.
"""
required_service_extension = 'vpnaas'
PROPERTIES = (
NAME, DESCRIPTION, TRANSFORM_PROTOCOL, ENCAPSULATION_MODE,
AUTH_ALGORITHM, ENCRYPTION_ALGORITHM, LIFETIME, PFS,
) = (
'name', 'description', 'transform_protocol', 'encapsulation_mode',
'auth_algorithm', 'encryption_algorithm', 'lifetime', 'pfs',
)
_LIFETIME_KEYS = (
LIFETIME_UNITS, LIFETIME_VALUE,
) = (
'units', 'value',
)
ATTRIBUTES = (
AUTH_ALGORITHM_ATTR, DESCRIPTION_ATTR, ENCAPSULATION_MODE_ATTR,
ENCRYPTION_ALGORITHM_ATTR, LIFETIME_ATTR, NAME_ATTR, PFS_ATTR,
TENANT_ID, TRANSFORM_PROTOCOL_ATTR,
) = (
'auth_algorithm', 'description', 'encapsulation_mode',
'encryption_algorithm', 'lifetime', 'name', 'pfs',
'tenant_id', 'transform_protocol',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the ipsec policy.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for the ipsec policy.'),
update_allowed=True
),
TRANSFORM_PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Transform protocol for the ipsec policy.'),
default='esp',
constraints=[
constraints.AllowedValues(['esp', 'ah', 'ah-esp']),
]
),
ENCAPSULATION_MODE: properties.Schema(
properties.Schema.STRING,
_('Encapsulation mode for the ipsec policy.'),
default='tunnel',
constraints=[
constraints.AllowedValues(['tunnel', 'transport']),
]
),
AUTH_ALGORITHM: properties.Schema(
properties.Schema.STRING,
_('Authentication hash algorithm for the ipsec policy.'),
default='sha1',
constraints=[
constraints.AllowedValues(['sha1']),
]
),
ENCRYPTION_ALGORITHM: properties.Schema(
properties.Schema.STRING,
_('Encryption algorithm for the ipsec policy.'),
default='aes-128',
constraints=[
constraints.AllowedValues(['3des', 'aes-128', 'aes-192',
'aes-256']),
]
),
LIFETIME: properties.Schema(
properties.Schema.MAP,
_('Safety assessment lifetime configuration for the ipsec '
'policy.'),
schema={
LIFETIME_UNITS: properties.Schema(
properties.Schema.STRING,
_('Safety assessment lifetime units.'),
default='seconds',
constraints=[
constraints.AllowedValues(['seconds',
'kilobytes']),
]
),
LIFETIME_VALUE: properties.Schema(
properties.Schema.INTEGER,
_('Safety assessment lifetime value in specified '
'units.'),
default=3600
),
}
),
PFS: properties.Schema(
properties.Schema.STRING,
_('Perfect forward secrecy for the ipsec policy.'),
default='group5',
constraints=[
constraints.AllowedValues(['group2', 'group5', 'group14']),
]
),
}
attributes_schema = {
AUTH_ALGORITHM_ATTR: attributes.Schema(
_('The authentication hash algorithm of the ipsec policy.'),
type=attributes.Schema.STRING
),
DESCRIPTION_ATTR: attributes.Schema(
_('The description of the ipsec policy.'),
type=attributes.Schema.STRING
),
ENCAPSULATION_MODE_ATTR: attributes.Schema(
_('The encapsulation mode of the ipsec policy.'),
type=attributes.Schema.STRING
),
ENCRYPTION_ALGORITHM_ATTR: attributes.Schema(
_('The encryption algorithm of the ipsec policy.'),
type=attributes.Schema.STRING
),
LIFETIME_ATTR: attributes.Schema(
_('The safety assessment lifetime configuration of the ipsec '
'policy.'),
type=attributes.Schema.MAP
),
NAME_ATTR: attributes.Schema(
_('The name of the ipsec policy.'),
type=attributes.Schema.STRING
),
PFS_ATTR: attributes.Schema(
_('The perfect forward secrecy of the ipsec policy.'),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_('The unique identifier of the tenant owning the ipsec policy.'),
type=attributes.Schema.STRING
),
TRANSFORM_PROTOCOL_ATTR: attributes.Schema(
_('The transform protocol of the ipsec policy.'),
type=attributes.Schema.STRING
),
}
def _show_resource(self):
return self.client().show_ipsecpolicy(self.resource_id)['ipsecpolicy']
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
ipsecpolicy = self.client().create_ipsecpolicy(
{'ipsecpolicy': props})['ipsecpolicy']
self.resource_id_set(ipsecpolicy['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.client().update_ipsecpolicy(self.resource_id,
{'ipsecpolicy': prop_diff})
def handle_delete(self):
try:
self.client().delete_ipsecpolicy(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
def resource_mapping():
return {
'OS::Neutron::VPNService': VPNService,
'OS::Neutron::IPsecSiteConnection': IPsecSiteConnection,
'OS::Neutron::IKEPolicy': IKEPolicy,
'OS::Neutron::IPsecPolicy': IPsecPolicy,
}
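# [ Illustration ] A hedged example of wiring these resources together in a
# HOT template; names and addresses are invented, and non-required properties
# are omitted (the schemas above supply the defaults).
#
#   heat_template_version: 2015-04-30
#   resources:
#     vpn_service:
#       type: OS::Neutron::VPNService
#       properties:
#         subnet: my_subnet
#         router: my_router
#     ike_policy:
#       type: OS::Neutron::IKEPolicy
#     ipsec_policy:
#       type: OS::Neutron::IPsecPolicy
#     site_connection:
#       type: OS::Neutron::IPsecSiteConnection
#       properties:
#         peer_address: 172.24.4.233
#         peer_id: 172.24.4.233
#         peer_cidrs: [10.2.0.0/24]
#         psk: secret
#         ikepolicy_id: { get_resource: ike_policy }
#         ipsecpolicy_id: { get_resource: ipsec_policy }
#         vpnservice_id: { get_resource: vpn_service }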
|
|
from __future__ import unicode_literals
import re
import xml.etree.ElementTree
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
)
class VevoIE(InfoExtractor):
"""
Accepts urls from vevo.com or in the format 'vevo:{id}'
(currently used by MTVIE and MySpaceIE)
"""
_VALID_URL = r'''(?x)
(?:https?://www\.vevo\.com/watch/(?:[^/]+/(?:[^/]+/)?)?|
https?://cache\.vevo\.com/m/html/embed\.html\?video=|
https?://videoplayer\.vevo\.com/embed/embedded\?videoId=|
vevo:)
(?P<id>[^&?#]+)'''
_TESTS = [{
'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
"md5": "95ee28ee45e70130e3ab02b0f579ae23",
'info_dict': {
'id': 'GB1101300280',
'ext': 'mp4',
"upload_date": "20130624",
"uploader": "Hurts",
"title": "Somebody to Die For",
"duration": 230.12,
"width": 1920,
"height": 1080,
# timestamp and upload_date are often incorrect; seem to change randomly
'timestamp': int,
}
}, {
'note': 'v3 SMIL format',
'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
'info_dict': {
'id': 'USUV71302923',
'ext': 'mp4',
'upload_date': '20140219',
'uploader': 'Cassadee Pope',
'title': 'I Wish I Could Break Your Heart',
'duration': 226.101,
'age_limit': 0,
'timestamp': int,
}
}, {
'note': 'Age-limited video',
'url': 'https://www.vevo.com/watch/justin-timberlake/tunnel-vision-explicit/USRV81300282',
'info_dict': {
'id': 'USRV81300282',
'ext': 'mp4',
'age_limit': 18,
'title': 'Tunnel Vision (Explicit)',
'uploader': 'Justin Timberlake',
'upload_date': 're:2013070[34]',
'timestamp': int,
},
'params': {
'skip_download': 'true',
}
}]
_SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/'
def _real_initialize(self):
req = compat_urllib_request.Request(
'http://www.vevo.com/auth', data=b'')
webpage = self._download_webpage(
req, None,
note='Retrieving oauth token',
errnote='Unable to retrieve oauth token',
fatal=False)
if webpage is False:
self._oauth_token = None
else:
self._oauth_token = self._search_regex(
r'access_token":\s*"([^"]+)"',
webpage, 'access token', fatal=False)
def _formats_from_json(self, video_info):
last_version = {'version': -1}
for version in video_info['videoVersions']:
# These are the HTTP downloads, other types are for different manifests
if version['sourceType'] == 2:
if version['version'] > last_version['version']:
last_version = version
if last_version['version'] == -1:
raise ExtractorError('Unable to extract last version of the video')
renditions = xml.etree.ElementTree.fromstring(last_version['data'])
formats = []
# Already sorted from worst to best quality
for rend in renditions.findall('rendition'):
attr = rend.attrib
format_note = '%(videoCodec)s@%(videoBitrate)4sk, %(audioCodec)s@%(audioBitrate)3sk' % attr
formats.append({
'url': attr['url'],
'format_id': attr['name'],
'format_note': format_note,
'height': int(attr['frameheight']),
'width': int(attr['frameWidth']),
})
return formats
def _formats_from_smil(self, smil_xml):
formats = []
smil_doc = xml.etree.ElementTree.fromstring(smil_xml.encode('utf-8'))
els = smil_doc.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
for el in els:
src = el.attrib['src']
m = re.match(r'''(?xi)
(?P<ext>[a-z0-9]+):
(?P<path>
[/a-z0-9]+ # The directory and main part of the URL
_(?P<cbr>[0-9]+)k
_(?P<width>[0-9]+)x(?P<height>[0-9]+)
_(?P<vcodec>[a-z0-9]+)
_(?P<vbr>[0-9]+)
_(?P<acodec>[a-z0-9]+)
_(?P<abr>[0-9]+)
\.[a-z0-9]+ # File extension
)''', src)
if not m:
continue
format_url = self._SMIL_BASE_URL + m.group('path')
formats.append({
'url': format_url,
'format_id': 'SMIL_' + m.group('cbr'),
'vcodec': m.group('vcodec'),
'acodec': m.group('acodec'),
'vbr': int(m.group('vbr')),
'abr': int(m.group('abr')),
'ext': m.group('ext'),
'width': int(m.group('width')),
'height': int(m.group('height')),
})
return formats
def _download_api_formats(self, video_id):
if not self._oauth_token:
self._downloader.report_warning(
'No oauth token available, skipping API HLS download')
return []
api_url = 'https://apiv2.vevo.com/video/%s/streams/hls?token=%s' % (
video_id, self._oauth_token)
api_data = self._download_json(
api_url, video_id,
note='Downloading HLS formats',
errnote='Failed to download HLS format list', fatal=False)
if api_data is None:
return []
m3u8_url = api_data[0]['url']
return self._extract_m3u8_formats(
m3u8_url, video_id, entry_protocol='m3u8_native', ext='mp4',
preference=0)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
response = self._download_json(json_url, video_id)
video_info = response['video']
if not video_info:
if 'statusMessage' in response:
raise ExtractorError('%s said: %s' % (self.IE_NAME, response['statusMessage']), expected=True)
raise ExtractorError('Unable to extract videos')
formats = self._formats_from_json(video_info)
is_explicit = video_info.get('isExplicit')
if is_explicit is True:
age_limit = 18
elif is_explicit is False:
age_limit = 0
else:
age_limit = None
# Download via HLS API
formats.extend(self._download_api_formats(video_id))
# Download SMIL
smil_blocks = sorted((
f for f in video_info['videoVersions']
if f['sourceType'] == 13),
key=lambda f: f['version'])
smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % (
self._SMIL_BASE_URL, video_id, video_id.lower())
if smil_blocks:
smil_url_m = self._search_regex(
r'url="([^"]+)"', smil_blocks[-1]['data'], 'SMIL URL',
default=None)
if smil_url_m is not None:
smil_url = smil_url_m
if smil_url:
smil_xml = self._download_webpage(
smil_url, video_id, 'Downloading SMIL info', fatal=False)
if smil_xml:
formats.extend(self._formats_from_smil(smil_xml))
self._sort_formats(formats)
timestamp_ms = int_or_none(self._search_regex(
r'/Date\((\d+)\)/',
video_info['launchDate'], 'launch date', fatal=False))
return {
'id': video_id,
'title': video_info['title'],
'formats': formats,
'thumbnail': video_info['imageUrl'],
            'timestamp': int_or_none(timestamp_ms, 1000),
'uploader': video_info['mainArtists'][0]['artistName'],
'duration': video_info['duration'],
'age_limit': age_limit,
}
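# [ Illustration ] A hedged, standalone sketch of the launchDate handling in
# _real_extract above: Vevo serialises dates in the ASP.NET JSON form
# '/Date(<milliseconds>)/', which is reduced to a Unix timestamp in seconds.
# The sample value is invented.
def _parse_aspnet_date_example(value='/Date(1372982400000)/'):
    m = re.search(r'/Date\((\d+)\)/', value)
    return int(m.group(1)) // 1000 if m else None  # -> 1372982400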
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Reader ops from io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import os
import shutil
import threading
import zlib
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
prefix_path = "tensorflow/core/lib"
# pylint: disable=invalid-name
TFRecordCompressionType = tf_record.TFRecordCompressionType
# pylint: enable=invalid-name
# Edgar Allan Poe's 'Eldorado'
_TEXT = b"""Gaily bedight,
A gallant knight,
In sunshine and in shadow,
Had journeyed long,
Singing a song,
In search of Eldorado.
But he grew old
This knight so bold
And o'er his heart a shadow
Fell as he found
No spot of ground
That looked like Eldorado.
And, as his strength
Failed him at length,
He met a pilgrim shadow
'Shadow,' said he,
'Where can it be
This land of Eldorado?'
'Over the Mountains
Of the Moon'
Down the Valley of the Shadow,
Ride, boldly ride,'
The shade replied,
'If you seek for Eldorado!'
"""
class IdentityReaderTest(test.TestCase):
def _ExpectRead(self, sess, key, value, expected):
k, v = sess.run([key, value])
self.assertAllEqual(expected, k)
self.assertAllEqual(expected, v)
def testOneEpoch(self):
with self.test_session() as sess:
reader = io_ops.IdentityReader("test_reader")
work_completed = reader.num_work_units_completed()
produced = reader.num_records_produced()
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queued_length = queue.size()
key, value = reader.read(queue)
self.assertAllEqual(0, work_completed.eval())
self.assertAllEqual(0, produced.eval())
self.assertAllEqual(0, queued_length.eval())
queue.enqueue_many([["A", "B", "C"]]).run()
queue.close().run()
self.assertAllEqual(3, queued_length.eval())
self._ExpectRead(sess, key, value, b"A")
self.assertAllEqual(1, produced.eval())
self._ExpectRead(sess, key, value, b"B")
self._ExpectRead(sess, key, value, b"C")
self.assertAllEqual(3, produced.eval())
self.assertAllEqual(0, queued_length.eval())
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
self.assertAllEqual(3, work_completed.eval())
self.assertAllEqual(3, produced.eval())
self.assertAllEqual(0, queued_length.eval())
def testMultipleEpochs(self):
with self.test_session() as sess:
reader = io_ops.IdentityReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
enqueue = queue.enqueue_many([["DD", "EE"]])
key, value = reader.read(queue)
enqueue.run()
self._ExpectRead(sess, key, value, b"DD")
self._ExpectRead(sess, key, value, b"EE")
enqueue.run()
self._ExpectRead(sess, key, value, b"DD")
self._ExpectRead(sess, key, value, b"EE")
enqueue.run()
self._ExpectRead(sess, key, value, b"DD")
self._ExpectRead(sess, key, value, b"EE")
queue.close().run()
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
def testSerializeRestore(self):
with self.test_session() as sess:
reader = io_ops.IdentityReader("test_reader")
produced = reader.num_records_produced()
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queue.enqueue_many([["X", "Y", "Z"]]).run()
key, value = reader.read(queue)
self._ExpectRead(sess, key, value, b"X")
self.assertAllEqual(1, produced.eval())
state = reader.serialize_state().eval()
self._ExpectRead(sess, key, value, b"Y")
self._ExpectRead(sess, key, value, b"Z")
self.assertAllEqual(3, produced.eval())
queue.enqueue_many([["Y", "Z"]]).run()
queue.close().run()
reader.restore_state(state).run()
self.assertAllEqual(1, produced.eval())
self._ExpectRead(sess, key, value, b"Y")
self._ExpectRead(sess, key, value, b"Z")
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
self.assertAllEqual(3, produced.eval())
self.assertEqual(bytes, type(state))
with self.assertRaises(ValueError):
reader.restore_state([])
with self.assertRaises(ValueError):
reader.restore_state([state, state])
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(state[1:]).run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(state[:-1]).run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(state + b"ExtraJunk").run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(b"PREFIX" + state).run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(b"BOGUS" + state[5:]).run()
def testReset(self):
with self.test_session() as sess:
reader = io_ops.IdentityReader("test_reader")
work_completed = reader.num_work_units_completed()
produced = reader.num_records_produced()
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queued_length = queue.size()
key, value = reader.read(queue)
queue.enqueue_many([["X", "Y", "Z"]]).run()
self._ExpectRead(sess, key, value, b"X")
self.assertLess(0, queued_length.eval())
self.assertAllEqual(1, produced.eval())
self._ExpectRead(sess, key, value, b"Y")
self.assertLess(0, work_completed.eval())
self.assertAllEqual(2, produced.eval())
reader.reset().run()
self.assertAllEqual(0, work_completed.eval())
self.assertAllEqual(0, produced.eval())
self.assertAllEqual(1, queued_length.eval())
self._ExpectRead(sess, key, value, b"Z")
queue.enqueue_many([["K", "L"]]).run()
self._ExpectRead(sess, key, value, b"K")
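# Every reader test below follows the same pattern exercised above: work
# units (filenames or literal strings) are fed through a string FIFOQueue,
# and reader.read(queue) dequeues one unit at a time, emitting (key, value)
# records until the queue is closed and exhausted.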
class WholeFileReaderTest(test.TestCase):
def setUp(self):
super(WholeFileReaderTest, self).setUp()
self._filenames = [
os.path.join(self.get_temp_dir(), "whole_file.%d.txt" % i)
for i in range(3)
]
self._content = [b"One\na\nb\n", b"Two\nC\nD", b"Three x, y, z"]
for fn, c in zip(self._filenames, self._content):
with open(fn, "wb") as h:
h.write(c)
def tearDown(self):
for fn in self._filenames:
os.remove(fn)
super(WholeFileReaderTest, self).tearDown()
def _ExpectRead(self, sess, key, value, index):
k, v = sess.run([key, value])
self.assertAllEqual(compat.as_bytes(self._filenames[index]), k)
self.assertAllEqual(self._content[index], v)
def testOneEpoch(self):
with self.test_session() as sess:
reader = io_ops.WholeFileReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queue.enqueue_many([self._filenames]).run()
queue.close().run()
key, value = reader.read(queue)
self._ExpectRead(sess, key, value, 0)
self._ExpectRead(sess, key, value, 1)
self._ExpectRead(sess, key, value, 2)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
def testInfiniteEpochs(self):
with self.test_session() as sess:
reader = io_ops.WholeFileReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
enqueue = queue.enqueue_many([self._filenames])
key, value = reader.read(queue)
enqueue.run()
self._ExpectRead(sess, key, value, 0)
self._ExpectRead(sess, key, value, 1)
enqueue.run()
self._ExpectRead(sess, key, value, 2)
self._ExpectRead(sess, key, value, 0)
self._ExpectRead(sess, key, value, 1)
enqueue.run()
self._ExpectRead(sess, key, value, 2)
self._ExpectRead(sess, key, value, 0)
class TextLineReaderTest(test.TestCase):
def setUp(self):
super(TextLineReaderTest, self).setUp()
self._num_files = 2
self._num_lines = 5
def _LineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _CreateFiles(self, crlf=False):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
for j in range(self._num_lines):
f.write(self._LineText(i, j))
# Always include a newline after the record unless it is
# at the end of the file, in which case we include it sometimes.
if j + 1 != self._num_lines or i == 0:
f.write(b"\r\n" if crlf else b"\n")
return filenames
def _testOneEpoch(self, files):
with self.test_session() as sess:
reader = io_ops.TextLineReader(name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_lines):
k, v = sess.run([key, value])
self.assertAllEqual("%s:%d" % (files[i], j + 1), compat.as_text(k))
self.assertAllEqual(self._LineText(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testOneEpochLF(self):
self._testOneEpoch(self._CreateFiles(crlf=False))
def testOneEpochCRLF(self):
self._testOneEpoch(self._CreateFiles(crlf=True))
def testSkipHeaderLines(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = io_ops.TextLineReader(skip_header_lines=1, name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_lines - 1):
k, v = sess.run([key, value])
self.assertAllEqual("%s:%d" % (files[i], j + 2), compat.as_text(k))
self.assertAllEqual(self._LineText(i, j + 1), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
class FixedLengthRecordReaderTest(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTest, self).setUp()
self._num_files = 2
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
self._hop_bytes = 2
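  # With the sizes above, a non-overlapped file is laid out as
  #   HHHHH rrr [G*gap rrr ...] FF
  # (5-byte header, 3-byte records with optional gap bytes, 2-byte footer);
  # the overlapped variant advances only hop_bytes (2) per record, so
  # consecutive 3-byte records share one byte.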
def _Record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _OverlappedRecord(self, f, r):
record_str = "".join([
str(i)[0]
for i in range(r * self._hop_bytes,
r * self._hop_bytes + self._record_bytes)
])
return compat.as_bytes(record_str)
# gap_bytes=hop_bytes-record_bytes
def _CreateFiles(self, num_records, gap_bytes):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
if num_records > 0:
f.write(self._Record(i, 0))
for j in range(1, num_records):
if gap_bytes > 0:
f.write(b"G" * gap_bytes)
f.write(self._Record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
def _CreateOverlappedRecordFiles(self, num_overlapped_records):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(),
"fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
if num_overlapped_records > 0:
all_records_str = "".join([
str(i)[0]
for i in range(self._record_bytes + self._hop_bytes *
(num_overlapped_records - 1))
])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
return filenames
# gap_bytes=hop_bytes-record_bytes
def _CreateGzipFiles(self, num_records, gap_bytes):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with gzip.GzipFile(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
if num_records > 0:
f.write(self._Record(i, 0))
for j in range(1, num_records):
if gap_bytes > 0:
f.write(b"G" * gap_bytes)
f.write(self._Record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
# gap_bytes=hop_bytes-record_bytes
def _CreateZlibFiles(self, num_records, gap_bytes):
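    # zlib (unlike gzip) has no file-object writer in the stdlib, so the data
    # is written uncompressed to a .tmp file first and then compressed whole.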
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn+".tmp", "wb") as f:
f.write(b"H" * self._header_bytes)
if num_records > 0:
f.write(self._Record(i, 0))
for j in range(1, num_records):
if gap_bytes > 0:
f.write(b"G" * gap_bytes)
f.write(self._Record(i, j))
f.write(b"F" * self._footer_bytes)
with open(fn+".tmp", "rb") as f:
cdata = zlib.compress(f.read())
with open(fn, "wb") as zf:
zf.write(cdata)
return filenames
def _CreateGzipOverlappedRecordFiles(self, num_overlapped_records):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(),
"fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with gzip.GzipFile(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
if num_overlapped_records > 0:
all_records_str = "".join([
str(i)[0]
for i in range(self._record_bytes + self._hop_bytes *
(num_overlapped_records - 1))
])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
return filenames
def _CreateZlibOverlappedRecordFiles(self, num_overlapped_records):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(),
"fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with open(fn+".tmp", "wb") as f:
f.write(b"H" * self._header_bytes)
if num_overlapped_records > 0:
all_records_str = "".join([
str(i)[0]
for i in range(self._record_bytes + self._hop_bytes *
(num_overlapped_records - 1))
])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
with open(fn+".tmp", "rb") as f:
cdata = zlib.compress(f.read())
with open(fn, "wb") as zf:
zf.write(cdata)
return filenames
# gap_bytes=hop_bytes-record_bytes
def _TestOneEpoch(self, files, num_records, gap_bytes, encoding=None):
hop_bytes = 0 if gap_bytes == 0 else self._record_bytes + gap_bytes
with self.test_session() as sess:
reader = io_ops.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
hop_bytes=hop_bytes,
encoding=encoding,
name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(num_records):
k, v = sess.run([key, value])
self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def _TestOneEpochWithHopBytes(self, files, num_overlapped_records, encoding=None):
with self.test_session() as sess:
reader = io_ops.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
hop_bytes=self._hop_bytes,
encoding=encoding,
name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(num_overlapped_records):
k, v = sess.run([key, value])
print(v)
self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
self.assertAllEqual(self._OverlappedRecord(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testOneEpoch(self):
for num_records in [0, 7]:
# gap_bytes=0: hop_bytes=0
# gap_bytes=1: hop_bytes=record_bytes+1
for gap_bytes in [0, 1]:
files = self._CreateFiles(num_records, gap_bytes)
self._TestOneEpoch(files, num_records, gap_bytes)
def testGzipOneEpoch(self):
for num_records in [0, 7]:
# gap_bytes=0: hop_bytes=0
# gap_bytes=1: hop_bytes=record_bytes+1
for gap_bytes in [0, 1]:
files = self._CreateGzipFiles(num_records, gap_bytes)
self._TestOneEpoch(files, num_records, gap_bytes, encoding="GZIP")
def testZlibOneEpoch(self):
for num_records in [0, 7]:
# gap_bytes=0: hop_bytes=0
# gap_bytes=1: hop_bytes=record_bytes+1
for gap_bytes in [0, 1]:
files = self._CreateZlibFiles(num_records, gap_bytes)
self._TestOneEpoch(files, num_records, gap_bytes, encoding="ZLIB")
def testOneEpochWithHopBytes(self):
for num_overlapped_records in [0, 2]:
files = self._CreateOverlappedRecordFiles(num_overlapped_records)
self._TestOneEpochWithHopBytes(files, num_overlapped_records)
def testGzipOneEpochWithHopBytes(self):
for num_overlapped_records in [0, 2]:
      files = self._CreateGzipOverlappedRecordFiles(num_overlapped_records)
self._TestOneEpochWithHopBytes(files, num_overlapped_records, encoding="GZIP")
def testZlibOneEpochWithHopBytes(self):
for num_overlapped_records in [0, 2]:
files = self._CreateZlibOverlappedRecordFiles(num_overlapped_records)
self._TestOneEpochWithHopBytes(files, num_overlapped_records, encoding="ZLIB")
class TFRecordReaderTest(test.TestCase):
def setUp(self):
super(TFRecordReaderTest, self).setUp()
self._num_files = 2
self._num_records = 7
def _Record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _CreateFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = tf_record.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._Record(i, j))
return filenames
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = io_ops.TFRecordReader(name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testReadUpTo(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = io_ops.TFRecordReader(name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
batch_size = 3
key, value = reader.read_up_to(queue, batch_size)
queue.enqueue_many([files]).run()
queue.close().run()
num_k = 0
num_v = 0
while True:
try:
k, v = sess.run([key, value])
# Test reading *up to* batch_size records
self.assertLessEqual(len(k), batch_size)
self.assertLessEqual(len(v), batch_size)
num_k += len(k)
num_v += len(v)
except errors_impl.OutOfRangeError:
break
# Test that we have read everything
self.assertEqual(self._num_files * self._num_records, num_k)
self.assertEqual(self._num_files * self._num_records, num_v)
def testReadZlibFiles(self):
files = self._CreateFiles()
zlib_files = []
for i, fn in enumerate(files):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
with self.test_session() as sess:
options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
reader = io_ops.TFRecordReader(name="test_reader", options=options)
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([zlib_files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % zlib_files[i]))
self.assertAllEqual(self._Record(i, j), v)
def testReadGzipFiles(self):
files = self._CreateFiles()
gzip_files = []
for i, fn in enumerate(files):
with open(fn, "rb") as f:
cdata = f.read()
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(zfn, "wb") as f:
f.write(cdata)
gzip_files.append(zfn)
with self.test_session() as sess:
options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
reader = io_ops.TFRecordReader(name="test_reader", options=options)
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([gzip_files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % gzip_files[i]))
self.assertAllEqual(self._Record(i, j), v)
class TFRecordWriterZlibTest(test.TestCase):
def setUp(self):
super(TFRecordWriterZlibTest, self).setUp()
self._num_files = 2
self._num_records = 7
def _Record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _CreateFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
writer = tf_record.TFRecordWriter(fn, options=options)
for j in range(self._num_records):
writer.write(self._Record(i, j))
writer.close()
del writer
return filenames
def _WriteRecordsToFile(self, records, name="tf_record"):
fn = os.path.join(self.get_temp_dir(), name)
writer = tf_record.TFRecordWriter(fn, options=None)
for r in records:
writer.write(r)
writer.close()
del writer
return fn
def _ZlibCompressFile(self, infile, name="tfrecord.z"):
# zlib compress the file and write compressed contents to file.
with open(infile, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), name)
with open(zfn, "wb") as f:
f.write(cdata)
return zfn
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
reader = io_ops.TFRecordReader(name="test_reader", options=options)
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testZLibFlushRecord(self):
fn = self._WriteRecordsToFile([b"small record"], "small_record")
with open(fn, "rb") as h:
buff = h.read()
# creating more blocks and trailing blocks shouldn't break reads
compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS)
output = b""
for c in buff:
if isinstance(c, int):
c = six.int2byte(c)
output += compressor.compress(c)
output += compressor.flush(zlib.Z_FULL_FLUSH)
output += compressor.flush(zlib.Z_FULL_FLUSH)
output += compressor.flush(zlib.Z_FULL_FLUSH)
output += compressor.flush(zlib.Z_FINISH)
# overwrite the original file with the compressed data
with open(fn, "wb") as h:
h.write(output)
with self.test_session() as sess:
options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
reader = io_ops.TFRecordReader(name="test_reader", options=options)
queue = data_flow_ops.FIFOQueue(1, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue(fn).run()
queue.close().run()
k, v = sess.run([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % fn))
self.assertAllEqual(b"small record", v)
def testZlibReadWrite(self):
"""Verify that files produced are zlib compatible."""
original = [b"foo", b"bar"]
fn = self._WriteRecordsToFile(original, "zlib_read_write.tfrecord")
zfn = self._ZlibCompressFile(fn, "zlib_read_write.tfrecord.z")
# read the compressed contents and verify.
actual = []
for r in tf_record.tf_record_iterator(
zfn,
options=tf_record.TFRecordOptions(
tf_record.TFRecordCompressionType.ZLIB)):
actual.append(r)
self.assertEqual(actual, original)
def testZlibReadWriteLarge(self):
"""Verify that writing large contents also works."""
# Make it large (about 5MB)
original = [_TEXT * 10240]
fn = self._WriteRecordsToFile(original, "zlib_read_write_large.tfrecord")
zfn = self._ZlibCompressFile(fn, "zlib_read_write_large.tfrecord.z")
# read the compressed contents and verify.
actual = []
for r in tf_record.tf_record_iterator(
zfn,
options=tf_record.TFRecordOptions(
tf_record.TFRecordCompressionType.ZLIB)):
actual.append(r)
self.assertEqual(actual, original)
def testGzipReadWrite(self):
"""Verify that files produced are gzip compatible."""
original = [b"foo", b"bar"]
fn = self._WriteRecordsToFile(original, "gzip_read_write.tfrecord")
# gzip compress the file and write compressed contents to file.
with open(fn, "rb") as f:
cdata = f.read()
gzfn = os.path.join(self.get_temp_dir(), "tf_record.gz")
with gzip.GzipFile(gzfn, "wb") as f:
f.write(cdata)
actual = []
for r in tf_record.tf_record_iterator(
gzfn, options=tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)):
actual.append(r)
self.assertEqual(actual, original)
class TFRecordIteratorTest(test.TestCase):
def setUp(self):
super(TFRecordIteratorTest, self).setUp()
self._num_records = 7
def _Record(self, r):
return compat.as_bytes("Record %d" % r)
def _WriteCompressedRecordsToFile(
self,
records,
name="tfrecord.z",
compression_type=tf_record.TFRecordCompressionType.ZLIB):
fn = os.path.join(self.get_temp_dir(), name)
options = tf_record.TFRecordOptions(compression_type=compression_type)
writer = tf_record.TFRecordWriter(fn, options=options)
for r in records:
writer.write(r)
writer.close()
del writer
return fn
def _ZlibDecompressFile(self, infile, name="tfrecord", wbits=zlib.MAX_WBITS):
with open(infile, "rb") as f:
cdata = zlib.decompress(f.read(), wbits)
zfn = os.path.join(self.get_temp_dir(), name)
with open(zfn, "wb") as f:
f.write(cdata)
return zfn
def testIterator(self):
fn = self._WriteCompressedRecordsToFile(
[self._Record(i) for i in range(self._num_records)],
"compressed_records")
options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
reader = tf_record.tf_record_iterator(fn, options)
for i in range(self._num_records):
record = next(reader)
self.assertAllEqual(self._Record(i), record)
with self.assertRaises(StopIteration):
record = next(reader)
def testWriteZlibRead(self):
"""Verify compression with TFRecordWriter is zlib library compatible."""
original = [b"foo", b"bar"]
fn = self._WriteCompressedRecordsToFile(original,
"write_zlib_read.tfrecord.z")
zfn = self._ZlibDecompressFile(fn, "write_zlib_read.tfrecord")
actual = []
for r in tf_record.tf_record_iterator(zfn):
actual.append(r)
self.assertEqual(actual, original)
def testWriteZlibReadLarge(self):
"""Verify compression for large records is zlib library compatible."""
# Make it large (about 5MB)
original = [_TEXT * 10240]
fn = self._WriteCompressedRecordsToFile(original,
"write_zlib_read_large.tfrecord.z")
zfn = self._ZlibDecompressFile(fn, "write_zlib_read_large.tf_record")
actual = []
for r in tf_record.tf_record_iterator(zfn):
actual.append(r)
self.assertEqual(actual, original)
def testWriteGzipRead(self):
original = [b"foo", b"bar"]
fn = self._WriteCompressedRecordsToFile(
original,
"write_gzip_read.tfrecord.gz",
compression_type=TFRecordCompressionType.GZIP)
with gzip.GzipFile(fn, "rb") as f:
cdata = f.read()
zfn = os.path.join(self.get_temp_dir(), "tf_record")
with open(zfn, "wb") as f:
f.write(cdata)
actual = []
for r in tf_record.tf_record_iterator(zfn):
actual.append(r)
self.assertEqual(actual, original)
def testBadFile(self):
"""Verify that tf_record_iterator throws an exception on bad TFRecords."""
fn = os.path.join(self.get_temp_dir(), "bad_file")
with tf_record.TFRecordWriter(fn) as writer:
writer.write(b"123")
fn_truncated = os.path.join(self.get_temp_dir(), "bad_file_truncated")
with open(fn, "rb") as f:
with open(fn_truncated, "wb") as f2:
# DataLossError requires that we've written the header, so this must
# be at least 12 bytes.
f2.write(f.read(14))
with self.assertRaises(errors_impl.DataLossError):
for _ in tf_record.tf_record_iterator(fn_truncated):
pass
class AsyncReaderTest(test.TestCase):
def testNoDeadlockFromQueue(self):
"""Tests that reading does not block main execution threads."""
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
thread_data_t = collections.namedtuple("thread_data_t",
["thread", "queue", "output"])
thread_data = []
# Create different readers, each with its own queue.
for i in range(3):
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
reader = io_ops.TextLineReader()
_, line = reader.read(queue)
output = []
t = threading.Thread(
target=AsyncReaderTest._RunSessionAndSave,
args=(sess, [line], output))
thread_data.append(thread_data_t(t, queue, output))
# Start all readers. They are all blocked waiting for queue entries.
sess.run(variables.global_variables_initializer())
for d in thread_data:
d.thread.start()
# Unblock the readers.
for i, d in enumerate(reversed(thread_data)):
fname = os.path.join(self.get_temp_dir(), "deadlock.%s.txt" % i)
with open(fname, "wb") as f:
f.write(("file-%s" % i).encode())
d.queue.enqueue_many([[fname]]).run()
d.thread.join()
self.assertEqual([[("file-%s" % i).encode()]], d.output)
@staticmethod
def _RunSessionAndSave(sess, args, output):
output.append(sess.run(args))
class LMDBReaderTest(test.TestCase):
def setUp(self):
super(LMDBReaderTest, self).setUp()
# Copy database out because we need the path to be writable to use locks.
path = os.path.join(prefix_path, "lmdb", "testdata", "data.mdb")
self.db_path = os.path.join(self.get_temp_dir(), "data.mdb")
shutil.copy(path, self.db_path)
def testReadFromFile(self):
with self.test_session() as sess:
reader = io_ops.LMDBReader(name="test_read_from_file")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue([self.db_path]).run()
queue.close().run()
for i in range(10):
k, v = sess.run([key, value])
self.assertAllEqual(compat.as_bytes(k), compat.as_bytes(str(i)))
self.assertAllEqual(
compat.as_bytes(v), compat.as_bytes(str(chr(ord("a") + i))))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testReadFromFolder(self):
with self.test_session() as sess:
reader = io_ops.LMDBReader(name="test_read_from_folder")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue([self.db_path]).run()
queue.close().run()
for i in range(10):
k, v = sess.run([key, value])
self.assertAllEqual(compat.as_bytes(k), compat.as_bytes(str(i)))
self.assertAllEqual(
compat.as_bytes(v), compat.as_bytes(str(chr(ord("a") + i))))
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
if __name__ == "__main__":
test.main()
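# --- Illustrative sketch (not part of the test suite above): the minimal
# ZLIB round trip these tests exercise, as a standalone snippet. It assumes
# the TF 1.x internal module layout (tensorflow.python.lib.io); the temp
# path is arbitrary.
import os
import tempfile
from tensorflow.python.lib.io import tf_record

options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.ZLIB)
path = os.path.join(tempfile.mkdtemp(), "demo.tfrecord.z")
writer = tf_record.TFRecordWriter(path, options=options)
for record in [b"foo", b"bar"]:
writer.write(record)
writer.close()
# Reading back requires the same compression options.
assert list(tf_record.tf_record_iterator(path, options=options)) == [b"foo", b"bar"]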
|
|
"""Robot module."""
from typing import Any, Optional, Sequence, Sized, Union
import attr
import numpy as np # type: ignore
import scipy.optimize # type: ignore
from pybotics.errors import PyboticsError
from pybotics.json_encoder import JSONEncoder
from pybotics.kinematic_chain import KinematicChain, MDHKinematicChain
from pybotics.tool import Tool
def _ndof_zeros_factory(robot: Any) -> np.ndarray:
return np.zeros(len(robot.kinematic_chain))
def _joint_limits_factory(robot: Any) -> np.ndarray:
return np.repeat((-np.pi, np.pi), len(robot.kinematic_chain)).reshape((2, -1))
@attr.s
class Robot(Sized):
"""Robot manipulator class."""
kinematic_chain = attr.ib(type=KinematicChain)
tool = attr.ib(factory=lambda: Tool(), type=Tool)
world_frame = attr.ib(factory=lambda: np.eye(4), type=np.ndarray) # type: ignore
random_state = attr.ib(
factory=lambda: np.random.RandomState(), # type: ignore
type=np.random.RandomState,
)
home_position = attr.ib(
default=attr.Factory(factory=_ndof_zeros_factory, takes_self=True),
type=np.ndarray,
)
_joints = attr.ib(
default=attr.Factory(factory=_ndof_zeros_factory, takes_self=True),
type=np.ndarray,
)
_joint_limits = attr.ib(
default=attr.Factory(factory=_joint_limits_factory, takes_self=True),
type=np.ndarray,
)
def __len__(self) -> int:
"""
Get the number of degrees of freedom.
:return: number of degrees of freedom
"""
return len(self.kinematic_chain)
def to_json(self) -> str:
"""Encode robot model as JSON."""
encoder = JSONEncoder(sort_keys=True)
return encoder.encode(self)
def fk(self, q: Optional[Sequence[float]] = None) -> np.ndarray:
"""
Compute the forward kinematics of a given position.
Uses the current position if None is given.
:param q: optional joint configuration; the current joints are used if None
:return: 4x4 transform matrix of the FK pose
"""
# validate
q = self.joints if q is None else q
# gather transforms
# noinspection PyListCreation
transforms = []
transforms.append(self.world_frame)
transforms.extend(self.kinematic_chain.transforms(q))
transforms.append(self.tool.matrix)
# matrix multiply through transforms
pose = np.eye(4, dtype=float)
for t in transforms:
pose = np.dot(pose, t)
return pose
def ik(
self, pose: np.ndarray, q: Optional[Sequence[float]] = None
) -> Optional[np.ndarray]:
"""Solve the inverse kinematics."""
x0 = self.joints if q is None else q
result = scipy.optimize.least_squares(
fun=_ik_cost_function, x0=x0, bounds=self.joint_limits, args=(pose, self)
) # type: scipy.optimize.OptimizeResult
if result.success: # pragma: no cover
actual_pose = self.fk(result.x)
if np.allclose(actual_pose, pose, atol=1e-3):
return result.x
return None
@property
def ndof(self) -> int:
"""
Get the number of degrees of freedom.
:return: number of degrees of freedom
"""
return len(self)
@property
def joints(self) -> Union[Sequence[float], np.ndarray]:
"""
Get the robot configuration (e.g., joint positions for serial robot).
:return: robot position
"""
return self._joints
@joints.setter
def joints(self, value: np.ndarray) -> None:
"""Set joints."""
if np.any(value < self.joint_limits[0]) or np.any(value > self.joint_limits[1]):
raise PyboticsError("Joint limits exceeded.")
self._joints = value
@property
def joint_limits(self) -> np.ndarray:
"""
Limits of the robot position (e.g., joint limits).
:return: limits with shape (2,num_dof) where first row is lower limits
"""
return self._joint_limits
@joint_limits.setter
def joint_limits(self, value: np.ndarray) -> None:
"""Set joint limits."""
if value.shape[0] != 2 or value.shape[1] != len(self):
raise PyboticsError(f"position_limits must have shape=(2,{len(self)})")
self._joint_limits = value
def jacobian_world(self, q: Optional[Sequence[float]] = None) -> np.ndarray:
"""Calculate the Jacobian wrt the world frame."""
q = self.joints if q is None else q
j_fl = self.jacobian_flange(q)
pose = self.fk(q)
rotation = pose[:3, :3]
j_tr = np.zeros((6, 6), dtype=float)
j_tr[:3, :3] = rotation
j_tr[3:, 3:] = rotation
j_w = np.dot(j_tr, j_fl)
return j_w
def jacobian_flange(self, q: Optional[Sequence[float]] = None) -> np.ndarray:
"""Calculate the Jacobian wrt the flange frame."""
q = self.joints if q is None else q
# init Cartesian jacobian (6-dof in space)
jacobian_flange = np.zeros((6, self.ndof))
current_transform = self.tool.matrix.copy()
for i in reversed(range(self.ndof)):
d = np.array(
[
-current_transform[0, 0] * current_transform[1, 3]
+ current_transform[1, 0] * current_transform[0, 3],
-current_transform[0, 1] * current_transform[1, 3]
+ current_transform[1, 1] * current_transform[0, 3],
-current_transform[0, 2] * current_transform[1, 3]
+ current_transform[1, 2] * current_transform[0, 3],
]
)
delta = current_transform[2, 0:3]
jacobian_flange[:, i] = np.hstack((d, delta))
current_link = self.kinematic_chain.links[i]
p = q[i]
current_link_transform = current_link.transform(p)
current_transform = np.dot(current_link_transform, current_transform)
return jacobian_flange
def compute_joint_torques(
self, wrench: Sequence[float], q: Optional[Sequence[float]] = None
) -> np.ndarray:
"""
Calculate the joint torques due to external flange force.
Method from:
5.9 STATIC FORCES IN MANIPULATORS
Craig, John J. Introduction to robotics: mechanics and control.
Vol. 3. Upper Saddle River: Pearson Prentice Hall, 2005.
:param wrench: external flange wrench (force and moment), length 6
:param q: optional joint configuration; the current joints are used if None
:return: joint torques
"""
if q is None:
q = self.joints
# split wrench into components
force = wrench[:3]
moment = wrench[-3:]
# init output
joint_torques = [moment[-1]]
# loop through links from flange to base
# each iteration calculates for link i-1
for i, p in reversed(list(enumerate(q))): # pragma: no cover
if i == 0:
break
# get current link transform
transform = self.kinematic_chain.links[i].transform(p)
# calculate force applied to current link
rotation = transform[:3, :3]
force = np.dot(rotation, force)
# calculate moment applied to current link
position = transform[:3, -1]
moment = np.dot(rotation, moment) + np.cross(position, force)
# append z-component as joint torque
joint_torques.append(moment[-1])
# reverse torques into correct order
return np.array(list(reversed(joint_torques)), dtype=float)
def clamp_joints(self, q: Sequence[float]) -> Optional[np.ndarray]:
"""Limit joints to joint limits."""
return np.clip(q, self.joint_limits[0], self.joint_limits[1])
def random_joints(self, in_place: bool = False) -> Optional[np.ndarray]:
"""Generate random joints within limits."""
q = self.random_state.uniform(
low=self.joint_limits[0], high=self.joint_limits[1]
)
if in_place:
self.joints = q
return None
else:
return q
@classmethod
def from_parameters(cls, parameters: Sequence[float]) -> Sized:
"""Construct Robot from Kinematic Chain parameters."""
# FIXME: assumes MDH revolute robot
kc = MDHKinematicChain.from_parameters(parameters)
return cls(kinematic_chain=kc)
def _ik_cost_function(q: np.ndarray, pose: np.ndarray, robot: Robot) -> np.ndarray:
actual_pose = robot.fk(q)
diff = np.abs(actual_pose - pose)
return diff.ravel()
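# --- Illustrative usage sketch (hypothetical values, not part of the module):
# build a robot from flat MDH parameters and round-trip FK -> IK. The
# parameter array below is a made-up two-link arm; real values depend on the
# MDH convention expected by MDHKinematicChain.from_parameters, which is
# assumed here to accept a flat array reshaped to (ndof, 4).
if __name__ == "__main__":
parameters = np.array([
[0, 0, 0, 0],
[0, 100, 0, 0],
]).ravel()
robot = Robot.from_parameters(parameters)
pose = robot.fk(q=np.deg2rad([10, 20]))
solution = robot.ik(pose)  # may be None if the optimizer fails
print(pose)
print(solution)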
|
|
#!/usr/bin/env python
import sys
import nltk
import inflect
import re
import string
from textblob import TextBlob
import grammars as g
import util
import backup_answer as b
import parse as p
import sentence_edit_distance as edit
import np_util as n
extractor = p.SuperNPExtractor()
def compare_phrases(q_phrase, t_phrase, uncommon):
q_words = [word.lemmatize() for word, tag in q_phrase]
t_words = [word.lemmatize() for word, tag in t_phrase]
def get_bigrams(words):
return [words[i-1:i+1] for i in xrange(1, len(words))]
q_grams = get_bigrams(q_words)
t_grams = get_bigrams(t_words)
def judge_gram(target):
if target in uncommon:
return 3.5 + (1 if target.istitle() else 0)
else:
return 1
ans1 = sum([max([judge_gram(gram[0]), judge_gram(gram[1])])
for gram in q_grams if gram in t_grams]) - float(len(q_words)) + 1
ans2 = sum([judge_gram(i)
for i in q_words if i in t_words]) - float(len(q_words))
return (ans1, ans2, t_phrase)
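#worked example (illustrative): for q_phrase "the cat sat" vs t_phrase
#"the cat slept", the phrases share the bigram ('the', 'cat') and the
#unigrams 'the' and 'cat'. With no word in `uncommon`,
#ans1 = max(1, 1) - 3 + 1 = -1 and ans2 = (1 + 1) - 3 = -1; uncommon and
#title-case words score 3.5/4.5 per gram and dominate the comparison.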
def examine_rels(q, q_phrase, bestrels, uncommon, mode):
def taglist(l):
return [tag for word, tag in l]
if mode == 'IS':
comp1, comp2, rel = max([compare_phrases(q_phrase, relation, uncommon)
for best, relation in bestrels])
print >> sys.stderr, comp1, comp2
if comp1 >= -2 and comp2 >= 0:
#"at most 2 unmatched bigrams and 1 unmatched unigram"
return "Yes"
elif comp1 >= -2 and comp2 >= -1:
#if there's an unmatched unigram, it's either a red herring
#or good enough
print >> sys.stderr, q_phrase
print >> sys.stderr, rel
for word, tag in q_phrase:
if (word, tag) not in rel:
if tag in ['IN', 'NNP', 'CD']:
return "No"
return "Yes"
elif mode == 'OBJECT':
nextidx = q.tokens.index('what') + 1
nexttoken = q.tokens[nextidx]
npsidx = sum([1 for i in n.idxs if i <= nextidx])
subj = n.nps[npsidx]
rest = q.tags[n.idxs[npsidx] + len(n.get_np_tags(subj, q)):]
if (nexttoken in ['is', 'was', 'do', 'does', 'will', 'did', 'can', 'must']):
#what VB NP VP (what will Jake do,..)
#compare things
q_phrase = n.get_np_tags(subj, q) + [q.tags[nextidx]] + rest
comp1, comp2, rel = max([compare_phrases(q_phrase, relation, uncommon)
for best, relation in bestrels])
print >> sys.stderr, comp1, comp2
relwords = [word for word, tag in rel]
reltags = [tag for word, tag in rel]
answerstart = 0
if subj in relwords:
answerstart = relwords.index(subj.split()[0]) + len(subj.split()) + 1
answerend = answerstart
if relwords[answerend] == rest[0][0]:
answerstart += len(rest)
if ',' in reltags[answerstart:]:
answerend = reltags[answerstart:].index(',')
elif '.' in reltags[answerstart:]:
answerend = reltags[answerstart:].index('.')
else:
answerend = -1
else:
try:
answerend = relwords[answerstart:].index(rest[0][0])
except ValueError:
answerend = -1
return " ".join(relwords[answerstart:answerend])
else:
comp1, comp2, rel = max([compare_phrases(q_phrase, relation, uncommon)
for best, relation in bestrels])
q_ne = p.extract_named_entities(q)
relblob = TextBlob(" ".join([word for word, tag in rel]))
r_ne = p.extract_named_entities(relblob)
diff = dict([(ne, tag) for ne, tag in r_ne.iteritems()
if ne not in q_ne.keys()])
if len(diff) == 0:
return ""
return diff.keys()[0]
elif mode == 'PERSON':
comp1, comp2, rel = max([compare_phrases(q_phrase, relation, uncommon)
for best, relation in bestrels])
q_ne = p.extract_named_entities(q)
relblob = TextBlob(" ".join([word for word, tag in rel]))
r_ne = p.extract_named_entities(relblob)
diff = dict([(ne, tag) for ne, tag in r_ne.iteritems()
if ne not in q_ne.keys()])
diff = dict([(ne, tag) for ne, tag in diff.iteritems()
if tag in ['PERSON', 'GPE']])
if len(diff) == 0:
return ""
return diff.keys()[0]
elif mode == 'GPE':
comp1, comp2, rel = max([compare_phrases(q_phrase, relation, uncommon)
for best, relation in bestrels])
q_ne = p.extract_named_entities(q)
relblob = TextBlob(" ".join([word for word, tag in rel]))
r_ne = p.extract_named_entities(relblob)
diff = dict([(ne, tag) for ne, tag in r_ne.iteritems()
if ne not in q_ne.keys()])
diff_gpe = dict([(ne, tag) for ne, tag in diff.iteritems()
if tag in ['GPE']])
diff = dict([(ne, tag) for ne, tag in diff.iteritems()
if tag in ['OBJECT', 'PERSON']])
if len(diff_gpe) > 0:
return diff_gpe.keys()[0]
if len(diff) == 0:
return ""
nextidx = q.tokens.index('what') + 1
relwords = [word for word, tag in rel]
reltags = [tag for word, tag in rel]
prpidx = 0
prp = ""
next = ""
if 'PRP' in reltags[nextidx:] or 'PP' in reltags[nextidx:]:
prpidx = reltags[nextidx:].index('PRP')
prp = relwords[prpidx]
next = relwords[prpidx + 1:]
elif 'IN' in reltags[nextidx:]:
prpidx = reltags[nextidx:].index('IN')
prp = relwords[prpidx]
next = relwords[prpidx + 1:]
def find_ne(word):
for e in diff.keys():
if word in e.split():
return word
return None
for w in next:
ans = find_ne(w)
if ans != None:
return " ".join([prp, ans])
elif mode == 'DATETIME':
comp1, comp2, rel = max([compare_phrases(q_phrase, relation, uncommon)
for best, relation in bestrels])
q_ne = p.extract_named_entities(q)
relblob = TextBlob(" ".join([word for word, tag in rel]))
r_ne = p.extract_named_entities(relblob)
diff = dict([(ne, tag) for ne, tag in r_ne.iteritems()
if ne not in q_ne.keys()])
diff = dict([(ne, tag) for ne, tag in diff.iteritems()
if tag in ['DATETIME']])
if len(diff) == 0:
return ""
return diff.keys()[0]
elif mode == 'ABSTRACT':
bestrels = [(best, rel) for best, rel in bestrels
if any([word in ['because', 'due', 'by', 'since']
for word, tag in rel])]
if len(bestrels) == 0:
return ""
comp1, comp2, rel = max([compare_phrases(q_phrase, relation, uncommon)
for best, relation in bestrels])
words = [word for word, tag in rel]
tags = [tag for word, tag in rel]
def find_whyword(words):
for w in ['because', 'due', 'by', 'since']:
if w in words:
return w
return None
why = find_whyword(words)
answerstart = words.index(why)
answerend = -1
if ',' in tags[answerstart:]:
answerend = tags[answerstart:].index(',')
return " ".join(words[answerstart:answerend])
elif mode == 'NUMBER':
comp1, comp2, rel = max([compare_phrases(q_phrase, relation, uncommon)
for best, relation in bestrels])
words = [word for word, tag in rel]
tags = [tag for word, tag in rel]
num = "many"
q_ne = p.extract_named_entities(q)
relblob = TextBlob(" ".join([word for word, tag in rel]))
r_ne = p.extract_named_entities(relblob)
diff = dict([(ne, tag) for ne, tag in r_ne.iteritems()
if ne not in q_ne.keys()])
diff = dict([(ne, tag) for ne, tag in diff.iteritems()
if tag in ['NUMBER']])
ans = ""
if len(diff) == 0:
if 'all' in words or 'every' in words:
return 'all'
if 'most' in words:
return 'most'
if 'some' in words:
return 'some'
if 'many' in words:
return 'many'
same = dict([(ne, tag) for ne, tag in r_ne.iteritems()
if ne in q_ne.keys()])
if len(same) == 0:
return ""
idx = words.index(same.keys()[0].split()[0]) - 2
ans = words[idx]
else:
ans = diff.keys()[0]
if not ans.isnumeric():
for d in diff.keys()[1:]:
if ans.isnumeric():
break
else:
ans += " " + d
if ans == 'one':
if 'every' in words or 'each' in words:
return 'all'
return ans
elif mode == 'VERB PHRASE':
comp1, comp2, rel = max([compare_phrases(q_phrase, relation, uncommon)
for best, relation in bestrels])
words = [word for word, tag in rel]
tags = [tag for word, tag in rel]
if 'IN' not in tags:
return ""
answerstart = tags.index('IN')
answerend = -1
if ',' in tags[answerstart:]:
answerend = tags[answerstart:].index(',')
return " ".join(words[answerstarrt:answerend])
#nothing found here
return ""
def parse_first(q, database, uncommon, mode):
words = q.words.lower()
nps = n.nps
tags = q.tags
if len(nps) == 0:
print >> sys.stderr, "No subject found"
return "No"
subj = nps[0] #assuming subject is the first noun phrase
print >> sys.stderr, "\tSubject:", subj
first = words[0]
if True:#first.lower() in ['is','was']: #is this everything?
#question is an "is/was ___ NP/AP"
#get index of the first word after the noun phrase
loc = n.np_idx(subj, q)
nexti = loc + len(subj.split()) + (1 if "'" in subj else 0)
subj_tags = n.get_np_tags(subj, q)
#get potential relations from database
closest = n.get_similar_np(subj_tags, database)
if closest == None:
#retry with partial noun phrases
closest = n.get_similar_np(subj_tags[0:1], database)
nexti = nexti + 1 - len(subj_tags)
if closest == None:
closest = n.get_similar_np(subj_tags[0:2], database)
nexti = nexti + 1
if closest == None:
return ""
q_phrase = tags[nexti:]
print >> sys.stderr, q_phrase
if len(q_phrase) <= 1:
q_phrase = tags
#construct possible relations
rel_tags = [database[close][e]["relation"]
+ database[close][e]["pos"]
for close in closest
for e in database[close]]
#get the best, compare
bestrels = edit.distance(q_phrase, rel_tags)
return examine_rels(q, q_phrase, bestrels, uncommon, mode)
elif first.lower() in ['does','did','will']:
#question is a "does/did/will ___ VP"
return "No"
return "No"
def parse_second(q, blob, uncommon, mode):
sents = blob.sentences
q_phrase = q.tags[2:]
if mode == 'IS':
q_phrase = q.tags[1:]
q_phrase = q_phrase[:n.idxs[0]] + q_phrase[n.idxs[0] + 1:]
bestrels = edit.distance(q_phrase, [s.tags for s in sents if len(s.tags) > 6])
return examine_rels(q, q_phrase, bestrels, uncommon, mode)
return ""
def parse_question(question, database, raw):
q = question
q = q[0:1].lower() + q[1:]
q = TextBlob(q, np_extractor=extractor)
n.init_nps(q)
#get 25th percentile words by frequency
#actually, just gets infrequent words
bigblob = TextBlob(raw, np_extractor=extractor)
freqdict = bigblob.word_counts
backwards = [(c, w) for w, c in freqdict.iteritems()]
cutoff = (0.25 * sum([c for c, w in backwards]))
best = 0
#for c, w in sorted(backwards):
# best = c
# cutoff -= c
# if cutoff < 0:
# break
uncommon = [w for c, w in sorted(backwards) if 1 < c < 4]#best]
mode = q.words[0].upper()
if mode in ['IS', 'WAS', 'DO', 'DOES', 'DID', 'WILL']:
mode = 'IS'
#else:
# if q.tags[0][1][0] != 'W':
# tagtypes = [tag[0] for word, tag in q.tags]
# if 'W' not in tagtypes:
# return 'IS'
# idx = tagtypes.index('W')
# mode = p.determine_question_type(q[idx:])
# else:
# mode = p.determine_question_type(q)
else:
mode = None
for i in xrange(0, len(q.tokens)-1):
mode = p.determine_question_type(q.tokens[i:])
if mode != None:
break
if mode == None:
mode = 'IS'
print >> sys.stderr, mode
try:
first_attempt = parse_first(q, database, uncommon, mode)
except:
first_attempt = ""
if first_attempt != "":
return first_attempt
try:
second_attempt = parse_second(q, bigblob, uncommon, mode)
except:
second_attempt = ""
if second_attempt != "":
return second_attempt
third_attempt = b.backup_answer(q, n.nps, raw)
if third_attempt != "":
return third_attempt
if len(n.nps) > 0:
return n.nps[0]
else:
return "Yes" #guess
if __name__ == "__main__":
q = raw_input("Ask a question\n")
q = TextBlob(q, np_extractor=extractor)
print q.noun_phrases
noun_phrases, idxs = n.get_nps_from_blob(q)
print noun_phrases
print q.words
first = noun_phrases[0]
print n.get_np_tags(first, q)
print q.tags
print q.parse()
#print p.extract_generic_relations(q)
|
|
# Copyright 2014 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Implentation of Brocade SVI service Plugin."""
from oslo.config import cfg
from oslo.utils import excutils
from neutron.common import constants as l3_constants
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import db
from neutron.plugins.ml2.drivers.brocade.db import models as brocade_db
from neutron.plugins.ml2.drivers.brocade.nos import nosdriver as driver
from neutron.services.l3_router import l3_router_plugin as router
DEVICE_OWNER_ROUTER_INTF = l3_constants.DEVICE_OWNER_ROUTER_INTF
DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW
DEVICE_OWNER_FLOATINGIP = l3_constants.DEVICE_OWNER_FLOATINGIP
ML2_BROCADE = [cfg.StrOpt('address', default='',
help=_('The address of the host to SSH to')),
cfg.StrOpt('username', default='admin',
help=_('The SSH username to use')),
cfg.StrOpt('password', default='password', secret=True,
help=_('The SSH password to use')),
cfg.StrOpt('rbridge_id', default='1',
help=_('Rbridge id of provider edge router(s)')),
]
cfg.CONF.register_opts(ML2_BROCADE, "ml2_brocade")
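# Example neutron.conf section consumed by the options registered above
# (illustrative values; address and credentials are deployment specific):
#
# [ml2_brocade]
# address = 10.0.0.2
# username = admin
# password = password
# rbridge_id = 1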
LOG = logging.getLogger(__name__)
class BrocadeSVIPlugin(router.L3RouterPlugin):
"""Brocade SVI service Plugin."""
def __init__(self):
"""Initialize Brocade Plugin
Specify switch address and db configuration.
"""
super(BrocadeSVIPlugin, self).__init__()
self._switch = None
self._driver = None
self.brocade_init()
def brocade_init(self):
"""Brocade specific initialization."""
LOG.debug("brocadeSVIPlugin::brocade_init()")
self._switch = {'address': cfg.CONF.ml2_brocade.address,
'username': cfg.CONF.ml2_brocade.username,
'password': cfg.CONF.ml2_brocade.password,
'rbridge_id': cfg.CONF.ml2_brocade.rbridge_id
}
self._driver = driver.NOSdriver()
LOG.info(_LI("rbridge id %s"), self._switch['rbridge_id'])
def create_router(self, context, router):
"""Creates a vrf on NOS device."""
LOG.debug("BrocadeSVIPlugin.create_router called: ")
with context.session.begin(subtransactions=True):
new_router = super(BrocadeSVIPlugin, self).create_router(context,
router)
# Router on VDX
try:
switch = self._switch
self._driver.create_router(switch['address'],
switch['username'],
switch['password'],
switch['rbridge_id'],
str(new_router['id']))
except Exception:
with excutils.save_and_reraise_exception():
with context.session.begin(subtransactions=True):
super(BrocadeSVIPlugin, self).delete_router(
context,
new_router['id'])
LOG.debug("BrocadeSVIPlugin.create_router: "
"router created on VDX switch")
return new_router
def delete_router(self, context, router_id):
"""Delete a vrf on NOS device."""
router = super(BrocadeSVIPlugin, self).get_router(context, router_id)
super(BrocadeSVIPlugin, self).delete_router(context, router_id)
switch = self._switch
self._driver.delete_router(switch['address'],
switch['username'],
switch['password'],
switch['rbridge_id'],
str(router['id']))
def add_router_interface(self, context, router_id, interface_info):
"""creates svi on NOS device and assigns ip addres to SVI."""
LOG.debug("BrocadeSVIPlugin.add_router_interface on VDX: "
"router_id=%(router_id)s "
"interface_info=%(interface_info)r",
{'router_id': router_id, 'interface_info': interface_info})
with context.session.begin(subtransactions=True):
info = super(BrocadeSVIPlugin, self).add_router_interface(
context, router_id, interface_info)
port = db.get_port(context.session, info["port_id"])
# shutting down neutron port to allow NOS to do Arp/Routing
port['admin_state_up'] = False
port['port'] = port
self._core_plugin.update_port(context, info["port_id"], port)
interface_info = info
subnet = self._core_plugin._get_subnet(context,
interface_info["subnet_id"])
cidr = subnet["cidr"]
net_addr, net_len = self.net_addr(cidr)
gateway_ip = subnet["gateway_ip"]
network_id = subnet['network_id']
bnet = brocade_db.get_network(context, network_id)
vlan_id = bnet['vlan']
gateway_ip_cidr = gateway_ip + '/' + str(net_len)
LOG.debug("Allocated cidr %(cidr)s from the pool, "
"network_id %(net_id)s "
"bnet %(bnet)s "
"vlan %(vlan_id)d ", {'cidr': gateway_ip_cidr,
'net_id': network_id,
'bnet': bnet,
'vlan_id': int(vlan_id)})
port_filters = {'network_id': [network_id],
'device_owner': [DEVICE_OWNER_ROUTER_INTF]}
port_count = self._core_plugin.get_ports_count(context,
port_filters)
LOG.info(_LI("BrocadeSVIPlugin.add_router_interface ports_count "
"%d"),
port_count)
# port count is checked against 2 since the current port is already
# added to db
if port_count == 2:
# This subnet is already part of some router
# (this is not supported in this version of brocade svi plugin)
msg = _("BrocadeSVIPlugin: adding redundant router interface "
"is not supported")
LOG.error(msg)
raise Exception(msg)
try:
switch = self._switch
self._driver.create_svi(switch['address'],
switch['username'],
switch['password'],
switch['rbridge_id'],
vlan_id,
gateway_ip_cidr,
str(router_id))
except Exception:
LOG.error(_LE("Failed to create Brocade resources to add router "
"interface. info=%(info)s, router_id=%(router_id)s"),
{"info": info, "router_id": router_id})
with excutils.save_and_reraise_exception():
with context.session.begin(subtransactions=True):
self.remove_router_interface(context, router_id,
interface_info)
return info
def remove_router_interface(self, context, router_id, interface_info):
"""Deletes svi from NOS device."""
LOG.debug("BrocadeSVIPlugin.remove_router_interface called: "
"router_id=%(router_id)s "
"interface_info=%(interface_info)r",
{'router_id': router_id, 'interface_info': interface_info})
with context.session.begin(subtransactions=True):
info = super(BrocadeSVIPlugin, self).remove_router_interface(
context, router_id, interface_info)
try:
subnet = self._core_plugin._get_subnet(context,
info['subnet_id'])
cidr = subnet['cidr']
net_addr, net_len = self.net_addr(cidr)
gateway_ip = subnet['gateway_ip']
network_id = subnet['network_id']
bnet = brocade_db.get_network(context, network_id)
vlan_id = bnet['vlan']
gateway_ip_cidr = gateway_ip + '/' + str(net_len)
LOG.debug("remove_router_interface removed cidr %(cidr)s"
" from the pool,"
" network_id %(net_id)s bnet %(bnet)s"
" vlan %(vlan_id)d",
{'cidr': gateway_ip_cidr,
'net_id': network_id,
'bnet': bnet,
'vlan_id': int(vlan_id)})
switch = self._switch
self._driver.delete_svi(switch['address'],
switch['username'],
switch['password'],
switch['rbridge_id'],
vlan_id,
gateway_ip_cidr,
str(router_id))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Fail remove of interface from brocade "
"router interface. info=%(info)s, "
"router_id=%(router_id)s"),
{"info": info, "router_id": router_id})
return True
@staticmethod
def net_addr(addr):
"""Get network address prefix and length from a given address."""
if addr is None:
return None, None
nw_addr, nw_len = addr.split('/')
nw_len = int(nw_len)
return nw_addr, nw_len
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NDHWC", False), ("NDHWC", True)]
if test.is_gpu_available(cuda_only=True):
# "NCDHW" format is only supported on CUDA.
test_configs += [("NCDHW", True)]
return test_configs
class Conv3DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
if use_gpu:
if not test_util.CudaSupportsHalfMatMulAndConv():
return [dtypes.float32]
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float32, dtypes.float16]
else:
return [dtypes.float64, dtypes.float32, dtypes.float16]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
padding, data_format, dtype, use_gpu):
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with an array of numbers from 0 to 1.
# We keep the input tensor values fairly small to avoid overflowing a
# float16 tensor during the conv3d.
x1 = [f * 1.0 / total_size_1 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 / total_size_2 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if data_format == "NCDHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv3d(t1, t2, strides, padding=padding,
data_format=data_format)
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
return conv
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
results = []
for data_format, use_gpu in GetTestConfigs():
for dtype in self._DtypesToTest(use_gpu):
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_format,
dtype,
use_gpu=use_gpu)
results.append(result)
with self.test_session() as sess:
values = sess.run(results)
for value in values:
print("expected = ", expected)
print("actual = ", value)
tol = 1e-6
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(expected, value.flatten(), atol=tol,
rtol=tol)
def testConv3D1x1x1Filter(self):
expected_output = [
0.18518519, 0.22222222, 0.25925926, 0.40740741, 0.5 ,
0.59259259, 0.62962963, 0.77777778, 0.92592593, 0.85185185,
1.05555556, 1.25925926, 1.07407407, 1.33333333, 1.59259259,
1.2962963 , 1.61111111, 1.92592593
]
# These are equivalent to the Conv2D1x1 case.
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 1, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 1, 2, 3, 3],
filter_in_sizes=[1, 1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
# Expected values computed using scipy's correlate function.
def testConv3D2x2x2Filter(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 4.2650463 , 4.35763889,
4.45023148, 6.73032407, 6.89236111, 7.05439815, 7.22337963,
7.39930556, 7.57523148, 9.68865741, 9.93402778, 10.17939815,
10.18171296, 10.44097222, 10.70023148
]
# expected_shape = [1, 3, 1, 2, 5]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin
filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout
stride=1,
padding="VALID",
expected=expected_output)
def testConv3DStrides(self):
expected_output = [
0.06071429, 0.08988095, 0.10238095, 0.11488095, 0.12738095,
0.13988095, 0.08452381, 0.26071429, 0.35238095, 0.36488095,
0.37738095, 0.38988095, 0.40238095, 0.23452381, 0.46071429,
0.61488095, 0.62738095, 0.63988095, 0.65238095, 0.66488095,
0.38452381, 1.12738095, 1.48988095, 1.50238095, 1.51488095,
1.52738095, 1.53988095, 0.88452381, 1.32738095, 1.75238095,
1.76488095, 1.77738095, 1.78988095, 1.80238095, 1.03452381,
1.52738095, 2.01488095, 2.02738095, 2.03988095, 2.05238095,
2.06488095, 1.18452381, 2.19404762, 2.88988095, 2.90238095,
2.91488095, 2.92738095, 2.93988095, 1.68452381, 2.39404762,
3.15238095, 3.16488095, 3.17738095, 3.18988095, 3.20238095,
1.83452381, 2.59404762, 3.41488095, 3.42738095, 3.43988095,
3.45238095, 3.46488095, 1.98452381
]
self._VerifyValues(
tensor_in_sizes=[1, 5, 8, 7, 1],
filter_in_sizes=[1, 2, 3, 1, 1],
stride=[2, 3, 1], # different stride for each spatial dimension
padding="SAME",
expected=expected_output)
def testConv3D2x2x2FilterStride2(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 9.68865741, 9.93402778,
10.17939815
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="VALID",
expected=expected_output)
def testConv3DStride3(self):
expected_output = [
1.51140873, 1.57167659, 1.63194444, 1.56349206, 1.62673611,
1.68998016, 1.6155754 , 1.68179563, 1.74801587, 1.9280754 ,
2.01215278, 2.09623016, 1.98015873, 2.0672123 , 2.15426587,
2.03224206, 2.12227183, 2.21230159, 4.4280754 , 4.65500992,
4.88194444, 4.48015873, 4.71006944, 4.93998016, 4.53224206,
4.76512897, 4.99801587, 4.84474206, 5.09548611, 5.34623016,
4.8968254 , 5.15054563, 5.40426587, 4.94890873, 5.20560516,
5.46230159
]
self._VerifyValues(
tensor_in_sizes=[1, 6, 7, 8, 2],
filter_in_sizes=[3, 2, 1, 2, 3],
stride=3,
padding="VALID",
expected=expected_output)
def testConv3D2x2x2FilterStride2Same(self):
expected_output = [
3.77199074, 3.85069444, 3.92939815, 2.0162037 , 2.06597222,
2.11574074, 9.68865741, 9.93402778, 10.17939815, 4.59953704,
4.73263889, 4.86574074
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 2, 3, 3],
filter_in_sizes=[2, 2, 2, 3, 3],
stride=2,
padding="SAME",
expected=expected_output)
def testKernelSmallerThanStride(self):
expected_output = [
0.03703704, 0.11111111, 0.25925926, 0.33333333, 0.7037037 ,
0.77777778, 0.92592593, 1.
]
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="SAME",
expected=expected_output)
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1, 1],
stride=2,
padding="VALID",
expected=expected_output)
expected_output = [
0.54081633, 0.58017493, 0.28061224, 0.81632653, 0.85568513,
0.40306122, 0.41873178, 0.4340379 , 0.19642857, 2.46938776,
2.50874636, 1.1377551 , 2.74489796, 2.78425656, 1.26020408,
1.16873178, 1.1840379 , 0.51785714, 1.09511662, 1.10604956,
0.44642857, 1.17164723, 1.18258017, 0.47704082, 0.3691691 ,
0.37244898, 0.125
]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="SAME",
expected=expected_output)
expected_output = [
0.540816, 0.580175, 0.816327, 0.855685, 2.469388, 2.508746,
2.744898, 2.784257
]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 7, 1],
filter_in_sizes=[2, 2, 2, 1, 1],
stride=3,
padding="VALID",
expected=expected_output)
def testKernelSizeMatchesInputSize(self):
self._VerifyValues(
tensor_in_sizes=[1, 2, 1, 2, 1],
filter_in_sizes=[2, 1, 2, 1, 2],
stride=1,
padding="VALID",
expected=[1.5625, 1.875])
def _ConstructAndTestGradientForConfig(
self, batch, input_shape, filter_shape, in_depth, out_depth, stride,
padding, test_input, data_format, use_gpu):
input_planes, input_rows, input_cols = input_shape
filter_planes, filter_rows, filter_cols = filter_shape
input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
filter_shape = [
filter_planes, filter_rows, filter_cols, in_depth, out_depth
]
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
if padding == "VALID":
output_planes = int(
math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
output_rows = int(
math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
output_cols = int(
math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
else:
output_planes = int(math.ceil(float(input_planes) / strides[1]))
output_rows = int(math.ceil(float(input_rows) / strides[2]))
output_cols = int(math.ceil(float(input_cols) / strides[3]))
output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
for data_type in self._DtypesToTest(use_gpu=use_gpu):
# TODO(mjanusz): Modify gradient_checker to also provide max relative
# error and synchronize the tolerance levels between the tests for forward
# and backward computations.
if data_type == dtypes.float64:
tolerance = 1e-8
elif data_type == dtypes.float32:
tolerance = 5e-3
elif data_type == dtypes.float16:
tolerance = 1e-3
with self.test_session(use_gpu=use_gpu):
orig_input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=data_type, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=data_type, name="filter")
if data_format == "NCDHW":
input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
new_strides = test_util.NHWCToNCHW(strides)
else:
input_tensor = orig_input_tensor
new_strides = strides
conv = nn_ops.conv3d(
input_tensor, filter_tensor, new_strides, padding,
data_format=data_format, name="conv")
if data_format == "NCDHW":
conv = test_util.NCHWToNHWC(conv)
if test_input:
jacob_t, jacob_n = gradient_checker.compute_gradient(orig_input_tensor,
input_shape,
conv,
output_shape)
else:
jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
filter_shape,
conv,
output_shape)
if data_type != dtypes.float16:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16 theoretical gradients to fp32 theoretical gradients,
# since fp16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
print("conv3d gradient error = ", err)
self.assertLess(err, tolerance)
def ConstructAndTestGradient(self, **kwargs):
for data_format, use_gpu in GetTestConfigs():
self._ConstructAndTestGradientForConfig(data_format=data_format,
use_gpu=use_gpu, **kwargs)
def testInputGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 5, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(4, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
def testInputGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 5),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(7, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="VALID",
test_input=False)
def testInputGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 7, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=True)
def testFilterGradientValidPaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(4, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="VALID",
test_input=False)
def testInputGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 2, 2),
filter_shape=(3, 2, 1),
in_depth=2,
out_depth=1,
stride=1,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideOne(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(3, 6, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=1,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(6, 3, 4),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideTwo(self):
self.ConstructAndTestGradient(
batch=4,
input_shape=(7, 3, 5),
filter_shape=(2, 2, 2),
in_depth=2,
out_depth=3,
stride=2,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 3, 6),
filter_shape=(3, 3, 3),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=True)
def testFilterGradientSamePaddingStrideThree(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(9, 4, 7),
filter_shape=(4, 4, 4),
in_depth=2,
out_depth=3,
stride=3,
padding="SAME",
test_input=False)
def testInputGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=True)
def testFilterGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=False)
def testInputGradientKernelSizeMatchesInputSize(self):
self.ConstructAndTestGradient(
batch=2,
input_shape=(5, 4, 3),
filter_shape=(5, 4, 3),
in_depth=2,
out_depth=3,
stride=1,
padding="VALID",
test_input=True)
def disabledtestFilterGradientSamePaddingDifferentStrides(self):
self.ConstructAndTestGradient(
batch=1,
input_shape=(5, 8, 7),
filter_shape=(1, 2, 3),
in_depth=2,
out_depth=3,
stride=[2, 3, 1],
padding="SAME",
test_input=False)
if __name__ == "__main__":
test.main()
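# --- Illustrative sketch (not part of the tests): the VALID/SAME output-size
# formulas used by _ConstructAndTestGradientForConfig, as plain helpers with
# names local to this snippet.
import math

def valid_out(size, filter_size, stride):
# VALID: no padding; the window must fit entirely inside the input.
return int(math.ceil((size - filter_size + 1.0) / stride))

def same_out(size, stride):
# SAME: the input is padded so the output depends only on size and stride.
return int(math.ceil(float(size) / stride))

# Matches testConv3D2x2x2FilterStride2 (input depth 4, filter 2, stride 2).
assert valid_out(4, 2, 2) == 2
assert same_out(4, 2) == 2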
|
|
#|############################################################################################################
#
# NAME:
# cameraFrustum
#
# AUTHOR:
# Kiel Gnebba (ksg@kielgnebba.com)
#
# VERSION:
# v. 1.0
#
# DESCRIPTION:
# This script will...
# - allow you to select different types of objects that are inside the camera frustum
# - default is all visible for current frame
# - uncheck "current frame only" to run the whole timeline
#
# INSTALLATION:
# Copy the script into your scripts/ directory
# If you're unsure where that is, run this in the script editor:
# mel == internalVar -userScriptDir;
# python == import maya.cmds as cmds; print cmds.internalVar(userScriptDir=True)
#
# USAGE:
# To use just run:
# import maya.cmds as cmds
# scriptName = 'cameraFrustum'
# scriptsDir = cmds.internalVar(userScriptDir=True)
# execfile(scriptsDir + scriptName + '.py')
# cameraFrustum()
#
# Or you can call the file directly from where ever you put it
# execfile('C:/...where_ever.../cameraFrustum.py')
# cameraFrustum()
#
# HISTORY:
# 03/21/2011 -- v. 1.0
# - first release
#
#|############################################################################################################
#import
import time
import maya.cmds as cmds
import maya.OpenMaya as OpenMaya
import math
import maya.mel as mel
#*************************************************************************************************************
#*start cameraFrustum_loadCamera()
def cameraFrustum_loadCamera():
proc = 'cameraFrustum_loadCamera'
printString = '\n\n////////////////////////////////////////////////////////////////////////////////////////////\n'
printString += ('// ' + proc + ' details: \n//\n')
#-------------------------------------------------------------------------------------------------------------
#get selected camera
selCamShape = cmds.ls(sl=1, dag=1, type='shape', long=1)
#make sure only 1 camera is selected
if len(selCamShape)==0:
cmds.warning('nothing selected...select a camera and load')
elif len(selCamShape)>1:
cmds.warning('more than one thing selected...select a camera and load')
elif cmds.nodeType(selCamShape[0]) != 'camera':
cmds.warning('selected object \"' + selCamShape[0] + '\" is not a camera...select a camera and load')
else:
#remove any camera in list and load selected camera into ui
camera = selCamShape[0]
cmds.textScrollList('cameraFrustum_loadCameraTSL', e=1, ra=1)
cmds.textScrollList('cameraFrustum_loadCameraTSL', e=1, a=[camera])
cameraFrustum_refreshClip()
#-------------------------------------------------------------------------------------------------------------
#print
printString += '////////////////////////////////////////////////////////////////////////////////////////////\n\n'
#print printString
#print ('COMPLETE -- check script editor for details...\n')
#*************************************************************************************************************
#*end cameraFrustum_loadCamera()
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
#*************************************************************************************************************
#*start cameraFrustum_toggleClip()
def cameraFrustum_toggleClip():
proc = 'cameraFrustum_toggleClip'
printString = '\n\n////////////////////////////////////////////////////////////////////////////////////////////\n'
printString += ('// ' + proc + ' details: \n//\n')
#-------------------------------------------------------------------------------------------------------------
#get loaded camera
cameraLoaded = cmds.textScrollList('cameraFrustum_loadCameraTSL', q=1, ni=1)
#make sure a camera is loaded
if cameraLoaded==0:
cmds.warning('no camera loaded...select a camera and load')
else:
#determine if clipping plane is on or off
selCamShape = cmds.textScrollList('cameraFrustum_loadCameraTSL', q=1, ai=1)
clipDisplay = cmds.renderManip(selCamShape[0], q=1, cam=1)
clipDisplayOn = 0
if clipDisplay[3] == 0:
clipDisplayOn = 1
#toggle
cmds.renderManip(selCamShape[0], e=1, cam=(clipDisplay[0], clipDisplay[1], clipDisplay[2], clipDisplayOn, clipDisplay[4]))
#-------------------------------------------------------------------------------------------------------------
#print
printString += '////////////////////////////////////////////////////////////////////////////////////////////\n\n'
#print printString
#print ('COMPLETE -- check script editor for details...\n')
#*************************************************************************************************************
#*end cameraFrustum_toggleClip()
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
#*************************************************************************************************************
#*start cameraFrustum_toggleFrustum()
def cameraFrustum_toggleFrustum():
proc = 'cameraFrustum_toggleFrustum'
printString = '\n\n////////////////////////////////////////////////////////////////////////////////////////////\n'
printString += ('// ' + proc + ' details: \n//\n')
#-------------------------------------------------------------------------------------------------------------
#check if frustum exists and toggle on or off
prefix = 'frust_'
frustumGrpName = prefix + 'camera_frustum_all_grp'
if cmds.objExists(frustumGrpName)==1:
deleteFrustStuff = cmds.ls('frust*')
cmds.delete(deleteFrustStuff)
else:
cameraFrustum_build()
cameraFrustum_scale()
#-------------------------------------------------------------------------------------------------------------
#print
printString += '////////////////////////////////////////////////////////////////////////////////////////////\n\n'
#print printString
#print ('COMPLETE -- check script editor for details...\n')
#*************************************************************************************************************
#*end cameraFrustum_toggleFrustum()
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
#*************************************************************************************************************
#*start cameraFrustum_refreshClip()
def cameraFrustum_refreshClip():
proc = 'cameraFrustum_refreshClip'
printString = '\n\n////////////////////////////////////////////////////////////////////////////////////////////\n'
printString += ('// ' + proc + ' details: \n//\n')
#-------------------------------------------------------------------------------------------------------------
#get loaded camera
cameraLoaded = cmds.textScrollList('cameraFrustum_loadCameraTSL', q=1, ni=1)
#make sure a camera is loaded
if cameraLoaded==0:
cmds.warning('no camera loaded...select a camera and load')
else:
#get current clipping plane values
selCamShape = cmds.textScrollList('cameraFrustum_loadCameraTSL', q=1, ai=1)
selCamXform = cmds.listRelatives(selCamShape[0], p=1)
nearClipPlane = cmds.camera(selCamXform, q=1, ncp=1)
farClipPlane = cmds.camera(selCamXform, q=1, fcp=1)
#update ui
cmds.floatFieldGrp('cameraFrustum_nearFF', e=1, value1=nearClipPlane)
cmds.floatFieldGrp('cameraFrustum_farFF', e=1, value1=farClipPlane)
#-------------------------------------------------------------------------------------------------------------
#print
printString += '////////////////////////////////////////////////////////////////////////////////////////////\n\n'
#print printString
#print ('COMPLETE -- check script editor for details...\n')
#*************************************************************************************************************
#*end cameraFrustum_refreshClip()
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
#*************************************************************************************************************
#*start cameraFrustum_selectAllDataType()
def cameraFrustum_selectAllDataType():
proc = 'cameraFrustum_selectAllDataType'
printString = '\n\n////////////////////////////////////////////////////////////////////////////////////////////\n'
printString += ('// ' + proc + ' details: \n//\n')
#-------------------------------------------------------------------------------------------------------------
#select all data type check boxes
chkBoxList = cmds.gridLayout('cameraFrustum_typeGrid', q=1, ca=1)
    for eachChkBox in chkBoxList:
isSelected = cmds.checkBox(eachChkBox, q=1, value=1)
if isSelected == 0:
cmds.checkBox(eachChkBox, e=1, value=1)
#-------------------------------------------------------------------------------------------------------------
#print
printString += '////////////////////////////////////////////////////////////////////////////////////////////\n\n'
#print printString
#print ('COMPLETE -- check script editor for details...\n')
#*************************************************************************************************************
#*end cameraFrustum_selectAllDataType()
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
#*************************************************************************************************************
#*start cameraFrustum_deSelectAllDataType()
def cameraFrustum_deSelectAllDataType():
proc = 'cameraFrustum_deSelectAllDataType'
printString = '\n\n////////////////////////////////////////////////////////////////////////////////////////////\n'
printString += ('// ' + proc + ' details: \n//\n')
#-------------------------------------------------------------------------------------------------------------
    #deselect all data type check boxes
chkBoxList = cmds.gridLayout('cameraFrustum_typeGrid', q=1, ca=1)
    for eachChkBox in chkBoxList:
isSelected = cmds.checkBox(eachChkBox, q=1, value=1)
if isSelected == 1:
cmds.checkBox(eachChkBox, e=1, value=0)
#-------------------------------------------------------------------------------------------------------------
#print
printString += '////////////////////////////////////////////////////////////////////////////////////////////\n\n'
#print printString
#print ('COMPLETE -- check script editor for details...\n')
#*************************************************************************************************************
#*end cameraFrustum_deSelectAllDataType()
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
#*************************************************************************************************************
#*start cameraFrustum_build()
def cameraFrustum_build():
procString = 'cameraFrustum_build'
printString = '\n\n////////////////////////////////////////////////////////////////////////////////////////////\n'
printString += ('// ' + procString + ' details: \n//\n')
#-------------------------------------------------------------------------------------------------------------
#get current selection
selection = cmds.ls(sl=1)
#get loaded camera
cameraLoaded = cmds.textScrollList('cameraFrustum_loadCameraTSL', q=1, ni=1)
#make sure a camera is loaded
if cameraLoaded==0:
cmds.error('no camera loaded...select a camera and load')
else:
        #create the frustum only if one doesn't already exist
selCamShape = cmds.textScrollList('cameraFrustum_loadCameraTSL', q=1, ai=1)
selCamXform = cmds.listRelatives(selCamShape[0], p=1)
prefix = 'frust_'
frustumGrpName = prefix + 'camera_frustum_all_grp'
if cmds.objExists(frustumGrpName)==0:
#create main grp
            frustumMainGrp = cmds.group(em=1, n=frustumGrpName)
cmds.setAttr(frustumGrpName + '.tx', lock=1, keyable=0, channelBox=0)
cmds.setAttr(frustumGrpName + '.ty', lock=1, keyable=0, channelBox=0)
cmds.setAttr(frustumGrpName + '.tz', lock=1, keyable=0, channelBox=0)
cmds.setAttr(frustumGrpName + '.rx', lock=1, keyable=0, channelBox=0)
cmds.setAttr(frustumGrpName + '.ry', lock=1, keyable=0, channelBox=0)
cmds.setAttr(frustumGrpName + '.rz', lock=1, keyable=0, channelBox=0)
cmds.setAttr(frustumGrpName + '.sx', lock=1, keyable=0, channelBox=0)
cmds.setAttr(frustumGrpName + '.sy', lock=1, keyable=0, channelBox=0)
cmds.setAttr(frustumGrpName + '.sz', lock=1, keyable=0, channelBox=0)
cmds.setAttr(frustumGrpName + '.v', lock=1, keyable=0, channelBox=0)
#create frustum geo
frustumGeo = cmds.polyCube(w=2, h=2, d=2, n=prefix + 'camera_frustum_geo')
cmds.delete(frustumGeo[0], constructionHistory=True)
cmds.parent(frustumGeo[0], frustumMainGrp)
#load plugin "nearestPointOnMesh.mll" if needed and connect
plugin = cmds.pluginInfo('nearestPointOnMesh.mll', q=1, l=1)
if plugin==0:
cmds.loadPlugin('nearestPointOnMesh.mll')
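            #the nearestPointOnMesh node returns, for any input point, the closest
            #point on the frustum mesh and the surface normal there; both outputs
            #are read back in cameraFrustum_check() to classify points as inside
            #or outside the frustum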
nearNodeName = prefix + 'npomNode'
npomNode = cmds.createNode('nearestPointOnMesh', n=nearNodeName)
cmds.connectAttr(frustumGeo[0] + '.worldMesh', npomNode + '.inMesh')
#create clusters
cmds.select(frustumGeo[0] + '.vtx[4:7]', r=1)
nearCluster = cmds.cluster(n=prefix + 'camera_nearFrustum_cluster')
cmds.select(frustumGeo[0] + '.vtx[0:3]', r=1)
farCluster = cmds.cluster(n=prefix + 'camera_farFrustum_cluster')
#create near/far/camera locs
cameraLoc = cmds.spaceLocator(p=(0, 0, 0), n=prefix + 'camera_loc')
cmds.parent(cameraLoc[0], frustumMainGrp)
nearLoc = cmds.spaceLocator(p=(0, 0, 0), n=prefix + 'camera_nearFrustum_loc')
cmds.move(0, 0, -1)
farLoc = cmds.spaceLocator(p=(0, 0, 0), n=prefix + 'camera_farFrustum_loc')
cmds.move(0, 0, 1)
#parent clusters under loc -- parent locs under camera loc
cmds.parent(nearCluster[1], nearLoc[0])
cmds.parent(farCluster[1], farLoc[0])
cmds.parent(nearLoc[0], cameraLoc[0])
cmds.parent(farLoc[0], cameraLoc[0])
#constrain camera loc to camera
cmds.parentConstraint(selCamXform, cameraLoc, weight=1)
#reselect original selection
if len(selection) > 0:
cmds.select(selection, r=1)
#-------------------------------------------------------------------------------------------------------------
#print
printString += '////////////////////////////////////////////////////////////////////////////////////////////\n\n'
#print printString
#print ('COMPLETE -- check script editor for details...\n')
#*************************************************************************************************************
#*end cameraFrustum_build()
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
#*************************************************************************************************************
#*start cameraFrustum_scale()
def cameraFrustum_scale():
procString = 'cameraFrustum_scale'
printString = '\n\n////////////////////////////////////////////////////////////////////////////////////////////\n'
printString += ('// ' + procString + ' details: \n//\n')
#-------------------------------------------------------------------------------------------------------------
#make sure the frustum geo exists
prefix = 'frust_'
frustumGrpName = prefix + 'camera_frustum_all_grp'
if cmds.objExists(frustumGrpName)==1:
#get loaded camera
prefix = 'frust_'
selCamShape = cmds.textScrollList('cameraFrustum_loadCameraTSL', q=1, ai=1)
selCamXform = cmds.listRelatives(selCamShape[0], p=1)
#get camera data
verticalFieldOfView = cmds.camera(selCamXform, q=1, vfv=1)
horizontalFieldOfView = cmds.camera(selCamXform, q=1, hfv=1)
lensSqueezeRatio = cmds.camera(selCamXform, q=1, lsr=1)
cameraScale = cmds.camera(selCamXform, q=1, cs=1)
nearClipPlane = cmds.floatFieldGrp('cameraFrustum_nearFF', q=1, value1=1)
farClipPlane = cmds.floatFieldGrp('cameraFrustum_farFF', q=1, value1=1)
#convert degrees to radians if needed
angleUnits = cmds.currentUnit(q=1, a=1)
if angleUnits == 'deg':
            verticalFieldOfView = math.radians(verticalFieldOfView)
            horizontalFieldOfView = math.radians(horizontalFieldOfView)
#get X/Y coordinates in 2d
verticalAngle = math.tan(verticalFieldOfView *.5)
horizontalAngle = math.tan(horizontalFieldOfView *.5)
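        #worked example: for a 54 degree horizontal field of view,
        #horizontalAngle = tan(27 deg) ~= 0.51, so the frustum half-width at a
        #distance d in front of the camera is roughly 0.51 * d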
        #apply camera lens squeeze to the half-angle tangent (applying it to the
        #field-of-view variable after the tangents are computed would have no
        #effect on the scale values below)
        horizontalAngle = horizontalAngle * lensSqueezeRatio
        #apply camera scale
        verticalAngle = verticalAngle * cameraScale
        horizontalAngle = horizontalAngle * cameraScale
#set camera near and far locs
        nearClipPlaneOffset = 0
        cmds.setAttr('frust_camera_nearFrustum_loc.translateZ', -nearClipPlane+nearClipPlaneOffset)
cmds.setAttr('frust_camera_farFrustum_loc.translateZ', -farClipPlane)
#get maya linear working units
linearUnits = cmds.currentUnit(q=1, l=1)
factor = 1.0
if 'mm' == linearUnits:
factor = 0.1
if 'm' == linearUnits:
factor = 100
if 'in' == linearUnits:
factor = 2.54
if 'ft' == linearUnits:
factor = 30.48
if 'yd' == linearUnits:
factor = 91.44
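        #the factors above convert the working linear unit to centimeters,
        #Maya's internal unit -- e.g. 1 ft = 30.48 cm, 1 m = 100 cm, 1 mm = 0.1 cm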
#get the near and far new scale
        nearScaleX = math.fabs(((nearClipPlane-nearClipPlaneOffset)*horizontalAngle)*factor)
        nearScaleY = math.fabs(((nearClipPlane-nearClipPlaneOffset)*verticalAngle)*factor)
farScaleX = math.fabs((farClipPlane*horizontalAngle)*factor)
farScaleY = math.fabs((farClipPlane*verticalAngle)*factor)
#set scale
scaleGain = cmds.floatFieldGrp('cameraFrustum_scaleGainFF', q=1, value1=1)
cmds.setAttr('frust_camera_nearFrustum_loc.scaleX', nearScaleX+(scaleGain))
cmds.setAttr('frust_camera_nearFrustum_loc.scaleY', nearScaleY+(scaleGain))
cmds.setAttr('frust_camera_farFrustum_loc.scaleX', farScaleX+(scaleGain))
cmds.setAttr('frust_camera_farFrustum_loc.scaleY', farScaleY+(scaleGain))
#-------------------------------------------------------------------------------------------------------------
#print
printString += '////////////////////////////////////////////////////////////////////////////////////////////\n\n'
#print printString
#print ('COMPLETE -- check script editor for details...\n')
#*************************************************************************************************************
#*end cameraFrustum_scale()
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
#*************************************************************************************************************
#*start cameraFrustum_check()
def cameraFrustum_check():
procString = 'cameraFrustum_check'
printString = '\n\n////////////////////////////////////////////////////////////////////////////////////////////\n'
printString += ('// ' + procString + ' details: \n//\n')
startTime = time.time()
gMainProgressBar = mel.eval('$tmp = $gMainProgressBar')
isCancelled=0
#-------------------------------------------------------------------------------------------------------------
#get current panel
curPanel = cmds.getPanel(withFocus=1)
#determine current show settings
currentShowSettings=[]
if curPanel[0:10] == 'modelPanel':
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, nurbsCurves=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, nurbsSurfaces=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, polymeshes=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, subdivSurfaces=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, planes=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, lights=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, cameras=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, joints=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, ikHandles=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, deformers=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, dynamics=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, fluids=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, hairSystems=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, follicles=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, nCloths=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, nParticles=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, nRigids=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, dynamicConstraints=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, locators=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, dimensions=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, pivots=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, handles=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, textures=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, strokes=1))
currentShowSettings.append (cmds.modelEditor(curPanel, q=1, manipulators=1))
#set all to none
cmds.modelEditor(curPanel, e=1, allObjects=0)
#current camera
selCamShape = cmds.textScrollList('cameraFrustum_loadCameraTSL', q=1, ai=1)
selCamXform = cmds.listRelatives(selCamShape[0], p=1, fullPath=1)
    #determine which actions are enabled
actionInvert = cmds.checkBox('cameraFrustum_actionInvertCB', q=1, value=1)
actionSelect = cmds.checkBox('cameraFrustum_actionSelectCB', q=1, value=1)
actionHide = cmds.checkBox('cameraFrustum_actionHideCB', q=1, value=1)
actionDelete = cmds.checkBox('cameraFrustum_actionDeleteCB', q=1, value=1)
actionLayer = cmds.checkBox('cameraFrustum_actionDisplayLayerCB', q=1, value=1)
actionKeep = cmds.checkBox('cameraFrustum_actionKeepCB', q=1, value=1)
    #determine the search limits
limitEverything = cmds.checkBox('cameraFrustum_limitsEverythingCB', q=1, value=1)
limitSelection = cmds.checkBox('cameraFrustum_limitsSelectionCB', q=1, value=1)
limitVisible = cmds.checkBox('cameraFrustum_limitsVisibleCB', q=1, value=1)
limitRL = cmds.checkBox('cameraFrustum_limitsRLCB', q=1, value=1)
limitGrp = cmds.checkBox('cameraFrustum_limitsInsideGrpCB', q=1, value=1)
limitGrpName = cmds.textField('cameraFrustum_limitsInsideGrpTXT', q=1, text=1)
limitWild = cmds.checkBox('cameraFrustum_limitsWildCardCB', q=1, value=1)
limitWildName = cmds.textField('cameraFrustum_limitsWildCardTXT', q=1, text=1)
    #determine which data types to check
typeMesh = cmds.checkBox('cameraFrustum_typeMeshCB', q=1, value=1)
typeLocators = cmds.checkBox('cameraFrustum_typeLocatorsCB', q=1, value=1)
typeNurbs = cmds.checkBox('cameraFrustum_typeNurbsCB', q=1, value=1)
typeLights = cmds.checkBox('cameraFrustum_typeLightsCB', q=1, value=1)
typeCurves = cmds.checkBox('cameraFrustum_typeCurvesCB', q=1, value=1)
typeDynamics = cmds.checkBox('cameraFrustum_typeDynamicsCB', q=1, value=1)
typeJoints = cmds.checkBox('cameraFrustum_typeJointsCB', q=1, value=1)
typeCameras = cmds.checkBox('cameraFrustum_typeCamerasCB', q=1, value=1)
#create all selection lists
    allMesh = []
    allNurbs = []
    allCurves = []
    allLocators = []
    allLights = []
    allDynamics = []
    allJoints = []
    allCameras = []
    allSelection = []
#get select objects
if limitSelection == 1:
allSelection = cmds.ls(sl=1, long=1)
cmds.select(cl=1)
elif limitVisible == 1:
allSelection = cmds.ls(visible=1, transforms=1, long=1)
elif limitRL == 1:
currentRenderLayer = cmds.editRenderLayerGlobals(q=1, currentRenderLayer=1)
allSelection = cmds.editRenderLayerMembers(currentRenderLayer, query=True, fullNames=1)
elif limitGrp == 1:
if cmds.objExists(limitGrpName)==1:
allSelection = cmds.listRelatives(limitGrpName, allDescendents=1, fullPath=1)
else:
cmds.warning('no group named: ' + str(limitGrpName))
elif limitWild == 1:
selectString = 'catch(`select -r "' + limitWildName + '"`)'
mel.eval(selectString)
        wildSelection = cmds.ls(long=1, sl=1)
cmds.select(cl=1)
if len(wildSelection)>0:
allSelection = wildSelection
else:
            cmds.warning('nothing matching this wild card: ' + str(limitWildName))
#get all meshes
if typeMesh == 1:
if limitEverything == 1:
allMesh = cmds.ls(long=1, type='mesh')
if limitSelection == 1 or limitVisible == 1 or limitRL == 1 or limitGrp == 1 or limitWild == 1:
for eachSelection in allSelection:
cmds.select(eachSelection, r=1)
eachType = cmds.ls(sl=1, dag=1, type='shape', long=1)
if len(eachType) > 0:
nodeType = cmds.nodeType(eachType[0])
if nodeType == 'mesh':
allMesh.append(eachType[0])
#get all nurbs
if typeNurbs == 1:
if limitEverything == 1:
allNurbs = cmds.ls(long=1, type='nurbsSurface')
if limitSelection == 1 or limitVisible == 1 or limitRL == 1 or limitGrp == 1 or limitWild == 1:
for eachSelection in allSelection:
cmds.select(eachSelection, r=1)
eachType = cmds.ls(sl=1, dag=1, type='shape')
if len(eachType) > 0:
nodeType = cmds.nodeType(eachType[0])
if nodeType == 'nurbsSurface':
allNurbs.append(eachType[0])
#get all curves
if typeCurves == 1:
if limitEverything == 1:
allCurves = cmds.ls(long=1, type='nurbsCurve')
if limitSelection == 1 or limitVisible == 1 or limitRL == 1 or limitGrp == 1 or limitWild == 1:
for eachSelection in allSelection:
cmds.select(eachSelection, r=1)
eachType = cmds.ls(sl=1, dag=1, type='shape', long=1)
if len(eachType) > 0:
nodeType = cmds.nodeType(eachType[0])
if nodeType == 'nurbsCurve':
allCurves.append(eachType[0])
#get all locators
if typeLocators == 1:
if limitEverything == 1:
allLocators = cmds.ls(long=1, type='locator')
if limitSelection == 1 or limitVisible == 1 or limitRL == 1 or limitGrp == 1 or limitWild == 1:
for eachSelection in allSelection:
cmds.select(eachSelection, r=1)
eachType = cmds.ls(sl=1, dag=1, type='shape', long=1)
if len(eachType) > 0:
nodeType = cmds.nodeType(eachType[0])
if nodeType == 'locator':
allLocators.append(eachType[0])
#get all joints
if typeJoints == 1:
if limitEverything == 1:
allJoints = cmds.ls(long=1, type='joint')
if limitSelection == 1 or limitVisible == 1 or limitRL == 1 or limitGrp == 1 or limitWild == 1:
for eachSelection in allSelection:
cmds.select(eachSelection, r=1)
eachType = cmds.ls(sl=1)
if len(eachType) > 0:
nodeType = cmds.nodeType(eachType[0])
if nodeType == 'joint':
allJoints.append(eachType[0])
#get all cameras
if typeCameras == 1:
if limitEverything == 1:
allCameras = cmds.ls(long=1, cameras=1)
if limitSelection == 1 or limitVisible == 1 or limitRL == 1 or limitGrp == 1 or limitWild == 1:
for eachSelection in allSelection:
cmds.select(eachSelection, r=1)
eachType = cmds.ls(sl=1, dag=1, type='shape', long=1)
if len(eachType) > 0:
nodeType = cmds.nodeType(eachType[0])
if nodeType == 'camera':
allCameras.append(eachType[0])
#get all lights
if typeLights == 1:
if limitEverything == 1:
allLights = cmds.ls(long=1, lights=1)
if limitSelection == 1 or limitVisible == 1 or limitRL == 1 or limitGrp == 1 or limitWild == 1:
for eachSelection in allSelection:
cmds.select(eachSelection, r=1)
eachType = cmds.ls(sl=1, dag=1, type='shape', long=1)
if len(eachType) > 0:
nodeType = cmds.nodeType(eachType[0])
                    if nodeType in ('ambientLight', 'directionalLight', 'pointLight', 'spotLight', 'areaLight', 'volumeLight'):
allLights.append(eachType[0])
#get all dynamics
if typeDynamics == 1:
if limitEverything == 1:
allEmitters = cmds.ls(long=1, type='pointEmitter')
allnParticle = cmds.ls(long=1, type='nParticle')
allParticles = cmds.ls(long=1, type='particle')
allAirField = cmds.ls(long=1, type='airField')
allDragField = cmds.ls(long=1, type='dragField')
allGravityField = cmds.ls(long=1, type='gravityField')
allNewtonField = cmds.ls(long=1, type='newtonField')
allRadialField = cmds.ls(long=1, type='radialField')
allTurbulenceField = cmds.ls(long=1, type='turbulenceField')
allUniformField = cmds.ls(long=1, type='uniformField')
allVortexField = cmds.ls(long=1, type='vortexField')
allVolumeAxisField = cmds.ls(long=1, type='volumeAxisField')
allFluids = cmds.ls(long=1, type='fluidShape')
allDynamics = allEmitters + allnParticle + allParticles + allAirField + allDragField + allGravityField + allNewtonField + allRadialField + allTurbulenceField + allUniformField + allVortexField + allVolumeAxisField + allFluids
if limitSelection == 1 or limitVisible == 1 or limitRL == 1 or limitGrp == 1 or limitWild == 1:
for eachSelection in allSelection:
cmds.select(eachSelection, r=1)
eachType = cmds.ls(sl=1, dag=1, type='shape', long=1)
if len(eachType) > 0:
nodeType = cmds.nodeType(eachType[0])
                    if nodeType in ('nParticle', 'particle', 'fluidShape'):
allDynamics.append(eachType[0])
else:
nodeType = cmds.nodeType(eachSelection)
                        if nodeType in ('pointEmitter', 'airField', 'dragField', 'gravityField', 'newtonField', 'radialField', 'turbulenceField', 'uniformField', 'vortexField', 'volumeAxisField'):
allDynamics.append(eachSelection)
    #lists for objects inside the frustum
    insideFrustum = []
    insideFrustumAll = []
    allTypes = list(set(allMesh + allNurbs + allCurves + allLocators + allJoints + allCameras + allLights + allDynamics))
    #keep an independent copy for the invert action -- a plain assignment here
    #would alias allTypes, which is mutated inside the check loop below
    allTypesStart = list(allTypes)
#get start, end and increment frame
startFrame = cmds.intField('cameraFrustum_startIF', q=1, value=1)
endFrame = cmds.intField('cameraFrustum_endIF', q=1, value=1)
frameIncrement = cmds.intField('cameraFrustum_keyIF', q=1, value=1)
currentFrame = cmds.currentTime(q=1)
currentFrameOnly = cmds.checkBox('cameraFrustum_currentFrameCB', q=1, value=1)
if currentFrameOnly == 1:
startFrame = currentFrame
endFrame = currentFrame
frameIncrement = 1
#progress bar
bakingText = 'Frustum Checking......(Press ESC To Cancel)'
progressAmount = int(endFrame-startFrame)
if currentFrameOnly == 1:
progressAmount=1
cmds.progressBar('cameraFrustum_progBar', e=1, maxValue=progressAmount)
cmds.text('cameraFrustum_cancelTxt', e=1, label=bakingText)
cmds.progressBar('cameraFrustum_progBar', e=1, progress=0)
cmds.progressBar(gMainProgressBar, edit=1, beginProgress=1, isInterruptable=1, status='Frustum Checking......', maxValue=progressAmount)
if len(allTypes)>0:
t = startFrame
while t <= endFrame:
cmds.currentTime(t, e=1)
#for each in allTypes:
e=0
while e < len(allTypes):
each = allTypes[e]
                #use the parent transform if the node has one, otherwise use the node itself
                objParent = cmds.listRelatives(each, p=1, f=1)
                if objParent is None:
                    eachObject = str(each)
                else:
                    eachObject = str(objParent[0])
                center = cmds.xform(eachObject, q=1, ws=1, t=1)
                selBoundingBox = cmds.xform(eachObject, q=1, ws=1, boundingBox=1)
                #world-space pivot plus the eight corners of the bounding box
                #(xform -boundingBox returns [xmin, ymin, zmin, xmax, ymax, zmax])
                xMin, yMin, zMin, xMax, yMax, zMax = selBoundingBox
                allBboxPoints = [
                    center[0], center[1], center[2],
                    xMin, yMin, zMin,
                    xMax, yMin, zMin,
                    xMin, yMax, zMin,
                    xMax, yMax, zMin,
                    xMin, yMin, zMax,
                    xMax, yMin, zMax,
                    xMin, yMax, zMax,
                    xMax, yMax, zMax]
count = 0
i=0
while i < 27:
allBboxPointsVec = [allBboxPoints[i], allBboxPoints[i+1], allBboxPoints[i+2]]
cmds.setAttr ('frust_npomNode.inPositionX', allBboxPointsVec[0])
cmds.setAttr ('frust_npomNode.inPositionY', allBboxPointsVec[1])
cmds.setAttr ('frust_npomNode.inPositionZ', allBboxPointsVec[2])
                    closestPositionX = cmds.getAttr('frust_npomNode.positionX')
                    closestPositionY = cmds.getAttr('frust_npomNode.positionY')
                    closestPositionZ = cmds.getAttr('frust_npomNode.positionZ')
                    # build vectors for the bbox point and its closest point on the frustum mesh
                    vectorA = OpenMaya.MVector(allBboxPointsVec[0], allBboxPointsVec[1], allBboxPointsVec[2])
                    vectorB = OpenMaya.MVector(closestPositionX, closestPositionY, closestPositionZ)
vectorC = (vectorA - vectorB)
vectorC.normalize()
position = [vectorC.x, vectorC.y, vectorC.z]
norX = cmds.getAttr('frust_npomNode.normalX')
norY = cmds.getAttr('frust_npomNode.normalY')
norZ = cmds.getAttr('frust_npomNode.normalZ')
vectorD = OpenMaya.MVector(norX, norY, norZ)
vectorD.normalize()
normal = [vectorD.x, vectorD.y, vectorD.z]
dotProd = (normal[0]*position[0]) + (normal[1]*position[1]) + (normal[2]*position[2])
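                    #the sign of this dot product classifies the point: vectorC runs
                    #from the closest frustum surface point out to the tested point,
                    #so its alignment with the surface normal tells which side of the
                    #frustum the point is on (dotProd >= 0 is treated as inside here,
                    #which assumes the frustum geo's normals end up facing inward
                    #after the cluster deformation)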
#if actionInvert==0:
if dotProd >= 0:
count+=1
i=27
#else:
#if dotProd <= 0:
#count+=1
#i=27
i+=3
if count>0:
insideFrustum.append(each)
allTypes.remove(each)
e-=1
e+=1
#progress bar update or cancel
if cmds.progressBar(gMainProgressBar, q=1, isCancelled=1):
isCancelled=1
cmds.progressBar('cameraFrustum_progBar', e=1, progress=0)
cmds.text('cameraFrustum_cancelTxt', e=1, label='')
break
cmds.progressBar('cameraFrustum_progBar', e=1, step=1)
cmds.progressBar(gMainProgressBar, edit=1, step=1)
t+=frameIncrement
#invert frustum
if actionInvert==0:
insideFrustumAll = insideFrustum
else:
insideFrustumAll = allTypesStart
for each in insideFrustum:
if each in allTypesStart:
insideFrustumAll.remove(each)
#remove camera if in list
if selCamShape[0] in insideFrustumAll:
insideFrustumAll.remove(str(selCamShape[0]))
if selCamXform[0] in insideFrustumAll:
insideFrustumAll.remove(str(selCamXform[0]))
#action
if actionSelect == 1:
if len(insideFrustumAll)>0:
cmds.select(insideFrustumAll, r=1)
else:
cmds.select(cl=1)
#delete all frust* stuff
if actionKeep == 0:
deleteFrustStuff = cmds.ls('frust*')
cmds.delete(deleteFrustStuff)
#return show selection
if curPanel[0:10] == 'modelPanel':
cmds.modelEditor(curPanel, e=1, nurbsCurves=currentShowSettings[0])
cmds.modelEditor(curPanel, e=1, nurbsSurfaces=currentShowSettings[1])
cmds.modelEditor(curPanel, e=1, polymeshes=currentShowSettings[2])
cmds.modelEditor(curPanel, e=1, subdivSurfaces=currentShowSettings[3])
cmds.modelEditor(curPanel, e=1, planes=currentShowSettings[4])
cmds.modelEditor(curPanel, e=1, lights=currentShowSettings[5])
cmds.modelEditor(curPanel, e=1, cameras=currentShowSettings[6])
cmds.modelEditor(curPanel, e=1, joints=currentShowSettings[7])
cmds.modelEditor(curPanel, e=1, ikHandles=currentShowSettings[8])
cmds.modelEditor(curPanel, e=1, deformers=currentShowSettings[9])
cmds.modelEditor(curPanel, e=1, dynamics=currentShowSettings[10])
cmds.modelEditor(curPanel, e=1, fluids=currentShowSettings[11])
cmds.modelEditor(curPanel, e=1, hairSystems=currentShowSettings[12])
cmds.modelEditor(curPanel, e=1, follicles=currentShowSettings[13])
cmds.modelEditor(curPanel, e=1, nCloths=currentShowSettings[14])
cmds.modelEditor(curPanel, e=1, nParticles=currentShowSettings[15])
cmds.modelEditor(curPanel, e=1, nRigids=currentShowSettings[16])
cmds.modelEditor(curPanel, e=1, dynamicConstraints=currentShowSettings[17])
cmds.modelEditor(curPanel, e=1, locators=currentShowSettings[18])
cmds.modelEditor(curPanel, e=1, dimensions=currentShowSettings[19])
cmds.modelEditor(curPanel, e=1, pivots=currentShowSettings[20])
cmds.modelEditor(curPanel, e=1, handles=currentShowSettings[21])
cmds.modelEditor(curPanel, e=1, textures=currentShowSettings[22])
cmds.modelEditor(curPanel, e=1, strokes=currentShowSettings[23])
cmds.modelEditor(curPanel, e=1, manipulators=currentShowSettings[24])
#timer
endTime = time.time()
totalTime = (endTime - startTime)
timeStr = ' seconds'
if totalTime > 60:
totalTime /= 60
timeStr = ' minutes'
if isCancelled == 0:
cmds.text('cameraFrustum_cancelTxt', e=1, label='Total Time: ' + str(totalTime) + timeStr)
else:
cmds.text('cameraFrustum_cancelTxt', e=1, label='Cancelled -- Total Time: ' + str(totalTime) + timeStr)
#progress bar end
cmds.progressBar('cameraFrustum_progBar', e=1, progress=0)
cmds.progressBar(gMainProgressBar, edit=1, endProgress=1)
#-------------------------------------------------------------------------------------------------------------
#print
#endTime = time.time()
#totalTime = endTime - startTime
#printString += '// total time: ' + str(totalTime) + ' seconds\n'
#printString += '////////////////////////////////////////////////////////////////////////////////////////////\n\n'
#print printString
#print ('COMPLETE -- check script editor for details...\n')
#*************************************************************************************************************
#*end cameraFrustum_check()
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
#*************************************************************************************************************
#*start cameraFrustum()
def cameraFrustum():
procString = 'cameraFrustum'
printString = '\n\n////////////////////////////////////////////////////////////////////////////////////////////\n'
printString += ('// ' + procString + ' details: \n//\n')
#-------------------------------------------------------------------------------------------------------------
#window creation
if cmds.window('cameraFrustum_win', exists=1)==1:
cmds.deleteUI('cameraFrustum_win')
cmds.window('cameraFrustum_win', title="Camera Frustum Checking", resizeToFitChildren=1, maximizeButton=0, sizeable=1)
cmds.formLayout('cameraFrustum_mainForm')
cmds.columnLayout('cameraFrustum_mainCol', adj=1, p='cameraFrustum_mainForm')
cmds.text('cameraFrustum_cancelTxt', font='tinyBoldLabelFont', label='', align='center', w=60, p='cameraFrustum_mainForm')
cmds.progressBar('cameraFrustum_progBar', maxValue=100, h=10, p='cameraFrustum_mainForm')
cmds.button('cameraFrustum_executeButton', l='Build List', c='cameraFrustum_refreshClip(); cameraFrustum_build(); cameraFrustum_scale(); cameraFrustum_check()', h=10, p='cameraFrustum_mainForm')
#load camera frameLayout
cmds.frameLayout('cameraFrustum_loadCameraFrame', l='Load Camera', marginHeight=5, collapsable=1, collapse=0, borderStyle='etchedIn', p='cameraFrustum_mainCol')
cmds.formLayout('cameraFrustum_loadCameraForm', p='cameraFrustum_loadCameraFrame')
cmds.textScrollList('cameraFrustum_loadCameraTSL', numberOfRows=1, annotation='double-click to select camera', doubleClickCommand='selCam = cmds.textScrollList(\'cameraFrustum_loadCameraTSL\', q=1, selectItem=1); cmds.select(selCam, r=1)', p='cameraFrustum_loadCameraForm')
cmds.button('cameraFrustum_loadCameraButton', l='load', w=80, h=30, c='cameraFrustum_loadCamera()', p='cameraFrustum_loadCameraForm')
cmds.formLayout('cameraFrustum_loadCameraForm', e=1,
attachForm=[
('cameraFrustum_loadCameraButton', 'top', 5),
('cameraFrustum_loadCameraButton', 'left', 40),
('cameraFrustum_loadCameraButton', 'bottom', 5),
('cameraFrustum_loadCameraTSL', 'top', 5),
('cameraFrustum_loadCameraTSL', 'bottom', 5),
('cameraFrustum_loadCameraTSL', 'right', 40)
],
attachControl=[
('cameraFrustum_loadCameraTSL', 'left', 40, 'cameraFrustum_loadCameraButton')
])
#build frustum frameLayout
cmds.frameLayout('cameraFrustum_buildFrustumFrame', l='Build Camera Frustum', marginHeight=5, collapsable=1, collapse=0, borderStyle='etchedIn', p='cameraFrustum_mainCol')
cmds.formLayout('cameraFrustum_buildFrustumForm', p='cameraFrustum_buildFrustumFrame')
cmds.button('cameraFrustum_refreshButton', l='refresh', w=45, h=35, c='cameraFrustum_refreshClip()', p='cameraFrustum_buildFrustumForm')
cmds.floatFieldGrp('cameraFrustum_nearFF', el=' Near Clip Plane', value1=0, p='cameraFrustum_buildFrustumForm')
cmds.floatFieldGrp('cameraFrustum_farFF', el=' Far Clip Plane', value1=1000, p='cameraFrustum_buildFrustumForm')
cmds.floatFieldGrp('cameraFrustum_scaleGainFF', el=' Frustum Scale Gain', precision=3, value1=5, changeCommand='cameraFrustum_refreshClip();cameraFrustum_scale()', p='cameraFrustum_buildFrustumForm')
cmds.button('cameraFrustum_toggleClipButton', l='clipping planes', w=90, h=20, c='cameraFrustum_toggleClip()', p='cameraFrustum_buildFrustumForm')
cmds.button('cameraFrustum_buildFrustumButton', l='build frustum', w=80, h=20, c='cameraFrustum_toggleFrustum()', p='cameraFrustum_buildFrustumForm')
cmds.formLayout('cameraFrustum_buildFrustumForm', e=1,
attachForm=[
('cameraFrustum_refreshButton', 'top', 10),
('cameraFrustum_refreshButton', 'left', 25),
('cameraFrustum_nearFF', 'top', 4),
('cameraFrustum_nearFF', 'right', 20),
('cameraFrustum_nearFF', 'left', 92),
('cameraFrustum_farFF', 'left', 92),
('cameraFrustum_scaleGainFF', 'top', 4),
('cameraFrustum_scaleGainFF', 'left', 290),
('cameraFrustum_toggleClipButton', 'left', 290)
],
attachControl=[
('cameraFrustum_farFF', 'top', 5, 'cameraFrustum_nearFF'),
('cameraFrustum_toggleClipButton', 'top', 4, 'cameraFrustum_scaleGainFF'),
('cameraFrustum_buildFrustumButton', 'top', 4, 'cameraFrustum_scaleGainFF'),
('cameraFrustum_buildFrustumButton', 'left', 10, 'cameraFrustum_toggleClipButton')
])
#frameRange frameLayout
cmds.frameLayout('cameraFrustum_frameRangeFrame', l='Frame Range Options', marginHeight=5, collapsable=1, collapse=0, borderStyle='etchedIn', p='cameraFrustum_mainCol')
cmds.formLayout('cameraFrustum_frameRangeForm', p='cameraFrustum_frameRangeFrame')
cmds.button('cameraFrustum_timeButton', l='time', w=35, h=35, c='cmds.intField(\'cameraFrustum_startIF\', e=1, value=cmds.playbackOptions(q=1, min=1));cmds.intField(\'cameraFrustum_endIF\', e=1, value=cmds.playbackOptions(q=1, max=1))', p='cameraFrustum_frameRangeForm')
cmds.text('cameraFrustum_startFrameTxt', l='Start Frame', w=60, p='cameraFrustum_frameRangeForm')
cmds.text('cameraFrustum_endFrameTxt', l='End Frame', w=60, p='cameraFrustum_frameRangeForm')
cmds.intField('cameraFrustum_startIF', value=cmds.playbackOptions(q=1, min=1), editable=1, w=60, p='cameraFrustum_frameRangeForm')
cmds.intField('cameraFrustum_endIF', value=cmds.playbackOptions(q=1, max=1), editable=1, w=60, p='cameraFrustum_frameRangeForm')
cmds.checkBox('cameraFrustum_currentFrameCB', l='Current Frame Only', value=1, align='left', p='cameraFrustum_frameRangeForm')
cmds.text('cameraFrustum_keyFrameTxt', l='Check Every', w=60, p='cameraFrustum_frameRangeForm')
cmds.intField('cameraFrustum_keyIF', value=1, editable=1, minValue=1, w=60, p='cameraFrustum_frameRangeForm')
cmds.formLayout('cameraFrustum_frameRangeForm', e=1,
attachForm=[
('cameraFrustum_timeButton', 'top', 8),
('cameraFrustum_timeButton', 'left', 45),
('cameraFrustum_startFrameTxt', 'top', 5),
('cameraFrustum_startIF', 'top', 2),
('cameraFrustum_currentFrameCB', 'top', 4)
],
attachControl=[
('cameraFrustum_startFrameTxt', 'left', 30, 'cameraFrustum_timeButton'),
('cameraFrustum_endFrameTxt', 'top', 10, 'cameraFrustum_startFrameTxt'),
('cameraFrustum_endFrameTxt', 'left', 30, 'cameraFrustum_timeButton'),
('cameraFrustum_startIF', 'left', 5, 'cameraFrustum_startFrameTxt'),
('cameraFrustum_endIF', 'top', 4, 'cameraFrustum_startIF'),
('cameraFrustum_endIF', 'left', 5, 'cameraFrustum_endFrameTxt'),
('cameraFrustum_currentFrameCB', 'left', 56, 'cameraFrustum_startIF'),
('cameraFrustum_keyFrameTxt', 'left', 75, 'cameraFrustum_endIF'),
('cameraFrustum_keyFrameTxt', 'top', 9, 'cameraFrustum_currentFrameCB'),
('cameraFrustum_keyIF', 'top', 5, 'cameraFrustum_currentFrameCB'),
('cameraFrustum_keyIF', 'left', 5, 'cameraFrustum_keyFrameTxt')
])
#return data frameLayout
cmds.frameLayout('cameraFrustum_returnDataFrame', l='Return Data', marginHeight=5, collapsable=1, collapse=0, borderStyle='etchedIn', p='cameraFrustum_mainCol')
cmds.formLayout('cameraFrustum_returnDataForm', p='cameraFrustum_returnDataFrame')
cmds.columnLayout('cameraFrustum_actionCol', adj=1, p='cameraFrustum_returnDataForm')
cmds.columnLayout('cameraFrustum_typeCol', adj=1, p='cameraFrustum_returnDataForm')
#data type frameLayout
cmds.frameLayout('cameraFrustum_typeFrame', l='Data Type', ec='cmds.frameLayout(\'cameraFrustum_componentFrame\', e=1, collapse=1)', marginHeight=5, collapsable=1, collapse=0, borderStyle='etchedIn', p='cameraFrustum_typeCol')
cmds.formLayout('cameraFrustum_typeForm', p='cameraFrustum_typeFrame')
cmds.button('cameraFrustum_typeAllButton', l='All', w=60, h=20, c='cameraFrustum_selectAllDataType()', p='cameraFrustum_typeForm')
cmds.button('cameraFrustum_typeNoneButton', l='None', w=60, h=20, c='cameraFrustum_deSelectAllDataType()', p='cameraFrustum_typeForm')
cmds.gridLayout('cameraFrustum_typeGrid', numberOfColumns=2, cellWidthHeight=(105, 20), p='cameraFrustum_typeForm')
cmds.checkBox('cameraFrustum_typeMeshCB', l='Mesh', value=1, align='left', p='cameraFrustum_typeGrid')
cmds.checkBox('cameraFrustum_typeJointsCB', l='Joints', value=1, align='left', p='cameraFrustum_typeGrid')
cmds.checkBox('cameraFrustum_typeNurbsCB', l='Nurbs', value=1, align='left', p='cameraFrustum_typeGrid')
cmds.checkBox('cameraFrustum_typeLightsCB', l='Lights', value=1, align='left', p='cameraFrustum_typeGrid')
cmds.checkBox('cameraFrustum_typeCurvesCB', l='Curves', value=1, align='left', p='cameraFrustum_typeGrid')
cmds.checkBox('cameraFrustum_typeDynamicsCB', l='Dynamics', value=1, align='left', p='cameraFrustum_typeGrid')
cmds.checkBox('cameraFrustum_typeLocatorsCB', l='Locators', value=1, align='left', p='cameraFrustum_typeGrid')
cmds.checkBox('cameraFrustum_typeCamerasCB', l='Cameras', value=1, align='left', p='cameraFrustum_typeGrid')
cmds.formLayout('cameraFrustum_typeForm', e=1,
attachForm=[
('cameraFrustum_typeAllButton', 'left', 85),
('cameraFrustum_typeAllButton', 'top', 5),
('cameraFrustum_typeNoneButton', 'top', 5),
('cameraFrustum_typeGrid', 'left', 65),
('cameraFrustum_typeGrid', 'right', 0),
('cameraFrustum_typeGrid', 'bottom', 5)
],
attachControl=[
('cameraFrustum_typeNoneButton', 'left', 10, 'cameraFrustum_typeAllButton'),
('cameraFrustum_typeGrid', 'top', 10, 'cameraFrustum_typeAllButton')
])
#component frameLayout
cmds.frameLayout('cameraFrustum_componentFrame', l='Component Type', ec='cmds.frameLayout(\'cameraFrustum_typeFrame\', e=1, collapse=1)', marginHeight=5, collapsable=1, collapse=1, borderStyle='etchedIn', p='cameraFrustum_typeCol')
cmds.formLayout('cameraFrustum_componentForm', p='cameraFrustum_componentFrame')
cmds.gridLayout('cameraFrustum_componentGrid', numberOfColumns=2, cellWidthHeight=(105, 20), p='cameraFrustum_componentForm')
cmds.checkBox('cameraFrustum_typeVertexCB', l='Vertex', value=0, align='left', p='cameraFrustum_componentGrid')
cmds.checkBox('cameraFrustum_typeEdgeCB', l='Edge', value=0, align='left', p='cameraFrustum_componentGrid')
cmds.checkBox('cameraFrustum_typeFaceCB', l='Face', value=0, align='left', p='cameraFrustum_componentGrid')
cmds.formLayout('cameraFrustum_componentForm', e=1,
attachForm=[
('cameraFrustum_componentGrid', 'left', 65),
('cameraFrustum_componentGrid', 'right', 0),
('cameraFrustum_componentGrid', 'top', 5),
('cameraFrustum_componentGrid', 'bottom', 5)
])
#action frameLayout
cmds.frameLayout('cameraFrustum_actionFrame', l='Action', marginHeight=5, collapsable=1, collapse=0, borderStyle='etchedIn', p='cameraFrustum_actionCol')
cmds.formLayout('cameraFrustum_actionForm', p='cameraFrustum_actionFrame')
cmds.gridLayout('cameraFrustum_actionGrid', numberOfColumns=1, cellWidthHeight=(110, 20), p='cameraFrustum_actionForm')
cmds.checkBox('cameraFrustum_actionInvertCB', l='Invert Frustum', value=0, align='left', p='cameraFrustum_actionGrid')
cmds.checkBox('cameraFrustum_actionSelectCB', l='Select', value=1, align='left', p='cameraFrustum_actionGrid')
cmds.checkBox('cameraFrustum_actionHideCB', l='Hide', enable=0, value=0, align='left', p='cameraFrustum_actionGrid')
cmds.checkBox('cameraFrustum_actionDeleteCB', l='Delete', enable=0, value=0, align='left', p='cameraFrustum_actionGrid')
cmds.checkBox('cameraFrustum_actionDisplayLayerCB', l='Display Layer', enable=0, value=0, align='left', p='cameraFrustum_actionGrid')
cmds.checkBox('cameraFrustum_actionKeepCB', l='Keep Frustum Geo', value=0, align='left', p='cameraFrustum_actionGrid')
cmds.formLayout('cameraFrustum_actionForm', e=1,
attachForm=[
('cameraFrustum_actionGrid', 'left', 15),
('cameraFrustum_actionGrid', 'right', 0),
('cameraFrustum_actionGrid', 'top', 5),
('cameraFrustum_actionGrid', 'bottom', 5)
])
#limits frameLayout
cmds.frameLayout('cameraFrustum_limitsFrame', l='Limits', marginHeight=5, collapsable=1, collapse=0, borderStyle='etchedIn', p='cameraFrustum_actionCol')
cmds.formLayout('cameraFrustum_limitsForm', p='cameraFrustum_limitsFrame')
cmds.gridLayout('cameraFrustum_limitsGrid', numberOfColumns=1, cellWidthHeight=(165, 20), p='cameraFrustum_limitsForm')
cmds.checkBox('cameraFrustum_limitsEverythingCB', l='Everything', value=0, align='left', p='cameraFrustum_limitsGrid')
cmds.checkBox('cameraFrustum_limitsSelectionCB', l='Selection', value=0, align='left', p='cameraFrustum_limitsGrid')
cmds.checkBox('cameraFrustum_limitsVisibleCB', l='Visible', value=1, align='left', p='cameraFrustum_limitsGrid')
cmds.checkBox('cameraFrustum_limitsRLCB', l='Current Render Layer', value=0, align='left', p='cameraFrustum_limitsGrid')
cmds.checkBox('cameraFrustum_limitsInsideGrpCB', l='Inside Group', value=0, align='left', p='cameraFrustum_limitsGrid')
    cmds.textField('cameraFrustum_limitsInsideGrpTXT', text='name of group', p='cameraFrustum_limitsGrid')
cmds.checkBox('cameraFrustum_limitsWildCardCB', l='*Wild Card*', value=0, align='left', p='cameraFrustum_limitsGrid')
    cmds.textField('cameraFrustum_limitsWildCardTXT', text='e.g. -- *name, *:name', p='cameraFrustum_limitsGrid')
cmds.formLayout('cameraFrustum_limitsForm', e=1,
attachForm=[
('cameraFrustum_limitsGrid', 'left', 15),
('cameraFrustum_limitsGrid', 'right', 0),
('cameraFrustum_limitsGrid', 'top', 5),
('cameraFrustum_limitsGrid', 'bottom', 5)
])
#edit return data form
cmds.formLayout('cameraFrustum_returnDataForm', e=1,
attachForm=[
('cameraFrustum_typeCol', 'top', 0),
('cameraFrustum_typeCol', 'right', 0),
('cameraFrustum_typeCol', 'bottom', 0),
('cameraFrustum_actionCol', 'top', 0),
('cameraFrustum_actionCol', 'left', 0),
('cameraFrustum_actionCol', 'bottom', 0)
],
attachPosition=[
('cameraFrustum_typeCol', 'left', 0, 40),
('cameraFrustum_actionCol', 'right', 0, 40)
])
#edit mainForm
cmds.formLayout('cameraFrustum_mainForm', e=1,
attachForm=[
('cameraFrustum_mainCol', 'top', 0),
('cameraFrustum_mainCol', 'left', 0),
('cameraFrustum_mainCol', 'right', 0),
('cameraFrustum_mainCol', 'bottom', 80),
('cameraFrustum_cancelTxt', 'left', 5),
('cameraFrustum_cancelTxt', 'right', 5),
('cameraFrustum_cancelTxt', 'bottom', 60),
('cameraFrustum_progBar', 'left', 5),
('cameraFrustum_progBar', 'right', 5),
('cameraFrustum_progBar', 'bottom', 40),
('cameraFrustum_executeButton', 'left', 5),
('cameraFrustum_executeButton', 'right', 5),
('cameraFrustum_executeButton', 'bottom', 2)
],
attachControl=[
('cameraFrustum_cancelTxt', 'top', 5, 'cameraFrustum_mainCol'),
('cameraFrustum_progBar', 'top', 5, 'cameraFrustum_cancelTxt'),
('cameraFrustum_executeButton', 'top', 5, 'cameraFrustum_progBar')
])
#show and resize window
cmds.showWindow('cameraFrustum_win')
cmds.window('cameraFrustum_win', e=1, wh=[510,715])
#run some def's
cameraFrustum_loadCamera()
#-------------------------------------------------------------------------------------------------------------
#print
printString += '////////////////////////////////////////////////////////////////////////////////////////////\n\n'
#print printString
#print ('COMPLETE -- check script editor for details...\n')
#*************************************************************************************************************
#*end cameraFrustum()
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for Oppia feedback threads and messages."""
__author__ = 'Koji Ashida'
from core.platform import models
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
import feconf
import utils
from google.appengine.ext import ndb
STATUS_CHOICES_OPEN = 'open'
STATUS_CHOICES_FIXED = 'fixed'
STATUS_CHOICES_IGNORED = 'ignored'
STATUS_CHOICES_COMPLIMENT = 'compliment'
STATUS_CHOICES_NOT_ACTIONABLE = 'not_actionable'
STATUS_CHOICES = [
STATUS_CHOICES_OPEN,
STATUS_CHOICES_FIXED,
STATUS_CHOICES_IGNORED,
STATUS_CHOICES_COMPLIMENT,
STATUS_CHOICES_NOT_ACTIONABLE,
]
class FeedbackThreadModel(base_models.BaseModel):
"""Threads for each exploration.
The id/key of instances of this class has the form
[EXPLORATION_ID].[THREAD_ID]
"""
# ID of the exploration the thread is about.
exploration_id = ndb.StringProperty(required=True, indexed=True)
# ID of state the thread is for. Does not exist if the thread is about the
# entire exploration.
state_name = ndb.StringProperty(indexed=True)
# ID of the user who started the thread. This may be None if the feedback
# was given anonymously by a learner.
original_author_id = ndb.StringProperty(indexed=True)
# Latest status of the thread.
status = ndb.StringProperty(
default=STATUS_CHOICES_OPEN,
choices=STATUS_CHOICES,
required=True,
indexed=True,
)
# Latest subject of the thread.
subject = ndb.StringProperty(indexed=False)
# Summary text of the thread.
summary = ndb.TextProperty(indexed=False)
@classmethod
def generate_new_thread_id(cls, exploration_id):
"""Generates a new thread id, unique within the exploration.
Exploration ID + the generated thread ID is globally unique.
"""
MAX_RETRIES = 10
RAND_RANGE = 127 * 127
for i in range(MAX_RETRIES):
thread_id = (
utils.base64_from_int(utils.get_current_time_in_millisecs()) +
utils.base64_from_int(utils.get_random_int(RAND_RANGE)))
if not cls.get_by_exp_and_thread_id(exploration_id, thread_id):
return thread_id
raise Exception(
'New thread id generator is producing too many collisions.')
@classmethod
def _generate_id(cls, exploration_id, thread_id):
return '.'.join([exploration_id, thread_id])
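    # illustrative example: _generate_id('exp123', 'a1b2') -> 'exp123.a1b2'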
@classmethod
def create(cls, exploration_id, thread_id):
"""Creates a new FeedbackThreadModel entry.
Throws an exception if a thread with the given exploration ID and
thread ID combination exists already.
"""
instance_id = cls._generate_id(exploration_id, thread_id)
if cls.get_by_id(instance_id):
raise Exception('Feedback thread ID conflict on create.')
return cls(id=instance_id)
@classmethod
def get_by_exp_and_thread_id(cls, exploration_id, thread_id):
"""Gets the FeedbackThreadModel entry for the given ID.
Returns None if the thread is not found or is already deleted.
"""
return cls.get_by_id(cls._generate_id(exploration_id, thread_id))
@classmethod
def get_threads(cls, exploration_id):
"""Returns an array of threads associated to the exploration.
Does not include the deleted entries.
"""
return cls.get_all().filter(
cls.exploration_id == exploration_id).fetch(
feconf.DEFAULT_QUERY_LIMIT)
class FeedbackMessageModel(base_models.BaseModel):
"""Feedback messages. One or more of these messages make a thread.
The id/key of instances of this class has the form
[EXPLORATION_ID].[THREAD_ID].[MESSAGE_ID]
"""
# ID corresponding to an entry of FeedbackThreadModel in the form of
# [EXPLORATION_ID].[THREAD_ID]
thread_id = ndb.StringProperty(required=True, indexed=True)
    # 0-based sequential numerical ID. Sorting the thread's messages by this
    # field yields them in chronological order.
message_id = ndb.IntegerProperty(required=True, indexed=True)
# ID of the user who posted this message. This may be None if the feedback
# was given anonymously by a learner.
author_id = ndb.StringProperty(indexed=True)
# New thread status. Must exist in the first message of a thread. For the
# rest of the thread, should exist only when the status changes.
updated_status = ndb.StringProperty(choices=STATUS_CHOICES, indexed=True)
# New thread subject. Must exist in the first message of a thread. For the
# rest of the thread, should exist only when the subject changes.
updated_subject = ndb.StringProperty(indexed=False)
    # Message text. May be absent (e.g. for a post that only updates the status).
text = ndb.StringProperty(indexed=False)
@classmethod
def _generate_id(cls, thread_id, message_id):
return '.'.join([thread_id, str(message_id)])
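    # illustrative example: _generate_id('exp123.a1b2', 0) -> 'exp123.a1b2.0'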
@property
def exploration_id(self):
return self.id.split('.')[0]
def get_thread_subject(self):
return FeedbackThreadModel.get_by_id(self.thread_id).subject
@classmethod
def create(cls, thread_id, message_id):
"""Creates a new FeedbackMessageModel entry.
Throws an exception if a message with the given thread ID and message
ID combination exists already.
"""
instance_id = cls._generate_id(thread_id, message_id)
if cls.get_by_id(instance_id):
raise Exception('Feedback message ID conflict on create.')
return cls(id=instance_id)
@classmethod
def get(cls, thread_id, message_id, strict=True):
"""Gets the FeedbackMessageModel entry for the given ID.
If the message id is valid and it is not marked as deleted, returns the
message instance. Otherwise:
- if strict is True, raises EntityNotFoundError
- if strict is False, returns None.
"""
instance_id = cls._generate_id(thread_id, message_id)
return super(FeedbackMessageModel, cls).get(instance_id, strict=strict)
@classmethod
def get_messages(cls, thread_id):
"""Returns an array of messages in the thread.
Does not include the deleted entries.
"""
return cls.get_all().filter(
cls.thread_id == thread_id).fetch(feconf.DEFAULT_QUERY_LIMIT)
@classmethod
def get_most_recent_message(cls, thread_id):
return cls.get_all().filter(
cls.thread_id == thread_id).order(-cls.last_updated).get()
@classmethod
def get_message_count(cls, thread_id):
"""Returns the number of messages in the thread.
Includes the deleted entries.
"""
return cls.get_all(include_deleted_entities=True).filter(
cls.thread_id == thread_id).count()
@classmethod
def get_all_messages(cls, page_size, urlsafe_start_cursor):
return cls._fetch_page_sorted_by_last_updated(
cls.query(), page_size, urlsafe_start_cursor)
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import weakref
import imath
import IECore
import Gaffer
import GafferTest
class ContextTest( GafferTest.TestCase ) :
def testFrameAccess( self ) :
c = Gaffer.Context()
self.assertEqual( c.getFrame(), 1.0 )
self.assertEqual( c["frame"], 1.0 )
c.setFrame( 10.5 )
self.assertEqual( c.getFrame(), 10.5 )
self.assertEqual( c["frame"], 10.5 )
def testChangedSignal( self ) :
c = Gaffer.Context()
changes = []
def f( context, name ) :
self.assertTrue( context.isSame( c ) )
changes.append( ( name, context.get( name, None ) ) )
cn = c.changedSignal().connect( f )
c["a"] = 2
self.assertEqual( changes, [ ( "a", 2 ) ] )
c["a"] = 3
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ) ] )
c["b"] = 1
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ), ( "b", 1 ) ] )
# when an assignment makes no actual change, the signal should not
# be triggered again.
c["b"] = 1
self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ), ( "b", 1 ) ] )
# Removing variables should also trigger the changed signal.
del changes[:]
c.remove( "a" )
self.assertEqual( changes, [ ( "a", None ) ] )
del c["b"]
self.assertEqual( changes, [ ( "a", None ), ( "b", None ) ] )
def testTypes( self ) :
c = Gaffer.Context()
c["int"] = 1
self.assertEqual( c["int"], 1 )
self.assertEqual( c.get( "int" ), 1 )
c.set( "int", 2 )
self.assertEqual( c["int"], 2 )
self.assertIsInstance( c["int"], int )
c["float"] = 1.0
self.assertEqual( c["float"], 1.0 )
self.assertEqual( c.get( "float" ), 1.0 )
c.set( "float", 2.0 )
self.assertEqual( c["float"], 2.0 )
self.assertIsInstance( c["float"], float )
c["string"] = "hi"
self.assertEqual( c["string"], "hi" )
self.assertEqual( c.get( "string" ), "hi" )
c.set( "string", "bye" )
self.assertEqual( c["string"], "bye" )
self.assertIsInstance( c["string"], str )
c["v2i"] = imath.V2i( 1, 2 )
self.assertEqual( c["v2i"], imath.V2i( 1, 2 ) )
self.assertEqual( c.get( "v2i" ), imath.V2i( 1, 2 ) )
c.set( "v2i", imath.V2i( 1, 2 ) )
self.assertEqual( c["v2i"], imath.V2i( 1, 2 ) )
self.assertIsInstance( c["v2i"], imath.V2i )
c["v3i"] = imath.V3i( 1, 2, 3 )
self.assertEqual( c["v3i"], imath.V3i( 1, 2, 3 ) )
self.assertEqual( c.get( "v3i" ), imath.V3i( 1, 2, 3 ) )
c.set( "v3i", imath.V3i( 1, 2, 3 ) )
self.assertEqual( c["v3i"], imath.V3i( 1, 2, 3 ) )
self.assertIsInstance( c["v3i"], imath.V3i )
c["v2f"] = imath.V2f( 1, 2 )
self.assertEqual( c["v2f"], imath.V2f( 1, 2 ) )
self.assertEqual( c.get( "v2f" ), imath.V2f( 1, 2 ) )
c.set( "v2f", imath.V2f( 1, 2 ) )
self.assertEqual( c["v2f"], imath.V2f( 1, 2 ) )
self.assertIsInstance( c["v2f"], imath.V2f )
c["v3f"] = imath.V3f( 1, 2, 3 )
self.assertEqual( c["v3f"], imath.V3f( 1, 2, 3 ) )
self.assertEqual( c.get( "v3f" ), imath.V3f( 1, 2, 3 ) )
c.set( "v3f", imath.V3f( 1, 2, 3 ) )
self.assertEqual( c["v3f"], imath.V3f( 1, 2, 3 ) )
self.assertIsInstance( c["v3f"], imath.V3f )
def testCopying( self ) :
c = Gaffer.Context()
c["i"] = 10
c2 = Gaffer.Context( c )
self.assertEqual( c2["i"], 10 )
c["i"] = 1
self.assertEqual( c["i"], 1 )
self.assertEqual( c2["i"], 10 )
def testEquality( self ) :
c = Gaffer.Context()
c2 = Gaffer.Context()
self.assertEqual( c, c2 )
self.assertFalse( c != c2 )
c["somethingElse"] = 1
self.assertNotEqual( c, c2 )
self.assertFalse( c == c2 )
def testCurrent( self ) :
# if nothing has been made current then there should be a default
# constructed context in place.
c = Gaffer.Context.current()
c2 = Gaffer.Context()
self.assertEqual( c, c2 )
# and we should be able to change that using the with statement
c2["something"] = 1
with c2 :
self.assertTrue( Gaffer.Context.current().isSame( c2 ) )
self.assertEqual( Gaffer.Context.current()["something"], 1 )
# and bounce back to the original
self.assertTrue( Gaffer.Context.current().isSame( c ) )
def testCurrentIsThreadSpecific( self ) :
c = Gaffer.Context()
self.assertFalse( c.isSame( Gaffer.Context.current() ) )
def f() :
self.assertFalse( c.isSame( Gaffer.Context.current() ) )
with Gaffer.Context() :
pass
with c :
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
t = threading.Thread( target = f )
t.start()
t.join()
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
self.assertFalse( c.isSame( Gaffer.Context.current() ) )
def testThreading( self ) :
# for good measure, run testCurrent() in a load of threads at
# the same time.
threads = []
for i in range( 0, 1000 ) :
t = threading.Thread( target = self.testCurrent )
t.start()
threads.append( t )
for t in threads :
t.join()
def testSetWithObject( self ) :
c = Gaffer.Context()
v = IECore.StringVectorData( [ "a", "b", "c" ] )
c.set( "v", v )
self.assertEqual( c.get( "v" ), v )
self.assertFalse( c.get( "v" ).isSame( v ) )
self.assertEqual( c["v"], v )
self.assertFalse( c["v"].isSame( v ) )
def testGetFallbackValue( self ) :
c = Gaffer.Context()
self.assertEqual( c.get( "f" ), None )
self.assertEqual( c.get( "f", 10 ), 10 )
c["f"] = 1.0
self.assertEqual( c.get( "f" ), 1.0 )
def testReentrancy( self ) :
c = Gaffer.Context()
with c :
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
with c :
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
def testLifeTime( self ) :
c = Gaffer.Context()
w = weakref.ref( c )
self.assertTrue( w() is c )
with c :
pass
del c
self.assertIsNone( w() )
def testWithBlockReturnValue( self ) :
with Gaffer.Context() as c :
self.assertIsInstance( c, Gaffer.Context )
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
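	# A summary of the substitution tokens exercised by the tests below:
	# "$name" and "${name}" expand to context variables, runs of "#" expand
	# to the zero-padded frame number, a leading "~" is handled by
	# TildeSubstitutions, and "\" escapes the character that follows it.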
def testSubstitute( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "$a/$b/something.###.tif" ), "apple/bear/something.020.tif" )
self.assertEqual( c.substitute( "$a/$dontExist/something.###.tif" ), "apple//something.020.tif" )
self.assertEqual( c.substitute( "${badlyFormed" ), "" )
def testSubstituteTildeInMiddle( self ) :
c = Gaffer.Context()
self.assertEqual( c.substitute( "a~b" ), "a~b" )
def testSubstituteWithMask( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "~", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.TildeSubstitutions ), "~" )
self.assertEqual( c.substitute( "#", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.FrameSubstitutions ), "#" )
self.assertEqual( c.substitute( "$a/${b}", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.VariableSubstitutions ), "$a/${b}" )
self.assertEqual( c.substitute( "\\", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.EscapeSubstitutions ), "\\" )
self.assertEqual( c.substitute( "\\$a", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.EscapeSubstitutions ), "\\apple" )
self.assertEqual( c.substitute( "#${a}", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.FrameSubstitutions ), "#apple" )
self.assertEqual( c.substitute( "#${a}", IECore.StringAlgo.Substitutions.NoSubstitutions ), "#${a}" )
def testFrameAndVariableSubstitutionsAreDifferent( self ) :
c = Gaffer.Context()
c.setFrame( 3 )
# Turning off variable substitutions should have no effect on '#' substitutions.
self.assertEqual( c.substitute( "###.$frame" ), "003.3" )
self.assertEqual( c.substitute( "###.$frame", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.VariableSubstitutions ), "003.$frame" )
# Turning off '#' substitutions should have no effect on variable substitutions.
self.assertEqual( c.substitute( "###.$frame" ), "003.3" )
self.assertEqual( c.substitute( "###.$frame", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.FrameSubstitutions ), "###.3" )
def testInternedStringVectorDataSubstitutions( self ) :
c = Gaffer.Context()
c["test1"] = IECore.InternedStringVectorData( [ "a", "b" ] )
c["test2"] = IECore.InternedStringVectorData()
self.assertEqual( c.substitute( "${test1}" ), "/a/b" )
self.assertEqual( c.substitute( "${test2}" ), "/" )
def testNames( self ) :
c = Gaffer.Context()
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond" ] ) )
c["a"] = 10
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
cc = Gaffer.Context( c )
self.assertEqual( set( cc.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
cc["b"] = 20
self.assertEqual( set( cc.names() ), set( [ "frame", "framesPerSecond", "a", "b" ] ) )
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
self.assertEqual( cc.names(), cc.keys() )
@GafferTest.TestRunner.PerformanceTestMethod()
def testManyContexts( self ) :
GafferTest.testManyContexts()
def testGetWithAndWithoutCopying( self ) :
c = Gaffer.Context()
c["test"] = IECore.IntVectorData( [ 1, 2 ] )
# we should be getting a copy each time by default
self.assertFalse( c["test"].isSame( c["test"] ) )
# meaning that if we modify the returned value, no harm is done
c["test"].append( 10 )
self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2 ] ) )
# if we ask nicely, we can get a reference to the internal
# value without any copying.
self.assertTrue( c.get( "test", _copy=False ).isSame( c.get( "test", _copy=False ) ) )
# but then if we modify the returned value, we are changing the
# context itself too. this should be avoided - we're just doing it
# here to test that we are indeed referencing the internal value.
c.get( "test", _copy=False ).append( 10 )
self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2, 10 ] ) )
def testGetWithDefaultAndCopyArgs( self ) :
c = Gaffer.Context()
c["test"] = IECore.IntVectorData( [ 1, 2 ] )
self.assertTrue( c.get( "test", 10, _copy=False ).isSame( c.get( "test", 20, _copy=False ) ) )
self.assertTrue( c.get( "test", defaultValue=10, _copy=False ).isSame( c.get( "test", defaultValue=20, _copy=False ) ) )
def testCopyWithSharedOwnership( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
self.assertEqual( c2["testInt"], 10 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
c1["testInt"] = 20
self.assertEqual( c1["testInt"], 20 )
# c2 has changed too! with slightly improved performance comes
# great responsibility!
self.assertEqual( c2["testInt"], 20 )
# both contexts reference the same object, but c2 at least owns
# a reference to its values, and can be used after c1 has been
# deleted.
self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r + 1 )
del c1
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )
def testCopyWithBorrowedOwnership( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )
self.assertEqual( c2["testInt"], 10 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
c1["testInt"] = 20
self.assertEqual( c1["testInt"], 20 )
# c2 has changed too! with slightly improved performance comes
# great responsibility!
self.assertEqual( c2["testInt"], 20 )
# check that c2 doesn't own a reference
self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )
# make sure we delete c2 before we delete c1
del c2
# check that we're ok to access c1 after deleting c2
self.assertEqual( c1["testInt"], 20 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
def testSetOnBorrowedContextsDoesntAffectOriginal( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )
c2["testInt"] = 20
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )
def testSetOnSharedContextsDoesntAffectOriginal( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
c2["testInt"] = 20
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )
def testSetOnSharedContextsReleasesReference( self ) :
c1 = Gaffer.Context()
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1.get( "testIntVector", _copy=False ).refCount(), r )
def testHash( self ) :
c = Gaffer.Context()
hashes = [ c.hash() ]
c["test"] = 1
hashes.append( c.hash() )
c["test"] = 2
hashes.append( c.hash() )
c["test2"] = "test2"
hashes.append( c.hash() )
self.assertEqual( len( hashes ), 4 )
self.assertEqual( len( set( str( h ) for h in hashes ) ), len( hashes ) )
c["test2"] = "test2" # no change
self.assertEqual( c.hash(), hashes[-1] )
def testChanged( self ) :
c = Gaffer.Context()
c["test"] = IECore.StringVectorData( [ "one" ] )
h = c.hash()
cs = GafferTest.CapturingSlot( c.changedSignal() )
d = c.get( "test", _copy = False ) # dangerous! the context won't know if we make changes
d.append( "two" )
self.assertEqual( c.get( "test" ), IECore.StringVectorData( [ "one", "two" ] ) )
self.assertEqual( len( cs ), 0 )
c.changed( "test" ) # let the context know what we've been up to
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0], ( c, "test" ) )
self.assertNotEqual( c.hash(), h )
def testHashIgnoresUIEntries( self ) :
c = Gaffer.Context()
h = c.hash()
c["ui:test"] = 1
self.assertEqual( h, c.hash() )
@GafferTest.TestRunner.PerformanceTestMethod()
def testManySubstitutions( self ) :
GafferTest.testManySubstitutions()
@GafferTest.TestRunner.PerformanceTestMethod()
def testManyEnvironmentSubstitutions( self ) :
GafferTest.testManyEnvironmentSubstitutions()
def testEscapedSubstitutions( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( r"\${a}.\$b" ), "${a}.$b" )
self.assertEqual( c.substitute( r"\~" ), "~" )
self.assertEqual( c.substitute( r"\#\#\#\#" ), "####" )
# really we're passing \\ to substitute and getting back \ -
# the extra slashes are escaping for the python interpreter.
self.assertEqual( c.substitute( "\\\\" ), "\\" )
self.assertEqual( c.substitute( "\\" ), "" )
def testRemove( self ) :
c = Gaffer.Context()
c["a"] = "apple"
c["b"] = "bear"
c["c"] = "cat"
h = c.hash()
self.assertEqual( set( c.names() ), set( [ "a", "b", "c", "frame", "framesPerSecond" ] ) )
# test Context.remove()
c.remove( "a" )
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b", "c", "frame", "framesPerSecond" ] ) )
h = c.hash()
# test Context.__delitem__()
del c[ "c" ]
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b", "frame", "framesPerSecond" ] ) )
self.assertEqual( c["b"], "bear" )
def testRemoveMatching( self ) :
c = Gaffer.Context()
c["a_1"] = "apple"
c["a_2"] = "apple"
c["b_1"] = "bear"
c["b_2"] = "bear"
c["c_1"] = "cat"
c["c_2"] = "cat"
h = c.hash()
self.assertEqual( set( c.names() ), set( [ "a_1", "a_2", "b_1", "b_2", "c_1", "c_2", "frame", "framesPerSecond" ] ) )
# test Context.removeMatching()
c.removeMatching( "a* c*" )
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b_1", "b_2", "frame", "framesPerSecond" ] ) )
h = c.hash()
def testContains( self ) :
c = Gaffer.Context()
self.assertFalse( "a" in c )
self.assertTrue( "a" not in c )
c["a"] = 1
self.assertTrue( "a" in c )
self.assertFalse( "a" not in c )
del c["a"]
self.assertFalse( "a" in c )
self.assertTrue( "a" not in c )
def testTime( self ) :
c = Gaffer.Context()
self.assertEqual( c.getFrame(), 1.0 )
self.assertEqual( c.getFramesPerSecond(), 24.0 )
self.assertAlmostEqual( c.getTime(), 1.0 / 24.0 )
c.setFrame( 12.0 )
self.assertEqual( c.getFrame(), 12.0 )
self.assertEqual( c.getFramesPerSecond(), 24.0 )
self.assertAlmostEqual( c.getTime(), 12.0 / 24.0 )
c.setFramesPerSecond( 48.0 )
self.assertEqual( c.getFrame(), 12.0 )
self.assertEqual( c.getFramesPerSecond(), 48.0 )
self.assertAlmostEqual( c.getTime(), 12.0 / 48.0 )
def testEditableScope( self ) :
GafferTest.testEditableScope()
def testCanceller( self ) :
c = Gaffer.Context()
c["test"] = 1
self.assertEqual( c.canceller(), None )
canceller = IECore.Canceller()
cc = Gaffer.Context( c, canceller )
self.assertEqual( cc["test"], 1 )
self.assertTrue( cc.canceller() is not None )
canceller.cancel()
with self.assertRaises( IECore.Cancelled ) :
IECore.Canceller.check( cc.canceller() )
contextCopy = Gaffer.Context( cc )
self.assertTrue( contextCopy.canceller() is not None )
with self.assertRaises( IECore.Cancelled ) :
IECore.Canceller.check( cc.canceller() )
def testCancellerLifetime( self ) :
canceller = IECore.Canceller()
context = Gaffer.Context( Gaffer.Context(), canceller )
cancellerWeakRef = weakref.ref( canceller )
del canceller
self.assertIsNotNone( cancellerWeakRef() )
del context
self.assertIsNone( cancellerWeakRef() )
def testOmitCanceller( self ) :
context1 = Gaffer.Context( Gaffer.Context(), IECore.Canceller() )
self.assertIsNotNone( context1.canceller() )
context2 = Gaffer.Context( context1, omitCanceller = True )
self.assertIsNone( context2.canceller() )
context3 = Gaffer.Context( context1, omitCanceller = False )
self.assertIsNotNone( context3.canceller() )
if __name__ == "__main__":
unittest.main()
|
|
# Copyright The IETF Trust 2007, All Rights Reserved
# Portion Copyright (C) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved. Contact: Pasi Eronen <pasi.eronen@nokia.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Nokia Corporation and/or its
# subsidiary(-ies) nor the names of its contributors may be used
# to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs, re, os, glob
import datetime
import tarfile
from ietf.idtracker.models import IDInternal, InternetDraft, AreaGroup, Position, IESGLogin, Acronym
from django.views.generic.list_detail import object_list
from django.views.generic.simple import direct_to_template
from django.views.decorators.vary import vary_on_cookie
from django.core.urlresolvers import reverse as urlreverse
from django.http import Http404, HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from django.template import RequestContext, Context, loader
from django.shortcuts import render_to_response, get_object_or_404
from django.conf import settings
from django.utils import simplejson as json
from django import forms
from ietf.iesg.models import TelechatDates, TelechatAgendaItem, WGAction
from ietf.idrfc.idrfc_wrapper import IdWrapper, RfcWrapper
from ietf.idrfc.models import RfcIndex
from ietf.idrfc.utils import update_telechat
from ietf.ietfauth.decorators import group_required, role_required
from ietf.idtracker.templatetags.ietf_filters import in_group
from ietf.ipr.models import IprDocAlias
from ietf.doc.models import Document, TelechatDocEvent, LastCallDocEvent, ConsensusDocEvent
from ietf.group.models import Group, GroupMilestone
def date_threshold():
"""Return the first day of the month that is 185 days ago."""
ret = datetime.date.today() - datetime.timedelta(days=185)
ret = ret - datetime.timedelta(days=ret.day - 1)
return ret
def inddocs(request):
queryset_list_ind = [d for d in InternetDraft.objects.filter(stream__in=("IRTF","ISE"), docevent__type="iesg_approved").distinct() if d.latest_event(type__in=("iesg_disapproved", "iesg_approved")).type == "iesg_approved"]
queryset_list_ind.sort(key=lambda d: d.b_approve_date, reverse=True)
queryset_list_ind_dnp = [d for d in IDInternal.objects.filter(stream__in=("IRTF","ISE"), docevent__type="iesg_disapproved").distinct() if d.latest_event(type__in=("iesg_disapproved", "iesg_approved")).type == "iesg_disapproved"]
queryset_list_ind_dnp.sort(key=lambda d: d.dnp_date, reverse=True)
return render_to_response('iesg/independent_doc.html',
dict(object_list=queryset_list_ind,
object_list_dnp=queryset_list_ind_dnp),
context_instance=RequestContext(request))
def wgdocs(request,cat):
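    # Placeholder; replaced below by wgdocsREDESIGN when
    # settings.USE_DB_REDESIGN_PROXY_CLASSES is enabled.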
pass
def wgdocsREDESIGN(request,cat):
is_recent = 0
proto_actions = []
doc_actions = []
threshold = date_threshold()
proto_levels = ["bcp", "ds", "ps", "std"]
doc_levels = ["exp", "inf"]
if cat == 'new':
is_recent = 1
drafts = InternetDraft.objects.filter(docevent__type="iesg_approved", docevent__time__gte=threshold, intended_std_level__in=proto_levels + doc_levels).exclude(stream__in=("ISE","IRTF")).distinct()
for d in drafts:
if d.b_approve_date and d.b_approve_date >= threshold:
if d.intended_std_level_id in proto_levels:
proto_actions.append(d)
elif d.intended_std_level_id in doc_levels:
doc_actions.append(d)
elif cat == 'prev':
# proto
start_date = datetime.date(1997, 12, 1)
drafts = InternetDraft.objects.filter(docevent__type="iesg_approved", docevent__time__lt=threshold, docevent__time__gte=start_date, intended_std_level__in=proto_levels).exclude(stream__in=("ISE","IRTF")).distinct()
for d in drafts:
if d.b_approve_date and start_date <= d.b_approve_date < threshold:
proto_actions.append(d)
# doc
start_date = datetime.date(1998, 10, 15)
drafts = InternetDraft.objects.filter(docevent__type="iesg_approved", docevent__time__lt=threshold, docevent__time__gte=start_date, intended_std_level__in=doc_levels).exclude(stream__in=("ISE","IRTF")).distinct()
for d in drafts:
if d.b_approve_date and start_date <= d.b_approve_date < threshold:
doc_actions.append(d)
else:
raise Http404
proto_actions.sort(key=lambda d: d.b_approve_date, reverse=True)
doc_actions.sort(key=lambda d: d.b_approve_date, reverse=True)
return render_to_response('iesg/ietf_doc.html',
dict(object_list=proto_actions,
object_list_doc=doc_actions,
is_recent=is_recent,
title_prefix="Recent" if is_recent else "Previous"),
context_instance=RequestContext(request))
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
wgdocs = wgdocsREDESIGN
def get_doc_section(id):
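    # Placeholder; replaced below by get_doc_sectionREDESIGN when
    # settings.USE_DB_REDESIGN_PROXY_CLASSES is enabled.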
pass
def get_doc_sectionREDESIGN(doc):
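    # Section codes correspond to the agenda numbering built in _agenda_json():
    # the first digit is the agenda area (2 = protocol actions, 3 = document
    # actions, 4 = working group actions), the second is the subsection, and
    # the last is 1 for new items, 2 for returning items, or 3 for "For
    # Action" items not yet in an IESG evaluation state.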
if doc.type_id == 'draft':
if doc.intended_std_level_id in ["bcp", "ds", "ps", "std"]:
s = "2"
else:
s = "3"
g = doc.group_acronym()
if g and str(g) != 'none':
s = s + "1"
elif (s == "3") and doc.stream_id in ("ise","irtf"):
s = s + "3"
else:
s = s + "2"
        if doc.get_state_slug() != "rfc" and doc.get_state_slug('draft-iesg') not in ("lc", "writeupw", "goaheadw", "iesg-eva", "defer"):
s = s + "3"
elif doc.returning_item():
s = s + "2"
else:
s = s + "1"
elif doc.type_id == 'charter':
s = get_wg_section(doc.group)
elif doc.type_id == 'statchg':
protocol_action = False
for relation in doc.relateddocument_set.filter(relationship__slug__in=('tops','tois','tohist','toinf','tobcp','toexp')):
if relation.relationship.slug in ('tops','tois') or relation.target.document.std_level.slug in ('std','ds','ps'):
protocol_action = True
if protocol_action:
s="23"
else:
s="33"
if doc.get_state_slug() not in ("iesgeval", "defer", "appr-pr", "appr-pend", "appr-sent"):
s = s + "3"
elif doc.returning_item():
s = s + "2"
else:
s = s + "1"
elif doc.type_id == 'conflrev':
if doc.get_state('conflrev').slug not in ('adrev','iesgeval','appr-reqnopub-pend','appr-reqnopub-sent','appr-noprob-pend','appr-noprob-sent','defer'):
s = "343"
elif doc.returning_item():
s = "342"
else:
s = "341"
return s
def get_wg_section(wg):
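    # Maps a WG to agenda section 4.1.x (creation) or 4.2.x (rechartering)
    # depending on the group state, with the last digit chosen from the
    # charter state.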
s = ""
charter_slug = None
if wg.charter:
charter_slug = wg.charter.get_state_slug()
if wg.state_id in ['active','dormant']:
if charter_slug in ['extrev','iesgrev']:
s = '422'
else:
s = '421'
else:
if charter_slug in ['extrev','iesgrev']:
s = '412'
else:
s = '411'
return s
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
get_doc_section = get_doc_sectionREDESIGN
def agenda_docs(date, next_agenda):
matches = Document.objects.filter(docevent__telechatdocevent__telechat_date=date).select_related("stream").distinct()
docmatches = []
for doc in matches:
if doc.latest_event(TelechatDocEvent, type="scheduled_for_telechat").telechat_date != date:
continue
e = doc.latest_event(type="started_iesg_process")
doc.balloting_started = e.time if e else datetime.datetime.min
if doc.type_id == "draft":
s = doc.get_state("draft-iana-review")
if s: # and s.slug in ("not-ok", "changed", "need-rev"):
doc.iana_review_state = str(s)
if doc.get_state_slug("draft-iesg") == "lc":
e = doc.latest_event(LastCallDocEvent, type="sent_last_call")
if e:
doc.lastcall_expires = e.expires
if doc.stream_id in ("ietf", "irtf", "iab"):
doc.consensus = "Unknown"
e = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
if e:
doc.consensus = "Yes" if e.consensus else "No"
elif doc.type_id=='conflrev':
doc.conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document
docmatches.append(doc)
# Be careful to keep this the same as what's used in agenda_documents
docmatches.sort(key=lambda d: d.balloting_started)
res = dict(("s%s%s%s" % (i, j, k), []) for i in range(2, 5) for j in range (1, 4) for k in range(1, 4))
    for k in range(1, 4):
        res['s34%d' % k] = []
    for doc in docmatches:
        section_key = "s" + get_doc_section(doc)
        if section_key not in res:
            res[section_key] = []
        res[section_key].append({'obj': doc})
return res
def agenda_wg_actions(date):
res = dict(("s%s%s%s" % (i, j, k), []) for i in range(2, 5) for j in range (1, 4) for k in range(1, 4))
charters = Document.objects.filter(type="charter", docevent__telechatdocevent__telechat_date=date).select_related("group").distinct()
charters = charters.filter(group__state__slug__in=["proposed","active"])
for c in charters:
if c.latest_event(TelechatDocEvent, type="scheduled_for_telechat").telechat_date != date:
continue
c.group.txt_link = settings.CHARTER_TXT_URL + "%s-%s.txt" % (c.canonical_name(), c.rev)
section_key = "s" + get_wg_section(c.group)
if section_key not in res:
res[section_key] = []
        # Older view code expects 'obj' while newer code expects 'doc'; keep both until the older code is updated.
res[section_key].append({'obj': c.group, 'doc': c})
return res
def agenda_management_issues(date):
return TelechatAgendaItem.objects.filter(type=3).order_by('id')
def _agenda_json(request, date=None):
if not date:
date = TelechatDates.objects.all()[0].date1
next_agenda = True
else:
y,m,d = date.split("-")
date = datetime.date(int(y), int(m), int(d))
next_agenda = None
data = {'telechat-date':str(date),
'as-of':str(datetime.datetime.utcnow()),
'sections':{}}
data['sections']['1'] = {'title':"Administrivia"}
data['sections']['1.1'] = {'title':"Roll Call"}
data['sections']['1.2'] = {'title':"Bash the Agenda"}
data['sections']['1.3'] = {'title':"Approval of the Minutes of Past Telechats"}
data['sections']['1.4'] = {'title':"List of Remaining Action Items from Last Telechat"}
data['sections']['2'] = {'title':"Protocol Actions"}
data['sections']['2.1'] = {'title':"WG Submissions"}
data['sections']['2.1.1'] = {'title':"New Items", 'docs':[]}
data['sections']['2.1.2'] = {'title':"Returning Items", 'docs':[]}
data['sections']['2.2'] = {'title':"Individual Submissions"}
data['sections']['2.2.1'] = {'title':"New Items", 'docs':[]}
data['sections']['2.2.2'] = {'title':"Returning Items", 'docs':[]}
    data['sections']['2.3'] = {'title':"Status Changes"}
data['sections']['2.3.1'] = {'title':"New Items", 'docs':[]}
data['sections']['2.3.2'] = {'title':"Returning Items", 'docs':[]}
data['sections']['3'] = {'title':"Document Actions"}
data['sections']['3.1'] = {'title':"WG Submissions"}
data['sections']['3.1.1'] = {'title':"New Items", 'docs':[]}
data['sections']['3.1.2'] = {'title':"Returning Items", 'docs':[]}
data['sections']['3.2'] = {'title':"Individual Submissions Via AD"}
data['sections']['3.2.1'] = {'title':"New Items", 'docs':[]}
data['sections']['3.2.2'] = {'title':"Returning Items", 'docs':[]}
data['sections']['3.3'] = {'title':"Status Changes"}
data['sections']['3.3.1'] = {'title':"New Items", 'docs':[]}
data['sections']['3.3.2'] = {'title':"Returning Items", 'docs':[]}
data['sections']['3.4'] = {'title':"IRTF and Independent Submission Stream Documents"}
data['sections']['3.4.1'] = {'title':"New Items", 'docs':[]}
data['sections']['3.4.2'] = {'title':"Returning Items", 'docs':[]}
data['sections']['4'] = {'title':"Working Group Actions"}
data['sections']['4.1'] = {'title':"WG Creation"}
data['sections']['4.1.1'] = {'title':"Proposed for IETF Review", 'wgs':[]}
data['sections']['4.1.2'] = {'title':"Proposed for Approval", 'wgs':[]}
data['sections']['4.2'] = {'title':"WG Rechartering"}
data['sections']['4.2.1'] = {'title':"Under Evaluation for IETF Review", 'wgs':[]}
data['sections']['4.2.2'] = {'title':"Proposed for Approval", 'wgs':[]}
data['sections']['5'] = {'title':"IAB News We Can Use"}
data['sections']['6'] = {'title':"Management Issues"}
data['sections']['7'] = {'title':"Working Group News"}
docs = agenda_docs(date, next_agenda)
for section in docs.keys():
# in case the document is in a state that does not have an agenda section
if section != 's':
s = str(".".join(list(section)[1:]))
if s[0:1] == '4':
# ignore these; not sure why they are included by agenda_docs
pass
else:
if len(docs[section]) != 0:
# If needed, add a "For Action" section to agenda
if s[4:5] == '3':
data['sections'][s] = {'title':"For Action", 'docs':[]}
for obj in docs[section]:
d = obj['obj']
docinfo = {'docname':d.canonical_name(),
'title':d.title,
'ad':d.ad.name}
if d.note:
docinfo['note'] = d.note
defer = d.active_defer_event()
if defer:
docinfo['defer-by'] = defer.by.name
docinfo['defer-at'] = str(defer.time)
if d.type_id == "draft":
docinfo['rev'] = d.rev
docinfo['intended-std-level'] = str(d.intended_std_level)
if d.rfc_number():
docinfo['rfc-number'] = d.rfc_number()
iana_state = d.get_state("draft-iana-review")
if iana_state and iana_state.slug in ("not-ok", "changed", "need-rev"):
docinfo['iana-review-state'] = str(iana_state)
if d.get_state_slug("draft-iesg") == "lc":
e = d.latest_event(LastCallDocEvent, type="sent_last_call")
if e:
docinfo['lastcall-expires'] = e.expires.strftime("%Y-%m-%d")
docinfo['consensus'] = None
e = d.latest_event(ConsensusDocEvent, type="changed_consensus")
if e:
docinfo['consensus'] = e.consensus
elif d.type_id == 'conflrev':
docinfo['rev'] = d.rev
td = d.relateddocument_set.get(relationship__slug='conflrev').target.document
docinfo['target-docname'] = td.canonical_name()
docinfo['target-title'] = td.title
docinfo['target-rev'] = td.rev
docinfo['intended-std-level'] = str(td.intended_std_level)
docinfo['stream'] = str(td.stream)
else:
# XXX check this -- is there nothing to set for
# all other documents here?
pass
data['sections'][s]['docs'] += [docinfo, ]
wgs = agenda_wg_actions(date)
for section in wgs.keys():
# in case the charter is in a state that does not have an agenda section
if section != 's':
s = str(".".join(list(section)[1:]))
if s[0:1] != '4':
# ignore these; not sure why they are included by agenda_wg_actions
pass
else:
if len(wgs[section]) != 0:
for obj in wgs[section]:
wg = obj['obj']
doc = obj['doc']
wginfo = {'docname': doc.canonical_name(),
'rev': doc.rev,
'wgname': doc.group.name,
'acronym': doc.group.acronym,
'ad': doc.group.ad.name}
data['sections'][s]['wgs'] += [wginfo, ]
mgmt = agenda_management_issues(date)
num = 0
for m in mgmt:
num += 1
data['sections']["6.%d" % num] = {'title':m.title}
return data
def _agenda_data(request, date=None):
if not date:
date = TelechatDates.objects.all()[0].date1
next_agenda = True
else:
y,m,d = date.split("-")
date = datetime.date(int(y), int(m), int(d))
next_agenda = None
#date = "2006-03-16"
docs = agenda_docs(date, next_agenda)
mgmt = agenda_management_issues(date)
wgs = agenda_wg_actions(date)
data = {'date':str(date), 'docs':docs,'mgmt':mgmt,'wgs':wgs}
for key, filename in {'action_items':settings.IESG_TASK_FILE,
'roll_call':settings.IESG_ROLL_CALL_FILE,
'minutes':settings.IESG_MINUTES_FILE}.items():
try:
f = codecs.open(filename, 'r', 'utf-8', 'replace')
text = f.read().strip()
f.close()
data[key] = text
except IOError:
data[key] = "(Error reading "+key+")"
return data
@vary_on_cookie
def agenda(request, date=None):
data = _agenda_data(request, date)
data['private'] = 'private' in request.REQUEST
data['settings'] = settings
return render_to_response("iesg/agenda.html", data, context_instance=RequestContext(request))
def agenda_txt(request):
data = _agenda_data(request)
return render_to_response("iesg/agenda.txt", data, context_instance=RequestContext(request), mimetype="text/plain")
def agenda_json(request):
response = HttpResponse(mimetype='text/plain')
response.write(json.dumps(_agenda_json(request), indent=2))
return response
def agenda_scribe_template(request):
date = TelechatDates.objects.all()[0].date1
docs = agenda_docs(date, True)
return render_to_response('iesg/scribe_template.html', {'date':str(date), 'docs':docs, 'USE_DB_REDESIGN_PROXY_CLASSES': settings.USE_DB_REDESIGN_PROXY_CLASSES}, context_instance=RequestContext(request) )
def _agenda_moderator_package(request):
data = _agenda_data(request)
data['ad_names'] = [str(x) for x in IESGLogin.active_iesg()]
data['ad_names'].sort(key=lambda x: x.split(' ')[-1])
return render_to_response("iesg/moderator_package.html", data, context_instance=RequestContext(request))
@group_required('Area_Director','Secretariat')
def agenda_moderator_package(request):
return _agenda_moderator_package(request)
def agenda_moderator_package_test(request):
if request.META['REMOTE_ADDR'] == "127.0.0.1":
return _agenda_moderator_package(request)
else:
return HttpResponseForbidden()
def _agenda_package(request):
data = _agenda_data(request)
return render_to_response("iesg/agenda_package.txt", data, context_instance=RequestContext(request), mimetype='text/plain')
@group_required('Area_Director','Secretariat')
def agenda_package(request):
return _agenda_package(request)
def agenda_package_test(request):
if request.META['REMOTE_ADDR'] == "127.0.0.1":
return _agenda_package(request)
else:
return HttpResponseForbidden()
def agenda_documents_txt(request):
dates = TelechatDates.objects.all()[0].dates()
docs = []
for date in dates:
from ietf.doc.models import TelechatDocEvent
for d in Document.objects.filter(docevent__telechatdocevent__telechat_date=date).distinct():
if d.latest_event(TelechatDocEvent, type="scheduled_for_telechat").telechat_date == date:
docs.append(d)
t = loader.get_template('iesg/agenda_documents.txt')
c = Context({'docs':docs,'special_stream_list':['ise','irtf']})
return HttpResponse(t.render(c), mimetype='text/plain')
class RescheduleForm(forms.Form):
telechat_date = forms.TypedChoiceField(coerce=lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date(), empty_value=None, required=False)
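    # The coerce function turns the submitted "YYYY-MM-DD" string back into a
    # datetime.date; the actual choices are filled in by __init__ below.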
clear_returning_item = forms.BooleanField(initial=False, required=False)
def __init__(self, *args, **kwargs):
dates = kwargs.pop('telechat_dates')
        super(RescheduleForm, self).__init__(*args, **kwargs)
# telechat choices
init = kwargs['initial']['telechat_date']
if init and init not in dates:
dates.insert(0, init)
choices = [("", "(not on agenda)")]
for d in dates:
choices.append((d, d.strftime("%Y-%m-%d")))
self.fields['telechat_date'].choices = choices
def handle_reschedule_form(request, doc, dates):
initial = dict(telechat_date=doc.telechat_date())
formargs = dict(telechat_dates=dates,
prefix="%s" % doc.name,
initial=initial)
if request.method == 'POST':
form = RescheduleForm(request.POST, **formargs)
if form.is_valid():
login = request.user.get_profile()
update_telechat(request, doc, login,
form.cleaned_data['telechat_date'],
False if form.cleaned_data['clear_returning_item'] else None)
doc.time = datetime.datetime.now()
doc.save()
else:
form = RescheduleForm(**formargs)
form.show_clear = doc.returning_item()
return form
def agenda_documents(request):
dates = TelechatDates.objects.all()[0].dates()
from ietf.doc.models import TelechatDocEvent
docs = []
for d in Document.objects.filter(docevent__telechatdocevent__telechat_date__in=dates).distinct():
if d.latest_event(TelechatDocEvent, type="scheduled_for_telechat").telechat_date in dates:
docs.append(d)
e = d.latest_event(type="started_iesg_process")
d.balloting_started = e.time if e else datetime.datetime.min
docs.sort(key=lambda d: d.balloting_started)
for i in docs:
i.reschedule_form = handle_reschedule_form(request, i, dates)
# some may have been taken off the schedule by the reschedule form
docs = [d for d in docs if d.telechat_date()]
telechats = []
for date in dates:
matches = filter(lambda x: x.telechat_date() == date, docs)
res = {}
for i in matches:
section_key = "s" + get_doc_section(i)
if section_key not in res:
res[section_key] = []
if i.type_id=='draft':
if i.get_state_slug()!="rfc":
i.iprUrl = "/ipr/search?option=document_search&id_document_tag=" + str(i.name)
else:
i.iprUrl = "/ipr/search?option=rfc_search&rfc_search=" + str(i.rfc_number())
i.iprCount = len(i.ipr())
res[section_key].append(i)
telechats.append({'date':date, 'docs':res})
return direct_to_template(request, 'iesg/agenda_documents_redesign.html', {'telechats':telechats, 'hide_telechat_date':True})
def telechat_docs_tarfile(request,year,month,day):
from tempfile import mkstemp
date=datetime.date(int(year),int(month),int(day))
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
from ietf.doc.models import TelechatDocEvent
docs = []
for d in IDInternal.objects.filter(docevent__telechatdocevent__telechat_date=date).distinct():
if d.latest_event(TelechatDocEvent, type="scheduled_for_telechat").telechat_date == date:
docs.append(d)
else:
docs= IDInternal.objects.filter(telechat_date=date, primary_flag=1, agenda=1)
response = HttpResponse(mimetype='application/octet-stream')
response['Content-Disposition'] = 'attachment; filename=telechat-%s-%s-%s-docs.tgz'%(year, month, day)
tarstream = tarfile.open('','w:gz',response)
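    # Stream the gzipped tar directly into the response body; a manifest of
    # included/missing drafts is written to a temp file and appended at the end.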
mfh, mfn = mkstemp()
manifest = open(mfn, "w")
for doc in docs:
doc_path = os.path.join(settings.INTERNET_DRAFT_PATH, doc.draft.filename+"-"+doc.draft.revision_display()+".txt")
if os.path.exists(doc_path):
try:
tarstream.add(doc_path, str(doc.draft.filename+"-"+doc.draft.revision_display()+".txt"))
manifest.write("Included: "+doc_path+"\n")
except Exception, e:
manifest.write(("Failed (%s): "%e)+doc_path+"\n")
else:
manifest.write("Not found: "+doc_path+"\n")
manifest.close()
tarstream.add(mfn, "manifest.txt")
tarstream.close()
os.unlink(mfn)
return response
def discusses(request):
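    # Lists documents that currently carry DISCUSS ballot positions; the
    # first branch is the proxy-class (DB redesign) implementation.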
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
res = []
for d in IDInternal.objects.filter(states__type="draft-iesg", states__slug__in=("pub-req", "ad-eval", "review-e", "lc-req", "lc", "writeupw", "goaheadw", "iesg-eva", "defer", "watching"), docevent__ballotpositiondocevent__pos="discuss").distinct():
found = False
for p in d.positions.all():
if p.discuss:
found = True
break
if not found:
continue
if d.rfc_flag:
doc = RfcWrapper(d)
else:
doc = IdWrapper(draft=d)
if doc.in_ietf_process() and doc.ietf_process.has_active_iesg_ballot():
res.append(doc)
return direct_to_template(request, 'iesg/discusses.html', {'docs':res})
positions = Position.objects.filter(discuss=1)
res = []
try:
ids = set()
except NameError:
# for Python 2.3
from sets import Set as set
ids = set()
for p in positions:
try:
draft = p.ballot.drafts.filter(primary_flag=1)
if len(draft) > 0 and draft[0].rfc_flag:
if not -draft[0].draft_id in ids:
ids.add(-draft[0].draft_id)
try:
ri = RfcIndex.objects.get(rfc_number=draft[0].draft_id)
doc = RfcWrapper(ri)
if doc.in_ietf_process() and doc.ietf_process.has_active_iesg_ballot():
res.append(doc)
except RfcIndex.DoesNotExist:
# NOT QUITE RIGHT, although this should never happen
pass
if len(draft) > 0 and not draft[0].rfc_flag and draft[0].draft.id_document_tag not in ids:
ids.add(draft[0].draft.id_document_tag)
doc = IdWrapper(draft=draft[0])
if doc.in_ietf_process() and doc.ietf_process.has_active_iesg_ballot():
res.append(doc)
except IDInternal.DoesNotExist:
pass
return direct_to_template(request, 'iesg/discusses.html', {'docs':res})
@role_required('Area Director', 'Secretariat')
def milestones_needing_review(request):
# collect milestones, grouped on AD and group
ads = {}
for m in GroupMilestone.objects.filter(state="review").exclude(group__state="concluded", group__ad=None).distinct().select_related("group", "group__ad"):
groups = ads.setdefault(m.group.ad, {})
milestones = groups.setdefault(m.group, [])
milestones.append(m)
ad_list = []
for ad, groups in ads.iteritems():
ad_list.append(ad)
ad.groups_needing_review = sorted(groups, key=lambda g: g.acronym)
for g, milestones in groups.iteritems():
g.milestones_needing_review = sorted(milestones, key=lambda m: m.due)
return render_to_response('iesg/milestones_needing_review.html',
dict(ads=sorted(ad_list, key=lambda ad: ad.plain_name()),
),
context_instance=RequestContext(request))
def parse_wg_action_file(path):
f = open(path, 'rU')
line = f.readline()
while line and not line.strip():
line = f.readline()
# name
m = re.search(r'([^\(]*) \(', line)
if not m:
return None
name = m.group(1)
# acronym
m = re.search(r'\((\w+)\)', line)
if not m:
return None
acronym = m.group(1)
# date
line = f.readline()
m = re.search(r'(\d{4})-(\d{2})-(\d{2})', line)
while line and not m:
line = f.readline()
m = re.search(r'(\d{4})-(\d{2})-(\d{2})', line)
last_updated = None
if m:
try:
last_updated = datetime.date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
        except ValueError:
            pass
# token
line = f.readline()
    while line and 'area director' not in line.lower():
line = f.readline()
line = f.readline()
line = f.readline()
m = re.search(r'\s*(\w+)\s*', line)
token = ""
if m:
token = m.group(1)
return dict(filename=os.path.basename(path), name=name, acronym=acronym,
status_date=last_updated, token=token)
def get_possible_wg_actions():
res = []
charters = glob.glob(os.path.join(settings.IESG_WG_EVALUATION_DIR, '*-charter.txt'))
for path in charters:
d = parse_wg_action_file(path)
if d:
if not d['status_date']:
d['status_date'] = datetime.date(1900,1,1)
res.append(d)
res.sort(key=lambda x: x['status_date'])
return res
@group_required('Area_Director', 'Secretariat')
def working_group_actions(request):
current_items = WGAction.objects.order_by('status_date').select_related()
if request.method == 'POST' and in_group(request.user, 'Secretariat'):
filename = request.POST.get('filename')
if filename and filename in os.listdir(settings.IESG_WG_EVALUATION_DIR):
if 'delete' in request.POST:
os.unlink(os.path.join(settings.IESG_WG_EVALUATION_DIR, filename))
if 'add' in request.POST:
d = parse_wg_action_file(os.path.join(settings.IESG_WG_EVALUATION_DIR, filename))
qstr = "?" + "&".join("%s=%s" % t for t in d.iteritems())
return HttpResponseRedirect(urlreverse('iesg_add_working_group_action') + qstr)
skip = [c.group_acronym.acronym for c in current_items]
possible_items = filter(lambda x: x['acronym'] not in skip,
get_possible_wg_actions())
return render_to_response("iesg/working_group_actions.html",
dict(current_items=current_items,
possible_items=possible_items),
context_instance=RequestContext(request))
class EditWGActionForm(forms.ModelForm):
token_name = forms.ChoiceField(required=True)
telechat_date = forms.TypedChoiceField(coerce=lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date(), empty_value=None, required=False)
class Meta:
model = WGAction
fields = ['status_date', 'token_name', 'category', 'note']
def __init__(self, *args, **kwargs):
        super(EditWGActionForm, self).__init__(*args, **kwargs)
# token name choices
self.fields['token_name'].choices = [("", "(None)")] + [(p.plain_name(), p.plain_name()) for p in IESGLogin.active_iesg().order_by('first_name')]
# telechat choices
dates = TelechatDates.objects.all()[0].dates()
init = kwargs['initial']['telechat_date']
if init and init not in dates:
dates.insert(0, init)
choices = [("", "(not on agenda)")]
for d in dates:
choices.append((d, d.strftime("%Y-%m-%d")))
self.fields['telechat_date'].choices = choices
@group_required('Secretariat')
def edit_working_group_action(request, wga_id):
    if wga_id is not None:
wga = get_object_or_404(WGAction, pk=wga_id)
else:
wga = WGAction()
try:
wga.group_acronym = Acronym.objects.get(acronym=request.GET.get('acronym'))
except Acronym.DoesNotExist:
pass
wga.token_name = request.GET.get('token')
try:
d = datetime.datetime.strptime(request.GET.get('status_date'), '%Y-%m-%d').date()
        except (TypeError, ValueError):
            d = datetime.date.today()
wga.status_date = d
wga.telechat_date = TelechatDates.objects.all()[0].date1
wga.agenda = True
initial = dict(telechat_date=wga.telechat_date if wga.agenda else None)
if request.method == 'POST':
if "delete" in request.POST:
wga.delete()
return HttpResponseRedirect(urlreverse('iesg_working_group_actions'))
form = EditWGActionForm(request.POST, instance=wga, initial=initial)
if form.is_valid():
form.save(commit=False)
wga.agenda = bool(form.cleaned_data['telechat_date'])
if wga.category in (11, 21):
wga.agenda = False
if wga.agenda:
wga.telechat_date = form.cleaned_data['telechat_date']
wga.save()
return HttpResponseRedirect(urlreverse('iesg_working_group_actions'))
else:
form = EditWGActionForm(instance=wga, initial=initial)
return render_to_response("iesg/edit_working_group_action.html",
dict(wga=wga,
form=form),
context_instance=RequestContext(request))
|
|
"""The tests for the TTS component."""
from unittest.mock import PropertyMock, patch
import pytest
import yarl
from homeassistant.components.demo.tts import DemoProvider
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
DOMAIN as DOMAIN_MP,
MEDIA_TYPE_MUSIC,
SERVICE_PLAY_MEDIA,
)
import homeassistant.components.tts as tts
from homeassistant.components.tts import _get_cache_files
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component, async_mock_service
def relative_url(url):
"""Convert an absolute url to a relative one."""
return str(yarl.URL(url).relative())
@pytest.fixture
def demo_provider():
"""Demo TTS provider."""
return DemoProvider("en")
@pytest.fixture(autouse=True)
def mock_get_cache_files():
"""Mock the list TTS cache function."""
with patch(
"homeassistant.components.tts._get_cache_files", return_value={}
) as mock_cache_files:
yield mock_cache_files
@pytest.fixture(autouse=True)
def mock_init_cache_dir():
"""Mock the TTS cache dir in memory."""
with patch(
"homeassistant.components.tts._init_tts_cache_dir",
side_effect=lambda hass, cache_dir: hass.config.path(cache_dir),
) as mock_cache_dir:
yield mock_cache_dir
@pytest.fixture
def empty_cache_dir(tmp_path, mock_init_cache_dir, mock_get_cache_files, request):
"""Mock the TTS cache dir with empty dir."""
mock_init_cache_dir.side_effect = None
mock_init_cache_dir.return_value = str(tmp_path)
    # Restore the original cache-file listing behavior, since we're working with a real dir.
mock_get_cache_files.side_effect = _get_cache_files
yield tmp_path
if request.node.rep_call.passed:
return
# Print contents of dir if failed
print("Content of dir for", request.node.nodeid)
for fil in tmp_path.iterdir():
print(fil.relative_to(tmp_path))
# To show the log.
assert False
@pytest.fixture()
def mutagen_mock():
"""Mock writing tags."""
with patch(
"homeassistant.components.tts.SpeechManager.write_tags",
side_effect=lambda *args: args[1],
):
yield
@pytest.fixture(autouse=True)
async def internal_url_mock(hass):
"""Mock internal URL of the instance."""
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
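# The cache file names asserted throughout these tests follow the pattern
# <message-hash>_<language>_<options-hash>_<platform>.mp3, with "-" standing
# in for "no options" (see tts._hash_options for the options hash).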
async def test_setup_component_demo(hass):
"""Set up the demo platform with defaults."""
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
assert hass.services.has_service(tts.DOMAIN, "demo_say")
assert hass.services.has_service(tts.DOMAIN, "clear_cache")
async def test_setup_component_demo_no_access_cache_folder(hass, mock_init_cache_dir):
"""Set up the demo platform with defaults."""
config = {tts.DOMAIN: {"platform": "demo"}}
mock_init_cache_dir.side_effect = OSError(2, "No access")
assert not await async_setup_component(hass, tts.DOMAIN, config)
assert not hass.services.has_service(tts.DOMAIN, "demo_say")
assert not hass.services.has_service(tts.DOMAIN, "clear_cache")
async def test_setup_component_and_test_service(hass, empty_cache_dir):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_config_language(
hass, empty_cache_dir
):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "language": "de"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_config_language_special(
hass, empty_cache_dir
):
"""Set up the demo platform and call service with extend language."""
import homeassistant.components.demo.tts as demo_tts
demo_tts.SUPPORT_LANGUAGES.append("en_US")
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "language": "en_US"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en-us_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en-us_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_wrong_conf_language(hass):
"""Set up the demo platform and call service with wrong config."""
config = {tts.DOMAIN: {"platform": "demo", "language": "ru"}}
with assert_setup_component(0, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
async def test_setup_component_and_test_service_with_service_language(
hass, empty_cache_dir
):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
).is_file()
async def test_setup_component_test_service_with_wrong_service_language(
hass, empty_cache_dir
):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "lang",
},
blocking=True,
)
assert len(calls) == 0
await hass.async_block_till_done()
assert not (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_lang_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_service_options(
hass, empty_cache_dir
):
"""Set up the demo platform and call service with options."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
tts.ATTR_OPTIONS: {"voice": "alex", "age": 5},
},
blocking=True,
)
opt_hash = tts._hash_options({"voice": "alex", "age": 5})
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== f"http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir
/ f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
).is_file()
async def test_setup_component_and_test_with_service_options_def(hass, empty_cache_dir):
"""Set up the demo platform and call service with default options."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN), patch(
"homeassistant.components.demo.tts.DemoProvider.default_options",
new_callable=PropertyMock(return_value={"voice": "alex"}),
):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
},
blocking=True,
)
opt_hash = tts._hash_options({"voice": "alex"})
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== f"http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir
/ f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_service_options_wrong(
hass, empty_cache_dir
):
"""Set up the demo platform and call service with wrong options."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
tts.ATTR_OPTIONS: {"speed": 1},
},
blocking=True,
)
opt_hash = tts._hash_options({"speed": 1})
assert len(calls) == 0
await hass.async_block_till_done()
assert not (
empty_cache_dir
/ f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_base_url_set(hass):
"""Set up the demo platform with ``base_url`` set and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "base_url": "http://fnord"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID] == "http://fnord"
"/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491"
"_en_-_demo.mp3"
)
async def test_setup_component_and_test_service_clear_cache(hass, empty_cache_dir):
"""Set up the demo platform and call service clear cache."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
# To make sure the file is persisted
await hass.async_block_till_done()
assert len(calls) == 1
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
await hass.services.async_call(
tts.DOMAIN, tts.SERVICE_CLEAR_CACHE, {}, blocking=True
)
await hass.async_block_till_done()
assert not (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_receive_voice(
hass, demo_provider, hass_client
):
"""Set up the demo platform and call service and receive voice."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
req = await client.get(relative_url(calls[0].data[ATTR_MEDIA_CONTENT_ID]))
_, demo_data = demo_provider.get_tts_audio("bla", "en")
demo_data = tts.SpeechManager.write_tags(
"42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3",
demo_data,
demo_provider,
"There is someone at the door.",
"en",
None,
)
assert req.status == 200
assert await req.read() == demo_data
async def test_setup_component_and_test_service_with_receive_voice_german(
hass, demo_provider, hass_client
):
"""Set up the demo platform and call service and receive voice."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "language": "de"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
req = await client.get(relative_url(calls[0].data[ATTR_MEDIA_CONTENT_ID]))
_, demo_data = demo_provider.get_tts_audio("bla", "de")
demo_data = tts.SpeechManager.write_tags(
"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3",
demo_data,
demo_provider,
"There is someone at the door.",
"de",
None,
)
assert req.status == 200
assert await req.read() == demo_data
async def test_setup_component_and_web_view_wrong_file(hass, hass_client):
"""Set up the demo platform and receive wrong file from web."""
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
url = "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
req = await client.get(url)
assert req.status == HTTP_NOT_FOUND
async def test_setup_component_and_web_view_wrong_filename(hass, hass_client):
"""Set up the demo platform and receive wrong filename from web."""
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
url = "/api/tts_proxy/265944dsk32c1b2a621be5930510bb2cd_en_-_demo.mp3"
req = await client.get(url)
assert req.status == HTTP_NOT_FOUND
async def test_setup_component_test_without_cache(hass, empty_cache_dir):
"""Set up demo platform without cache."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "cache": False}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
await hass.async_block_till_done()
assert not (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
async def test_setup_component_test_with_cache_call_service_without_cache(
hass, empty_cache_dir
):
"""Set up demo platform with cache and call service without cache."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "cache": True}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_CACHE: False,
},
blocking=True,
)
assert len(calls) == 1
await hass.async_block_till_done()
assert not (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
async def test_setup_component_test_with_cache_dir(
hass, empty_cache_dir, demo_provider
):
"""Set up demo platform with cache and call service without cache."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
_, demo_data = demo_provider.get_tts_audio("bla", "en")
cache_file = (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
)
with open(cache_file, "wb") as voice_file:
voice_file.write(demo_data)
config = {tts.DOMAIN: {"platform": "demo", "cache": True}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
with patch(
"homeassistant.components.demo.tts.DemoProvider.get_tts_audio",
return_value=(None, None),
):
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
)
async def test_setup_component_test_with_error_on_get_tts(hass):
"""Set up demo platform with wrong get_tts_audio."""
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN), patch(
"homeassistant.components.demo.tts.DemoProvider.get_tts_audio",
return_value=(None, None),
):
assert await async_setup_component(hass, tts.DOMAIN, config)
async def test_setup_component_load_cache_retrieve_without_mem_cache(
hass, demo_provider, empty_cache_dir, hass_client
):
"""Set up component and load cache and get without mem cache."""
_, demo_data = demo_provider.get_tts_audio("bla", "en")
cache_file = (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
)
with open(cache_file, "wb") as voice_file:
voice_file.write(demo_data)
config = {tts.DOMAIN: {"platform": "demo", "cache": True}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
url = "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
req = await client.get(url)
assert req.status == 200
assert await req.read() == demo_data
async def test_setup_component_and_web_get_url(hass, hass_client):
"""Set up the demo platform and receive file from web."""
config = {tts.DOMAIN: {"platform": "demo"}}
await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
url = "/api/tts_get_url"
data = {"platform": "demo", "message": "There is someone at the door."}
req = await client.post(url, json=data)
assert req.status == 200
response = await req.json()
assert response == {
"url": "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3",
"path": "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3",
}
async def test_setup_component_and_web_get_url_bad_config(hass, hass_client):
"""Set up the demo platform and receive wrong file from web."""
config = {tts.DOMAIN: {"platform": "demo"}}
await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
url = "/api/tts_get_url"
data = {"message": "There is someone at the door."}
req = await client.post(url, json=data)
assert req.status == 400
async def test_tags_with_wave(hass, demo_provider):
"""Set up the demo platform and call service and receive voice."""
# below data represents an empty wav file
demo_data = bytes.fromhex(
"52 49 46 46 24 00 00 00 57 41 56 45 66 6d 74 20 10 00 00 00 01 00 02 00"
+ "22 56 00 00 88 58 01 00 04 00 10 00 64 61 74 61 00 00 00 00"
)
tagged_data = tts.SpeechManager.write_tags(
"42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.wav",
demo_data,
demo_provider,
"AI person is in front of your door.",
"en",
None,
)
assert tagged_data != demo_data
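# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original suite): the proxy-URL
# assertions above only hold because tts._hash_options is deterministic for a
# given options dict. This minimal test pins down that assumption.
# ---------------------------------------------------------------------------
def test_hash_options_is_deterministic():
    """Sketch: equal option dicts must yield equal cache-key fragments."""
    opts = {"voice": "alex"}
    assert tts._hash_options(opts) == tts._hash_options(dict(opts))
    # Distinct options must hash differently for the cache keys used above.
    assert tts._hash_options({"speed": 1}) != tts._hash_options(opts)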
|
|
#
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for generic RPC clients.
"""
import logging
import ganeti.rpc.transport as t
from ganeti import constants
from ganeti import errors
from ganeti.rpc.errors import (ProtocolError, RequestError, LuxiError)
from ganeti import serializer
KEY_METHOD = constants.LUXI_KEY_METHOD
KEY_ARGS = constants.LUXI_KEY_ARGS
KEY_SUCCESS = constants.LUXI_KEY_SUCCESS
KEY_RESULT = constants.LUXI_KEY_RESULT
KEY_VERSION = constants.LUXI_KEY_VERSION
def ParseRequest(msg):
"""Parses a request message.
"""
try:
request = serializer.LoadJson(msg)
  except ValueError as err:
raise ProtocolError("Invalid RPC request (parsing error): %s" % err)
logging.debug("RPC request: %s", request)
if not isinstance(request, dict):
logging.error("RPC request not a dict: %r", msg)
raise ProtocolError("Invalid RPC request (not a dict)")
method = request.get(KEY_METHOD, None) # pylint: disable=E1103
args = request.get(KEY_ARGS, None) # pylint: disable=E1103
version = request.get(KEY_VERSION, None) # pylint: disable=E1103
if method is None or args is None:
logging.error("RPC request missing method or arguments: %r", msg)
raise ProtocolError(("Invalid RPC request (no method or arguments"
" in request): %r") % msg)
return (method, args, version)
def ParseResponse(msg):
"""Parses a response message.
"""
# Parse the result
try:
data = serializer.LoadJson(msg)
except KeyboardInterrupt:
raise
  except Exception as err:
raise ProtocolError("Error while deserializing response: %s" % str(err))
# Validate response
if not (isinstance(data, dict) and
KEY_SUCCESS in data and
KEY_RESULT in data):
raise ProtocolError("Invalid response from server: %r" % data)
return (data[KEY_SUCCESS], data[KEY_RESULT],
data.get(KEY_VERSION, None)) # pylint: disable=E1103
def FormatResponse(success, result, version=None):
"""Formats a response message.
"""
response = {
KEY_SUCCESS: success,
KEY_RESULT: result,
}
if version is not None:
response[KEY_VERSION] = version
logging.debug("RPC response: %s", response)
return serializer.DumpJson(response)
def FormatRequest(method, args, version=None):
"""Formats a request message.
"""
# Build request
request = {
KEY_METHOD: method,
KEY_ARGS: args,
}
if version is not None:
request[KEY_VERSION] = version
# Serialize the request
return serializer.DumpJson(request,
private_encoder=serializer.EncodeWithPrivateFields)
def CallRPCMethod(transport_cb, method, args, version=None):
"""Send a RPC request via a transport and return the response.
"""
assert callable(transport_cb)
request_msg = FormatRequest(method, args, version=version)
# Send request and wait for response
response_msg = transport_cb(request_msg)
(success, result, resp_version) = ParseResponse(response_msg)
# Verify version if there was one in the response
if resp_version is not None and resp_version != version:
raise LuxiError("RPC version mismatch, client %s, response %s" %
(version, resp_version))
if success:
return result
errors.MaybeRaise(result)
raise RequestError(result)
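# Illustrative sketch (not part of the original module): CallRPCMethod only
# assumes its transport callback maps a serialized request string to a
# serialized response string, so an in-process echo "server" is enough to
# exercise the full round trip.
def _EchoTransportExample(request_msg):
  """Hypothetical transport: answer every request with its own payload."""
  (method, args, version) = ParseRequest(request_msg)
  return FormatResponse(True, {"method": method, "args": args},
                        version=version)
# CallRPCMethod(_EchoTransportExample, "Echo", ["hello"]) would then return
# {"method": "Echo", "args": ["hello"]}.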
class AbstractClient(object):
"""High-level client abstraction.
This uses a backing Transport-like class on top of which it
implements data serialization/deserialization.
"""
def __init__(self, timeouts=None, transport=t.Transport):
"""Constructor for the Client class.
Arguments:
- address: a valid address the the used transport class
- timeout: a list of timeouts, to be used on connect and read/write
- transport: a Transport-like class
If timeout is not passed, the default timeouts of the transport
class are used.
"""
self.timeouts = timeouts
self.transport_class = transport
self.transport = None
# The version used in RPC communication, by default unused:
self.version = None
def _GetAddress(self):
"""Returns the socket address
"""
raise NotImplementedError
def _InitTransport(self):
"""(Re)initialize the transport if needed.
"""
if self.transport is None:
self.transport = self.transport_class(self._GetAddress(),
timeouts=self.timeouts)
def _CloseTransport(self):
"""Close the transport, ignoring errors.
"""
if self.transport is None:
return
try:
old_transp = self.transport
self.transport = None
old_transp.Close()
except Exception: # pylint: disable=W0703
pass
def _SendMethodCall(self, data):
# Send request and wait for response
def send(try_no):
if try_no:
logging.debug("RPC peer disconnected, retrying")
self._InitTransport()
return self.transport.Call(data)
return t.Transport.RetryOnNetworkError(send,
lambda _: self._CloseTransport())
def Close(self):
"""Close the underlying connection.
"""
self._CloseTransport()
def close(self):
"""Same as L{Close}, to be used with contextlib.closing(...).
"""
self.Close()
def CallMethod(self, method, args):
"""Send a generic request and return the response.
"""
if not isinstance(args, (list, tuple)):
raise errors.ProgrammerError("Invalid parameter passed to CallMethod:"
" expected list, got %s" % type(args))
return CallRPCMethod(self._SendMethodCall, method, args,
version=self.version)
class AbstractStubClient(AbstractClient):
"""An abstract Client that connects a generated stub client to a L{Transport}.
  Subclasses should inherit from this class (first) as well as from a
  designated stub (second).
"""
def __init__(self, timeouts=None, transport=t.Transport):
"""Constructor for the class.
Arguments are the same as for L{AbstractClient}. Checks that SOCKET_PATH
attribute is defined (in the stub class).
"""
super(AbstractStubClient, self).__init__(timeouts=timeouts,
transport=transport)
def _GenericInvoke(self, method, *args):
return self.CallMethod(method, args)
def _GetAddress(self):
return self._GetSocketPath() # pylint: disable=E1101
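# Illustrative sketch (hypothetical, not part of the module): the smallest
# usable concrete client only has to supply a socket address; connection
# handling, retries and (de)serialization are inherited from AbstractClient.
class _ExampleUnixClient(AbstractClient):
  _EXAMPLE_SOCKET_PATH = "/var/run/example.sock"  # hypothetical path

  def _GetAddress(self):
    return self._EXAMPLE_SOCKET_PATH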
|
|
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""MB - the Meta-Build wrapper around GN.
MB is a wrapper script for GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import ast
import errno
import json
import os
import pipes
import platform
import pprint
import re
import shutil
import sys
import subprocess
import tempfile
import traceback
import urllib2
from collections import OrderedDict
CHROMIUM_SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
sys.path = [os.path.join(CHROMIUM_SRC_DIR, 'build')] + sys.path
import gn_helpers
try:
cmp # Python 2
except NameError: # Python 3
def cmp(x, y): # pylint: disable=redefined-builtin
return (x > y) - (x < y)
def main(args):
mbw = MetaBuildWrapper()
return mbw.Main(args)
class MetaBuildWrapper(object):
def __init__(self):
self.chromium_src_dir = CHROMIUM_SRC_DIR
self.default_config = os.path.join(self.chromium_src_dir, 'infra', 'mb',
'mb_config.pyl')
self.default_isolate_map = os.path.join(self.chromium_src_dir, 'infra',
'mb', 'gn_isolate_map.pyl')
self.executable = sys.executable
self.platform = sys.platform
self.sep = os.sep
self.args = argparse.Namespace()
self.configs = {}
self.luci_tryservers = {}
self.masters = {}
self.mixins = {}
def Main(self, args):
self.ParseArgs(args)
try:
ret = self.args.func()
if ret:
self.DumpInputFiles()
return ret
except KeyboardInterrupt:
self.Print('interrupted, exiting')
return 130
except Exception:
self.DumpInputFiles()
s = traceback.format_exc()
for l in s.splitlines():
self.Print(l)
return 1
def ParseArgs(self, argv):
def AddCommonOptions(subp):
subp.add_argument('-b', '--builder',
help='builder name to look up config from')
subp.add_argument('-m', '--master',
help='master name to look up config from')
subp.add_argument('-c', '--config',
help='configuration to analyze')
subp.add_argument('--phase',
help='optional phase name (used when builders '
'do multiple compiles with different '
'arguments in a single build)')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file '
'(default is %(default)s)')
subp.add_argument('-i', '--isolate-map-file', metavar='PATH',
help='path to isolate map file '
'(default is %(default)s)',
default=[],
action='append',
dest='isolate_map_files')
subp.add_argument('-g', '--goma-dir',
help='path to goma directory')
subp.add_argument('--android-version-code',
help='Sets GN arg android_default_version_code')
subp.add_argument('--android-version-name',
help='Sets GN arg android_default_version_name')
subp.add_argument('-n', '--dryrun', action='store_true',
help='Do a dry run (i.e., do nothing, just print '
'the commands that will run)')
subp.add_argument('-v', '--verbose', action='store_true',
help='verbose logging')
parser = argparse.ArgumentParser(prog='mb')
subps = parser.add_subparsers()
subp = subps.add_parser('analyze',
help='analyze whether changes to a set of files '
'will cause a set of binaries to be rebuilt.')
AddCommonOptions(subp)
subp.add_argument('path', nargs=1,
help='path build was generated into.')
subp.add_argument('input_path', nargs=1,
help='path to a file containing the input arguments '
'as a JSON object.')
subp.add_argument('output_path', nargs=1,
help='path to a file containing the output arguments '
'as a JSON object.')
subp.set_defaults(func=self.CmdAnalyze)
subp = subps.add_parser('export',
                            help='print out the expanded configuration for '
                                 'each builder as a JSON object')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file (default is %(default)s)')
subp.add_argument('-g', '--goma-dir',
help='path to goma directory')
subp.set_defaults(func=self.CmdExport)
subp = subps.add_parser('gen',
help='generate a new set of build files')
AddCommonOptions(subp)
subp.add_argument('--swarming-targets-file',
help='save runtime dependencies for targets listed '
'in file.')
subp.add_argument('path', nargs=1,
help='path to generate build into')
subp.set_defaults(func=self.CmdGen)
subp = subps.add_parser('isolate',
                            help='generate the .isolate files for a given '
                                 'binary')
AddCommonOptions(subp)
subp.add_argument('path', nargs=1,
help='path build was generated into')
subp.add_argument('target', nargs=1,
help='ninja target to generate the isolate for')
subp.set_defaults(func=self.CmdIsolate)
subp = subps.add_parser('lookup',
help='look up the command for a given config or '
'builder')
AddCommonOptions(subp)
subp.set_defaults(func=self.CmdLookup)
subp = subps.add_parser(
'run',
help='build and run the isolated version of a '
'binary',
formatter_class=argparse.RawDescriptionHelpFormatter)
subp.description = (
'Build, isolate, and run the given binary with the command line\n'
'listed in the isolate. You may pass extra arguments after the\n'
'target; use "--" if the extra arguments need to include switches.\n'
'\n'
'Examples:\n'
'\n'
' % tools/mb/mb.py run -m chromium.linux -b "Linux Builder" \\\n'
' //out/Default content_browsertests\n'
'\n'
' % tools/mb/mb.py run out/Default content_browsertests\n'
'\n'
' % tools/mb/mb.py run out/Default content_browsertests -- \\\n'
' --test-launcher-retry-limit=0'
'\n'
)
AddCommonOptions(subp)
subp.add_argument('-j', '--jobs', dest='jobs', type=int,
help='Number of jobs to pass to ninja')
subp.add_argument('--no-build', dest='build', default=True,
action='store_false',
help='Do not build, just isolate and run')
subp.add_argument('path', nargs=1,
help=('path to generate build into (or use).'
' This can be either a regular path or a '
'GN-style source-relative path like '
'//out/Default.'))
subp.add_argument('-s', '--swarmed', action='store_true',
help='Run under swarming with the default dimensions')
subp.add_argument('-d', '--dimension', default=[], action='append', nargs=2,
dest='dimensions', metavar='FOO bar',
help='dimension to filter on')
subp.add_argument('--no-default-dimensions', action='store_false',
dest='default_dimensions', default=True,
help='Do not automatically add dimensions to the task')
subp.add_argument('target', nargs=1,
help='ninja target to build and run')
subp.add_argument('extra_args', nargs='*',
help=('extra args to pass to the isolate to run. Use '
'"--" as the first arg if you need to pass '
'switches'))
subp.set_defaults(func=self.CmdRun)
subp = subps.add_parser('validate',
help='validate the config file')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file (default is %(default)s)')
subp.set_defaults(func=self.CmdValidate)
subp = subps.add_parser('gerrit-buildbucket-config',
help='Print buildbucket.config for gerrit '
'(see MB user guide)')
subp.add_argument('-f', '--config-file', metavar='PATH',
default=self.default_config,
help='path to config file (default is %(default)s)')
subp.set_defaults(func=self.CmdBuildbucket)
subp = subps.add_parser('help',
help='Get help on a subcommand.')
subp.add_argument(nargs='?', action='store', dest='subcommand',
help='The command to get help for.')
subp.set_defaults(func=self.CmdHelp)
self.args = parser.parse_args(argv)
def DumpInputFiles(self):
def DumpContentsOfFilePassedTo(arg_name, path):
if path and self.Exists(path):
self.Print("\n# To recreate the file passed to %s:" % arg_name)
self.Print("%% cat > %s <<EOF" % path)
contents = self.ReadFile(path)
self.Print(contents)
self.Print("EOF\n%\n")
if getattr(self.args, 'input_path', None):
DumpContentsOfFilePassedTo(
'argv[0] (input_path)', self.args.input_path[0])
if getattr(self.args, 'swarming_targets_file', None):
DumpContentsOfFilePassedTo(
'--swarming-targets-file', self.args.swarming_targets_file)
def CmdAnalyze(self):
vals = self.Lookup()
return self.RunGNAnalyze(vals)
def CmdExport(self):
self.ReadConfigFile()
obj = {}
for master, builders in self.masters.items():
obj[master] = {}
for builder in builders:
config = self.masters[master][builder]
if not config:
continue
if isinstance(config, dict):
args = {k: self.FlattenConfig(v)['gn_args']
for k, v in config.items()}
elif config.startswith('//'):
args = config
else:
args = self.FlattenConfig(config)['gn_args']
if 'error' in args:
continue
obj[master][builder] = args
# Dump object and trim trailing whitespace.
s = '\n'.join(l.rstrip() for l in
json.dumps(obj, sort_keys=True, indent=2).splitlines())
self.Print(s)
return 0
def CmdGen(self):
vals = self.Lookup()
return self.RunGNGen(vals)
def CmdHelp(self):
if self.args.subcommand:
self.ParseArgs([self.args.subcommand, '--help'])
else:
self.ParseArgs(['--help'])
def CmdIsolate(self):
vals = self.GetConfig()
if not vals:
return 1
return self.RunGNIsolate()
def CmdLookup(self):
vals = self.Lookup()
cmd = self.GNCmd('gen', '_path_')
gn_args = self.GNArgs(vals)
self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
env = None
self.PrintCmd(cmd, env)
return 0
def CmdRun(self):
vals = self.GetConfig()
if not vals:
return 1
build_dir = self.args.path[0]
target = self.args.target[0]
if self.args.build:
ret = self.Build(target)
if ret:
return ret
ret = self.RunGNIsolate()
if ret:
return ret
if self.args.swarmed:
return self._RunUnderSwarming(build_dir, target)
else:
return self._RunLocallyIsolated(build_dir, target)
def _RunUnderSwarming(self, build_dir, target):
# TODO(dpranke): Look up the information for the target in
# the //testing/buildbot.json file, if possible, so that we
# can determine the isolate target, command line, and additional
# swarming parameters, if possible.
#
# TODO(dpranke): Also, add support for sharding and merging results.
dimensions = []
for k, v in self._DefaultDimensions() + self.args.dimensions:
dimensions += ['-d', k, v]
cmd = [
self.executable,
self.PathJoin('tools', 'swarming_client', 'isolate.py'),
'archive',
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
'-I', 'isolateserver.appspot.com',
]
ret, out, _ = self.Run(cmd, force_verbose=False)
if ret:
return ret
isolated_hash = out.splitlines()[0].split()[0]
cmd = [
self.executable,
self.PathJoin('tools', 'swarming_client', 'swarming.py'),
'run',
'-s', isolated_hash,
'-I', 'isolateserver.appspot.com',
'-S', 'chromium-swarm.appspot.com',
] + dimensions
if self.args.extra_args:
cmd += ['--'] + self.args.extra_args
ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
return ret
def _RunLocallyIsolated(self, build_dir, target):
cmd = [
self.executable,
self.PathJoin('tools', 'swarming_client', 'isolate.py'),
'run',
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
]
if self.args.extra_args:
cmd += ['--'] + self.args.extra_args
ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
return ret
def _DefaultDimensions(self):
if not self.args.default_dimensions:
return []
# This code is naive and just picks reasonable defaults per platform.
if self.platform == 'darwin':
os_dim = ('os', 'Mac-10.12')
elif self.platform.startswith('linux'):
os_dim = ('os', 'Ubuntu-14.04')
elif self.platform == 'win32':
os_dim = ('os', 'Windows-10')
else:
raise MBErr('unrecognized platform string "%s"' % self.platform)
return [('pool', 'Chrome'),
('cpu', 'x86-64'),
os_dim]
def CmdBuildbucket(self):
self.ReadConfigFile()
self.Print('# This file was generated using '
'"tools/mb/mb.py gerrit-buildbucket-config".')
for luci_tryserver in sorted(self.luci_tryservers):
self.Print('[bucket "luci.%s"]' % luci_tryserver)
for bot in sorted(self.luci_tryservers[luci_tryserver]):
self.Print('\tbuilder = %s' % bot)
for master in sorted(self.masters):
if master.startswith('tryserver.'):
self.Print('[bucket "master.%s"]' % master)
for bot in sorted(self.masters[master]):
self.Print('\tbuilder = %s' % bot)
return 0
def CmdValidate(self, print_ok=True):
errs = []
# Read the file to make sure it parses.
self.ReadConfigFile()
# Build a list of all of the configs referenced by builders.
all_configs = {}
for master in self.masters:
for config in self.masters[master].values():
if isinstance(config, dict):
for c in config.values():
all_configs[c] = master
else:
all_configs[config] = master
# Check that every referenced args file or config actually exists.
for config, loc in all_configs.items():
if config.startswith('//'):
if not self.Exists(self.ToAbsPath(config)):
errs.append('Unknown args file "%s" referenced from "%s".' %
(config, loc))
elif not config in self.configs:
errs.append('Unknown config "%s" referenced from "%s".' %
(config, loc))
# Check that every actual config is actually referenced.
for config in self.configs:
if not config in all_configs:
errs.append('Unused config "%s".' % config)
# Figure out the whole list of mixins, and check that every mixin
# listed by a config or another mixin actually exists.
referenced_mixins = set()
for config, mixins in self.configs.items():
for mixin in mixins:
if not mixin in self.mixins:
errs.append('Unknown mixin "%s" referenced by config "%s".' %
(mixin, config))
referenced_mixins.add(mixin)
for mixin in self.mixins:
for sub_mixin in self.mixins[mixin].get('mixins', []):
if not sub_mixin in self.mixins:
errs.append('Unknown mixin "%s" referenced by mixin "%s".' %
(sub_mixin, mixin))
referenced_mixins.add(sub_mixin)
# Check that every mixin defined is actually referenced somewhere.
for mixin in self.mixins:
if not mixin in referenced_mixins:
errs.append('Unreferenced mixin "%s".' % mixin)
if errs:
raise MBErr(('mb config file %s has problems:' % self.args.config_file) +
'\n ' + '\n '.join(errs))
if print_ok:
self.Print('mb config file %s looks ok.' % self.args.config_file)
return 0
def GetConfig(self):
build_dir = self.args.path[0]
vals = self.DefaultVals()
if self.args.builder or self.args.master or self.args.config:
vals = self.Lookup()
# Re-run gn gen in order to ensure the config is consistent with the
# build dir.
self.RunGNGen(vals)
return vals
toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
'toolchain.ninja')
if not self.Exists(toolchain_path):
self.Print('Must either specify a path to an existing GN build dir '
'or pass in a -m/-b pair or a -c flag to specify the '
'configuration')
return {}
vals['gn_args'] = self.GNArgsFromDir(build_dir)
return vals
def GNArgsFromDir(self, build_dir):
args_contents = ""
gn_args_path = self.PathJoin(self.ToAbsPath(build_dir), 'args.gn')
if self.Exists(gn_args_path):
args_contents = self.ReadFile(gn_args_path)
gn_args = []
for l in args_contents.splitlines():
fields = l.split(' ')
name = fields[0]
val = ' '.join(fields[2:])
gn_args.append('%s=%s' % (name, val))
return ' '.join(gn_args)
def Lookup(self):
vals = self.ReadIOSBotConfig()
if not vals:
self.ReadConfigFile()
config = self.ConfigFromArgs()
if config.startswith('//'):
if not self.Exists(self.ToAbsPath(config)):
raise MBErr('args file "%s" not found' % config)
vals = self.DefaultVals()
vals['args_file'] = config
else:
if not config in self.configs:
raise MBErr('Config "%s" not found in %s' %
(config, self.args.config_file))
vals = self.FlattenConfig(config)
return vals
def ReadIOSBotConfig(self):
if not self.args.master or not self.args.builder:
return {}
path = self.PathJoin(self.chromium_src_dir, 'ios', 'build', 'bots',
self.args.master, self.args.builder + '.json')
if not self.Exists(path):
return {}
contents = json.loads(self.ReadFile(path))
gn_args = ' '.join(contents.get('gn_args', []))
vals = self.DefaultVals()
vals['gn_args'] = gn_args
return vals
def ReadConfigFile(self):
if not self.Exists(self.args.config_file):
raise MBErr('config file not found at %s' % self.args.config_file)
try:
contents = ast.literal_eval(self.ReadFile(self.args.config_file))
except SyntaxError as e:
raise MBErr('Failed to parse config file "%s": %s' %
(self.args.config_file, e))
self.configs = contents['configs']
self.luci_tryservers = contents.get('luci_tryservers', {})
self.masters = contents['masters']
self.mixins = contents['mixins']
def ReadIsolateMap(self):
if not self.args.isolate_map_files:
self.args.isolate_map_files = [self.default_isolate_map]
for f in self.args.isolate_map_files:
if not self.Exists(f):
raise MBErr('isolate map file not found at %s' % f)
isolate_maps = {}
for isolate_map in self.args.isolate_map_files:
try:
isolate_map = ast.literal_eval(self.ReadFile(isolate_map))
duplicates = set(isolate_map).intersection(isolate_maps)
if duplicates:
raise MBErr(
'Duplicate targets in isolate map files: %s.' %
', '.join(duplicates))
isolate_maps.update(isolate_map)
except SyntaxError as e:
raise MBErr(
'Failed to parse isolate map file "%s": %s' % (isolate_map, e))
return isolate_maps
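  # Illustrative gn_isolate_map.pyl entry (hypothetical target name; 'label'
  # and 'type' are the keys this script actually reads when mapping ninja
  # targets to GN labels):
  #   {
  #     "example_unittests": {
  #       "label": "//test:example_unittests",
  #       "type": "script",
  #     },
  #   }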
def ConfigFromArgs(self):
if self.args.config:
if self.args.master or self.args.builder:
        raise MBErr('Cannot specify both -c/--config and -m/--master or '
                    '-b/--builder')
return self.args.config
if not self.args.master or not self.args.builder:
raise MBErr('Must specify either -c/--config or '
'(-m/--master and -b/--builder)')
if not self.args.master in self.masters:
raise MBErr('Master name "%s" not found in "%s"' %
(self.args.master, self.args.config_file))
if not self.args.builder in self.masters[self.args.master]:
raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' %
(self.args.builder, self.args.master, self.args.config_file))
config = self.masters[self.args.master][self.args.builder]
if isinstance(config, dict):
if self.args.phase is None:
raise MBErr('Must specify a build --phase for %s on %s' %
(self.args.builder, self.args.master))
phase = str(self.args.phase)
if phase not in config:
raise MBErr('Phase %s doesn\'t exist for %s on %s' %
(phase, self.args.builder, self.args.master))
return config[phase]
if self.args.phase is not None:
raise MBErr('Must not specify a build --phase for %s on %s' %
(self.args.builder, self.args.master))
return config
def FlattenConfig(self, config):
mixins = self.configs[config]
vals = self.DefaultVals()
visited = []
self.FlattenMixins(mixins, vals, visited)
return vals
def DefaultVals(self):
return {
'args_file': '',
'cros_passthrough': False,
'gn_args': '',
}
def FlattenMixins(self, mixins, vals, visited):
for m in mixins:
if m not in self.mixins:
raise MBErr('Unknown mixin "%s"' % m)
visited.append(m)
mixin_vals = self.mixins[m]
if 'cros_passthrough' in mixin_vals:
vals['cros_passthrough'] = mixin_vals['cros_passthrough']
if 'args_file' in mixin_vals:
if vals['args_file']:
raise MBErr('args_file specified multiple times in mixins '
'for %s on %s' % (self.args.builder, self.args.master))
vals['args_file'] = mixin_vals['args_file']
if 'gn_args' in mixin_vals:
if vals['gn_args']:
vals['gn_args'] += ' ' + mixin_vals['gn_args']
else:
vals['gn_args'] = mixin_vals['gn_args']
if 'mixins' in mixin_vals:
self.FlattenMixins(mixin_vals['mixins'], vals, visited)
return vals
def RunGNGen(self, vals, compute_grit_inputs_for_analyze=False):
build_dir = self.args.path[0]
cmd = self.GNCmd('gen', build_dir, '--check')
gn_args = self.GNArgs(vals)
if compute_grit_inputs_for_analyze:
gn_args += ' compute_grit_inputs_for_analyze=true'
# Since GN hasn't run yet, the build directory may not even exist.
self.MaybeMakeDirectory(self.ToAbsPath(build_dir))
gn_args_path = self.ToAbsPath(build_dir, 'args.gn')
self.WriteFile(gn_args_path, gn_args, force_verbose=True)
swarming_targets = []
if getattr(self.args, 'swarming_targets_file', None):
# We need GN to generate the list of runtime dependencies for
# the compile targets listed (one per line) in the file so
# we can run them via swarming. We use gn_isolate_map.pyl to convert
# the compile targets to the matching GN labels.
path = self.args.swarming_targets_file
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path,
output_path=None)
contents = self.ReadFile(path)
swarming_targets = set(contents.splitlines())
isolate_map = self.ReadIsolateMap()
err, labels = self.MapTargetsToLabels(isolate_map, swarming_targets)
if err:
raise MBErr(err)
gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps')
self.WriteFile(gn_runtime_deps_path, '\n'.join(labels) + '\n')
cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
ret, _, _ = self.Run(cmd)
if ret:
# If `gn gen` failed, we should exit early rather than trying to
# generate isolates. Run() will have already logged any error output.
self.Print('GN gen failed: %d' % ret)
return ret
android = 'target_os="android"' in vals['gn_args']
fuchsia = 'target_os="fuchsia"' in vals['gn_args']
for target in swarming_targets:
if android:
# Android targets may be either android_apk or executable. The former
# will result in runtime_deps associated with the stamp file, while the
# latter will result in runtime_deps associated with the executable.
label = isolate_map[target]['label']
runtime_deps_targets = [
target + '.runtime_deps',
'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
elif fuchsia:
# Only emit a runtime deps file for the group() target on Fuchsia.
label = isolate_map[target]['label']
runtime_deps_targets = [
'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
elif (isolate_map[target]['type'] == 'script' or
isolate_map[target].get('label_type') == 'group'):
# For script targets, the build target is usually a group,
# for which gn generates the runtime_deps next to the stamp file
# for the label, which lives under the obj/ directory, but it may
# also be an executable.
label = isolate_map[target]['label']
runtime_deps_targets = [
'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
if self.platform == 'win32':
runtime_deps_targets += [ target + '.exe.runtime_deps' ]
else:
runtime_deps_targets += [ target + '.runtime_deps' ]
elif self.platform == 'win32':
runtime_deps_targets = [target + '.exe.runtime_deps']
else:
runtime_deps_targets = [target + '.runtime_deps']
for r in runtime_deps_targets:
runtime_deps_path = self.ToAbsPath(build_dir, r)
if self.Exists(runtime_deps_path):
break
else:
raise MBErr('did not generate any of %s' %
', '.join(runtime_deps_targets))
runtime_deps = self.ReadFile(runtime_deps_path).splitlines()
self.WriteIsolateFiles(build_dir, target, runtime_deps)
return 0
def RunGNIsolate(self):
target = self.args.target[0]
isolate_map = self.ReadIsolateMap()
err, labels = self.MapTargetsToLabels(isolate_map, [target])
if err:
raise MBErr(err)
label = labels[0]
build_dir = self.args.path[0]
cmd = self.GNCmd('desc', build_dir, label, 'runtime_deps')
ret, out, _ = self.Call(cmd)
if ret:
if out:
self.Print(out)
return ret
runtime_deps = out.splitlines()
self.WriteIsolateFiles(build_dir, target, runtime_deps)
ret, _, _ = self.Run([
self.executable,
self.PathJoin('tools', 'swarming_client', 'isolate.py'),
'check',
'-i',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
'-s',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target))],
buffer_output=False)
return ret
def WriteIsolateFiles(self, build_dir, target, runtime_deps):
isolate_path = self.ToAbsPath(build_dir, target + '.isolate')
self.WriteFile(isolate_path,
pprint.pformat({
'variables': {
'files': sorted(runtime_deps),
}
}) + '\n')
self.WriteJSON(
{
'args': [
'--isolated',
self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
'--isolate',
self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
],
'dir': self.chromium_src_dir,
'version': 1,
},
isolate_path + 'd.gen.json',
)
def MapTargetsToLabels(self, isolate_map, targets):
labels = []
err = ''
for target in targets:
if target == 'all':
labels.append(target)
elif target.startswith('//'):
labels.append(target)
else:
if target in isolate_map:
if isolate_map[target]['type'] == 'unknown':
err += ('test target "%s" type is unknown\n' % target)
else:
labels.append(isolate_map[target]['label'])
else:
err += ('target "%s" not found in '
'//infra/mb/gn_isolate_map.pyl\n' % target)
return err, labels
def GNCmd(self, subcommand, path, *args):
if self.platform == 'linux2':
subdir, exe = 'linux64', 'gn'
elif self.platform == 'darwin':
subdir, exe = 'mac', 'gn'
else:
subdir, exe = 'win', 'gn.exe'
arch = platform.machine()
if (arch.startswith('s390') or arch.startswith('ppc') or
self.platform.startswith('aix')):
# use gn in PATH
gn_path = 'gn'
else:
gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, exe)
return [gn_path, subcommand, path] + list(args)
def GNArgs(self, vals):
if vals['cros_passthrough']:
if not 'GN_ARGS' in os.environ:
raise MBErr('MB is expecting GN_ARGS to be in the environment')
gn_args = os.environ['GN_ARGS']
if not re.search('target_os.*=.*"chromeos"', gn_args):
raise MBErr('GN_ARGS is missing target_os = "chromeos": (GN_ARGS=%s)' %
gn_args)
else:
gn_args = vals['gn_args']
if self.args.goma_dir:
gn_args += ' goma_dir="%s"' % self.args.goma_dir
android_version_code = self.args.android_version_code
if android_version_code:
gn_args += ' android_default_version_code="%s"' % android_version_code
android_version_name = self.args.android_version_name
if android_version_name:
gn_args += ' android_default_version_name="%s"' % android_version_name
# Canonicalize the arg string into a sorted, newline-separated list
# of key-value pairs, and de-dup the keys if need be so that only
# the last instance of each arg is listed.
gn_args = gn_helpers.ToGNString(gn_helpers.FromGNArgs(gn_args))
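    # For example, per the de-dup rule above (illustrative):
    #   'use_goma=true is_debug=true use_goma=false'
    # canonicalizes to the sorted, newline-separated form
    #   'is_debug = true\nuse_goma = false'
    # with only the last instance of the duplicated key surviving.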
args_file = vals.get('args_file', None)
if args_file:
gn_args = ('import("%s")\n' % vals['args_file']) + gn_args
return gn_args
def ToAbsPath(self, build_path, *comps):
return self.PathJoin(self.chromium_src_dir,
self.ToSrcRelPath(build_path),
*comps)
def ToSrcRelPath(self, path):
"""Returns a relative path from the top of the repo."""
if path.startswith('//'):
return path[2:].replace('/', self.sep)
return self.RelPath(path, self.chromium_src_dir)
def RunGNAnalyze(self, vals):
# Analyze runs before 'gn gen' now, so we need to run gn gen
# in order to ensure that we have a build directory.
ret = self.RunGNGen(vals, compute_grit_inputs_for_analyze=True)
if ret:
return ret
build_path = self.args.path[0]
input_path = self.args.input_path[0]
gn_input_path = input_path + '.gn'
output_path = self.args.output_path[0]
gn_output_path = output_path + '.gn'
inp = self.ReadInputJSON(['files', 'test_targets',
'additional_compile_targets'])
if self.args.verbose:
self.Print()
self.Print('analyze input:')
self.PrintJSON(inp)
self.Print()
# This shouldn't normally happen, but could due to unusual race conditions,
# like a try job that gets scheduled before a patch lands but runs after
# the patch has landed.
if not inp['files']:
self.Print('Warning: No files modified in patch, bailing out early.')
self.WriteJSON({
'status': 'No dependency',
'compile_targets': [],
'test_targets': [],
}, output_path)
return 0
gn_inp = {}
gn_inp['files'] = ['//' + f for f in inp['files'] if not f.startswith('//')]
isolate_map = self.ReadIsolateMap()
err, gn_inp['additional_compile_targets'] = self.MapTargetsToLabels(
isolate_map, inp['additional_compile_targets'])
if err:
raise MBErr(err)
err, gn_inp['test_targets'] = self.MapTargetsToLabels(
isolate_map, inp['test_targets'])
if err:
raise MBErr(err)
labels_to_targets = {}
for i, label in enumerate(gn_inp['test_targets']):
labels_to_targets[label] = inp['test_targets'][i]
try:
self.WriteJSON(gn_inp, gn_input_path)
cmd = self.GNCmd('analyze', build_path, gn_input_path, gn_output_path)
ret, _, _ = self.Run(cmd, force_verbose=True)
if ret:
return ret
gn_outp_str = self.ReadFile(gn_output_path)
try:
gn_outp = json.loads(gn_outp_str)
except Exception as e:
self.Print("Failed to parse the JSON string GN returned: %s\n%s"
% (repr(gn_outp_str), str(e)))
raise
outp = {}
if 'status' in gn_outp:
outp['status'] = gn_outp['status']
if 'error' in gn_outp:
outp['error'] = gn_outp['error']
if 'invalid_targets' in gn_outp:
outp['invalid_targets'] = gn_outp['invalid_targets']
if 'compile_targets' in gn_outp:
all_input_compile_targets = sorted(
set(inp['test_targets'] + inp['additional_compile_targets']))
# If we're building 'all', we can throw away the rest of the targets
# since they're redundant.
if 'all' in gn_outp['compile_targets']:
outp['compile_targets'] = ['all']
else:
outp['compile_targets'] = gn_outp['compile_targets']
# crbug.com/736215: When GN returns targets back, for targets in
# the default toolchain, GN will have generated a phony ninja
# target matching the label, and so we can safely (and easily)
# transform any GN label into the matching ninja target. For
# targets in other toolchains, though, GN doesn't generate the
# phony targets, and we don't know how to turn the labels into
# compile targets. In this case, we also conservatively give up
# and build everything. Probably the right thing to do here is
# to have GN return the compile targets directly.
if any("(" in target for target in outp['compile_targets']):
self.Print('WARNING: targets with non-default toolchains were '
'found, building everything instead.')
outp['compile_targets'] = all_input_compile_targets
else:
outp['compile_targets'] = [
label.replace('//', '') for label in outp['compile_targets']]
# Windows has a maximum command line length of 8k; even Linux
# maxes out at 128k; if analyze returns a *really long* list of
# targets, we just give up and conservatively build everything instead.
# Probably the right thing here is for ninja to support response
# files as input on the command line
# (see https://github.com/ninja-build/ninja/issues/1355).
if len(' '.join(outp['compile_targets'])) > 7*1024:
self.Print('WARNING: Too many compile targets were affected.')
self.Print('WARNING: Building everything instead to avoid '
'command-line length issues.')
outp['compile_targets'] = all_input_compile_targets
if 'test_targets' in gn_outp:
outp['test_targets'] = [
labels_to_targets[label] for label in gn_outp['test_targets']]
if self.args.verbose:
self.Print()
self.Print('analyze output:')
self.PrintJSON(outp)
self.Print()
self.WriteJSON(outp, output_path)
finally:
if self.Exists(gn_input_path):
self.RemoveFile(gn_input_path)
if self.Exists(gn_output_path):
self.RemoveFile(gn_output_path)
return 0
def ReadInputJSON(self, required_keys):
path = self.args.input_path[0]
output_path = self.args.output_path[0]
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
try:
inp = json.loads(self.ReadFile(path))
except Exception as e:
self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' %
(path, e), output_path)
for k in required_keys:
if not k in inp:
self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
output_path)
return inp
def WriteFailureAndRaise(self, msg, output_path):
if output_path:
self.WriteJSON({'error': msg}, output_path, force_verbose=True)
raise MBErr(msg)
def WriteJSON(self, obj, path, force_verbose=False):
try:
self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n',
force_verbose=force_verbose)
except Exception as e:
raise MBErr('Error %s writing to the output path "%s"' %
(e, path))
def CheckCompile(self, master, builder):
url_template = self.args.url_template + '/{builder}/builds/_all?as_text=1'
url = urllib2.quote(url_template.format(master=master, builder=builder),
safe=':/()?=')
try:
builds = json.loads(self.Fetch(url))
except Exception as e:
return str(e)
successes = sorted(
[int(x) for x in builds.keys() if "text" in builds[x] and
cmp(builds[x]["text"][:2], ["build", "successful"]) == 0],
reverse=True)
if not successes:
return "no successful builds"
build = builds[str(successes[0])]
step_names = set([step["name"] for step in build["steps"]])
compile_indicators = set(["compile", "compile (with patch)", "analyze"])
if compile_indicators & step_names:
return "compiles"
return "does not compile"
def PrintCmd(self, cmd, env):
if self.platform == 'win32':
env_prefix = 'set '
env_quoter = QuoteForSet
shell_quoter = QuoteForCmd
else:
env_prefix = ''
env_quoter = pipes.quote
shell_quoter = pipes.quote
def print_env(var):
if env and var in env:
self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
print_env('LLVM_FORCE_HEAD_REVISION')
if cmd[0] == self.executable:
cmd = ['python'] + cmd[1:]
self.Print(*[shell_quoter(arg) for arg in cmd])
def PrintJSON(self, obj):
self.Print(json.dumps(obj, indent=2, sort_keys=True))
def Build(self, target):
build_dir = self.ToSrcRelPath(self.args.path[0])
ninja_cmd = ['ninja', '-C', build_dir]
if self.args.jobs:
ninja_cmd.extend(['-j', '%d' % self.args.jobs])
ninja_cmd.append(target)
ret, _, _ = self.Run(ninja_cmd, force_verbose=False, buffer_output=False)
return ret
def Run(self, cmd, env=None, force_verbose=True, buffer_output=True):
# This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.PrintCmd(cmd, env)
if self.args.dryrun:
return 0, '', ''
ret, out, err = self.Call(cmd, env=env, buffer_output=buffer_output)
if self.args.verbose or force_verbose:
if ret:
self.Print(' -> returned %d' % ret)
if out:
self.Print(out, end='')
if err:
self.Print(err, end='', file=sys.stderr)
return ret, out, err
def Call(self, cmd, env=None, buffer_output=True):
if buffer_output:
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
else:
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
env=env)
p.wait()
out = err = ''
return p.returncode, out, err
def ExpandUser(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.expanduser(path)
def Exists(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.exists(path)
def Fetch(self, url):
# This function largely exists so it can be overridden for testing.
f = urllib2.urlopen(url)
contents = f.read()
f.close()
return contents
def MaybeMakeDirectory(self, path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def PathJoin(self, *comps):
    # This function largely exists so it can be overridden for testing.
return os.path.join(*comps)
def Print(self, *args, **kwargs):
# This function largely exists so it can be overridden for testing.
print(*args, **kwargs)
if kwargs.get('stream', sys.stdout) == sys.stdout:
sys.stdout.flush()
def ReadFile(self, path):
    # This function largely exists so it can be overridden for testing.
with open(path) as fp:
return fp.read()
def RelPath(self, path, start='.'):
    # This function largely exists so it can be overridden for testing.
return os.path.relpath(path, start)
def RemoveFile(self, path):
    # This function largely exists so it can be overridden for testing.
os.remove(path)
def RemoveDirectory(self, abs_path):
if self.platform == 'win32':
# In other places in chromium, we often have to retry this command
# because we're worried about other processes still holding on to
# file handles, but when MB is invoked, it will be early enough in the
      # build that there should be no other processes to interfere. We
# can change this if need be.
self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
else:
shutil.rmtree(abs_path, ignore_errors=True)
def TempFile(self, mode='w'):
    # This function largely exists so it can be overridden for testing.
return tempfile.NamedTemporaryFile(mode=mode, delete=False)
def WriteFile(self, path, contents, force_verbose=False):
    # This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
with open(path, 'w') as fp:
return fp.write(contents)
class MBErr(Exception):
pass
# See http://goo.gl/l5NPDW and http://goo.gl/4Diozm for the painful
# details of this next section, which handles escaping command lines
# so that they can be copied and pasted into a cmd window.
UNSAFE_FOR_SET = set('^<>&|')
UNSAFE_FOR_CMD = UNSAFE_FOR_SET.union(set('()%'))
ALL_META_CHARS = UNSAFE_FOR_CMD.union(set('"'))
def QuoteForSet(arg):
if any(a in UNSAFE_FOR_SET for a in arg):
arg = ''.join('^' + a if a in UNSAFE_FOR_SET else a for a in arg)
return arg
def QuoteForCmd(arg):
# First, escape the arg so that CommandLineToArgvW will parse it properly.
if arg == '' or ' ' in arg or '"' in arg:
quote_re = re.compile(r'(\\*)"')
arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
# Then check to see if the arg contains any metacharacters other than
# double quotes; if it does, quote everything (including the double
# quotes) for safety.
if any(a in UNSAFE_FOR_CMD for a in arg):
arg = ''.join('^' + a if a in ALL_META_CHARS else a for a in arg)
return arg
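# Worked examples (derived from the rules above; results shown as Python
# literals, illustrative only):
#   QuoteForSet('a^b')  -> 'a^^b'      (caret-escape chars unsafe for `set`)
#   QuoteForCmd('a b')  -> '"a b"'     (quote args containing spaces)
#   QuoteForCmd('a&b')  -> 'a^&b'      (caret-escape cmd metacharacters)
#   QuoteForCmd('a"b')  -> '"a\\"b"'   (escape for CommandLineToArgvW, quote)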
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
'''
Port of the R seg.lm.fit function.
'''
import statsmodels.api as sm
import numpy as np
import warnings
from copy import copy
class Lm_Seg(object):
"""
"""
def __init__(self, x, y, brk):
super(Lm_Seg, self).__init__()
self.x = x
self.y = y
self.brk = brk
# Make sure the starting break point is in range of the data
if not (self.x > self.brk).any():
raise ValueError("brk is outside the range.")
# Check for nans, infs...
if not np.isfinite(x).all():
self.y = self.y[np.isfinite(self.x)]
self.x = self.x[np.isfinite(self.x)]
if not np.isfinite(y).all():
self.x = self.x[np.isfinite(self.y)]
self.y = self.y[np.isfinite(self.y)]
def fit_model(self, tol=1e-3, iter_max=100, h_step=2.0, epsil_0=10,
constant=True, verbose=True):
        '''
        Iteratively refit the model, moving the break point until the
        relative change in the residual sum of squares falls below `tol`
        or `iter_max` iterations are reached.
        '''
# Fit a normal linear model to the data
if constant:
x_const = sm.add_constant(self.x)
model = sm.OLS(self.y, x_const)
else:
model = sm.OLS(self.y, self.x)
init_lm = model.fit()
if verbose:
            print(init_lm.summary())
epsil = epsil_0
        # Before entering the loop, make sure the initial epsilon actually
        # exceeds the tolerance; otherwise the loop below would never run.
        if epsil_0 < tol:
            warnings.warn('Initial epsilon is smaller than tolerance. '
                          'The tolerance should be set smaller.')
return init_lm
# Sum of residuals
dev_0 = np.sum(init_lm.resid**2.)
# Count
it = 0
h_it = 0
# Now loop through and minimize the residuals by changing where the
# breaking point is.
while np.abs(epsil) > tol:
U = (self.x - self.brk) * (self.x > self.brk)
V = deriv_max(self.x, self.brk)
X_all = np.vstack([self.x, U, V]).T
if constant:
X_all = sm.add_constant(X_all)
model = sm.OLS(self.y, X_all)
fit = model.fit()
            beta = fit.params[2]   # Coefficient on U (slope change)
            gamma = fit.params[3]  # Coefficient on V (break-point term)
# Adjust the break point
new_brk = copy(self.brk)
new_brk += (h_step * gamma) / beta
# If the new break point is outside of the allowed range, reset
# the step size to half of the original, then try stepping again
            if not (self.x > new_brk).any():
                while True:
                    h_step /= 2.0
                    # Re-step from the current break with the halved step
                    # rather than accumulating onto the out-of-range value.
                    new_brk = self.brk + (h_step * gamma) / beta
                    h_it += 1
                    if (self.x > new_brk).any():
                        self.brk = new_brk
                        break
                    if h_it >= 5:
                        raise ValueError("Cannot find suitable step size. "
                                         "Check number of breaks.")
else:
self.brk = new_brk
dev_1 = np.sum(fit.resid**2.)
epsil = (dev_1 - dev_0) / (dev_0 + 1e-3)
dev_0 = dev_1
if verbose:
print "Iteration: %s/%s" % (it+1, iter_max)
print fit.summary()
print "Break Point: " + str(self.brk)
print "Epsilon: " + str(epsil)
it += 1
if it > iter_max:
warnings.warning("Max iterations reached. \
Result may not be minimized.")
break
# With the break point hopefully found, do a final good fit
U = (self.x - self.brk) * (self.x > self.brk)
V = deriv_max(self.x, self.brk)
X_all = np.vstack([self.x, U, V]).T
X_all = sm.add_constant(X_all)
model = sm.OLS(self.y, X_all)
self.fit = model.fit()
self._params = self.fit.params
cov_matrix = self.fit.cov_params()
self._errs = np.asarray([np.sqrt(cov_matrix[i, i])
for i in range(cov_matrix.shape[0])])
        # Use the final refit (self.fit), not the last loop iterate.
        self.brk_err = brk_errs(self.fit.params, self.fit.cov_params())
self.get_slopes()
return self
def model(self, x=None, model_return=False):
p = self.params
trans_pt = np.abs(self.x-self.brk).argmin()
mod_eqn = lambda k: p[0] + p[1]*k*(k < self.brk) + \
((p[1]+p[2])*k + (-p[2])*k[trans_pt])*(k >= self.brk)
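        # In equation form (as implemented by the lambda above):
        #   y = p0 + p1*x                    for x <  brk
        #   y = p0 + (p1 + p2)*x - p2*x_t    for x >= brk
        # where x_t is the sampled x closest to the break, which keeps the
        # two segments (approximately) continuous at the break point.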
if model_return or x is None:
return mod_eqn
return mod_eqn(x)
def get_slopes(self):
        '''
        Compute the slope of each fitted segment (and its standard error)
        by accumulating the slope-change coefficients.
        '''
n_slopes = self.params[1:-2].shape[0]
self._slopes = np.empty(n_slopes)
self._slope_errs = np.empty(n_slopes)
for s in range(n_slopes):
if s == 0:
self._slopes[s] = self.params[s+1]
self._slope_errs[s] = self.param_errs[s+1]
else:
                self._slopes[s] = self.params[s + 1] + self._slopes[s - 1]
                self._slope_errs[s] = \
                    np.sqrt(self.param_errs[s + 1]**2 +
                            self._slope_errs[s - 1]**2)
return self
@property
def slopes(self):
return self._slopes
@property
def slope_errs(self):
return self._slope_errs
@property
def params(self):
return np.append(self._params, self.brk)
@property
def param_errs(self):
return np.append(self._errs, self.brk_err)
def plot(self, x, show_data=True):
        '''
        Plot the fitted model over `x`, optionally with the data points.
        '''
import matplotlib.pyplot as p
if show_data:
p.plot(self.x, self.y, 'bD')
p.plot(x, self.model(x), 'g')
p.grid(True)
p.show()
def deriv_max(a, b, pow=1):
if pow == 1:
dum = -1 * np.ones(a.shape)
dum[a < b] = 0
return dum
else:
return -pow * np.max(a - b, axis=0) ** (pow-1)
def brk_errs(params, cov):
'''
Given the covariance matrix of the fits, calculate the standard
error on the break.
'''
    # Var(gamma)
    term1 = cov[3, 3]
    # Var(beta) * (gamma/beta)^2
    term2 = cov[2, 2] * (params[3]/params[2])**2.
    # 2 * Cov(gamma, beta) * (gamma/beta)
    term3 = 2 * cov[3, 2] * (params[3]/params[2])
return np.sqrt(term1 + term2 + term3)
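# Illustrative usage sketch (synthetic data; not part of the original module):
# fit two linear segments that meet at a known break, x = 5.
if __name__ == "__main__":
    np.random.seed(0)
    x_demo = np.linspace(1., 10., 200)
    # Slope 2 below the break, slope 0.5 above, continuous at x = 5.
    y_demo = np.where(x_demo < 5., 2. * x_demo, 10. + 0.5 * (x_demo - 5.))
    y_demo += np.random.normal(0., 0.1, x_demo.size)
    seg = Lm_Seg(x_demo, y_demo, brk=4.)
    seg.fit_model(verbose=False)
    print("Estimated break: %s" % seg.brk)
    print("Segment slopes: %s" % seg.slopes)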
|
|
import gtk
import dialog
import utils
import random
from fract4d import gradient
def show_gradients(parent,f):
GradientDialog.show(parent,f)
class GradientDialog(dialog.T):
def __init__(self,main_window,f):
global userPrefs
dialog.T.__init__(
self,
_("Gradients"),
main_window,
gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.set_size_request(300, 320)
self.f = f
self.grad=gradient.Gradient()
self.mousedown = False
self.origmpos = self.startmpos = 0
self.cur = -1 # no segment selected
#self.create_gradient_dialog()
    def show(parent, f, grad=None):
        # grad defaults to None so the module-level show_gradients(parent, f)
        # helper can call this without supplying a gradient.
        dialog.T.reveal(GradientDialog, False, parent, None, f, grad)
show = staticmethod(show)
def create_gradient_dialog(self):
hData = self.grad.getDataFromHandle(self.grad.cur)
HSVCo = gradient.RGBtoHSV(hData.col)
###GRADIENT PREVIEW###
self.gradarea=gtk.DrawingArea()
self.gradarea.set_size_request(256, 64)
self.gradarea.connect('realize', self.gradarea_realized)
self.gradarea.connect('expose_event', self.gradarea_expose)
self.gradarea.add_events(
gtk.gdk.BUTTON_RELEASE_MASK |
gtk.gdk.BUTTON_PRESS_MASK |
gtk.gdk.POINTER_MOTION_MASK)
self.gradarea.connect('button-press-event', self.gradarea_mousedown)
self.gradarea.connect('button-release-event', self.gradarea_clicked)
self.gradarea.connect('motion-notify-event', self.gradarea_mousemoved)
gradareaBox = gtk.HBox(False, 0)
###CONTEXT MENU###
menu_items = (
( "/_Insert", "<control>I", self.add_handle, 0 ),
( "/_Delete", "<control>D", self.rem_handle, 0 ),
( "/_Coloring Mode", None, None, 0, "<Branch>" ),
( "/Coloring Mode/_RGB", "<control>R", self.cmode, 0 ),
( "/Coloring Mode/_HSV", "<control>H", self.cmode, 1 ),
( "/_Blending Mode", None, None, 0, "<Branch>" ),
( "/Blending Mode/_Linear", "<control>L", self.bmode, 0 ),
( "/Blending Mode/_Sinusoidal", None, self.bmode, 1 ),
( "/Blending Mode/Curved _Increasing",None, self.bmode, 2 ),
( "/Blending Mode/Curved _Decreasing",None, self.bmode, 3 ),
( "/Debug", None, self.printstuff, 0 )
)
accel_group = gtk.AccelGroup()
self.item_factory= gtk.ItemFactory(gtk.Menu, "<gradients>", accel_group)
self.item_factory.create_items(menu_items)
self.add_accel_group(accel_group)
self.menu=self.item_factory.get_widget("<gradients>")
###COLOR SELECTION###
if gtk.pygtk_version[0] >= 2 and gtk.pygtk_version[1] >= 4:
lblCsel = gtk.Label("Color:")
self.csel = gtk.ColorButton(
utils.create_color(hData.col[0], hData.col[1], hData.col[2]))
self.csel.connect('color-set', self.colorchanged)
self.colorbutton = True
else:
self.csel = gtk.Button("Color...")
self.csel.connect('clicked', self.cbutton_clicked)
self.csel_dialog = gtk.ColorSelectionDialog("Select a Color")
self.csel_dialog.colorsel.set_current_color(
utils.create_color(hData.col[0], hData.col[1], hData.col[2]))
self.csel_dialog.ok_button.connect('clicked', self.cdialog_response)
self.colorbutton = False
synccolsB = gtk.Button("Sync Colors")
synccolsB.connect('clicked', self.sync_colors)
CSelBox = gtk.HBox(False, 0)
###ALTERNATION CONTROL###
lblAlternate = gtk.Label(_("Alternation:"))
alternate = gtk.SpinButton(gtk.Adjustment(self.grad.getAlt(), 0, .5, 0.01, .5, 0.0))
alternate.set_digits(3)
alternate.connect('value-changed', self.alternate_changed)
AlternateBox = gtk.HBox(False, 0)
###POSITION CONTROL###
lblPos = gtk.Label(_("Position:"))
self.pos = gtk.SpinButton(gtk.Adjustment(hData.pos, 0, 1, 0.01, 0.1, 0.0))
self.pos.set_digits(2)
self.pos.connect('value-changed', self.pos_changed)
PosBox = gtk.HBox(False, 0)
###RANDOMIZE BUTTON###
randomize = gtk.Button(_("Randomize"))
randomize.connect('clicked', self.randomize)
randBox = gtk.HBox(False, 0)
###OFFSET CONTROL###
lblOffset = gtk.Label(_("Offset:"))
lblOffsetBox = gtk.HBox(False, 0)
offset=gtk.HScale(gtk.Adjustment(self.grad.getOffset(), 0, 1, 0.001, 0.01, 0.0))
offset.set_digits(3)
offset.connect('value-changed', self.offset_changed)
####################
###WIDGET PACKING###
####################
self.vbox.set_homogeneous(0)
gradareaBox.pack_start(self.gradarea, 1, 0, 10)
self.vbox.pack_start(gradareaBox, 0, 0, 5)
if self.colorbutton: CSelBox.pack_start(lblCsel, 0, 0, 10)
CSelBox.pack_start(self.csel, 0, 0, 10)
CSelBox.pack_end(synccolsB, 0, 0, 10)
self.vbox.pack_start(CSelBox, 0, 0, 5)
PosBox.pack_start(lblPos, 0, 0, 10)
PosBox.pack_start(self.pos, 0, 0, 10)
self.vbox.pack_start(PosBox, 0, 0, 5)
AlternateBox.pack_start(lblAlternate, 0, 0, 10)
AlternateBox.pack_start(alternate, 0, 0, 10)
self.vbox.pack_start(AlternateBox, 0, 0, 5)
lblOffsetBox.pack_start(lblOffset, 0, 0, 5)
self.vbox.pack_start(lblOffsetBox, 0, 0, 5)
self.vbox.pack_start(offset, 0, 0, 5)
randBox.pack_start(randomize, 1, 0, 10)
self.vbox.pack_start(randBox, 0, 0, 5)
def offset_changed(self, widget):
    # The stored offset is 1 - slider value, so compare against that.
    if self.grad.getOffset() != 1-widget.get_value():
        self.grad.setOffset(1-widget.get_value())
self.grad.compute()
self.gradarea.queue_draw()
self.f.colorlist=self.grad.clist
self.f.changed(False)
def colorchanged(self, widget):
    colour = widget.get_color()
    seg, side = self.grad.getSegFromHandle(self.grad.cur)
    if side == 'left':
        seg.left.col = [colour.red/256, colour.green/256, colour.blue/256]
    else:
        seg.right.col = [colour.red/256, colour.green/256, colour.blue/256]
self.grad.compute()
self.gradarea.queue_draw()
self.f.colorlist=self.grad.clist
self.f.changed(False)
#The backwards-compatible button was clicked
def cbutton_clicked(self, widget):
self.csel_dialog.show()
def cdialog_response(self, widget):
colour = self.csel_dialog.colorsel.get_current_color()
seg, side = self.grad.getSegFromHandle(self.grad.cur)
if side == 'left':
seg.left.col = [colour.red/256, colour.green/256, colour.blue/256]
else:
seg.right.col = [colour.red/256, colour.green/256, colour.blue/256]
self.grad.compute()
self.gradarea.queue_draw()
self.f.colorlist=self.grad.clist
self.f.changed(False)
self.csel_dialog.hide()
return False
###Each visible handle is really two handles whose colours can be set
###independently. This finds the other handle and gives it the current
###handle's colour.
def sync_colors(self, widget):
if self.grad.cur % 2 == 0: #The handle is the first in its segment
if self.grad.cur > 0:
self.grad.segments[self.grad.cur/2-1].right.col = self.grad.getDataFromHandle(self.grad.cur).col
else:
self.grad.segments[-1].right.col = self.grad.getDataFromHandle(self.grad.cur).col
else:
if self.grad.cur < len(self.grad.segments)*2:
self.grad.segments[self.grad.cur/2+1].left.col = self.grad.getDataFromHandle(self.grad.cur).col
else:
self.grad.segments[0].left.col = self.grad.getDataFromHandle(self.grad.cur).col
self.grad.compute()
self.gradarea.queue_draw()
self.f.colorlist=self.grad.clist
self.f.changed(False)
###ALTERNATION CHANGED###
def alternate_changed(self, widget):
if self.grad.getAlt() != widget.get_value():
self.grad.setAlt(widget.get_value())
self.grad.compute()
self.gradarea.queue_draw()
self.f.colorlist = self.grad.clist
self.f.changed(False)
###POSITION CHANGED###
def pos_changed(self, widget):
if self.grad.getDataFromHandle(self.grad.cur).pos != widget.get_value():
self.grad.move(self.grad.cur, widget.get_value()-self.grad.getDataFromHandle(self.grad.cur).pos)
widget.set_value(self.grad.getDataFromHandle(self.grad.cur).pos)
self.grad.compute()
self.gradarea.queue_draw()
self.f.colorlist = self.grad.clist
self.f.changed(False)
###INIT FOR GRADIENT PREVIEW###
def gradarea_realized(self, widget):
self.gradcol= widget.get_colormap().alloc_color(
"#FFFFFFFFFFFF", True, True)
self.gradgc = widget.window.new_gc( foreground=self.gradcol,
background=self.gradcol,
fill=gtk.gdk.SOLID)
widget.window.draw_rectangle(widget.style.white_gc,
True,
0, 0,
widget.allocation.width,
widget.allocation.height)
return True
def gradarea_expose(self, widget, event):
#Assume some other process has compute()ed the gradient
##Draw the gradient itself##
for col in self.grad.clist:
self.gradcol = widget.get_colormap().alloc_color(col[1]*255,col[2]*255,col[3]*255, True, True)
self.gradgc.set_foreground(self.gradcol)
widget.window.draw_line(self.gradgc,
col[0]*self.grad.num+4, 0,
col[0]*self.grad.num+4, 56)
##Draw some handles##
for seg in self.grad.segments:
s_lpos = (seg.left.pos+(1-self.grad.offset)) * self.grad.num
s_rpos = (seg.right.pos+(1-self.grad.offset)) * self.grad.num
if s_lpos > self.grad.num:
s_lpos -= self.grad.num
elif s_lpos < 0:
s_lpos += self.grad.num
if s_rpos > self.grad.num:
s_rpos -= self.grad.num
elif s_rpos < 0:
s_rpos += self.grad.num
s_lpos += 4
s_rpos += 4
wgc=widget.style.white_gc
bgc=widget.style.black_gc
index=self.grad.segments.index(seg)
#A vast ugliness that should draw the selected handle with a white centre.
#The problem is that each handle is really two handles - the second handle
#of the left-hand segment and the first of the right.
#The first two branches deal with handles in the middle, whilst the second
#two deal with those at the edges. The other is a case for where neither
#of the handles in a segment should be highlighted.
if self.grad.cur/2.0 == index or (self.grad.cur+1)/2.0 == index:
self.draw_handle(widget.window, int(s_lpos), wgc, bgc)
self.draw_handle(widget.window, int(s_rpos), bgc, bgc)
elif (self.grad.cur-1)/2.0 == index:
self.draw_handle(widget.window, int(s_lpos), bgc, bgc)
self.draw_handle(widget.window, int(s_rpos), wgc, bgc)
elif (self.grad.cur-1)/2.0 == len(self.grad.segments)-1.0 and index == 0:
self.draw_handle(widget.window, int(s_lpos), wgc, bgc)
self.draw_handle(widget.window, int(s_rpos), bgc, bgc)
elif self.grad.cur == 0 and index == len(self.grad.segments)/2.0:
self.draw_handle(widget.window, int(s_lpos), bgc, bgc)
self.draw_handle(widget.window, int(s_rpos), wgc, bgc)
else:
self.draw_handle(widget.window, int(s_lpos), bgc, bgc)
self.draw_handle(widget.window, int(s_rpos), bgc, bgc)
return False
def gradarea_mousedown(self, widget, event):
if self.mousedown == False and event.button == 1:
x=event.x/self.grad.num
x-=1-self.grad.offset
if x < 0:
x+=1
seg = self.grad.getSegAt(x)
relx = x - seg.left.pos
if relx < (seg.right.pos-seg.left.pos)/2:
self.grad.cur=self.grad.segments.index(seg)*2
else:
self.grad.cur=self.grad.segments.index(seg)*2+1
hData = self.grad.getDataFromHandle(self.grad.cur)
if self.colorbutton:
self.csel.set_color(
utils.create_color(hData.col[0],hData.col[1],hData.col[2]))
else:
self.csel_dialog.colorsel.set_current_color(
utils.create_color(hData.col[0],hData.col[1],hData.col[2]))
self.pos.set_value(hData.pos)
self.gradarea.queue_draw()
if event.button == 1:
self.mousedown = True
self.origmpos = self.startmpos = event.x
elif event.button == 3:
self.mousepos = event.x #We can't pass this as callback data, because things're screwed. If this isn't true, please tell!
#self.item_factory.popup(int(event.x), int(event.y), event.button)
self.menu.popup(None, None, None, event.button, event.time)
return False
def gradarea_clicked(self, widget, event):
self.mousedown = False
if self.startmpos != event.x:
self.grad.compute()
self.gradarea.queue_draw()
self.f.colorlist=self.grad.getCList()
self.f.changed(False)
return False
def gradarea_mousemoved(self, widget, event):
if self.mousedown:
self.grad.move(self.grad.cur, (event.x - self.origmpos)/self.grad.num)
self.origmpos = event.x
self.grad.compute()
self.gradarea.queue_draw()
def add_handle(self, action, widget):
self.grad.add(self.mousepos/self.grad.num)
self.gradarea.queue_draw()
def rem_handle(self, action, widget):
self.grad.remove(self.grad.cur)
self.grad.cur = 0
self.gradarea.queue_draw()
def cmode(self, action, widget):
seg, side = self.grad.getSegFromHandle(self.grad.cur)
if action == 0:
seg.cmode = 'RGB'
else:
seg.cmode = 'HSV'
self.grad.compute()
self.gradarea.queue_draw()
self.f.colorlist=self.grad.getCList()
self.f.changed(False)
def bmode(self, action, widget):
seg, side = self.grad.getSegFromHandle(self.grad.cur)
if action == 0:
seg.bmode = 'Linear'
elif action == 1:
seg.bmode = 'Sinusoidal'
elif action == 2:
seg.bmode = 'CurvedI'
else:
seg.bmode = 'CurvedD'
self.grad.compute()
self.gradarea.queue_draw()
self.f.colorlist=self.grad.getCList()
self.f.changed(False)
def printstuff(self, action, widget):
for seg in self.grad.segments:
print [seg.left.pos, seg.left.col], [seg.right.pos, seg.right.col]
def randomize(self, widget):
oldcol = [random.randint(0, 255),random.randint(0, 255),random.randint(0, 255)]
oldpos = i = 0
poslist = []
for seg in self.grad.segments:
poslist.append(random.random())
poslist.sort()
for seg in self.grad.segments:
seg.left.pos = oldpos
seg.left.col = oldcol
seg.right.pos = oldpos = poslist[i]
seg.right.col = oldcol = [random.randint(0, 255),random.randint(0, 255),random.randint(0, 255)]
i+=1
seg.right.pos = 1
seg.right.col = self.grad.segments[0].left.col
self.grad.compute()
self.gradarea.queue_draw()
self.f.colorlist=self.grad.getCList()
self.f.changed(False)
return False
def draw_handle(self, drawable, pos, fill, outline):
    # Draw a small triangular handle below the gradient strip.
    # draw_line requires integer coordinates.
    for y in range(8):
        drawable.draw_line(fill, pos-y//2, y+56, pos+y//2, y+56)
    lpos = pos + 3
    rpos = pos - 3
    drawable.draw_line(outline, pos, 56, lpos, 63)
    drawable.draw_line(outline, pos, 56, rpos, 63)
    drawable.draw_line(outline, lpos, 63, rpos, 63)
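# Illustrative helper (not part of the original dialog) spelling out the
# handle-index convention used throughout this file: handle 2n is the left
# handle of segment n, handle 2n+1 its right handle.
def handle_to_segment(cur):
    side = 'left' if cur % 2 == 0 else 'right'
    return cur // 2, side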
|
|
# -*- coding: utf-8 -*-
"""
This module contains some utility functions for Streams.
You may wonder why we need such simple ``filter-*`` functions. The reason
lies in how :py:mod:`multiprocessing`, and therefore
:py:class:`concurrent.futures.ProcessPoolExecutor`, works: it can't pickle
lambdas, so we need whole, pickleable functions.
"""
###############################################################################
from six import PY3
from six import text_type
# noinspection PyUnresolvedReferences
from six.moves import zip as izip
try:
from cdecimal import Decimal
except ImportError:
from decimal import Decimal
if PY3:
long = int
###############################################################################
def filter_keys(item):
"""
Returns first element of the tuple or ``item`` itself.
:param object item: It can be tuple, list or just an object.
>>> filter_keys(1)
... 1
>>> filter_keys((1, 2))
... 1
"""
if isinstance(item, tuple):
return item[0]
return item
def filter_values(item):
"""
Returns last element of the tuple or ``item`` itself.
:param object item: It can be tuple, list or just an object.
>>> filter_values(1)
... 1
>>> filter_values((1, 2))
... 2
"""
if isinstance(item, tuple):
return item[-1]
return item
def filter_true(argument):
"""
Returns the predicate value of the given item and the item itself.
:param tuple argument: Argument consists of the predicate function and the
item itself.
>>> filter_true((lambda x: x <= 5, 5))
... True, 5
>>> filter_true((lambda x: x > 100, 1))
... False, 1
"""
predicate, item = argument
return bool(predicate(item)), item
def filter_false(argument):
"""
Opposite to :py:func:`streams.utils.filter_true`
:param tuple argument: Argument consists of the predicate function and the
item itself.
>>> filter_false((lambda x: x <= 5, 5))
... False, 5
>>> filter_false((lambda x: x > 100, 1))
... True, 1
"""
is_correct, item = filter_true(argument)
return not is_correct, item
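# Why module-level functions matter here (a minimal sketch; the lines
# below are comments only and never run on import): unlike a lambda,
# ``filter_keys`` can be pickled and shipped to worker processes.
#
#     from concurrent.futures import ProcessPoolExecutor
#     with ProcessPoolExecutor() as pool:
#         keys = list(pool.map(filter_keys, [(1, 'a'), (2, 'b')]))
#     # keys == [1, 2]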
# noinspection PyBroadException
def int_or_none(item):
"""
Tries to convert ``item`` to :py:func:`int`. If it is not possible, returns
``None``.
:param object item: Element to convert into :py:func:`int`.
>>> int_or_none(1)
... 1
>>> int_or_none("1")
... 1
>>> int_or_none("smth")
... None
"""
if isinstance(item, int):
return item
try:
return int(item)
except:
return None
# noinspection PyBroadException
def float_or_none(item):
"""
Tries to convert ``item`` to :py:func:`float`. If it is not possible,
returns ``None``.
:param object item: Element to convert into :py:func:`float`.
>>> float_or_none(1)
... 1.0
>>> float_or_none("1")
... 1.0
>>> float_or_none("smth")
... None
"""
if isinstance(item, float):
return item
try:
return float(item)
except:
return None
# noinspection PyBroadException
def long_or_none(item):
"""
Tries to convert ``item`` to :py:func:`long`. If it is not possible,
returns ``None``.
:param object item: Element to convert into :py:func:`long`.
>>> long_or_none(1)
... 1L
>>> long_or_none("1")
... 1L
>>> long_or_none("smth")
... None
"""
if isinstance(item, long):
return item
try:
return long(item)
except:
return None
# noinspection PyBroadException
def decimal_or_none(item):
"""
Tries to convert ``item`` to :py:class:`decimal.Decimal`. If it is not
possible, returns ``None``.
:param object item: Element to convert into :py:class:`decimal.Decimal`.
>>> decimal_or_none(1)
... Decimal("1")
>>> decimal_or_none("1")
... Decimal("1")
>>> decimal_or_none("smth")
... None
"""
if isinstance(item, Decimal):
return item
try:
return Decimal(item)
except:
return None
# noinspection PyBroadException
def unicode_or_none(item):
"""
Tries to convert ``item`` to :py:func:`unicode`. If it is not possible,
returns ``None``.
:param object item: Element to convert into :py:func:`unicode`.
>>> unicode_or_none(1)
... u"1"
>>> unicode_or_none("1")
... u"1"
>>> unicode_or_none("smth")
... u"smth"
.. note::
This is relevant for Python 2 only. Python 3 will use native
:py:func:`str`.
"""
if isinstance(item, text_type):
return item
try:
return text_type(item)
except:
return None
def apply_to_tuple(*funcs, **kwargs):
"""
Applies several functions to one ``item`` and returns tuple of results.
:param list func: The list of functions we need to apply.
:param dict kwargs: Keyword arguments with only one mandatory argument,
``item``. Functions would be applied to this item.
>>> apply_to_tuple(int, float, item="1")
... (1, 1.0)
"""
item = kwargs["item"]
if not isinstance(item, (tuple, list)):
return funcs[0](item)
result = []
for func, arg in izip(funcs, item):
if func is not None:
arg = func(arg)
result.append(arg)
return tuple(result)
def key_mapper(argument):
"""
Maps ``predicate`` only to key (first element) of a ``item``. If ``item``
is not :py:func:`tuple` then tuplifies it first.
:param tuple argument: The tuple of (``predicate`` and ``item``).
>>> key_mapper((lambda x: x + 10, (1, 2)))
... (11, 2)
"""
predicate, item = argument
item = item if isinstance(item, (tuple, list)) else (item, item)
return apply_to_tuple(predicate, None, item=item)
def value_mapper(argument):
"""
Maps ``predicate`` only to value (last element) of a ``item``. If ``item``
is not :py:func:`tuple` then tuplifies it first.
:param tuple argument: The tuple of (``predicate`` and ``item``).
>>> value_mapper((lambda x: x + 10, (1, 2)))
... (1, 12)
"""
predicate, item = argument
item = item if isinstance(item, (tuple, list)) else (item, item)
return apply_to_tuple(None, predicate, item=item)
def make_list(iterable):
"""
Makes a list from given ``iterable``. But won't create new one if
``iterable`` is a :py:func:`list` or :py:func:`tuple` itself.
:param Iterable iterable: Some iterable entity we need to convert into
:py:func:`list`.
"""
if isinstance(iterable, (list, tuple)):
return iterable
return list(iterable)
###############################################################################
class MaxHeapItem(object):
"""
This is a small wrapper around an item that makes it possible to use the
heaps from :py:mod:`heapq` as max-heaps. Unfortunately that module
provides min-heaps only.
Come on, we need max-heaps too.
"""
__slots__ = "value",
def __init__(self, value):
self.value = value
def __lt__(self, other):
other = other.value if isinstance(other, MaxHeapItem) else other
return self.value > other
def __le__(self, other):
other = other.value if isinstance(other, MaxHeapItem) else other
return self.value >= other
def __gt__(self, other):
other = other.value if isinstance(other, MaxHeapItem) else other
return self.value < other
def __ge__(self, other):
other = other.value if isinstance(other, MaxHeapItem) else other
return self.value <= other
def __eq__(self, other):
other = other.value if isinstance(other, MaxHeapItem) else other
return self.value == other
def __ne__(self, other):
other = other.value if isinstance(other, MaxHeapItem) else other
return self.value != other
def __cmp__(self, other):
other = other.value if isinstance(other, MaxHeapItem) else other
if self.value < other:
return 1
if self.value == other:
return 0
return -1
def __repr__(self):
return repr(self.value)
def __hash__(self):
return hash(self.value)
def __nonzero__(self):
return bool(self.value)
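if __name__ == "__main__":  # pragma: no cover
    # A minimal max-heap sketch using the wrapper above (illustrative).
    import heapq
    heap = []
    for value in (3, 1, 4, 1, 5):
        heapq.heappush(heap, MaxHeapItem(value))
    assert heapq.heappop(heap).value == 5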
|
|
gCategories = [
{'name':'All', 'desc': '*',
'sub': [
{'name':'Computers & Electronics', 'desc':'Computers, Laptops, tablets, Phones, Desktops, Computer Accessories',
'sub': [
{'name':'Laptops', 'desc':'Laptop computers'},
{'name':'Desktops', 'desc':'Desktop computers'},
{'name':'Mobile Phones', 'desc':'Smart phones, feature phones'},
{'name':'Tablets', 'desc':'Tablet computers'},
{'name':'Smart TV', 'desc':'TV, Smart TV'},
{'name':'Camera', 'desc': 'Cameras'},
{'name':'Electronic Accessories', 'desc':'Accessories',
'sub': [
{'name':'Mobile Cases', 'desc':''},
{'name':'Headphones & Headsets', 'desc':''},
{'name':'Power Banks', 'desc':''},
{'name':'Screenguards', 'desc':''},
{'name':'Memory Cards', 'desc':''},
{'name':'Smart Headphones', 'desc':''},
{'name':'Mobile Cables', 'desc':''},
{'name':'Chargers', 'desc':''},
{'name':'Selfie Sticks', 'desc':''},
],
},
],
},
{'name':'Appliances', 'desc': 'Home, Kitchen Appliances',
'sub': [
{'name':'Televisions', 'desc':'Televisions'},
{'name':'Refrigerators', 'desc':'Refrigerators'},
{'name':'Air Conditioners', 'desc':'Air Conditioners'},
{'name':'Geysers', 'desc':'Geysers'},
{'name':'Washing Machines', 'desc':'Washing machines'},
{'name':'Kitchen Appliances', 'desc':'Kitchen Appliances',
'sub':[
{'name':'Mixer/Juicer/Grinder', 'desc':''},
{'name':'Food Processors', 'desc':''},
{'name':'Induction Cooktops', 'desc':''},
{'name':'Sandwich Makers', 'desc':''},
{'name':'Popup Toasters', 'desc':''},
{'name':'Hand Blender', 'desc':''},
{'name':'Electric Kettle', 'desc':''},
{'name':'Oven Toaster Grills', 'desc':''},
{'name':'Coffee Makers', 'desc':''},
{'name':'Electric Cookers', 'desc':''},
{'name':'Dishwashers', 'desc':''},
{'name':'Chimneys', 'desc':''},
{'name':'Microwave Ovens', 'desc':''},
{'name':'Water Purifiers', 'desc':''},
],
},
{'name':'Home Appliances', 'desc':'Home Appliances',
'sub': [
{'name':'Irons', 'desc':''},
{'name':'Air Purifiers', 'desc':''},
{'name':'Water Purifiers', 'desc':''},
{'name':'Vacuum Cleaners', 'desc':''},
{'name':'Smart Home Automation', 'desc':''},
],
},
],
},
{'name':'Fashion & Clothing Men', 'desc':'boys, men',
'sub': [
{'name': 'clothing', 'desc':'for boys, men',
 'sub': [
{'name':'Top wear', 'desc':'',
'sub':[
{'name':'T-Shirts', 'desc':'for boys, men',},
{'name':'Shirts', 'desc':'for boys, men',},
{'name':'Kurtas', 'desc':'for boys, men',},
{'name':'Suits & Blazers', 'desc':'for boys, men',},
{'name':'Jackets', 'desc':'for boys, men',},
{'name':'Sweatshirts', 'desc':'for boys, men',},
],
},
{'name':'Bottom wear', 'desc':'for boys, men',
'sub':[
{'name':'Jeans', 'desc':'for boys, men',},
{'name':'Shorts', 'desc':'for boys, men',},
{'name':'Cargos', 'desc':'for boys, men',},
{'name':'Track pants', 'desc':'for boys, men',},
]
},
{'name':'Sports wear', 'desc':'for boys, men',
'sub':[
{'name':'Sports T-Shirts', 'desc':'for boys, men',},
{'name':'Track Pants', 'desc':'for boys, men',},
{'name':'Track Suits', 'desc':'for boys, men',},
{'name':'Shorts', 'desc':'for boys, men',},
]
},
{'name':'Innerwear & Sleepwear', 'desc':'for boys, men',
'sub':[
{'name':'Briefs & Trunks', 'desc':'for boys, men',},
{'name':'Vests', 'desc':'for boys, men',},
{'name':'Boxers', 'desc':'for boys, men',},
{'name':'Thermals', 'desc':'for boys, men',},
{'name':'Ties, Socks, Caps', 'desc':'for boys, men',},
{'name':'Kurta, Pyjama', 'desc':'for boys, men',},
]
},
],
},
{'name':'Footwear', 'desc':'boys, men',
'sub':[
{'name':'Sports Shoes', 'desc':'boys, men',},
{'name':'Casual Shoes', 'desc':'boys, men',},
{'name':'Formal Shoes', 'desc':'men',},
{'name':'Sandals & Floaters', 'desc':'boys,men',},
{'name':'Flip Flops', 'desc':'boys,men',},
{'name':'Loafers', 'desc':'boys, men',},
{'name':'Boots', 'desc':'boys, men',},
{'name':'Running Shoes', 'desc':'boys, men',},
{'name':'Sneakers', 'desc':'boys, men',},
],
},
{'name':'Accessories', 'desc':'boys, men',
'sub':[
{'name':'Backpacks', 'desc':'boys, men',},
{'name':'Wallets', 'desc':'boys, men',},
{'name':'Belts', 'desc':'boys, men',},
{'name':'Sunglasses', 'desc':'boys, men',},
{'name':'Luggage & Travel', 'desc':'boys, men',},
{'name':'Jewellery', 'desc':'boys, men',},
{'name':'Sports & Fitness Store', 'desc':'boys, men',},
{'name':'Smart Watches', 'desc':'boys, men',},
{'name':'Smart Bands', 'desc':'boys, men',},
{'name':'Trimmers', 'desc':'boys, men',},
{'name':'Shavers', 'desc':'boys, men',},
{'name':'Perfumes', 'desc':'boys, men',},
{'name':'Deodorants', 'desc':'boys, men',},
],
},
]
},
{'name':'Fashion & Clothing Women', 'desc':'girls, women',
'sub': [
{'name':'Indian & Fusion Wear', 'desc':'girls, women',
'sub':[
{'name':'Kurtas & Suits', 'desc':'girls, women',},
{'name':'Kurtis, Tunics & Tops', 'desc':'girls, women',},
{'name':'Leggings, Salwars, Churidars', 'desc':'girls, women',},
{'name':'Skirts & Palazzos', 'desc':'girls, women',},
{'name':'Sarees & Blouses', 'desc':'girls, women',},
{'name':'Dress Material', 'desc':'girls, women',},
{'name':'Lehenga Choli', 'desc':'girls, women',},
{'name':'Dupattas & Shawls', 'desc':'girls, women',},
{'name':'Jackets & Waistcoats', 'desc':'girls, women',},
],
},
{'name':'Western Wear', 'desc':'girls, women',
'sub':[
{'name':'Dresses & Jumpsuits', 'desc':'girls, women',},
{'name':'Tops, T-Shirts & Shirts', 'desc':'girls, women',},
{'name':'Jeans & Jeggings', 'desc':'girls, women',},
{'name':'Trousers & Capris', 'desc':'girls, women',},
{'name':'Shorts & Skirts', 'desc':'girls, women',},
{'name':'Shrugs', 'desc':'girls, women',},
{'name':'Sweaters & Sweatshirts', 'desc':'girls, women',},
{'name':'Jackets & Waistcoats', 'desc':'girls, women',},
{'name':'Coats & Blazers', 'desc':'girls, women',},
],
},
{'name':'Lingerie & Sleepwear', 'desc':'girls, women',
'sub':[
{'name':'Bras & Lingerie Sets', 'desc':'girls, women',},
{'name':'Briefs', 'desc':'girls, women',},
{'name':'Shapewear', 'desc':'girls, women',},
{'name':'Sleepwear & Loungewear', 'desc':'girls, women',},
{'name':'Swimwear', 'desc':'girls, women',},
{'name':'Camisoles & Thermals', 'desc':'girls, women',},
],
},
{'name':'Footwear', 'desc':'girls, women',
 'sub':[
{'name':'Flats & Casual Shoes', 'desc':'girls, women',},
{'name':'Heels', 'desc':'girls, women',},
{'name':'Sports Shoes & Floaters', 'desc':'girls, women',},
{'name':'Sports & Active Wear', 'desc':'girls, women',},
],
},
{'name':'Accessories', 'desc':'girls, women',
'sub':[
{'name':'Sports Equipment', 'desc':'girls, women',},
{'name':'Handbags, Bags & Wallets', 'desc':'girls, women',},
{'name':'Watches & Wearables', 'desc':'girls, women',},
{'name':'Sunglasses & Frames', 'desc':'girls, women',},
{'name':'Headphones & Earphones', 'desc':'girls, women',},
{'name':'Luggage & Trolleys', 'desc':'girls, women',},
{'name':'Cosmetics & Personal Care', 'desc':'girls, women',},
{'name':'Fashion Accessories', 'desc':'girls, women',},
{'name':'Belts', 'desc':'girls, women',},
{'name':'Scarves, Stoles & Gloves', 'desc':'girls, women',},
{'name':'Caps & Hats', 'desc':'girls, women',},
{'name':'Hair Accessories', 'desc':'girls, women',},
{'name':'Socks', 'desc':'girls, women',},
{'name':'Hair Straighteners', 'desc':'girls, women',},
{'name':'Hair Dryers', 'desc':'girls, women',},
{'name':'Beauty & Grooming', 'desc':'girls, women',},
{'name':'Make Up', 'desc':'girls, women',},
{'name':'Skin Care', 'desc':'girls, women',},
{'name':'Deodorants & Perfumes', 'desc':'girls, women',},
]
},
{'name':'Jewellery', 'desc':'girls, women',
'sub':[
{'name':'Precious Jewellery', 'desc':'girls, women',},
{'name':'Artificial Jewellery', 'desc':'girls, women',},
{'name':'Silver Jewellery', 'desc':'girls, women',},
{'name':'Gold jewellery', 'desc':'girls, women',},
],
},
],
},
{'name':'Books', 'desc':'',
'sub':[
{'name':'Entrance Exams', 'desc':'',},
{'name':'Academic', 'desc':'',},
{'name':'Novels', 'desc':'',},
{'name':'History', 'desc':'',},
{'name':'Science', 'desc':'',},
{'name':'Literature & Fiction', 'desc':'',},
{'name':'Indian Writing', 'desc':'',},
{'name':'Biographies', 'desc':'',},
{'name':'Children', 'desc':'',},
{'name':'Business', 'desc':'',},
{'name':'Self Help', 'desc':'',},
{'name':'Comics', 'desc':'',},
{'name':'Stationery', 'desc':'',},
{'name':'Pens', 'desc':'',},
{'name':'Diaries', 'desc':'',},
{'name':'Key Chains', 'desc':'',},
{'name':'Desk Organisers', 'desc':'',},
{'name':'Guide books', 'desc':'',},
],
},
{'name':'Accessories', 'desc':'',
'sub':[
{'name':'Car & Bike Accessories', 'desc':'',
 'sub':[
{'name':'Car & Bike Accessories', 'desc':''},
{'name':'Car Body Cover', 'desc':''},
{'name':'Bike Body Cover', 'desc':''},
{'name':'Car Air Freshener', 'desc':''},
{'name':'Vehicle Washing & Cleaning', 'desc':''},
{'name':'Car Sun Shade', 'desc':''},
{'name':'Car Mat', 'desc':''},
{'name':'Car Electronics & Appliances', 'desc':''},
{'name':'Car Media Player', 'desc':''},
{'name':'Car Pressure Washer', 'desc':''},
{'name':'Car Charger', 'desc':''},
{'name':'Car Bluetooth Device', 'desc':''},
{'name':'Car Vacuum Cleaner', 'desc':''},
{'name':'Car Refrigerator', 'desc':''},
{'name':'Helmets & Riding Gear', 'desc':''},
],
},
{'name':'Sports Accessories', 'desc':'Football, Basketball, cricket, hockey',
'sub':[
{'name':'Football', 'desc':'',
'sub':[
{'name':'Footballs', 'desc':'',},
],
},
{'name':'Cricket', 'desc':'',
'sub':[
{'name':'bats', 'desc':'',},
{'name':'balls', 'desc':'',},
{'name':'stumps', 'desc':'',},
],
},
],
},
],
},
],
},
]
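# Illustrative traversal helper (not part of the original data): flattens
# the tree above into "parent > child" path strings.
def iter_category_paths(categories, prefix=''):
    for cat in categories:
        path = prefix + ' > ' + cat['name'] if prefix else cat['name']
        yield path
        for sub_path in iter_category_paths(cat.get('sub', []), path):
            yield sub_path
# e.g.: for p in iter_category_paths(gCategories): print(p)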
|
|
# A little helper module for plotting of broadbean objects
from typing import Tuple, Union, Dict, List
import numpy as np
import matplotlib.pyplot as plt
from broadbean import Sequence, BluePrint, Element
from broadbean.sequence import SequenceConsistencyError
# The object we can/want to plot
BBObject = Union[Sequence, BluePrint, Element]
def getSIScalingAndPrefix(minmax: Tuple[float, float]) -> Tuple[float, str]:
"""
Return the scaling exponent and unit prefix. E.g. (-2e-3, 1e-6) will
return (1e3, 'm')
Args:
minmax: The (min, max) value of the signal
Returns:
A tuple of the scaling (inverse of the prefix) and the prefix
string.
"""
v_max = max(map(abs, minmax)) # type: ignore
if v_max == 0:
v_max = 1 # type: ignore
exponent = np.log10(v_max)
prefix = ''
scaling: float = 1
if exponent < 0:
prefix = 'm'
scaling = 1e3
if exponent < -3:
prefix = 'micro '
scaling = 1e6
if exponent < -6:
prefix = 'n'
scaling = 1e9
return (scaling, prefix)
def _plot_object_validator(obj_to_plot: BBObject) -> None:
"""
Validate the object
"""
if isinstance(obj_to_plot, Sequence):
proceed = obj_to_plot.checkConsistency(verbose=True)
if not proceed:
raise SequenceConsistencyError
elif isinstance(obj_to_plot, Element):
obj_to_plot.validateDurations()
elif isinstance(obj_to_plot, BluePrint):
assert obj_to_plot.SR is not None
def _plot_object_forger(obj_to_plot: BBObject,
**forger_kwargs) -> Dict[int, Dict]:
"""
Make a forged sequence out of any object.
Returns a forged sequence.
"""
if isinstance(obj_to_plot, BluePrint):
elem = Element()
elem.addBluePrint(1, obj_to_plot)
seq = Sequence()
seq.addElement(1, elem)
seq.setSR(obj_to_plot.SR)
elif isinstance(obj_to_plot, Element):
seq = Sequence()
seq.addElement(1, obj_to_plot)
seq.setSR(obj_to_plot._meta['SR'])
elif isinstance(obj_to_plot, Sequence):
seq = obj_to_plot
forged_seq = seq.forge(includetime=True, **forger_kwargs)
return forged_seq
def _plot_summariser(seq: Dict[int, Dict]) -> Dict[int, Dict[str, np.ndarray]]:
"""
Return a plotting summary of a subsequence.
Args:
seq: The 'content' value of a forged sequence where a
subsequence resides
Returns:
A dict that looks like a forged element, but all waveforms
are just two points, np.array([min, max])
"""
output = {}
# we assume correctness: all positions specify the same channels
chans = seq[1]['data'].keys()
minmax = dict(zip(chans, [(0, 0)]*len(chans)))
for element in seq.values():
arr_dict = element['data']
for chan in chans:
wfm = arr_dict[chan]['wfm']
if wfm.min() < minmax[chan][0]:
minmax[chan] = (wfm.min(), minmax[chan][1])
if wfm.max() > minmax[chan][1]:
minmax[chan] = (minmax[chan][0], wfm.max())
output[chan] = {'wfm': np.array(minmax[chan]),
'm1': np.zeros(2),
'm2': np.zeros(2),
'time': np.linspace(0, 1, 2)}
return output
# the Grand Unified Plotter
def plotter(obj_to_plot: BBObject, **forger_kwargs) -> None:
"""
The one plot function to be called. Turns whatever it gets
into a sequence, forges it, and plots that.
"""
# TODO: Take axes as input
# strategy:
# * Validate
# * Forge
# * Plot
_plot_object_validator(obj_to_plot)
seq = _plot_object_forger(obj_to_plot, **forger_kwargs)
# Get the dimensions.
chans = seq[1]['content'][1]['data'].keys()
seqlen = len(seq.keys())
def update_minmax(chanminmax, wfmdata, chanind):
(thismin, thismax) = (wfmdata.min(), wfmdata.max())
if thismin < chanminmax[chanind][0]:
chanminmax[chanind] = [thismin, chanminmax[chanind][1]]
if thismax > chanminmax[chanind][1]:
chanminmax[chanind] = [chanminmax[chanind][0], thismax]
return chanminmax
# Then figure out the figure scalings
minf: float = -np.inf
inf: float = np.inf
chanminmax: List[Tuple[float, float]] = [(inf, minf)]*len(chans)
for chanind, chan in enumerate(chans):
for pos in range(1, seqlen+1):
if seq[pos]['type'] == 'element':
wfmdata = (seq[pos]['content'][1]
['data'][chan]['wfm'])
chanminmax = update_minmax(chanminmax, wfmdata, chanind)
elif seq[pos]['type'] == 'subsequence':
for pos2 in seq[pos]['content'].keys():
elem = seq[pos]['content'][pos2]['data']
wfmdata = elem[chan]['wfm']
chanminmax = update_minmax(chanminmax,
wfmdata, chanind)
fig, axs = plt.subplots(len(chans), seqlen)
# ...and do the plotting
for chanind, chan in enumerate(chans):
# figure out the channel voltage scaling
# The entire channel shares a y-axis
minmax: Tuple[float, float] = chanminmax[chanind]
(voltagescaling, voltageprefix) = getSIScalingAndPrefix(minmax)
voltageunit = voltageprefix + 'V'
for pos in range(seqlen):
# 1 by N arrays are indexed differently than M by N arrays
# and 1 by 1 arrays are not arrays at all...
if len(chans) == 1 and seqlen > 1:
ax = axs[pos]
if len(chans) > 1 and seqlen == 1:
ax = axs[chanind]
if len(chans) == 1 and seqlen == 1:
ax = axs
if len(chans) > 1 and seqlen > 1:
ax = axs[chanind, pos]
# reduce the tickmark density (must be called before scaling)
ax.locator_params(tight=True, nbins=4, prune='lower')
if seq[pos+1]['type'] == 'element':
content = seq[pos+1]['content'][1]['data'][chan]
wfm = content['wfm']
m1 = content.get('m1', np.zeros_like(wfm))
m2 = content.get('m2', np.zeros_like(wfm))
time = content['time']
newdurs = content.get('newdurations', [])
else:
arr_dict = _plot_summariser(seq[pos+1]['content'])
wfm = arr_dict[chan]['wfm']
newdurs = []
ax.annotate('SUBSEQ', xy=(0.5, 0.5),
xycoords='axes fraction',
horizontalalignment='center')
time = np.linspace(0, 1, 2) # needed for timeexponent
# Figure out the axes' scaling
timeexponent = np.log10(time.max())
timeunit = 's'
timescaling: float = 1.0
if timeexponent < 0:
timeunit = 'ms'
timescaling = 1e3
if timeexponent < -3:
timeunit = 'micro s'
timescaling = 1e6
if timeexponent < -6:
timeunit = 'ns'
timescaling = 1e9
if seq[pos+1]['type'] == 'element':
ax.plot(timescaling*time, voltagescaling*wfm, lw=3,
color=(0.6, 0.4, 0.3), alpha=0.4)
ymax = voltagescaling * chanminmax[chanind][1]
ymin = voltagescaling * chanminmax[chanind][0]
yrange = ymax - ymin
ax.set_ylim([ymin-0.05*yrange, ymax+0.2*yrange])
if seq[pos+1]['type'] == 'element':
# TODO: make this work for more than two markers
# marker1 (red, on top)
y_m1 = ymax+0.15*yrange
marker_on = np.ones_like(m1)
marker_on[m1 == 0] = np.nan
marker_off = np.ones_like(m1)
ax.plot(timescaling*time, y_m1*marker_off,
color=(0.6, 0.1, 0.1), alpha=0.2, lw=2)
ax.plot(timescaling*time, y_m1*marker_on,
color=(0.6, 0.1, 0.1), alpha=0.6, lw=2)
# marker 2 (blue, below the red)
y_m2 = ymax+0.10*yrange
marker_on = np.ones_like(m2)
marker_on[m2 == 0] = np.nan
marker_off = np.ones_like(m2)
ax.plot(timescaling*time, y_m2*marker_off,
color=(0.1, 0.1, 0.6), alpha=0.2, lw=2)
ax.plot(timescaling*time, y_m2*marker_on,
color=(0.1, 0.1, 0.6), alpha=0.6, lw=2)
# If subsequence, plot lines indicating min and max value
if seq[pos+1]['type'] == 'subsequence':
# min:
ax.plot(time, np.ones_like(time)*wfm[0],
color=(0.12, 0.12, 0.12), alpha=0.2, lw=2)
# max:
ax.plot(time, np.ones_like(time)*wfm[1],
color=(0.12, 0.12, 0.12), alpha=0.2, lw=2)
ax.set_xticks([])
# time step lines
for dur in np.cumsum(newdurs):
ax.plot([timescaling*dur, timescaling*dur],
[ax.get_ylim()[0], ax.get_ylim()[1]],
color=(0.312, 0.2, 0.33),
alpha=0.3)
# labels
if pos == 0:
ax.set_ylabel('({})'.format(voltageunit))
if pos == seqlen - 1 and not(isinstance(obj_to_plot, BluePrint)):
newax = ax.twinx()
newax.set_yticks([])
if isinstance(chan, int):
new_ylabel = f'Ch. {chan}'
elif isinstance(chan, str):
new_ylabel = chan
newax.set_ylabel(new_ylabel)
if seq[pos+1]['type'] == 'subsequence':
ax.set_xlabel('Time N/A')
else:
ax.set_xlabel('({})'.format(timeunit))
# remove excess space from the plot
if not chanind+1 == len(chans):
ax.set_xticks([])
if not pos == 0:
ax.set_yticks([])
fig.subplots_adjust(hspace=0, wspace=0)
# display sequencer information
if chanind == 0 and isinstance(obj_to_plot, Sequence):
seq_info = seq[pos+1]['sequencing']
titlestring = ''
if seq_info['twait'] == 1: # trigger wait
titlestring += 'T '
if seq_info['nrep'] > 1: # nreps
titlestring += '\u21BB{} '.format(seq_info['nrep'])
if seq_info['nrep'] == 0:
titlestring += '\u221E '
if seq_info['jump_input'] != 0:
if seq_info['jump_input'] == -1:
titlestring += 'E\u2192 '
else:
titlestring += 'E{} '.format(seq_info['jump_input'])
if seq_info['goto'] > 0:
titlestring += '\u21b1{}'.format(seq_info['goto'])
ax.set_title(titlestring)
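# A minimal usage sketch (hedged; `bp`, `my_elem` and `my_seq` are assumed
# to be broadbean objects built elsewhere, with sample rates set):
#
#     plotter(bp)        # a BluePrint is forged into a 1-element sequence
#     plotter(my_elem)   # an Element plots as a single column
#     plotter(my_seq)    # a Sequence plots its elements as columns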
|
|
"""
Face completion with multi-output estimators: scikit-learn regressors
compared against an OVFM stochastic-gradient model.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
import sklearn.preprocessing
import OVFM.Model as md
import OVFM.FeatureMap as fm
import OVFM.Risk as rsk
import OVFM.LearningRate as lr
import OVFM.DataGeneration as dg
import OVFM.SGD as sgd
# NOTE: assumed module paths for the `ls` (loss) and `reg` (regulariser)
# helpers used in tuneSGD below; adjust to the actual OVFM layout.
import OVFM.Loss as ls
import OVFM.Regularization as reg
import time
from IPython import embed
def tuneSGD( D, L, features, targets ):
best_score = 1
last_score = 1
best_params = np.empty( 3 )
for gamma in np.logspace( -5, -1, 5 ):
phi_l = fm.DecomposableFF( gamma, features.shape[ 1 ], D, L )
loss = ls.L2( )
for C in np.logspace( -10, 1, 5 ):
re = reg.L1( C )
for eta0 in np.logspace( -2, 2, 5 ):
l = lr.Constant( eta0 )
estimator = sgd.SGD(phi_l, l, loss, re, 1, 5, False, 0)
scores = sgd.scoreCV( estimator, features, targets )
print( scores, gamma, C, eta0 )
if np.mean( scores ) >= last_score:
last_score = 1
break
last_score = np.mean( scores )
if np.mean( scores ) <= best_score:
best_params[ 0 ] = C
best_params[ 1 ] = eta0
best_params[ 2 ] = gamma
best_score = np.mean( scores )
return best_params
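# For intuition, a minimal random-Fourier-feature sketch (NumPy only; the
# real feature maps live in OVFM.FeatureMap, so the function name and the
# RBF convention k(x, y) = exp(-gamma * ||x - y||**2) here are assumptions):
def rff_features(X, gamma, D, rng=np.random):
    # Sample spectral frequencies of the RBF kernel plus random phases;
    # k(x, y) is then approximated by the dot product of the features.
    W = rng.normal(scale=np.sqrt(2.0 * gamma), size=(X.shape[1], D))
    b = rng.uniform(0.0, 2.0 * np.pi, size=D)
    return np.sqrt(2.0 / D) * np.cos(np.dot(X, W) + b)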
if __name__ == '__main__':
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1)).astype( float )
scaler = sklearn.preprocessing.StandardScaler( )
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = scaler.fit_transform( train[:, :int(np.ceil(0.5 * n_pixels))] ) # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):] # Lower half of the faces
X_test = scaler.transform( test[:, :int(np.ceil(0.5 * n_pixels))] ) # reuse the scaler fitted on the training set
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
#Begin OVFM stuff
D = 100
# # B = Id + 1 L2 eps
# lbda = 0.99999
# gamma = 0.0005
# eps = 0.005
# C = 0.0001
# eta0 = 5.0
# # B = Id + 1 L1 eps
# lbda = 0.99999
# gamma = 0.0005
# eps = 0.005
# C = 0.001
# eta0 = 1.0
# B = corrcoef L2 eps
# gamma = 3.59381366e-05
# eps = 0.004
# C = 4.64158883e-07
# eta0 = 5.99484250e-01
# B = corrcoef L1 eps
gamma = 1.0e-03
eps = 0.004
C = 0
eta0 = 2.5
gff = fm.GaussianFF( gamma, X_train.shape[ 1 ], D )
Kex = gff.kernel_exact( X_train )
Kap = gff.kernel_approx( X_train )
fig, axes = plt.subplots( nrows=1, ncols=2, sharex=False, sharey=False )
im = axes[ 0 ].imshow( Kex, origin = 'lower' )
im.set_cmap( 'hot' )
axes[ 0 ].set_title( 'Kernel exact' )
im = axes[ 1 ].imshow( Kap, origin = 'lower' )
im.set_cmap( 'hot' )
axes[ 1 ].set_title( 'Kernel approximation' )
plt.show( )
print( 'Kernel approximation MSE:', np.linalg.norm( Kex - Kap ) ** 2 / X_train.size )
# M = np.corrcoef( y_train.T ) >= 1.0
# Dg = np.diag( np.diag( M ) + np.sum( M, axis = 1 ) )
# L = np.linalg.inv( Dg - M )
# L = lbda * np.eye( ( X_train.shape[ 1 ] ) ) + ( 1 - lbda ) * np.ones( ( y_train.shape[ 1 ], y_train.shape[ 1 ] ) )
# L = np.random.rand( X_train.shape[ 1 ], X_train.shape[ 1 ] )
# L = np.dot( L, L.T )
# L = L / np.linalg.norm( L ) ** 2
L = np.eye( ( X_train.shape[ 1 ] ) )
# best_params = tuneSGD( 300, L, np.vstack( ( X_train, X_test ) ), np.vstack( ( y_train, y_test ) ) )
# C = best_params[ 0 ]
# eta0 = best_params[ 1 ]
# gamma = best_params[ 2 ]
gamma = 0.0001
C = 5.16227766017e-6
eta0 = 0.5
model = md.Model( fm.DecomposableFF( gamma, X_train.shape[ 1 ], D, L ) )
risk = rsk.GroupLasso( C, X_train.shape[ 1 ] )
l = lr.Constant( eta0 )
#End OVFM stuff
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
"OVFM": sgd.SGD( risk, l, 3, 10, 0 )
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
if name == 'OVFM':
    t = time.perf_counter( )
    estimator.fit(model, X_train, y_train)
    y_test_predict[name] = model(X_test)
    print( 'Learning time: ', name, ' ', time.perf_counter( ) - t )
# scores = sgd.scoreCV( estimator, np.vstack( ( X_train, X_test ) ), np.vstack( ( y_train, y_test ) ) )
# print 'Error: ', scores, np.mean( scores ), np.var( scores )
else:
    t = time.perf_counter( )
    estimator.fit(X_train, y_train)
    y_test_predict[name] = estimator.predict(X_test)
    print( 'Learning time: ', name, ' ', time.perf_counter( ) - t )
# scores = sgd.scoreCV( estimator, np.vstack( ( X_train, X_test ) ), np.vstack( ( y_train, y_test ) ) )
# print 'Error: ', scores, np.mean( scores ), np.var( scores )
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((scaler.inverse_transform( X_test[i] ), y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((scaler.inverse_transform( X_test[i] ), y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
|
|
import copy
class Pentomino(object):
def __init__(self, name, coos):
self.name = name
self.coos = coos
self.dim = len(coos[0])
def normalize_coo(self, coo):
    a = self.coos[0][coo]
    for c in self.coos :
        if a > c[coo] :
            a = c[coo]
    for c in self.coos :
        c[coo] = c[coo] - a
def normalize(self):
self.coos.sort()
a=self.coos[0][0]
b=self.coos[0][1]
for i in self.coos :
if a > i[0] :
a = i[0]
if b > i[1] :
b = i[1]
for i in range(5) :
self.coos[i][0] = self.coos[i][0] - a
self.coos[i][1] = self.coos[i][1] - b
return self
def flip(self, coo):
    # Reflect along the given axis: every coordinate c maps to
    # (left + right) - c. This reproduces the case-by-case mapping of
    # the original code and also covers a span of 4 (the I piece).
    left = min(c[coo] for c in self.coos)
    right = max(c[coo] for c in self.coos)
    for c in self.coos :
        c[coo] = right + left - c[coo]
    return self
def translate_one(self, coo):
for i in range(5) :
self.coos[i][coo] = self.coos[i][coo] + 1
return self
def translate_coo(self, coo, amount):
    for c in self.coos :
        c[coo] = c[coo] + amount
    return self
def translate_by(self, by_vector):
    for c in self.coos :
        c[0] = c[0] + by_vector[0]
        c[1] = c[1] + by_vector[1]
    return self
def turn90(self):
for i in range(5) :
coord = self.coos[i][1]
self.coos[i][1]=self.coos[i][0]
self.coos[i][0]=coord
self.flip(1)
return self
def max(self):
maxx=-1
maxy=-1
maximum=list()
for i in self.coos:
if maxx<i[0]:
maxx=i[0]
for i in self.coos:
if maxx==i[0]:
maximum.append(i)
for i in maximum:
if maxy<i[1]:
maxy=i[1]
return [maxx,maxy]
def __hash__(self):
c0 = self.normalize()
h = 100**len(self.coos)
x=0
for i in range(5) :
x = c0.coos[i][0]*100+c0.coos[i][1]
h = h+x*100**(i*2)
x=0
return h
def __eq__(self, other):
if self.name != other.name :
return False
else :
return self.__hash__() == other.__hash__()
def representation(self):
return "[" + self.name + ":" + str(self.coos) + "]"
class F(Pentomino):
def __init__(self):
Pentomino.__init__(self, "F", [[0,1],[1,0],[1,1],[1,2],[2,2]])
class I(Pentomino):
def __init__(self):
Pentomino.__init__(self, "I", [[0,0],[0,1],[0,2],[0,3],[0,4]])
class L(Pentomino):
def __init__(self):
Pentomino.__init__(self, "L", [[0,0],[0,1],[0,2],[0,3],[1,0]])
class N(Pentomino):
def __init__(self):
Pentomino.__init__(self, "N", [[0,0],[0,1],[1,1],[1,2],[1,3]])
class P(Pentomino):
def __init__(self):
Pentomino.__init__(self, "P", [[0,0],[0,1],[0,2],[1,1],[1,2]])
class T(Pentomino):
def __init__(self):
Pentomino.__init__(self, "T", [[0,2],[1,0],[1,1],[1,2],[2,2]])
class U(Pentomino):
def __init__(self):
Pentomino.__init__(self, "U", [[0,0],[0,1],[1,0],[2,0],[2,1]])
class V(Pentomino):
def __init__(self):
Pentomino.__init__(self, "V", [[0,0],[1,0],[2,0],[2,1],[2,2]])
class W(Pentomino):
def __init__(self):
Pentomino.__init__(self, "W", [[0,0],[1,0],[1,1],[2,1],[2,2]])
class X(Pentomino):
def __init__(self):
Pentomino.__init__(self, "X", [[0,1],[1,0],[1,1],[1,2],[2,1]])
class Y(Pentomino):
def __init__(self):
Pentomino.__init__(self, "Y", [[0,0],[1,0],[2,0],[2,1],[3,0]])
class Z(Pentomino):
def __init__(self):
Pentomino.__init__(self, "Z", [[0,2],[1,0],[1,1],[1,2],[2,0]])
def all_pentominos():
return [F(), I(), L(), P(), N(), T(), U(), V(), W(), X(), Y(), Z()]
def all_fixed_pentominos():
s = TileSet()
for i in all_pentominos() :
if i.name == "X" :
s.add(i)
elif i.name == "I":
s.add(I())
s.add(I().turn90())
else :
for k in range(4):
s.add(i.normalize())
s.add(i.flip(0).normalize())
s.add(i.flip(1).normalize())
s.add(i.flip(0).normalize())
# flip() and turn90() mutate the piece in place: undo the last
# flip, then rotate before the next loop iteration.
i.flip(1).normalize()
i.turn90().normalize()
return s
def fixed_pentominos_of(p):
s = TileSet()
for i in all_pentominos() :
if p.name == i.name :
for k in range(4):
s.add(i.normalize())
s.add(i.flip(0).normalize())
s.add(i.flip(1).normalize())
s.add(i.flip(0).normalize())
i.flip(1).normalize()
i.turn90().normalize()
return s
class TileSet(object):
def __init__(self, plist=[]):
self.set = set()
for p in plist:
self.add(p)
def __iter__(self):
return iter(self.set)
def add(self, p):
    # Pentomino defines __hash__ and __eq__, so the set itself
    # deduplicates equivalent (normalized) pieces.
    self.set.add(copy.deepcopy(p))
def size(self):
return len(self.set)
def representation(self):
rep = "["
i = 0
for p in self.set:
if i>0:
rep += ","
else:
i = 1
rep += str(p.coos)
rep += "]"
return rep
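if __name__ == "__main__":
    # Quick sanity check (expected counts for pentominoes: 12 free
    # shapes; 63 fixed shapes counting rotations and reflections).
    print(len(all_pentominos()))           # 12
    print(all_fixed_pentominos().size())   # expected: 63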
|
|
#!/usr/bin/python
#
# This is a test-case-maintenance script that updates the jamtests
# repository with output that was created by the JAM policy weaver.
#
import sys
MAJOR = sys.version_info[0]
import os
import re
import subprocess
from subprocess import PIPE
import shutil
import time
import imp
from optparse import OptionParser
import filecmp
import fnmatch
def read_file(filepath):
    with open(filepath, 'r') as fl:
        txt = fl.read().replace('\r\n','\n').strip()
    return txt
def get_result_predicate(srcdir, app):
respath = os.path.join(srcdir, app + '.result')
if os.path.isfile(respath):
respred = read_file(respath)
respred = respred.strip()
respred = respred.rstrip(";")
else:
respred = "\"RESULT NOT SPECIFIED\""
return respred
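# Example of the convention (illustrative): if <srcdir>/foo.result contains
#     window.count === 3;
# then get_result_predicate(srcdir, 'foo') returns 'window.count === 3'
# (whitespace and the trailing semicolon stripped), and copy_source later
# appends JAM.log("Result: " + (window.count === 3)); to the wrapped source.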
def should_update(src, tgt, diff=False):
if not os.path.isfile(tgt):
return True
if not OVERWRITE:
if VERBOSE:
cfg.out("Not overwriting existing file: %s" % tgt)
return False
srctime = os.path.getmtime(src)
tgttime = os.path.getmtime(tgt)
if COMPARE_TIME and srctime <= tgttime:
if VERBOSE:
cfg.out("Source modification time is earlier: %s: %r, %s: %r" % (src, srctime, tgt, tgttime))
return False
# File contents are checked later since wrapping adds additional text.
if diff and filecmp.cmp(src, tgt):
if VERBOSE:
cfg.out("Source and target are equal: %s == %s" % (src, tgt))
return False
return True
def has_change(tgtfile, txt):
if not os.path.exists(tgtfile):
return True
tgttxt = read_file(tgtfile)
chg = tgttxt != txt.strip()
return chg
def write_text(tgtfile, txt):
    # Avoid an extra trailing newline for idempotence.
    with open(tgtfile, 'w') as tgtfl:
        tgtfl.write(txt)
# Generate a version of the source that profiles the execution.
def insert_profile(txtin, desc, specs, extra=None):
txtout = txtin
ind = ''
nl = '\n'
if 'indent' in specs:
indnum = specs['indent']
if indnum > -1:
ind = ''.join([' ' for i in range(0, indnum)])
ind = '\n' + ind
else:
nl = ''
else:
ind = '\n'
if 'prefixsemicolonstart' in specs and specs['prefixsemicolonstart']:
startsc = ';'
else:
startsc = ''
if 'prefixsemicolonend' in specs and specs['prefixsemicolonend']:
endsc = ';'
else:
endsc = ''
if 'noquotes' in specs and specs['noquotes']:
specdesc = desc
else:
specdesc = "'" + desc + "'"
openprof = startsc + ind + "JAM.startProfile(" + specdesc + ");" + nl
closeprof = endsc + ind + "JAM.stopProfile(" + specdesc + ");" + nl
if 'beginafter' in specs:
bas = specs['beginafter']
if type(bas) == str:
bas = [bas]
else:
bas = None
if 'endbefore' in specs:
ebs = specs['endbefore']
if type(ebs) == str:
ebs = [ebs]
else:
ebs = None
if 'endafter' in specs:
eas = specs['endafter']
if type(eas) == str:
eas = [eas]
else:
eas = None
if 'matchall' in specs:
ma = specs['matchall']
else:
ma = False
found0 = False
if bas is None:
txtout = openprof + txtout
found0 = True
else:
for ba in bas:
start = 0
while start > -1:
start = txtout.find(ba, start)
if start > -1:
start = start + len(ba)
txtout = txtout[:start] + openprof + txtout[start:]
found0 = True
if not ma: break
found1 = False
if ebs is None and eas is None:
txtout = txtout + closeprof
found1 = True
else:
if ebs is not None:
for eb in ebs:
start = 0
while start > -1:
start = txtout.find(eb, start)
if start > -1:
txtout = txtout[:start] + closeprof + txtout[start:]
# Advance beyond the previous match.
start += len(closeprof) + 1
found1 = True
if not ma: break
if eas is not None:
for ea in eas:
start = 0
while start > -1:
start = txtout.find(ea, start)
if start > -1:
start = start + len(ea)
txtout = txtout[:start] + closeprof + txtout[start:]
# Advance beyond the previous match.
start += len(closeprof) + 1
found1 = True
if not ma: break
if not found0 or not found1:
warndesc = desc
if extra is not None: warndesc = extra + "/" + warndesc
warning0 = ''
warning1 = ''
if not found0:
warning0 = "Profile %s beginning" % warndesc
if not found1:
if not found0:
warning1 = " and ending"
else:
warning1 = "Profile %s ending" % warndesc
cfg.warn(warning0 + warning1 + " insertion point not found")
# Return the original rather than a partial profile.
return txtin
return txtout
# /insert_profile
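# Illustrative call (hedged; the spec keys are the ones handled above):
#
#     src = "function f() {\n  work();\n}\n"
#     out = insert_profile(src, 'load',
#                          {'beginafter': None, 'endbefore': None})
#     # `out` is `src` preceded by JAM.startProfile('load'); and
#     # followed by JAM.stopProfile('load');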
def copy_policy(app, desc, polsrc, tgtdir):
assert os.path.isfile(polsrc)
if not cfg.load_dir(tgtdir):
return False
poltgt = os.path.join(tgtdir, app + '.' + desc + '.js')
# Don't copy if the source hasn't been updated.
if not should_update(polsrc, poltgt, True):
return False
shutil.copy(polsrc, poltgt)
return True
# /copy_policy
def copy_source(app, desc, srcpath, tgtdir, respred=None, name=None):
assert os.path.isfile(srcpath)
if not cfg.load_dir(tgtdir): return
if name is None:
tgt = os.path.join(tgtdir, '%s.%s.js' % (app, desc))
else:
tgt = os.path.join(tgtdir, name)
# Don't copy if the source hasn't been updated.
update_tgt = should_update(srcpath, tgt)
if not update_tgt: return False
srctxt = read_file(srcpath)
# Normalize the number of blank lines.
srctxt = srctxt.strip() + "\n"
if respred is not None:
# Generate a statement that checks some expected state.
srctxt = srctxt + '\nJAM.log("Result: " + (%s));\n' % respred
if has_change(tgt, srctxt):
write_text(tgt, srctxt)
return True
else:
if VERBOSE:
cfg.out("No change from current text: %s.%s" % (app, desc))
return False
# /copy_source
def copy_variants(app, suf, srcdir, jsrel, apptgtdir, respred):
changed = False
srcpath = os.path.join(srcdir, jsrel)
if copy_source(app, suf, srcpath, apptgtdir, respred=respred):
changed = True
return changed
# /copy_variants
def copy_sources(app, suf, srcdir, jssrc, apptgtdir, respred):
changed = False
subtgtdir = os.path.join(apptgtdir, 'source-%s' % suf)
for jsrel in jssrc:
jspath = os.path.join(srcdir, jsrel)
assert os.path.isfile(jspath), 'File not found: %s' % jspath
jsname = os.path.basename(jsrel)
jsdir = os.path.dirname(jsrel)
normtgtdir = os.path.join(subtgtdir, jsdir)
if copy_source(app, suf, jspath, normtgtdir, respred=respred, name=jsname):
changed = True
return changed
# /copy_sources
def copy_files(app, infos, apppath, wrap=False):
if not cfg.prepare_dir(apppath):
# Error printed within |prepare_dir|.
return False
# Optionally, a result file in the target directory can contain
# a JavaScript expression that should evaluate to |true| (or some
# other value). This value will be checked at the end of the script.
if wrap:
respred = get_result_predicate(apppath, app)
else:
respred = None
for info in infos:
refsuf, crssuf = cfg.get_suffixes_from_info(info)
if refsuf is None:
# Error printed within |get_suffixes_from_info|.
continue
srcdir = info['dir']
if MAJOR >= 3: infoitems = info.items()
else: infoitems = info.iteritems()
for desc, jssrc in infoitems:
if desc == 'dir': continue
if desc == 'version': continue
if desc == 'policy': continue
if desc == 'modular.policy': continue
if desc == 'info': continue
if desc in cfg.COARSE_SOURCE_KEYS:
suf = 'unprotected.%s' % desc
else:
suf = '%s.%s' % (refsuf, desc)
changed = False
if isinstance(jssrc, str):
jspath = os.path.join(srcdir, jssrc)
assert os.path.isfile(jspath)
if copy_variants(app, suf, srcdir, jssrc, apppath, respred):
changed = True
elif isinstance(jssrc, list):
srcsub = os.path.join(srcdir, 'source-%s' % desc)
if copy_sources(app, suf, srcsub, jssrc, apppath, respred):
changed = True
if changed:
cfg.out('Updated %s.%s' % (app, suf))
# Collect various policy variations.
polchanged = False
for desc in ['policy', 'modular.policy']:
if desc in info:
polsrc = os.path.join(srcdir, info[desc])
if desc == 'modular.policy':
# The refinement indicator isn't meaningful for the
# coarse-grained policy.
suf = 'coarse.policy'
if crssuf is not None:
suf = '%s.%s' % (crssuf, suf)
else:
suf = '%s.%s' % (refsuf, desc)
if copy_policy(app, suf, polsrc, apppath):
polchanged = True
if polchanged:
cfg.out('Updated policy for %s' % app)
return True
# /copy_files
def update_coarse(apppath, app, wrap):
appkey = app.split('.', 1)[0]
if not os.path.isdir(apppath):
cfg.err('Unable to find application directory: %s' % apppath)
return
for srcdir in os.listdir(apppath):
if not srcdir.startswith('source-'): continue
if srcdir.endswith('.profile'): continue
srcdirpath = os.path.join(apppath, srcdir)
if not os.path.isdir(srcdirpath): continue
desc = srcdir[len('source-'):]
descparts = desc.split('.')
begindesc = descparts[0]
if begindesc == 'unprotected':
if len(descparts) == 2:
basedesc = descparts[1]
else:
cfg.warn('Unexpected variant: %s/%s' % (app, desc))
continue
else:
basedesc = desc
if basedesc not in cfg.COARSE_SOURCE_KEYS: continue
tgtdesc = 'coarse.%s' % basedesc
tgtdirpath = os.path.join(apppath, 'source-%s' % tgtdesc)
appdesc = '%s/%s' % (app, tgtdesc)
changed = False
for filename in os.listdir(srcdirpath):
# %%% Potential loophole
if filename.endswith('.html'): continue
tgtpath = os.path.join(tgtdirpath, filename)
srcpath = os.path.join(srcdirpath, filename)
if os.path.isdir(srcpath):
# %%% Recursively copy
continue
elif not os.path.isfile(srcpath):
cfg.warn("Profile source file doesn't exist: %s" % srcpath)
continue
srctxt = read_file(srcpath)
# Normalize the number of blank lines.
srctxt = srctxt.strip() + "\n"
# Generate a coarse-grained transaction version.
modtxt = "introspect(JAM.policy.pFull) {\n" + srctxt + "\n" + "}\n"
if has_change(tgtpath, modtxt):
if cfg.load_dir(tgtdirpath):
write_text(tgtpath, modtxt)
changed = True
if changed:
cfg.out('Updated %s' % appdesc)
else:
if VERBOSE:
cfg.out("No change from current text: %s" % appdesc)
# /update_coarse
def update_profile(apppath, app, wrap):
appkey = app.split('.', 1)[0]
if not os.path.isdir(apppath):
cfg.err('Unable to find application directory: %s' % apppath)
return
for srcdir in os.listdir(apppath):
if srcdir.endswith('.profile'): continue
if not srcdir.startswith('source-'): continue
srcdirpath = os.path.join(apppath, srcdir)
if not os.path.isdir(srcdirpath): continue
desc = srcdir[len('source-'):]
enddesc = desc.split('.')[-1]
if enddesc not in cfg.PROFILE_SOURCE_KEYS: continue
tgtdirpath = srcdirpath + '.profile'
appdesc = '%s/%s.profile' % (app, desc)
changed = False
for filename in os.listdir(srcdirpath):
tgtpath = os.path.join(tgtdirpath, filename)
srcpath = os.path.join(srcdirpath, filename)
if os.path.isdir(srcpath):
# %%% Recursively copy
continue
elif not os.path.isfile(srcpath):
cfg.warn("Profile source file doesn't exist: %s" % srcpath)
continue
srctxt = read_file(srcpath)
# Normalize the number of blank lines.
srctxt = srctxt.strip() + "\n"
profdesc = desc + '.profile'
# Insert the standard "load" profile.
profspec = {
'beginafter': None,
'endbefore': None,
}
proftxt = insert_profile(srctxt, 'load', profspec, appdesc)
if appkey in cfg.PROFILES:
profspecs = cfg.PROFILES[appkey]
# Need a consistent iteration order.
profkeys = list(profspecs.keys())
profkeys.sort()
for extraprofdesc in profkeys:
extraprofspecs = profspecs[extraprofdesc]
if desc in extraprofspecs:
profspec = extraprofspecs[desc]
proftxt = insert_profile(proftxt, extraprofdesc, profspec, appdesc)
if has_change(tgtpath, proftxt):
if cfg.load_dir(tgtdirpath):
write_text(tgtpath, proftxt)
changed = True
if changed:
cfg.out('Updated %s' % appdesc)
else:
if VERBOSE:
cfg.out("No change from current text: %s" % appdesc)
# /update_profile
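# A sketch of the cfg.PROFILES layout assumed by |update_profile| above
# (all key names below are hypothetical):
#   PROFILES = {
#     'appkey': {                      # first component of the app name
#       'timing': {                    # extra profile to insert
#         'original': {'beginafter': None, 'endbefore': None},
#       },
#     },
#   }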
def update_expected(app, infos, tgtpath):
for appinfo in infos:
refsuf, crssuf = cfg.get_suffixes_from_info(appinfo)
if refsuf is None:
# Error printed within |get_suffixes_from_info|.
continue
if 'out' in appinfo:
respath = os.path.join(appinfo['dir'], appinfo['out'])
res = read_file(respath)
outfile = '%s.%s.out.js' % (app, refsuf)
outpath = os.path.join(tgtpath, outfile)
stat = cfg.overwrite_expected(res, outpath)
if stat == 'overwritten' or stat == 'created':
cfg.out('%s %s' % (outfile, stat))
elif VERBOSE:
cfg.warn("Result not found: " + app)
if 'info' in appinfo:
infopath = os.path.join(appinfo['dir'], appinfo['info'])
infooutfile = '%s.%s.info.txt' % (app, refsuf)
infooutpath = os.path.join(tgtpath, infooutfile)
cfg.process_info(infopath, infooutpath, True, quiet=True)
elif VERBOSE:
cfg.warn("Info not found: " + app)
# /update_expected
def process_results(resdir, tgtdir, bases, wrap, transfer, exp, coarse, prof, getall):
if not os.path.isdir(tgtdir):
cfg.err("Target directory not found: %s" % tgtdir)
return
for app in bases:
appdir = os.path.join(tgtdir, app)
if transfer or exp:
assert os.path.isdir(resdir), "Results path %s doesn't exist." % resdir
infos = cfg.get_result_info(resdir, app, getall)
if transfer and infos is not None:
copy_files(app, infos, appdir, wrap)
if exp and infos is not None:
update_expected(app, infos, appdir)
if coarse:
update_coarse(appdir, app, wrap)
if prof:
update_profile(appdir, app, wrap)
# /process_results
def main():
parser = OptionParser(usage="%prog")
parser.add_option('-f', '--overwrite', action='store_true', default=False, dest='overwrite', help='overwrite existing files')
parser.add_option('-v', '--verbose', action='store_true', default=False, dest='verbose', help='generate verbose output')
parser.add_option('-a', '--app', action='store', default=None, dest='app', help='limit to the given app')
parser.add_option('-r', '--updatecoarse', action='store_true', default=False, dest='updatecoarse', help='update coarse sources')
parser.add_option('-u', '--updateprof', action='store_true', default=False, dest='updateprof', help='update profile sources')
parser.add_option('-e', '--updateexp', action='store_true', default=False, dest='updateexp', help='update expected results')
parser.add_option('-t', '--transfer', action='store_true', default=False, dest='transfer', help='transfer results')
parser.add_option('-l', '--lastonly', action='store_true', default=False, dest='lastonly', help='only load the latest result for an app')
parser.add_option('-T', '--nodifftime', action='store_true', default=False, dest='nodifftime', help='update even if source file timestamp is older')
parser.add_option('-c', '--config', action='store', default=os.path.join(os.path.dirname(__file__), 'transferconfig.py'), dest='config', help='configuration.py file')
parser.add_option('-g', '--group', action='store', default=None, dest='group', help='test group to transfer (default: all)')
opts, args = parser.parse_args()
if len(args) != 0:
parser.error("Invalid number of arguments")
global cfg
cfg = imp.load_source("cfg", opts.config)
global OVERWRITE, VERBOSE, COMPARE_TIME
OVERWRITE = opts.overwrite
VERBOSE = opts.verbose
COMPARE_TIME = not opts.nodifftime
tgtkeys = list(cfg.TARGETDIRS.keys())
tgtkeys.sort()
for destdir in tgtkeys:
if opts.group is not None:
if opts.group not in cfg.TEST_GROUPS:
fatal("Invalid test group: %s" % opts.group)
if not destdir.endswith('/' + opts.group):
continue
props = cfg.TARGETDIRS[destdir]
wrap = props['wrap']
bases = props['basenames']
if opts.app is not None:
bases = [base for base in bases if fnmatch.fnmatch(base, opts.app)]
resdir = cfg.RESULTSDIR
process_results(resdir, destdir, bases, wrap, opts.transfer, opts.updateexp, opts.updatecoarse, opts.updateprof, not opts.lastonly)
if __name__ == "__main__":
main()
|
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation script for the DeepLab model.
See model.py for more details and usage.
"""
import numpy as np
import six
import tensorflow as tf
from tensorflow.contrib import metrics as contrib_metrics
from tensorflow.contrib import quantize as contrib_quantize
from tensorflow.contrib import tfprof as contrib_tfprof
from tensorflow.contrib import training as contrib_training
from deeplab import common
from deeplab import model
from deeplab.datasets import data_generator
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
# Settings for log directories.
flags.DEFINE_string('eval_logdir', None, 'Where to write the event logs.')
flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.')
# Settings for evaluating the model.
flags.DEFINE_integer('eval_batch_size', 1,
'The number of images in each batch during evaluation.')
flags.DEFINE_list('eval_crop_size', '513,513',
'Image crop size [height, width] for evaluation.')
flags.DEFINE_integer('eval_interval_secs', 60 * 5,
'How often (in seconds) to run evaluation.')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
'The ratio of input to output spatial resolution.')
# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test.
flags.DEFINE_multi_float('eval_scales', [1.0],
'The scales to resize images for evaluation.')
# Change to True for adding flipped images during test.
flags.DEFINE_bool('add_flipped_images', False,
'Add flipped images for evaluation or not.')
flags.DEFINE_integer(
'quantize_delay_step', -1,
'Steps to start quantized training. If < 0, will not quantize model.')
# Dataset settings.
flags.DEFINE_string('dataset', 'pascal_voc_seg',
'Name of the segmentation dataset.')
flags.DEFINE_string('eval_split', 'val',
                    'Which split of the dataset is used for evaluation.')
flags.DEFINE_string('dataset_dir', None, 'Where the dataset resides.')
flags.DEFINE_integer('max_number_of_evaluations', 0,
'Maximum number of eval iterations. Will loop '
'indefinitely upon nonpositive values.')
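# A hypothetical invocation (paths are placeholders; model_variant and
# image_pyramid come from flags defined elsewhere, e.g. deeplab/common.py):
#   python eval.py --logtostderr --eval_split=val \
#     --model_variant=xception_65 --atrous_rates=6 --atrous_rates=12 \
#     --atrous_rates=18 --output_stride=16 --eval_crop_size=513,513 \
#     --checkpoint_dir=/path/to/ckpt --eval_logdir=/path/to/logs \
#     --dataset_dir=/path/to/tfrecord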
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
dataset = data_generator.Dataset(
dataset_name=FLAGS.dataset,
split_name=FLAGS.eval_split,
dataset_dir=FLAGS.dataset_dir,
batch_size=FLAGS.eval_batch_size,
crop_size=[int(sz) for sz in FLAGS.eval_crop_size],
min_resize_value=FLAGS.min_resize_value,
max_resize_value=FLAGS.max_resize_value,
resize_factor=FLAGS.resize_factor,
model_variant=FLAGS.model_variant,
num_readers=2,
is_training=False,
should_shuffle=False,
should_repeat=False)
tf.gfile.MakeDirs(FLAGS.eval_logdir)
tf.logging.info('Evaluating on %s set', FLAGS.eval_split)
with tf.Graph().as_default():
samples = dataset.get_one_shot_iterator().get_next()
model_options = common.ModelOptions(
outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes},
crop_size=[int(sz) for sz in FLAGS.eval_crop_size],
atrous_rates=FLAGS.atrous_rates,
output_stride=FLAGS.output_stride)
# Set shape in order for tf.contrib.tfprof.model_analyzer to work properly.
samples[common.IMAGE].set_shape(
[FLAGS.eval_batch_size,
int(FLAGS.eval_crop_size[0]),
int(FLAGS.eval_crop_size[1]),
3])
if tuple(FLAGS.eval_scales) == (1.0,):
tf.logging.info('Performing single-scale test.')
predictions = model.predict_labels(samples[common.IMAGE], model_options,
image_pyramid=FLAGS.image_pyramid)
else:
tf.logging.info('Performing multi-scale test.')
if FLAGS.quantize_delay_step >= 0:
raise ValueError(
'Quantize mode is not supported with multi-scale test.')
predictions = model.predict_labels_multi_scale(
samples[common.IMAGE],
model_options=model_options,
eval_scales=FLAGS.eval_scales,
add_flipped_images=FLAGS.add_flipped_images)
predictions = predictions[common.OUTPUT_TYPE]
predictions = tf.reshape(predictions, shape=[-1])
labels = tf.reshape(samples[common.LABEL], shape=[-1])
weights = tf.to_float(tf.not_equal(labels, dataset.ignore_label))
# Set ignore_label regions to label 0, because metrics.mean_iou requires
# range of labels = [0, dataset.num_classes). Note the ignore_label regions
# are not evaluated since the corresponding regions contain weights = 0.
labels = tf.where(
tf.equal(labels, dataset.ignore_label), tf.zeros_like(labels), labels)
predictions_tag = 'miou'
for eval_scale in FLAGS.eval_scales:
predictions_tag += '_' + str(eval_scale)
if FLAGS.add_flipped_images:
predictions_tag += '_flipped'
# Define the evaluation metric.
metric_map = {}
num_classes = dataset.num_of_classes
metric_map['eval/%s_overall' % predictions_tag] = tf.metrics.mean_iou(
labels=labels, predictions=predictions, num_classes=num_classes,
weights=weights)
# IoU for each class.
one_hot_predictions = tf.one_hot(predictions, num_classes)
one_hot_predictions = tf.reshape(one_hot_predictions, [-1, num_classes])
one_hot_labels = tf.one_hot(labels, num_classes)
one_hot_labels = tf.reshape(one_hot_labels, [-1, num_classes])
for c in range(num_classes):
predictions_tag_c = '%s_class_%d' % (predictions_tag, c)
tp, tp_op = tf.metrics.true_positives(
labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],
weights=weights)
fp, fp_op = tf.metrics.false_positives(
labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],
weights=weights)
fn, fn_op = tf.metrics.false_negatives(
labels=one_hot_labels[:, c], predictions=one_hot_predictions[:, c],
weights=weights)
tp_fp_fn_op = tf.group(tp_op, fp_op, fn_op)
iou = tf.where(tf.greater(tp + fn, 0.0),
tp / (tp + fn + fp),
tf.constant(np.NaN))
metric_map['eval/%s' % predictions_tag_c] = (iou, tp_fp_fn_op)
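    # Per-class IoU above uses the standard formula IoU = TP / (TP + FN + FP);
    # e.g. TP=6, FP=2, FN=2 gives 6 / 10 = 0.6. A class that never occurs
    # (TP + FN == 0) yields NaN so it can be excluded from averages.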
(metrics_to_values,
metrics_to_updates) = contrib_metrics.aggregate_metric_map(metric_map)
summary_ops = []
for metric_name, metric_value in six.iteritems(metrics_to_values):
op = tf.summary.scalar(metric_name, metric_value)
op = tf.Print(op, [metric_value], metric_name)
summary_ops.append(op)
summary_op = tf.summary.merge(summary_ops)
summary_hook = contrib_training.SummaryAtEndHook(
log_dir=FLAGS.eval_logdir, summary_op=summary_op)
hooks = [summary_hook]
num_eval_iters = None
if FLAGS.max_number_of_evaluations > 0:
num_eval_iters = FLAGS.max_number_of_evaluations
if FLAGS.quantize_delay_step >= 0:
contrib_quantize.create_eval_graph()
contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=contrib_tfprof.model_analyzer
.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
contrib_training.evaluate_repeatedly(
checkpoint_dir=FLAGS.checkpoint_dir,
master=FLAGS.master,
eval_ops=list(metrics_to_updates.values()),
max_number_of_evaluations=num_eval_iters,
hooks=hooks,
eval_interval_secs=FLAGS.eval_interval_secs)
if __name__ == '__main__':
flags.mark_flag_as_required('checkpoint_dir')
flags.mark_flag_as_required('eval_logdir')
flags.mark_flag_as_required('dataset_dir')
tf.app.run()
|
|
# ----------------------------------------------------------------------
# Ckan synchronization client
# Extracted from the API client in order to be able to insert
# progress reporting, etc.
# ----------------------------------------------------------------------
# ------------------------------ TODO ------------------------------
# we should report the *total* progress ASAP in order to avoid
# "going back" progress bars.
# Problem: we cannot determine the amount of required changes while
# downloading the data; a hack might be to report *twice* the
# total in the "downloading state" progress, then revert back to the
# actual value after last iteration, just moments before being able
# to compute the actual changes to be performed..
# Also, double-check progress reporting for get state of orgs / categories
import copy
import logging
import random
import itertools
from ckan_api_client.exceptions import HTTPError
from ckan_api_client.high_level import CkanHighlevelClient
from ckan_api_client.objects import CkanDataset, CkanOrganization, CkanGroup
from ckan_api_client.utils import IDMap, IDPair
from harvester.utils import report_progress
# Extras field containing id of the external source.
# The id is simply source_name:
HARVEST_SOURCE_ID_FIELD = '_harvest_source'
logger = logging.getLogger(__name__)
class SynchronizationClient(object):
"""
Synchronization client, providing functionality for importing
collections of datasets into a Ckan instance.
Synchronization acts as follows:
    - Ensure all the required organizations/groups are there;
create a map between "source" ids and Ckan ids.
Optionally update existing organizations/groups with
new details.
- Find all the Ckan datasets matching the ``source_name``
- Determine which datasets...
- ...need to be created
- ...need to be updated
- ...need to be deleted
- First, delete datasets to be deleted in order to free up names
- Then, create datasets that need to be created
- Lastly, update datasets using the configured merge strategy
(see constructor arguments).
"""
def __init__(self, base_url, api_key=None, **kw):
"""
:param base_url:
Base URL of the Ckan instance, passed to high-level client
:param api_key:
API key to be used, passed to high-level client
:param organization_merge_strategy: One of:
- 'create' (default) if the organization doesn't exist, create it.
Otherwise, leave it alone.
- 'update' if the organization doesn't exist, create it.
Otherwise, update with new values.
:param group_merge_strategy: One of:
- 'create' (default) if the group doesn't exist, create it.
Otherwise, leave it alone.
- 'update' if the group doesn't exist, create it.
Otherwise, update with new values.
:param dataset_preserve_names:
if ``True`` (the default) will preserve old names of existing
datasets
:param dataset_preserve_organization:
if ``True`` (the default) will preserve old organizations of
existing datasets.
:param dataset_group_merge_strategy:
- 'add' add groups, keep old ones (default)
- 'replace' replace all existing groups
- 'preserve' leave groups alone
"""
self._client = CkanHighlevelClient(base_url, api_key)
self._conf = {
'organization_merge_strategy': 'create',
'group_merge_strategy': 'create',
'dataset_preserve_names': True,
'dataset_preserve_organization': True,
'dataset_group_merge_strategy': 'add',
}
self._conf.update(kw)
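    # A minimal usage sketch (URL, key and payload are hypothetical):
    #   client = SynchronizationClient('http://ckan.example.com',
    #                                  api_key='my-api-key',
    #                                  dataset_group_merge_strategy='replace')
    #   client.sync('my-source', data)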
def sync(self, source_name, data):
"""
Synchronize data from a source into Ckan.
- datasets are matched by _harvest_source
- groups and organizations are matched by name
:param source_name:
String identifying the source of the data. Used to build
ids that will be used in further synchronizations.
:param data:
Data to be synchronized. Should be a dict (or dict-like)
            with top-level keys corresponding to the object type,
mapping to dictionaries of ``{'id': <object>}``.
"""
groups = dict(
(key, CkanGroup(val))
for key, val in data['group'].iteritems())
organizations = dict(
(key, CkanOrganization(val))
for key, val in data['organization'].iteritems())
# Upsert groups and organizations
groups_map = self._upsert_groups(groups)
orgs_map = self._upsert_organizations(organizations)
# Create list of datasets to be synced
logger.info('Creating list of datasets to be synchronized')
source_datasets = {}
for source_id, dataset_dict in data['dataset'].iteritems():
_dataset_dict = copy.deepcopy(dataset_dict)
# We need to make sure "source" datasets
# don't have (otherwise misleading) ids
_dataset_dict.pop('id', None)
# We need to update groups and organizations,
# to map their name from the source into a
# ckan id
_dataset_dict['groups'] = [
groups_map.to_ckan(grp_id)
for grp_id in _dataset_dict['groups']
]
_dataset_dict['owner_org'] = \
orgs_map.to_ckan(_dataset_dict['owner_org'])
dataset = CkanDataset(_dataset_dict)
# We also want to add the "source id", used for further
# synchronizations to find stuff
dataset.extras[HARVEST_SOURCE_ID_FIELD] = \
self._join_source_id(source_name, source_id)
source_datasets[source_id] = dataset
# Retrieve list of datasets from Ckan
logger.info('Retrieving current status from Ckan')
ckan_datasets = self._find_datasets_by_source(source_name)
# Compare collections to find differences
differences = self._compare_collections(
ckan_datasets, source_datasets)
# ------------------------------------------------------------
# We now need to create/update/delete datasets.
# todo: we need to make sure dataset names are not
# already used by another dataset. The only
        #       way is to randomize dataset names and rely on
        #       a 409 response to signal a duplicate name.
# _progress_total = sum(len(differences[x])
# for x in ('left', 'right', 'differing'))
# _progress_next = itertools.count(1).next
# report_progress(0, _progress_total)
_prog_tot_add = len(differences['right'])
_prog_next_add = itertools.count(1).next
_prog_tot_remove = len(differences['left'])
_prog_next_remove = itertools.count(1).next
_prog_tot_update = len(differences['differing'])
_prog_next_update = itertools.count(1).next
# Create progress bars early..
report_progress(('datasets', 'delete'), 0, _prog_tot_remove)
report_progress(('datasets', 'create'), 0, _prog_tot_add)
report_progress(('datasets', 'update'), 0, _prog_tot_update)
# We delete first, in order to (possibly) deallocate
# some already-used names..
for source_id in differences['left']:
ckan_id = ckan_datasets[source_id].id
logger.info('Deleting dataset {0}'.format(ckan_id))
self._client.delete_dataset(ckan_id)
report_progress(('datasets', 'delete'),
_prog_next_remove(), _prog_tot_remove)
def force_dataset_operation(operation, dataset, retry=5):
# Maximum dataset name length is 100 characters
# We trim it down to 80 just to be safe.
# Note: we generally want to preserve the original name
# and there should *never* be problems with that
# when updating..
_orig_name = dataset.name[:80]
dataset.name = _orig_name
while True:
try:
result = operation(dataset)
                except HTTPError as e:
if e.status_code != 409:
raise
retry -= 1
if retry < 0:
raise
dataset.name = '{0}-{1:06d}'.format(
_orig_name,
random.randint(0, 999999))
logger.debug('Got 409: trying to rename dataset to {0}'
.format(dataset.name))
else:
return result
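        # e.g. a 409 on a hypothetical name 'my-dataset' retries with a
        # randomized name such as 'my-dataset-042137'.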
# Create missing datasets
for source_id in differences['right']:
logger.info('Creating dataset {0}'.format(source_id))
dataset = source_datasets[source_id]
force_dataset_operation(self._client.create_dataset, dataset)
report_progress(('datasets', 'create'),
_prog_next_add(), _prog_tot_add)
# Update outdated datasets
for source_id in differences['differing']:
logger.info('Updating dataset {0}'.format(source_id))
# dataset = source_datasets[source_id]
old_dataset = ckan_datasets[source_id]
new_dataset = source_datasets[source_id]
dataset = self._merge_datasets(old_dataset, new_dataset)
dataset.id = old_dataset.id # Mandatory!
self._client.update_dataset(dataset) # should never fail!
report_progress(('datasets', 'update'),
_prog_next_update(), _prog_tot_update)
def _merge_datasets(self, old, new):
# Preserve dataset names
if self._conf['dataset_preserve_names']:
new.name = old.name
# Merge groups according to configured strategy
_strategy = self._conf['dataset_group_merge_strategy']
if _strategy == 'add':
# We want to preserve the order!
groups = list(old.groups)
for g in new.groups:
if g not in groups:
groups.append(g)
new.groups = groups
elif _strategy == 'replace':
# Do nothing, we just want the new groups to replace
# the old ones -- no need to merge
pass
elif _strategy == 'preserve':
# Simply discard the new groups, keep the old ones
new.groups = old.groups
else:
            # Invalid value! Shouldn't this have been caught
            # earlier?
pass
# What should we do with owner organization?
if self._conf['dataset_preserve_organization']:
if old.owner_org:
new.owner_org = old.owner_org
return new
def _upsert_groups(self, groups):
"""
:param groups:
dict mapping ``{org_name : CkanGroup()}``
:return: a map of source/ckan ids of groups
:rtype: IDMap
"""
idmap = IDMap()
for group_name, group in groups.iteritems():
if not isinstance(group, CkanGroup):
raise TypeError("Expected CkanGroup, got {0!r}"
.format(type(group)))
if group.name is None:
group.name = group_name
if group.name != group_name:
raise ValueError("Mismatching group name!")
try:
ckan_group = self._client.get_group_by_name(
group_name, allow_deleted=True)
            except HTTPError as e:
if e.status_code != 404:
raise
# We need to create the group
group.id = None
group.state = 'active'
created_group = self._client.create_group(group)
idmap.add(IDPair(source_id=group.name,
ckan_id=created_group.id))
else:
                # The group already exists. It might be logically
# deleted, but we don't care -> just update and
# make sure it is marked as active.
# todo: make sure we don't need to preserve users and stuff,
# otherwise we need to workaround that in hi-lev client
group_id = ckan_group.id
if self._conf['group_merge_strategy'] == 'update':
# If merge strategy is 'update', we should update
# the group.
group.state = 'active'
group.id = ckan_group.id
updated_group = self._client.update_group(group)
group_id = updated_group.id
elif group.state != 'active':
# We only want to update the **original** group to set it
# as active, but preserving original values.
ckan_group.state = 'active'
updated_group = self._client.update_group(ckan_group)
group_id = updated_group.id
idmap.add(IDPair(source_id=group.name, ckan_id=group_id))
return idmap
def _upsert_organizations(self, orgs):
"""
:param orgs:
dict mapping ``{org_name : CkanOrganization()}``
:return: a map of source/ckan ids of organizations
:rtype: IDMap
"""
idmap = IDMap()
for org_name, org in orgs.iteritems():
if not isinstance(org, CkanOrganization):
raise TypeError("Expected CkanOrganization, got {0!r}"
.format(type(org)))
if org.name is None:
org.name = org_name
if org.name != org_name:
raise ValueError("Mismatching org name!")
try:
ckan_org = self._client.get_organization_by_name(
org_name, allow_deleted=True)
            except HTTPError as e:
if e.status_code != 404:
raise
# We need to create the org
org.id = None
org.state = 'active'
created_org = self._client.create_organization(org)
idmap.add(IDPair(source_id=org.name,
ckan_id=created_org.id))
else:
# We only want to update if state != 'active'
org_id = ckan_org.id
if self._conf['organization_merge_strategy'] == 'update':
                    # If merge strategy is 'update', we should update
                    # the organization.
org.state = 'active'
org.id = ckan_org.id
updated_org = self._client.update_organization(org)
org_id = updated_org.id
elif org.state != 'active':
# We only want to update the **original** org to set it
# as active, but preserving original values.
ckan_org.state = 'active'
updated_org = self._client.update_organization(ckan_org)
org_id = updated_org.id
idmap.add(IDPair(source_id=org_name,
ckan_id=org_id))
return idmap
def _find_datasets_by_source(self, source_name):
"""
Find all datasets matching the current source.
Returns a dict mapping source ids with dataset objects.
"""
# HACK: We are reporting *twice* the number of datasets,
# to give an estimate of the remaining steps..
_total = len(self._client.list_datasets())
_current = itertools.count(1).next
results = {}
report_progress(('get ckan state',), 0, _total * 2)
for dataset in self._client.iter_datasets():
if HARVEST_SOURCE_ID_FIELD in dataset.extras:
source_id = dataset.extras[HARVEST_SOURCE_ID_FIELD]
_name, _id = self._parse_source_id(source_id)
if _name == source_name:
results[_id] = dataset
report_progress(('get ckan state',), _current(), _total * 2)
report_progress(('get ckan state',), _total, _total)
return results
def _parse_source_id(self, source_id):
splitted = source_id.split(':')
if len(splitted) != 2:
raise ValueError("Invalid source id")
return splitted
def _join_source_id(self, source_name, source_id):
return ':'.join((source_name, source_id))
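    # e.g. (names are hypothetical):
    #   _join_source_id('statweb', '123')  -> 'statweb:123'
    #   _parse_source_id('statweb:123')    -> ['statweb', '123']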
def _compare_collections(self, left, right):
"""
Compare two collections of objects.
Both collections are dictionaries mapping "source" ids
with objects.
        :param left:
            The "original" collection, retrieved from Ckan.
            Objects will already have Ckan ids.
        :param right:
            The "new" collection, built from the source data.
            Objects will not have Ckan ids.
:returns:
A dictionary mapping names to sets of keys:
* ``common`` -- keys in both mappings
* ``differing`` -- keys of differing objects
* ``left`` -- keys of objects that are only in ckan
* ``right`` -- keys of objects that are not in ckan
"""
left_keys = set(left.iterkeys())
right_keys = set(right.iterkeys())
common_keys = left_keys & right_keys
left_only_keys = left_keys - right_keys
right_only_keys = right_keys - left_keys
differing = set(k for k in common_keys if left[k] != right[k])
return {
'common': common_keys,
'left': left_only_keys,
'right': right_only_keys,
'differing': differing,
}
|
|
from test.lib import fixtures, testing
from test.lib.testing import assert_raises_message
from sqlalchemy.sql import column, desc, asc, literal, collate
from sqlalchemy.sql.expression import _BinaryExpression as BinaryExpression, \
ClauseList, _Grouping as Grouping, \
_UnaryExpression as UnaryExpression
from sqlalchemy.sql import operators
from sqlalchemy import exc
from sqlalchemy.schema import Column, Table, MetaData
from sqlalchemy.types import Integer, TypeEngine, TypeDecorator, UserDefinedType
from sqlalchemy.dialects import mysql, firebird
from sqlalchemy import text, literal_column
class DefaultColumnComparatorTest(fixtures.TestBase):
def _do_scalar_test(self, operator, compare_to):
left = column('left')
assert operator(left).compare(
compare_to(left)
)
def _do_operate_test(self, operator, right=column('right')):
left = column('left')
assert operator(left, right).compare(
BinaryExpression(left, right, operator)
)
def test_desc(self):
self._do_scalar_test(operators.desc_op, desc)
def test_asc(self):
self._do_scalar_test(operators.asc_op, asc)
def test_plus(self):
self._do_operate_test(operators.add)
def test_is_null(self):
self._do_operate_test(operators.is_, None)
def test_isnot_null(self):
self._do_operate_test(operators.isnot, None)
def test_is(self):
self._do_operate_test(operators.is_)
def test_isnot(self):
self._do_operate_test(operators.isnot)
def test_collate(self):
left = column('left')
right = "some collation"
        assert operators.collate(left, right).compare(
            collate(left, right)
        )
from sqlalchemy import and_, not_, between
class OperatorPrecedenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_operator_precedence(self):
# TODO: clean up /break up
metadata = MetaData()
table = Table('op', metadata,
Column('field', Integer))
self.assert_compile(table.select((table.c.field == 5) == None),
"SELECT op.field FROM op WHERE (op.field = :field_1) IS NULL")
self.assert_compile(table.select((table.c.field + 5) == table.c.field),
"SELECT op.field FROM op WHERE op.field + :field_1 = op.field")
self.assert_compile(table.select((table.c.field + 5) * 6),
"SELECT op.field FROM op WHERE (op.field + :field_1) * :param_1")
self.assert_compile(table.select((table.c.field * 5) + 6),
"SELECT op.field FROM op WHERE op.field * :field_1 + :param_1")
self.assert_compile(table.select(5 + table.c.field.in_([5, 6])),
"SELECT op.field FROM op WHERE :param_1 + "
"(op.field IN (:field_1, :field_2))")
self.assert_compile(table.select((5 + table.c.field).in_([5, 6])),
"SELECT op.field FROM op WHERE :field_1 + op.field "
"IN (:param_1, :param_2)")
self.assert_compile(table.select(not_(and_(table.c.field == 5,
table.c.field == 7))),
"SELECT op.field FROM op WHERE NOT "
"(op.field = :field_1 AND op.field = :field_2)")
self.assert_compile(table.select(not_(table.c.field == 5)),
"SELECT op.field FROM op WHERE op.field != :field_1")
self.assert_compile(table.select(not_(table.c.field.between(5, 6))),
"SELECT op.field FROM op WHERE NOT "
"(op.field BETWEEN :field_1 AND :field_2)")
self.assert_compile(table.select(not_(table.c.field) == 5),
"SELECT op.field FROM op WHERE (NOT op.field) = :param_1")
self.assert_compile(table.select((table.c.field == table.c.field).\
between(False, True)),
"SELECT op.field FROM op WHERE (op.field = op.field) "
"BETWEEN :param_1 AND :param_2")
self.assert_compile(table.select(
between((table.c.field == table.c.field), False, True)),
"SELECT op.field FROM op WHERE (op.field = op.field) "
"BETWEEN :param_1 AND :param_2")
class OperatorAssociativityTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_associativity(self):
# TODO: clean up /break up
f = column('f')
self.assert_compile(f - f, "f - f")
self.assert_compile(f - f - f, "(f - f) - f")
self.assert_compile((f - f) - f, "(f - f) - f")
self.assert_compile((f - f).label('foo') - f, "(f - f) - f")
self.assert_compile(f - (f - f), "f - (f - f)")
self.assert_compile(f - (f - f).label('foo'), "f - (f - f)")
        # because - has lower precedence than /
self.assert_compile(f / (f - f), "f / (f - f)")
self.assert_compile(f / (f - f).label('foo'), "f / (f - f)")
self.assert_compile(f / f - f, "f / f - f")
self.assert_compile((f / f) - f, "f / f - f")
self.assert_compile((f / f).label('foo') - f, "f / f - f")
        # because / has higher precedence than -
self.assert_compile(f - (f / f), "f - f / f")
self.assert_compile(f - (f / f).label('foo'), "f - f / f")
self.assert_compile(f - f / f, "f - f / f")
self.assert_compile((f - f) / f, "(f - f) / f")
self.assert_compile(((f - f) / f) - f, "(f - f) / f - f")
self.assert_compile((f - f) / (f - f), "(f - f) / (f - f)")
# higher precedence
self.assert_compile((f / f) - (f / f), "f / f - f / f")
self.assert_compile((f / f) - (f - f), "f / f - (f - f)")
self.assert_compile((f / f) / (f - f), "(f / f) / (f - f)")
self.assert_compile(f / (f / (f - f)), "f / (f / (f - f))")
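# Note: for the left-associative "-", a compound left operand compiles without
# parentheses ("f - f - f" is "(f - f) - f"), while a compound right operand
# keeps them ("f - (f - f)").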
class ComposedLikeOperatorsTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = 'default'
def test_contains(self):
self.assert_compile(
column('x').contains('y'),
"x LIKE '%%' || :x_1 || '%%'",
checkparams={'x_1': 'y'}
)
def test_contains_escape(self):
self.assert_compile(
column('x').contains('y', escape='\\'),
"x LIKE '%%' || :x_1 || '%%' ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_contains_literal(self):
self.assert_compile(
column('x').contains(literal_column('y')),
"x LIKE '%%' || y || '%%'",
checkparams={}
)
def test_contains_text(self):
self.assert_compile(
column('x').contains(text('y')),
"x LIKE '%%' || y || '%%'",
checkparams={}
)
def test_not_contains(self):
self.assert_compile(
~column('x').contains('y'),
"NOT (x LIKE '%%' || :x_1 || '%%')",
checkparams={'x_1': 'y'}
)
def test_not_contains_escape(self):
self.assert_compile(
~column('x').contains('y', escape='\\'),
"NOT (x LIKE '%%' || :x_1 || '%%' ESCAPE '\\')",
checkparams={'x_1': 'y'}
)
def test_contains_concat(self):
self.assert_compile(
column('x').contains('y'),
"x LIKE concat(concat('%%', %s), '%%')",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_not_contains_concat(self):
self.assert_compile(
~column('x').contains('y'),
"NOT (x LIKE concat(concat('%%', %s), '%%'))",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_contains_literal_concat(self):
self.assert_compile(
column('x').contains(literal_column('y')),
"x LIKE concat(concat('%%', y), '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_contains_text_concat(self):
self.assert_compile(
column('x').contains(text('y')),
"x LIKE concat(concat('%%', y), '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_startswith(self):
self.assert_compile(
column('x').startswith('y'),
"x LIKE :x_1 || '%%'",
checkparams={'x_1': 'y'}
)
def test_startswith_escape(self):
self.assert_compile(
column('x').startswith('y', escape='\\'),
"x LIKE :x_1 || '%%' ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_not_startswith(self):
self.assert_compile(
~column('x').startswith('y'),
"NOT (x LIKE :x_1 || '%%')",
checkparams={'x_1': 'y'}
)
def test_not_startswith_escape(self):
self.assert_compile(
~column('x').startswith('y', escape='\\'),
"NOT (x LIKE :x_1 || '%%' ESCAPE '\\')",
checkparams={'x_1': 'y'}
)
def test_startswith_literal(self):
self.assert_compile(
column('x').startswith(literal_column('y')),
"x LIKE y || '%%'",
checkparams={}
)
def test_startswith_text(self):
self.assert_compile(
column('x').startswith(text('y')),
"x LIKE y || '%%'",
checkparams={}
)
def test_startswith_concat(self):
self.assert_compile(
column('x').startswith('y'),
"x LIKE concat(%s, '%%')",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_not_startswith_concat(self):
self.assert_compile(
~column('x').startswith('y'),
"NOT (x LIKE concat(%s, '%%'))",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_startswith_literal_mysql(self):
self.assert_compile(
column('x').startswith(literal_column('y')),
"x LIKE concat(y, '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_startswith_text_mysql(self):
self.assert_compile(
column('x').startswith(text('y')),
"x LIKE concat(y, '%%')",
checkparams={},
dialect=mysql.dialect()
)
def test_endswith(self):
self.assert_compile(
column('x').endswith('y'),
"x LIKE '%%' || :x_1",
checkparams={'x_1': 'y'}
)
def test_endswith_escape(self):
self.assert_compile(
column('x').endswith('y', escape='\\'),
"x LIKE '%%' || :x_1 ESCAPE '\\'",
checkparams={'x_1': 'y'}
)
def test_not_endswith(self):
self.assert_compile(
~column('x').endswith('y'),
"NOT (x LIKE '%%' || :x_1)",
checkparams={'x_1': 'y'}
)
def test_not_endswith_escape(self):
self.assert_compile(
~column('x').endswith('y', escape='\\'),
"NOT (x LIKE '%%' || :x_1 ESCAPE '\\')",
checkparams={'x_1': 'y'}
)
def test_endswith_literal(self):
self.assert_compile(
column('x').endswith(literal_column('y')),
"x LIKE '%%' || y",
checkparams={}
)
def test_endswith_text(self):
self.assert_compile(
column('x').endswith(text('y')),
"x LIKE '%%' || y",
checkparams={}
)
def test_endswith_mysql(self):
self.assert_compile(
column('x').endswith('y'),
"x LIKE concat('%%', %s)",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_not_endswith_mysql(self):
self.assert_compile(
~column('x').endswith('y'),
"NOT (x LIKE concat('%%', %s))",
checkparams={'x_1': 'y'},
dialect=mysql.dialect()
)
def test_endswith_literal_mysql(self):
self.assert_compile(
column('x').endswith(literal_column('y')),
"x LIKE concat('%%', y)",
checkparams={},
dialect=mysql.dialect()
)
def test_endswith_text_mysql(self):
self.assert_compile(
column('x').endswith(text('y')),
"x LIKE concat('%%', y)",
checkparams={},
dialect=mysql.dialect()
)
|
|
"""
data hash pandas / numpy objects
"""
import itertools
import numpy as np
from pandas._libs import hashing, tslib
from pandas.core.dtypes.generic import (
ABCMultiIndex,
ABCIndexClass,
ABCSeries,
ABCDataFrame)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_list_like)
from pandas.core.dtypes.missing import isnull
from pandas.core.dtypes.cast import infer_dtype_from_scalar
# 16 byte long hashing key
_default_hash_key = '0123456789123456'
def _combine_hash_arrays(arrays, num_items):
"""
Parameters
----------
arrays : generator
num_items : int
Should be the same as CPython's tupleobject.c
"""
try:
first = next(arrays)
except StopIteration:
return np.array([], dtype=np.uint64)
arrays = itertools.chain([first], arrays)
mult = np.uint64(1000003)
out = np.zeros_like(first) + np.uint64(0x345678)
for i, a in enumerate(arrays):
inverse_i = num_items - i
out ^= a
out *= mult
mult += np.uint64(82520 + inverse_i + inverse_i)
assert i + 1 == num_items, 'Fed in wrong num_items'
out += np.uint64(97531)
return out
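# A minimal sketch of the combiner (inputs are hypothetical): feeding two
# uint64 arrays mimics hashing 2-tuples elementwise, e.g.
#   arrs = iter([np.full(3, 1, dtype='u8'), np.full(3, 2, dtype='u8')])
#   _combine_hash_arrays(arrs, num_items=2)  # -> 3 deterministic uint64 hashes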
def hash_pandas_object(obj, index=True, encoding='utf8', hash_key=None,
categorize=True):
"""
Return a data hash of the Index/Series/DataFrame
.. versionadded:: 0.19.2
Parameters
----------
index : boolean, default True
include the index in the hash (if Series/DataFrame)
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
.. versionadded:: 0.20.0
Returns
-------
Series of uint64, same length as the object
"""
from pandas import Series
if hash_key is None:
hash_key = _default_hash_key
if isinstance(obj, ABCMultiIndex):
return Series(hash_tuples(obj, encoding, hash_key),
dtype='uint64', copy=False)
if isinstance(obj, ABCIndexClass):
h = hash_array(obj.values, encoding, hash_key,
categorize).astype('uint64', copy=False)
h = Series(h, index=obj, dtype='uint64', copy=False)
elif isinstance(obj, ABCSeries):
h = hash_array(obj.values, encoding, hash_key,
categorize).astype('uint64', copy=False)
if index:
index_iter = (hash_pandas_object(obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize).values
for _ in [None])
arrays = itertools.chain([h], index_iter)
h = _combine_hash_arrays(arrays, 2)
h = Series(h, index=obj.index, dtype='uint64', copy=False)
elif isinstance(obj, ABCDataFrame):
hashes = (hash_array(series.values) for _, series in obj.iteritems())
num_items = len(obj.columns)
if index:
index_hash_generator = (hash_pandas_object(obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize).values # noqa
for _ in [None])
num_items += 1
hashes = itertools.chain(hashes, index_hash_generator)
h = _combine_hash_arrays(hashes, num_items)
h = Series(h, index=obj.index, dtype='uint64', copy=False)
else:
raise TypeError("Unexpected type for hashing %s" % type(obj))
return h
def hash_tuples(vals, encoding='utf8', hash_key=None):
"""
    Hash a MultiIndex / list-of-tuples efficiently
.. versionadded:: 0.20.0
Parameters
----------
vals : MultiIndex, list-of-tuples, or single tuple
encoding : string, default 'utf8'
hash_key : string key to encode, default to _default_hash_key
Returns
-------
ndarray of hashed values array
"""
is_tuple = False
if isinstance(vals, tuple):
vals = [vals]
is_tuple = True
elif not is_list_like(vals):
raise TypeError("must be convertible to a list-of-tuples")
from pandas import Categorical, MultiIndex
if not isinstance(vals, ABCMultiIndex):
vals = MultiIndex.from_tuples(vals)
# create a list-of-Categoricals
vals = [Categorical(vals.labels[level],
vals.levels[level],
ordered=False,
fastpath=True)
for level in range(vals.nlevels)]
# hash the list-of-ndarrays
hashes = (_hash_categorical(cat,
encoding=encoding,
hash_key=hash_key)
for cat in vals)
h = _combine_hash_arrays(hashes, len(vals))
if is_tuple:
h = h[0]
return h
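# Note the shape of the result: a single tuple yields one scalar hash, while a
# MultiIndex or list-of-tuples yields one uint64 per element, e.g.
#   hash_tuples((1, 'a'))              # -> a single uint64
#   hash_tuples([(1, 'a'), (2, 'b')])  # -> uint64 ndarray of length 2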
def hash_tuple(val, encoding='utf8', hash_key=None):
"""
Hash a single tuple efficiently
Parameters
----------
val : single tuple
encoding : string, default 'utf8'
hash_key : string key to encode, default to _default_hash_key
Returns
-------
hash
"""
hashes = (_hash_scalar(v, encoding=encoding, hash_key=hash_key)
for v in val)
h = _combine_hash_arrays(hashes, len(val))[0]
return h
def _hash_categorical(c, encoding, hash_key):
"""
Hash a Categorical by hashing its categories, and then mapping the codes
to the hashes
Parameters
----------
c : Categorical
encoding : string, default 'utf8'
hash_key : string key to encode, default to _default_hash_key
Returns
-------
ndarray of hashed values array, same size as len(c)
"""
hashed = hash_array(c.categories.values, encoding, hash_key,
categorize=False)
# we have uint64, as we don't directly support missing values
# we don't want to use take_nd which will coerce to float
    # instead, directly construct the result with a
# max(np.uint64) as the missing value indicator
#
# TODO: GH 15362
mask = c.isnull()
if len(hashed):
result = hashed.take(c.codes)
else:
result = np.zeros(len(mask), dtype='uint64')
if mask.any():
result[mask] = np.iinfo(np.uint64).max
return result
def hash_array(vals, encoding='utf8', hash_key=None, categorize=True):
"""
Given a 1d array, return an array of deterministic integers.
.. versionadded:: 0.19.2
Parameters
----------
vals : ndarray, Categorical
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
.. versionadded:: 0.20.0
Returns
-------
1d uint64 numpy array of hash values, same length as the vals
"""
if not hasattr(vals, 'dtype'):
raise TypeError("must pass a ndarray-like")
dtype = vals.dtype
if hash_key is None:
hash_key = _default_hash_key
# For categoricals, we hash the categories, then remap the codes to the
# hash values. (This check is above the complex check so that we don't ask
    # numpy if categorical is a subdtype of complex, as it will choke.)
if is_categorical_dtype(dtype):
return _hash_categorical(vals, encoding, hash_key)
# we'll be working with everything as 64-bit values, so handle this
# 128-bit value early
elif np.issubdtype(dtype, np.complex128):
return hash_array(vals.real) + 23 * hash_array(vals.imag)
# First, turn whatever array this is into unsigned 64-bit ints, if we can
# manage it.
    elif dtype == np.bool_:
vals = vals.astype('u8')
elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
vals = vals.view('i8').astype('u8', copy=False)
elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
else:
        # With repeated values, it's MUCH faster to categorize object dtypes,
# then hash and rename categories. We allow skipping the categorization
# when the values are known/likely to be unique.
if categorize:
from pandas import factorize, Categorical, Index
codes, categories = factorize(vals, sort=False)
cat = Categorical(codes, Index(categories),
ordered=False, fastpath=True)
return _hash_categorical(cat, encoding, hash_key)
try:
vals = hashing.hash_object_array(vals, hash_key, encoding)
except TypeError:
# we have mixed types
vals = hashing.hash_object_array(vals.astype(str).astype(object),
hash_key, encoding)
# Then, redistribute these 64-bit ints within the space of 64-bit ints
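    # (these shift/multiply constants are the bijective finalizer of the
    # splitmix64 generator, which avalanche-mixes the bits so that similar
    # inputs map to dissimilar hashes)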
vals ^= vals >> 30
vals *= np.uint64(0xbf58476d1ce4e5b9)
vals ^= vals >> 27
vals *= np.uint64(0x94d049bb133111eb)
vals ^= vals >> 31
return vals
def _hash_scalar(val, encoding='utf8', hash_key=None):
"""
Hash scalar value
Returns
-------
1d uint64 numpy array of hash value, of length 1
"""
if isnull(val):
# this is to be consistent with the _hash_categorical implementation
return np.array([np.iinfo(np.uint64).max], dtype='u8')
if getattr(val, 'tzinfo', None) is not None:
# for tz-aware datetimes, we need the underlying naive UTC value and
# not the tz aware object or pd extension type (as
# infer_dtype_from_scalar would do)
if not isinstance(val, tslib.Timestamp):
val = tslib.Timestamp(val)
val = val.tz_convert(None)
dtype, val = infer_dtype_from_scalar(val)
vals = np.array([val], dtype=dtype)
return hash_array(vals, hash_key=hash_key, encoding=encoding,
categorize=False)
|
|
from copy import copy
from sympy.tensor.array.dense_ndim_array import MutableDenseNDimArray
from sympy import Symbol, Rational, SparseMatrix
from sympy.matrices import Matrix
from sympy.tensor.array.sparse_ndim_array import MutableSparseNDimArray
def test_ndim_array_initiation():
arr_with_one_element = MutableDenseNDimArray([23])
assert len(arr_with_one_element) == 1
assert arr_with_one_element[0] == 23
assert arr_with_one_element.rank() == 1
arr_with_symbol_element = MutableDenseNDimArray([Symbol('x')])
assert len(arr_with_symbol_element) == 1
assert arr_with_symbol_element[0] == Symbol('x')
assert arr_with_symbol_element.rank() == 1
number5 = 5
vector = MutableDenseNDimArray.zeros(number5)
assert len(vector) == number5
assert vector.shape == (number5,)
assert vector.rank() == 1
vector = MutableSparseNDimArray.zeros(number5)
assert len(vector) == number5
assert vector.shape == (number5,)
assert vector._sparse_array == {}
assert vector.rank() == 1
n_dim_array = MutableDenseNDimArray(range(3**4), (3, 3, 3, 3,))
assert len(n_dim_array) == 3 * 3 * 3 * 3
assert n_dim_array.shape == (3, 3, 3, 3)
assert n_dim_array.rank() == 4
array_shape = (3, 3, 3, 3)
sparse_array = MutableSparseNDimArray.zeros(*array_shape)
assert len(sparse_array._sparse_array) == 0
assert len(sparse_array) == 3 * 3 * 3 * 3
    assert sparse_array.shape == array_shape
    assert sparse_array.rank() == 4
one_dim_array = MutableDenseNDimArray([2, 3, 1])
assert len(one_dim_array) == 3
assert one_dim_array.shape == (3,)
assert one_dim_array.rank() == 1
assert one_dim_array.tolist() == [2, 3, 1]
shape = (3, 3)
array_with_many_args = MutableSparseNDimArray.zeros(*shape)
assert len(array_with_many_args) == 3 * 3
assert array_with_many_args.shape == shape
assert array_with_many_args[0, 0] == 0
assert array_with_many_args.rank() == 2
def test_reshape():
array = MutableDenseNDimArray(range(50), 50)
assert array.shape == (50,)
assert array.rank() == 1
array = array.reshape(5, 5, 2)
assert array.shape == (5, 5, 2)
assert array.rank() == 3
assert len(array) == 50
def test_iterator():
array = MutableDenseNDimArray(range(4), (2, 2))
j = 0
for i in array:
assert i == j
j += 1
array = array.reshape(4)
j = 0
for i in array:
assert i == j
j += 1
def test_sparse():
sparse_array = MutableSparseNDimArray([0, 0, 0, 1], (2, 2))
assert len(sparse_array) == 2 * 2
    # internal storage dictionary: only the non-zero entries are actually stored:
assert len(sparse_array._sparse_array) == 1
assert list(sparse_array) == [0, 0, 0, 1]
for i, j in zip(sparse_array, [0, 0, 0, 1]):
assert i == j
sparse_array[0, 0] = 123
assert len(sparse_array._sparse_array) == 2
assert sparse_array[0, 0] == 123
    # when an element of the sparse array becomes zero, it disappears
    # from the dictionary
sparse_array[0, 0] = 0
assert len(sparse_array._sparse_array) == 1
sparse_array[1, 1] = 0
assert len(sparse_array._sparse_array) == 0
assert sparse_array[0, 0] == 0
def test_calculation():
a = MutableDenseNDimArray([1]*9, (3, 3))
b = MutableDenseNDimArray([9]*9, (3, 3))
c = a + b
for i in c:
assert i == 10
assert c == MutableDenseNDimArray([10]*9, (3, 3))
assert c == MutableSparseNDimArray([10]*9, (3, 3))
c = b - a
for i in c:
assert i == 8
assert c == MutableDenseNDimArray([8]*9, (3, 3))
assert c == MutableSparseNDimArray([8]*9, (3, 3))
def test_ndim_array_converting():
dense_array = MutableDenseNDimArray([1, 2, 3, 4], (2, 2))
alist = dense_array.tolist()
    assert alist == [[1, 2], [3, 4]]
matrix = dense_array.tomatrix()
assert (isinstance(matrix, Matrix))
for i in range(len(dense_array)):
assert dense_array[i] == matrix[i]
assert matrix.shape == dense_array.shape
sparse_array = MutableSparseNDimArray([1, 2, 3, 4], (2, 2))
alist = sparse_array.tolist()
assert alist == [[1, 2], [3, 4]]
matrix = sparse_array.tomatrix()
assert(isinstance(matrix, SparseMatrix))
for i in range(len(sparse_array)):
assert sparse_array[i] == matrix[i]
assert matrix.shape == sparse_array.shape
def test_converting_functions():
arr_list = [1, 2, 3, 4]
arr_matrix = Matrix(((1, 2), (3, 4)))
# list
arr_ndim_array = MutableDenseNDimArray(arr_list, (2, 2))
assert (isinstance(arr_ndim_array, MutableDenseNDimArray))
assert arr_matrix.tolist() == arr_ndim_array.tolist()
# Matrix
arr_ndim_array = MutableDenseNDimArray(arr_matrix)
assert (isinstance(arr_ndim_array, MutableDenseNDimArray))
assert arr_matrix.tolist() == arr_ndim_array.tolist()
assert arr_matrix.shape == arr_ndim_array.shape
def test_equality():
first_list = [1, 2, 3, 4]
second_list = [1, 2, 3, 4]
third_list = [4, 3, 2, 1]
assert first_list == second_list
assert first_list != third_list
first_ndim_array = MutableDenseNDimArray(first_list, (2, 2))
second_ndim_array = MutableDenseNDimArray(second_list, (2, 2))
third_ndim_array = MutableDenseNDimArray(third_list, (2, 2))
fourth_ndim_array = MutableDenseNDimArray(first_list, (2, 2))
assert first_ndim_array == second_ndim_array
second_ndim_array[0, 0] = 0
assert first_ndim_array != second_ndim_array
assert first_ndim_array != third_ndim_array
assert first_ndim_array == fourth_ndim_array
def test_arithmetic():
a = MutableDenseNDimArray([3 for i in range(9)], (3, 3))
b = MutableDenseNDimArray([7 for i in range(9)], (3, 3))
c1 = a + b
c2 = b + a
assert c1 == c2
d1 = a - b
d2 = b - a
assert d1 == d2 * (-1)
e1 = a * 5
e2 = 5 * a
e3 = copy(a)
e3 *= 5
assert e1 == e2 == e3
f1 = a / 5
f2 = copy(a)
f2 /= 5
assert f1 == f2
assert f1[0, 0] == f1[0, 1] == f1[0, 2] == f1[1, 0] == f1[1, 1] == \
f1[1, 2] == f1[2, 0] == f1[2, 1] == f1[2, 2] == Rational(3, 5)
assert type(a) == type(b) == type(c1) == type(c2) == type(d1) == type(d2) \
== type(e1) == type(e2) == type(e3) == type(f1)
def test_higher_dimensions():
m3 = MutableDenseNDimArray(range(10, 34), (2, 3, 4))
assert m3.tolist() == [[[10, 11, 12, 13],
[14, 15, 16, 17],
[18, 19, 20, 21]],
[[22, 23, 24, 25],
[26, 27, 28, 29],
[30, 31, 32, 33]]]
assert m3._get_tuple_index(0) == (0, 0, 0)
assert m3._get_tuple_index(1) == (0, 0, 1)
assert m3._get_tuple_index(4) == (0, 1, 0)
assert m3._get_tuple_index(12) == (1, 0, 0)
assert str(m3) == '[[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]]'
m3_rebuilt = MutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]])
assert m3 == m3_rebuilt
m3_other = MutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]], (2, 3, 4))
assert m3 == m3_other
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Rally command: task """
from __future__ import print_function
import json
import os
import sys
import webbrowser
import jsonschema
from oslo_utils import uuidutils
import yaml
from rally import api
from rally.cli import cliutils
from rally.cli import envutils
from rally.common import db
from rally.common import fileutils
from rally.common.i18n import _
from rally.common import junit
from rally.common import log as logging
from rally.common import objects
from rally.common import utils as rutils
from rally import consts
from rally import exceptions
from rally import plugins
from rally.task.processing import plot
from rally.task.processing import utils
class FailedToLoadTask(exceptions.RallyException):
msg_fmt = _("Failed to load task")
class TaskCommands(object):
"""Task management.
Set of commands that allow you to manage benchmarking tasks and results.
"""
def _load_task(self, task_file, task_args=None, task_args_file=None):
"""Load tasks template from file and render it with passed args.
:param task_file: Path to file with input task
:param task_args: JSON or YAML representation of dict with args that
will be used to render input task with jinja2
        :param task_args_file: Path to file with JSON or YAML representation
                               of dict that will be used to render the input
                               task with jinja2. If both task_args and
                               task_args_file are specified, they are merged;
                               task_args has higher priority, so its values
                               override those from task_args_file.
        :returns: parsed dict with the loaded and rendered task
"""
print(cliutils.make_header("Preparing input task"))
def print_invalid_header(source_name, args):
print(_("Invalid %(source)s passed: \n\n %(args)s \n")
% {"source": source_name, "args": args},
file=sys.stderr)
def parse_task_args(src_name, args):
try:
kw = args and yaml.safe_load(args)
kw = {} if kw is None else kw
except yaml.parser.ParserError as e:
print_invalid_header(src_name, args)
print(_("%(source)s has to be YAML or JSON. Details:"
"\n\n%(err)s\n")
% {"source": src_name, "err": e},
file=sys.stderr)
raise TypeError()
if not isinstance(kw, dict):
print_invalid_header(src_name, args)
print(_("%(src)s has to be dict, actually %(src_type)s\n")
% {"src": src_name, "src_type": type(kw)},
file=sys.stderr)
raise TypeError()
return kw
try:
kw = {}
if task_args_file:
with open(task_args_file) as f:
kw.update(parse_task_args("task_args_file", f.read()))
kw.update(parse_task_args("task_args", task_args))
except TypeError:
raise FailedToLoadTask()
with open(task_file) as f:
try:
input_task = f.read()
task_dir = os.path.expanduser(
os.path.dirname(task_file)) or "./"
rendered_task = api.Task.render_template(input_task,
task_dir, **kw)
except Exception as e:
print(_("Failed to render task template:\n%(task)s\n%(err)s\n")
% {"task": input_task, "err": e},
file=sys.stderr)
raise FailedToLoadTask()
print(_("Input task is:\n%s\n") % rendered_task)
try:
parsed_task = yaml.safe_load(rendered_task)
except Exception as e:
print(_("Wrong format of rendered input task. It should be "
"YAML or JSON.\n%s") % e,
file=sys.stderr)
raise FailedToLoadTask()
print(_("Task syntax is correct :)"))
return parsed_task
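    # A hypothetical example of the merge precedence described above:
    #   task_args_file -> '{"flavor": "m1.small", "times": 1}'
    #   task_args      -> '{"times": 5}'
    # renders the template with flavor="m1.small" and times=5
    # (task_args overrides the file).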
def _load_and_validate_task(self, task, task_args, task_args_file,
deployment, task_instance=None):
if not os.path.isfile(task):
if task_instance:
task_instance.set_failed(log="No such file '%s'" % task)
raise IOError("File '%s' is not found." % task)
input_task = self._load_task(task, task_args, task_args_file)
api.Task.validate(deployment, input_task, task_instance)
print(_("Task config is valid :)"))
return input_task
@cliutils.args("--deployment", type=str, dest="deployment",
required=False, help="UUID or name of the deployment")
@cliutils.args("--task", "--filename",
help="Path to the file with full configuration of task")
@cliutils.args("--task-args", dest="task_args",
help="Input task args (dict in json). These args are used "
"to render input task that is jinja2 template.")
@cliutils.args("--task-args-file", dest="task_args_file",
help="Path to the file with input task args (dict in "
"json/yaml). These args are used to render input "
"task that is jinja2 template.")
@envutils.with_default_deployment(cli_arg_name="deployment")
@plugins.ensure_plugins_are_loaded
def validate(self, task, deployment=None, task_args=None,
task_args_file=None):
"""Validate a task configuration file.
This will check that task configuration file has valid syntax and
all required options of scenarios, contexts, SLA and runners are set.
:param task: a file with yaml/json task
:param task_args: Input task args (dict in json/yaml). These args are
used to render input task that is jinja2 template.
:param task_args_file: File with input task args (dict in json/yaml).
These args are used to render input task that
is jinja2 template.
:param deployment: UUID or name of a deployment
"""
try:
self._load_and_validate_task(task, task_args, task_args_file,
deployment)
except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
print(e, file=sys.stderr)
return(1)
@cliutils.args("--deployment", type=str, dest="deployment",
required=False, help="UUID or name of the deployment")
@cliutils.args("--task", "--filename", help="Path to the input task file")
@cliutils.args("--task-args", dest="task_args",
help="Input task args (dict in json). These args are used "
"to render input task that is jinja2 template.")
@cliutils.args("--task-args-file", dest="task_args_file",
help="Path to the file with input task args (dict in "
"json/yaml). These args are used to render input "
"task that is jinja2 template.")
@cliutils.args("--tag", help="Tag for this task")
@cliutils.args("--no-use", action="store_false", dest="do_use",
help="Don't set new task as default for future operations")
@cliutils.args("--abort-on-sla-failure", action="store_true",
dest="abort_on_sla_failure",
help="Abort the execution of a benchmark scenario when"
"any SLA check for it fails")
@envutils.with_default_deployment(cli_arg_name="deployment")
@plugins.ensure_plugins_are_loaded
def start(self, task, deployment=None, task_args=None, task_args_file=None,
tag=None, do_use=False, abort_on_sla_failure=False):
"""Start benchmark task.
:param task: a file with yaml/json task
:param task_args: Input task args (dict in json/yaml). These args are
used to render input task that is jinja2 template.
:param task_args_file: File with input task args (dict in json/yaml).
These args are used to render input task that
is jinja2 template.
:param deployment: UUID or name of a deployment
:param tag: optional tag for this task
:param do_use: if True, the new task will be stored as the default one
for future operations
:param abort_on_sla_failure: if True, the execution of a benchmark
scenario will stop when any SLA check
for it fails
"""
task_instance = api.Task.create(deployment, tag)
try:
input_task = self._load_and_validate_task(
task, task_args, task_args_file, deployment,
task_instance=task_instance)
print(cliutils.make_header(
_("Task %(tag)s %(uuid)s: started")
% {"uuid": task_instance["uuid"],
"tag": task_instance["tag"]}))
print("Benchmarking... This can take a while...\n")
print("To track task status use:\n")
print("\trally task status\n\tor\n\trally task detailed\n")
if do_use:
self.use(task_instance["uuid"])
api.Task.start(deployment, input_task, task=task_instance,
abort_on_sla_failure=abort_on_sla_failure)
self.detailed(task_id=task_instance["uuid"])
except (exceptions.InvalidTaskException, FailedToLoadTask) as e:
task_instance.set_failed(log=e.format_message())
print(e, file=sys.stderr)
return(1)
@cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task")
@envutils.with_default_task_id
@cliutils.args("--soft", action="store_true",
help="Abort task after current scenario full execution")
def abort(self, task_id=None, soft=False):
"""Abort started benchmarking task.
:param task_id: Task uuid
:param soft: if set to True, task should be aborted after execution of
current scenario
"""
if soft:
print("INFO: please be informed that soft abort wont stop "
"current running scenario, it will prevent to start "
"new ones, so if you are running task with only one "
"scenario - soft abort will not help at all.")
api.Task.abort(task_id, soft, async=False)
print("Task %s successfully stopped." % task_id)
@cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task")
@envutils.with_default_task_id
def status(self, task_id=None):
"""Display current status of task.
:param task_id: Task uuid
Returns current status of task
"""
task = db.task_get(task_id)
print(_("Task %(task_id)s: %(status)s")
% {"task_id": task_id, "status": task["status"]})
@cliutils.args("--uuid", type=str, dest="task_id",
help=("uuid of task, if --uuid is \"last\" results of most "
"recently created task will be displayed."))
@cliutils.args("--iterations-data", dest="iterations_data",
action="store_true",
help="print detailed results for each iteration")
@envutils.with_default_task_id
def detailed(self, task_id=None, iterations_data=False):
"""Display results table.
:param task_id: Task uuid
:param iterations_data: print detailed results for each iteration
Prints detailed information of task.
"""
def _print_iterations_data(raw_data):
headers = ["iteration", "full duration"]
float_cols = ["full duration"]
atomic_actions = []
for row in raw_data:
# find first non-error result to get atomic actions names
if not row["error"] and "atomic_actions" in row:
atomic_actions = row["atomic_actions"].keys()
for row in raw_data:
if row["atomic_actions"]:
for (c, a) in enumerate(atomic_actions, 1):
action = "%(no)i. %(action)s" % {"no": c, "action": a}
headers.append(action)
float_cols.append(action)
break
table_rows = []
formatters = dict(zip(float_cols,
[cliutils.pretty_float_formatter(col, 3)
for col in float_cols]))
for (c, r) in enumerate(raw_data, 1):
dlist = [c]
dlist.append(r["duration"])
if r["atomic_actions"]:
for action in atomic_actions:
dlist.append(r["atomic_actions"].get(action) or 0)
table_rows.append(rutils.Struct(**dict(zip(headers, dlist))))
cliutils.print_list(table_rows,
fields=headers,
formatters=formatters)
print()
task = db.task_get_detailed(task_id)
if task is None:
print("The task %s can not be found" % task_id)
return(1)
print()
print("-" * 80)
print(_("Task %(task_id)s: %(status)s")
% {"task_id": task_id, "status": task["status"]})
if task["status"] == consts.TaskStatus.FAILED:
print("-" * 80)
verification = yaml.safe_load(task["verification_log"])
if not logging.is_debug():
print(verification[0])
print(verification[1])
print()
print(_("For more details run:\nrally -vd task detailed %s")
% task["uuid"])
else:
print(yaml.safe_load(verification[2]))
return
for result in task["results"]:
key = result["key"]
print("-" * 80)
print()
print("test scenario %s" % key["name"])
print("args position %s" % key["pos"])
print("args values:")
print(json.dumps(key["kw"], indent=2))
raw = result["data"]["raw"]
table_cols = ["action", "min", "median",
"90%ile", "95%ile", "max",
"avg", "success", "count"]
float_cols = ["min", "median",
"90%ile", "95%ile", "max",
"avg"]
formatters = dict(zip(float_cols,
[cliutils.pretty_float_formatter(col, 3)
for col in float_cols]))
table_rows = []
actions_data = utils.get_atomic_actions_data(raw)
for action in actions_data:
durations = actions_data[action]
if durations:
data = [action,
round(min(durations), 3),
round(utils.median(durations), 3),
round(utils.percentile(durations, 0.90), 3),
round(utils.percentile(durations, 0.95), 3),
round(max(durations), 3),
round(utils.mean(durations), 3),
"%.1f%%" % (len(durations) * 100.0 / len(raw)),
len(raw)]
else:
data = [action, None, None, None, None, None, None,
"0.0%", len(raw)]
table_rows.append(rutils.Struct(**dict(zip(table_cols, data))))
cliutils.print_list(table_rows, fields=table_cols,
formatters=formatters,
table_label="Response Times (sec)",
sortby_index=None)
if iterations_data:
_print_iterations_data(raw)
print(_("Load duration: %s") % result["data"]["load_duration"])
print(_("Full duration: %s") % result["data"]["full_duration"])
# NOTE(hughsaunders): ssrs=scenario specific results
ssrs = []
for result in raw:
data = result["scenario_output"].get("data")
if data:
ssrs.append(data)
if ssrs:
keys = set()
for ssr in ssrs:
keys.update(ssr.keys())
headers = ["key", "min", "median",
"90%ile", "95%ile", "max",
"avg"]
float_cols = ["min", "median", "90%ile",
"95%ile", "max", "avg"]
formatters = dict(zip(float_cols,
[cliutils.pretty_float_formatter(col, 3)
for col in float_cols]))
table_rows = []
for key in keys:
values = [float(ssr[key]) for ssr in ssrs if key in ssr]
if values:
row = [str(key),
round(min(values), 3),
round(utils.median(values), 3),
round(utils.percentile(values, 0.90), 3),
round(utils.percentile(values, 0.95), 3),
round(max(values), 3),
round(utils.mean(values), 3)]
else:
row = [str(key)] + ["n/a"] * 6
table_rows.append(rutils.Struct(**dict(zip(headers, row))))
print("\nScenario Specific Results\n")
cliutils.print_list(table_rows,
fields=headers,
formatters=formatters,
table_label="Response Times (sec)")
for result in raw:
errors = result["scenario_output"].get("errors")
if errors:
print(errors)
print()
print("HINTS:")
print(_("* To plot HTML graphics with this data, run:"))
print("\trally task report %s --out output.html" % task["uuid"])
print()
print(_("* To generate a JUnit report, run:"))
print("\trally task report %s --junit --out output.xml" %
task["uuid"])
print()
print(_("* To get raw JSON output of task results, run:"))
print("\trally task results %s\n" % task["uuid"])
@cliutils.args("--uuid", type=str, dest="task_id", help="uuid of task")
@envutils.with_default_task_id
@cliutils.suppress_warnings
def results(self, task_id=None):
"""Display raw task results.
This will produce a lot of output data about every iteration.
:param task_id: Task uuid
"""
results = [{"key": x["key"], "result": x["data"]["raw"],
"sla": x["data"]["sla"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"]}
for x in objects.Task.get(task_id).get_results()]
if results:
print(json.dumps(results, sort_keys=True, indent=4))
else:
print(_("The task %s marked as '%s'. Results "
"available when it is '%s' .") % (
task_id, consts.TaskStatus.FAILED, consts.TaskStatus.FINISHED))
return(1)
@cliutils.args("--deployment", type=str, dest="deployment",
help="List tasks from specified deployment."
"By default tasks listed from active deployment.")
@cliutils.args("--all-deployments", action="store_true",
dest="all_deployments",
help="List tasks from all deployments.")
@cliutils.args("--status", type=str, dest="status",
help="List tasks with specified status."
" Available statuses: %s" % ", ".join(consts.TaskStatus))
@cliutils.args("--uuids-only", action="store_true",
dest="uuids_only", help="List task UUIDs only")
@envutils.with_default_deployment(cli_arg_name="deployment")
def list(self, deployment=None, all_deployments=False, status=None,
uuids_only=False):
"""List tasks, started and finished.
Displayed tasks could be filtered by status or deployment.
By default 'rally task list' will display tasks from active deployment
without filtering by status.
:param deployment: UUID or name of deployment
:param status: task status to filter by.
Available task statuses are in rally.consts.TaskStatus
:param all_deployments: display tasks from all deployments
:param uuids_only: list task UUIDs only
"""
filters = {}
headers = ["uuid", "deployment_name", "created_at", "duration",
"status", "tag"]
if status in consts.TaskStatus:
filters.setdefault("status", status)
elif status:
print(_("Error: Invalid task status '%s'.\n"
"Available statuses: %s") % (
status, ", ".join(consts.TaskStatus)),
file=sys.stderr)
return(1)
if not all_deployments:
filters.setdefault("deployment", deployment)
task_list = [task.to_dict() for task in objects.Task.list(**filters)]
for x in task_list:
x["duration"] = x["updated_at"] - x["created_at"]
if uuids_only:
if task_list:
cliutils.print_list(task_list, ["uuid"],
print_header=False,
print_border=False)
elif task_list:
cliutils.print_list(
task_list,
headers, sortby_index=headers.index("created_at"))
else:
if status:
print(_("There are no tasks in '%s' status. "
"To run a new task, use:\n"
"\trally task start") % status)
else:
print(_("There are no tasks. To run a new task, use:\n"
"\trally task start"))
@cliutils.args("--tasks", dest="tasks", nargs="+",
help="uuids of tasks or json files with task results")
@cliutils.args("--out", type=str, dest="out", required=True,
help="Path to output file.")
@cliutils.args("--open", dest="open_it", action="store_true",
help="Open it in browser.")
@cliutils.args("--html", dest="out_format",
action="store_const", const="html",
help="Generate the report in HTML.")
@cliutils.args("--junit", dest="out_format",
action="store_const", const="junit",
help="Generate the report in the JUnit format.")
@envutils.default_from_global("tasks", envutils.ENV_TASK, "--uuid")
@cliutils.suppress_warnings
def report(self, tasks=None, out=None, open_it=False, out_format="html"):
"""Generate report file for specified task.
:param tasks: list, UUIDs of tasks or paths to files with task results
:param out: str, output file name
:param open_it: bool, whether to open output file in web browser
:param out_format: output format (junit or html)
"""
tasks = tasks if isinstance(tasks, list) else [tasks]
results = []
message = []
processed_names = {}
for task_file_or_uuid in tasks:
if os.path.exists(os.path.expanduser(task_file_or_uuid)):
with open(os.path.expanduser(task_file_or_uuid),
"r") as inp_js:
tasks_results = json.load(inp_js)
for result in tasks_results:
try:
jsonschema.validate(
result,
objects.task.TASK_RESULT_SCHEMA)
except jsonschema.ValidationError as e:
print(_("ERROR: Invalid task result format in %s")
% task_file_or_uuid, file=sys.stderr)
if logging.is_debug():
print(e, file=sys.stderr)
else:
print(e.message, file=sys.stderr)
return 1
elif uuidutils.is_uuid_like(task_file_or_uuid):
tasks_results = map(
lambda x: {"key": x["key"],
"sla": x["data"]["sla"],
"result": x["data"]["raw"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"]},
objects.Task.get(task_file_or_uuid).get_results())
else:
print(_("ERROR: Invalid UUID or file name passed: %s"
) % task_file_or_uuid,
file=sys.stderr)
return 1
for task_result in tasks_results:
if task_result["key"]["name"] in processed_names:
processed_names[task_result["key"]["name"]] += 1
task_result["key"]["pos"] = processed_names[
task_result["key"]["name"]]
else:
processed_names[task_result["key"]["name"]] = 0
results.append(task_result)
output_file = os.path.expanduser(out)
if out_format == "html":
with open(output_file, "w+") as f:
f.write(plot.plot(results))
if open_it:
webbrowser.open_new_tab("file://" + os.path.realpath(output_file))
elif out_format == "junit":
test_suite = junit.JUnit("Rally test suite")
for result in results:
if isinstance(result["sla"], list):
message = ",".join([sla["detail"] for sla in
result["sla"] if not sla["success"]])
if message:
outcome = junit.JUnit.FAILURE
else:
outcome = junit.JUnit.SUCCESS
test_suite.add_test(result["key"]["name"],
result["full_duration"], outcome, message)
with open(output_file, "w+") as f:
f.write(test_suite.to_xml())
else:
print(_("Invalid output format: %s") % out_format,
file=sys.stderr)
return 1
@cliutils.args("--force", action="store_true", help="force delete")
@cliutils.args("--uuid", type=str, dest="task_id", nargs="*",
metavar="TASK_ID",
help="uuid of task or a list of task uuids")
@envutils.with_default_task_id
def delete(self, task_id=None, force=False):
"""Delete task and its results.
:param task_id: Task uuid or a list of task uuids
:param force: Force delete or not
"""
def _delete_single_task(tid, force):
try:
api.Task.delete(tid, force=force)
print("Successfully deleted task `%s`" % tid)
except exceptions.TaskInvalidStatus as e:
print(e)
print("Use '--force' option to delete the task with vague "
"state.")
if isinstance(task_id, list):
for tid in task_id:
_delete_single_task(tid, force)
else:
_delete_single_task(task_id, force)
@cliutils.args("--uuid", type=str, dest="task_id", help="uuid of task")
@cliutils.args("--json", dest="tojson",
action="store_true",
help="output in json format")
@envutils.with_default_task_id
def sla_check(self, task_id=None, tojson=False):
"""Display SLA check results table.
:param task_id: Task uuid.
:returns: Number of failed criteria.
"""
results = objects.Task.get(task_id).get_results()
failed_criteria = 0
data = []
STATUS_PASS = "PASS"
STATUS_FAIL = "FAIL"
for result in results:
key = result["key"]
for sla in sorted(result["data"]["sla"],
key=lambda x: x["criterion"]):
success = sla.pop("success")
sla["status"] = success and STATUS_PASS or STATUS_FAIL
sla["benchmark"] = key["name"]
sla["pos"] = key["pos"]
failed_criteria += int(not success)
data.append(sla if tojson else rutils.Struct(**sla))
if tojson:
print(json.dumps(data, sort_keys=False))
else:
cliutils.print_list(data, ("benchmark", "pos", "criterion",
"status", "detail"))
return failed_criteria
@cliutils.args("--task", type=str, dest="task", required=False,
help="UUID of the task")
def use(self, task):
"""Set active task.
:param task: Task uuid.
"""
print("Using task: %s" % task)
db.task_get(task)
fileutils.update_globals_file("RALLY_TASK", task)
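# Illustrative sketch (not part of Rally): the task-args merge order that
# _load_task describes -- values from --task-args override values coming
# from --task-args-file.
def _merge_task_args_example():
    import yaml
    file_args = yaml.safe_load('{"flavor": "m1.tiny", "times": 1}')  # --task-args-file
    cli_args = yaml.safe_load('{"times": 5}')  # --task-args
    merged = dict(file_args)
    merged.update(cli_args)  # CLI args win
    return merged  # {'flavor': 'm1.tiny', 'times': 5}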
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Note that since July 2004, all patents on the LZW compression algorithm
have expired. Therefore the GIF format may now be used freely.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if it's the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
Many thanks to Alex Robinson for implementing the concept of subrectangles,
which (depending on image content) can give a very significant reduction in
file size.
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Useful links
-------------
* http://tronche.com/computer-graphics/gif/
* http://en.wikipedia.org/wiki/Graphics_Interchange_Format
* http://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
# todo: This module should be part of imageio (or at least based on it)
import os, time
try:
import PIL
from PIL import Image
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
def get_cKDTree():
try:
from scipy.spatial import cKDTree
except ImportError:
cKDTree = None
return cKDTree
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well,
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
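# For illustration: each image-data sub-block is chr(n) followed by n bytes
# (n <= 255), and a zero-length block (a single '\x00' byte) terminates the
# image data.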
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image.Image):
# We assume PIL images are fine as-is
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
def intToBin(i):
""" Integer to two bytes """
# divide into two parts (bytes)
i1 = i % 256
i2 = int( i/256)
# make string (little endian)
return chr(i1) + chr(i2)
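# For reference (illustrative, not in the original module): intToBin packs a
# 16-bit value little-endian, so intToBin(300) == '\x2c\x01' -- the same bytes
# struct.pack('<H', 300) would produce in Python 2.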
class GifWriter:
""" GifWriter()
Class that contains methods for helping write the animated GIF file.
"""
def getheaderAnim(self, im):
""" getheaderAnim(im)
Get animation header. To replace PILs getheader()[0]
"""
bb = "GIF89a"
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += "\x87\x00\x00"
return bb
def getImageDescriptor(self, im, xy=None):
""" getImageDescriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise the global color table applies to all frames, irrespective of
whether additional colors come into play that require a redefined
palette. Still a maximum of 256 colors per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in January 2011 to implement subrectangles.
"""
# Default: use full image and place at upper left
if xy is None:
xy = (0,0)
# Image separator,
bb = '\x2C'
# Image position and size
bb += intToBin( xy[0] ) # Left position
bb += intToBin( xy[1] ) # Top position
bb += intToBin( im.size[0] ) # image width
bb += intToBin( im.size[1] ) # image height
# packed field: local color table flag1, interlace0, sorted table0,
# reserved00, lct size111=7=2^(7+1)=256.
bb += '\x87'
# LZW minimum size code now comes later, at the beginning of [image data] blocks
return bb
def getAppExt(self, loops=float('inf')):
""" getAppExt(loops=float('inf'))
Application extension. This part specifies the number of loops.
If loops is 0 or inf, it goes on infinitely.
"""
if loops==0 or loops==float('inf'):
loops = 2**16-1
#bb = "" # application extension should not be used
# (the extension interprets zero loops
# to mean an infinite number of loops)
# Mmm, does not seem to work
if True:
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
bb += intToBin(loops)
bb += '\x00' # end
return bb
def getGraphicsControlExt(self, duration=0.1, dispose=2):
""" getGraphicsControlExt(duration=0.1, dispose=2)
Graphics Control Extension. A sort of header at the start of
each image. Specifies duration and transparency.
Dispose
-------
* 0 - No disposal specified.
* 1 - Do not dispose. The graphic is to be left in place.
* 2 - Restore to background color. The area used by the graphic
must be restored to the background color.
* 3 - Restore to previous. The decoder is required to restore the
area overwritten by the graphic with what was there prior to
rendering the graphic.
* 4-7 - To be defined.
"""
bb = '\x21\xF9\x04'
bb += chr((dispose & 3) << 2) # low bit 1 == transparency,
# 2nd bit 1 == user input , next 3 bits, the low two of which are used,
# are dispose.
bb += intToBin( int(duration*100) ) # in 100th of seconds
bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
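# Worked example (illustrative): GifWriter().getGraphicsControlExt(0.5, 2)
# yields '\x21\xF9\x04' + chr((2 & 3) << 2) + intToBin(50) + '\x00' + '\x00',
# i.e. the eight bytes '\x21\xF9\x04\x08\x32\x00\x00\x00'.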
def handleSubRectangles(self, images, subRectangles):
""" handleSubRectangles(images)
Handle the sub-rectangle stuff. If the rectangles are given by the
user, the values are checked. Otherwise the subrectangles are
calculated automatically.
"""
if isinstance(subRectangles, (tuple,list)):
# xy given directly
# Check xy
xy = subRectangles
if xy is None:
xy = (0,0)
if hasattr(xy, '__len__'):
if len(xy) == len(images):
xy = [xxyy for xxyy in xy]
else:
raise ValueError("len(xy) doesn't match amount of images.")
else:
xy = [xy for im in images]
xy[0] = (0,0)
else:
# Calculate xy using some basic image processing
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to use auto-subRectangles.")
# First make numpy arrays if required
for i in range(len(images)):
im = images[i]
if isinstance(im, Image.Image):
tmp = im.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
images[i] = a
# Determine the sub rectangles
images, xy = self.getSubRectangles(images)
# Done
return images, xy
def getSubRectangles(self, ims):
""" getSubRectangles(ims)
Calculate the minimal rectangles that need updating each frame.
Returns a two-element tuple containing the cropped images and a
list of x-y positions.
Calculating the subrectangles takes extra time, obviously. However,
if the image sizes are reduced, the actual writing of the GIF
goes faster; in some cases this makes producing the GIF faster overall.
"""
# Check image count
if len(ims) < 2:
return ims, [(0,0) for i in ims]
# We need numpy
if np is None:
raise RuntimeError("Need Numpy to calculate sub-rectangles. ")
# Prepare
ims2 = [ims[0]]
xy = [(0,0)]
t0 = time.time()
# Iterate over images
prev = ims[0]
for im in ims[1:]:
# Get difference, sum over colors (cast so uint8 values don't wrap)
diff = np.abs(im.astype(np.int16) - prev.astype(np.int16))
if diff.ndim==3:
diff = diff.sum(2)
# Get begin and end for both dimensions
X = np.argwhere(diff.sum(0))
Y = np.argwhere(diff.sum(1))
# Get rect coordinates
if X.size and Y.size:
x0, x1 = X[0], X[-1]+1
y0, y1 = Y[0], Y[-1]+1
else: # No change ... make it minimal
x0, x1 = 0, 2
y0, y1 = 0, 2
# Cut out and store
im2 = im[y0:y1,x0:x1]
prev = im
ims2.append(im2)
xy.append((x0,y0))
# Done
#print('%1.2f seconds to determine subrectangles of %i images' %
# (time.time()-t0, len(ims2)) )
return ims2, xy
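# Illustrative example (not in the original module): for two 4x4 frames that
# differ only in rows 1-2, columns 2-3, getSubRectangles returns the second
# frame cropped to that 2x2 block together with its offset:
#   a = np.zeros((4, 4), np.int16); b = a.copy(); b[1:3, 2:4] = 7
#   ims2, xy = GifWriter().getSubRectangles([a, b])
#   # ims2[1].shape == (2, 2); xy[1] gives the offset (x=2, y=1)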
def convertImagesToPIL(self, images, dither, nq=0):
""" convertImagesToPIL(images, nq=0)
Convert images to Paletted PIL images, which can then be
written to a single animated GIF.
"""
# Convert to PIL images
images2 = []
for im in images:
if isinstance(im, Image.Image):
images2.append(im)
elif np and isinstance(im, np.ndarray):
if im.ndim==3 and im.shape[2]==3:
im = Image.fromarray(im,'RGB')
elif im.ndim==3 and im.shape[2]==4:
im = Image.fromarray(im[:,:,:3],'RGB')
elif im.ndim==2:
im = Image.fromarray(im,'L')
images2.append(im)
# Convert to paletted PIL images
images, images2 = images2, []
if nq >= 1:
# NeuQuant algorithm
for im in images:
im = im.convert("RGBA") # NQ assumes RGBA
nqInstance = NeuQuant(im, int(nq)) # Learn colors from image
if dither:
im = im.convert("RGB").quantize(palette=nqInstance.paletteImage())
else:
im = nqInstance.quantize(im) # Use to quantize the image itself
images2.append(im)
else:
# Adaptive PIL algorithm
AD = Image.ADAPTIVE
for im in images:
im = im.convert('P', palette=AD, dither=dither)
images2.append(im)
# Done
return images2
def writeGifToFile(self, fp, images, durations, loops, xys, disposes):
""" writeGifToFile(fp, images, durations, loops, xys, disposes)
Given a set of images, writes the bytes to the specified stream.
"""
# Obtain palette for all images and count each occurrence
palettes, occur = [], []
for im in images:
palette = getheader(im)[1]
if not palette:
palette = PIL.ImagePalette.ImageColor
palettes.append(palette)
for palette in palettes:
occur.append( palettes.count( palette ) )
# Select most-used palette as the global one (or first in case no max)
globalPalette = palettes[ occur.index(max(occur)) ]
# Init
frames = 0
firstFrame = True
for im, palette in zip(images, palettes):
if firstFrame:
# Write header
# Gather info
header = self.getheaderAnim(im)
appext = self.getAppExt(loops)
# Write
fp.write(header.encode('utf-8'))
fp.write(globalPalette)
fp.write(appext.encode('utf-8'))
# Next frame is not the first
firstFrame = False
if True:
# Write palette and image data
# Gather info
data = getdata(im)
imdes, data = data[0], data[1:]
graphext = self.getGraphicsControlExt(durations[frames],
disposes[frames])
# Make image descriptor suitable for using 256 local color palette
lid = self.getImageDescriptor(im, xys[frames])
# Write local header
if (palette != globalPalette) or (disposes[frames] != 2):
# Use local color palette
fp.write(graphext.encode('utf-8'))
fp.write(lid.encode('utf-8')) # write suitable image descriptor
fp.write(palette) # write local color table
fp.write('\x08'.encode('utf-8')) # LZW minimum size code
else:
# Use global color palette
fp.write(graphext.encode('utf-8'))
fp.write(imdes) # write suitable image descriptor
# Write image data
for d in data:
fp.write(d)
# Prepare for next round
frames = frames + 1
fp.write(";".encode('utf-8')) # end gif
return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None):
""" writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
repeat : bool or integer
The number of loops. If True, loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed. When using this option,
better results are usually obtained when subRectangles is False.
subRectangles : False, True, or a list of 2-element tuples
Whether to use sub-rectangles. If True, the minimal rectangle that
is required to update each frame is automatically detected. This
can give significant reductions in file size, particularly if only
a part of the image changes. One can also give a list of x-y
coordinates if you want to do the cropping yourself. The default
is True.
dispose : int
How to dispose each frame. 1 means that each frame is to be left
in place. 2 means the background color should be restored after
each frame. 3 means the decoder should restore the previous frame.
If subRectangles==False, the default is 2, otherwise it is 1.
"""
# Check PIL
if PIL is None:
print("PIL is None")
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
print('check images')
images = checkImages(images)
# Instantiate writer object
gifWriter = GifWriter()
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images):
duration = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
duration = [duration for im in images]
# Check subrectangles
if subRectangles:
images, xy = gifWriter.handleSubRectangles(images, subRectangles)
defaultDispose = 1 # Leave image in place
else:
# Normal mode
xy = [(0,0) for im in images]
defaultDispose = 2 # Restore to background color.
# Check dispose
if dispose is None:
dispose = defaultDispose
if hasattr(dispose, '__len__'):
if len(dispose) != len(images):
raise ValueError("len(dispose) doesn't match the number of images.")
else:
dispose = [dispose for im in images]
# Make images in a format that we can write easy
images = gifWriter.convertImagesToPIL(images, dither, nq)
# Write
fp = open(filename, 'wb')
try:
print('writing to gif file')
gifWriter.writeGifToFile(fp, images, duration, loops, xy, dispose)
finally:
fp.close()
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: '+str(filename))
# Load file using PIL
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell()+1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append( PIL.Image.fromarray(im) )
# Done
return images
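# Usage sketch (illustrative; 'movie.gif' is a hypothetical file name):
#   frames = readGif('movie.gif', asNumpy=True)  # list of uint8 numpy arrays
#   pil_frames = readGif('movie.gif', asNumpy=False)  # list of PIL images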
class NeuQuant:
""" NeuQuant(image, samplefac=10, colors=256)
samplefac should be an integer of 1 or higher, 1
being the highest quality, but the slowest performance.
With a value of 10, one tenth of all pixels are used during
training. This value seems a nice tradeoff between speed
and quality.
colors is the number of colors to reduce the image to. This
should best be a power of two.
See also:
http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
License of the NeuQuant Neural-Net Quantization Algorithm
---------------------------------------------------------
Copyright (c) 1994 Anthony Dekker
Ported to python by Marius van Voorden in 2010
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
See "Kohonen neural networks for optimal colour quantization"
in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
for a discussion of the algorithm.
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
Any party obtaining a copy of these files from the author, directly or
indirectly, is granted, free of charge, a full and unrestricted irrevocable,
world-wide, paid up, royalty-free, nonexclusive right and license to deal
in this software and documentation files (the "Software"), including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons who receive
copies from any such party to do so, with the only requirement being
that this copyright notice remain intact.
"""
NCYCLES = None # Number of learning cycles
NETSIZE = None # Number of colours used
SPECIALS = None # Number of reserved colours used
BGCOLOR = None # Reserved background colour
CUTNETSIZE = None
MAXNETPOS = None
INITRAD = None # For 256 colours, radius starts at 32
RADIUSBIASSHIFT = None
RADIUSBIAS = None
INITBIASRADIUS = None
RADIUSDEC = None # Factor of 1/30 each cycle
ALPHABIASSHIFT = None
INITALPHA = None # biased by 10 bits
GAMMA = None
BETA = None
BETAGAMMA = None
network = None # The network itself
colormap = None # Color map derived from the network
netindex = None # For network lookup - really 256
bias = None # Bias and freq arrays for learning
freq = None
pimage = None
# Four primes near 500 - assume no image has a length so large
# that it is divisible by all four primes
PRIME1 = 499
PRIME2 = 491
PRIME3 = 487
PRIME4 = 503
MAXPRIME = PRIME4
pixels = None
samplefac = None
a_s = None
def setconstants(self, samplefac, colors):
self.NCYCLES = 100 # Number of learning cycles
self.NETSIZE = colors # Number of colours used
self.SPECIALS = 3 # Number of reserved colours used
self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
self.MAXNETPOS = self.NETSIZE - 1
self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
self.RADIUSBIASSHIFT = 6
self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
self.RADIUSDEC = 30 # Factor of 1/30 each cycle
self.ALPHABIASSHIFT = 10 # Alpha starts at 1
self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
self.GAMMA = 1024.0
self.BETA = 1.0/1024.0
self.BETAGAMMA = self.BETA * self.GAMMA
self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # Color map derived from the network
self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
self.freq = np.empty(self.NETSIZE, dtype='float64')
self.pixels = None
self.samplefac = samplefac
self.a_s = {}
def __init__(self, image, samplefac=10, colors=256):
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
# Check image
if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
raise IOError("Image is too small")
if image.mode != "RGBA":
raise IOError("Image mode should be RGBA.")
# Initialize
self.setconstants(samplefac, colors)
self.pixels = np.fromstring(image.tostring(), np.uint32)
self.setUpArrays()
self.learn()
self.fix()
self.inxbuild()
def writeColourMap(self, rgb, outstream):
for i in range(self.NETSIZE):
bb = self.colormap[i,0]
gg = self.colormap[i,1]
rr = self.colormap[i,2]
outstream.write(rr if rgb else bb)
outstream.write(gg)
outstream.write(bb if rgb else rr)
return self.NETSIZE
def setUpArrays(self):
self.network[0,0] = 0.0 # Black
self.network[0,1] = 0.0
self.network[0,2] = 0.0
self.network[1,0] = 255.0 # White
self.network[1,1] = 255.0
self.network[1,2] = 255.0
# RESERVED self.BGCOLOR # Background
for i in range(self.SPECIALS):
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
for i in range(self.SPECIALS, self.NETSIZE):
p = self.network[i]
p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
# Omitted: setPixels
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha*(n[0] - b))
n[1] -= (alpha*(n[1] - g))
n[2] -= (alpha*(n[2] - r))
def geta(self, alpha, rad):
try:
return self.a_s[(alpha, rad)]
except KeyError:
length = rad*2-1
mid = int(length//2)
q = np.array(list(range(mid-1,-1,-1))+list(range(-1,mid)))
a = alpha*(rad*rad - q*q)/(rad*rad)
a[mid] = 0
self.a_s[(alpha, rad)] = a
return a
def alterneigh(self, alpha, rad, i, b, g, r):
if i-rad >= self.SPECIALS-1:
lo = i-rad
start = 0
else:
lo = self.SPECIALS-1
start = (self.SPECIALS-1 - (i-rad))
if i+rad <= self.NETSIZE:
hi = i+rad
end = rad*2-1
else:
hi = self.NETSIZE
end = (self.NETSIZE - (i+rad))
a = self.geta(alpha, rad)[start:end]
p = self.network[lo+1:hi]
p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
#def contest(self, b, g, r):
# """ Search for biased BGR values
# Finds closest neuron (min dist) and updates self.freq
# finds best neuron (min dist-self.bias) and returns position
# for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
# self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
#
# i, j = self.SPECIALS, self.NETSIZE
# dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
# bestpos = i + np.argmin(dists)
# biasdists = dists - self.bias[i:j]
# bestbiaspos = i + np.argmin(biasdists)
# self.freq[i:j] -= self.BETA * self.freq[i:j]
# self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
# self.freq[bestpos] += self.BETA
# self.bias[bestpos] -= self.BETAGAMMA
# return bestbiaspos
def contest(self, b, g, r):
""" Search for biased BGR values
Finds closest neuron (min dist) and updates self.freq
finds best neuron (min dist-self.bias) and returns position
for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
i, j = self.SPECIALS, self.NETSIZE
dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
bestpos = i + np.argmin(dists)
biasdists = dists - self.bias[i:j]
bestbiaspos = i + np.argmin(biasdists)
self.freq[i:j] *= (1-self.BETA)
self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
self.freq[bestpos] += self.BETA
self.bias[bestpos] -= self.BETAGAMMA
return bestbiaspos
def specialFind(self, b, g, r):
for i in range(self.SPECIALS):
n = self.network[i]
if n[0] == b and n[1] == g and n[2] == r:
return i
return -1
def learn(self):
biasRadius = self.INITBIASRADIUS
alphadec = 30 + ((self.samplefac-1)/3)
lengthcount = self.pixels.size
samplepixels = lengthcount / self.samplefac
delta = samplepixels / self.NCYCLES
alpha = self.INITALPHA
i = 0
rad = biasRadius * 2**self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print("Beginning 1D learning: samplepixels = %1.2f rad = %i" %
(samplepixels, rad) )
step = 0
pos = 0
if lengthcount%NeuQuant.PRIME1 != 0:
step = NeuQuant.PRIME1
elif lengthcount%NeuQuant.PRIME2 != 0:
step = NeuQuant.PRIME2
elif lengthcount%NeuQuant.PRIME3 != 0:
step = NeuQuant.PRIME3
else:
step = NeuQuant.PRIME4
i = 0
printed_string = ''
while i < samplepixels:
if i%100 == 99:
tmp = '\b'*len(printed_string)
printed_string = str((i+1)*100/samplepixels)+"%\n"
print(tmp + printed_string)
p = self.pixels[pos]
r = (p >> 16) & 0xff
g = (p >> 8) & 0xff
b = (p ) & 0xff
if i == 0: # Remember background colour
self.network[self.BGCOLOR] = [b, g, r]
j = self.specialFind(b, g, r)
if j < 0:
j = self.contest(b, g, r)
if j >= self.SPECIALS: # Don't learn for specials
a = (1.0 * alpha) / self.INITALPHA
self.altersingle(a, j, b, g, r)
if rad > 0:
self.alterneigh(a, rad, j, b, g, r)
pos = (pos+step)%lengthcount
i += 1
if i%delta == 0:
alpha -= alpha / alphadec
biasRadius -= biasRadius / self.RADIUSDEC
rad = biasRadius * 2**self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
finalAlpha = (1.0*alpha)/self.INITALPHA
print("Finished 1D learning: final alpha = %1.2f!" % finalAlpha)
def fix(self):
for i in range(self.NETSIZE):
for j in range(3):
x = int(0.5 + self.network[i,j])
x = max(0, x)
x = min(255, x)
self.colormap[i,j] = x
self.colormap[i,3] = i
def inxbuild(self):
previouscol = 0
startpos = 0
for i in range(self.NETSIZE):
p = self.colormap[i]
q = None
smallpos = i
smallval = p[1] # Index on g
# Find smallest in i..self.NETSIZE-1
for j in range(i+1, self.NETSIZE):
q = self.colormap[j]
if q[1] < smallval: # Index on g
smallpos = j
smallval = q[1] # Index on g
q = self.colormap[smallpos]
# Swap p (i) and q (smallpos) entries
if i != smallpos:
p[:],q[:] = q, p.copy()
# smallval entry is now in position i
if smallval != previouscol:
self.netindex[previouscol] = (startpos+i) >> 1
for j in range(previouscol+1, smallval):
self.netindex[j] = i
previouscol = smallval
startpos = i
self.netindex[previouscol] = (startpos+self.MAXNETPOS) >> 1
for j in range(previouscol+1, 256): # Really 256
self.netindex[j] = self.MAXNETPOS
def paletteImage(self):
""" PIL weird interface for making a paletted image: create an image which
already has the palette, and use that in Image.quantize. This function
returns this palette image. """
if self.pimage is None:
palette = []
for i in range(self.NETSIZE):
palette.extend(self.colormap[i][:3])
palette.extend([0]*(256-self.NETSIZE)*3)
# a palette image to use for quant
self.pimage = Image.new("P", (1, 1), 0)
self.pimage.putpalette(palette)
return self.pimage
def quantize(self, image):
""" Use a kdtree to quickly find the closest palette colors for the pixels """
if get_cKDTree():
return self.quantize_with_scipy(image)
else:
print('Scipy not available, falling back to slower version.')
return self.quantize_without_scipy(image)
def quantize_with_scipy(self, image):
w,h = image.size
px = np.asarray(image).copy()
px2 = px[:,:,:3].reshape((w*h,3))
cKDTree = get_cKDTree()
kdtree = cKDTree(self.colormap[:,:3],leafsize=10)
result = kdtree.query(px2)
colorindex = result[1]
print("Distance: %1.2f" % (result[0].sum()/(w*h)) )
px2[:] = self.colormap[colorindex,:3]
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def quantize_without_scipy(self, image):
"""" This function can be used if no scipy is availabe.
It's 7 times slower though.
"""
w,h = image.size
px = np.asarray(image).copy()
memo = {}
for j in range(w):
for i in range(h):
key = (px[i,j,0],px[i,j,1],px[i,j,2])
try:
val = memo[key]
except KeyError:
val = self.convert(*key)
memo[key] = val
px[i,j,0],px[i,j,1],px[i,j,2] = val
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def convert(self, *color):
i = self.inxsearch(*color)
return self.colormap[i,:3]
def inxsearch(self, r, g, b):
"""Search for BGR values 0..255 and return colour index"""
dists = (self.colormap[:,:3] - np.array([r,g,b]))
a = np.argmin((dists*dists).sum(1))
return a
if __name__ == '__main__':
im = np.zeros((200,200), dtype=np.uint8)
im[10:30,:] = 100
im[:,80:120] = 255
im[-50:-40,:] = 50
images = [im*1.0, im*0.8, im*0.6, im*0.4, im*0]
writeGif('lala3.gif',images, duration=0.5, dither=0)
|
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
from oslo_log import log as logging
import webob.exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import hosts
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.policies import hosts as hosts_policies
LOG = logging.getLogger(__name__)
class HostController(wsgi.Controller):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
super(HostController, self).__init__()
self.api = compute.HostAPI()
@wsgi.Controller.api_version("2.1", "2.42")
@validation.query_schema(hosts.index_query)
@wsgi.expected_errors(())
def index(self, req):
"""Returns a dict in the format
| {'hosts': [{'host_name': 'some.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'some.other.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'some.celly.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'compute1.host.com',
| 'service': 'compute',
| 'zone': 'nova'},
| {'host_name': 'compute2.host.com',
| 'service': 'compute',
| 'zone': 'nova'},
| {'host_name': 'sched1.host.com',
| 'service': 'scheduler',
| 'zone': 'internal'},
| {'host_name': 'sched2.host.com',
| 'service': 'scheduler',
| 'zone': 'internal'},
| {'host_name': 'vol1.host.com',
| 'service': 'volume',
| 'zone': 'internal'}]}
"""
context = req.environ['nova.context']
context.can(hosts_policies.POLICY_NAME % 'list',
target={})
filters = {'disabled': False}
zone = req.GET.get('zone', None)
if zone:
filters['availability_zone'] = zone
services = self.api.service_get_all(context, filters=filters,
set_zones=True, all_cells=True)
hosts = []
api_services = ('nova-osapi_compute', 'nova-metadata')
for service in services:
if service.binary not in api_services:
hosts.append({'host_name': service['host'],
'service': service['topic'],
'zone': service['availability_zone']})
return {'hosts': hosts}
@wsgi.Controller.api_version("2.1", "2.42")
@wsgi.expected_errors((400, 404, 501))
@validation.schema(hosts.update)
def update(self, req, id, body):
"""Return booleanized version of body dict.
:param Request req: The request object (containing the 'nova.context'
env var).
:param str id: The host name.
:param dict body: example format {'host': {'status': 'enable',
'maintenance_mode': 'enable'}}
:return: Same dict as body but 'enable' strings for 'status' and
'maintenance_mode' are converted into True, else False.
:rtype: dict
"""
def read_enabled(orig_val):
# Convert enable/disable str to a bool.
val = orig_val.strip().lower()
return val == "enable"
context = req.environ['nova.context']
context.can(hosts_policies.POLICY_NAME % 'update',
target={})
# See what the user wants to 'update'
status = body.get('status')
maint_mode = body.get('maintenance_mode')
if status is not None:
status = read_enabled(status)
if maint_mode is not None:
maint_mode = read_enabled(maint_mode)
# Make the calls and merge the results
result = {'host': id}
if status is not None:
result['status'] = self._set_enabled_status(context, id, status)
if maint_mode is not None:
result['maintenance_mode'] = self._set_host_maintenance(context,
id,
maint_mode)
return result
def _set_host_maintenance(self, context, host_name, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
LOG.info("Putting host %(host_name)s in maintenance mode %(mode)s.",
{'host_name': host_name, 'mode': mode})
try:
result = self.api.set_host_maintenance(context, host_name, mode)
except NotImplementedError:
common.raise_feature_not_supported()
except (exception.HostNotFound, exception.HostMappingNotFound) as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("on_maintenance", "off_maintenance"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _set_enabled_status(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances.
:param enabled: a boolean - if False no new VMs will be able to start
on the host.
"""
if enabled:
LOG.info("Enabling host %s.", host_name)
else:
LOG.info("Disabling host %s.", host_name)
try:
result = self.api.set_host_enabled(context, host_name, enabled)
except NotImplementedError:
common.raise_feature_not_supported()
except (exception.HostNotFound, exception.HostMappingNotFound) as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("enabled", "disabled"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _host_power_action(self, req, host_name, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
try:
result = self.api.host_power_action(context, host_name, action)
except NotImplementedError:
common.raise_feature_not_supported()
except (exception.HostNotFound, exception.HostMappingNotFound) as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return {"host": host_name, "power_action": result}
@wsgi.Controller.api_version("2.1", "2.42")
@wsgi.expected_errors((400, 404, 501))
def startup(self, req, id):
context = req.environ['nova.context']
context.can(hosts_policies.POLICY_NAME % 'start',
target={})
return self._host_power_action(req, host_name=id, action="startup")
@wsgi.Controller.api_version("2.1", "2.42")
@wsgi.expected_errors((400, 404, 501))
def shutdown(self, req, id):
context = req.environ['nova.context']
context.can(hosts_policies.POLICY_NAME % 'shutdown',
target={})
return self._host_power_action(req, host_name=id, action="shutdown")
@wsgi.Controller.api_version("2.1", "2.42")
@wsgi.expected_errors((400, 404, 501))
def reboot(self, req, id):
context = req.environ['nova.context']
context.can(hosts_policies.POLICY_NAME % 'reboot',
target={})
return self._host_power_action(req, host_name=id, action="reboot")
@staticmethod
def _get_total_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(total)',
'cpu': compute_node.vcpus,
'memory_mb': compute_node.memory_mb,
'disk_gb': compute_node.local_gb}}
@staticmethod
def _get_used_now_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(used_now)',
'cpu': compute_node.vcpus_used,
'memory_mb': compute_node.memory_mb_used,
'disk_gb': compute_node.local_gb_used}}
@staticmethod
def _get_resource_totals_from_instances(host_name, instances):
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
for instance in instances:
cpu_sum += instance['vcpus']
mem_sum += instance['memory_mb']
hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
return {'resource': {'host': host_name,
'project': '(used_max)',
'cpu': cpu_sum,
'memory_mb': mem_sum,
'disk_gb': hdd_sum}}
@staticmethod
def _get_resources_by_project(host_name, instances):
# Getting usage resource per project
project_map = {}
for instance in instances:
resource = project_map.setdefault(instance['project_id'],
{'host': host_name,
'project': instance['project_id'],
'cpu': 0,
'memory_mb': 0,
'disk_gb': 0})
resource['cpu'] += instance['vcpus']
resource['memory_mb'] += instance['memory_mb']
resource['disk_gb'] += (instance['root_gb'] +
instance['ephemeral_gb'])
return project_map
@wsgi.Controller.api_version("2.1", "2.42")
@wsgi.expected_errors(404)
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
:param id: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
context = req.environ['nova.context']
context.can(hosts_policies.POLICY_NAME % 'show',
target={})
host_name = id
try:
mapping = objects.HostMapping.get_by_host(context, host_name)
nova_context.set_target_cell(context, mapping.cell_mapping)
compute_node = (
objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host_name))
instances = self.api.instance_get_all_by_host(context, host_name)
except (exception.ComputeHostNotFound,
exception.HostMappingNotFound) as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
resources = [self._get_total_resources(host_name, compute_node)]
resources.append(self._get_used_now_resources(host_name,
compute_node))
resources.append(self._get_resource_totals_from_instances(host_name,
instances))
by_proj_resources = self._get_resources_by_project(host_name,
instances)
for resource in by_proj_resources.values():
resources.append({'resource': resource})
return {'host': resources}
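# Illustrative sketch (not part of Nova): how _get_resources_by_project
# aggregates per-project usage from instance dicts (hypothetical data).
def _resources_by_project_example():
    instances = [
        {'project_id': 'p1', 'vcpus': 2, 'memory_mb': 2048,
         'root_gb': 20, 'ephemeral_gb': 0},
        {'project_id': 'p1', 'vcpus': 1, 'memory_mb': 512,
         'root_gb': 10, 'ephemeral_gb': 5},
    ]
    usage = HostController._get_resources_by_project('host1', instances)
    # usage['p1'] == {'host': 'host1', 'project': 'p1',
    #                 'cpu': 3, 'memory_mb': 2560, 'disk_gb': 35}
    return usage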
|
|
# The 6.00 Word Game
import random
import string
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
WORDLIST_FILENAME = "words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# wordList: list of strings
wordList = []
for line in inFile:
wordList.append(line.strip().lower())
print(" ", len(wordList), "words loaded.")
return wordList
def getFrequencyDict(sequence):
"""
Returns a dictionary where the keys are elements of the sequence
and the values are integer counts, for the number of times that
an element is repeated in the sequence.
sequence: string or list
return: dictionary
"""
# freqs: dictionary (element_type -> int)
freq = {}
for x in sequence:
freq[x] = freq.get(x,0) + 1
return freq
# (end of helper code)
# -----------------------------------
#
# Problem #1: Scoring a word
#
def getWordScore(word, n):
"""
Returns the score for a word. Assumes the word is a valid word.
The score for a word is the sum of the points for letters in the
word, multiplied by the length of the word, PLUS 50 points if all n
letters are used on the first turn.
Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is
worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)
word: string (lowercase letters)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: int >= 0
"""
score = 0
l = len(word)
for c in word:
score += SCRABBLE_LETTER_VALUES[c]
score *= l
if l == n:
score += 50
return score
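# For example (illustrative): getWordScore('weed', 7) == (4+1+1+2) * 4 == 32,
# while any valid 7-letter word played with n == 7 also earns the +50 bonus.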
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def displayHand(hand):
"""
Displays the letters currently in the hand.
For example:
>>> displayHand({'a':1, 'x':2, 'l':3, 'e':1})
Should print out something like:
a x x l l l e
The order of the letters is unimportant.
hand: dictionary (string -> int)
"""
for letter in hand.keys():
for j in range(hand[letter]):
print(letter,end=" ") # print all on the same line
print() # print an empty line
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def dealHand(n):
"""
Returns a random hand containing n lowercase letters.
    At least n/3 of the letters in the hand should be VOWELS.
Hands are represented as dictionaries. The keys are
letters and the values are the number of times the
particular letter is repeated in that hand.
n: int >= 0
returns: dictionary (string -> int)
"""
hand={}
numVowels = n // 3
for i in range(numVowels):
x = VOWELS[random.randrange(0,len(VOWELS))]
hand[x] = hand.get(x, 0) + 1
for i in range(numVowels, n):
x = CONSONANTS[random.randrange(0,len(CONSONANTS))]
hand[x] = hand.get(x, 0) + 1
return hand
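# A dealt hand is a frequency dictionary; dealHand(7) might return, e.g.,
# {'a': 1, 'e': 1, 'j': 2, 'm': 1, 'r': 1, 'x': 1} -- the letters are
# random, but n // 3 of them are always drawn from VOWELS.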
#
# Problem #2: Update a hand by removing letters
#
def updateHand(hand, word):
"""
Assumes that 'hand' has all the letters in word.
In other words, this assumes that however many times
a letter appears in 'word', 'hand' has at least as
many of that letter in it.
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
    updatedHand = hand.copy()  # shallow copy, so the caller's hand is untouched
for c in word:
updatedHand[c] -= 1
return updatedHand
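# For example (the original dictionary is left unmodified):
#   updateHand({'h': 1, 'e': 1, 'l': 2, 'o': 1}, 'hell')
#   -> {'h': 0, 'e': 0, 'l': 0, 'o': 1}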
#
# Problem #3: Test word validity
#
def isValidWord(word, hand, wordList):
"""
Returns True if word is in the wordList and is entirely
composed of letters in the hand. Otherwise, returns False.
Does not mutate hand or wordList.
word: string
hand: dictionary (string -> int)
wordList: list of lowercase strings
"""
for c in word:
if c not in hand:
return False
elif word.count(c) > hand[c]:
return False
    return word in wordList
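# For example, with hand = {'r': 1, 'a': 3, 'p': 2, 't': 1, 'u': 2}:
#   isValidWord('rapture', hand, wordList) -> False ('e' is not in hand)
#   isValidWord('pappa', hand, wordList)   -> False (needs three 'p's, the
#   hand holds two), while 'tarp' passes the letter check and is valid
#   whenever it appears in wordList.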
#
# Problem #4: Playing a hand
#
def calculateHandlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
return sum(hand.values())
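# For example: calculateHandlen({'a': 1, 'x': 2, 'l': 3}) -> 6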
def playHand(hand, wordList, n):
"""
Allows the user to play the given hand, as follows:
* The hand is displayed.
* The user may input a word or a single period (the string ".")
to indicate they're done playing
* Invalid words are rejected, and a message is displayed asking
the user to choose another word until they enter a valid word or "."
* When a valid word is entered, it uses up letters from the hand.
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the user
is asked to input another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when there are no more unused letters or the user
inputs a "."
hand: dictionary (string -> int)
wordList: list of lowercase strings
n: integer (HAND_SIZE; i.e., hand size required for additional points)
"""
# Keep track of the total score
score = 0
word = ''
# As long as there are still letters left in the hand:
    while calculateHandlen(hand) > 0:
# Display the hand
print('Current Hand: ', end='')
displayHand(hand)
# Ask user for input
word = input('Enter word, or a "." to indicate that you are finished:')
# If the input is a single period:
if word == '.':
# End the game (break out of the loop)
break
        # Otherwise, if the word is not valid:
        elif not isValidWord(word, hand, wordList):
            # Reject the invalid word (print a message) and ask again
            print('Invalid word, please try again.')
            continue
        # Otherwise (the word is valid):
        else:
            wordScore = getWordScore(word, n)
            score += wordScore
            # Tell the user how many points the word earned and the
            # updated total score
            print('"' + word + '" earned ' + str(wordScore) +
                  ' points. Total: ' + str(score) + ' points')
# Update the hand
hand = updateHand(hand, word)
if calculateHandlen(hand) == 0:
break
# Game is over (user entered a '.' or ran out of letters), so tell user the total score
    if word == '.':
        print('Goodbye! Total score: ' + str(score))
    elif calculateHandlen(hand) == 0:
        print('Ran out of letters. Total score: ' + str(score))
#
# Problem #5: Playing a game
#
def playGame(wordList):
"""
    Allows the user to play an arbitrary number of hands.
1) Asks the user to input 'n' or 'r' or 'e'.
* If the user inputs 'n', let the user play a new (random) hand.
* If the user inputs 'r', let the user play the last hand again.
* If the user inputs 'e', exit the game.
* If the user inputs anything else, tell them their input was invalid.
2) When done playing the hand, repeat from step 1
"""
userInput = ''
lastHand = ''
while userInput != 'e':
userInput = input('Enter n to deal a new hand, r to replay the last hand, or e to end game:')
# If the user inputs 'n', let the user play a new (random) hand.
if userInput == 'n':
lastHand = dealHand(HAND_SIZE)
playHand(lastHand, wordList, HAND_SIZE)
continue
# If the user inputs 'r', let the user play the last hand again.
elif userInput == 'r':
if lastHand == '':
print('You have not played a hand yet. Please play a new hand first!')
continue
else:
playHand(lastHand, wordList, HAND_SIZE)
continue
elif userInput == 'e':
break
else:
print('Invalid command.')
continue
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
wordList = loadWords()
playGame(wordList)
|
|
"""Tests for the HomeKit component."""
from __future__ import annotations
import asyncio
import os
from unittest.mock import ANY, AsyncMock, MagicMock, Mock, patch
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_CAMERA, CATEGORY_TELEVISION
import pytest
from homeassistant import config as hass_config
from homeassistant.components import homekit as homekit_base, zeroconf
from homeassistant.components.binary_sensor import BinarySensorDeviceClass
from homeassistant.components.homekit import (
MAX_DEVICES,
STATUS_READY,
STATUS_RUNNING,
STATUS_STOPPED,
STATUS_WAIT,
HomeKit,
)
from homeassistant.components.homekit.accessories import HomeBridge
from homeassistant.components.homekit.const import (
BRIDGE_NAME,
BRIDGE_SERIAL_NUMBER,
DEFAULT_PORT,
DOMAIN,
HOMEKIT,
HOMEKIT_MODE_ACCESSORY,
HOMEKIT_MODE_BRIDGE,
SERVICE_HOMEKIT_RESET_ACCESSORY,
SERVICE_HOMEKIT_UNPAIR,
)
from homeassistant.components.homekit.type_triggers import DeviceTriggerAccessory
from homeassistant.components.homekit.util import get_persist_fullpath_for_entry_id
from homeassistant.components.sensor import SensorDeviceClass
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_DEVICE_ID,
ATTR_ENTITY_ID,
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
CONF_PORT,
EVENT_HOMEASSISTANT_STARTED,
PERCENTAGE,
SERVICE_RELOAD,
STATE_ON,
)
from homeassistant.core import HomeAssistantError, State
from homeassistant.helpers import device_registry
from homeassistant.helpers.entityfilter import (
CONF_EXCLUDE_DOMAINS,
CONF_EXCLUDE_ENTITIES,
CONF_EXCLUDE_ENTITY_GLOBS,
CONF_INCLUDE_DOMAINS,
CONF_INCLUDE_ENTITIES,
CONF_INCLUDE_ENTITY_GLOBS,
convert_filter,
)
from homeassistant.setup import async_setup_component
from homeassistant.util import json as json_util
from .util import PATH_HOMEKIT, async_init_entry, async_init_integration
from tests.common import MockConfigEntry, get_fixture_path
IP_ADDRESS = "127.0.0.1"
def generate_filter(
include_domains,
include_entities,
exclude_domains,
    exclude_entities,
include_globs=None,
exclude_globs=None,
):
"""Generate an entity filter using the standard method."""
return convert_filter(
{
CONF_INCLUDE_DOMAINS: include_domains,
CONF_INCLUDE_ENTITIES: include_entities,
CONF_EXCLUDE_DOMAINS: exclude_domains,
            CONF_EXCLUDE_ENTITIES: exclude_entities,
CONF_INCLUDE_ENTITY_GLOBS: include_globs or [],
CONF_EXCLUDE_ENTITY_GLOBS: exclude_globs or [],
}
)
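# The resulting filter is a callable that tests entity ids; illustratively,
# generate_filter(["light"], [], [], [])("light.demo") is expected to be
# True, while the same filter applied to "switch.demo" would be False.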
@pytest.fixture(autouse=True)
def always_patch_driver(hk_driver):
"""Load the hk_driver fixture."""
@pytest.fixture(autouse=True)
def patch_source_ip(mock_get_source_ip):
"""Patch homeassistant and pyhap functions for getting local address."""
with patch("pyhap.util.get_local_address", return_value="10.10.10.10"):
yield
def _mock_homekit(hass, entry, homekit_mode, entity_filter=None, devices=None):
return HomeKit(
hass=hass,
name=BRIDGE_NAME,
port=DEFAULT_PORT,
ip_address=None,
entity_filter=entity_filter or generate_filter([], [], [], []),
exclude_accessory_mode=False,
entity_config={},
homekit_mode=homekit_mode,
advertise_ip=None,
entry_id=entry.entry_id,
entry_title=entry.title,
devices=devices,
)
def _mock_homekit_bridge(hass, entry):
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
homekit.driver = MagicMock()
return homekit
def _mock_accessories(accessory_count):
accessories = {}
for idx in range(accessory_count + 1):
accessories[idx + 1000] = MagicMock(async_stop=AsyncMock())
return accessories
def _mock_pyhap_bridge():
return MagicMock(
aid=1, accessories=_mock_accessories(10), display_name="HomeKit Bridge"
)
async def test_setup_min(hass, mock_async_zeroconf):
"""Test async_setup with min config options."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_NAME: BRIDGE_NAME, CONF_PORT: DEFAULT_PORT},
options={},
)
entry.add_to_hass(hass)
with patch(f"{PATH_HOMEKIT}.HomeKit") as mock_homekit, patch(
"homeassistant.components.network.async_get_source_ip", return_value="1.2.3.4"
):
mock_homekit.return_value = homekit = Mock()
type(homekit).async_start = AsyncMock()
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
mock_homekit.assert_any_call(
hass,
BRIDGE_NAME,
DEFAULT_PORT,
"1.2.3.4",
ANY,
ANY,
{},
HOMEKIT_MODE_BRIDGE,
None,
entry.entry_id,
entry.title,
devices=[],
)
# Test auto start enabled
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert mock_homekit().async_start.called is True
async def test_homekit_setup(hass, hk_driver, mock_async_zeroconf):
"""Test setup of bridge and driver."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_NAME: "mock_name", CONF_PORT: 12345},
source=SOURCE_IMPORT,
)
homekit = HomeKit(
hass,
BRIDGE_NAME,
DEFAULT_PORT,
IP_ADDRESS,
True,
{},
{},
HOMEKIT_MODE_BRIDGE,
advertise_ip=None,
entry_id=entry.entry_id,
entry_title=entry.title,
)
hass.states.async_set("light.demo", "on")
hass.states.async_set("light.demo2", "on")
zeroconf_mock = MagicMock()
uuid = await hass.helpers.instance_id.async_get()
with patch(f"{PATH_HOMEKIT}.HomeDriver", return_value=hk_driver) as mock_driver:
await hass.async_add_executor_job(homekit.setup, zeroconf_mock, uuid)
path = get_persist_fullpath_for_entry_id(hass, entry.entry_id)
mock_driver.assert_called_with(
hass,
entry.entry_id,
BRIDGE_NAME,
entry.title,
loop=hass.loop,
address=IP_ADDRESS,
port=DEFAULT_PORT,
persist_file=path,
advertised_address=None,
async_zeroconf_instance=zeroconf_mock,
zeroconf_server=f"{uuid}-hap.local.",
)
assert homekit.driver.safe_mode is False
async def test_homekit_setup_ip_address(hass, hk_driver, mock_async_zeroconf):
"""Test setup with given IP address."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_NAME: "mock_name", CONF_PORT: 12345},
source=SOURCE_IMPORT,
)
homekit = HomeKit(
hass,
BRIDGE_NAME,
DEFAULT_PORT,
"172.0.0.0",
True,
{},
{},
HOMEKIT_MODE_BRIDGE,
None,
entry_id=entry.entry_id,
entry_title=entry.title,
)
path = get_persist_fullpath_for_entry_id(hass, entry.entry_id)
uuid = await hass.helpers.instance_id.async_get()
with patch(f"{PATH_HOMEKIT}.HomeDriver", return_value=hk_driver) as mock_driver:
await hass.async_add_executor_job(homekit.setup, mock_async_zeroconf, uuid)
mock_driver.assert_called_with(
hass,
entry.entry_id,
BRIDGE_NAME,
entry.title,
loop=hass.loop,
address="172.0.0.0",
port=DEFAULT_PORT,
persist_file=path,
advertised_address=None,
async_zeroconf_instance=mock_async_zeroconf,
zeroconf_server=f"{uuid}-hap.local.",
)
async def test_homekit_setup_advertise_ip(hass, hk_driver, mock_async_zeroconf):
"""Test setup with given IP address to advertise."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_NAME: "mock_name", CONF_PORT: 12345},
source=SOURCE_IMPORT,
)
homekit = HomeKit(
hass,
BRIDGE_NAME,
DEFAULT_PORT,
"0.0.0.0",
True,
{},
{},
HOMEKIT_MODE_BRIDGE,
"192.168.1.100",
entry_id=entry.entry_id,
entry_title=entry.title,
)
async_zeroconf_instance = MagicMock()
path = get_persist_fullpath_for_entry_id(hass, entry.entry_id)
uuid = await hass.helpers.instance_id.async_get()
with patch(f"{PATH_HOMEKIT}.HomeDriver", return_value=hk_driver) as mock_driver:
await hass.async_add_executor_job(homekit.setup, async_zeroconf_instance, uuid)
mock_driver.assert_called_with(
hass,
entry.entry_id,
BRIDGE_NAME,
entry.title,
loop=hass.loop,
address="0.0.0.0",
port=DEFAULT_PORT,
persist_file=path,
advertised_address="192.168.1.100",
async_zeroconf_instance=async_zeroconf_instance,
zeroconf_server=f"{uuid}-hap.local.",
)
async def test_homekit_add_accessory(hass, mock_async_zeroconf):
"""Add accessory if config exists and get_acc returns an accessory."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entry.add_to_hass(hass)
homekit = _mock_homekit_bridge(hass, entry)
mock_acc = Mock(category="any")
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
homekit.bridge = _mock_pyhap_bridge()
with patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc:
mock_get_acc.side_effect = [None, mock_acc, None]
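        # get_accessory is stubbed to return None, then a mock accessory,
        # then None again: only the middle call should result in
        # bridge.add_accessory being invoked, which the asserts below check.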
state = State("light.demo", "on")
homekit.add_bridge_accessory(state)
mock_get_acc.assert_called_with(hass, ANY, ANY, 1403373688, {})
assert not homekit.bridge.add_accessory.called
state = State("demo.test", "on")
homekit.add_bridge_accessory(state)
mock_get_acc.assert_called_with(hass, ANY, ANY, 600325356, {})
assert homekit.bridge.add_accessory.called
state = State("demo.test_2", "on")
homekit.add_bridge_accessory(state)
mock_get_acc.assert_called_with(hass, ANY, ANY, 1467253281, {})
assert homekit.bridge.add_accessory.called
@pytest.mark.parametrize("acc_category", [CATEGORY_TELEVISION, CATEGORY_CAMERA])
async def test_homekit_warn_add_accessory_bridge(
hass, acc_category, mock_async_zeroconf, caplog
):
"""Test we warn when adding cameras or tvs to a bridge."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entry.add_to_hass(hass)
homekit = _mock_homekit_bridge(hass, entry)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
mock_camera_acc = Mock(category=acc_category)
homekit.bridge = _mock_pyhap_bridge()
with patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc:
mock_get_acc.side_effect = [None, mock_camera_acc, None]
state = State("camera.test", "on")
homekit.add_bridge_accessory(state)
mock_get_acc.assert_called_with(hass, ANY, ANY, 1508819236, {})
assert not homekit.bridge.add_accessory.called
assert "accessory mode" in caplog.text
async def test_homekit_remove_accessory(hass, mock_async_zeroconf):
"""Remove accessory from bridge."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
homekit.driver = "driver"
homekit.bridge = _mock_pyhap_bridge()
acc_mock = MagicMock()
acc_mock.stop = AsyncMock()
homekit.bridge.accessories = {6: acc_mock}
acc = await homekit.async_remove_bridge_accessory(6)
assert acc is acc_mock
assert acc_mock.stop.called
assert len(homekit.bridge.accessories) == 0
async def test_homekit_entity_filter(hass, mock_async_zeroconf):
"""Test the entity filter."""
entry = await async_init_integration(hass)
entity_filter = generate_filter(["cover"], ["demo.test"], [], [])
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE, entity_filter)
homekit.bridge = Mock()
homekit.bridge.accessories = {}
hass.states.async_set("cover.test", "open")
hass.states.async_set("demo.test", "on")
hass.states.async_set("light.demo", "on")
filtered_states = await homekit.async_configure_accessories()
assert hass.states.get("cover.test") in filtered_states
assert hass.states.get("demo.test") in filtered_states
assert hass.states.get("light.demo") not in filtered_states
async def test_homekit_entity_glob_filter(hass, mock_async_zeroconf):
"""Test the entity filter."""
entry = await async_init_integration(hass)
entity_filter = generate_filter(
["cover"], ["demo.test"], [], [], ["*.included_*"], ["*.excluded_*"]
)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE, entity_filter)
homekit.bridge = Mock()
homekit.bridge.accessories = {}
hass.states.async_set("cover.test", "open")
hass.states.async_set("demo.test", "on")
hass.states.async_set("cover.excluded_test", "open")
hass.states.async_set("light.included_test", "on")
filtered_states = await homekit.async_configure_accessories()
assert hass.states.get("cover.test") in filtered_states
assert hass.states.get("demo.test") in filtered_states
assert hass.states.get("cover.excluded_test") not in filtered_states
assert hass.states.get("light.included_test") in filtered_states
async def test_homekit_start(hass, hk_driver, mock_async_zeroconf, device_reg):
"""Test HomeKit start method."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
homekit.bridge = Mock()
homekit.bridge.accessories = []
homekit.driver = hk_driver
acc = Accessory(hk_driver, "any")
homekit.driver.accessory = acc
connection = (device_registry.CONNECTION_NETWORK_MAC, "AA:BB:CC:DD:EE:FF")
bridge_with_wrong_mac = device_reg.async_get_or_create(
config_entry_id=entry.entry_id,
connections={connection},
manufacturer="Any",
name="Any",
model="Home Assistant HomeKit Bridge",
)
hass.states.async_set("light.demo", "on")
hass.states.async_set("light.demo2", "on")
state = hass.states.async_all()[0]
with patch(f"{PATH_HOMEKIT}.HomeKit.add_bridge_accessory") as mock_add_acc, patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
) as mock_setup_msg, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
) as hk_driver_start:
await homekit.async_start()
await hass.async_block_till_done()
mock_add_acc.assert_any_call(state)
mock_setup_msg.assert_called_with(
hass, entry.entry_id, "Mock Title (Home Assistant Bridge)", ANY, ANY
)
assert hk_driver_start.called
assert homekit.status == STATUS_RUNNING
# Test start() if already started
hk_driver_start.reset_mock()
await homekit.async_start()
await hass.async_block_till_done()
assert not hk_driver_start.called
assert device_reg.async_get(bridge_with_wrong_mac.id) is None
device = device_reg.async_get_device(
{(DOMAIN, entry.entry_id, BRIDGE_SERIAL_NUMBER)}
)
assert device
formatted_mac = device_registry.format_mac(homekit.driver.state.mac)
assert (device_registry.CONNECTION_NETWORK_MAC, formatted_mac) in device.connections
# Start again to make sure the registry entry is kept
homekit.status = STATUS_READY
with patch(f"{PATH_HOMEKIT}.HomeKit.add_bridge_accessory") as mock_add_acc, patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
) as mock_setup_msg, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
) as hk_driver_start:
await homekit.async_start()
device = device_reg.async_get_device(
{(DOMAIN, entry.entry_id, BRIDGE_SERIAL_NUMBER)}
)
assert device
formatted_mac = device_registry.format_mac(homekit.driver.state.mac)
assert (device_registry.CONNECTION_NETWORK_MAC, formatted_mac) in device.connections
assert len(device_reg.devices) == 1
assert homekit.driver.state.config_version == 1
async def test_homekit_start_with_a_broken_accessory(
hass, hk_driver, mock_async_zeroconf
):
"""Test HomeKit start method."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_filter = generate_filter(["cover", "light"], ["demo.test"], [], [])
await async_init_entry(hass, entry)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE, entity_filter)
homekit.bridge = Mock()
homekit.bridge.accessories = []
homekit.driver = hk_driver
homekit.driver.accessory = Accessory(hk_driver, "any")
hass.states.async_set("light.demo", "on")
hass.states.async_set("light.broken", "on")
with patch(f"{PATH_HOMEKIT}.get_accessory", side_effect=Exception), patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
) as mock_setup_msg, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
) as hk_driver_start:
await homekit.async_start()
await hass.async_block_till_done()
mock_setup_msg.assert_called_with(
hass, entry.entry_id, "Mock Title (Home Assistant Bridge)", ANY, ANY
)
assert hk_driver_start.called
assert homekit.status == STATUS_RUNNING
# Test start() if already started
hk_driver_start.reset_mock()
await homekit.async_start()
await hass.async_block_till_done()
assert not hk_driver_start.called
async def test_homekit_start_with_a_device(
hass, hk_driver, mock_async_zeroconf, demo_cleanup, device_reg, entity_reg
):
"""Test HomeKit start method with a device."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
assert await async_setup_component(hass, "demo", {"demo": {}})
await hass.async_block_till_done()
reg_entry = entity_reg.async_get("light.ceiling_lights")
assert reg_entry is not None
device_id = reg_entry.device_id
await async_init_entry(hass, entry)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE, None, devices=[device_id])
homekit.driver = hk_driver
with patch(f"{PATH_HOMEKIT}.get_accessory", side_effect=Exception), patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
) as mock_setup_msg:
await homekit.async_start()
await hass.async_block_till_done()
mock_setup_msg.assert_called_with(
hass, entry.entry_id, "Mock Title (Home Assistant Bridge)", ANY, ANY
)
assert homekit.status == STATUS_RUNNING
assert isinstance(
list(homekit.driver.accessory.accessories.values())[0], DeviceTriggerAccessory
)
await homekit.async_stop()
async def test_homekit_stop(hass):
"""Test HomeKit stop method."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
homekit.driver = Mock()
homekit.driver.async_stop = AsyncMock()
homekit.bridge = Mock()
homekit.bridge.accessories = {}
assert homekit.status == STATUS_READY
await homekit.async_stop()
await hass.async_block_till_done()
homekit.status = STATUS_WAIT
await homekit.async_stop()
await hass.async_block_till_done()
homekit.status = STATUS_STOPPED
await homekit.async_stop()
await hass.async_block_till_done()
assert homekit.driver.async_stop.called is False
# Test if driver is started
homekit.status = STATUS_RUNNING
await homekit.async_stop()
await hass.async_block_till_done()
assert homekit.driver.async_stop.called is True
async def test_homekit_reset_accessories(hass, mock_async_zeroconf):
"""Test resetting HomeKit accessories."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "light.demo"
hass.states.async_set("light.demo", "on")
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory.Bridge.add_accessory"
) as mock_add_accessory, patch(
"pyhap.accessory_driver.AccessoryDriver.config_changed"
) as hk_driver_config_changed, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
), patch(
f"{PATH_HOMEKIT}.accessories.HomeAccessory.run"
) as mock_run, patch.object(
homekit_base, "_HOMEKIT_CONFIG_UPDATE_TIME", 0
):
await async_init_entry(hass, entry)
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
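        # Register the mock accessory under the same aid that the aid
        # storage allocates for this entity, so the reset service can
        # find it on the bridge.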
aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
homekit.bridge.accessories = {aid: acc_mock}
homekit.status = STATUS_RUNNING
homekit.driver.aio_stop_event = MagicMock()
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_RESET_ACCESSORY,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
assert hk_driver_config_changed.call_count == 2
assert mock_add_accessory.called
assert mock_run.called
homekit.status = STATUS_READY
async def test_homekit_unpair(hass, device_reg, mock_async_zeroconf):
"""Test unpairing HomeKit accessories."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "light.demo"
hass.states.async_set("light.demo", "on")
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
):
await async_init_entry(hass, entry)
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
homekit.bridge.accessories = {aid: acc_mock}
homekit.status = STATUS_RUNNING
homekit.driver.aio_stop_event = MagicMock()
state = homekit.driver.state
state.add_paired_client("client1", "any", b"1")
formatted_mac = device_registry.format_mac(state.mac)
hk_bridge_dev = device_reg.async_get_device(
{}, {(device_registry.CONNECTION_NETWORK_MAC, formatted_mac)}
)
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_UNPAIR,
{ATTR_DEVICE_ID: hk_bridge_dev.id},
blocking=True,
)
await hass.async_block_till_done()
assert state.paired_clients == {}
homekit.status = STATUS_STOPPED
async def test_homekit_unpair_missing_device_id(hass, device_reg, mock_async_zeroconf):
"""Test unpairing HomeKit accessories with invalid device id."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "light.demo"
hass.states.async_set("light.demo", "on")
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
):
await async_init_entry(hass, entry)
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
homekit.bridge.accessories = {aid: acc_mock}
homekit.status = STATUS_RUNNING
homekit.driver.aio_stop_event = MagicMock()
state = homekit.driver.state
state.add_paired_client("client1", "any", b"1")
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_UNPAIR,
{ATTR_DEVICE_ID: "notvalid"},
blocking=True,
)
await hass.async_block_till_done()
state.paired_clients = {"client1": "any"}
homekit.status = STATUS_STOPPED
async def test_homekit_unpair_not_homekit_device(hass, device_reg, mock_async_zeroconf):
"""Test unpairing HomeKit accessories with a non-homekit device id."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
not_homekit_entry = MockConfigEntry(
domain="not_homekit", data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "light.demo"
hass.states.async_set("light.demo", "on")
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
):
await async_init_entry(hass, entry)
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
homekit.bridge.accessories = {aid: acc_mock}
homekit.status = STATUS_RUNNING
device_entry = device_reg.async_get_or_create(
config_entry_id=not_homekit_entry.entry_id,
sw_version="0.16.0",
model="Powerwall 2",
manufacturer="Tesla",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
state = homekit.driver.state
state.add_paired_client("client1", "any", b"1")
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_UNPAIR,
{ATTR_DEVICE_ID: device_entry.id},
blocking=True,
)
await hass.async_block_till_done()
state.paired_clients = {"client1": "any"}
homekit.status = STATUS_STOPPED
async def test_homekit_reset_accessories_not_supported(hass, mock_async_zeroconf):
"""Test resetting HomeKit accessories with an unsupported entity."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "not_supported.demo"
hass.states.async_set("not_supported.demo", "on")
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory.Bridge.add_accessory"
) as mock_add_accessory, patch(
"pyhap.accessory_driver.AccessoryDriver.config_changed"
) as hk_driver_config_changed, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
), patch.object(
homekit_base, "_HOMEKIT_CONFIG_UPDATE_TIME", 0
):
await async_init_entry(hass, entry)
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
homekit.bridge.accessories = {aid: acc_mock}
homekit.status = STATUS_RUNNING
homekit.driver.aio_stop_event = MagicMock()
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_RESET_ACCESSORY,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
assert hk_driver_config_changed.call_count == 2
assert not mock_add_accessory.called
assert len(homekit.bridge.accessories) == 0
homekit.status = STATUS_STOPPED
async def test_homekit_reset_accessories_state_missing(hass, mock_async_zeroconf):
"""Test resetting HomeKit accessories when the state goes missing."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "light.demo"
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory.Bridge.add_accessory"
) as mock_add_accessory, patch(
"pyhap.accessory_driver.AccessoryDriver.config_changed"
) as hk_driver_config_changed, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
), patch.object(
homekit_base, "_HOMEKIT_CONFIG_UPDATE_TIME", 0
):
await async_init_entry(hass, entry)
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
homekit.bridge.accessories = {aid: acc_mock}
homekit.status = STATUS_RUNNING
homekit.driver.aio_stop_event = MagicMock()
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_RESET_ACCESSORY,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
assert hk_driver_config_changed.call_count == 0
assert not mock_add_accessory.called
homekit.status = STATUS_STOPPED
async def test_homekit_reset_accessories_not_bridged(hass, mock_async_zeroconf):
"""Test resetting HomeKit accessories when the state is not bridged."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "light.demo"
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory.Bridge.add_accessory"
) as mock_add_accessory, patch(
"pyhap.accessory_driver.AccessoryDriver.config_changed"
) as hk_driver_config_changed, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
), patch.object(
homekit_base, "_HOMEKIT_CONFIG_UPDATE_TIME", 0
):
await async_init_entry(hass, entry)
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
aid = homekit.aid_storage.get_or_allocate_aid_for_entity_id(entity_id)
homekit.bridge.accessories = {aid: acc_mock}
homekit.status = STATUS_RUNNING
homekit.driver.aio_stop_event = MagicMock()
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_RESET_ACCESSORY,
{ATTR_ENTITY_ID: "light.not_bridged"},
blocking=True,
)
await hass.async_block_till_done()
assert hk_driver_config_changed.call_count == 0
assert not mock_add_accessory.called
homekit.status = STATUS_STOPPED
async def test_homekit_reset_single_accessory(hass, mock_async_zeroconf):
"""Test resetting HomeKit single accessory."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "light.demo"
hass.states.async_set("light.demo", "on")
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory_driver.AccessoryDriver.config_changed"
) as hk_driver_config_changed, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
), patch(
f"{PATH_HOMEKIT}.accessories.HomeAccessory.run"
) as mock_run:
await async_init_entry(hass, entry)
homekit.status = STATUS_RUNNING
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
homekit.driver.accessory = acc_mock
homekit.driver.aio_stop_event = MagicMock()
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_RESET_ACCESSORY,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
assert mock_run.called
assert hk_driver_config_changed.call_count == 1
homekit.status = STATUS_READY
async def test_homekit_reset_single_accessory_unsupported(hass, mock_async_zeroconf):
"""Test resetting HomeKit single accessory with an unsupported entity."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "not_supported.demo"
hass.states.async_set("not_supported.demo", "on")
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory_driver.AccessoryDriver.config_changed"
) as hk_driver_config_changed, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
):
await async_init_entry(hass, entry)
homekit.status = STATUS_RUNNING
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
homekit.driver.accessory = acc_mock
homekit.driver.aio_stop_event = MagicMock()
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_RESET_ACCESSORY,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
assert hk_driver_config_changed.call_count == 0
homekit.status = STATUS_STOPPED
async def test_homekit_reset_single_accessory_state_missing(hass, mock_async_zeroconf):
"""Test resetting HomeKit single accessory when the state goes missing."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "light.demo"
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory_driver.AccessoryDriver.config_changed"
) as hk_driver_config_changed, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
):
await async_init_entry(hass, entry)
homekit.status = STATUS_RUNNING
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
homekit.driver.accessory = acc_mock
homekit.driver.aio_stop_event = MagicMock()
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_RESET_ACCESSORY,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
assert hk_driver_config_changed.call_count == 0
homekit.status = STATUS_STOPPED
async def test_homekit_reset_single_accessory_no_match(hass, mock_async_zeroconf):
"""Test resetting HomeKit single accessory when the entity id does not match."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_NAME: "mock_name", CONF_PORT: 12345}
)
entity_id = "light.demo"
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)
with patch(f"{PATH_HOMEKIT}.HomeKit", return_value=homekit), patch(
"pyhap.accessory_driver.AccessoryDriver.config_changed"
) as hk_driver_config_changed, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
):
await async_init_entry(hass, entry)
homekit.status = STATUS_RUNNING
acc_mock = MagicMock()
acc_mock.entity_id = entity_id
acc_mock.stop = AsyncMock()
homekit.driver.accessory = acc_mock
homekit.driver.aio_stop_event = MagicMock()
await hass.services.async_call(
DOMAIN,
SERVICE_HOMEKIT_RESET_ACCESSORY,
{ATTR_ENTITY_ID: "light.no_match"},
blocking=True,
)
await hass.async_block_till_done()
assert hk_driver_config_changed.call_count == 0
homekit.status = STATUS_STOPPED
async def test_homekit_too_many_accessories(
hass, hk_driver, caplog, mock_async_zeroconf
):
"""Test adding too many accessories to HomeKit."""
entry = await async_init_integration(hass)
entity_filter = generate_filter(["cover", "light"], ["demo.test"], [], [])
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE, entity_filter)
def _mock_bridge(*_):
mock_bridge = HomeBridge(hass, hk_driver, "mock_bridge")
# The bridge itself counts as an accessory
mock_bridge.accessories = range(MAX_DEVICES)
return mock_bridge
homekit.driver = hk_driver
homekit.driver.accessory = Accessory(hk_driver, "any")
hass.states.async_set("light.demo", "on")
hass.states.async_set("light.demo2", "on")
hass.states.async_set("light.demo3", "on")
with patch("pyhap.accessory_driver.AccessoryDriver.async_start"), patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
), patch(f"{PATH_HOMEKIT}.HomeBridge", _mock_bridge):
await homekit.async_start()
await hass.async_block_till_done()
assert "would exceed" in caplog.text
async def test_homekit_finds_linked_batteries(
hass, hk_driver, device_reg, entity_reg, mock_async_zeroconf
):
"""Test HomeKit start method."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
homekit.driver = hk_driver
homekit.bridge = MagicMock()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
sw_version="0.16.0",
hw_version="2.34",
model="Powerwall 2",
manufacturer="Tesla",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
binary_charging_sensor = entity_reg.async_get_or_create(
"binary_sensor",
"powerwall",
"battery_charging",
device_id=device_entry.id,
original_device_class=BinarySensorDeviceClass.BATTERY_CHARGING,
)
battery_sensor = entity_reg.async_get_or_create(
"sensor",
"powerwall",
"battery",
device_id=device_entry.id,
original_device_class=SensorDeviceClass.BATTERY,
)
light = entity_reg.async_get_or_create(
"light", "powerwall", "demo", device_id=device_entry.id
)
hass.states.async_set(
binary_charging_sensor.entity_id,
STATE_ON,
{ATTR_DEVICE_CLASS: BinarySensorDeviceClass.BATTERY_CHARGING},
)
hass.states.async_set(
battery_sensor.entity_id, 30, {ATTR_DEVICE_CLASS: SensorDeviceClass.BATTERY}
)
hass.states.async_set(light.entity_id, STATE_ON)
with patch(f"{PATH_HOMEKIT}.async_show_setup_message"), patch(
f"{PATH_HOMEKIT}.get_accessory"
) as mock_get_acc, patch("pyhap.accessory_driver.AccessoryDriver.async_start"):
await homekit.async_start()
await hass.async_block_till_done()
mock_get_acc.assert_called_with(
hass,
ANY,
ANY,
ANY,
{
"manufacturer": "Tesla",
"model": "Powerwall 2",
"sw_version": "0.16.0",
"hw_version": "2.34",
"platform": "test",
"linked_battery_charging_sensor": "binary_sensor.powerwall_battery_charging",
"linked_battery_sensor": "sensor.powerwall_battery",
},
)
async def test_homekit_async_get_integration_fails(
hass, hk_driver, device_reg, entity_reg, mock_async_zeroconf
):
"""Test that we continue if async_get_integration fails."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
homekit.driver = hk_driver
homekit.bridge = HomeBridge(hass, hk_driver, "mock_bridge")
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
sw_version="0.16.0",
model="Powerwall 2",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
binary_charging_sensor = entity_reg.async_get_or_create(
"binary_sensor",
"invalid_integration_does_not_exist",
"battery_charging",
device_id=device_entry.id,
original_device_class=BinarySensorDeviceClass.BATTERY_CHARGING,
)
battery_sensor = entity_reg.async_get_or_create(
"sensor",
"invalid_integration_does_not_exist",
"battery",
device_id=device_entry.id,
original_device_class=SensorDeviceClass.BATTERY,
)
light = entity_reg.async_get_or_create(
"light", "invalid_integration_does_not_exist", "demo", device_id=device_entry.id
)
hass.states.async_set(
binary_charging_sensor.entity_id,
STATE_ON,
{ATTR_DEVICE_CLASS: BinarySensorDeviceClass.BATTERY_CHARGING},
)
hass.states.async_set(
battery_sensor.entity_id, 30, {ATTR_DEVICE_CLASS: SensorDeviceClass.BATTERY}
)
hass.states.async_set(light.entity_id, STATE_ON)
with patch.object(homekit.bridge, "add_accessory"), patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
), patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
):
await homekit.async_start()
await hass.async_block_till_done()
mock_get_acc.assert_called_with(
hass,
ANY,
ANY,
ANY,
{
"model": "Powerwall 2",
"sw_version": "0.16.0",
"platform": "invalid_integration_does_not_exist",
"linked_battery_charging_sensor": "binary_sensor.invalid_integration_does_not_exist_battery_charging",
"linked_battery_sensor": "sensor.invalid_integration_does_not_exist_battery",
},
)
async def test_yaml_updates_update_config_entry_for_name(hass, mock_async_zeroconf):
"""Test async_setup with imported config."""
entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_IMPORT,
data={CONF_NAME: BRIDGE_NAME, CONF_PORT: DEFAULT_PORT},
options={},
)
entry.add_to_hass(hass)
with patch(f"{PATH_HOMEKIT}.HomeKit") as mock_homekit, patch(
"homeassistant.components.network.async_get_source_ip", return_value="1.2.3.4"
):
mock_homekit.return_value = homekit = Mock()
type(homekit).async_start = AsyncMock()
assert await async_setup_component(
hass, "homekit", {"homekit": {CONF_NAME: BRIDGE_NAME, CONF_PORT: 12345}}
)
await hass.async_block_till_done()
mock_homekit.assert_any_call(
hass,
BRIDGE_NAME,
12345,
"1.2.3.4",
ANY,
ANY,
{},
HOMEKIT_MODE_BRIDGE,
None,
entry.entry_id,
entry.title,
devices=[],
)
# Test auto start enabled
mock_homekit.reset_mock()
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
mock_homekit().async_start.assert_called()
async def test_homekit_uses_system_zeroconf(hass, hk_driver, mock_async_zeroconf):
"""Test HomeKit uses system zeroconf."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_NAME: BRIDGE_NAME, CONF_PORT: DEFAULT_PORT},
options={},
)
assert await async_setup_component(hass, "zeroconf", {"zeroconf": {}})
system_async_zc = await zeroconf.async_get_async_instance(hass)
with patch("pyhap.accessory_driver.AccessoryDriver.async_start"), patch(
f"{PATH_HOMEKIT}.HomeKit.async_stop"
), patch(f"{PATH_HOMEKIT}.async_port_is_available"):
entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert (
hass.data[DOMAIN][entry.entry_id][HOMEKIT].driver.advertiser
== system_async_zc
)
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
def _write_data(path: str, data: dict) -> None:
"""Write the data."""
os.makedirs(os.path.dirname(path), exist_ok=True)
json_util.save_json(path, data)
async def test_homekit_ignored_missing_devices(
hass, hk_driver, device_reg, entity_reg, mock_async_zeroconf
):
"""Test HomeKit handles a device in the entity registry but missing from the device registry."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
homekit.driver = hk_driver
homekit.bridge = _mock_pyhap_bridge()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
sw_version="0.16.0",
model="Powerwall 2",
manufacturer="Tesla",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
"binary_sensor",
"powerwall",
"battery_charging",
device_id=device_entry.id,
original_device_class=BinarySensorDeviceClass.BATTERY_CHARGING,
)
entity_reg.async_get_or_create(
"sensor",
"powerwall",
"battery",
device_id=device_entry.id,
original_device_class=SensorDeviceClass.BATTERY,
)
light = entity_reg.async_get_or_create(
"light", "powerwall", "demo", device_id=device_entry.id
)
before_removal = entity_reg.entities.copy()
# Delete the device to make sure we fallback
# to using the platform
device_reg.async_remove_device(device_entry.id)
# Wait for the entities to be removed
await asyncio.sleep(0)
await asyncio.sleep(0)
# Restore the registry
entity_reg.entities = before_removal
hass.states.async_set(light.entity_id, STATE_ON)
hass.states.async_set("light.two", STATE_ON)
with patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc, patch(
f"{PATH_HOMEKIT}.HomeBridge", return_value=homekit.bridge
), patch("pyhap.accessory_driver.AccessoryDriver.async_start"):
await homekit.async_start()
await hass.async_block_till_done()
mock_get_acc.assert_any_call(
hass,
ANY,
ANY,
ANY,
{
"platform": "Tesla Powerwall",
"linked_battery_charging_sensor": "binary_sensor.powerwall_battery_charging",
"linked_battery_sensor": "sensor.powerwall_battery",
},
)
async def test_homekit_finds_linked_motion_sensors(
hass, hk_driver, device_reg, entity_reg, mock_async_zeroconf
):
"""Test HomeKit start method."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
homekit.driver = hk_driver
homekit.bridge = HomeBridge(hass, hk_driver, "mock_bridge")
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
sw_version="0.16.0",
model="Camera Server",
manufacturer="Ubq",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
binary_motion_sensor = entity_reg.async_get_or_create(
"binary_sensor",
"camera",
"motion_sensor",
device_id=device_entry.id,
original_device_class=BinarySensorDeviceClass.MOTION,
)
camera = entity_reg.async_get_or_create(
"camera", "camera", "demo", device_id=device_entry.id
)
hass.states.async_set(
binary_motion_sensor.entity_id,
STATE_ON,
{ATTR_DEVICE_CLASS: BinarySensorDeviceClass.MOTION},
)
hass.states.async_set(camera.entity_id, STATE_ON)
with patch.object(homekit.bridge, "add_accessory"), patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
), patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
):
await homekit.async_start()
await hass.async_block_till_done()
mock_get_acc.assert_called_with(
hass,
ANY,
ANY,
ANY,
{
"manufacturer": "Ubq",
"model": "Camera Server",
"platform": "test",
"sw_version": "0.16.0",
"linked_motion_sensor": "binary_sensor.camera_motion_sensor",
},
)
async def test_homekit_finds_linked_humidity_sensors(
hass, hk_driver, device_reg, entity_reg, mock_async_zeroconf
):
"""Test HomeKit start method."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_BRIDGE)
homekit.driver = hk_driver
homekit.bridge = HomeBridge(hass, hk_driver, "mock_bridge")
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
sw_version="0.16.1",
model="Smart Brainy Clever Humidifier",
manufacturer="Home Assistant",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
humidity_sensor = entity_reg.async_get_or_create(
"sensor",
"humidifier",
"humidity_sensor",
device_id=device_entry.id,
original_device_class=SensorDeviceClass.HUMIDITY,
)
humidifier = entity_reg.async_get_or_create(
"humidifier", "humidifier", "demo", device_id=device_entry.id
)
hass.states.async_set(
humidity_sensor.entity_id,
"42",
{
ATTR_DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
},
)
hass.states.async_set(humidifier.entity_id, STATE_ON)
with patch.object(homekit.bridge, "add_accessory"), patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
), patch(f"{PATH_HOMEKIT}.get_accessory") as mock_get_acc, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
):
await homekit.async_start()
await hass.async_block_till_done()
mock_get_acc.assert_called_with(
hass,
ANY,
ANY,
ANY,
{
"manufacturer": "Home Assistant",
"model": "Smart Brainy Clever Humidifier",
"platform": "test",
"sw_version": "0.16.1",
"linked_humidity_sensor": "sensor.humidifier_humidity_sensor",
},
)
async def test_reload(hass, mock_async_zeroconf):
"""Test we can reload from yaml."""
entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_IMPORT,
data={CONF_NAME: "reloadable", CONF_PORT: 12345},
options={},
)
entry.add_to_hass(hass)
with patch(f"{PATH_HOMEKIT}.HomeKit") as mock_homekit, patch(
"homeassistant.components.network.async_get_source_ip", return_value="1.2.3.4"
):
mock_homekit.return_value = homekit = Mock()
assert await async_setup_component(
hass, "homekit", {"homekit": {CONF_NAME: "reloadable", CONF_PORT: 12345}}
)
await hass.async_block_till_done()
mock_homekit.assert_any_call(
hass,
"reloadable",
12345,
"1.2.3.4",
ANY,
False,
{},
HOMEKIT_MODE_BRIDGE,
None,
entry.entry_id,
entry.title,
devices=[],
)
yaml_path = get_fixture_path("configuration.yaml", "homekit")
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path), patch(
f"{PATH_HOMEKIT}.HomeKit"
) as mock_homekit2, patch.object(homekit.bridge, "add_accessory"), patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
), patch(
f"{PATH_HOMEKIT}.get_accessory"
), patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
), patch(
"homeassistant.components.network.async_get_source_ip", return_value="1.2.3.4"
):
mock_homekit2.return_value = homekit = Mock()
await hass.services.async_call(
"homekit",
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
mock_homekit2.assert_any_call(
hass,
"reloadable",
45678,
"1.2.3.4",
ANY,
False,
{},
HOMEKIT_MODE_BRIDGE,
None,
entry.entry_id,
entry.title,
devices=[],
)
async def test_homekit_start_in_accessory_mode(
hass, hk_driver, mock_async_zeroconf, device_reg
):
"""Test HomeKit start method in accessory mode."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)
homekit.bridge = Mock()
homekit.bridge.accessories = []
homekit.driver = hk_driver
homekit.driver.accessory = Accessory(hk_driver, "any")
hass.states.async_set("light.demo", "on")
with patch(f"{PATH_HOMEKIT}.HomeKit.add_bridge_accessory") as mock_add_acc, patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
) as mock_setup_msg, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
) as hk_driver_start:
await homekit.async_start()
await hass.async_block_till_done()
mock_add_acc.assert_not_called()
mock_setup_msg.assert_called_with(
hass, entry.entry_id, "Mock Title (demo)", ANY, ANY
)
assert hk_driver_start.called
assert homekit.status == STATUS_RUNNING
async def test_homekit_start_in_accessory_mode_unsupported_entity(
hass, hk_driver, mock_async_zeroconf, device_reg, caplog
):
"""Test HomeKit start method in accessory mode with an unsupported entity."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)
homekit.bridge = Mock()
homekit.bridge.accessories = []
homekit.driver = hk_driver
homekit.driver.accessory = Accessory(hk_driver, "any")
hass.states.async_set("notsupported.demo", "on")
with patch(f"{PATH_HOMEKIT}.HomeKit.add_bridge_accessory") as mock_add_acc, patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
) as mock_setup_msg, patch(
"pyhap.accessory_driver.AccessoryDriver.async_start"
) as hk_driver_start:
await homekit.async_start()
await hass.async_block_till_done()
assert not mock_add_acc.called
assert not mock_setup_msg.called
assert not hk_driver_start.called
assert homekit.status == STATUS_WAIT
assert "entity not supported" in caplog.text
async def test_homekit_start_in_accessory_mode_missing_entity(
hass, hk_driver, mock_async_zeroconf, device_reg, caplog
):
"""Test HomeKit start method in accessory mode when entity is not available."""
entry = await async_init_integration(hass)
homekit = _mock_homekit(hass, entry, HOMEKIT_MODE_ACCESSORY)
homekit.bridge = Mock()
homekit.bridge.accessories = []
homekit.driver = hk_driver
homekit.driver.accessory = Accessory(hk_driver, "any")
with patch(f"{PATH_HOMEKIT}.HomeKit.add_bridge_accessory") as mock_add_acc, patch(
f"{PATH_HOMEKIT}.async_show_setup_message"
), patch("pyhap.accessory_driver.AccessoryDriver.async_start"):
await homekit.async_start()
await hass.async_block_till_done()
mock_add_acc.assert_not_called()
assert homekit.status == STATUS_WAIT
assert "entity not available" in caplog.text
async def test_wait_for_port_to_free(hass, hk_driver, mock_async_zeroconf, caplog):
"""Test we wait for the port to free before declaring unload success."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_NAME: BRIDGE_NAME, CONF_PORT: DEFAULT_PORT},
options={},
)
entry.add_to_hass(hass)
with patch("pyhap.accessory_driver.AccessoryDriver.async_start"), patch(
f"{PATH_HOMEKIT}.HomeKit.async_stop"
), patch(f"{PATH_HOMEKIT}.async_port_is_available", return_value=True) as port_mock:
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert "Waiting for the HomeKit server to shutdown" not in caplog.text
assert port_mock.called
with patch("pyhap.accessory_driver.AccessoryDriver.async_start"), patch(
f"{PATH_HOMEKIT}.HomeKit.async_stop"
), patch.object(homekit_base, "PORT_CLEANUP_CHECK_INTERVAL_SECS", 0), patch(
f"{PATH_HOMEKIT}.async_port_is_available", return_value=False
) as port_mock:
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert "Waiting for the HomeKit server to shutdown" in caplog.text
assert port_mock.called
|
|
import uuid
from datetime import datetime
from amqpstorm import Message
from amqpstorm.exception import AMQPMessageError
from amqpstorm.tests.utility import FakeChannel
from amqpstorm.tests.utility import TestFramework
class MessageTests(TestFramework):
def test_message_create_new_message(self):
body = self.message
message = Message.create(None, body,
properties={'key': 'value',
'headers': {
b'name': b'eandersson'}
})
self.assertIsInstance(message, Message)
self.assertEqual(message._body, body)
result = message.to_dict()
self.assertIsNone(result['method'])
self.assertEqual(result['body'], body)
self.assertEqual(result['properties']['key'], 'value')
def test_message_default_properties(self):
body = self.message
message = Message.create(None, body)
self.assertIsNone(message.app_id)
self.assertIsNone(message.reply_to)
self.assertIsNone(message.content_encoding)
self.assertIsNone(message.content_type)
self.assertIsNone(message.priority)
self.assertIsNone(message.delivery_mode)
self.assertIsInstance(message.message_id, str)
self.assertIsInstance(message.correlation_id, str)
self.assertIsInstance(message.timestamp, datetime)
def test_message_app_id_custom_value(self):
app_id = 'my-app'
message = Message.create(None, '')
message.app_id = app_id
self.assertEqual(app_id, message.app_id)
def test_message_id_custom_value(self):
message_id = 'my-message-1'
message = Message.create(None, '')
message.message_id = message_id
self.assertEqual(message_id, message.properties['message_id'])
self.assertEqual(message_id, message._properties['message_id'])
def test_message_timestamp_custom_value(self):
dt = datetime.now()
message = Message.create(None, '')
message.timestamp = dt
self.assertEqual(dt, message.timestamp)
def test_message_content_encoding_custom_value(self):
content_encoding = 'gzip'
message = Message.create(None, '')
message.content_encoding = content_encoding
self.assertEqual(content_encoding, message.content_encoding)
def test_message_content_type_custom_value(self):
content_type = 'application/json'
message = Message.create(None, '')
message.content_type = content_type
self.assertEqual(content_type, message.content_type)
def test_message_delivery_mode_two(self):
delivery_mode = 2
message = Message.create(None, '')
message.delivery_mode = delivery_mode
self.assertEqual(delivery_mode, message.delivery_mode)
def test_message_priority_three(self):
priority = 3
message = Message.create(None, '')
message.priority = priority
self.assertEqual(priority, message.priority)
def test_message_correlation_id_custom_value(self):
correlation_id = str(uuid.uuid4())
message = Message.create(None, '')
message.correlation_id = correlation_id
self.assertEqual(correlation_id, message.correlation_id)
def test_message_reply_to_custom_value(self):
reply_to = str(uuid.uuid4())
message = Message.create(None, '')
message.reply_to = reply_to
self.assertEqual(reply_to, message.reply_to)
def test_message_redelivered(self):
message = Message.create(body='',
channel=FakeChannel())
message._method = {
'redelivered': True
}
self.assertTrue(message.redelivered)
def test_message_not_redelivered(self):
message = Message.create(body='',
channel=FakeChannel())
message._method = {
'redelivered': False
}
self.assertFalse(message.redelivered)
def test_message_redelivered_is_none(self):
message = Message.create(body='',
channel=FakeChannel())
message._method = {
'redelivered': None
}
self.assertIsNone(message.redelivered)
def test_message_redelivered_and_method_none(self):
message = Message.create(None, '')
message._method = dict()
self.assertIsNone(message.redelivered)
def test_message_redelivered_and_method_empty(self):
message = Message.create(None, '')
self.assertIsNone(message.redelivered)
def test_message_delivery_tag(self):
message = Message.create(body='',
channel=FakeChannel())
message._method = {
'delivery_tag': 5
}
self.assertEqual(message.delivery_tag, 5)
def test_message_delivery_tag_is_none(self):
message = Message.create(body='',
channel=FakeChannel())
message._method = {
'delivery_tag': None
}
self.assertIsNone(message.delivery_tag)
def test_message_delivery_tag_and_method_none(self):
message = Message.create(None, '')
message._method = dict()
self.assertIsNone(message.delivery_tag)
    def test_message_delivery_tag_and_method_empty(self):
message = Message.create(None, '')
self.assertIsNone(message.delivery_tag)
def test_message_do_not_override_properties(self):
        reply_to = self.message
correlation_id = str(uuid.uuid4())
message_id = str(uuid.uuid4())
timestamp = datetime.now()
properties = {
'reply_to': reply_to,
'correlation_id': correlation_id,
'message_id': message_id,
'timestamp': timestamp
}
message = Message.create(None, '', properties)
self.assertEqual(reply_to, message.reply_to)
self.assertEqual(correlation_id, message.correlation_id)
self.assertEqual(message_id, message.message_id)
self.assertEqual(timestamp, message.timestamp)
def test_message_get_channel(self):
class FakeClass(object):
pass
message = Message(body='',
channel=FakeClass())
self.assertIsInstance(message.channel, FakeClass)
def test_message_ack(self):
delivery_tag = 123456
message = Message.create(body='',
channel=FakeChannel())
message._method = {
'delivery_tag': delivery_tag
}
message.ack()
result = message.channel.result.pop(0)
self.assertEqual(result[0], delivery_tag)
self.assertEqual(result[1], False)
def test_message_nack(self):
delivery_tag = 123456
message = Message.create(body='',
channel=FakeChannel())
message._method = {
'delivery_tag': delivery_tag
}
message.nack(requeue=True)
result = message.channel.result.pop(0)
self.assertEqual(result[0], delivery_tag)
self.assertEqual(result[1], False)
self.assertEqual(result[2], True)
message.nack(requeue=False)
result = message.channel.result.pop(0)
self.assertEqual(result[0], delivery_tag)
self.assertEqual(result[1], False)
self.assertEqual(result[2], False)
def test_message_reject(self):
delivery_tag = 123456
message = Message.create(body='',
channel=FakeChannel())
message._method = {
'delivery_tag': delivery_tag
}
message.reject(requeue=True)
result = message.channel.result.pop(0)
self.assertEqual(result[0], delivery_tag)
self.assertEqual(result[1], True)
message.reject(requeue=False)
result = message.channel.result.pop(0)
self.assertEqual(result[0], delivery_tag)
self.assertEqual(result[1], False)
def test_message_ack_raises_on_outbound(self):
message = Message.create(body='',
channel=None)
self.assertRaises(AMQPMessageError, message.ack)
def test_message_nack_raises_on_outbound(self):
message = Message.create(body='',
channel=None)
self.assertRaises(AMQPMessageError, message.nack)
def test_message_reject_raises_on_outbound(self):
message = Message.create(body='',
channel=None)
self.assertRaises(AMQPMessageError, message.reject)
def test_message_auto_decode_enabled(self):
message = Message(body=self.message,
properties={'key': 'value',
'headers': {b'name': b'eandersson'}},
channel=None)
self.assertEqual(self.message, message.body)
self.assertIn('name', message.properties['headers'])
self.assertIn(b'name', message._properties['headers'])
self.assertIsInstance(message.properties['headers']['name'], str)
def test_message_auto_decode_cache(self):
message = Message(body=self.message,
channel=None)
self.assertEqual(self.message, message.body)
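        # Mutating the raw body after the first access should be invisible:
        # the decoded body is cached on first read.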
message._body = 'invalidate'
self.assertEqual(self.message, message.body)
def test_message_auto_decode_when_method_is_none(self):
message = Message(body=self.message,
method=None,
channel=None)
self.assertIsNone(message.method)
def test_message_auto_decode_when_method_contains_list(self):
method_data = {'key': [b'a', b'b']}
message = Message(body=self.message,
method=method_data,
channel=None)
self.assertEqual(method_data['key'][0].decode('utf-8'),
message.method['key'][0])
def test_message_auto_decode_when_method_is_tuple(self):
method_data = (1, 2, 3, 4, 5)
message = Message(body=self.message,
method=method_data,
channel=None)
self.assertEqual(method_data, message.method)
self.assertEqual(method_data[0], message.method[0])
self.assertEqual(method_data[4], message.method[4])
def test_message_auto_decode_when_properties_contains_list(self):
prop_data = [b'travis', 2, 3, 4, 5]
message = Message(body=self.message,
properties={'key': prop_data},
channel=None)
self.assertIsInstance(message.properties['key'], list)
self.assertEqual(prop_data[0].decode('utf-8'),
message.properties['key'][0])
self.assertEqual(prop_data[4], message.properties['key'][4])
def test_message_auto_decode_when_properties_contains_tuple(self):
prop_data = (b'travis', 2, 3, 4, 5)
message = Message(body=self.message,
properties={'key': prop_data},
channel=None)
self.assertIsInstance(message.properties['key'], tuple)
self.assertEqual(prop_data[0].decode('utf-8'),
message.properties['key'][0])
self.assertEqual(prop_data[4], message.properties['key'][4])
def test_message_auto_decode_when_properties_contains_dict(self):
prop_data = {
'hello': b'travis'
}
message = Message(body=self.message,
properties={'key': prop_data},
channel=None)
self.assertIsInstance(message.properties['key'], dict)
self.assertEqual(prop_data['hello'].decode('utf-8'),
message.properties['key']['hello'])
def test_message_auto_decode_disabled(self):
body = self.message
message = Message(body=body,
properties={'key': 'value',
'headers': {b'name': b'eandersson'}},
channel=None,
auto_decode=False)
self.assertEqual(body, message.body)
self.assertIn(b'name', message.properties['headers'])
self.assertIsInstance(message.properties['headers'][b'name'], bytes)
def test_message_update_property_with_decode(self):
message = Message(None, auto_decode=True)
message._update_properties('app_id', '123')
self.assertEqual(message.properties['app_id'], '123')
self.assertEqual(message._properties['app_id'], '123')
def test_message_update_property_without_decode(self):
message = Message.create(None, '', None)
message._auto_decode = False
message._update_properties('app_id', '123')
self.assertEqual(message.properties['app_id'], '123')
self.assertEqual(message._properties['app_id'], '123')
def test_message_json(self):
body = '{"key": "value"}'
message = Message(body=body, channel=None)
result = message.json()
self.assertIsInstance(result, dict)
self.assertEqual(result['key'], 'value')
def test_message_dict(self):
body = self.message
properties = {'key': 'value'}
method = {b'alternative': 'value'}
message = Message(body=body,
properties=properties,
method=method,
channel=None)
result = dict(message)
self.assertIsInstance(result, dict)
self.assertEqual(result['body'], body)
self.assertEqual(result['properties'], properties)
self.assertEqual(result['method'], method)
def test_message_to_dict(self):
body = self.message
properties = {'key': 'value'}
method = {b'alternative': 'value'}
message = Message(body=body,
properties=properties,
method=method,
channel=None)
result = message.to_dict()
self.assertIsInstance(result, dict)
self.assertEqual(result['body'], body)
self.assertEqual(result['properties'], properties)
self.assertEqual(result['method'], method)
def test_message_to_tuple(self):
body = self.message
message = Message(body=body,
properties={'key': 'value'},
method={'key': 'value'},
channel=None)
        result_body, channel, method, properties = message.to_tuple()
        self.assertEqual(body, result_body)
self.assertIsInstance(method, dict)
self.assertIsInstance(properties, dict)
self.assertIsNone(channel)
|
|
#!/usr/bin/python
import os
import re
from datetime import datetime, timedelta
from trac.tests.functional import *
from trac.util.datefmt import utc, localtz, format_date
class TestTickets(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Create a ticket, comment on it, and attach a file"""
# TODO: this should be split into multiple tests
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.create_ticket()
self._tester.add_comment(ticketid)
self._tester.attach_file_to_ticket(ticketid)
class TestTicketPreview(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Preview ticket creation"""
self._tester.go_to_front()
tc.follow('New Ticket')
summary = random_sentence(5)
desc = random_sentence(5)
tc.formvalue('propertyform', 'field-summary', summary)
tc.formvalue('propertyform', 'field-description', desc)
tc.submit('preview')
tc.url(self._tester.url + '/newticket$')
tc.find('ticket not yet created')
tc.find(summary)
tc.find(desc)
class TestTicketNoSummary(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Creating a ticket without summary should fail"""
self._tester.go_to_front()
tc.follow('New Ticket')
desc = random_sentence(5)
tc.formvalue('propertyform', 'field-description', desc)
tc.submit('submit')
tc.find(desc)
tc.find('Tickets must contain a summary.')
tc.find('Create New Ticket')
tc.find('ticket not yet created')
class TestTicketAltFormats(FunctionalTestCaseSetup):
def runTest(self):
"""Download ticket in alternative formats"""
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_ticket(ticketid)
for format in ['Comma-delimited Text', 'Tab-delimited Text',
'RSS Feed']:
tc.follow(format)
content = b.get_html()
if content.find(summary) < 0:
raise AssertionError('Summary missing from %s format' % format)
tc.back()
class TestTicketCSVFormat(FunctionalTestCaseSetup):
def runTest(self):
"""Download ticket in CSV format"""
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_ticket(ticketid)
tc.follow('Comma-delimited Text')
csv = b.get_html()
if not csv.startswith('id,summary,'):
raise AssertionError('Bad CSV format')
class TestTicketTabFormat(FunctionalTestCaseSetup):
def runTest(self):
"""Download ticket in Tab-delimitted format"""
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_ticket(ticketid)
tc.follow('Tab-delimited Text')
tab = b.get_html()
if not tab.startswith('id\tsummary\t'):
            raise AssertionError('Bad tab-delimited format')
class TestTicketRSSFormat(FunctionalTestCaseSetup):
def runTest(self):
"""Download ticket in RSS format"""
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_ticket(ticketid)
# Make a number of changes to exercise all of the RSS feed code
self._tester.go_to_ticket(ticketid)
tc.formvalue('propertyform', 'comment', random_sentence(3))
tc.formvalue('propertyform', 'field-type', 'task')
tc.formvalue('propertyform', 'description', summary + '\n\n' +
random_sentence(8))
tc.formvalue('propertyform', 'field-keywords', 'key')
tc.submit('submit')
        time.sleep(1)  # changes are stored with one-second resolution, so
                       # back-to-back edits need a pause to stay distinct
tc.formvalue('propertyform', 'field-keywords', '')
tc.submit('submit')
tc.find('RSS Feed')
tc.follow('RSS Feed')
rss = b.get_html()
if not rss.startswith('<?xml version="1.0"?>'):
raise AssertionError('RSS Feed not valid feed')
class TestTicketSearch(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket search"""
summary = random_sentence(4)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_front()
tc.follow('Search')
tc.formvalue('fullsearch', 'ticket', True)
tc.formvalue('fullsearch', 'q', summary)
tc.submit('Search')
tc.find('class="searchable">.*' + summary)
tc.notfind('No matches found')
class TestNonTicketSearch(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test non-ticket search"""
# Create a summary containing only unique words
summary = ' '.join([random_word() + '_TestNonTicketSearch'
for i in range(5)])
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_front()
tc.follow('Search')
tc.formvalue('fullsearch', 'ticket', False)
tc.formvalue('fullsearch', 'q', summary)
tc.submit('Search')
tc.notfind('class="searchable">' + summary)
tc.find('No matches found')
class TestTicketHistory(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket history"""
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
comment = random_sentence(5)
self._tester.add_comment(ticketid, comment=comment)
self._tester.go_to_ticket(ticketid)
url = b.get_url()
tc.go(url + '?version=0')
tc.find('at <[^>]*>*Initial Version')
tc.find(summary)
tc.notfind(comment)
tc.go(url + '?version=1')
tc.find('at <[^>]*>*Version 1')
tc.find(summary)
tc.find(comment)
class TestTicketHistoryDiff(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket history (diff)"""
name = 'TestTicketHistoryDiff'
ticketid = self._tester.create_ticket(name)
self._tester.go_to_ticket(ticketid)
tc.formvalue('propertyform', 'description', random_sentence(6))
tc.submit('submit')
tc.find('Description<[^>]*>\\s*modified \\(<[^>]*>diff', 's')
tc.follow('diff')
tc.find('Changes\\s*between\\s*<[^>]*>Initial Version<[^>]*>\\s*and' \
'\\s*<[^>]*>Version 1<[^>]*>\\s*of\\s*<[^>]*>Ticket #' , 's')
class TestTicketQueryLinks(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket query links"""
count = 3
ticket_ids = [self._tester.create_ticket(
summary='TestTicketQueryLinks%s' % i)
for i in range(count)]
self._tester.go_to_query()
# We don't have the luxury of javascript, so this is a multi-step
# process
tc.formvalue('query', 'add_filter_0', 'summary')
tc.submit('add_0')
tc.formvalue('query', '0_owner', 'nothing')
tc.submit('rm_filter_0_owner_0')
tc.formvalue('query', '0_summary', 'TestTicketQueryLinks')
tc.submit('update')
query_url = b.get_url()
for i in range(count):
tc.find('TestTicketQueryLinks%s' % i)
tc.follow('TestTicketQueryLinks0')
tc.find('class="missing">← Previous Ticket')
tc.find('title="Ticket #%s">Next Ticket' % ticket_ids[1])
tc.follow('Back to Query')
tc.url(re.escape(query_url))
tc.follow('TestTicketQueryLinks1')
tc.find('title="Ticket #%s">Previous Ticket' % ticket_ids[0])
tc.find('title="Ticket #%s">Next Ticket' % ticket_ids[2])
tc.follow('Next Ticket')
tc.find('title="Ticket #%s">Previous Ticket' % ticket_ids[1])
tc.find('class="missing">Next Ticket →')
class TestTicketQueryOrClause(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket query with an or clauses"""
count = 3
ticket_ids = [self._tester.create_ticket(
summary='TestTicketQueryOrClause%s' % i,
info={'keywords': str(i)})
for i in range(count)]
self._tester.go_to_query()
tc.formvalue('query', '0_owner', '')
tc.submit('rm_filter_0_owner_0')
tc.formvalue('query', 'add_filter_0', 'summary')
tc.submit('add_0')
tc.formvalue('query', '0_summary', 'TestTicketQueryOrClause1')
tc.formvalue('query', 'add_clause_1', 'keywords')
tc.submit('add_1')
tc.formvalue('query', '1_keywords', '2')
tc.submit('update')
tc.notfind('TestTicketQueryOrClause0')
for i in [1, 2]:
tc.find('TestTicketQueryOrClause%s' % i)
class TestTimelineTicketDetails(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket details on timeline"""
env = self._testenv.get_trac_environment()
env.config.set('timeline', 'ticket_show_details', 'yes')
env.config.save()
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_ticket(ticketid)
self._tester.add_comment(ticketid)
self._tester.go_to_timeline()
tc.formvalue('prefs', 'ticket_details', True)
tc.submit()
htmltags = '(<[^>]*>)*'
tc.find('Ticket ' + htmltags + '#' + str(ticketid) + htmltags + ' \\(' +
summary + '\\) updated\\s+by\\s+' + htmltags + 'admin', 's')
class TestAdminComponent(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create component"""
self._tester.create_component()
class TestAdminComponentDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate component"""
name = "DuplicateMilestone"
self._tester.create_component(name)
component_url = self._tester.url + "/admin/ticket/components"
tc.go(component_url)
tc.formvalue('addcomponent', 'name', name)
tc.submit()
tc.notfind(internal_error)
tc.find('Component .* already exists')
class TestAdminComponentRemoval(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove component"""
name = "RemovalComponent"
self._tester.create_component(name)
component_url = self._tester.url + "/admin/ticket/components"
tc.go(component_url)
tc.formvalue('component_table', 'sel', name)
tc.submit('remove')
tc.notfind(name)
class TestAdminComponentNonRemoval(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove no selected component"""
component_url = self._tester.url + "/admin/ticket/components"
tc.go(component_url)
tc.formvalue('component_table', 'remove', 'Remove selected items')
tc.submit('remove')
tc.find('No component selected')
class TestAdminComponentDefault(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin set default component"""
name = "DefaultComponent"
self._tester.create_component(name)
component_url = self._tester.url + "/admin/ticket/components"
tc.go(component_url)
tc.formvalue('component_table', 'default', name)
tc.submit('apply')
tc.find('type="radio" name="default" value="%s" checked="checked"' % \
name)
tc.go(self._tester.url + '/newticket')
tc.find('<option selected="selected" value="%s">%s</option>'
% (name, name))
class TestAdminComponentDetail(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin component detail"""
name = "DetailComponent"
self._tester.create_component(name)
component_url = self._tester.url + "/admin/ticket/components"
tc.go(component_url)
tc.follow(name)
desc = 'Some component description'
tc.formvalue('modcomp', 'description', desc)
tc.submit('cancel')
tc.url(component_url + '$')
tc.follow(name)
tc.notfind(desc)
class TestAdminMilestone(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create milestone"""
self._tester.create_milestone()
class TestAdminMilestoneSpace(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create milestone with a space"""
self._tester.create_milestone('Milestone 1')
class TestAdminMilestoneDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate milestone"""
name = "DuplicateMilestone"
self._tester.create_milestone(name)
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.formvalue('addmilestone', 'name', name)
tc.submit()
tc.notfind(internal_error)
tc.find('Milestone %s already exists' % name)
tc.notfind('%s')
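        # Guard against an unsubstituted '%s' placeholder leaking into the page.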
class TestAdminMilestoneDetail(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin modify milestone details"""
name = "DetailMilestone"
# Create a milestone
self._tester.create_milestone(name)
# Modify the details of the milestone
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.follow(name)
tc.url(milestone_url + '/' + name)
tc.formvalue('modifymilestone', 'description', 'Some description.')
tc.submit('save')
tc.url(milestone_url)
# Make sure the milestone isn't closed
self._tester.go_to_roadmap()
tc.find(name)
# Cancel more modifications
tc.go(milestone_url)
tc.url(milestone_url)
tc.follow(name)
tc.formvalue('modifymilestone', 'description',
'~~Some other description.~~')
tc.submit('cancel')
tc.url(milestone_url)
# Verify the correct modifications show up
self._tester.go_to_roadmap()
tc.find('Some description.')
tc.follow(name)
tc.find('Some description.')
class TestAdminMilestoneDue(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin milestone duedate"""
name = "DueMilestone"
duedate = datetime.now(tz=utc)
duedate_string = format_date(duedate, tzinfo=utc)
self._tester.create_milestone(name, due=duedate_string)
tc.find(duedate_string)
class TestAdminMilestoneDetailDue(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin modify milestone duedate on detail page"""
name = "DetailDueMilestone"
# Create a milestone
self._tester.create_milestone(name)
# Modify the details of the milestone
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.follow(name)
tc.url(milestone_url + '/' + name)
duedate = datetime.now(tz=utc)
duedate_string = format_date(duedate, tzinfo=utc)
tc.formvalue('modifymilestone', 'due', duedate_string)
tc.submit('save')
tc.url(milestone_url + '$')
tc.find(name + '(<[^>]*>|\\s)*'+ duedate_string, 's')
class TestAdminMilestoneCompleted(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin milestone completed"""
name = "CompletedMilestone"
self._tester.create_milestone(name)
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.follow(name)
tc.url(milestone_url + '/' + name)
tc.formvalue('modifymilestone', 'completed', True)
tc.submit('save')
tc.url(milestone_url + "$")
class TestAdminMilestoneCompletedFuture(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin milestone completed in the future"""
name = "CompletedFutureMilestone"
self._tester.create_milestone(name)
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.follow(name)
tc.url(milestone_url + '/' + name)
tc.formvalue('modifymilestone', 'completed', True)
cdate = datetime.now(tz=utc) + timedelta(days=1)
cdate_string = format_date(cdate, tzinfo=localtz)
tc.formvalue('modifymilestone', 'completeddate', cdate_string)
tc.submit('save')
tc.find('Completion date may not be in the future')
# And make sure it wasn't marked as completed.
self._tester.go_to_roadmap()
tc.find(name)
class TestAdminMilestoneRemove(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove milestone"""
name = "MilestoneRemove"
self._tester.create_milestone(name)
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.formvalue('milestone_table', 'sel', name)
tc.submit('remove')
tc.url(milestone_url + '$')
tc.notfind(name)
class TestAdminMilestoneRemoveMulti(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove multiple milestones"""
name = "MultiRemoveMilestone"
count = 3
for i in range(count):
self._tester.create_milestone("%s%s" % (name, i))
milestone_url = self._tester.url + '/admin/ticket/milestones'
tc.go(milestone_url)
tc.url(milestone_url + '$')
for i in range(count):
tc.find("%s%s" % (name, i))
for i in range(count):
tc.formvalue('milestone_table', 'sel', "%s%s" % (name, i))
tc.submit('remove')
tc.url(milestone_url + '$')
for i in range(count):
tc.notfind("%s%s" % (name, i))
class TestAdminMilestoneNonRemoval(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove no selected milestone"""
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.formvalue('milestone_table', 'remove', 'Remove selected items')
tc.submit('remove')
tc.find('No milestone selected')
class TestAdminMilestoneDefault(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin set default milestone"""
name = "DefaultMilestone"
self._tester.create_milestone(name)
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.formvalue('milestone_table', 'default', name)
tc.submit('apply')
tc.find('type="radio" name="default" value="%s" checked="checked"' % \
name)
# verify it is the default on the newticket page.
tc.go(self._tester.url + '/newticket')
tc.find('<option selected="selected" value="%s">%s</option>'
% (name, name))
class TestAdminPriority(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create priority"""
self._tester.create_priority()
class TestAdminPriorityDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate priority"""
name = "DuplicatePriority"
self._tester.create_priority(name)
self._tester.create_priority(name)
tc.find('Priority %s already exists' % name)
class TestAdminPriorityModify(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin modify priority"""
name = "ModifyPriority"
self._tester.create_priority(name)
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.find(name)
tc.follow(name)
tc.formvalue('modenum', 'name', name * 2)
tc.submit('save')
tc.url(priority_url + '$')
tc.find(name * 2)
class TestAdminPriorityRemove(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove priority"""
name = "RemovePriority"
self._tester.create_priority(name)
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.find(name)
tc.formvalue('enumtable', 'sel', name)
tc.submit('remove')
tc.url(priority_url + '$')
tc.notfind(name)
class TestAdminPriorityRemoveMulti(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove multiple priorities"""
name = "MultiRemovePriority"
count = 3
for i in range(count):
self._tester.create_priority("%s%s" % (name, i))
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
for i in range(count):
tc.find("%s%s" % (name, i))
for i in range(count):
tc.formvalue('enumtable', 'sel', "%s%s" % (name, i))
tc.submit('remove')
tc.url(priority_url + '$')
for i in range(count):
tc.notfind("%s%s" % (name, i))
class TestAdminPriorityNonRemoval(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove no selected priority"""
priority_url = self._tester.url + "/admin/ticket/priority"
tc.go(priority_url)
tc.formvalue('enumtable', 'remove', 'Remove selected items')
tc.submit('remove')
tc.find('No priority selected')
class TestAdminPriorityDefault(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin default priority"""
name = "DefaultPriority"
self._tester.create_priority(name)
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.find(name)
tc.formvalue('enumtable', 'default', name)
tc.submit('apply')
tc.url(priority_url + '$')
tc.find('radio.*"%s"\\schecked="checked"' % name)
class TestAdminPriorityDetail(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin modify priority details"""
name = "DetailPriority"
# Create a priority
self._tester.create_priority(name + '1')
# Modify the details of the priority
priority_url = self._tester.url + "/admin/ticket/priority"
tc.go(priority_url)
tc.url(priority_url + '$')
tc.follow(name + '1')
tc.url(priority_url + '/' + name + '1')
tc.formvalue('modenum', 'name', name + '2')
tc.submit('save')
tc.url(priority_url + '$')
# Cancel more modifications
tc.go(priority_url)
tc.follow(name)
tc.formvalue('modenum', 'name', name + '3')
tc.submit('cancel')
tc.url(priority_url + '$')
# Verify that only the correct modifications show up
tc.notfind(name + '1')
tc.find(name + '2')
tc.notfind(name + '3')
class TestAdminPriorityRenumber(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin renumber priorities"""
valuesRE = re.compile('<select name="value_([0-9]+)">', re.M)
html = b.get_html()
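        # NOTE: relies on the previous test leaving the browser on the
        # priority admin page, so the current HTML still lists the
        # value_<n> selects we scan for the highest order number.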
max_priority = max([int(x) for x in valuesRE.findall(html)])
name = "RenumberPriority"
self._tester.create_priority(name + '1')
self._tester.create_priority(name + '2')
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.find(name + '1')
tc.find(name + '2')
tc.formvalue('enumtable', 'value_%s' % (max_priority + 1), str(max_priority + 2))
tc.formvalue('enumtable', 'value_%s' % (max_priority + 2), str(max_priority + 1))
tc.submit('apply')
tc.url(priority_url + '$')
# Verify that their order has changed.
tc.find(name + '2.*' + name + '1', 's')
class TestAdminPriorityRenumberDup(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin badly renumber priorities"""
# Make the first priority the 2nd priority, and leave the 2nd priority
# as the 2nd priority.
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.formvalue('enumtable', 'value_1', '2')
tc.submit('apply')
tc.url(priority_url + '$')
tc.find('Order numbers must be unique')
class TestAdminResolution(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create resolution"""
self._tester.create_resolution()
class TestAdminResolutionDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate resolution"""
name = "DuplicateResolution"
self._tester.create_resolution(name)
self._tester.create_resolution(name)
tc.find('Resolution %s already exists' % name)
class TestAdminSeverity(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create severity"""
self._tester.create_severity()
class TestAdminSeverityDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate severity"""
name = "DuplicateSeverity"
self._tester.create_severity(name)
self._tester.create_severity(name)
tc.find('Severity %s already exists' % name)
class TestAdminType(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create type"""
self._tester.create_type()
class TestAdminTypeDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate type"""
name = "DuplicateType"
self._tester.create_type(name)
self._tester.create_type(name)
tc.find('Type %s already exists' % name)
class TestAdminVersion(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create version"""
self._tester.create_version()
class TestAdminVersionDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate version"""
name = "DuplicateVersion"
self._tester.create_version(name)
version_admin = self._tester.url + "/admin/ticket/versions"
tc.go(version_admin)
tc.url(version_admin)
tc.formvalue('addversion', 'name', name)
tc.submit()
tc.notfind(internal_error)
tc.find("Version %s already exists." % name)
class TestAdminVersionDetail(FunctionalTwillTestCaseSetup):
# This is somewhat pointless... the only place to find the version
# description is on the version details page.
def runTest(self):
"""Admin version details"""
name = "DetailVersion"
self._tester.create_version(name)
version_admin = self._tester.url + "/admin/ticket/versions"
tc.go(version_admin)
tc.url(version_admin)
tc.follow(name)
desc = 'Some version description.'
tc.formvalue('modifyversion', 'description', desc)
tc.submit('save')
tc.url(version_admin)
tc.follow(name)
tc.find(desc)
class TestAdminVersionDetailTime(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin version detail set time"""
name = "DetailTimeVersion"
self._tester.create_version(name)
version_admin = self._tester.url + "/admin/ticket/versions"
tc.go(version_admin)
tc.url(version_admin)
tc.follow(name)
tc.formvalue('modifyversion', 'time', '')
tc.submit('save')
tc.url(version_admin + '$')
tc.find(name + '(<[^>]*>|\\s)*<[^>]* name="default" value="%s"' % name, 's')
class TestAdminVersionDetailCancel(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin version details"""
name = "DetailVersion"
self._tester.create_version(name)
version_admin = self._tester.url + "/admin/ticket/versions"
tc.go(version_admin)
tc.url(version_admin)
tc.follow(name)
desc = 'Some other version description.'
tc.formvalue('modifyversion', 'description', desc)
tc.submit('cancel')
tc.url(version_admin)
tc.follow(name)
tc.notfind(desc)
class TestAdminVersionRemove(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove version"""
name = "VersionRemove"
self._tester.create_version(name)
version_url = self._tester.url + "/admin/ticket/versions"
tc.go(version_url)
tc.formvalue('version_table', 'sel', name)
tc.submit('remove')
tc.url(version_url + '$')
tc.notfind(name)
class TestAdminVersionRemoveMulti(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove multiple versions"""
name = "MultiRemoveVersion"
count = 3
for i in range(count):
self._tester.create_version("%s%s" % (name, i))
version_url = self._tester.url + '/admin/ticket/versions'
tc.go(version_url)
tc.url(version_url + '$')
for i in range(count):
tc.find("%s%s" % (name, i))
for i in range(count):
tc.formvalue('version_table', 'sel', "%s%s" % (name, i))
tc.submit('remove')
tc.url(version_url + '$')
for i in range(count):
tc.notfind("%s%s" % (name, i))
class TestAdminVersionNonRemoval(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove no selected version"""
version_url = self._tester.url + "/admin/ticket/versions"
tc.go(version_url)
tc.formvalue('version_table', 'remove', 'Remove selected items')
tc.submit('remove')
tc.find('No version selected')
class TestAdminVersionDefault(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin set default version"""
name = "DefaultVersion"
self._tester.create_version(name)
version_url = self._tester.url + "/admin/ticket/versions"
tc.go(version_url)
tc.formvalue('version_table', 'default', name)
tc.submit('apply')
tc.find('type="radio" name="default" value="%s" checked="checked"' % \
name)
# verify it is the default on the newticket page.
tc.go(self._tester.url + '/newticket')
tc.find('<option selected="selected" value="%s">%s</option>'
% (name, name))
class TestNewReport(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Create a new report"""
self._tester.create_report(
'Closed tickets, modified in the past 7 days by owner.',
'SELECT DISTINCT p.value AS __color__,'
' id AS ticket,'
' summary, component, milestone, t.type AS type,'
' reporter, time AS created,'
' changetime AS modified, description AS _description,'
' priority,'
' round(julianday(\'now\') - '
' julianday(changetime, \'unixepoch\')) as days,'
' resolution,'
' owner as __group__'
' FROM ticket t'
' LEFT JOIN enum p ON p.name = t.priority AND '
' p.type = \'priority\''
' WHERE ((julianday(\'now\') -'
' julianday(changetime, \'unixepoch\')) < 7)'
' AND status = \'closed\''
' ORDER BY __group__, changetime, p.value',
'List of all tickets that are closed, and have been modified in'
' the past 7 days, grouped by owner.\n\n(So they have probably'
' been closed this week.)')
class RegressionTestRev5665(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create version without release time (r5665)"""
self._tester.create_version(releasetime='')
class RegressionTestRev5994(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of the column label fix in r5994"""
env = self._testenv.get_trac_environment()
env.config.set('ticket-custom', 'custfield', 'text')
env.config.set('ticket-custom', 'custfield.label', 'Custom Field')
env.config.save()
try:
self._testenv.restart()
self._tester.go_to_query()
tc.find('<label>( |\\n)*<input[^<]*value="custfield"'
'[^<]*/>( |\\n)*Custom Field( |\\n)*</label>', 's')
finally:
pass
#env.config.set('ticket', 'restrict_owner', 'no')
#env.config.save()
#self._testenv.restart()
class RegressionTestTicket4447(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/4447"""
ticketid = self._tester.create_ticket(summary="Hello World")
env = self._testenv.get_trac_environment()
env.config.set('ticket-custom', 'newfield', 'text')
env.config.set('ticket-custom', 'newfield.label',
'Another Custom Field')
env.config.save()
try:
self._testenv.restart()
self._tester.go_to_ticket(ticketid)
self._tester.add_comment(ticketid)
tc.notfind('deleted')
tc.notfind('set to')
finally:
pass
class RegressionTestTicket4630a(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/4630 a"""
env = self._testenv.get_trac_environment()
env.config.set('ticket', 'restrict_owner', 'yes')
env.config.save()
try:
self._testenv.restart()
# Make sure 'user' has logged in.
self._tester.go_to_front()
self._tester.logout()
self._tester.login('user')
self._tester.logout()
self._tester.login('admin')
ticket_id = self._tester.create_ticket()
self._tester.go_to_ticket(ticket_id)
tc.formvalue('propertyform', 'action', 'reassign')
tc.find('reassign_reassign_owner')
tc.formvalue('propertyform', 'action_reassign_reassign_owner', 'user')
tc.submit('submit')
finally:
# Undo the config change for now since this (failing)
# regression test causes problems for later tests.
env.config.set('ticket', 'restrict_owner', 'no')
env.config.save()
self._testenv.restart()
class RegressionTestTicket4630b(FunctionalTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/4630 b"""
# NOTE: this must be run after RegressionTestTicket4630 (user must
# have logged in)
from trac.perm import PermissionSystem
env = self._testenv.get_trac_environment()
perm = PermissionSystem(env)
users = perm.get_users_with_permission('TRAC_ADMIN')
self.assertEqual(users, ['admin'])
users = perm.get_users_with_permission('TICKET_MODIFY')
self.assertEqual(users, ['admin', 'user'])
class RegressionTestTicket5022(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5022
"""
summary = 'RegressionTestTicket5022'
ticket_id = self._tester.create_ticket(summary=summary)
tc.go(self._tester.url + '/newticket?id=%s' % ticket_id)
tc.notfind(summary)
class RegressionTestTicket5394a(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5394 a
Order user list alphabetically in (re)assign action
"""
# set restrict_owner config
env = self._testenv.get_trac_environment()
env.config.set('ticket', 'restrict_owner', 'yes')
env.config.save()
self._testenv.restart()
self._tester.go_to_front()
self._tester.logout()
test_users = ['alice', 'bob', 'jane', 'john', 'charlie', 'alan',
'zorro']
        # Apparently it takes a second for a new user to be recognized by the
        # environment. So we add all the users first, then log in as each of
        # them in a second loop. This should be faster than adding a sleep(1)
        # between the .adduser and .login steps.
for user in test_users:
self._testenv.adduser(user)
for user in test_users:
self._tester.login(user)
self._tester.logout()
self._tester.login('admin')
ticketid = self._tester.create_ticket("regression test 5394a")
self._tester.go_to_ticket(ticketid)
options = 'id="action_reassign_reassign_owner">' + \
''.join(['<option[^>]*>%s</option>' % user for user in
sorted(test_users + ['admin', 'user'])])
tc.find(options, 's')
# We don't have a good way to fully delete a user from the Trac db.
# Once we do, we may want to cleanup our list of users here.
class RegressionTestTicket5394b(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5394 b
Order user list alphabetically on new ticket page
"""
# Must run after RegressionTestTicket5394a
self._tester.go_to_front()
tc.follow('New Ticket')
tc.find('Create New Ticket')
test_users = ['alice', 'bob', 'jane', 'john', 'charlie', 'alan',
'zorro']
options = 'id="field-owner"[^>]*>[[:space:]]*<option/>.*' + \
'.*'.join(['<option[^>]*>%s</option>' % user for user in
sorted(test_users + ['admin', 'user'])])
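        # The strict markup pattern above is superseded by a simpler
        # order-only check: matching the sorted names in sequence is enough.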
options = '.*'.join(sorted(test_users + ['admin', 'user']))
tc.find(options, 's')
# TODO: this should probably be changed to be a testsuite derived from
# TestSetup
class RegressionTestTicket5497prep(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5497 prep
When the component is changed, the owner should update to the
default owner of the component.
If component is changed and the owner is changed (reassigned action
for open tickets in the basic workflow), the owner should be the
specified owner, not the owner of the component.
"""
# The default owner for the component we're using for this testcase
# is 'user', and we'll manually assign to 'admin'.
self._tester.create_component('regression5497', 'user')
class RegressionTestTicket5497a(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5497 a
Open ticket, component changed, owner not changed"""
ticketid = self._tester.create_ticket("regression test 5497a")
self._tester.go_to_ticket(ticketid)
tc.formvalue('propertyform', 'field-component', 'regression5497')
tc.submit('submit')
tc.find(regex_owned_by('user'))
class RegressionTestTicket5497b(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5497 b
Open ticket, component changed, owner changed"""
ticketid = self._tester.create_ticket("regression test 5497b")
self._tester.go_to_ticket(ticketid)
tc.formvalue('propertyform', 'field-component', 'regression5497')
tc.formvalue('propertyform', 'action', 'reassign')
tc.formvalue('propertyform', 'action_reassign_reassign_owner', 'admin')
tc.submit('submit')
tc.notfind(regex_owned_by('user'))
tc.find(regex_owned_by('admin'))
class RegressionTestTicket5497c(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5497 c
New ticket, component changed, owner not changed"""
ticketid = self._tester.create_ticket("regression test 5497c",
{'component':'regression5497'})
self._tester.go_to_ticket(ticketid)
tc.find(regex_owned_by('user'))
class RegressionTestTicket5497d(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5497 d
New ticket, component changed, owner changed"""
ticketid = self._tester.create_ticket("regression test 5497d",
{'component':'regression5497', 'owner':'admin'})
self._tester.go_to_ticket(ticketid)
tc.find(regex_owned_by('admin'))
class RegressionTestTicket5602(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5602"""
# Create a set of tickets, and assign them all to a milestone
milestone = self._tester.create_milestone()
ids = [self._tester.create_ticket() for x in range(5)]
        for x in ids:
            self._tester.ticket_set_milestone(x, milestone)
# Need a ticket in each state: new, assigned, accepted, closed,
# reopened
# leave ids[0] as new
# make ids[1] be assigned
self._tester.go_to_ticket(ids[1])
tc.formvalue('propertyform', 'action', 'reassign')
tc.formvalue('propertyform', 'action_reassign_reassign_owner', 'admin')
tc.submit('submit')
# make ids[2] be accepted
self._tester.go_to_ticket(ids[2])
tc.formvalue('propertyform', 'action', 'accept')
tc.submit('submit')
# make ids[3] be closed
self._tester.go_to_ticket(ids[3])
tc.formvalue('propertyform', 'action', 'resolve')
tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
tc.submit('submit')
# make ids[4] be reopened
self._tester.go_to_ticket(ids[4])
tc.formvalue('propertyform', 'action', 'resolve')
tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
tc.submit('submit')
# FIXME: we have to wait a second to avoid "IntegrityError: columns
# ticket, time, field are not unique"
time.sleep(1)
tc.formvalue('propertyform', 'action', 'reopen')
tc.submit('submit')
tc.show()
tc.notfind("Python Traceback")
# Go to the milestone and follow the links to the closed and active
# tickets.
tc.go(self._tester.url + "/roadmap")
tc.follow(milestone)
tc.follow("Closed ticket:")
tc.find("Resolution:[ \t\n]+fixed")
tc.back()
tc.follow("Active tickets:")
tc.find("Status:[ \t\n]+new")
tc.find("Status:[ \t\n]+assigned")
tc.find("Status:[ \t\n]+accepted")
tc.notfind("Status:[ \t\n]+closed")
tc.find("Status:[ \t\n]+reopened")
class RegressionTestTicket5687(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5687"""
self._tester.logout()
self._tester.login('user')
ticketid = self._tester.create_ticket()
self._tester.logout()
self._tester.login('admin')
class RegressionTestTicket5930(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5930
TypeError: from_string() takes exactly 3 non-keyword arguments (4
given)
Caused by a saved query
"""
self._tester.create_report('Saved Query', 'query:version=1.0', '')
tc.notfind(internal_error)
# TODO: Add a testcase for the following:
# Can you also throw in addition of a 1.0 ticket and a 2.0 ticket
# as part of the demo env, then see that only the correct one shows
# up in the report?
class RegressionTestTicket6048(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6048"""
# Setup the DeleteTicket plugin
plugin = open(os.path.join(self._testenv.command_cwd, 'sample-plugins',
'workflow', 'DeleteTicket.py')).read()
open(os.path.join(self._testenv.tracdir, 'plugins', 'DeleteTicket.py'),
'w').write(plugin)
env = self._testenv.get_trac_environment()
prevconfig = env.config.get('ticket', 'workflow')
env.config.set('ticket', 'workflow',
prevconfig + ',DeleteTicketActionController')
env.config.save()
env = self._testenv.get_trac_environment() # reload environment
# Create a ticket and delete it
ticket_id = self._tester.create_ticket(
summary='RegressionTestTicket6048')
# (Create a second ticket so that the ticket id does not get reused
# and confuse the tester object.)
self._tester.create_ticket(summary='RegressionTestTicket6048b')
self._tester.go_to_ticket(ticket_id)
tc.find('delete ticket')
tc.formvalue('propertyform', 'action', 'delete')
tc.submit('submit')
self._tester.go_to_ticket(ticket_id)
tc.find('Error: Invalid ticket number')
tc.find('Ticket %s does not exist.' % ticket_id)
# Remove the DeleteTicket plugin
env.config.set('ticket', 'workflow', prevconfig)
env.config.save()
env = self._testenv.get_trac_environment() # reload environment
for ext in ('py', 'pyc', 'pyo'):
filename = os.path.join(self._testenv.tracdir, 'plugins',
'DeleteTicket.%s' % ext)
if os.path.exists(filename):
os.unlink(filename)
class RegressionTestTicket6747(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6747"""
env = self._testenv.get_trac_environment()
env.config.set('ticket-workflow', 'resolve.operations',
'set_resolution,set_owner')
env.config.set('ticket-workflow', 'resolve.set_owner',
'a_specified_owner')
env.config.save()
try:
self._testenv.restart()
ticket_id = self._tester.create_ticket("RegressionTestTicket6747")
self._tester.go_to_ticket(ticket_id)
tc.find("a_specified_owner")
tc.notfind("a_specified_owneras")
finally:
# Undo the config change to avoid causing problems for later
# tests.
env.config.set('ticket-workflow', 'resolve.operations',
'set_resolution')
env.config.remove('ticket-workflow', 'resolve.set_owner')
env.config.save()
self._testenv.restart()
class RegressionTestTicket6879a(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6879 a
Make sure that previewing a close does not make the available actions
be those for the close status.
"""
# create a ticket, then preview resolving the ticket twice
ticket_id = self._tester.create_ticket("RegressionTestTicket6879 a")
self._tester.go_to_ticket(ticket_id)
tc.formvalue('propertyform', 'action', 'resolve')
tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
tc.submit('preview')
tc.formvalue('propertyform', 'action', 'resolve')
tc.submit('preview')
class RegressionTestTicket6879b(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6879 a
Make sure that previewing a close does not make the available actions
be those for the close status.
"""
# create a ticket, then preview resolving the ticket twice
ticket_id = self._tester.create_ticket("RegressionTestTicket6879 b")
self._tester.go_to_ticket(ticket_id)
tc.formvalue('propertyform', 'action', 'resolve')
tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
tc.submit('preview')
tc.formvalue('propertyform', 'action', 'resolve')
tc.submit('submit')
class RegressionTestTicket6912a(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6912 a"""
try:
self._tester.create_component(name='RegressionTestTicket6912a',
user='')
except twill.utils.ClientForm.ItemNotFoundError, e:
raise twill.errors.TwillAssertionError(e)
class RegressionTestTicket6912b(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6912 b"""
self._tester.create_component(name='RegressionTestTicket6912b',
user='admin')
tc.follow('RegressionTestTicket6912b')
try:
tc.formvalue('modcomp', 'owner', '')
except twill.utils.ClientForm.ItemNotFoundError, e:
raise twill.errors.TwillAssertionError(e)
tc.formvalue('modcomp', 'save', 'Save')
tc.submit()
tc.find('RegressionTestTicket6912b</a>[ \n\t]*</td>[ \n\t]*'
'<td class="owner"></td>', 's')
class RegressionTestTicket8247(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/8247
Author field of ticket comment corresponding to the milestone removal
was always 'anonymous'."""
name = "MilestoneRemove"
self._tester.create_milestone(name)
id = self._tester.create_ticket(info={'milestone': name})
ticket_url = self._tester.url + "/ticket/%d" % id
tc.go(ticket_url)
tc.find(name)
tc.go(self._tester.url + "/admin/ticket/milestones")
tc.formvalue('milestone_table', 'sel', name)
tc.submit('remove')
tc.go(ticket_url)
tc.find('<strong>Milestone</strong>[ \n\t]*<em>%s</em> deleted' % name)
tc.find('Changed <a.*</a> ago by admin')
tc.notfind('anonymous')
class RegressionTestTicket8861(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/8816
When creating a milestone with an already existing name, you get
a warning. After changing the name you will find that the original
milestone with that name is renamed instead of a new one being
created."""
name = "8861Milestone"
self._tester.create_milestone(name)
tc.go(self._tester.url + "/milestone?action=new")
tc.formvalue('edit', 'name', name)
tc.submit('Add milestone')
tc.find('Milestone "%s" already exists' % name)
tc.formvalue('edit', 'name', name + '__')
tc.submit('Add milestone')
tc.go(self._tester.url + "/roadmap")
tc.find('Milestone: <em>%s</em>' % name)
tc.find('Milestone: <em>%s</em>' % (name + '__'))
def functionalSuite(suite=None):
if not suite:
import trac.tests.functional.testcases
suite = trac.tests.functional.testcases.functionalSuite()
suite.addTest(TestTickets())
suite.addTest(TestTicketPreview())
suite.addTest(TestTicketNoSummary())
suite.addTest(TestTicketAltFormats())
suite.addTest(TestTicketCSVFormat())
suite.addTest(TestTicketTabFormat())
suite.addTest(TestTicketRSSFormat())
suite.addTest(TestTicketSearch())
suite.addTest(TestNonTicketSearch())
suite.addTest(TestTicketHistory())
suite.addTest(TestTicketHistoryDiff())
suite.addTest(TestTicketQueryLinks())
suite.addTest(TestTicketQueryOrClause())
suite.addTest(TestTimelineTicketDetails())
suite.addTest(TestAdminComponent())
suite.addTest(TestAdminComponentDuplicates())
suite.addTest(TestAdminComponentRemoval())
suite.addTest(TestAdminComponentNonRemoval())
suite.addTest(TestAdminComponentDefault())
suite.addTest(TestAdminComponentDetail())
suite.addTest(TestAdminMilestone())
suite.addTest(TestAdminMilestoneSpace())
suite.addTest(TestAdminMilestoneDuplicates())
suite.addTest(TestAdminMilestoneDetail())
suite.addTest(TestAdminMilestoneDue())
suite.addTest(TestAdminMilestoneDetailDue())
suite.addTest(TestAdminMilestoneCompleted())
suite.addTest(TestAdminMilestoneCompletedFuture())
suite.addTest(TestAdminMilestoneRemove())
suite.addTest(TestAdminMilestoneRemoveMulti())
suite.addTest(TestAdminMilestoneNonRemoval())
suite.addTest(TestAdminMilestoneDefault())
    suite.addTest(TestAdminPriority())
    suite.addTest(TestAdminPriorityDuplicates())
    suite.addTest(TestAdminPriorityModify())
suite.addTest(TestAdminPriorityRemove())
suite.addTest(TestAdminPriorityRemoveMulti())
suite.addTest(TestAdminPriorityNonRemoval())
suite.addTest(TestAdminPriorityDefault())
suite.addTest(TestAdminPriorityDetail())
suite.addTest(TestAdminPriorityRenumber())
suite.addTest(TestAdminPriorityRenumberDup())
suite.addTest(TestAdminResolution())
suite.addTest(TestAdminResolutionDuplicates())
suite.addTest(TestAdminSeverity())
suite.addTest(TestAdminSeverityDuplicates())
suite.addTest(TestAdminType())
suite.addTest(TestAdminTypeDuplicates())
suite.addTest(TestAdminVersion())
suite.addTest(TestAdminVersionDuplicates())
suite.addTest(TestAdminVersionDetail())
suite.addTest(TestAdminVersionDetailTime())
suite.addTest(TestAdminVersionDetailCancel())
suite.addTest(TestAdminVersionRemove())
suite.addTest(TestAdminVersionRemoveMulti())
suite.addTest(TestAdminVersionNonRemoval())
suite.addTest(TestAdminVersionDefault())
suite.addTest(TestNewReport())
suite.addTest(RegressionTestRev5665())
suite.addTest(RegressionTestRev5994())
suite.addTest(RegressionTestTicket4447())
suite.addTest(RegressionTestTicket4630a())
suite.addTest(RegressionTestTicket4630b())
suite.addTest(RegressionTestTicket5022())
suite.addTest(RegressionTestTicket5394a())
suite.addTest(RegressionTestTicket5394b())
suite.addTest(RegressionTestTicket5497prep())
suite.addTest(RegressionTestTicket5497a())
suite.addTest(RegressionTestTicket5497b())
suite.addTest(RegressionTestTicket5497c())
suite.addTest(RegressionTestTicket5497d())
suite.addTest(RegressionTestTicket5602())
suite.addTest(RegressionTestTicket5687())
suite.addTest(RegressionTestTicket5930())
suite.addTest(RegressionTestTicket6048())
suite.addTest(RegressionTestTicket6747())
suite.addTest(RegressionTestTicket6879a())
suite.addTest(RegressionTestTicket6879b())
suite.addTest(RegressionTestTicket6912a())
suite.addTest(RegressionTestTicket6912b())
suite.addTest(RegressionTestTicket8247())
suite.addTest(RegressionTestTicket8861())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='functionalSuite')
|
|
'''
BSD 3-Clause License
Copyright (c) 2019, Donald N. Bockoven III
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import Tkinter as tk
import tkFont
import tkMessageBox
class Master_window:
def __init__(self, master):
## BUILD SHAPE DICTIONARY AND ATTRIBUTE LISTS
        shape_file = open('aisc_shapes_historic_clean.csv', 'r')  # avoid shadowing the builtin 'file'
        data_raw = shape_file.readlines()
        shape_file.close()
        defs_file = open('aisc_historic_shape_defs.csv', 'r')
        defs_raw = defs_file.readlines()
        defs_file.close()
self.shape_defs = []
data = []
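        # Split each CSV row into fields and strip the trailing newline
        # from the last column.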
for line in data_raw:
line = line.split(',')
line[-1]=line[-1].rstrip('\n')
data.append(line)
for line in defs_raw:
line = line.split(',')
line[-1]=line[-1].rstrip('\n')
self.shape_defs.append(line)
        edition_raw = [d[0] for d in data[1:]]
self.edition = list(set(edition_raw))
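        # shape_sets and shapes are parallel lists indexed by edition:
        # shape_sets[i] holds the shape-type names for edition i, and
        # shapes[i][j] maps section names to their property rows.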
self.shape_sets = []
self.shapes = []
for x in range(0,len(self.edition)):
self.shape_sets.append([])
self.shapes.append([])
for d in data[1:]:
index = self.edition.index(d[0])
self.shape_sets[index].append(d[2])
for i in range(0,len(self.shape_sets)):
s = list(set(self.shape_sets[i]))
self.shape_sets[i] = s
self.values_list = data[0][4:]
for x in range(0,len(self.shape_sets)):
for y in range(0,len(self.shape_sets[x])):
self.shapes[x].append({})
count = 1
shape = ''
st=''
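        # Section names that repeat within an edition get a numeric suffix
        # (_1, _2, ...) so each dictionary key stays unique.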
for d in data[1:]:
index_1 = self.edition.index(d[0])
index_2 = self.shape_sets[index_1].index(d[2])
if d[3] == shape[:-1*len(st)] or d[3] == shape:
st = '_{0}'.format(count)
shape = d[3]+st
count+=1
else:
shape = d[3]
st=''
count=1
shape_data = d[4:]
temp_shape_dict = {shape: shape_data}
self.shapes[index_1][index_2].update(temp_shape_dict)
## BEGIN BUILDING MAIN GUI WINDOW ##
self.widgets=[]
self.master = master
self.f_size = 8
helv = tkFont.Font(family='Helvetica',size=self.f_size, weight='bold')
self.menubar = tk.Menu(self.master)
self.menu = tk.Menu(self.menubar, tearoff=0)
self.menu_props = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label = "File", menu=self.menu)
self.menu.add_command(label="Quit", command=self.quit_app)
self.menubar.add_cascade(label = "Window Properties", menu=self.menu_props)
self.menu_props.add_command(label="Increase Font Size", command=self.font_size_up)
self.menu_props.add_command(label="Decrease Font Size", command=self.font_size_down)
try:
self.master.config(menu=self.menubar)
except AttributeError:
self.master.tk.call(master, "config", "-menu", self.menubar)
#Main Frames
self.main_frame = tk.Frame(master, bd=2, relief='sunken', padx=5,pady=5)
self.main_frame.pack(anchor='c', padx= 5, pady= 5, fill=tk.BOTH, expand=1)
self.base_frame = tk.Frame(master, bd=2, relief='sunken', padx=5,pady=5)
self.base_frame.pack(side=tk.BOTTOM, padx= 5, pady= 5, fill=tk.X, expand=1)
#Picker Frame
self.picker_frame = tk.Frame(self.main_frame, padx=2, pady=2)
self.menu_frame = tk.Frame(self.picker_frame, padx=0, pady=0)
self.edition_type = tk.StringVar()
self.edition_type.set(self.edition[0])
self.edition_type_label = tk.Label(self.menu_frame, text="AISC Edition : ", font=helv)
self.widgets.append(self.edition_type_label)
self.edition_type_label.pack(side=tk.TOP, fill=tk.X, expand=True)
self.edition_type_menu = tk.OptionMenu(self.menu_frame, self.edition_type, *self.edition, command=self.edition_change)
self.edition_type_menu.config(font=helv)
self.edition_type_menu.pack(side=tk.TOP, fill=tk.X, expand=True)
self.shape_type = tk.StringVar()
self.shape_type.set(self.shape_sets[0][0])
self.shape_type_label = tk.Label(self.menu_frame, text="Steel Shape Type : ", font=helv)
self.widgets.append(self.shape_type_label)
self.shape_type_label.pack(side=tk.TOP, fill=tk.X, expand=True)
self.shape_type_menu = tk.OptionMenu(self.menu_frame, self.shape_type, *self.shape_sets[0], command=self.shape_change)
self.shape_type_menu.config(font=helv)
self.shape_type_menu.pack(side=tk.TOP, fill=tk.X, expand=True)
self.menu_frame.pack(side=tk.TOP, fill=tk.X, expand=True)
self.shape_frame = tk.LabelFrame(self.picker_frame, text="Section:", bd=1, relief='sunken', padx=5, pady=5, font=helv)
self.widgets.append(self.shape_frame)
self.shape_menu = tk.Listbox(self.shape_frame, height = 20, width = 40, font=helv)
self.widgets.append(self.shape_menu)
for section in sorted(self.shapes[0][0].keys()):
self.shape_menu.insert(tk.END, section)
self.shape_menu.pack(side=tk.LEFT, fill=tk.Y, expand=True)
self.shape_scrollbar = tk.Scrollbar(self.shape_frame, orient="vertical")
self.shape_menu.config(yscrollcommand=self.shape_scrollbar.set)
self.shape_scrollbar.config(command=self.shape_menu.yview)
self.shape_scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.shape_menu.bind("<<ListboxSelect>>",self.shape_click)
self.shape_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
self.picker_frame.pack(side=tk.LEFT)
self.data_frame = tk.LabelFrame(self.main_frame, text="Section Properties - AISC 14th Edition:", bd=1, relief='sunken', padx=5, pady=5, font=helv)
self.widgets.append(self.data_frame)
self.properties_labels = []
for i in range(0,len(self.values_list)):
self.properties_labels.append('{0}: -- {1}'.format(self.values_list[i],self.shape_defs[i][1]))
self.properties_list = tk.Listbox(self.data_frame, height = 40, width = 30, font=helv, exportselection=0)
for line in self.properties_labels:
self.properties_list.insert(tk.END, line)
self.properties_list.pack(side=tk.LEFT, fill=tk.Y, expand=False)
self.properties_list.bind("<<ListboxSelect>>",self.prop_list_click)
self.widgets.append(self.properties_list)
self.properties_list_values = tk.Listbox(self.data_frame, height = 40, width = 15, font=helv, exportselection=0)
self.properties_list_values.pack(side=tk.LEFT, fill=tk.Y, expand=False)
self.properties_list_values.bind("<<ListboxSelect>>",self.prop_list_value_click)
self.widgets.append(self.properties_list_values)
self.data_frame.pack(side=tk.LEFT)
self.f_size_frame = tk.Frame(self.base_frame, padx=5,pady=5)
self.f_size_label = tk.Label(self.f_size_frame, text='Font Size ('+str(self.f_size)+'):', font=helv)
self.widgets.append(self.f_size_label)
self.f_size_label.grid(row=0,column=0)
self.b_f_size_minus = tk.Button(self.f_size_frame,text="-", command=self.font_size_down, font=helv)
self.widgets.append(self.b_f_size_minus)
self.b_f_size_minus.grid(row=0, column=1, padx=1, pady=1)
self.b_f_size_plus = tk.Button(self.f_size_frame,text="+", command=self.font_size_up, font=helv)
self.widgets.append(self.b_f_size_plus)
self.b_f_size_plus.grid(row=0, column=2, padx=1, pady=1)
self.f_size_frame.pack(side=tk.LEFT)
self.value_def_frame = tk.Frame(self.base_frame, padx=5,pady=5)
self.value_def = tk.StringVar()
self.value_def.set(self.values_list[0])
self.value_def_menu = tk.OptionMenu(self.value_def_frame, self.value_def, *self.values_list, command=self.value_definitions)
self.value_def_menu.config(font=helv)
self.value_def_menu.grid(row=0, column=0, padx=1, pady=1)
self.value_def_label = tk.Label(self.value_def_frame, text=self.shape_defs[0][0], font=helv, wraplength=400, justify=tk.LEFT)
self.widgets.append(self.value_def_label)
self.value_def_label.grid(row=0, column=1, padx=10, pady=1)
filters = ['=','<','>','Between']
self.value_filter = tk.StringVar()
self.value_filter.set('=')
self.value_filter_menu = tk.OptionMenu(self.value_def_frame, self.value_filter, *filters, command=self.value_filter_menu_switch)
self.value_filter_menu.config(font=helv)
self.value_filter_menu.grid(row=1, column=0, padx=1, pady=1)
self.filter_a = tk.StringVar()
self.entry_filter_a = tk.Entry(self.value_def_frame,textvariable=self.filter_a, font = helv, width = 15)
self.widgets.append(self.entry_filter_a)
self.entry_filter_a.grid(row=1, column=1, padx=1, pady=1)
self.filter_b = tk.StringVar()
self.entry_filter_b = tk.Entry(self.value_def_frame,textvariable=self.filter_b, font = helv, width = 15)
self.widgets.append(self.entry_filter_b)
self.entry_filter_b.grid(row=2, column=1, padx=1, pady=1)
self.entry_filter_b.configure(state="disabled")
self.b_value_filter = tk.Button(self.value_def_frame,text="Filter By Selected Value", command=self.value_filter_function)
self.b_value_filter.grid(row=1, column=2, padx=1, pady=1)
self.widgets.append(self.b_value_filter)
self.b_reset_filter = tk.Button(self.value_def_frame,text="Reset Shape List", command=self.shape_change)
self.b_reset_filter.grid(row=1, column=3, padx=1, pady=1)
self.widgets.append(self.b_reset_filter)
self.value_def_frame.pack(side=tk.LEFT, padx= 5, pady= 5)
self.b_quit = tk.Button(self.base_frame,text="Quit", command=self.quit_app, font=helv)
self.widgets.append(self.b_quit)
self.b_quit.pack(side=tk.RIGHT)
self.license_display()
def license_display(self, *event):
license_string = ("Copyright (c) 2019, Donald N. Bockoven III\n"
"All rights reserved.\n\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\""
" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE"
" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE"
" DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE"
" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL"
" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR"
" SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER"
" CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,"
" OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE"
" OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n"
"https://github.com/buddyd16/Structural-Engineering/blob/master/LICENSE"
)
        tkMessageBox.showinfo("License Information", license_string)
self.master.focus_force()
def quit_app(self):
self.master.destroy()
self.master.quit()
def edition_change(self, *event):
self.shape_type_menu.destroy()
edition = self.edition_type.get()
edition_index = self.edition.index(edition)
self.shape_type_menu = tk.OptionMenu(self.menu_frame, self.shape_type, *self.shape_sets[edition_index], command=self.shape_change)
helv = tkFont.Font(family='Helvetica',size=self.f_size, weight='bold')
self.shape_type_menu.config(font=helv)
self.shape_type_menu.pack(side=tk.TOP, fill=tk.X, expand=True)
self.shape_type.set(self.shape_sets[edition_index][0])
def shape_change(self, *event):
self.shape_menu.delete(0,tk.END)
edition = self.edition_type.get()
edition_index = self.edition.index(edition)
new_shape_type = self.shape_type.get()
new_shape_type_index = self.shape_sets[edition_index].index(new_shape_type)
new_section_list = self.shapes[edition_index][new_shape_type_index]
        for section in sorted(new_section_list):
self.shape_menu.insert(tk.END, section)
self.shape_menu.selection_set(0)
self.shape_click()
self.shape_frame.configure(text='Section: ')
def prop_list_click(self, *event):
pindex = self.properties_list.index(self.properties_list.curselection())
self.properties_list_values.selection_clear(0,tk.END)
self.properties_list_values.selection_set(pindex)
def prop_list_value_click(self, *event):
pindex = self.properties_list_values.index(self.properties_list_values.curselection())
self.properties_list.selection_clear(0,tk.END)
self.properties_list.selection_set(pindex)
def shape_click(self, *event):
shape = self.shape_menu.get(self.shape_menu.curselection())
edition = self.edition_type.get()
edition_index = self.edition.index(edition)
shape_type = self.shape_type.get()
shape_type_index = self.shape_sets[edition_index].index(shape_type)
section_props = self.shapes[edition_index][shape_type_index].get(shape)
self.data_frame.configure(text="Section Properties - "+edition+": -- Selected Shape: "+shape)
props_counter = 0
props_list = []
props = []
l = 0
self.properties_list.delete(0,tk.END)
self.properties_list_values.delete(0,tk.END)
for i in range(0,len(self.values_list)):
if section_props[i] == '0':
pass
else:
if self.shape_defs[i][1] == '-':
string = '{0} : '.format(self.values_list[i])
props.append(string)
l=max(len(string),l)
self.properties_list_values.insert(tk.END, section_props[i])
else:
string = '{0} ({1}) : '.format(self.values_list[i],self.shape_defs[i][1])
props.append(string)
l=max(len(string),l)
self.properties_list_values.insert(tk.END, section_props[i])
props_list.append(self.values_list[i])
props_counter+=1
for line in props:
# x = len(line)
# count = (l+1) - x
# st = ' '*count
# line = st + line
self.properties_list.insert(tk.END,line)
self.properties_list.configure(height=len(props), width = l+1)
self.properties_list_values.configure(height=len(props))
self.value_def_menu.destroy()
self.value_def_menu = tk.OptionMenu(self.value_def_frame, self.value_def, *props_list, command=self.value_definitions)
helv = tkFont.Font(family='Helvetica',size=self.f_size, weight='bold')
self.value_def_menu.config(font=helv)
self.value_def_menu.grid(row=0, column=0, padx=1, pady=1)
self.value_def.set(props_list[0])
self.value_definitions()
def value_definitions(self, *event):
index = self.values_list.index(self.value_def.get())
self.value_def_label.configure(text = self.shape_defs[index][0])
def font_size_up(self, *event):
self.f_size = self.f_size+1
helv = tkFont.Font(family='Helvetica',size=self.f_size, weight='bold')
self.f_size_label.configure(text='Font Size ('+str(self.f_size)+'):')
self.edition_type_menu.config(font=helv)
self.shape_type_menu.config(font=helv)
for widget in self.widgets:
widget.configure(font=helv)
def font_size_down(self, *event):
if self.f_size-1 < 6:
self.f_size = 6
else:
self.f_size = self.f_size-1
helv = tkFont.Font(family='Helvetica',size=self.f_size, weight='bold')
self.f_size_label.configure(text='Font Size ('+str(self.f_size)+'):')
self.edition_type_menu.config(font=helv)
self.shape_type_menu.config(font=helv)
for widget in self.widgets:
widget.configure(font=helv)
def value_filter_menu_switch(self, *event):
option = self.value_filter.get()
if option == 'Between':
self.entry_filter_b.configure(state="normal")
else:
self.entry_filter_b.configure(state="disabled")
def value_filter_function(self, *event):
a = self.filter_a.get()
b = self.filter_b.get()
filter_type = self.value_filter.get()
self.shape_menu.delete(0,tk.END)
value_index = self.values_list.index(self.value_def.get())
edition = self.edition_type.get()
edition_index = self.edition.index(edition)
shape_type = self.shape_type.get()
shape_type_index = self.shape_sets[edition_index].index(shape_type)
filtered_section_list = []
d = self.shapes[edition_index][shape_type_index]
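        # d maps designation -> property-value list; the sort below orders the
        # designations numerically by the property picked in the definitions menu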
new_section_list = sorted(d, key=lambda k: (float(d[k][value_index])))
        # Default the frame label up front; self.shape_frame.configure(text=string)
        # at the end of this method would otherwise hit an unbound name when no
        # filter branch runs (empty entry, or 'Between' with an empty second bound).
        string = 'Section: '
        if a == '':
            pass
        else:
            if filter_type == 'Between' and b == '':
                pass
elif filter_type == 'Between':
string = 'Section: - Sorted By: {0} Between {1} and {2}'.format(self.values_list[value_index],a,b)
                # swap the bounds once, up front, if they were entered in reverse order
                if float(a) > float(b):
                    a, b = b, a
                for key in new_section_list:
if float(d[key][value_index]) >= float(a) and float(d[key][value_index]) <= float(b):
filtered_section_list.append(key)
else:
pass
elif filter_type == '<':
string = 'Section: - Sorted By: {0} < {1}'.format(self.values_list[value_index],a)
for key in new_section_list:
if float(d[key][value_index]) <= float(a):
filtered_section_list.append(key)
else:
pass
elif filter_type == '>':
string = 'Section: - Sorted By: {0} > {1}'.format(self.values_list[value_index],a)
for key in new_section_list:
if float(d[key][value_index]) >= float(a):
filtered_section_list.append(key)
else:
pass
elif filter_type == '=':
string = 'Section: - Sorted By: {0} = {1}'.format(self.values_list[value_index],a)
for key in new_section_list:
if float(d[key][value_index]) == float(a):
filtered_section_list.append(key)
else:
pass
if len(filtered_section_list) == 0:
self.shape_menu.delete(0,tk.END)
else:
for section in filtered_section_list:
self.shape_menu.insert(tk.END, section)
self.shape_frame.configure(text=string)
def main():
root = tk.Tk()
root.title("AISC 14th Edition - Shape Database")
app = Master_window(root)
root.minsize(800,600)
root.mainloop()
if __name__ == '__main__':
main()
|
|
import os
import logging
import numpy as np
import scipy.signal as signal
import scipy.ndimage as ndimage
import astropy.io.fits as pyfits
"""
About
=====
cosmics.py is a small and simple python module to detect and clean cosmic ray hits on images (numpy arrays or FITS), using scipy, and based on Pieter van Dokkum's L.A.Cosmic algorithm.
L.A.Cosmic = Laplacian cosmic ray detection
U{http://www.astro.yale.edu/dokkum/lacosmic/}
(article : U{http://arxiv.org/abs/astro-ph/0108003})
Additional features
===================
I pimped this a bit to suit my needs :
- Automatic recognition of saturated stars, including their full saturation trails.
This prevents such stars from being treated as big cosmics.
Indeed saturated stars tend to get even uglier when you try to clean them. Plus they
keep L.A.Cosmic iterations going on forever.
This feature is mainly for pretty-image production. It is optional, requires one more parameter (a CCD saturation level in ADU), and uses some
nicely robust morphology operations and object extraction.
- Scipy image analysis makes it possible to "label" the actual cosmic ray hits (i.e. group the pixels into local islands).
A bit special, but I use this in the scope of visualizing a PSF construction.
But otherwise the core is really a 1-to-1 implementation of L.A.Cosmic, and uses the same parameters.
Only the conventions on how filters are applied at the image edges might be different.
No surprise, this python module is much faster than the IRAF implementation, as it does not read/write every step to disk.
Usage
=====
Everything is in the file cosmics.py, all you need to do is import it. You need numpy, scipy, and astropy (for astropy.io.fits).
See the demo scripts for example usages (the second demo uses f2n.py to make pngs, and thus also needs PIL).
Your image should have clean borders, cut away prescan/overscan etc.
Todo
====
Ideas for future improvements :
- Add something reliable to detect negative glitches (dust on CCD or small traps)
- Top level functions to simply run all this on either numpy arrays or directly on FITS files
- Reduce memory usage ... easy
- Switch from signal to ndimage, homogenize mirror boundaries
Malte Tewes, January 2010
"""
__version__ = '0.4'
# We define the laplacian kernel to be used
laplkernel = np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]])
# Other kernels :
growkernel = np.ones((3, 3))
dilstruct = np.ones((5, 5)) # dilation structure for some morphological operations
dilstruct[0, 0] = 0
dilstruct[0, 4] = 0
dilstruct[4, 0] = 0
dilstruct[4, 4] = 0
# So this dilstruct looks like :
# 01110
# 11111
# 11111
# 11111
# 01110
# and is used to dilate saturated stars and connect cosmic rays.
class CosmicsImage:
def __init__(self, rawarray, pssl=0.0, gain=0, readnoise=3, sigclip=5.0, sigfrac=0.3, objlim=5.0,
satlevel=-1.0):
"""
        Constructor of the cosmic class; takes a 2D numpy array of your image as main argument.
        sigclip : laplacian-to-noise limit for cosmic ray detection.
        Increase this if you detect cosmics where there are none. Default is 5.0, a good value for earth-bound images.
        objlim : minimum contrast between laplacian image and fine structure image.
        Increase this if normal stars are detected as cosmics. Use 5.0 if your image is undersampled, HST, ...
        satlevel : if we find agglomerations of pixels above this level, we consider it to be a saturated star and
        do not try to correct any pixels around it. A negative satlevel skips this feature.
        pssl is the previously subtracted sky level !
real gain = 1.8 # gain (electrons/ADU) (0=unknown)
real readn = 6.5 # read noise (electrons) (0=unknown)
##gain0 string statsec = "*,*" # section to use for automatic computation of gain
real skyval = 0. # sky level that has been subtracted (ADU)
real sigclip = 3.0 # detection limit for cosmic rays (sigma)
real sigfrac = 0.5 # fractional detection limit for neighbouring pixels
real objlim = 3.0 # contrast limit between CR and underlying object
int niter = 1 # maximum number of iterations
"""
self.rawarray = rawarray + pssl # internally, we will always work "with sky".
self.cleanarray = self.rawarray.copy() # In lacosmiciteration() we work on this guy
self.mask = np.cast['bool'](np.zeros(self.rawarray.shape)) # All False, no cosmics yet
self.gain = gain
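        # NB: the noise model in lacosmiciteration() divides by self.gain, so the
        # gain=0 ("unknown") default cannot actually be used for detection as-is.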
self.readnoise = readnoise
self.sigclip = sigclip
self.objlim = objlim
self.sigcliplow = sigclip * sigfrac
self.satlevel = satlevel
self.pssl = pssl
self.backgroundlevel = None # only calculated and used if required.
self.satstars = None # a mask of the saturated stars, only calculated if required
def __str__(self):
"""
Gives a summary of the current state, including the number of cosmic pixels in the mask etc.
"""
stringlist = [
"Input array : (%i, %i), %s" % (self.rawarray.shape[0], self.rawarray.shape[1], self.rawarray.dtype.name),
"Current cosmic ray mask : %i pixels" % np.sum(self.mask)
]
if self.pssl != 0.0:
stringlist.append("Using a previously subtracted sky level of %f" % self.pssl)
if self.satstars is not None:
stringlist.append("Saturated star mask : %i pixels" % np.sum(self.satstars))
return "\n".join(stringlist)
def labelmask(self):
"""
Finds and labels the cosmic "islands" and returns a list of dicts containing their positions.
This is made on purpose for visualizations a la f2n.drawstarslist, but could be useful anyway.
"""
logging.debug("Labeling mask pixels ...")
# We morphologicaly dilate the mask to generously connect "sparse" cosmics :
# dilstruct = np.ones((5,5))
dilmask = ndimage.morphology.binary_dilation(self.mask, structure=dilstruct, iterations=1, mask=None,
output=None, border_value=0, origin=0, brute_force=False)
# origin = 0 means center
(labels, n) = ndimage.measurements.label(dilmask)
# print "Number of cosmic ray hits : %i" % n
# tofits(labels, "labels.fits", verbose = False)
slicecouplelist = ndimage.measurements.find_objects(labels)
# Now we have a huge list of couples of numpy slice objects giving a frame around each object
# For plotting purposes, we want to transform this into the center of each object.
if len(slicecouplelist) != n:
# This never happened, but you never know ...
raise RuntimeError("Mega error in labelmask !")
centers = [[(tup[0].start + tup[0].stop) / 2.0, (tup[1].start + tup[1].stop) / 2.0] for tup in slicecouplelist]
        # We also want to know how many pixels were affected by each cosmic ray.
# Why ? Dunno... it's fun and available in scipy :-)
sizes = ndimage.measurements.sum(self.mask.ravel(), labels.ravel(), np.arange(1, n + 1, 1))
retdictlist = [{"name": "%i" % size, "x": center[0], "y": center[1]} for (size, center) in zip(sizes, centers)]
logging.debug("Labeling done")
return retdictlist
def getdilatedmask(self, size=3):
"""
Returns a morphologically dilated copy of the current mask.
size = 3 or 5 decides how to dilate.
"""
if size == 3:
dilmask = ndimage.morphology.binary_dilation(self.mask, structure=growkernel, iterations=1, mask=None,
output=None, border_value=0, origin=0, brute_force=False)
elif size == 5:
dilmask = ndimage.morphology.binary_dilation(self.mask, structure=dilstruct, iterations=1, mask=None,
output=None, border_value=0, origin=0, brute_force=False)
else:
dilmask = self.mask.copy()
return dilmask
def clean(self, mask=None):
"""
Given the mask, we replace the actual problematic pixels with the masked 5x5 median value.
This mimics what is done in L.A.Cosmic, but it's a bit harder to do in python, as there is no
readymade masked median. So for now we do a loop...
Saturated stars, if calculated, are also masked : they are not "cleaned", but their pixels are not
used for the interpolation.
We will directly change self.cleanimage. Instead of using the self.mask, you can supply your
own mask as argument. This might be useful to apply this cleaning function iteratively.
But for the true L.A.Cosmic, we don't use this, i.e. we use the full mask at each iteration.
"""
if mask is None:
mask = self.mask
logging.debug("Cleaning cosmic affected pixels ...")
# So... mask is a 2D array containing False and True, where True means "here is a cosmic"
# We want to loop through these cosmics one by one.
cosmicindices = np.argwhere(mask)
# This is a list of the indices of cosmic affected pixels.
# print cosmicindices
# We put cosmic ray pixels to np.Inf to flag them :
self.cleanarray[mask] = np.Inf
# Now we want to have a 2 pixel frame of Inf padding around our image.
w = self.cleanarray.shape[0]
h = self.cleanarray.shape[1]
padarray = np.zeros((w + 4, h + 4)) + np.Inf
padarray[2:w + 2, 2:h + 2] = self.cleanarray.copy() # that copy is important, we need 2 independent arrays
# The medians will be evaluated in this padarray, skipping the np.Inf.
# Now in this copy called padarray, we also put the saturated stars to np.Inf, if available :
if self.satstars is not None:
padarray[2:w + 2, 2:h + 2][self.satstars] = np.Inf
# Viva python, I tested this one, it works...
# A loop through every cosmic pixel :
for cosmicpos in cosmicindices:
x = cosmicpos[0]
y = cosmicpos[1]
cutout = padarray[x:x + 5, y:y + 5].ravel() # remember the shift due to the padding !
# print cutout
# Now we have our 25 pixels, some of them are np.Inf, and we want to take the median
goodcutout = cutout[cutout != np.Inf]
            # (np.alen() was removed from modern numpy; ndarray.size does the same job)
            if goodcutout.size >= 25:
                # This never happened, but you never know ...
                raise RuntimeError("Mega error in clean !")
            elif goodcutout.size > 0:
replacementvalue = np.median(goodcutout)
else:
# i.e. no good pixels : Shit, a huge cosmic, we will have to improvise ...
logging.warning("OH NO, I HAVE A HUUUUUUUGE COSMIC !!!!!")
replacementvalue = self.guessbackgroundlevel()
# We update the cleanarray,
# but measure the medians in the padarray, so to not mix things up...
self.cleanarray[x, y] = replacementvalue
# That's it.
logging.debug("Cleaning done")
# FYI, that's how the LACosmic cleaning looks in iraf :
"""
imarith(outmask,"+",finalsel,outmask)
imreplace(outmask,1,lower=1,upper=INDEF) # ok so outmask = 1 are the cosmics
imcalc(outmask,inputmask,"(1.-10000.*im1)",verb-)
imarith(oldoutput,"*",inputmask,inputmask)
median(inputmask,med5,5,5,zloreject=-9999,zhi=INDEF,verb-)
imarith(outmask,"*",med5,med5)
if (i>1) imdel(output)
imcalc(oldoutput//","//outmask//","//med5,output,"(1.-im2)*im1+im3",verb-)
# =
merging to full mask
inputmask = 1.0 - 10000.0 * finalsel # So this is 1.0, but cosmics are very negative
inputmask = oldoutput * inputmask # orig image, with very negative cosmics
med5 = median of inputmask, but rejecting these negative cosmics
# i dunno how to do this in python -> had to do the loop
med5 = finalsel * med5 # we keep only the cosmics of this median
# actual replacement :
output = (1.0 - outmask)*oldoutput + med5 # ok
"""
def findsatstars(self):
"""
Uses the satlevel to find saturated stars (not cosmics !), and puts the result as a mask in self.satstars.
This can then be used to avoid these regions in cosmic detection and cleaning procedures.
Slow ...
"""
logging.debug("Detecting saturated stars ...")
# DETECTION
satpixels = self.rawarray > self.satlevel # the candidate pixels
# We build a smoothed version of the image to look for large stars and their support :
m5 = ndimage.filters.median_filter(self.rawarray, size=5, mode='mirror')
# We look where this is above half the satlevel
largestruct = m5 > (self.satlevel / 2.0)
# The rough locations of saturated stars are now :
satstarscenters = np.logical_and(largestruct, satpixels)
logging.debug("Building mask of saturated stars ...")
# BUILDING THE MASK
        # The subtlety is that we want to include all saturated pixels connected to these saturated stars...
        # I haven't found a better solution than the double loop
# We dilate the satpixels alone, to ensure connectivity in glitchy regions and to add a safety margin around them.
# dilstruct = np.array([[0,1,0], [1,1,1], [0,1,0]])
dilsatpixels = ndimage.morphology.binary_dilation(satpixels, structure=dilstruct, iterations=2, mask=None,
output=None, border_value=0, origin=0, brute_force=False)
# It turns out it's better to think large and do 2 iterations...
# We label these :
(dilsatlabels, nsat) = ndimage.measurements.label(dilsatpixels)
# tofits(dilsatlabels, "test.fits")
logging.debug("We have %i saturated stars." % nsat)
        # The output, False for now :
outmask = np.zeros(self.rawarray.shape)
for i in range(1, nsat + 1): # we go through the islands of saturated pixels
thisisland = dilsatlabels == i # gives us a boolean array
# Does this intersect with satstarscenters ?
overlap = np.logical_and(thisisland, satstarscenters)
if np.sum(overlap) > 0:
outmask = np.logical_or(outmask, thisisland) # we add thisisland to the mask
self.satstars = np.cast['bool'](outmask)
logging.debug("Mask of saturated stars done")
def getsatstars(self):
"""
Returns the mask of saturated stars after finding them if not yet done.
Intended mainly for external use.
"""
if not self.satlevel > 0:
raise RuntimeError("Cannot determine satstars : you gave satlevel <= 0 !")
if self.satstars is None:
self.findsatstars()
return self.satstars
def getmask(self):
return self.mask
def getrawarray(self):
"""
For external use only, as it returns the rawarray minus pssl !
"""
return self.rawarray - self.pssl
def getcleanarray(self):
"""
For external use only, as it returns the cleanarray minus pssl !
"""
return self.cleanarray - self.pssl
def guessbackgroundlevel(self):
"""
Estimates the background level. This could be used to fill pixels in large cosmics.
"""
if self.backgroundlevel is None:
self.backgroundlevel = np.median(self.rawarray.ravel())
return self.backgroundlevel
def lacosmiciteration(self):
"""
Performs one iteration of the L.A.Cosmic algorithm.
It operates on self.cleanarray, and afterwards updates self.mask by adding the newly detected
cosmics to the existing self.mask. Cleaning is not made automatically ! You have to call
clean() after each iteration.
        This way you can run it several times in a row to do L.A.Cosmic "iterations".
See function lacosmic, that mimics the full iterative L.A.Cosmic algorithm.
Returns a dict containing
- niter : the number of cosmic pixels detected in this iteration
- nnew : among these, how many were not yet in the mask
- itermask : the mask of pixels detected in this iteration
- newmask : the pixels detected that were not yet in the mask
If findsatstars() was called, we exclude these regions from the search.
"""
logging.debug("Convolving image with Laplacian kernel ...")
# We subsample, convolve, clip negative values, and rebin to original size
subsam = subsample(self.cleanarray)
conved = signal.convolve2d(subsam, laplkernel, mode="same", boundary="symm")
cliped = conved.clip(min=0.0)
# cliped = np.abs(conved) # unfortunately this does not work to find holes as well ...
lplus = rebin2x2(cliped)
logging.debug("Creating noise model ...")
# We build a custom noise map, so to compare the laplacian to
m5 = ndimage.filters.median_filter(self.cleanarray, size=5, mode='mirror')
# We keep this m5, as I will use it later for the interpolation.
m5clipped = m5.clip(min=0.00001) # As we will take the sqrt
noise = (1.0 / self.gain) * np.sqrt(self.gain * m5clipped + self.readnoise * self.readnoise)
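        # Standard CCD noise model expressed in ADU: Poisson noise on the detected
        # signal (gain * m5clipped, in electrons) plus read noise, converted back
        # to ADU by the 1/gain factor.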
logging.debug("Calculating Laplacian signal to noise ratio ...")
# Laplacian signal to noise ratio :
s = lplus / (2.0 * noise) # the 2.0 is from the 2x2 subsampling
# This s is called sigmap in the original lacosmic.cl
# We remove the large structures (s prime) :
sp = s - ndimage.filters.median_filter(s, size=5, mode='mirror')
logging.debug("Selecting candidate cosmic rays ...")
# Candidate cosmic rays (this will include stars + HII regions)
candidates = sp > self.sigclip
nbcandidates = np.sum(candidates)
logging.debug(" %5i candidate pixels" % nbcandidates)
# At this stage we use the saturated stars to mask the candidates, if available :
if self.satstars is not None:
logging.debug("Masking saturated stars ...")
candidates = np.logical_and(np.logical_not(self.satstars), candidates)
nbcandidates = np.sum(candidates)
logging.debug(" %5i candidate pixels not part of saturated stars" % nbcandidates)
logging.debug("Building fine structure image ...")
# We build the fine structure image :
m3 = ndimage.filters.median_filter(self.cleanarray, size=3, mode='mirror')
m37 = ndimage.filters.median_filter(m3, size=7, mode='mirror')
f = m3 - m37
# In the article that's it, but in lacosmic.cl f is divided by the noise...
# Ok I understand why, it depends on if you use sp/f or L+/f as criterion.
# There are some differences between the article and the iraf implementation.
# So I will stick to the iraf implementation.
f = f / noise
f = f.clip(min=0.01) # as we will divide by f. like in the iraf version.
logging.debug("Removing suspected compact bright objects ...")
# Now we have our better selection of cosmics :
cosmics = np.logical_and(candidates, sp / f > self.objlim)
# Note the sp/f and not lplus/f ... due to the f = f/noise above.
nbcosmics = np.sum(cosmics)
logging.debug(" %5i remaining candidate pixels" % nbcosmics)
        # What follows is a special treatment for neighbors, with more relaxed constraints.
logging.debug("Finding neighboring pixels affected by cosmic rays ...")
        # We grow these cosmics a first time to determine the immediate neighborhood :
growcosmics = np.cast['bool'](
signal.convolve2d(np.cast['float32'](cosmics), growkernel, mode="same", boundary="symm"))
# From this grown set, we keep those that have sp > sigmalim
# so obviously not requiring sp/f > objlim, otherwise it would be pointless
growcosmics = np.logical_and(sp > self.sigclip, growcosmics)
# Now we repeat this procedure, but lower the detection limit to sigmalimlow :
finalsel = np.cast['bool'](
signal.convolve2d(np.cast['float32'](growcosmics), growkernel, mode="same", boundary="symm"))
finalsel = np.logical_and(sp > self.sigcliplow, finalsel)
# Again, we have to kick out pixels on saturated stars :
if self.satstars is not None:
logging.debug("Masking saturated stars ...")
finalsel = np.logical_and(np.logical_not(self.satstars), finalsel)
nbfinal = np.sum(finalsel)
logging.debug(" %5i pixels detected as cosmics" % nbfinal)
# Now the replacement of the cosmics...
# we outsource this to the function clean(), as for some purposes the cleaning might not even be needed.
# Easy way without masking would be :
# self.cleanarray[finalsel] = m5[finalsel]
# We find how many cosmics are not yet known :
newmask = np.logical_and(np.logical_not(self.mask), finalsel)
nbnew = np.sum(newmask)
# We update the mask with the cosmics we have found :
self.mask = np.logical_or(self.mask, finalsel)
# We return
# (used by function lacosmic)
return {"niter": nbfinal, "nnew": nbnew, "itermask": finalsel, "newmask": newmask}
def findholes(self):
"""
Detects "negative cosmics" in the cleanarray and adds them to the mask.
This is not working yet.
"""
pass
"""
if verbose == None:
verbose = self.verbose
if verbose :
print "Finding holes ..."
m3 = ndimage.filters.median_filter(self.cleanarray, size=3, mode='mirror')
h = (m3 - self.cleanarray).clip(min=0.0)
tofits("h.fits", h)
sys.exit()
# The holes are the peaks in this image that are not stars
#holes = h > 300
"""
"""
subsam = subsample(self.cleanarray)
conved = -signal.convolve2d(subsam, laplkernel, mode="same", boundary="symm")
cliped = conved.clip(min=0.0)
lplus = rebin2x2(conved)
tofits("lplus.fits", lplus)
m5 = ndimage.filters.median_filter(self.cleanarray, size=5, mode='mirror')
m5clipped = m5.clip(min=0.00001)
noise = (1.0/self.gain) * np.sqrt(self.gain*m5clipped + self.readnoise*self.readnoise)
s = lplus / (2.0 * noise) # the 2.0 is from the 2x2 subsampling
# This s is called sigmap in the original lacosmic.cl
# We remove the large structures (s prime) :
sp = s - ndimage.filters.median_filter(s, size=5, mode='mirror')
holes = sp > self.sigclip
"""
"""
# We have to kick out pixels on saturated stars :
if self.satstars != None:
if verbose:
print "Masking saturated stars ..."
holes = np.logical_and(np.logical_not(self.satstars), holes)
if verbose:
print "%i hole pixels found" % np.sum(holes)
# We update the mask with the holes we have found :
self.mask = np.logical_or(self.mask, holes)
"""
def run(self, maxiter=4):
"""
Full artillery :-)
- Find saturated stars
        - Run up to maxiter L.A.Cosmic iterations, stopping early if an iteration finds no cosmics
"""
if self.satlevel > 0 and self.satstars is None:
self.findsatstars()
logging.debug("Starting %i L.A.Cosmic iterations ..." % maxiter)
for i in range(1, maxiter + 1):
logging.debug("Iteration %i" % i)
iterres = self.lacosmiciteration()
logging.debug("%i cosmic pixels (%i new)" % (iterres["niter"], iterres["nnew"]))
# self.clean(mask = iterres["mask"]) # No, we want clean to operate on really clean pixels only !
# Thus we always apply it on the full mask, as lacosmic does :
self.clean()
# But note that for huge cosmics, one might want to revise this.
            # That's why I added a feature to skip saturated stars !
if iterres["niter"] == 0:
break
# Top-level functions
# def fullarray(verbose = False):
# """
# Applies the full artillery using and returning only numpy arrays
# """
# pass
#
# def fullfits(infile, outcleanfile = None, outmaskfile = None):
# """
# Applies the full artillery of the function fullarray() directly on FITS files.
# """
# pass
# FITS import - export
def fromfits(infilename, hdu=0):
    """
    Reads a FITS file and returns a (2D numpy array, header) tuple.
    Use hdu to specify which HDU you want (default = primary = 0)
    """
pixelarray, hdr = pyfits.getdata(infilename, hdu, header=True)
pixelarray = np.asarray(pixelarray).transpose()
pixelarrayshape = pixelarray.shape
logging.debug("FITS import shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1]))
logging.debug("FITS file BITPIX : %s" % (hdr["BITPIX"]))
logging.debug("Internal array type :", pixelarray.dtype.name)
return pixelarray, hdr
def tofits(outfilename, pixelarray, hdr=None):
"""
Takes a 2D numpy array and write it into a FITS file.
If you specify a header (pyfits format, as returned by fromfits()) it will be used for the image.
You can give me boolean numpy arrays, I will convert them into 8 bit integers.
"""
pixelarrayshape = pixelarray.shape
logging.debug("FITS export shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1]))
if pixelarray.dtype.name == "bool":
pixelarray = np.cast["uint8"](pixelarray)
if os.path.isfile(outfilename):
os.remove(outfilename)
    if hdr is None: # then a minimal header will be created
hdu = pyfits.PrimaryHDU(pixelarray.transpose())
else: # this if else is probably not needed but anyway ...
hdu = pyfits.PrimaryHDU(pixelarray.transpose(), hdr)
hdu.writeto(outfilename)
logging.debug("Wrote %s" % outfilename)
# Array manipulation
def subsample(a): # this is more a generic function than a method ...
"""
Returns a 2x2-subsampled version of array a (no interpolation, just cutting pixels in 4).
The version below is directly from the scipy cookbook on rebinning :
U{http://www.scipy.org/Cookbook/Rebinning}
There is ndimage.zoom(cutout.array, 2, order=0, prefilter=False), but it makes funny borders.
"""
"""
# Ouuwww this is slow ...
outarray = np.zeros((a.shape[0]*2, a.shape[1]*2), dtype=np.float64)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
outarray[2*i,2*j] = a[i,j]
outarray[2*i+1,2*j] = a[i,j]
outarray[2*i,2*j+1] = a[i,j]
outarray[2*i+1,2*j+1] = a[i,j]
return outarray
"""
# much better :
newshape = (2 * a.shape[0], 2 * a.shape[1])
slices = [slice(0, old, float(old) / new) for old, new in zip(a.shape, newshape)]
coordinates = np.mgrid[slices]
indices = coordinates.astype('i') # choose the biggest smaller integer index
return a[tuple(indices)]
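# Illustrative example (tiny hedged input): subsample(np.array([[1, 2], [3, 4]]))
# repeats each pixel into a 2x2 block :
#   [[1, 1, 2, 2],
#    [1, 1, 2, 2],
#    [3, 3, 4, 4],
#    [3, 3, 4, 4]]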
def rebin(a, newshape):
"""
Auxiliary function to rebin an ndarray a.
U{http://www.scipy.org/Cookbook/Rebinning}
>>> a=rand(6,4); b=rebin(a,(3,2))
"""
shape = a.shape
lenShape = len(shape)
factor = np.asarray(shape) / np.asarray(newshape)
# print factor
evList = ['a.reshape('] + \
['int(newshape[%d]), int(factor[%d]),' % (i, i) for i in range(lenShape)] + \
[')'] + ['.sum(%d)' % (i + 1) for i in range(lenShape)] + \
['/factor[%d]' % i for i in range(lenShape)]
expr = ''.join(evList)
logging.debug(shape)
logging.debug(newshape)
logging.debug(lenShape)
logging.debug(factor)
logging.info('Evaluating expression: %s' % expr)
return eval(expr)
def rebin2x2(a):
"""
Wrapper around rebin that actually rebins 2 by 2
"""
inshape = np.array(a.shape)
if not (inshape % 2 == np.zeros(2)).all(): # Modulo check to see if size is even
raise RuntimeError("I want even image shapes !")
return rebin(a, inshape / 2)
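# Quick sanity check (illustrative): rebin averages each block, so a constant
# array comes back as the same constant at half the resolution, e.g.
#   rebin2x2(np.ones((4, 4))) -> a (2, 2) array of ones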
|