# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base Test Case for all Unit Tests"""
import contextlib
import gc
import logging as std_logging
import os
import os.path
import sys
import weakref
import eventlet.timeout
import fixtures
import mock
from oslo.config import cfg
from oslo.messaging import conffixture as messaging_conffixture
import testtools
from neutron.common import config
from neutron.common import rpc as n_rpc
from neutron.db import agentschedulers_db
from neutron import manager
from neutron.tests import fake_notifier
from neutron.tests import post_mortem_debug
CONF = cfg.CONF
CONF.import_opt('state_path', 'neutron.common.config')
TRUE_STRING = ['True', '1']
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def fake_use_fatal_exceptions(*args):
return True
def fake_consume_in_threads(self):
return []
class BaseTestCase(testtools.TestCase):
def cleanup_core_plugin(self):
"""Ensure that the core plugin is deallocated."""
nm = manager.NeutronManager
if not nm.has_instance():
return
#TODO(marun) Fix plugins that do not properly initialize notifiers
agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
# Perform a check for deallocation only if explicitly
# configured to do so since calling gc.collect() after every
# test increases test suite execution time by ~50%.
check_plugin_deallocation = (
os.environ.get('OS_CHECK_PLUGIN_DEALLOCATION') in TRUE_STRING)
if check_plugin_deallocation:
plugin = weakref.ref(nm._instance.plugin)
nm.clear_instance()
if check_plugin_deallocation:
gc.collect()
#TODO(marun) Ensure that mocks are deallocated?
if plugin() and not isinstance(plugin(), mock.Base):
self.fail('The plugin for this test was not deallocated.')
def setup_coreplugin(self, core_plugin=None):
if core_plugin is not None:
cfg.CONF.set_override('core_plugin', core_plugin)
def setup_notification_driver(self, notification_driver=None):
self.addCleanup(fake_notifier.reset)
if notification_driver is None:
notification_driver = [fake_notifier.__name__]
cfg.CONF.set_override("notification_driver", notification_driver)
@staticmethod
def config_parse(conf=None, args=None):
"""Create the default configurations."""
# neutron.conf.test includes rpc_backend which needs to be cleaned up
if args is None:
args = ['--config-file', etcdir('neutron.conf.test')]
if conf is None:
config.init(args=args)
else:
conf(args)
def setUp(self):
super(BaseTestCase, self).setUp()
# Ensure plugin cleanup is triggered last so that
# test-specific cleanup has a chance to release references.
self.addCleanup(self.cleanup_core_plugin)
# Configure this first to ensure pm debugging support for setUp()
if os.environ.get('OS_POST_MORTEM_DEBUG') in TRUE_STRING:
self.addOnException(post_mortem_debug.exception_handler)
if os.environ.get('OS_DEBUG') in TRUE_STRING:
_level = std_logging.DEBUG
else:
_level = std_logging.INFO
capture_logs = os.environ.get('OS_LOG_CAPTURE') in TRUE_STRING
if not capture_logs:
std_logging.basicConfig(format=LOG_FORMAT, level=_level)
self.log_fixture = self.useFixture(
fixtures.FakeLogger(
format=LOG_FORMAT,
level=_level,
nuke_handlers=capture_logs,
))
# suppress all but errors here
self.useFixture(
fixtures.FakeLogger(
name='neutron.api.extensions',
format=LOG_FORMAT,
level=std_logging.ERROR,
nuke_handlers=capture_logs,
))
test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', 0))
if test_timeout == -1:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
# If someone does use tempfile directly, ensure that it's cleaned up
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.temp_dir = self.useFixture(fixtures.TempDir()).path
cfg.CONF.set_override('state_path', self.temp_dir)
self.addCleanup(mock.patch.stopall)
self.addCleanup(CONF.reset)
if os.environ.get('OS_STDOUT_CAPTURE') in TRUE_STRING:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in TRUE_STRING:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.exceptions.NeutronException.use_fatal_exceptions',
fake_use_fatal_exceptions))
# don't actually start RPC listeners when testing
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.rpc.Connection.consume_in_threads',
fake_consume_in_threads))
self.useFixture(fixtures.MonkeyPatch(
'oslo.messaging.Notifier', fake_notifier.FakeNotifier))
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
self.messaging_conf.response_timeout = 15
self.useFixture(self.messaging_conf)
self.addCleanup(n_rpc.clear_extra_exmods)
n_rpc.add_extra_exmods('neutron.test')
self.addCleanup(n_rpc.cleanup)
n_rpc.init(CONF)
if sys.version_info < (2, 7) and getattr(self, 'fmt', '') == 'xml':
raise self.skipException('XML Testing Skipped in Py26')
self.setup_config()
def setup_config(self):
"""Tests that need a non-default config can override this method."""
self.config_parse()
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
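# Illustrative sketch (option names here are hypothetical, not defined in this
# file): a test could call
#     self.config(debug=True)
#     self.config(group='AGENT', report_interval=60)
# and every override is reverted automatically by the CONF.reset cleanup
# registered in setUp().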
@contextlib.contextmanager
def assert_max_execution_time(self, max_execution_time=5):
with eventlet.timeout.Timeout(max_execution_time, False):
yield
return
self.fail('Execution of this test timed out')
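# Hypothetical usage of the timeout guard above:
#     with self.assert_max_execution_time(2):
#         run_something_that_should_be_fast()
# If the body runs longer than 2 seconds the eventlet timeout fires and the
# test fails with 'Execution of this test timed out'.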
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for :module:`flocker.docs.version`.
"""
from twisted.trial.unittest import SynchronousTestCase
try:
from packaging.version import Version as PEP440Version
PACKAGING_INSTALLED = True
except ImportError:
PACKAGING_INSTALLED = False
from pyrsistent import PRecord, field
from ..version import (
_parse_version, FlockerVersion,
get_doc_version, get_installable_version, get_pre_release,
get_package_key_suffix,
is_pre_release, is_release, is_weekly_release,
target_release,
NotAPreRelease, UnparseableVersion,
)
from flocker.common.version import RPMVersion, make_rpm_version
class MakeRpmVersionTests(SynchronousTestCase):
"""
Tests for ``make_rpm_version``.
"""
def test_good(self):
"""
``make_rpm_version`` gives the expected ``RPMVersion`` instances when
supplied with valid ``flocker_version_number``s.
"""
expected = {
'0.1.0': RPMVersion(version='0.1.0', release='1'),
'0.1.0+99.g3d644b1': RPMVersion(
version='0.1.0', release='1.99.g3d644b1'),
'0.1.1rc1': RPMVersion(version='0.1.1', release='0.rc.1'),
'0.1.1': RPMVersion(version='0.1.1', release='1'),
'0.2.0.dev1': RPMVersion(version='0.2.0', release='0.dev.1'),
'0.2.0.dev2+99.g3d644b1':
RPMVersion(version='0.2.0', release='0.dev.2.99.g3d644b1'),
'0.2.0.dev3+100.g3d644b2.dirty': RPMVersion(
version='0.2.0', release='0.dev.3.100.g3d644b2.dirty'),
}
unexpected_results = []
for supplied_version, expected_rpm_version in expected.items():
actual_rpm_version = make_rpm_version(supplied_version)
if actual_rpm_version != expected_rpm_version:
unexpected_results.append((
supplied_version,
actual_rpm_version,
expected_rpm_version,
))
if unexpected_results:
self.fail(unexpected_results)
def test_non_integer_suffix(self):
"""
``make_rpm_version`` raises ``UnparseableVersion`` when supplied with a
version with a non-integer pre or dev suffix number.
"""
with self.assertRaises(UnparseableVersion):
make_rpm_version('0.1.2rcX')
class InvalidVersionTests(SynchronousTestCase):
"""
Tests for invalid versions.
"""
def test_invalid_Version(self):
"""
If an invalid version is passed to ``_parse_version``,
``UnparseableVersion`` is raised.
"""
self.assertRaises(UnparseableVersion, _parse_version, 'unparseable')
class VersionCase(PRecord):
"""
Description of a version and its expected interpretations.
:param bytes version: The version to parse.
:param FlockerVersion flocker_version: The parsed version.
:param bytes doc_version: The corresponding doc version.
:param bytes installable_version: The corresponding installable version.
:param bool is_release: Whether the version corresponds to a
release.
:param bool is_weekly_release: Whether the version corresponds
to a weekly release.
:param bool is_pre_release: Whether the version corresponds to
a pre-release.
:param bool is_legacy: Whether the version is an old-style
version. In particular, the version isn't normalized
according to PEP440.
"""
version = field(bytes, mandatory=True)
flocker_version = field(FlockerVersion, mandatory=True)
doc_version = field(bytes, mandatory=True)
installable_version = field(bytes, mandatory=True)
is_release = field(bool, mandatory=True)
is_weekly_release = field(bool, mandatory=True)
is_pre_release = field(bool, mandatory=True)
is_legacy = field(bool, mandatory=True, initial=False)
def build_version_test(name, version_case):
"""
Create a test case that checks that a given version
is interpreted as expected.
"""
class Tests(SynchronousTestCase):
def test_flocker_version(self):
"""
The parsed version matches the expected parsed version.
"""
self.assertEqual(
_parse_version(version_case.version),
version_case.flocker_version,
"Version doesn't match expected parsed version.",
)
def test_doc_version(self):
"""
The calculated doc version matches the expected doc version.
"""
self.assertEqual(
get_doc_version(version_case.version),
version_case.doc_version,
"Calculated doc version doesn't match expected doc version.",
)
def test_installable_version(self):
"""
The calculated installable version matches the expected installable
version.
"""
self.assertEqual(
get_installable_version(version_case.version),
version_case.installable_version,
"Calculated installable version doesn't match"
"expected installable version.",)
if version_case.is_legacy:
test_installable_version.skip = (
"Legacy version don't generate proper installable version."
)
def test_is_release(self):
"""
``is_release`` returns the expected value for the version.
"""
self.assertEqual(
is_release(version_case.version),
version_case.is_release,
)
def test_is_weekly_release(self):
"""
``is_weekly_release`` returns the expected value for the version.
"""
self.assertEqual(
is_weekly_release(version_case.version),
version_case.is_weekly_release,
)
def test_is_pre_release(self):
"""
``is_pre_release`` returns the expected value for the version.
"""
self.assertEqual(
is_pre_release(version_case.version),
version_case.is_pre_release,
)
def test_pep_440(self):
"""
The version is a valid PEP440 version.
(``PEP440Version`` raises if provided an invalid version).
"""
PEP440Version(version_case.version)
def test_normalization(self):
"""
The version number is normalized according to PEP440.
"""
self.assertEqual(
version_case.version,
str(PEP440Version(version_case.version)),
"Version isn't normalized.",
)
if version_case.is_legacy:
test_normalization.skip = "Legacy version isn't normalized."
if not PACKAGING_INSTALLED:
test_normalization.skip = test_pep_440.skip = (
"``packaing`` not installed."
)
Tests.__name__ = name
return Tests
MarketingVersionTests = build_version_test(
"MarketingVersionTests",
VersionCase(
version=b'0.3.2',
flocker_version=FlockerVersion(
major=b'0',
minor=b'3',
micro=b'2',
),
doc_version=b'0.3.2',
installable_version=b'0.3.2',
is_release=True,
is_weekly_release=False,
is_pre_release=False,
),
)
WeeklyReleaseTests = build_version_test(
"WeeklyReleaseTests",
VersionCase(
version=b'0.3.2.dev1',
flocker_version=FlockerVersion(
major=b'0',
minor=b'3',
micro=b'2',
weekly_release=b'1',
),
doc_version=b'0.3.2.dev1',
installable_version=b'0.3.2.dev1',
is_release=False,
is_weekly_release=True,
is_pre_release=False,
),
)
PreReleaseTests = build_version_test(
"PreReleaseTests",
VersionCase(
version=b'0.3.2rc1',
flocker_version=FlockerVersion(
major=b'0',
minor=b'3',
micro=b'2',
pre_release=b'1',
),
doc_version=b'0.3.2rc1',
installable_version=b'0.3.2rc1',
is_release=False,
is_weekly_release=False,
is_pre_release=True,
),
)
DevelopmentVersionTests = build_version_test(
"DevelopmentVersionTestss",
VersionCase(
version=b'0.3.2+1.gf661a6a',
flocker_version=FlockerVersion(
major=b'0',
minor=b'3',
micro=b'2',
commit_count=b'1',
commit_hash=b'f661a6a',
),
doc_version=b'0.3.2+1.gf661a6a',
installable_version=b'0.3.2',
is_release=False,
is_weekly_release=False,
is_pre_release=False,
),
)
DirtyVersionTests = build_version_test(
"DirtyVersionTests",
VersionCase(
version=b'0.3.2+1.gf661a6a.dirty',
flocker_version=FlockerVersion(
major=b'0',
minor=b'3',
micro=b'2',
commit_count=b'1',
commit_hash=b'f661a6a',
dirty=b'.dirty',
),
doc_version=b'0.3.2+1.gf661a6a.dirty',
installable_version=b'0.3.2',
is_release=False,
is_weekly_release=False,
is_pre_release=False,
),
)
DocReleaseTests = build_version_test(
"DocReleaseTests",
VersionCase(
version=b'0.3.2.post11',
flocker_version=FlockerVersion(
major=b'0',
minor=b'3',
micro=b'2',
documentation_revision=b'11',
),
doc_version=b'0.3.2',
installable_version=b'0.3.2',
is_release=True,
is_weekly_release=False,
is_pre_release=False,
),
)
DocReleaseDirtyTests = build_version_test(
"DocReleaseDirtyTests",
VersionCase(
version=b'0.3.2.post11+1.gf661a6a.dirty',
flocker_version=FlockerVersion(
major=b'0',
minor=b'3',
micro=b'2',
documentation_revision=b'11',
commit_count=b'1',
commit_hash=b'f661a6a',
dirty=b'.dirty',
),
doc_version=b'0.3.2.post11+1.gf661a6a.dirty',
installable_version=b'0.3.2',
is_release=False,
is_weekly_release=False,
is_pre_release=False,
),
)
# Legacy Version Tests
# These only test with an appended version.
LegacyPreReleaseTests = build_version_test(
"LegacyPreReleaseTests",
VersionCase(
version=b'0.3.2pre11+1.gf661a6a',
flocker_version=FlockerVersion(
major=b'0',
minor=b'3',
micro=b'2',
pre_release=b'11',
commit_count=b'1',
commit_hash=b'f661a6a',
),
doc_version=b'0.3.2pre11+1.gf661a6a',
installable_version=b'0.3.2pre11',
is_release=False,
is_weekly_release=False,
is_pre_release=False,
is_legacy=True,
),
)
LegacyDocReleaseTests = build_version_test(
"LegacyPreReleaseTests",
VersionCase(
version=b'0.3.2+doc11.1.gf661a6a',
flocker_version=FlockerVersion(
major=b'0',
minor=b'3',
micro=b'2',
documentation_revision=b'11',
commit_count=b'1',
commit_hash=b'f661a6a',
),
doc_version=b'0.3.2+doc11.1.gf661a6a',
installable_version=b'0.3.2',
is_release=False,
is_weekly_release=False,
is_pre_release=False,
is_legacy=True,
),
)
class GetPreReleaseTests(SynchronousTestCase):
"""
Tests for :function:`get_pre_release`.
"""
def test_not_pre_release(self):
"""
If a version which is not a pre-release is passed to
``get_pre_release``, ``NotAPreRelease`` is raised.
"""
self.assertRaises(NotAPreRelease, get_pre_release, '0.3.0')
def test_pre_release(self):
"""
When a pre-release is passed to ``get_pre_release``, the number of the
pre-release is returned.
"""
self.assertEqual(get_pre_release('0.3.2rc3'), 3)
class TargetReleaseTests(SynchronousTestCase):
"""
Tests for :function:`target_release`.
"""
def test_not_pre_release(self):
"""
If a version which is not a pre-release is passed to
``target_release``, ``NotAPreRelease`` is raised.
"""
self.assertRaises(NotAPreRelease, target_release, '0.3.0')
def test_pre_release(self):
"""
When a pre-release is passed to ``target_release``, the target final
release is returned.
"""
self.assertEqual(target_release('0.3.2rc3'), '0.3.2')
class GetPackageKeySuffixTests(SynchronousTestCase):
"""
Tests for :function:`get_package_key_suffix`.
"""
def test_marketing_release(self):
"""
If a marketing release is passed to ``get_package_key_suffix``, an
empty string is returned.
"""
self.assertEqual(get_package_key_suffix('0.3.0'), "")
def test_documentation_release(self):
"""
If a documentation release is passed to ``get_package_key_suffix``, an
empty string is returned.
"""
self.assertEqual(get_package_key_suffix('0.3.0.post1'), "")
def test_non_marketing_release(self):
"""
If a weekly release is passed to ``get_package_key_suffix``, "-testing"
is returned.
"""
self.assertEqual(get_package_key_suffix('0.3.0.dev1'), "-testing")
def test_pre_release(self):
"""
If a pre-release is passed to ``get_package_key_suffix``, "-testing"
is returned.
"""
self.assertEqual(get_package_key_suffix('0.3.0rc1'), "-testing")
#!/usr/bin/env python
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
USAGE = """
tika.py [-v] [-e] [-o <outputDir>] [--server <TikaServerEndpoint>] [--install <UrlToTikaServerJar>] [--port <portNumber>] <command> <option> <urlOrPathToFile>
tika.py parse all test1.pdf test2.pdf (write output JSON metadata files for test1.pdf_meta.json and test2.pdf_meta.json)
tika.py detect type test.pdf (returns mime-type as text/plain)
tika.py language file french.txt (returns language e.g., fr as text/plain)
tika.py translate fr:en french.txt (translates the file french.txt from french to english)
tika.py config mime-types (see what mime-types the Tika Server can handle)
A simple python and command-line client for Tika using the standalone Tika server (JAR file).
All commands return results in JSON format by default (except text in text/plain).
To parse docs, use:
tika.py parse <meta | text | all> <path>
To check the configuration of the Tika server, use:
tika.py config <mime-types | detectors | parsers>
Commands:
parse = parse the input file and write a JSON doc file.ext_meta.json containing the extracted metadata, text, or both
detect type = parse the stream and 'detect' the MIME/media type, return in text/plain
language file = parse the file stream and identify the language of the text, return its 2 character code in text/plain
translate src:dest = parse and extract text and then translate the text from source language to destination language
config = return a JSON doc describing the configuration of the Tika server (i.e. mime-types it
can handle, or installed detectors or parsers)
Arguments:
urlOrPathToFile = file to be parsed, if URL it will first be retrieved and then passed to Tika
Switches:
--verbose, -v = verbose mode
--encode, -e = encode response in UTF-8
--server <TikaServerEndpoint> = use a remote Tika Server at this endpoint, otherwise use local server
--install <UrlToTikaServerJar> = download and exec Tika Server (JAR file), starting server on default port 9998
Example usage as python client:
-- from tika import runCommand, parse1
-- jsonOutput = runCommand('parse', 'all', filename)
or
-- jsonOutput = parse1('all', filename)
"""
import sys, os, getopt, time, codecs
try:
unicode_string = unicode
binary_string = str
except NameError:
unicode_string = str
binary_string = bytes
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
if sys.version_info[0] < 3:
import codecs
open = codecs.open
import requests
import socket
import tempfile
import hashlib
import platform
from subprocess import Popen
from subprocess import STDOUT
from os import walk
Windows = platform.system() == "Windows"
TikaVersion = "1.12"
TikaJarPath = tempfile.gettempdir()
TikaFilesPath = tempfile.gettempdir()
TikaServerJar = "http://search.maven.org/remotecontent?filepath=org/apache/tika/tika-server/"+TikaVersion+"/tika-server-"+TikaVersion+".jar"
ServerHost = "localhost"
Port = "9998"
ServerEndpoint = 'http://' + ServerHost + ':' + Port
Translator = "org.apache.tika.language.translate.Lingo24Translator"
TikaClientOnly = False
Verbose = 0
EncodeUtf8 = 0
def echo2(*s): sys.stderr.write(unicode_string('tika.py: %s\n') % unicode_string(' ').join(map(unicode_string, s)))
def warn(*s): echo2('Warn:', *s)
def die(*s): warn('Error:', *s); echo2(USAGE); sys.exit()
def runCommand(cmd, option, urlOrPaths, port, outDir=None, serverHost=ServerHost, tikaServerJar=TikaServerJar, verbose=Verbose, encode=EncodeUtf8):
"""Run the Tika command by calling the Tika server and return results in JSON format (or plain text)."""
# import pdb; pdb.set_trace()
if cmd in ('parse', 'detect') and (urlOrPaths == [] or urlOrPaths is None):
die('No URLs/paths specified.')
serverEndpoint = 'http://' + serverHost + ':' + port
if cmd == 'parse':
return parseAndSave(option, urlOrPaths, outDir, serverEndpoint, verbose, tikaServerJar)
elif cmd == "detect":
return detectType(option, urlOrPaths, serverEndpoint, verbose, tikaServerJar)
elif cmd == "language":
return detectLang(option, urlOrPaths, serverEndpoint, verbose, tikaServerJar)
elif cmd == "translate":
return doTranslate(option, urlOrPaths, serverEndpoint, verbose, tikaServerJar)
elif cmd == "config":
status, resp = getConfig(option, serverEndpoint, verbose, tikaServerJar)
return resp
else:
die('Bad args')
def getPaths(urlOrPaths):
"""Determines if the given URL in urlOrPaths is a URL or a file or directory. If it's
a directory, it walks the directory and then finds all file paths in it, and ads them
too. If it's a file, it adds it to the paths. If it's a URL it just adds it to the path.
"""
paths = []
for eachUrlOrPaths in urlOrPaths:
if os.path.isdir(eachUrlOrPaths):
for root, directories, filenames in walk(eachUrlOrPaths):
for filename in filenames:
paths.append(os.path.join(root,filename))
else:
paths.append(eachUrlOrPaths)
return paths
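# Illustrative sketch (paths are hypothetical): getPaths(['/tmp/docs', 'a.pdf'])
# expands '/tmp/docs' into every file found beneath it and keeps 'a.pdf' as-is,
# so callers always get back a flat list of file paths and/or URLs.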
def parseAndSave(option, urlOrPaths, outDir=None, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='application/json', metaExtension='_meta.json',
services={'meta': '/meta', 'text': '/tika', 'all': '/rmeta'}):
"""Parse the objects and write extracted metadata and/or text in JSON format to matching
filename with an extension of '_meta.json'."""
metaPaths = []
paths = getPaths(urlOrPaths)
for path in paths:
if outDir is None:
metaPath = path + metaExtension
else:
metaPath = os.path.join(outDir, os.path.split(path)[1] + metaExtension)
echo2('Writing %s' % metaPath)
with open(metaPath, 'w', encoding='utf-8') as f:
f.write(parse1(option, path, serverEndpoint, verbose, tikaServerJar, \
responseMimeType, services)[1] + u"\n")
metaPaths.append(metaPath)
return metaPaths
def parse(option, urlOrPaths, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='application/json',
services={'meta': '/meta', 'text': '/tika', 'all': '/rmeta'}):
"""Parse the objects and return extracted metadata and/or text in JSON format."""
return [parse1(option, path, serverEndpoint, verbose, tikaServerJar, responseMimeType, services)
for path in urlOrPaths]
def parse1(option, urlOrPath, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='application/json',
services={'meta': '/meta', 'text': '/tika', 'all': '/rmeta/text'}):
"""Parse the object and return extracted metadata and/or text in JSON format."""
path, mode = getRemoteFile(urlOrPath, TikaFilesPath)
if option not in services:
warn('config option must be one of meta, text, or all; using all.')
service = services.get(option, services['all'])
if service == '/tika': responseMimeType = 'text/plain'
status, response = callServer('put', serverEndpoint, service, open(path, 'rb'),
{'Accept': responseMimeType, 'Content-Disposition': 'attachment; filename=%s' % os.path.basename(path)},
verbose, tikaServerJar)
if mode == 'remote': os.unlink(path)
return (status, response)
def detectLang(option, urlOrPaths, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='text/plain',
services={'file' : '/language/stream'}):
"""Detect the language of the provided stream and return its 2 character code as text/plain."""
paths = getPaths(urlOrPaths)
return [detectLang1(option, path, serverEndpoint, verbose, tikaServerJar, responseMimeType, services)
for path in paths]
def detectLang1(option, urlOrPath, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='text/plain',
services={'file' : '/language/stream'}):
"""Detect the language of the provided stream and return its 2 character code as text/plain."""
path, mode = getRemoteFile(urlOrPath, TikaFilesPath)
if option not in services:
die('Language option must be one of %s ' % binary_string(services.keys()))
service = services[option]
status, response = callServer('put', serverEndpoint, service, open(path, 'r'),
{'Accept': responseMimeType}, verbose, tikaServerJar)
return (status, response)
def doTranslate(option, urlOrPaths, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='text/plain',
services={'all': '/translate/all'}):
"""Translate the file from source language to destination language."""
paths = getPaths(urlOrPaths)
return [doTranslate1(option, path, serverEndpoint, verbose, tikaServerJar, responseMimeType, services)
for path in paths]
def doTranslate1(option, urlOrPath, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='text/plain',
services={'all': '/translate/all'}):
path, mode = getRemoteFile(urlOrPath, TikaFilesPath)
srcLang = ""
destLang = ""
if ":" in option:
options = option.rsplit(':')
if len(options) != 2:
die('Translate options are specified as srcLang:destLang or as destLang')
srcLang = options[0]
destLang = options[1]
else:
destLang = option
if srcLang != "" and destLang != "":
service = services["all"] + "/" + Translator + "/" + srcLang + "/" + destLang
else:
service = services["all"] + "/" + Translator + "/" + destLang
status, response = callServer('put', serverEndpoint, service, open(path, 'r'),
{'Accept' : responseMimeType},
verbose, tikaServerJar)
return (status, response)
def detectType(option, urlOrPaths, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='text/plain',
services={'type': '/detect/stream'}):
"""Detect the MIME/media type of the stream and return it in text/plain."""
paths = getPaths(urlOrPaths)
return [detectType1(option, path, serverEndpoint, verbose, tikaServerJar, responseMimeType, services)
for path in paths]
def detectType1(option, urlOrPath, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='text/plain',
services={'type': '/detect/stream'}):
"""Detect the MIME/media type of the stream and return it in text/plain."""
path, mode = getRemoteFile(urlOrPath, TikaFilesPath)
if option not in services:
die('Detect option must be one of %s' % binary_string(services.keys()))
service = services[option]
status, response = callServer('put', serverEndpoint, service, open(path, 'r'),
{'Accept': responseMimeType, 'Content-Disposition': 'attachment; filename=%s' % os.path.basename(path)},
verbose, tikaServerJar)
return (status, response)
def getConfig(option, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar, responseMimeType='application/json',
services={'mime-types': '/mime-types', 'detectors': '/detectors', 'parsers': '/parsers/details'}):
"""Get the configuration of the Tika Server (parsers, detectors, etc.) and return it in JSON format."""
if option not in services:
die('config option must be one of mime-types, detectors, or parsers')
service = services[option]
status, response = callServer('get', serverEndpoint, service, None, {'Accept': responseMimeType}, verbose, tikaServerJar)
return (status, response)
def callServer(verb, serverEndpoint, service, data, headers, verbose=Verbose, tikaServerJar=TikaServerJar,
httpVerbs={'get': requests.get, 'put': requests.put, 'post': requests.post}):
"""Call the Tika Server, do some error checking, and return the response."""
parsedUrl = urlparse(serverEndpoint)
serverHost = parsedUrl.hostname
port = parsedUrl.port
global TikaClientOnly
if not TikaClientOnly:
serverEndpoint = checkTikaServer(serverHost, port, tikaServerJar)
serviceUrl = serverEndpoint + service
if verb not in httpVerbs:
die('Tika Server call must be one of %s' % binary_string(httpVerbs.keys()))
verbFn = httpVerbs[verb]
if Windows and hasattr(data, "read"):
data = data.read()
encodedData = data
if type(data) is unicode_string:
encodedData = data.encode('utf-8')
resp = verbFn(serviceUrl, encodedData, headers=headers)
if verbose:
print(sys.stderr, "Request headers: ", headers)
print(sys.stderr, "Response headers: ", resp.headers)
if resp.status_code != 200:
warn('Tika server returned status:', resp.status_code)
resp.encoding = "utf-8"
return (resp.status_code, resp.text)
def checkTikaServer(serverHost=ServerHost, port = Port, tikaServerJar=TikaServerJar):
"""Check that tika-server is running. If not, download JAR file and start it up."""
urlp = urlparse(tikaServerJar)
serverEndpoint = 'http://%s:%s' % (serverHost, port)
jarPath = os.path.join(TikaJarPath, 'tika-server.jar')
if 'localhost' in serverEndpoint or '127.0.0.1' in serverEndpoint:
alreadyRunning = checkPortIsOpen(serverHost, port)
if not alreadyRunning:
if not os.path.isfile(jarPath) and urlp.scheme != '':
getRemoteJar(tikaServerJar, jarPath)
if not checkJarSig(tikaServerJar, jarPath):
os.remove(jarPath)
tikaServerJar = getRemoteJar(tikaServerJar, jarPath)
startServer(jarPath, serverHost, port)
return serverEndpoint
def checkJarSig(tikaServerJar, jarPath):
if not os.path.isfile(jarPath + ".md5"):
getRemoteJar(tikaServerJar + ".md5", jarPath + ".md5")
m = hashlib.md5()
with open(jarPath, 'rb') as f:
binContents = f.read()
m.update(binContents)
with open(jarPath + ".md5", "r") as em:
existingContents = em.read()
return existingContents == m.hexdigest()
def startServer(tikaServerJar, serverHost = ServerHost, port = Port):
host = "localhost"
if Windows:
host = "0.0.0.0"
cmd = 'java -jar %s --port %s --host %s &' % (tikaServerJar, port, host)
logFile = open(os.path.join(TikaJarPath, 'tika-server.log'), 'w')
process = Popen(cmd, stdout=logFile, stderr=STDOUT, shell=True)
time.sleep(5)
def getRemoteFile(urlOrPath, destPath):
"""Fetch URL to local path or just return absolute path."""
urlp = urlparse(urlOrPath)
if urlp.scheme == '':
return (os.path.abspath(urlOrPath), 'local')
elif urlp.scheme not in ('http', 'https'):
return (urlOrPath, 'local')
else:
filename = urlOrPath.rsplit('/',1)[1]
destPath = destPath + '/' +filename
echo2('Retrieving %s to %s.' % (urlOrPath, destPath))
try:
urlretrieve(urlOrPath, destPath)
except IOError:
# monkey patch fix for SSL/Windows per Tika-Python #54
# https://github.com/chrismattmann/tika-python/issues/54
import ssl
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# delete whatever we had there
if os.path.exists(destPath) and os.path.isfile(destPath):
os.remove(destPath)
urlretrieve(urlOrPath, destPath)
return (destPath, 'remote')
def getRemoteJar(urlOrPath, destPath):
"""Fetch URL to local path or just return absolute path."""
urlp = urlparse(urlOrPath)
if urlp.scheme == '':
return (os.path.abspath(urlOrPath), 'local')
else:
echo2('Retrieving %s to %s.' % (urlOrPath, destPath))
try:
urlretrieve(urlOrPath, destPath)
except IOError:
# monkey patch fix for SSL/Windows per Tika-Python #54
# https://github.com/chrismattmann/tika-python/issues/54
import ssl
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# delete whatever we had there
if os.path.exists(destPath) and os.path.isfile(destPath):
os.remove(destPath)
urlretrieve(urlOrPath, destPath)
return (destPath, 'remote')
def checkPortIsOpen(remoteServerHost=ServerHost, port = Port):
remoteServerIP = socket.gethostbyname(remoteServerHost)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((remoteServerIP, int(port)))
sock.close()
return result == 0
except KeyboardInterrupt:
print("You pressed Ctrl+C")
sys.exit()
except socket.gaierror:
print('Hostname could not be resolved. Exiting')
sys.exit()
except socket.error:
print("Couldn't connect to server")
sys.exit()
def main(argv=None):
"""Run Tika from command line according to USAGE."""
global Verbose
global EncodeUtf8
if argv is None:
argv = sys.argv
if (len(argv) < 3 and not (('-h' in argv) or ('--help' in argv))): die('Bad args')
try:
opts, argv = getopt.getopt(argv[1:], 'hi:s:o:p:ve',
['help', 'install=', 'server=', 'output=', 'port=', 'verbose', 'encode'])
except getopt.GetoptError as opt_error:
msg, bad_opt = opt_error.msg, opt_error.opt
die("%s error: Bad option: %s, %s" % (argv[0], bad_opt, msg))
tikaServerJar = TikaServerJar
serverHost = ServerHost
outDir = '.'
port = Port
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-i', '--install'): tikaServerJar = val
elif opt in ('-s', '--server'): serverHost = val
elif opt in ('-o', '--output'): outDir = val
elif opt in ('-p', '--port'): port = val
elif opt in ('-v', '--verbose'): Verbose = 1
elif opt in ('-e', '--encode'): EncodeUtf8 = 1
else: die(USAGE)
cmd = argv[0]
option = argv[1]
paths = argv[2:]
return runCommand(cmd, option, paths, port, outDir, serverHost=serverHost, tikaServerJar=tikaServerJar, verbose=Verbose, encode=EncodeUtf8)
if __name__ == '__main__':
resp = main(sys.argv)
if isinstance(resp, list):
print('\n'.join([r[1] for r in resp]))
else:
print(resp)
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import warnings
import os.path as op
import numpy as np
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_allclose
from mne import pick_channels, pick_types, Evoked, Epochs, read_events
from mne.epochs import _BaseEpochs
from mne.io.constants import FIFF
from mne.io import (set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from mne.io.proj import _has_eeg_average_ref_proj
from mne.io.reference import _apply_reference
from mne.datasets import testing
from mne.io import Raw
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
eve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif')
ave_fname = op.join(data_dir, 'sample_audvis_trunc-ave.fif')
def _test_reference(raw, reref, ref_data, ref_from):
"""Helper function to test whether a reference has been correctly
applied."""
# Separate EEG channels from other channel types
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=True, exclude='bads')
# Calculate indices of reference channels
picks_ref = [raw.ch_names.index(ch) for ch in ref_from]
# Get data
if isinstance(raw, Evoked):
_data = raw.data
_reref = reref.data
else:
_data = raw._data
_reref = reref._data
# Check that the ref has been properly computed
assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2))
# Get the raw EEG data and other channel data
raw_eeg_data = _data[..., picks_eeg, :]
raw_other_data = _data[..., picks_other, :]
# Get the rereferenced EEG data
reref_eeg_data = _reref[..., picks_eeg, :]
reref_other_data = _reref[..., picks_other, :]
# Undo rereferencing of EEG channels
if isinstance(raw, _BaseEpochs):
unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :]
else:
unref_eeg_data = reref_eeg_data + ref_data
# Check that the EEG data (after undoing the reference) and the other data match the originals
assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15)
assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15)
@testing.requires_testing_data
def test_apply_reference():
"""Test base function for rereferencing"""
raw = Raw(fif_fname, preload=True)
# Rereference raw data by creating a copy of original data
reref, ref_data = _apply_reference(
raw.copy(), ref_from=['EEG 001', 'EEG 002'])
assert_true(reref.info['custom_ref_applied'])
_test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
# The CAR reference projection should have been removed by the function
assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))
# Test that disabling the reference does not break anything
reref, ref_data = _apply_reference(raw, [])
assert_array_equal(raw._data, reref._data)
# Test that data is modified in place when copy=False
reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'])
assert_true(raw is reref)
# Test re-referencing Epochs object
raw = Raw(fif_fname, preload=False, add_eeg_ref=False)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
reref, ref_data = _apply_reference(
epochs.copy(), ref_from=['EEG 001', 'EEG 002'])
assert_true(reref.info['custom_ref_applied'])
_test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test re-referencing Evoked object
evoked = epochs.average()
reref, ref_data = _apply_reference(
evoked.copy(), ref_from=['EEG 001', 'EEG 002'])
assert_true(reref.info['custom_ref_applied'])
_test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test invalid input
raw_np = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])
@testing.requires_testing_data
def test_set_eeg_reference():
"""Test rereference eeg data"""
raw = Raw(fif_fname, preload=True)
raw.info['projs'] = []
# Test setting an average reference
assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))
reref, ref_data = set_eeg_reference(raw)
assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
assert_true(ref_data is None)
# Test setting an average reference when one was already present
with warnings.catch_warnings(record=True): # weight tables
reref, ref_data = set_eeg_reference(raw, copy=False)
assert_true(ref_data is None)
# Rereference raw data by creating a copy of original data
reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
assert_true(reref.info['custom_ref_applied'])
_test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test that data is modified in place when copy=False
reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
copy=False)
assert_true(raw is reref)
@testing.requires_testing_data
def test_set_bipolar_reference():
"""Test bipolar referencing"""
raw = Raw(fif_fname, preload=True)
reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar',
{'kind': FIFF.FIFFV_EOG_CH,
'extra': 'some extra value'})
assert_true(reref.info['custom_ref_applied'])
# Compare result to a manual calculation
a = raw.copy().pick_channels(['EEG 001', 'EEG 002'])
a = a._data[0, :] - a._data[1, :]
b = reref.copy().pick_channels(['bipolar'])._data[0, :]
assert_allclose(a, b)
# Original channels should be replaced by a virtual one
assert_true('EEG 001' not in reref.ch_names)
assert_true('EEG 002' not in reref.ch_names)
assert_true('bipolar' in reref.ch_names)
# Check channel information
bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]
an_info = reref.info['chs'][raw.ch_names.index('EEG 001')]
for key in bp_info:
if key == 'loc':
assert_array_equal(bp_info[key], 0)
elif key == 'coil_type':
assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR)
elif key == 'kind':
assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH)
else:
assert_equal(bp_info[key], an_info[key])
assert_equal(bp_info['extra'], 'some extra value')
# Minimalist call
reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002')
assert_true('EEG 001-EEG 002' in reref.ch_names)
# Set multiple references at once
reref = set_bipolar_reference(
raw,
['EEG 001', 'EEG 003'],
['EEG 002', 'EEG 004'],
['bipolar1', 'bipolar2'],
[{'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'},
{'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'}],
)
a = raw.copy().pick_channels(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004'])
a = np.array([a._data[0, :] - a._data[1, :],
a._data[2, :] - a._data[3, :]])
b = reref.copy().pick_channels(['bipolar1', 'bipolar2'])._data
assert_allclose(a, b)
# Test creating a bipolar reference that doesn't involve EEG channels:
# it should not set the custom_ref_applied flag
reref = set_bipolar_reference(raw, 'MEG 0111', 'MEG 0112',
ch_info={'kind': FIFF.FIFFV_MEG_CH})
assert_true(not reref.info['custom_ref_applied'])
assert_true('MEG 0111-MEG 0112' in reref.ch_names)
# Test a battery of invalid inputs
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar')
assert_raises(ValueError, set_bipolar_reference, raw,
['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2'])
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', 'bipolar',
ch_info=[{'foo': 'bar'}, {'foo': 'bar'}])
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', ch_name='EEG 003')
def _check_channel_names(inst, ref_names):
if isinstance(ref_names, str):
ref_names = [ref_names]
# Test that the names of the reference channels are present in `ch_names`
ref_idx = pick_channels(inst.info['ch_names'], ref_names)
assert_equal(len(ref_idx), len(ref_names))
# Test that the names of the reference channels are present in the `chs`
# list
inst.info._check_consistency() # Should raise no exceptions
@testing.requires_testing_data
def test_add_reference():
raw = Raw(fif_fname, preload=True)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
# check if channel already exists
assert_raises(ValueError, add_reference_channels,
raw, raw.info['ch_names'][0])
# add reference channel to Raw
raw_ref = add_reference_channels(raw, 'Ref', copy=True)
assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
_check_channel_names(raw_ref, 'Ref')
orig_nchan = raw.info['nchan']
raw = add_reference_channels(raw, 'Ref', copy=False)
assert_array_equal(raw._data, raw_ref._data)
assert_equal(raw.info['nchan'], orig_nchan + 1)
_check_channel_names(raw, 'Ref')
ref_idx = raw.ch_names.index('Ref')
ref_data, _ = raw[ref_idx]
assert_array_equal(ref_data, 0)
raw = Raw(fif_fname, preload=True)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
# Test adding an existing channel as reference channel
assert_raises(ValueError, add_reference_channels, raw,
raw.info['ch_names'][0])
# add two reference channels to Raw
raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
_check_channel_names(raw_ref, ['M1', 'M2'])
assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
assert_array_equal(raw_ref._data[-2:, :], 0)
raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
_check_channel_names(raw, ['M1', 'M2'])
ref_idx = raw.ch_names.index('M1')
ref_idy = raw.ch_names.index('M2')
ref_data, _ = raw[[ref_idx, ref_idy]]
assert_array_equal(ref_data, 0)
# add reference channel to epochs
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)
assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
_check_channel_names(epochs_ref, 'Ref')
ref_idx = epochs_ref.ch_names.index('Ref')
ref_data = epochs_ref.get_data()[:, ref_idx, :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
assert_array_equal(epochs.get_data()[:, picks_eeg, :],
epochs_ref.get_data()[:, picks_eeg, :])
# add two reference channels to epochs
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
_check_channel_names(epochs_ref, ['M1', 'M2'])
ref_idx = epochs_ref.ch_names.index('M1')
ref_idy = epochs_ref.ch_names.index('M2')
assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')
assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')
ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
assert_array_equal(epochs.get_data()[:, picks_eeg, :],
epochs_ref.get_data()[:, picks_eeg, :])
# add reference channel to evoked
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
evoked = epochs.average()
evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
_check_channel_names(evoked_ref, 'Ref')
ref_idx = evoked_ref.ch_names.index('Ref')
ref_data = evoked_ref.data[ref_idx, :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
assert_array_equal(evoked.data[picks_eeg, :],
evoked_ref.data[picks_eeg, :])
# add two reference channels to evoked
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
evoked = epochs.average()
evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
_check_channel_names(evoked_ref, ['M1', 'M2'])
ref_idx = evoked_ref.ch_names.index('M1')
ref_idy = evoked_ref.ch_names.index('M2')
ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
assert_array_equal(evoked.data[picks_eeg, :],
evoked_ref.data[picks_eeg, :])
# Test invalid inputs
raw_np = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
assert_raises(ValueError, add_reference_channels, raw, 1)
#!/usr/bin/env python
import argparse
import atexit
import configparser
import getpass
import hashlib
import logging
import logging.handlers
import math
import multiprocessing
import os
import requests
import re
import sys
from collections import defaultdict
try:
from json.decoder import JSONDecodeError
except ImportError:
class JSONDecodeError(ValueError):
pass
from queue import Empty
from urllib.parse import urlencode
NAME = 'wasapi_client' if __name__ == '__main__' else __name__
LOGGER = logging.getLogger(NAME)
READ_LIMIT = 1024 * 512
PROFILE_PATH = os.path.join(os.path.expanduser('~'), '.wasapi-client')
PRE_SIGNED_REGEX = [re.compile(r'https://.*\.s3\.amazonaws\.com/.*[?].*Signature=.+')]
def start_listener_logging(log_q, path=''):
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
if path:
handler = logging.FileHandler(filename=path)
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
# Get records from the queue and send them to the handler.
listener = logging.handlers.QueueListener(log_q, handler)
listener.start()
return listener
def configure_main_logging(log_q, log_level=logging.ERROR):
"""Put a handler on the root logger.
This allows handling log records from imported modules.
"""
root = logging.getLogger()
root.addHandler(logging.handlers.QueueHandler(log_q))
root.setLevel(log_level)
def configure_worker_logging(log_q, log_level=logging.ERROR):
"""Configure logging for worker processes."""
# Remove any existing handlers.
LOGGER.handlers = []
# Prevent root logger duplicating messages.
LOGGER.propagate = False
LOGGER.addHandler(logging.handlers.QueueHandler(log_q))
LOGGER.setLevel(log_level)
class WASAPIDownloadError(Exception):
pass
class WASAPIManifestError(Exception):
pass
def make_session(auth=None, headers={}):
"""Make a session that will store our auth.
`auth` is a tuple of the form (user, password)
"""
session = requests.Session()
session.auth = auth
session.headers.update(headers)
return session
def get_webdata(webdata_uri, session):
"""Make a request to the WASAPI."""
try:
response = session.get(webdata_uri)
except requests.exceptions.ConnectionError as err:
sys.exit('Could not connect at {}:\n{}'.format(webdata_uri, err))
LOGGER.info('requesting {}'.format(webdata_uri))
if response.status_code == 403:
sys.exit('Verify user/password for {}:\n{} {}'.format(webdata_uri,
response.status_code,
response.reason))
try:
return response.json()
except (JSONDecodeError, ValueError) as err:
sys.exit('Non-JSON response from {}:\n{}'.format(webdata_uri, err))
def get_files_count(webdata_uri, auth=None, headers={}):
"""Return total number of downloadable files."""
session = make_session(auth, headers)
webdata = get_webdata(webdata_uri, session)
session.close()
return webdata.get('count', None)
def get_files_size(page_uri, auth=None, headers={}):
"""Return total size (bytes) of downloadable files."""
session = make_session(auth, headers)
total = 0
count = 0
webdata = None
while page_uri:
webdata = get_webdata(page_uri, session)
for f in webdata['files']:
total += int(f['size'])
page_uri = webdata.get('next', None)
if webdata:
count = webdata.get('count', None)
session.close()
return count, total
def convert_bytes(size):
"""Make a human readable size."""
label = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
try:
i = int(math.floor(math.log(size, 1024)))
except ValueError:
i = 0
p = math.pow(1024, i)
readable_size = round(size/p, 2)
return '{}{}'.format(readable_size, label[i])
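# For example, convert_bytes(1536) floors log(1536, 1024) to 1 and returns
# '1.5KB'; sizes under 1024 bytes fall back to the 'B' label.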
class Downloads:
"""Handles cycling through all of our query results.
If download is True, we create a queue of the files that need to be
downloaded. The checksums/filenames for each available hash algorithm
are stored so that manifest files can be generated.
"""
def __init__(self, page_uri, auth=None, download=True, destination='',
headers={}):
self.page_uri = page_uri
self.auth = auth
self.download = download
if self.download:
self.get_q = multiprocessing.JoinableQueue()
self.checksums = defaultdict(list)
self.urls = []
self.destination = '' if destination == '.' else destination
self.headers = headers
self.populate_downloads()
def populate_downloads(self):
"""Repeat webdata requests to gather downloadable file info."""
session = make_session(self.auth, self.headers)
current_uri = self.page_uri
while current_uri:
webdata = get_webdata(current_uri, session)
for f in webdata['files']:
# Store the first locations URL per file only.
self.urls.append(f['locations'][0])
path = os.path.join(self.destination, f['filename'])
for algorithm, value in f['checksums'].items():
self.checksums[algorithm].append((value, path))
if self.download:
df = DataFile(f['locations'], f['filename'], f['checksums'], f['size'])
self.get_q.put(df)
current_uri = webdata.get('next', None)
session.close()
def generate_manifests(self):
"""Produce manifest files for all hash algorithms."""
for algorithm in self.checksums:
self.write_manifest_file(algorithm)
def write_manifest_file(self, algorithm):
"""Write a manifest file for the provided algorithm."""
if algorithm not in self.checksums:
raise WASAPIManifestError('No values for {}'.format(algorithm))
manifest_path = os.path.join(self.destination,
'manifest-{}.txt'.format(algorithm))
with open(manifest_path, 'w') as manifest_f:
for checksum, path in self.checksums[algorithm]:
manifest_f.write('{} {}\n'.format(checksum, path))
class DataFile:
"""Representation of a file to be downloaded.
`locations` is a list of URLs
`filename` is the name of the data file
`size` is the size of the file in bytes
`checksums` is a dictionary of hash algorithm/value pairs
`verified` is a Boolean value indicating a successful checksum verification
"""
def __init__(self, locations, filename, checksums, size):
self.locations = locations
self.filename = filename
self.checksums = checksums
self.size = size
self.verified = False
def download_file(data_file, session, output_path):
"""Download webdata file to disk."""
if check_exists(output_path, data_file.size, data_file.checksums):
# Don't download the file if it already exists.
LOGGER.info('{} exists with expected size/checksum'.format(data_file.filename))
data_file.verified = True
return data_file
for location in data_file.locations:
# if location matches a 'pre-signed' url regex pattern,
# skip auth for this location
sesh = session
for rx in PRE_SIGNED_REGEX:
if rx.match(location):
sesh = requests
break
try:
response = sesh.get(location, stream=True)
except requests.exceptions.RequestException as err:
# This could be a remote disconnect, read timeout, connection timeout,
# temporary name resolution issue...
LOGGER.error('Error downloading {}:\n{}'.format(location, err))
continue
msg = '{}: {} {}'.format(location,
response.status_code,
response.reason)
if response.status_code == 200:
try:
write_file(response, output_path)
except OSError as err:
LOGGER.error('{}: {}'.format(location, str(err)))
break
# Successful download; don't try alternate locations.
LOGGER.info(msg)
return data_file
else:
LOGGER.error(msg)
# We didn't download successfully; raise error.
msg = 'FAILED to download {} from {}'.format(data_file.filename,
data_file.locations)
raise WASAPIDownloadError(msg)
def check_exists(path, size, checksums):
"""Check if file with matching size and checksum exists."""
if not os.path.isfile(path):
return False
if not os.path.getsize(path) == size:
return False
return verify_file(checksums, path)
def write_file(response, output_path=''):
"""Write file to disk."""
with open(output_path, 'wb') as wtf:
for chunk in response.iter_content(1024*4):
wtf.write(chunk)
def verify_file(checksums, file_path):
"""Verify the file checksum is correct.
Takes a dictionary of hash algorithms and the corresponding
expected value for the file_path provided. The first success
or failure determines if the file is valid.
"""
for algorithm, value in checksums.items():
read_limit = READ_LIMIT
hash_function = getattr(hashlib, algorithm, None)
if not hash_function and algorithm == 's3etag':
# if the etag does not contain a '-', then it's just a regular md5
if '-' not in value:
hash_function = hashlib.md5
# otherwise, it's likely a 'double-md5'
# see: https://zihao.me/post/calculating-etag-for-aws-s3-objects/
else:
hash_function = S3DoubleMD5
# expected chunk size for S3 md5 computation
read_limit = 1024 * 1024 * 8
if not hash_function:
# The hash algorithm provided is not supported by hashlib.
LOGGER.debug('{} is unsupported'.format(algorithm))
continue
digest = calculate_sum(hash_function, file_path, read_limit)
if digest == value:
LOGGER.info('Checksum success at: {}'.format(file_path))
return True
else:
LOGGER.error('Checksum {} mismatch for {}: expected {}, got {}'.format(algorithm,
file_path,
value,
digest))
return False
# We didn't find a compatible algorithm.
return False
class S3DoubleMD5:
"""Implements double-md5 computation as suggested by:
https://zihao.me/post/calculating-etag-for-aws-s3-objects/
"""
def __init__(self):
self.md5s = []
def update(self, buff):
self.md5s.append(hashlib.md5(buff))
def hexdigest(self):
if len(self.md5s) == 1:
return self.md5s[0].hexdigest()
digests = b''.join(m.digest() for m in self.md5s)
digests_md5 = hashlib.md5(digests)
return '{}-{}'.format(digests_md5.hexdigest(), len(self.md5s))
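# Rough sketch of the S3 ETag behaviour this mirrors: two 8MB chunks hash to
# something like '<md5 of the concatenated chunk digests>-2', while a single
# chunk degenerates to a plain md5 hex digest (non-multipart upload).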
def calculate_sum(hash_function, file_path, read_limit=READ_LIMIT):
"""Return the checksum of the given file."""
hasher = hash_function()
with open(file_path, 'rb') as rff:
r = rff.read(read_limit)
while r:
hasher.update(r)
r = rff.read(read_limit)
return hasher.hexdigest()
def convert_queue(tuple_q):
"""Convert a queue containing 2-element tuples into a dictionary.
The first element becomes a key. The key's value becomes a list
to which the second tuple element is appended.
"""
ddict = defaultdict(list)
while True:
try:
key, value = tuple_q.get(block=False)
except Empty:
break
ddict[key].append(value)
return ddict
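# Illustrative sketch (filenames invented): a result queue holding
# ('success', 'a.warc.gz'), ('failure', 'b.warc.gz'), ('success', 'c.warc.gz')
# converts to {'success': ['a.warc.gz', 'c.warc.gz'], 'failure': ['b.warc.gz']},
# which generate_report() below tallies into the printed summary.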
def generate_report(result_q):
"""Create a summary of success/failure downloads."""
results = convert_queue(result_q)
success = len(results.get('success', []))
failure = len(results.get('failure', []))
total = success + failure
summary = ('Total downloads attempted: {}\n'
'Successful downloads: {}\n'
'Failed downloads: {}\n').format(total, success, failure)
if total != failure and failure > 0:
summary += 'Failed files (see log for details):\n'
for filename in results['failure']:
summary += ' {}\n'.format(filename)
return summary
class Downloader(multiprocessing.Process):
"""Worker for downloading web files with a persistent session."""
def __init__(self, get_q, result_q, log_q, log_level=logging.ERROR,
auth=None, destination='.', headers={}, *args, **kwargs):
super(Downloader, self).__init__(*args, **kwargs)
self.get_q = get_q
self.result_q = result_q
self.session = make_session(auth, headers)
self.destination = destination
configure_worker_logging(log_q, log_level)
def run(self):
"""Download files from the queue until there are no more.
Gets a file's data off the queue, attempts to download the
file, and puts the result onto another queue.
"""
while True:
try:
data_file = self.get_q.get(block=False)
except Empty:
break
result = 'failure'
output_path = os.path.join(self.destination, data_file.filename)
try:
data_file = download_file(data_file, self.session, output_path)
except WASAPIDownloadError as err:
LOGGER.error(str(err))
else:
# If we download the file without error, verify the checksum.
if data_file.verified or verify_file(data_file.checksums, output_path):
result = 'success'
self.result_q.put((result, data_file.filename))
self.get_q.task_done()
class SetQueryParametersAction(argparse.Action):
"""Store all of the query parameter argument values in a dict."""
def __call__(self, parser, namespace, values, option_string):
if not hasattr(namespace, 'query_params'):
setattr(namespace, 'query_params', {})
option = option_string.lstrip('-')
namespace.query_params[option] = values
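# Illustrative sketch (values invented): because --collection uses
# SetQueryParametersAction with nargs='+', running the client with
#   --collection 12345 --crawl-time-after 2017-01-01
# leaves args.query_params == {'collection': ['12345'],
#                              'crawl-time-after': '2017-01-01'},
# which main() later urlencodes into the webdata request query string.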
def _parse_args(args=sys.argv[1:]):
"""Parse the commandline arguments."""
description = """
Download WARC files from a WASAPI access point.
Acceptable date/time formats are:
2017-01-01
2017-01-01T12:34:56
2017-01-01 12:34:56
2017-01-01T12:34:56Z
2017-01-01 12:34:56-0700
2017
2017-01"""
try:
# According to multiprocessing docs, this could fail on some platforms.
default_processes = multiprocessing.cpu_count()
except NotImplementedError:
default_processes = 1
parser = argparse.ArgumentParser(description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-b',
'--base-uri',
dest='base_uri',
default='https://partner.archive-it.org/wasapi/v1/webdata',
help='base URI for WASAPI access; default: '
'https://partner.archive-it.org/wasapi/v1/webdata')
parser.add_argument('-d',
'--destination',
default='.',
help='location for storing downloaded files')
parser.add_argument('-l',
'--log',
help='file to which logging should be written')
parser.add_argument('-n',
'--no-manifest',
action='store_true',
dest='skip_manifest',
help='do not generate checksum files (ignored'
' when used in combination with --manifest)')
parser.add_argument('-v',
'--verbose',
action='count',
default=0,
help='log verbosely; -v is INFO, -vv is DEBUG')
auth_group = parser.add_mutually_exclusive_group()
auth_group.add_argument('--profile',
dest='profile',
help='profile to use for API authentication')
auth_group.add_argument('-u',
'--user',
dest='user',
help='username for API authentication')
auth_group.add_argument('-t',
'--token',
dest='token',
help='token for API authentication')
out_group = parser.add_mutually_exclusive_group()
out_group.add_argument('-c',
'--count',
action='store_true',
help='print number of files for download and exit')
out_group.add_argument('-m',
'--manifest',
action='store_true',
help='generate checksum files only and exit')
out_group.add_argument('-p',
'--processes',
type=int,
default=default_processes,
help='number of WARC downloading processes')
out_group.add_argument('-s',
'--size',
action='store_true',
help='print count and total size of files and exit')
out_group.add_argument('-r',
'--urls',
action='store_true',
help='list URLs for downloadable files only and exit')
# Arguments to become part of query parameter string
param_group = parser.add_argument_group('query parameters',
'parameters for webdata request')
param_group.add_argument('--collection',
action=SetQueryParametersAction,
nargs='+',
help='collection identifier')
param_group.add_argument('--filename',
action=SetQueryParametersAction,
help='exact webdata filename to download')
param_group.add_argument('--crawl',
action=SetQueryParametersAction,
help='crawl job identifier')
param_group.add_argument('--crawl-time-after',
action=SetQueryParametersAction,
help='request files created on or after this '
'date/time')
param_group.add_argument('--crawl-time-before',
action=SetQueryParametersAction,
help='request files created before this date/time')
param_group.add_argument('--crawl-start-after',
action=SetQueryParametersAction,
help='request files from crawl jobs starting on '
'or after this date/time')
param_group.add_argument('--crawl-start-before',
action=SetQueryParametersAction,
help='request files from crawl jobs starting '
'before this date/time')
return parser.parse_args(args)
def get_credentials_env():
"""Get API credentials from environment variables."""
env = os.environ.get
auth = (env('WASAPI_USER'), env('WASAPI_PASS'))
if None in auth:
auth = None
else:
LOGGER.debug('Using API credentials from environment variables')
return auth
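# Illustrative shell usage (variable values are placeholders): exporting both
# variables before a run lets get_credentials_env() supply the auth tuple
# without prompting:
#   export WASAPI_USER=someuser
#   export WASAPI_PASS=secretpasswd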
def get_credentials_config(profile, path=PROFILE_PATH):
"""Get API credentials from a config file."""
config = configparser.ConfigParser()
try:
        with open(path) as config_file:
            config.read_file(config_file)
auth = (config.get(profile, 'username'),
config.get(profile, 'password'))
except (OSError,
configparser.NoSectionError,
configparser.NoOptionError) as err:
sys.exit('{}: please create config file to supply API credentials with format:\n\n'
'[{}]\n'
'username = someuser\n'
'password = secretpasswd\n'.format(err, profile))
LOGGER.debug('Using API credentials from {}'.format(path))
return auth
def get_credentials(user=None, profile=None):
"""Determine a username/password combination if one is supplied.
Order of precedence is command line, environment, config file."""
auth = None
if user:
# If there is a username, prompt for a password.
auth = (user, getpass.getpass())
else:
# Check for credentials in environment variables.
auth = get_credentials_env()
if profile and auth is None:
# Check for credentials in a config file.
auth = get_credentials_config(profile)
return auth
def main():
args = _parse_args()
if (not os.access(args.destination, os.W_OK)
and not args.size
and not args.count):
msg = 'Cannot write to destination: {}'.format(args.destination)
sys.exit(msg)
# Start log writing process.
manager = multiprocessing.Manager()
log_q = manager.Queue()
try:
listener = start_listener_logging(log_q, args.log)
except OSError as err:
print('Could not open file for logging:', err)
sys.exit(1)
@atexit.register
def stop_listener_logging():
"""Stop listener when exiting program normally."""
listener.stop()
# Configure a logger for the main process.
try:
log_level = [logging.ERROR, logging.INFO, logging.DEBUG][args.verbose]
except IndexError:
log_level = logging.DEBUG
configure_main_logging(log_q, log_level)
# Generate query string for the webdata request.
try:
query = '?{}'.format(urlencode(args.query_params, safe=':', doseq=True))
except AttributeError:
# Use empty query if user didn't enter any query parameters.
query = ''
webdata_uri = '{}{}'.format(args.base_uri, query)
# Set up authentication.
auth = None
headers = {}
if args.token:
# Set the HTTP Authentication header.
headers['Authorization'] = 'Token {}'.format(args.token)
else:
# Generate authentication tuple for the API calls.
auth = get_credentials(args.user, args.profile)
# If user wants the size, don't download files.
if args.size:
count, size = get_files_size(webdata_uri, auth, headers)
print('Number of Files: ', count)
print('Size of Files: ', convert_bytes(size))
sys.exit()
# If user wants a count, don't download files.
if args.count:
print('Number of Files: ', get_files_count(webdata_uri, auth, headers))
sys.exit()
# Process webdata requests to generate checksum files.
if args.manifest:
downloads = Downloads(webdata_uri, auth, download=False,
destination=args.destination, headers=headers)
downloads.generate_manifests()
sys.exit()
# Print the URLs for files that can be downloaded; don't download them.
if args.urls:
downloads = Downloads(webdata_uri, auth, download=False,
destination=args.destination, headers=headers)
for url in downloads.urls:
print(url)
sys.exit()
# Process webdata requests to fill webdata file queue.
downloads = Downloads(webdata_uri, auth, download=True,
destination=args.destination, headers=headers)
# Write manifest file(s).
if not args.skip_manifest:
downloads.generate_manifests()
# Download with multiple processes.
get_q = downloads.get_q
result_q = manager.Queue()
download_processes = []
try:
num_processes = min(args.processes, get_q.qsize())
except NotImplementedError:
num_processes = args.processes
for _ in range(num_processes):
dp = Downloader(get_q, result_q, log_q, log_level, auth,
args.destination, headers=headers)
dp.start()
download_processes.append(dp)
for dp in download_processes:
dp.join()
get_q.join()
print(generate_report(result_q))
if __name__ == '__main__':
main()
|
|
"""Computes saliency map for each storm object and each CNN component.
CNN = convolutional neural network
"""
import copy
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import argparse
import numpy
import keras.models
from keras import backend as K
from gewittergefahr.gg_io import storm_tracking_io as tracking_io
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import testing_io
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.deep_learning import model_interpretation
from gewittergefahr.deep_learning import saliency_maps
K.set_session(K.tf.Session(config=K.tf.ConfigProto(
intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
allow_soft_placement=False
)))
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
CONV_LAYER_TYPE_STRINGS = ['Conv1D', 'Conv2D', 'Conv3D']
DENSE_LAYER_TYPE_STRINGS = ['Dense']
CLASS_COMPONENT_TYPE_STRING = model_interpretation.CLASS_COMPONENT_TYPE_STRING
NEURON_COMPONENT_TYPE_STRING = model_interpretation.NEURON_COMPONENT_TYPE_STRING
CHANNEL_COMPONENT_TYPE_STRING = (
model_interpretation.CHANNEL_COMPONENT_TYPE_STRING
)
MODEL_FILE_ARG_NAME = 'model_file_name'
COMPONENT_TYPE_ARG_NAME = 'component_type_string'
TARGET_CLASS_ARG_NAME = 'target_class'
LAYER_NAME_ARG_NAME = 'layer_name'
IDEAL_ACTIVATION_ARG_NAME = 'ideal_activation'
NEURON_INDICES_ARG_NAME = 'neuron_indices'
CHANNEL_INDEX_ARG_NAME = 'channel_index'
EXAMPLE_DIR_ARG_NAME = 'input_example_dir_name'
STORM_METAFILE_ARG_NAME = 'input_storm_metafile_name'
NUM_EXAMPLES_ARG_NAME = 'num_examples'
RANDOMIZE_ARG_NAME = 'randomize_weights'
CASCADING_ARG_NAME = 'cascading_random'
MULTIPLY_BY_INPUT_ARG_NAME = 'multiply_by_input'
OUTPUT_FILE_ARG_NAME = 'output_file_name'
MODEL_FILE_HELP_STRING = (
'Path to input file, containing a trained CNN. Will be read by '
'`cnn.read_model`.')
COMPONENT_TYPE_HELP_STRING = (
'Component type. Saliency maps may be computed for one class, one/many '
'neurons, or one/many channels. Valid options are listed below.\n{0:s}'
).format(str(model_interpretation.VALID_COMPONENT_TYPE_STRINGS))
TARGET_CLASS_HELP_STRING = (
'[used only if {0:s} = "{1:s}"] Saliency maps will be computed for each '
'storm object and each class k, where k = `{2:s}`.'
).format(COMPONENT_TYPE_ARG_NAME, CLASS_COMPONENT_TYPE_STRING,
TARGET_CLASS_ARG_NAME)
LAYER_NAME_HELP_STRING = (
'[used only if {0:s} = "{1:s}" or "{2:s}"] Name of layer with neurons or '
'channels for which saliency maps will be computed.'
).format(COMPONENT_TYPE_ARG_NAME, NEURON_COMPONENT_TYPE_STRING,
         CHANNEL_COMPONENT_TYPE_STRING)
IDEAL_ACTIVATION_HELP_STRING = (
'[used only if {0:s} = "{1:s}" or "{2:s}"] The loss function will be '
'(neuron_activation - ideal_activation)**2 or [max(channel_activations) - '
'ideal_activation]**2. If {3:s} = -1, the loss function will be '
'-sign(neuron_activation) * neuron_activation**2 or '
'-sign(max(channel_activations)) * max(channel_activations)**2.'
).format(COMPONENT_TYPE_ARG_NAME, NEURON_COMPONENT_TYPE_STRING,
CHANNEL_COMPONENT_TYPE_STRING, IDEAL_ACTIVATION_ARG_NAME)
NEURON_INDICES_HELP_STRING = (
'[used only if {0:s} = "{1:s}"] Indices of neuron whose saliency map is to '
'be computed. For example, to compute saliency maps for neuron (0, 0, 2), '
'this argument should be "0 0 2".'
).format(COMPONENT_TYPE_ARG_NAME, NEURON_COMPONENT_TYPE_STRING)
CHANNEL_INDEX_HELP_STRING = (
'[used only if {0:s} = "{1:s}"] Index of channel whose saliency map is to '
'be computed.'
).format(COMPONENT_TYPE_ARG_NAME, CHANNEL_COMPONENT_TYPE_STRING)
EXAMPLE_DIR_HELP_STRING = (
'Name of top-level directory with input examples. Files therein will be '
'found by `input_examples.find_example_file` and read by '
'`input_examples.read_example_file`.')
STORM_METAFILE_HELP_STRING = (
'Path to Pickle file with storm IDs and times. Will be read by '
'`storm_tracking_io.read_ids_and_times`.')
NUM_EXAMPLES_HELP_STRING = (
'Number of examples (storm objects) to read from `{0:s}`. If you want to '
'read all examples, make this non-positive.'
).format(STORM_METAFILE_ARG_NAME)
RANDOMIZE_HELP_STRING = (
'Boolean flag. If 1, will randomize weights in each convolutional and '
'dense layer before producing saliency maps. This allows the '
'model-parameter-randomization test from Adebayo et al. (2018) to be '
'carried out.')
CASCADING_HELP_STRING = (
'[used only if `{0:s}` = 1] Boolean flag. If 1, will randomize weights in '
'a cascading manner, going from the deepest to shallowest layer. In this '
'case, when weights for layer L are randomized, weights for all deeper '
'layers are randomized as well. If 0, will do non-cascading randomization,'
' where weights for only one layer are randomized at a time.'
).format(RANDOMIZE_ARG_NAME)
MULTIPLY_BY_INPUT_HELP_STRING = (
'Boolean flag. If 1, will multiply by input, yielding input * gradient '
'maps. If 0, will not multiply by input, leaving saliency maps as saliency'
' maps.'
)
OUTPUT_FILE_HELP_STRING = (
'Path to output file (will be written by '
'`saliency_maps.write_standard_file`).'
)
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + MODEL_FILE_ARG_NAME, type=str, required=True,
help=MODEL_FILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + COMPONENT_TYPE_ARG_NAME, type=str, required=True,
help=COMPONENT_TYPE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + TARGET_CLASS_ARG_NAME, type=int, required=False, default=1,
help=TARGET_CLASS_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LAYER_NAME_ARG_NAME, type=str, required=False, default='',
help=LAYER_NAME_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + IDEAL_ACTIVATION_ARG_NAME, type=float, required=False,
default=saliency_maps.DEFAULT_IDEAL_ACTIVATION,
help=IDEAL_ACTIVATION_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NEURON_INDICES_ARG_NAME, type=int, nargs='+', required=False,
default=[-1], help=NEURON_INDICES_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + CHANNEL_INDEX_ARG_NAME, type=int, required=False, default=-1,
help=CHANNEL_INDEX_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + EXAMPLE_DIR_ARG_NAME, type=str, required=True,
help=EXAMPLE_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + STORM_METAFILE_ARG_NAME, type=str, required=True,
help=STORM_METAFILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_EXAMPLES_ARG_NAME, type=int, required=False, default=-1,
help=NUM_EXAMPLES_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + RANDOMIZE_ARG_NAME, type=int, required=False, default=0,
help=RANDOMIZE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + CASCADING_ARG_NAME, type=int, required=False, default=0,
help=CASCADING_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MULTIPLY_BY_INPUT_ARG_NAME, type=int, required=False, default=0,
help=MULTIPLY_BY_INPUT_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_FILE_ARG_NAME, type=str, required=True,
help=OUTPUT_FILE_HELP_STRING
)
def _find_conv_and_dense_layers(model_object):
"""Finds convolutional and dense layers in model object.
:param model_object: Trained instance of `keras.models.Model` or
`keras.models.Sequential`.
:return: layer_names: 1-D list with names of convolutional and dense layers.
"""
layer_names = [l.name for l in model_object.layers]
layer_type_strings = [type(l).__name__ for l in model_object.layers]
conv_or_dense_flags = numpy.array([
t in CONV_LAYER_TYPE_STRINGS + DENSE_LAYER_TYPE_STRINGS
for t in layer_type_strings
], dtype=bool)
conv_or_dense_indices = numpy.where(conv_or_dense_flags)[0]
return [layer_names[k] for k in conv_or_dense_indices]
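# Illustrative sketch (layer names hypothetical): for a typical CNN read by
# `cnn.read_model`, this returns something like
#   ['conv2d_1', 'conv2d_2', 'conv2d_3', 'dense_1', 'dense_2', 'dense_3'],
# which _run() reverses so that weight randomization proceeds from the deepest
# layer back toward the input.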
def _reset_weights_in_layer(model_object, layer_name):
"""Resets (or "reinitializes" or "randomizes") weights in one layer.
:param model_object: Trained instance of `keras.models.Model` or
`keras.models.Sequential`.
:param layer_name: Name of layer in which to reset weights.
"""
session_object = K.get_session()
layer_object = model_object.get_layer(name=layer_name)
layer_object.kernel.initializer.run(session=session_object)
def _run(model_file_name, component_type_string, target_class, layer_name,
ideal_activation, neuron_indices, channel_index, top_example_dir_name,
storm_metafile_name, num_examples, randomize_weights, cascading_random,
multiply_by_input, output_file_name):
"""Computes saliency map for each storm object and each model component.
This is effectively the main method.
:param model_file_name: See documentation at top of file.
:param component_type_string: Same.
:param target_class: Same.
:param layer_name: Same.
:param ideal_activation: Same.
:param neuron_indices: Same.
:param channel_index: Same.
:param top_example_dir_name: Same.
:param storm_metafile_name: Same.
:param num_examples: Same.
:param randomize_weights: Same.
:param cascading_random: Same.
:param multiply_by_input: Same.
:param output_file_name: Same.
"""
# Check input args.
file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
model_interpretation.check_component_type(component_type_string)
# Read model and metadata.
print('Reading model from: "{0:s}"...'.format(model_file_name))
model_object = cnn.read_model(model_file_name)
model_metafile_name = '{0:s}/model_metadata.p'.format(
os.path.split(model_file_name)[0]
)
print('Reading model metadata from: "{0:s}"...'.format(model_metafile_name))
model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
training_option_dict[trainval_io.REFLECTIVITY_MASK_KEY] = None
output_dir_name, pathless_output_file_name = os.path.split(output_file_name)
extensionless_output_file_name, output_file_extension = os.path.splitext(
pathless_output_file_name)
if randomize_weights:
conv_dense_layer_names = _find_conv_and_dense_layers(model_object)
conv_dense_layer_names.reverse()
num_sets = len(conv_dense_layer_names)
else:
conv_dense_layer_names = []
num_sets = 1
print('Reading storm metadata from: "{0:s}"...'.format(storm_metafile_name))
full_storm_id_strings, storm_times_unix_sec = (
tracking_io.read_ids_and_times(storm_metafile_name)
)
print(SEPARATOR_STRING)
if 0 < num_examples < len(full_storm_id_strings):
full_storm_id_strings = full_storm_id_strings[:num_examples]
storm_times_unix_sec = storm_times_unix_sec[:num_examples]
example_dict = testing_io.read_predictors_specific_examples(
top_example_dir_name=top_example_dir_name,
desired_full_id_strings=full_storm_id_strings,
desired_times_unix_sec=storm_times_unix_sec,
option_dict=training_option_dict,
layer_operation_dicts=model_metadata_dict[cnn.LAYER_OPERATIONS_KEY]
)
print(SEPARATOR_STRING)
predictor_matrices = example_dict[testing_io.INPUT_MATRICES_KEY]
sounding_pressure_matrix_pa = example_dict[
testing_io.SOUNDING_PRESSURES_KEY]
denorm_predictor_matrices = trainval_io.separate_shear_and_reflectivity(
list_of_input_matrices=copy.deepcopy(predictor_matrices),
training_option_dict=training_option_dict)
print('Denormalizing model inputs...')
denorm_predictor_matrices = model_interpretation.denormalize_data(
list_of_input_matrices=denorm_predictor_matrices,
model_metadata_dict=model_metadata_dict)
print(SEPARATOR_STRING)
for k in range(num_sets):
if randomize_weights:
if cascading_random:
_reset_weights_in_layer(
model_object=model_object,
layer_name=conv_dense_layer_names[k]
)
this_model_object = model_object
this_output_file_name = (
'{0:s}/{1:s}_cascading-random_{2:s}{3:s}'
).format(
output_dir_name, extensionless_output_file_name,
conv_dense_layer_names[k].replace('_', '-'),
output_file_extension
)
else:
this_model_object = keras.models.Model.from_config(
model_object.get_config()
)
this_model_object.set_weights(model_object.get_weights())
_reset_weights_in_layer(
model_object=this_model_object,
layer_name=conv_dense_layer_names[k]
)
this_output_file_name = '{0:s}/{1:s}_random_{2:s}{3:s}'.format(
output_dir_name, extensionless_output_file_name,
conv_dense_layer_names[k].replace('_', '-'),
output_file_extension
)
else:
this_model_object = model_object
this_output_file_name = output_file_name
# print(K.eval(this_model_object.get_layer(name='dense_3').weights[0]))
if component_type_string == CLASS_COMPONENT_TYPE_STRING:
print('Computing saliency maps for target class {0:d}...'.format(
target_class))
saliency_matrices = (
saliency_maps.get_saliency_maps_for_class_activation(
model_object=this_model_object, target_class=target_class,
list_of_input_matrices=predictor_matrices)
)
elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
print((
'Computing saliency maps for neuron {0:s} in layer "{1:s}"...'
).format(str(neuron_indices), layer_name))
saliency_matrices = (
saliency_maps.get_saliency_maps_for_neuron_activation(
model_object=this_model_object, layer_name=layer_name,
neuron_indices=neuron_indices,
list_of_input_matrices=predictor_matrices,
ideal_activation=ideal_activation)
)
else:
print((
'Computing saliency maps for channel {0:d} in layer "{1:s}"...'
).format(channel_index, layer_name))
saliency_matrices = (
saliency_maps.get_saliency_maps_for_channel_activation(
model_object=this_model_object, layer_name=layer_name,
channel_index=channel_index,
list_of_input_matrices=predictor_matrices,
stat_function_for_neuron_activations=K.max,
ideal_activation=ideal_activation)
)
if multiply_by_input:
for i in range(len(saliency_matrices)):
if saliency_matrices[i] is None:
continue
saliency_matrices[i] = (
saliency_matrices[i] * predictor_matrices[i]
)
saliency_matrices = trainval_io.separate_shear_and_reflectivity(
list_of_input_matrices=saliency_matrices,
training_option_dict=training_option_dict)
print('Writing saliency maps to file: "{0:s}"...'.format(
this_output_file_name))
saliency_metadata_dict = saliency_maps.check_metadata(
component_type_string=component_type_string,
target_class=target_class, layer_name=layer_name,
ideal_activation=ideal_activation, neuron_indices=neuron_indices,
channel_index=channel_index)
saliency_maps.write_standard_file(
pickle_file_name=this_output_file_name,
denorm_predictor_matrices=denorm_predictor_matrices,
saliency_matrices=saliency_matrices,
full_storm_id_strings=full_storm_id_strings,
storm_times_unix_sec=storm_times_unix_sec,
model_file_name=model_file_name,
metadata_dict=saliency_metadata_dict,
sounding_pressure_matrix_pa=sounding_pressure_matrix_pa)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
model_file_name=getattr(INPUT_ARG_OBJECT, MODEL_FILE_ARG_NAME),
component_type_string=getattr(
INPUT_ARG_OBJECT, COMPONENT_TYPE_ARG_NAME),
target_class=getattr(INPUT_ARG_OBJECT, TARGET_CLASS_ARG_NAME),
layer_name=getattr(INPUT_ARG_OBJECT, LAYER_NAME_ARG_NAME),
ideal_activation=getattr(INPUT_ARG_OBJECT, IDEAL_ACTIVATION_ARG_NAME),
neuron_indices=numpy.array(
getattr(INPUT_ARG_OBJECT, NEURON_INDICES_ARG_NAME), dtype=int),
channel_index=getattr(INPUT_ARG_OBJECT, CHANNEL_INDEX_ARG_NAME),
top_example_dir_name=getattr(INPUT_ARG_OBJECT, EXAMPLE_DIR_ARG_NAME),
storm_metafile_name=getattr(INPUT_ARG_OBJECT, STORM_METAFILE_ARG_NAME),
num_examples=getattr(INPUT_ARG_OBJECT, NUM_EXAMPLES_ARG_NAME),
randomize_weights=bool(getattr(INPUT_ARG_OBJECT, RANDOMIZE_ARG_NAME)),
cascading_random=bool(getattr(INPUT_ARG_OBJECT, CASCADING_ARG_NAME)),
multiply_by_input=bool(
getattr(INPUT_ARG_OBJECT, MULTIPLY_BY_INPUT_ARG_NAME)
),
output_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME)
)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import codecs
import os
import sys
import json
import re
import tarfile
from datetime import datetime
from collections import defaultdict
class Log(object):
log_file_opened = False
log_level_file = 1
log_level_console = 1
log_levels = ["DEBUG", "INFO", "WARNING", "ERROR"]
def __init__(self, filename, log_level_file, log_level_console):
if filename != "":
try:
self.log_file = codecs.open(filename, "a+", "utf-8")
self.log_file_opened = True
self.log_level_file = log_level_file
except PermissionError as e:
self.log_level_file = 4
self.log("Could not open logfile! {0}".format(e), 3)
else:
self.log_level_file = 4
self.log_level_console = log_level_console
def __del__(self):
if self.log_file_opened:
self.log_file.close()
def log(self, text, log_level=1):
msg = "[{0}] [{1}] {2}".format(self.get_timestamp(), self.log_levels[log_level], text)
if log_level >= self.log_level_file:
self.log_file.write(msg + "\n")
if log_level >= self.log_level_console:
print(msg)
@staticmethod
def get_timestamp():
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
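# Illustrative usage sketch (path and levels are assumptions, not defaults):
#   log = Log("/var/log/backup-rotation.log", log_level_file=1, log_level_console=2)
#   log.log("Creating backup...", 1)
# writes "[2020-01-01 03:00:00] [INFO] Creating backup..." to the file, while
# only WARNING (level 2) and above would also reach the console.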
class BackupRotation(object):
__version__ = "v1.1.2"
def __init__(self):
config_path = os.path.normpath(os.path.join(os.path.dirname(__file__), "config.json"))
log_path = ""
log_level_file = 1
log_level_console = 1
for i in range(1, len(sys.argv)):
arg = sys.argv[i]
if arg == "-q":
log_level_console = 4
elif arg == "-c":
if len(sys.argv) > i + 1:
config_path = sys.argv[i + 1]
elif arg == "-l":
if len(sys.argv) > i + 1:
log_path = sys.argv[i + 1]
elif arg == "-lv":
if len(sys.argv) > i + 1:
log_level_file = int(sys.argv[i + 1])
self.log = Log(log_path, log_level_file, log_level_console).log
self.log("backup-rotation.py " + self.__version__)
if not os.path.isfile(config_path):
self.log("Stated configfile not found, aborting...", 3)
exit()
config_data = self.load_config(config_path)
self.now = datetime.now()
# Start rotation for every backup item
for backup_item in config_data["backup_items"]:
self.run_backup_rotation(backup_item)
self.log("Backup rotation finished.")
def run_backup_rotation(self, backup_item):
self.log("--- RUN: backup rotation for %s" % backup_item["source"])
if not backup_item["compression"] in ["gz", "xz", "bz2"]:
self.log("Unknown compression type \"{0}\". Using lzma.".format(backup_item["compression"]), 2)
backup_item["compression"] = "xz"
file_extension = ".tar." + backup_item["compression"]
file_prefix = self.now.strftime("%Y-%m-%d")
no_backups_created = True
timetuple = self.now.timetuple()
for max_backups, current_date, backup_date, period_name in [
(backup_item["daily_backups"], None, None, "DAILY"),
(backup_item["weekly_backups"], timetuple.tm_wday, backup_item["create_backup_day_of_week"], "WEEKLY"),
(backup_item["monthly_backups"], timetuple.tm_mday, backup_item["create_backup_day_of_month"], "MONTHLY"),
(backup_item["yearly_backups"], timetuple.tm_yday, backup_item["create_backup_day_of_year"], "YEARLY")
]:
if max_backups > 0:
if current_date == backup_date:
file_name = file_prefix + "-" + period_name + file_extension
self.create_backup(backup_item, file_name)
no_backups_created = False
if no_backups_created:
self.log("No backups created.")
# Check for old backups
backups = os.listdir(backup_item["destination"])
old_backups = defaultdict(list)
for str_ in backups:
match = re.match("\d{4}-\d{2}-\d{2}-(DAILY|WEEKLY|MONTHLY|YEARLY)" + re.escape(file_extension), str_)
if match:
old_backups[match.group(1)].append(os.path.normpath(os.path.join(backup_item["destination"], str_)))
# Check for overhang in old backups and delete it
for period_name, max_backups in [
('DAILY', backup_item["daily_backups"]),
('WEEKLY', backup_item["weekly_backups"]),
('MONTHLY', backup_item["monthly_backups"]),
('YEARLY', backup_item["yearly_backups"]),
]:
files = old_backups[period_name]
overhang = len(files) - max_backups
if overhang > 0:
self.log(
"Overhang found ({0} backups). Deleting {1} old backup(s)...".format(period_name, overhang))
files = sorted(files, key=os.path.getctime)
for i in range(0, overhang):
self.log("Deleting {0}...".format(os.path.basename(files[i])))
os.remove(files[i])
self.log("--- END: backup rotation for %s" % backup_item["source"])
def create_backup(self, backup_item, file_name):
self.log("Creating backup... Filename: %s" % file_name)
mode = "w:" + backup_item["compression"]
file_path = os.path.normpath(os.path.join(backup_item["destination"], file_name))
if os.path.exists(file_path):
self.log("%s already exists. Skipping..." % file_name)
return
with tarfile.open(file_path, mode) as tar:
            self.add(tar, backup_item["source"], arcname=os.path.basename(backup_item["source"]))
def load_config(self, config_path: str) -> dict:
with open(config_path) as config_file:
data = json.load(config_file)
result = dict()
result["default"] = {
'create_backup_day_of_week': 0,
'create_backup_day_of_month': 1,
'create_backup_day_of_year': 1,
'daily_backups': 7,
'weekly_backups': 4,
'monthly_backups': 6,
'yearly_backups': 4,
'compression': 'xz',
}
result["default"].update(data["default"])
backup_items = list()
i = 0
for raw_backup_item in data["backup_items"]:
if "source" not in raw_backup_item or "destination" not in raw_backup_item:
self.log("Backup item no {0} invalid. Skipping...".format(i), 2)
elif not os.path.isdir(raw_backup_item["source"]):
self.log("Source path \"{0}\" not valid! (backup item no {1})".format(raw_backup_item["source"], i), 2)
elif not os.path.isdir(raw_backup_item["destination"]):
self.log(
"Destination path \"{0}\" not valid! (backup item no {1})".format(raw_backup_item["source"], i), 2)
else:
backup_item = result["default"].copy()
backup_item.update(raw_backup_item)
backup_items.append(backup_item)
i += 1
result["backup_items"] = backup_items
self.log("Configuration loaded: %s" % config_path, 1)
return result
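    # Illustrative config.json sketch (paths are placeholders); any key set in
    # the "default" block above can be overridden per backup item:
    # {
    #     "default": {"daily_backups": 7, "weekly_backups": 4, "compression": "xz"},
    #     "backup_items": [
    #         {"source": "/srv/data", "destination": "/srv/backups"},
    #         {"source": "/etc", "destination": "/srv/backups", "compression": "gz"}
    #     ]
    # }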
# modified copy of tarfile.add (https://hg.python.org/cpython/file/v3.5.1/Lib/tarfile.py)
# noinspection PyProtectedMember,SpellCheckingInspection,PyShadowingBuiltins
def add(self, tar, name, arcname=None, recursive=True, exclude=None, *, filter=None):
tar._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
tar._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if tar.name is not None and os.path.abspath(name) == tar.name:
tar._dbg(2, "tarfile: Skipped %r" % name)
return
tar._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = tar.gettarinfo(name, arcname)
if tarinfo is None:
tar._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
tar._dbg(2, "tarfile: Excluded %r" % name)
return
        bltn_open = open  # builtin open(), not TarFile.open(), to read file contents
# Append the tar header and data to the archive.
if tarinfo.isreg():
with bltn_open(name, "rb") as f:
try:
tar.addfile(tarinfo, f)
except Exception as e:
self.log("An error occurred: %s" % e, 3)
elif tarinfo.isdir():
tar.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
try:
tar.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
except Exception as e:
self.log("An error occurred: %s" % e, 3)
else:
try:
tar.addfile(tarinfo)
except Exception as e:
self.log("An error occurred: %s" % e, 3)
if __name__ == "__main__":
try:
BackupRotation()
except KeyboardInterrupt:
print("Aborting...")
|
|
# coding=utf8
# -*- coding: utf8 -*-
# vim: set fileencoding=utf8 :
from __future__ import unicode_literals
from django.conf import settings
from django.utils.timezone import now
from rest_framework import mixins, permissions, status, viewsets
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from rest_messaging.compat import compat_get_paginated_response, compat_get_request_data, compat_pagination_messages, compat_serializer_check_is_valid, compat_thread_serializer_set, compat_perform_update
from rest_messaging.models import Message, NotificationCheck, Participant, Participation, Thread
from rest_messaging.permissions import IsInThread
from rest_messaging.serializers import MessageNotificationCheckSerializer, ComplexMessageSerializer, SimpleMessageSerializer, ThreadSerializer
import json
class ThreadView(mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""
    The ThreadView allows us to create threads, and add/remove people to/from them.
It does not list the messages belonging to the thread.
"""
queryset = Thread.objects.all().prefetch_related('participants')
serializer_class = ThreadSerializer
permission_classes = (IsInThread,)
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = ThreadSerializer(instance, callback=getattr(settings, 'REST_MESSAGING_SERIALIZE_PARTICIPANTS_CALLBACK', None), context={'request': request}) # self.get_serializer will raise an error in DRF 2.4
return Response(serializer.data)
def create(self, request, *args, **kwargs):
""" We ensure the Thread only involves eligible participants. """
serializer = self.get_serializer(data=compat_get_request_data(request))
compat_serializer_check_is_valid(serializer)
self.perform_create(request, serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, request, serializer):
participants_ids = json.loads(compat_get_request_data(self.request).get('participants'))
thread = Thread.managers.get_or_create_thread(self.request, compat_get_request_data(self.request).get('name'), *participants_ids)
setattr(serializer, compat_thread_serializer_set(), thread)
def update(self, request, *args, **kwargs):
participants_ids = compat_get_request_data(self.request).getlist('participants', [])
if len(participants_ids) > 0:
# we warn the user he cannot update the participants here
return Response("Participant updates not allowed by this method.", status=status.HTTP_400_BAD_REQUEST)
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=compat_get_request_data(request), partial=partial)
compat_serializer_check_is_valid(serializer)
try:
self.perform_update(serializer)
        except Exception:
compat_perform_update(self, serializer)
return Response(serializer.data)
@detail_route(methods=['post'])
def add_participants(self, request, pk=None):
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we get the participants and add them
participants_ids = json.loads(compat_get_request_data(self.request).get('participants'))
thread.add_participants(request, *participants_ids)
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
@detail_route(methods=['post'])
def remove_participant(self, request, pk=None):
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we get the participant
participant_id = compat_get_request_data(self.request).get('participant')
participant = Participant.objects.get(id=participant_id)
# we remove him if thread.remove_participant allows us to
try:
thread.remove_participant(request, participant)
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST)
@detail_route(methods=['get'])
def get_removable_participants_ids(self, request, pk=None):
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we get the removable participants
removable_participants_ids = thread.get_removable_participants_ids(request)
        # we return the ids of the participants who can be removed
try:
return Response({'participants': removable_participants_ids})
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST)
@detail_route(methods=['post'])
def mark_thread_as_read(self, request, pk=None):
""" Pk is the pk of the Thread to which the messages belong. """
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we save the date
try:
participation = Participation.objects.get(thread=thread, participant=request.rest_messaging_participant)
participation.date_last_check = now()
participation.save()
# we return the thread
serializer = self.get_serializer(thread)
return Response(serializer.data)
except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST)
@compat_pagination_messages
class MessageView(mixins.ListModelMixin,
viewsets.GenericViewSet):
""" The view only lists and creates. """
queryset = Message.objects.none()
serializer_class = ComplexMessageSerializer
permission_classes = (IsInThread,)
def get_queryset(self):
""" We list all the threads involving the user """
check_notifications = self.request.GET.get("check_notifications", True)
messages = Message.managers.get_lasts_messages_of_threads(self.request.rest_messaging_participant.id, check_who_read=True, check_is_notification=check_notifications)
return messages
@detail_route(methods=['post'], permission_classes=[IsInThread], serializer_class=SimpleMessageSerializer)
def post_message(self, request, pk=None):
""" Pk is the pk of the Thread to which the message belongs. """
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
# we get the body
body = compat_get_request_data(self.request).get('body')
# we create the message
# Message.objects.save() could return an Exception
try:
message = Message(sender=request.rest_messaging_participant, thread=thread, body=body)
message.save()
serializer = SimpleMessageSerializer(message)
return Response(serializer.data, status=status.HTTP_201_CREATED)
except Exception:
return Response(status=status.HTTP_412_PRECONDITION_FAILED)
@detail_route(methods=['get'], permission_classes=[IsInThread], serializer_class=ComplexMessageSerializer)
def list_messages_in_thread(self, request, pk=None):
""" Pk is the pk of the Thread to which the messages belong. """
# we get the thread and check for permission
thread = Thread.objects.get(id=pk)
self.check_object_permissions(request, thread)
messages = Message.managers.get_all_messages_in_thread(participant_id=request.rest_messaging_participant.id, thread_id=thread.id, check_who_read=True)
page = self.paginate_queryset(messages)
if page is not None:
return compat_get_paginated_response(self, page)
serializer = ComplexMessageSerializer(messages, many=True)
return Response(serializer.data)
class NotificationCheckView(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = MessageNotificationCheckSerializer
permission_classes = (permissions.IsAuthenticated,)
@list_route(methods=['post'])
def check(self, request, *args, **kwargs):
# we get the NotificationCheck instance corresponding to the user or we create it
try:
nc = NotificationCheck.objects.get(participant=request.rest_messaging_participant)
if nc:
nc.date_check = now()
nc.save()
status_code = status.HTTP_200_OK
except Exception:
nc = NotificationCheck.objects.create(participant=request.rest_messaging_participant, date_check=now())
status_code = status.HTTP_201_CREATED
serializer = self.get_serializer(nc)
return Response(serializer.data, status=status_code)
class ParticipantAuthenticationView(mixins.ListModelMixin, viewsets.GenericViewSet):
"""
    View that simply returns the participant id of the user as set by the middleware, if it exists.
"""
permission_classes = (permissions.IsAuthenticated,)
def list(self, request, *args, **kwargs):
participant = Participant.objects.get(id=self.request.rest_messaging_participant.id)
return Response({'id': participant.id})
|
|
import os, sys, re
import util, compression, text, ilp
from globals import *
import nltk
class SummaryProblem:
"""
A class for representing elements of a summary problem
self.id 'D0701'
self.title 'Southern Poverty Law Center'
self.narr 'Describe the activities of Morris Dees...'
self.query <title>: <narr>
self.new_docs_paths a list of paths to the input documents
self.old_docs_paths a list of paths to 'old' input docs (update task only)
self.new_docs [Document1, ... ]
self.old_docs [Document1, ... ]
self.annotators set(['A', 'B', 'C', 'D'])
self.training {'A': <summary A>, ... }
"""
def __init__(self, id, title, narr, new_docs, old_docs):
self.id = id
self.title = title
self.narr = narr
self.query = text.Sentence(title+": "+ narr)
self.new_docs_paths = new_docs[:]
self.old_docs_paths = old_docs[:]
## for checking state
self.loaded_docs = False
self.parsed = False
self.loaded_ir_docs = False
## variables that might get set later
self.new_docs = None
self.old_docs = None
self.training = {}
self.annotators = set()
def load_documents(self):
"""
"""
self.new_docs = []
for path in self.new_docs_paths:
doc = text.Document(path)
doc.get_sentences()
self.new_docs.append(doc)
self.old_docs = []
for path in self.old_docs_paths:
doc = text.Document(path)
self.old_docs.append(doc)
self.loaded_docs = True
def _load_training(self, path, source='DUC'):
"""
        load [human] summaries, setting these member variables:
        self.training = {'A': [raw summary lines], 'B': [ ... ], ... }
        self.annotators = set(['A', 'B', ... ])
"""
self.training = {}
self.annotators = set()
if source.startswith('DUC') or source.startswith('TAC'):
for file in os.listdir(path):
items = file.split('.')
id = items[0]
## skip ids not relevant to this problem
compare_id = self.id.upper()
if source == 'TAC08':
compare_id = self.id.upper()[:5] + self.id.upper()[6:]
if id.upper() != compare_id: continue
annotator = items[-1]
self.annotators.add(annotator)
rawsents = open(path + file).read().splitlines()
self.training[annotator] = rawsents
def get_new_sentences(self):
sents = []
for doc in self.new_docs:
for sent in doc.sentences:
sents.append(sent)
return sents
def __str__(self):
s = []
s.append('%s SUMMARYPROBLEM' %'#START')
s.append('ID %s' %self.id)
s.append('TITLE %s' %self.title)
s.append('NARR %s' %self.narr)
s.append('NEW_DOCS %d\n%s' %(len(self.new_docs), '\n'.join(['%s' %n for n in self.new_docs])))
s.append('OLD_DOCS %d\n%s' %(len(self.old_docs), '\n'.join(['%s' %n for n in self.old_docs])))
for annotator in self.annotators:
s.append('TRAIN %s\n%s' %(annotator, '\n'.join(['%s' %n for n in self.training[annotator]])))
return '\n'.join(s)
def check_state(problems):
checks = ['sentences', 'parsed', 'ir']
results = dict.fromkeys(checks, True)
for problem in problems:
if not problem.loaded_docs: results['sentences'] = False
if not problem.parsed: results['parsed'] = False
if not problem.loaded_ir_docs: results['ir'] = False
return results
### SETUP FUNCTIONS ###
def setup_simple(data_path, id='simple', title='', narr=''):
"""
create a summary problem from a single clean (text only) input file
"""
doc = text.Document(data_path, is_clean=True)
problem = SummaryProblem(id, title, narr, [doc], [])
return problem
def setup_TAC08(task, skip_updates=False):
"""
task.topic_file: xml file for TAC
task.doc_path: path containing source documents
task.manual_path: path for manual (human) summaries
"""
## get all document data
all_docs = {}
files = util.get_files(task.doc_path, r'[^_]+_[^_]+_\d+[\.\-]\d+')
sys.stderr.write('Loading [%d] files\n' %len(files))
for file in files:
id = os.path.basename(file)
all_docs[id] = file
## initialize problems
problems = []
# load XML task definition
from xml.etree import ElementTree
root = ElementTree.parse(task.topic_file).getroot()
for topic in root:
if topic.tag != "topic": continue
id = topic.attrib["id"]
title = None
narr = None
docsets = []
docset_ids = []
for node in topic:
if node.tag == "title":
title = node.text.strip()
elif node.tag == "narrative":
narr = node.text.strip()
elif node.tag == "docsetA":
documents = node.findall("doc")
docsets.append([doc.attrib["id"] for doc in documents])
docset_ids.append(node.attrib["id"])
elif node.tag == "docsetB":
if skip_updates: continue
documents = node.findall("doc")
docsets.append([doc.attrib["id"] for doc in documents])
docset_ids.append(node.attrib["id"])
old_docs = []
for docset_index in range(len(docsets)):
## map docids to documents
new_docs = [all_docs[doc] for doc in docsets[docset_index]]
## create a SummaryProblem
problem = SummaryProblem(docset_ids[docset_index], title, narr, new_docs, old_docs)
old_docs += new_docs
## include training data in problem
if task.manual_path: problem._load_training(task.manual_path, source='TAC08')
problems.append(problem)
sys.stderr.write('Setting up [%d] problems\n' %len(problems))
task.problems = problems
def setup_DUC_basic(task, skip_updates=False):
"""
task.topic_file: sgml file for DUC
task.doc_path: path containing source documents
task.manual_path: path for manual (human) summaries
"""
## get all document data
all_docs = {}
    files = util.get_files(task.doc_path, r'\w{2,3}\d+[\.\-]\d+')
sys.stderr.write('Loading [%d] files\n' %len(files))
for file in files:
id = os.path.basename(file)
all_docs[id] = file
## initialize problems
problems = []
data = open(task.topic_file).read().replace('\n', ' ')
topics = re.findall('<topic>.+?</topic>', data)
sys.stderr.write('Setting up [%d] problems\n' %len(topics))
for topic in topics:
id = util.remove_tags(re.findall('<num>.+?</num>', topic)[0])[:-1]
title = util.remove_tags(re.findall('<title>.+?</title>', topic)[0])
narr = util.remove_tags(re.findall('<narr>.+?</narr>', topic)[0])
docsets = re.findall('<docs.*?>.+?</docs.*?>', topic)
docsets = map(util.remove_tags, docsets)
docsets = [d.split() for d in docsets]
old_docs = []
for docset_index in range(len(docsets)):
## update naming convention different from main
if len(docsets) > 1: id_ext = '-' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[docset_index]
else: id_ext = ''
new_docs = [all_docs[doc] for doc in docsets[docset_index]]
## create a SummaryProblem
problem = SummaryProblem(id+id_ext, title, narr, new_docs, old_docs)
old_docs += new_docs
## include training data in problem
if task.manual_path: problem._load_training(task.manual_path)
problems.append(problem)
## skip updates?
if skip_updates: break
task.problems = problems
def setup_DUC_sentences(task, parser=None, reload=False, options=None):
## load problems quickly from pickle file
if (not reload) and os.path.isfile(task.data_pickle):
sys.stderr.write('Loading [%s] problem data from [%s]\n' %(task.name, task.data_pickle))
task.problems = util.load_pickle(task.data_pickle)
return
## parse sentences
if options:
text.text_processor.load_splitta_model(options.splitta_model)
else:
text.text_processor.load_splitta_model('/u/dgillick/sbd/splitta/model_nb/')
for problem in task.problems:
sys.stderr.write('%s\n' %problem.id)
problem.load_documents()
if parser:
for doc in problem.new_docs:
doc.parse_sentences(parser)
problem.parsed = True
if parser:
parser.run()
for sentence, parsetree in parser.parsed.items():
sentence.parsed = parsetree
## save pickled version for faster loading later
sys.stderr.write('Saving [%s] problem data in [%s]\n' %(task.name, task.data_pickle))
util.save_pickle(task.problems, task.data_pickle)
def build_program(problem, concept_weight, length=100, sentences = None):
"""
the ILP keeps tracks of the constraints
s<num> variables handle sentences, subsentences and removable subtrees
c<num> variables represent concepts in those selected pseudo-sentences
"""
program = compression.SentenceSelectionILP(concept_weight, length, use_subsentences=True, use_removables=True,
use_min_length=True, use_min_length_ratio=False)
if not sentences:
sentences = problem.get_new_sentences()
for sentence in sentences:
if not hasattr(sentence, "compression_node"):
sentence.compression_node = compression.TreebankNode(sentence.parsed)
nounPhraseMapping = compression.generateNounPhraseMapping([s.compression_node for s in sentences])
for sentence in sentences:
## generate a compression candidate tree
candidates = sentence.compression_node.getCandidateTree(nounPhraseMapping)
candidate_root = compression.TreebankNode(candidates)
candidate_root.sentence = sentence
## (or a non compressed tree)
#candidate_root = treenode.TreeNode(sentence.compression_node.getNonCompressedCandidate())
if candidate_root.isLeaf(): continue
## debugging
#candidate_root.original = root
#candidate_root.original_text = candidates
# update ILP with the new sentence
program.addSentence(candidate_root, lambda x: compression.get_bigrams_from_node(x,
node_skip=lambda y: not re.match(r'[A-Za-z0-9]', y.label), node_transform=lambda y: text.text_processor.porter_stem(y.text.lower())))
# skip debugging part
continue
sentence_concepts = program.getConcepts(candidate_root, lambda x: compression.get_bigrams_from_node(x,
node_skip=lambda y: not re.match(r'[A-Za-z0-9]', y.label), node_transform=lambda y: text.text_processor.porter_stem(y.text.lower())))
print sentence.original
print candidate_root.getPrettyCandidates()
for concept in sentence_concepts.keys():
if concept not in concept_weight:
del sentence_concepts[concept]
print sorted(sentence_concepts.keys())
units = dict([(x, 1) for x in util.get_ngrams(sentence.stemmed, n=2, bounds=False)])
for concept in units.keys():
if concept not in concept_weight:
del units[concept]
print sorted(units.keys())
return program
def get_program_result(program):
# get the selected sentences
selection = []
for id in program.output:
if id.startswith("s") and program.output[id] == 1:
node = program.binary[id] # gives you back the actual node (which can be a subsentence, or a chunk not removed)
if not program.nodeHasSelectedParent(node): # only start printing at the topmost nodes
# create a fake sentence to hold the compressed content
sentence = text.Sentence(compression.postProcess(program.getSelectedText(node)), \
node.root.sentence.order, node.root.sentence.source, node.root.sentence.date)
sentence.parsed = str(node)
sentence.original_node = node
selection.append(sentence)
#print node.root.getPrettyCandidates()
return selection
def build_alternative_program(problem, concept_weight, length=100, sentences = None, longuest_candidate_only=False):
if not sentences:
sentences = problem.get_new_sentences()
for sentence in sentences:
if not hasattr(sentence, "compression_node"):
sentence.compression_node = compression.TreebankNode(sentence.parsed)
nounPhraseMapping = compression.generateNounPhraseMapping([s.compression_node for s in sentences])
#print "generating acronyms"
acronymMapping = compression.generateAcronymMapping(problem.get_new_sentences())
print problem.id, acronymMapping
compressed_sentences = []
seen_sentences = {}
group_id = 0
for sentence in sentences:
subsentences = sentence.compression_node.getNodesByFilter(compression.TreebankNode.isSubsentence)
candidates = {}
for node in subsentences:
candidates.update(node.getCandidates(mapping=nounPhraseMapping))
if longuest_candidate_only:
max_length = 0
argmax = None
for candidate in candidates:
if len(candidate) > max_length:
max_length = len(candidate)
argmax = candidate
if argmax != None:
candidates = [argmax]
for candidate in candidates:
new_sentence = text.Sentence(compression.postProcess(candidate), sentence.order, sentence.source, sentence.date)
if new_sentence.length <= 5: continue # skip short guys
new_sentence.group_id = group_id
compressed_sentences.append(new_sentence)
seen_sentences[new_sentence.original] = 1
group_id += 1
compression.replaceAcronyms(compressed_sentences, acronymMapping)
log_file = open("%s.log" % problem.id, "w")
for sentence in compressed_sentences:
log_file.write("%d %s\n" %( group_id, str(sentence)))
log_file.close()
# generate ids for acronyms
acronym_id = {}
acronym_length = {}
for definition, acronym in acronymMapping.items():
if acronym not in acronym_id:
acronym_id[acronym] = len(acronym_id)
acronym_length[acronym] = len(definition.strip().split())
# get concepts
relevant_sentences = []
sentence_concepts = []
groups = {}
used_concepts = set()
acronym_index = {}
sent_index = 0
for sentence in compressed_sentences:
units = util.get_ngrams(sentence.stemmed, n=2, bounds=False)
overlapping = set([u for u in units if u in concept_weight])
if len(overlapping) == 0: continue
relevant_sentences.append(sentence)
sentence_concepts.append(overlapping)
used_concepts.update(overlapping)
if sentence.group_id not in groups: groups[sentence.group_id] = []
groups[sentence.group_id].append(sent_index)
# generate an acronym index
for acronym in acronym_id:
if re.search(r'\b' + acronym + r'\b', sentence.original):
if acronym not in acronym_index: acronym_index[acronym] = []
acronym_index[acronym].append(sent_index)
sent_index += 1
# build inverted index
filtered_concepts = {}
concept_index = {}
index = 0
for concept in used_concepts:
concept_index[concept] = index
filtered_concepts[concept] = concept_weight[concept]
index += 1
relevant_sent_concepts = [[concept_index[c] for c in cs] for cs in sentence_concepts]
concept_weights = filtered_concepts
curr_concept_sents = {}
for sent_index in range(len(relevant_sentences)):
concepts = relevant_sent_concepts[sent_index]
for concept in concepts:
if not concept in curr_concept_sents: curr_concept_sents[concept] = []
curr_concept_sents[concept].append(sent_index)
# generate the actual ILP
program = ilp.IntegerLinearProgram()
program.objective["score"] = ' + '.join(['%f c%d' %(concept_weight[concept], concept_index[concept]) for concept in concept_index])
s1 = ' + '.join(['%d s%d' %(relevant_sentences[sent_index].length, sent_index) for sent_index in range(len(relevant_sentences))])
# add enough space to fit the definition of each acronym employed in the summary
s_acronyms = ' + '.join(['%d a%d' %(acronym_length[acronym], acronym_id[acronym]) for acronym in acronym_id])
if s_acronyms != "":
s_acronyms = " + " + s_acronyms
s2 = ' <= %s\n' %length
program.constraints["length"] = s1 + s_acronyms + s2
for concept, index in concept_index.items():
## at least one sentence containing a selected bigram must be selected
s1 = ' + '.join([ 's%d' %sent_index for sent_index in curr_concept_sents[index]])
s2 = ' - c%d >= 0' %index
program.constraints["presence_%d" % index] = s1 + s2
## if a bigram is not selected then all sentences containing it are deselected
s1 = ' + '.join([ 's%d' %sent_index for sent_index in curr_concept_sents[index]])
s2 = '- %d c%d <= 0' %(len(curr_concept_sents[index]), index)
program.constraints["absence_%d" % index] = s1 + s2
# constraints so that acronyms get selected along with sentences they belong to
for acronym, index in acronym_index.items():
s1 = ' + '.join([ 's%d' %sent_index for sent_index in index])
s2 = ' - a%d >= 0' %acronym_id[acronym]
program.constraints["acronym_presence_%d" % acronym_id[acronym]] = s1 + s2
s1 = ' + '.join([ 's%d' %sent_index for sent_index in index])
s2 = '- %d a%d <= 0' %(len(index), acronym_id[acronym])
program.constraints["acronym_absence_%d" % acronym_id[acronym]] = s1 + s2
# add sentence compression groups
for group in groups:
program.constraints["group_%d" % group] = " + ".join(["s%d" % sent_index for sent_index in groups[group]]) + " <= 1"
for sent_index in range(len(relevant_sentences)):
program.binary["s%d" % sent_index] = relevant_sentences[sent_index]
    for concept, index in concept_index.items():
        program.binary["c%d" % index] = 1
for acronym, id in acronym_id.items():
program.binary["a%d" % id] = 1
sys.stderr.write("compression candidates: %d, original: %d\n" % (len(relevant_sentences), len(sentences)))
program.acronyms = acronymMapping
return program
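# Hedged illustration, not part of the original module: the returned program stores its
# objective, constraints and binary variables as plain LP text fragments, so a rough
# CPLEX-LP-style dump can be produced from those dictionaries alone. The exact output
# format expected by the ilp module is an assumption here.
def example_dump_lp(program):
    lines = ['Maximize', '  obj: ' + program.objective["score"], 'Subject To']
    for name, constraint in program.constraints.items():
        lines.append('  {:s}: {:s}'.format(name, constraint))
    lines.append('Binary')
    lines.extend('  ' + variable for variable in program.binary)
    lines.append('End')
    return '\n'.join(lines)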
|
|
#!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ 'ati1yt', 'atvo1t', 'au81v1', 'aukzz0', 'av00cj', 'avcllf', 'avq91r' ]
flaskport = 8969
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
# submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
submissions = [redditSession.submission(id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
commentForest = submission.comments
comments += [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
return comments
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
|
# msp430 emulator
import utils
import msp_base as base
import msp_fr5969_model as model
import msp_peripheral_timer as peripheral_timer
import msp_reference_timing as reference_timing
import msp_elftools as elftools
import smt
from msp_isa import isa
class Emulator(object):
def __init__(self, tracing = False, tinfo = None, verbosity = 0):
self.tracing = tracing
self.trace = []
self.iotrace = model.iotrace_init()
self.iotrace2 = []
self.verbosity = verbosity
if tracing:
self.state = model.Model(trace=self.iotrace)
else:
self.state = model.Model()
if tinfo is None:
self.timing = False
self._mmio_default()
elif tinfo == 'reference':
self.timing = True
self._timer_default()
self.timer_state_default = None
self.timer_ttab = []
self.timer_stab = []
self._timer_reset()
else:
self.timing = True
self._timer_default()
self.timer_state_default = tinfo['state_default']
self.timer_ttab = tinfo['ttab']
self.timer_stab = tinfo['stab']
self._timer_reset()
if self.verbosity >= 3:
print('created {:s}'.format(str(self)))
self.state.dump()
def _mmio_default(self):
# watchdog (unimplemented)
for addr in [0x15c, 0x15d]:
self.state.mmio_handle_default(addr)
# timerA (unimplemented)
for addr in [0x0340, 0x0341, 0x0342, 0x0343, 0x0350, 0x0351, 0x0352, 0x0353]:
self.state.mmio_handle_default(addr)
def _timer_default(self):
# watchdog (unimplemented)
for addr in [0x15c, 0x15d]:
self.state.mmio_handle_default(addr)
# call out to timer module
self.timer_A = peripheral_timer.Peripheral_Timer()
self.timer_A.attach_timer(self.state, peripheral_timer.timer_A_base)
def _timer_reset(self):
self.timer_cycles = 0
self.timer_state = self.timer_state_default
def _timer_update(self, ins, fields):
# cycles = None
# iname = smt.smt_iname(ins)
# if (self.timer_state, iname, None, None) in self.timer_ttab:
# assert cycles is None
# cycles = self.timer_ttab[(self.timer_state, iname, None, None)]
# assert cycles is not None
# rsname = smt.smt_rsrc(fields)
# if (self.timer_state, iname, rsname, None) in self.timer_ttab:
# assert cycles is None
# cycles = self.timer_ttab[(self.timer_state, iname, rsname, None)]
# assert cycles is not None
# rdname = smt.smt_rdst(fields)
# if (self.timer_state, iname, None, rdname) in self.timer_ttab:
# assert cycles is None
# cycles = self.timer_ttab[(self.timer_state, iname, None, rdname)]
# assert cycles is not None
# if (self.timer_state, iname, rsname, rdname) in self.timer_ttab:
# assert cycles is None
# cycles = self.timer_ttab[(self.timer_state, iname, rsname, rdname)]
# assert cycles is not None
# assert cycles is not None and cycles >= 0
# self.timer_state = self.timer_stab[self.timer_state, iname]
if self.timer_ttab or self.timer_stab:
iname = smt.smt_iname(ins)
rsname = smt.ext_smt_rsrc(fields)
rdname = smt.ext_smt_rdst(fields)
cycles = self.timer_ttab[self.timer_state, iname, rsname, rdname]
if cycles is None and (ins.name in {'BIC', 'BIS'} and
ins.smode in {'@Rn', '@Rn+'} and
ins.dmode in {'Rn'} and
rsname in {smt.smt_rnames[2], smt.smt_rnames[3]} and
rdname in {smt.smt_rnames[2]}):
cycles = 1
if cycles is None:
raise base.UnknownBehavior('missing timer entry for {:d} {:s} {:s} {:s}'
.format(self.timer_state, iname, rsname, rdname))
new_state = self.timer_stab[self.timer_state, iname, rsname, rdname]
if new_state is None and (ins.name in {'BIC', 'BIS'} and
ins.smode in {'@Rn', '@Rn+'} and
ins.dmode in {'Rn'} and
rsname in {smt.smt_rnames[2], smt.smt_rnames[3]} and
rdname in {smt.smt_rnames[2]}):
new_state = 0
if new_state is None:
raise base.UnknownBehavior('missing timer state transition for {:d} {:s} {:s} {:s}'
.format(self.timer_state, iname, rsname, rdname))
else:
# use reference
cycles = reference_timing.reference_time(ins, fields)
new_state = self.timer_state_default
self.timer_state = new_state
self.timer_cycles = self.timer_cycles + cycles
return cycles
def reset(self):
reset_pc = model.mk_read16(self.state.read8)(model.resetvec)
self.state.writereg(0, reset_pc)
def load(self, fname, restore_regs = True):
if self.verbosity >= 1:
print('programming {:s}'.format(fname))
elftools.load(self.state, fname, restore_regs=restore_regs, verbosity=self.verbosity)
def save(self, fname):
if self.verbosity >= 1:
print('saving {:s}'.format(fname))
elftools.save(self.state, fname, verbosity=self.verbosity)
def prog(self, fname):
self.fill(model.fram_start, model.fram_size, [0xff])
self.load(fname)
self.reset()
def mw(self, addr, pattern):
# print('emulator invoking mw {:05x} '.format(addr), utils.makehex(pattern))
for i in range(len(pattern)):
self.state.write8(addr + i, pattern[i])
# utils.printhex([self.state.read8(i) for i in range(addr, addr+len(pattern))])
def fill(self, addr, size, pattern):
for i in range(size):
self.state.write8(addr + i, pattern[i%len(pattern)])
def setreg(self, register, value):
self.state.writereg(register, value)
def md(self, addr, size):
# print('emulator invoking md {:05x} {:d}'.format(addr, size))
memreads = [self.state.read8(i) for i in range(addr, addr+size)]
# utils.printhex(memreads[:16])
return memreads
def regs(self):
return [self.state.readreg(i) for i in range(len(self.state.regs))]
def step(self):
pc = self.state.readreg(0)
word = model.mk_read16(self.state.read8)(pc)
ins = isa.decode(word)
if ins is None:
raise base.ExecuteError('failed to decode {:#04x} ( PC: {:05x})'.format(word, pc))
# TODO: iotrace should probably work in a reasonable way
# right now we have two lists of io traces, one which includes all io, even not
# from instruction execution, and a second one in self.iotrace2 which is only
# io events from actually executing instructions
if self.tracing:
model.iotrace_next(self.iotrace)
fields = ins.readfields(self.state)
if self.tracing:
self.trace.append(fields)
if self.verbosity >= 2:
print(utils.describe_regs(self.regs()))
ins.describe()
utils.print_dict(fields)
ins.execute(fields)
ins.writefields(self.state, fields)
# remember the thing we just added to our iotrace, and make a dummy to intercept
# non-execution IO before the next instruction
if self.tracing:
self.iotrace2.append(self.iotrace[-1])
model.iotrace_next(self.iotrace)
# # manual breakpoints / watchpoints
# step_io = self.iotrace2[-1]
# for addr, value in step_io['w']['mem']:
# if addr >= 0xfffe:
# print(hex(pc))
# utils.print_dict(step_io)
# raise base.Breakpoint('manual')
# if pc == 0xfe84:
# print(hex(pc))
# raise base.Breakpoint('manual')
# # end
# update the timer if we're doing that
if self.timing:
cycles = self._timer_update(ins, fields)
self.timer_A.elapse(cycles)
if self.tracing and self.verbosity >= 2:
print('----')
if word == 0x3fff:
# halt
return False
else:
return True
def run(self, max_steps = 0):
steps = 0
try:
while self.step():
steps += 1
if max_steps > 0 and steps >= max_steps:
break
success = True
except base.ExecuteError as e:
if self.verbosity >= 0:
print('Execution Error: {:s}'.format(str(e)))
success = False
except base.UnknownBehavior as e:
if self.verbosity >= 0:
print('Unknown Behavior: {:s}'.format(str(e)))
success = False
except base.Breakpoint as e:
if self.verbosity >= 0:
print('Breakpoint: {:s}'.format(str(e)))
success = True
else:
success = True
return success, steps
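# Hedged illustration, not part of the original module: the tinfo mapping accepted by
# Emulator is read as tinfo['state_default'], tinfo['ttab'] and tinfo['stab'] in __init__,
# and both tables are indexed by (timer state, instruction name, source name,
# destination name) tuples in _timer_update. The concrete names and numbers below are
# made-up placeholders; only the key/value shape is taken from the code above.
example_tinfo = {
    'state_default': 0,
    'ttab': {(0, 'MOV', 'R4', 'R5'): 1},  # cycles charged for this (state, ins, src, dst)
    'stab': {(0, 'MOV', 'R4', 'R5'): 0},  # timer state to transition to for the same key
}
# example_emulator = Emulator(tracing=True, tinfo=example_tinfo, verbosity=1)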
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print('usage: {:s} <ELF>'.format(sys.argv[0]))
exit(1)
fname = sys.argv[1]
if len(sys.argv) >= 3:
outname = sys.argv[2]
else:
outname = None
mulator = Emulator(tracing=True, verbosity=2)
mulator.prog(fname)
success, steps = mulator.run(max_steps = 100000)
print('Success: {}, steps: {:d}'.format(success, steps))
print(len(mulator.trace))
print(len(mulator.iotrace2))
    if outname is not None:
mulator.save(outname)
|
|
"""
Extensible validation for Python dictionaries.
This module implements Cerberus Validator class
:copyright: 2012-2015 by Nicola Iarocci.
:license: ISC, see LICENSE for more details.
Full documentation is available at http://cerberus.readthedocs.org/
"""
import sys
import re
import copy
from datetime import datetime, date
from collections import Iterable, Mapping, Sequence
from . import errors
if sys.version_info[0] == 3:
_str_type = str
_int_types = (int,)
else:
_str_type = basestring # noqa
_int_types = (int, long) # noqa
class ValidationError(ValueError):
""" Raised when the target dictionary is missing or has the wrong format
"""
pass
class SchemaError(ValueError):
""" Raised when the validation schema is missing, has the wrong format or
contains errors.
"""
pass
class Validator(object):
""" Validator class. Validates any Python dict against a validation schema,
which is provided as an argument at class instantiation, or upon calling
the :func:`validate` method.
:param schema: optional validation schema.
:param transparent_schema_rules: if ``True`` unknown schema rules will be
ignored (no SchemaError will be raised).
                                     Defaults to ``False``. Useful if you need to
extend the schema grammar beyond Cerberus'
domain.
:param ignore_none_values: If ``True`` it will ignore None values for type
checking. (no UnknownType error will be added).
Defaults to ``False``. Useful if your document
is composed from function kwargs with defaults.
:param allow_unknown: if ``True`` unknown key/value pairs (not present in
the schema) will be ignored, and validation will
pass. Defaults to ``False``, returning an 'unknown
                          field error' on validation.
.. versionchanged:: 0.9.1
'required' will always be validated, regardless of any dependencies.
.. versionadded:: 0.9
       'anyof', 'noneof', 'allof', 'oneof' validation rules.
PyPy support.
'coerce' rule.
'propertyschema' validation rule.
'validator.validated' takes a document as argument and returns a
       validated document or 'None' if validation failed.
.. versionchanged:: 0.9
       Use 'str.format' in error messages so that someone who wants to override
       them does not get an exception if arguments are not passed.
'keyschema' is renamed to 'valueschema'. Closes #92.
'type' can be a list of valid types.
       Changed usages of 'document' to 'self.document' in '_validate'.
When 'items' is applied to a list, field name is used as key for
       'validator.errors', and offending field indexes are used as keys for
       field errors ({'a_list_of_strings': {1: 'not a string'}}).
       Additional kwargs that are passed to the __init__-method of an
       instance of a Validator-(sub-)class are passed to child-validators.
Ensure that additional **kwargs of a subclass persist through validation
Improve failure message when testing against multiple types.
Ignore 'keyschema' when not a mapping.
Ignore 'schema' when not a sequence.
'allow_unknown' can also be set for nested dicts. Closes #75.
Raise SchemaError when an unallowed 'type' is used in conjunction with
'schema' rule.
.. versionchanged:: 0.8.1
'dependencies' for sub-document fields. Closes #64.
'readonly' should be validated before any other validation. Closes #63.
'allow_unknown' does not apply to sub-dictionaries in a list.
Closes #67.
update mode does not ignore required fields in subdocuments. Closes #72.
'allow_unknown' does not respect custom rules. Closes #66.
.. versionadded:: 0.8
'dependencies' also support a dict of dependencies.
'allow_unknown' can be a schema used to validate unknown fields.
Support for function-based validation mode.
.. versionchanged:: 0.7.2
Successfully validate int as a float type.
.. versionchanged:: 0.7.1
Validator options like 'allow_unknown' and 'ignore_none_values' are now
taken into consideration when validating sub-dictionaries.
Make self.document always the root level document.
Up-front validation for schemas.
.. versionadded:: 0.7
'keyschema' validation rule.
'regex' validation rule.
'dependencies' validation rule.
       'min', 'max' now apply to floats and numbers too. Closes #30.
'set' data type.
.. versionadded:: 0.6
'number' (integer or float) validator.
.. versionchanged:: 0.5.0
``validator.errors`` returns a dict where keys are document fields and
values are validation errors.
.. versionchanged:: 0.4.0
:func:`validate_update` is deprecated. Use :func:`validate` with
``update=True`` instead.
Type validation is always performed first (only exception being
``nullable``). On failure, it blocks other rules on the same field.
Closes #18.
.. versionadded:: 0.2.0
`self.errors` returns an empty list when validate() has not been called.
       Option to allow nullable field values.
Option to allow unknown key/value pairs.
.. versionadded:: 0.1.0
Option to ignore None values for type checking.
.. versionadded:: 0.0.3
Support for transparent schema rules.
Added new 'empty' rule for string fields.
.. versionadded:: 0.0.2
Support for addition and validation of custom data types.
"""
special_rules = "required", "nullable", "type", "dependencies", \
"readonly", "allow_unknown", "schema", "coerce"
def __init__(self, *args, **kwargs):
signature = ('schema', 'transparent_schema_rules',
'ignore_none_values', 'allow_unknown')
for i, p in enumerate(signature[:len(args)]):
if p in kwargs:
raise TypeError("__init__ got multiple values for argument "
"'%s'" % p)
else:
kwargs[p] = args[i]
self.__config = kwargs
self.schema = kwargs.get('schema')
self.transparent_schema_rules = kwargs.get('transparent_schema_rules',
False)
self.ignore_none_values = kwargs.get('ignore_none_values', False)
self.allow_unknown = kwargs.get('allow_unknown', False)
if self.schema:
self.validate_schema(self.schema)
self._errors = {}
self._current = None
def __call__(self, *args, **kwargs):
return self.validate(*args, **kwargs)
@property
def current(self):
"""Get the current document being validated.
When validating, the current (sub)document will be available
via this property.
"""
return self._current
@property
def errors(self):
"""
        :rtype: a dict of validation errors, keyed by document field. Will be
        empty if no errors were found during validation. Resets after each
        call to :func:`validate`.
"""
return self._errors
def validate_update(self, document, schema=None, context=None):
""" Validates a Python dictionary against a validation schema. The
difference with :func:`validate` is that the ``required`` rule will be
ignored here.
:param schema: optional validation schema. Defaults to ``None``. If not
provided here, the schema must have been provided at
class instantiation.
:param context: the context in which the document should be validated.
Defaults to ``None``.
:return: True if validation succeeds, False otherwise. Check the
:func:`errors` property for a list of validation errors.
.. deprecated:: 0.4.0
Use :func:`validate` with ``update=True`` instead.
"""
return self._validate(document, schema, update=True, context=context)
def validate(self, document, schema=None, update=False, context=None):
""" Validates a Python dictionary against a validation schema.
:param document: the dict to validate.
:param schema: the validation schema. Defaults to ``None``. If not
provided here, the schema must have been provided at
class instantiation.
:param update: If ``True`` validation of required fields won't be
performed.
:param context: the document in which context validation should be
performed. Defaults to ``None``.
:return: True if validation succeeds, False otherwise. Check the
:func:`errors` property for a list of validation errors.
.. versionchanged:: 0.4.0
Support for update mode.
"""
return self._validate(document, schema, update=update, context=context)
def validated(self, *args, **kwargs):
""" Wrapper around ``Validator.validate`` that returns the validated
document or ``None`` if validation failed.
"""
self.validate(*args, **kwargs)
if self.errors:
return None
else:
return self.document
def _validate(self, document, schema=None, update=False, context=None):
self._errors = {}
self.update = update
if schema is not None:
self.validate_schema(schema)
self.schema = schema
elif self.schema is None:
raise SchemaError(errors.ERROR_SCHEMA_MISSING)
if document is None:
raise ValidationError(errors.ERROR_DOCUMENT_MISSING)
if not isinstance(document, Mapping):
raise ValidationError(
errors.ERROR_DOCUMENT_FORMAT.format(document))
# make root document available for validators (Cerberus #42, Eve #295)
if not context:
try:
# might fail when dealing with complex document values
self.document = copy.deepcopy(document)
except:
# fallback on a shallow copy
self.document = copy.copy(document)
finally:
self._current = self.document
else:
self.document = context
self._current = document
# copy keys since the document might change during its iteration
for field in [f for f in self._current]:
value = self._current[field]
if self.ignore_none_values and value is None:
continue
definition = self.schema.get(field)
if definition is not None:
self._validate_definition(definition, field, value)
else:
if self.allow_unknown:
if isinstance(self.allow_unknown, Mapping):
# validate that unknown fields matches the schema
# for unknown_fields
unknown_validator = \
self.__get_child_validator(
schema={field: self.allow_unknown})
if not unknown_validator.validate({field: value}):
self._error(field, unknown_validator.errors[field])
else:
# allow unknown field to pass without any kind of
# validation
pass
else:
self._error(field, errors.ERROR_UNKNOWN_FIELD)
if not self.update:
self._validate_required_fields(self._current)
return len(self._errors) == 0
def _validate_definition(self, definition, field, value):
if value is None:
if definition.get("nullable", False) is True:
return
else:
self._error(field, errors.ERROR_NOT_NULLABLE)
if 'coerce' in definition:
value = self._validate_coerce(definition['coerce'], field,
value)
self.document[field] = value
if 'readonly' in definition:
self._validate_readonly(definition['readonly'], field,
value)
if self.errors.get(field):
return
if 'type' in definition:
self._validate_type(definition['type'], field, value)
if self.errors.get(field):
return
if 'dependencies' in definition:
self._validate_dependencies(
document=self.document,
dependencies=definition["dependencies"],
field=field
)
if self.errors.get(field):
return
if 'schema' in definition:
self._validate_schema(definition['schema'],
field,
value,
definition.get('allow_unknown'))
definition_rules = [rule for rule in definition.keys()
if rule not in self.special_rules]
for rule in definition_rules:
validatorname = "_validate_" + rule.replace(" ", "_")
validator = getattr(self, validatorname, None)
if validator:
validator(definition[rule], field, value)
def _error(self, field, _error):
field_errors = self._errors.get(field, [])
if not isinstance(field_errors, list):
field_errors = [field_errors]
if isinstance(_error, (_str_type, dict)):
field_errors.append(_error)
else:
field_errors.extend(_error)
if len(field_errors) == 1:
field_errors = field_errors.pop()
self._errors[field] = field_errors
def validate_schema(self, schema):
""" Validates a schema against supported rules.
:param schema: the schema to be validated as a legal cerberus schema
according to the rules of this Validator object.
.. versionadded:: 0.7.1
"""
if not isinstance(schema, Mapping):
raise SchemaError(errors.ERROR_SCHEMA_FORMAT.format(schema))
# TODO remove on next major release
def update_to_valueschema(schema, warning_printed=False):
if 'keyschema' in schema:
schema['valueschema'] = schema['keyschema']
del schema['keyschema']
if not warning_printed:
print('WARNING cerberus: `keyschema` is deprecated, '
'use `valueschema` instead')
warning_printed = True
for key, value in schema.items():
if isinstance(value, Mapping):
schema[key] = update_to_valueschema(value, warning_printed)
return schema
schema = update_to_valueschema(schema)
for field, constraints in schema.items():
if not isinstance(constraints, Mapping):
raise SchemaError(errors.ERROR_DEFINITION_FORMAT.format(field))
for constraint, value in constraints.items():
if constraint == 'type':
values = value if isinstance(value, list) else [value]
for value in values:
if not hasattr(self, '_validate_type_' + value):
raise SchemaError(
errors.ERROR_UNKNOWN_TYPE.format(value))
if 'dict' in values and 'list' in values:
if 'valueschema' in constraints and \
'schema' not in constraints: # noqa
raise SchemaError('You must provide a compleme'
'ntary `schema`')
if 'schema' in constraints and \
'valueschema' not in constraints: # noqa
raise SchemaError('You must provide a compleme'
'ntary `valueschema`')
elif constraint == 'schema':
constraint_type = constraints.get('type')
if constraint_type is not None:
if constraint_type == 'list' or \
'list' in constraint_type:
self.validate_schema({'schema': value})
elif constraint_type == 'dict' or \
'dict' in constraint_type:
self.validate_schema(value)
else:
raise SchemaError(
errors.ERROR_SCHEMA_TYPE.format(field))
elif constraint in self.special_rules:
pass
elif constraint in ('anyof', 'allof', 'noneof', 'oneof'):
if(isinstance(value, Sequence) and
not isinstance(value, _str_type)):
# make sure each definition in an
# anyof/allof constraint validates
for v in value:
# get a copy of the schema with
# anyof/allof replaced with their value
s = copy.copy(constraints)
del s[constraint]
s.update(v)
self.validate_schema({field: s})
else:
self.validate_schema({field: [value]})
elif constraint == 'items':
if isinstance(value, Mapping):
# list of dicts, deprecated
self.validate_schema(value)
else:
for item_schema in value:
self.validate_schema({'schema': item_schema})
elif not hasattr(self, '_validate_' + constraint):
if not self.transparent_schema_rules:
raise SchemaError(errors.ERROR_UNKNOWN_RULE.format(
constraint, field))
def _validate_coerce(self, coerce, field, value):
try:
value = coerce(value)
except (TypeError, ValueError):
self._error(field, errors.ERROR_COERCION_FAILED.format(field))
return value
def _validate_required_fields(self, document):
""" Validates that required fields are not missing. If dependencies
        are specified, then validate 'required' only if all dependencies
are validated.
:param document: the document being validated.
"""
required = list(field for field, definition in self.schema.items()
if definition.get('required') is True)
missing = set(required) - set(key for key in document.keys()
if document.get(key) is not None or
not self.ignore_none_values)
for field in missing:
self._error(field, errors.ERROR_REQUIRED_FIELD)
def _validate_readonly(self, read_only, field, value):
if read_only:
self._error(field, errors.ERROR_READONLY_FIELD)
def _validate_regex(self, match, field, value):
"""
.. versionadded:: 0.7
"""
if not isinstance(value, _str_type):
return
pattern = re.compile(match)
if not pattern.match(value):
self._error(field, errors.ERROR_REGEX.format(match))
def _validate_type(self, data_type, field, value):
def call_type_validation(_type, value):
validator = getattr(self, "_validate_type_" + _type)
validator(field, value)
if isinstance(data_type, _str_type):
call_type_validation(data_type, value)
elif isinstance(data_type, Iterable):
prev_errors = self._errors.copy()
for _type in data_type:
call_type_validation(_type, value)
if len(self._errors) == len(prev_errors):
return
else:
self._errors = prev_errors.copy()
self._error(field, errors.ERROR_BAD_TYPE.format(", ".
join(data_type[:-1]) + ' or ' + data_type[-1]))
def _validate_type_string(self, field, value):
if not isinstance(value, _str_type):
self._error(field, errors.ERROR_BAD_TYPE.format("string"))
def _validate_type_bytes(self, field, value):
if not isinstance(value, bytes):
self._error(field, errors.ERROR_BAD_TYPE.format("bytes"))
def _validate_type_integer(self, field, value):
if not isinstance(value, _int_types):
self._error(field, errors.ERROR_BAD_TYPE.format("integer"))
def _validate_type_float(self, field, value):
if not isinstance(value, float):
self._error(field, errors.ERROR_BAD_TYPE.format("float"))
def _validate_type_number(self, field, value):
"""
.. versionadded:: 0.6
"""
if not isinstance(value, float) and not isinstance(value, _int_types):
self._error(field, errors.ERROR_BAD_TYPE.format("number"))
def _validate_type_boolean(self, field, value):
if not isinstance(value, bool):
self._error(field, errors.ERROR_BAD_TYPE.format("boolean"))
def _validate_type_datetime(self, field, value):
if not isinstance(value, datetime):
self._error(field, errors.ERROR_BAD_TYPE.format("datetime"))
def _validate_type_date(self, field, value):
if not isinstance(value, date):
self._error(field, errors.ERROR_BAD_TYPE.format("date"))
def _validate_type_dict(self, field, value):
if not isinstance(value, Mapping):
self._error(field, errors.ERROR_BAD_TYPE.format("dict"))
def _validate_type_list(self, field, value):
if not isinstance(value, Sequence) or isinstance(
value, _str_type):
self._error(field, errors.ERROR_BAD_TYPE.format("list"))
def _validate_type_set(self, field, value):
if not isinstance(value, set):
self._error(field, errors.ERROR_BAD_TYPE.format("set"))
def _validate_maxlength(self, max_length, field, value):
if isinstance(value, Sequence):
if len(value) > max_length:
self._error(field, errors.ERROR_MAX_LENGTH.format(max_length))
def _validate_minlength(self, min_length, field, value):
if isinstance(value, Sequence):
if len(value) < min_length:
self._error(field, errors.ERROR_MIN_LENGTH.format(min_length))
def _validate_maxsize(self, max_size, field, value):
if isinstance(value, bytes):
if sys.getsizeof(value) > max_size:
self._error(field, errors.ERROR_MAX_SIZE.format(max_size))
def _validate_minsize(self, min_size, field, value):
if isinstance(value, bytes):
if sys.getsizeof(value) < min_size:
self._error(field, errors.ERROR_MIN_SIZE.format(min_size))
def _validate_max(self, max_value, field, value):
if isinstance(value, (_int_types, float)):
if value > max_value:
self._error(field, errors.ERROR_MAX_VALUE.format(max_value))
def _validate_min(self, min_value, field, value):
if isinstance(value, (_int_types, float)):
if value < min_value:
self._error(field, errors.ERROR_MIN_VALUE.format(min_value))
def _validate_before(self, max_value, field, value):
if isinstance(value, (datetime, date)):
if value > max_value:
self._error(field,
errors.ERROR_MAX_VALUE.format(repr(max_value)))
def _validate_after(self, min_value, field, value):
if isinstance(value, (datetime, date)):
if value < min_value:
self._error(field,
errors.ERROR_MIN_VALUE.format(repr(min_value)))
def _validate_allowed(self, allowed_values, field, value):
if isinstance(value, _str_type):
if value not in allowed_values:
self._error(field, errors.ERROR_UNALLOWED_VALUE.format(value))
elif isinstance(value, Sequence):
disallowed = set(value) - set(allowed_values)
if disallowed:
self._error(
field,
errors.ERROR_UNALLOWED_VALUES.format(list(disallowed))
)
elif isinstance(value, int):
if value not in allowed_values:
self._error(field, errors.ERROR_UNALLOWED_VALUE.format(value))
def _validate_empty(self, empty, field, value):
if isinstance(value, _str_type) and len(value) == 0 and not empty:
self._error(field, errors.ERROR_EMPTY_NOT_ALLOWED)
def _validate_schema(self, schema, field, value, nested_allow_unknown):
if isinstance(value, Sequence) and not isinstance(value, _str_type):
list_errors = {}
for i in range(len(value)):
validator = self.__get_child_validator(
schema={i: schema}, allow_unknown=self.allow_unknown)
validator.validate({i: value[i]}, context=self.document)
list_errors.update(validator.errors)
if len(list_errors):
self._error(field, list_errors)
elif isinstance(value, Mapping):
if 'list' in self.schema[field]['type']:
return
validator = copy.copy(self)
validator.schema = schema
if not validator.allow_unknown:
validator.allow_unknown = nested_allow_unknown
validator.validate(value, context=self.document,
update=self.update)
if len(validator.errors):
self._error(field, validator.errors)
def _validate_valueschema(self, schema, field, value):
if isinstance(value, Mapping):
for key, document in value.items():
validator = self.__get_child_validator()
validator.validate(
{key: document}, {key: schema}, context=self.document)
if len(validator.errors):
self._error(field, validator.errors)
def _validate_propertyschema(self, schema, field, value):
if isinstance(value, Mapping):
validator = self.__get_child_validator(
schema={field: {'type': 'list', 'schema': schema}})
validator.validate({field: list(value.keys())},
context=self.document)
for error in validator.errors:
self._error(field, error)
def _validate_items(self, items, field, value):
if isinstance(items, Mapping):
self._validate_items_schema(items, field, value)
elif isinstance(items, Sequence):
self._validate_items_list(items, field, value)
def _validate_items_list(self, schema, field, values):
if len(schema) != len(values):
self._error(field, errors.ERROR_ITEMS_LIST.format(len(schema)))
else:
for i in range(len(schema)):
validator = self.__get_child_validator(schema={i: schema[i]})
validator.validate({i: values[i]}, context=self.document)
for error in validator.errors:
self.errors.setdefault(field, {})
self.errors[field].update(validator.errors)
def _validate_items_schema(self, schema, field, value):
validator = self.__get_child_validator(schema=schema)
for item in value:
validator.validate(item, context=self.document)
for field, error in validator.errors.items():
self._error(field, error)
def _validate_dependencies(self, document, dependencies, field,
break_on_error=False):
if isinstance(dependencies, _str_type):
dependencies = [dependencies]
if isinstance(dependencies, Sequence):
for dependency in dependencies:
parts = dependency.split('.')
subdoc = copy.copy(document)
for part in parts:
if part not in subdoc:
if not break_on_error:
self._error(field,
errors.ERROR_DEPENDENCIES_FIELD
.format(dependency))
else:
return False
else:
subdoc = subdoc[part]
elif isinstance(dependencies, Mapping):
for dep_name, dep_values in dependencies.items():
if isinstance(dep_values, _str_type):
dep_values = [dep_values]
parts = dep_name.split('.')
subdoc = copy.copy(document)
for part in parts:
if part not in subdoc:
if not break_on_error:
self._error(
field,
errors.ERROR_DEPENDENCIES_FIELD_VALUE.format(
(dep_name, dep_values))
)
break
else:
return False
else:
subdoc = subdoc[part]
if isinstance(subdoc, _str_type) and subdoc not in dep_values:
if not break_on_error:
self._error(
field,
errors.ERROR_DEPENDENCIES_FIELD_VALUE.format(
(dep_name, dep_values))
)
else:
return False
return True
def _validate_validator(self, validator, field, value):
# call customized validator function
validator(field, value, self._error)
def _validate_logical(self, operator, definitions, field, value):
# validates value against each definition in definitions
if isinstance(definitions, Mapping):
definitions = [definitions]
# count the number of definitions that validate
valid = 0
errorstack = {}
for i in range(len(definitions)):
definition = definitions[i]
# create a schema instance with the rules in definition
s = copy.copy(self.schema[field])
del s[operator]
s.update(definition)
# get a child validator to do our work
v = self.__get_child_validator(schema={field: s})
if v.validate({field: value}):
valid += 1
errorstack["definition %d" % i] = \
v.errors.get(field, 'validated')
if operator == 'anyof' and valid < 1:
e = {'anyof': 'no definitions validated'}
e.update(errorstack)
self._error(field, e)
if operator == 'allof' and valid < len(definitions):
e = {'allof': 'one or more definitions did not validate'}
e.update(errorstack)
self._error(field, e)
if operator == 'noneof' and valid > 0:
e = {'noneof': 'one or more definitions validated'}
e.update(errorstack)
self._error(field, e)
if operator == 'oneof' and valid != 1:
e = {'oneof': 'more than one rule (or no rules) validated'}
e.update(errorstack)
self._error(field, e)
def _validate_anyof(self, definitions, field, value):
self._validate_logical('anyof', definitions, field, value)
def _validate_allof(self, definitions, field, value):
self._validate_logical('allof', definitions, field, value)
def _validate_noneof(self, definitions, field, value):
self._validate_logical('noneof', definitions, field, value)
def _validate_oneof(self, definitions, field, value):
self._validate_logical('oneof', definitions, field, value)
def __get_child_validator(self, **kwargs):
""" creates a new instance of Validator-(sub-)class, all initial
parameters of the parent are passed to the initialization, unless
a parameter is given as an explicit *keyword*-parameter.
:rtype: an instance of self.__class__"""
child_config = self.__config.copy()
child_config.update(kwargs)
return self.__class__(**child_config)
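# Hedged usage sketch, not part of the original module; the schema and documents below are
# illustrative only, but they rely solely on rules documented in the class docstring above.
if __name__ == '__main__':
    example_schema = {'name': {'type': 'string', 'required': True},
                      'age': {'type': 'integer', 'min': 0}}
    example_validator = Validator(example_schema)
    print(example_validator.validate({'name': 'john doe', 'age': 5}))  # True
    print(example_validator.validate({'age': -1}))  # False; per-field messages in .errors
    print(example_validator.errors)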
|
|
from __future__ import absolute_import, division, unicode_literals
import os
import re
import urlparse
import logging
from collections import defaultdict
from datetime import datetime
import jsonschema
from jsonschema.compat import str_types, int_types
from flexget.event import fire_event
from flexget.utils import qualities, template
from flexget.utils.tools import parse_timedelta
schema_paths = {}
log = logging.getLogger('config_schema')
# TODO: Rethink how config key and schema registration work
def register_schema(path, schema):
"""
Register `schema` to be available at `path` for $refs
:param path: Path to make schema available
:param schema: The schema, or function which returns the schema
"""
schema_paths[path] = schema
# Validator that handles root structure of config.
_root_config_schema = None
def register_config_key(key, schema, required=False):
""" Registers a valid root level key for the config.
:param string key:
Name of the root level key being registered.
:param dict schema:
Schema for the key.
:param bool required:
Specify whether this is a mandatory key.
"""
_root_config_schema['properties'][key] = schema
if required:
_root_config_schema.setdefault('required', []).append(key)
register_schema('/schema/config/%s' % key, schema)
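# Hedged illustration, not part of the original module: a plugin would typically call this
# from a handler for the 'config.register' event fired in get_schema() below; the key name
# and schema here are made up.
#   register_config_key('example_plugin',
#                       {'type': 'object', 'properties': {'enabled': {'type': 'boolean'}}})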
def get_schema():
global _root_config_schema
if _root_config_schema is None:
_root_config_schema = {'type': 'object', 'properties': {}, 'additionalProperties': False}
fire_event('config.register')
        # TODO: Is /schema/root the best place for this?
register_schema('/schema/config', _root_config_schema)
return _root_config_schema
def one_or_more(schema, unique_items=False):
"""
Helper function to construct a schema that validates items matching `schema` or an array
containing items matching `schema`.
"""
schema.setdefault('title', 'single value')
return {
'oneOf': [
{'title': 'multiple values', 'type': 'array', 'items': schema, 'minItems': 1, 'uniqueItems': unique_items},
schema
]
}
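# Hedged illustration, not part of the original module: wrapping a simple schema shows the
# structure one_or_more produces; either a single string or a non-empty list of strings
# will then validate.
example_one_or_more_schema = one_or_more({'type': 'string'})
# -> {'oneOf': [{'title': 'multiple values', 'type': 'array', 'minItems': 1,
#                'uniqueItems': False, 'items': {'type': 'string', 'title': 'single value'}},
#               {'type': 'string', 'title': 'single value'}]}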
def resolve_ref(uri):
"""
Finds and returns a schema pointed to by `uri` that has been registered in the register_schema function.
"""
parsed = urlparse.urlparse(uri)
if parsed.path in schema_paths:
schema = schema_paths[parsed.path]
if callable(schema):
return schema(**dict(urlparse.parse_qsl(parsed.query)))
return schema
raise jsonschema.RefResolutionError("%s could not be resolved" % uri)
def process_config(config, schema=None, set_defaults=True):
"""
Validates the config, and sets defaults within it if `set_defaults` is set.
If schema is not given, uses the root config schema.
:returns: A list with :class:`jsonschema.ValidationError`s if any
"""
if schema is None:
schema = get_schema()
resolver = RefResolver.from_schema(schema)
validator = SchemaValidator(schema, resolver=resolver, format_checker=format_checker)
if set_defaults:
validator.VALIDATORS['properties'] = validate_properties_w_defaults
try:
errors = list(validator.iter_errors(config))
finally:
validator.VALIDATORS['properties'] = jsonschema.Draft4Validator.VALIDATORS['properties']
# Customize the error messages
for e in errors:
set_error_message(e)
e.json_pointer = '/' + '/'.join(map(unicode, e.path))
return errors
def parse_time(time_string):
"""Parse a time string from the config into a :class:`datetime.time` object."""
formats = ['%I:%M %p', '%H:%M', '%H:%M:%S']
for f in formats:
try:
return datetime.strptime(time_string, f).time()
except ValueError:
continue
raise ValueError('invalid time `%s`' % time_string)
def parse_interval(interval_string):
"""Takes an interval string from the config and turns it into a :class:`datetime.timedelta` object."""
regexp = r'^\d+ (second|minute|hour|day|week)s?$'
if not re.match(regexp, interval_string):
raise ValueError("should be in format 'x (seconds|minutes|hours|days|weeks)'")
return parse_timedelta(interval_string)
def parse_percent(percent_input):
"""Takes a size string from the config and turns it into int(bytes)."""
percent_input = percent_input.rstrip('%')
try:
return float(percent_input)
except ValueError:
raise ValueError("should be in format '0-x%'")
def parse_size(size_input):
"""Takes a size string from the config and turns it into int(bytes)."""
prefixes = [None, 'K', 'M', 'G', 'T', 'P']
try:
# Bytes
return int(size_input)
except ValueError:
size_input = size_input.upper().rstrip('IB')
value, unit = float(size_input[:-1]), size_input[-1:]
if unit not in prefixes:
raise ValueError("should be in format '0-x (KiB, MiB, GiB, TiB, PiB)'")
return int(1024 ** prefixes.index(unit) * value)
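# Hedged worked example, not part of the original module: '2 GiB' upper-cases to '2 GIB',
# the trailing 'IB' characters are stripped, '2 ' parses as 2.0 with unit 'G' (index 3 in
# prefixes), so parse_size('2 GiB') == 2 * 1024 ** 3 == 2147483648, while a bare byte
# count such as parse_size('500') returns 500 directly.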
# Public API end here, the rest should not be used outside this module
class RefResolver(jsonschema.RefResolver):
def __init__(self, *args, **kwargs):
kwargs.setdefault('handlers', {'': resolve_ref})
super(RefResolver, self).__init__(*args, **kwargs)
format_checker = jsonschema.FormatChecker(('email',))
@format_checker.checks('quality', raises=ValueError)
def is_quality(instance):
if not isinstance(instance, str_types):
return True
return qualities.get(instance)
@format_checker.checks('quality_requirements', raises=ValueError)
def is_quality_req(instance):
if not isinstance(instance, str_types):
return True
return qualities.Requirements(instance)
@format_checker.checks('time', raises=ValueError)
def is_time(time_string):
if not isinstance(time_string, str_types):
return True
return parse_time(time_string) is not None
@format_checker.checks('interval', raises=ValueError)
def is_interval(interval_string):
if not isinstance(interval_string, str_types):
return True
return parse_interval(interval_string) is not None
@format_checker.checks('size', raises=ValueError)
def is_size(size_string):
if not isinstance(size_string, (str_types, int_types)):
return True
return parse_size(size_string) is not None
@format_checker.checks('percent', raises=ValueError)
def is_percent(percent_string):
if not isinstance(percent_string, str_types):
return True
return parse_percent(percent_string) is not None
@format_checker.checks('regex', raises=ValueError)
def is_regex(instance):
if not isinstance(instance, str_types):
return True
try:
return re.compile(instance)
except re.error as e:
raise ValueError('Error parsing regex: %s' % e)
@format_checker.checks('file', raises=ValueError)
def is_file(instance):
if not isinstance(instance, str_types):
return True
if os.path.isfile(os.path.expanduser(instance)):
return True
raise ValueError('`%s` does not exist' % instance)
@format_checker.checks('path', raises=ValueError)
def is_path(instance):
if not isinstance(instance, str_types):
return True
# Only validate the part of the path before the first identifier to be replaced
pat = re.compile(r'{[{%].*[}%]}')
result = pat.search(instance)
if result:
instance = os.path.dirname(instance[0:result.start()])
if os.path.isdir(os.path.expanduser(instance)):
return True
raise ValueError('`%s` does not exist' % instance)
# TODO: jsonschema has a format checker for uri if rfc3987 is installed, perhaps we should use that
@format_checker.checks('url')
def is_url(instance):
if not isinstance(instance, str_types):
return True
regexp = ('(' + '|'.join(['ftp', 'http', 'https', 'file', 'udp']) +
'):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?')
return re.match(regexp, instance)
def set_error_message(error):
"""
Create user facing error message from a :class:`jsonschema.ValidationError` `error`
"""
# First, replace default error messages with our custom ones
if error.validator == 'type':
if isinstance(error.validator_value, basestring):
valid_types = [error.validator_value]
else:
valid_types = list(error.validator_value)
# Replace some types with more pythony ones
replace = {'object': 'dict', 'array': 'list'}
valid_types = [replace.get(t, t) for t in valid_types]
# Make valid_types into an english list, with commas and 'or'
valid_types = ', '.join(valid_types[:-2] + ['']) + ' or '.join(valid_types[-2:])
        if isinstance(error.instance, dict):
            error.message = 'Got a dict, expected: %s' % valid_types
        elif isinstance(error.instance, list):
            error.message = 'Got a list, expected: %s' % valid_types
        else:
            error.message = 'Got `%s`, expected: %s' % (error.instance, valid_types)
elif error.validator == 'format':
if error.cause:
error.message = unicode(error.cause)
elif error.validator == 'enum':
error.message = 'Must be one of the following: %s' % ', '.join(map(unicode, error.validator_value))
elif error.validator == 'additionalProperties':
if error.validator_value is False:
extras = set(jsonschema._utils.find_additional_properties(error.instance, error.schema))
if len(extras) == 1:
error.message = 'The key `%s` is not valid here.' % extras.pop()
else:
error.message = 'The keys %s are not valid here.' % ', '.join('`%s`' % e for e in extras)
else:
# Remove u'' string representation from jsonschema error messages
error.message = re.sub('u\'(.*?)\'', '`\\1`', error.message)
# Then update with any custom error message supplied from the schema
custom_error = error.schema.get('error_%s' % error.validator, error.schema.get('error'))
if custom_error:
error.message = template.render(custom_error, error.__dict__)
def select_child_errors(validator, errors):
"""
    Looks through subschema errors; if any subschema is determined to be the intended one
    (based on 'type' keyword errors), errors from its branch will be released instead of the parent error.
"""
for error in errors:
if not error.context:
yield error
continue
# Split the suberrors up by which subschema they are from
subschema_errors = defaultdict(list)
for sube in error.context:
subschema_errors[sube.schema_path[0]].append(sube)
# Find the subschemas that did not have a 'type' error validating the instance at this path
no_type_errors = dict(subschema_errors)
valid_types = set()
        for i, sub_errs in subschema_errors.iteritems():
            for e in sub_errs:
if e.validator == 'type' and not e.path:
# Remove from the no_type_errors dict
no_type_errors.pop(i, None)
# Add the valid types to the list of all valid types
if validator.is_type(e.validator_value, 'string'):
valid_types.add(e.validator_value)
else:
valid_types.update(e.validator_value)
if not no_type_errors:
# If all of the branches had a 'type' error, create our own virtual type error with all possible types
for e in validator.descend(error.instance, {'type': valid_types}):
yield e
elif len(no_type_errors) == 1:
# If one of the possible schemas did not have a 'type' error, assume that is the intended one and issue
# all errors from that subschema
for e in no_type_errors.values()[0]:
e.schema_path.extendleft(reversed(error.schema_path))
e.path.extendleft(reversed(error.path))
yield e
else:
yield error
def validate_properties_w_defaults(validator, properties, instance, schema):
if not validator.is_type(instance, 'object'):
return
for key, subschema in properties.iteritems():
if 'default' in subschema:
instance.setdefault(key, subschema['default'])
for error in jsonschema.Draft4Validator.VALIDATORS["properties"](validator, properties, instance, schema):
yield error
def validate_anyOf(validator, anyOf, instance, schema):
errors = jsonschema.Draft4Validator.VALIDATORS["anyOf"](validator, anyOf, instance, schema)
for e in select_child_errors(validator, errors):
yield e
def validate_oneOf(validator, oneOf, instance, schema):
errors = jsonschema.Draft4Validator.VALIDATORS["oneOf"](validator, oneOf, instance, schema)
for e in select_child_errors(validator, errors):
yield e
def validate_deprecated(validator, message, instance, schema):
"""Not really a validator, just warns if deprecated section of config is being used."""
log.warning(message)
validators = {
'anyOf': validate_anyOf,
'oneOf': validate_oneOf,
'deprecated': validate_deprecated
}
SchemaValidator = jsonschema.validators.extend(jsonschema.Draft4Validator, validators)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from lxml import etree
import nova.tests.fakelibvirt as libvirt
def get_vm_xml(name="testname", uuid=None, source_type='file',
interface_type='bridge'):
uuid_tag = ''
if uuid:
uuid_tag = '<uuid>%s</uuid>' % (uuid,)
return '''<domain type='kvm'>
<name>%(name)s</name>
%(uuid_tag)s
<memory>128000</memory>
<vcpu>1</vcpu>
<os>
<type>hvm</type>
<kernel>/somekernel</kernel>
<cmdline>root=/dev/sda</cmdline>
<boot dev='hd'/>
</os>
<features>
<acpi/>
</features>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source %(source_type)s='/somefile'/>
<target dev='vda' bus='virtio'/>
</disk>
<interface type='%(interface_type)s'>
<mac address='05:26:3e:31:28:1f'/>
<source %(interface_type)s='br100'/>
</interface>
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='5901' autoport='yes' keymap='en-us'/>
<graphics type='spice' port='5901' autoport='yes' keymap='en-us'/>
</devices>
</domain>''' % {'name': name,
'uuid_tag': uuid_tag,
'source_type': source_type,
'interface_type': interface_type}
class FakeLibvirtTests(test.TestCase):
def setUp(self):
super(FakeLibvirtTests, self).setUp()
libvirt._reset()
def get_openReadOnly_curry_func(self):
return lambda uri: libvirt.openReadOnly(uri)
def get_openAuth_curry_func(self):
def fake_cb(credlist):
return 0
return lambda uri: libvirt.openAuth(uri,
[[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_NOECHOPROMPT],
fake_cb,
None], 0)
def _test_connect_method_accepts_None_uri_by_default(self, conn_method):
conn = conn_method(None)
self.assertNotEqual(conn, None, "Connecting to fake libvirt failed")
def test_openReadOnly_accepts_None_uri_by_default(self):
conn_method = self.get_openReadOnly_curry_func()
self._test_connect_method_accepts_None_uri_by_default(conn_method)
def test_openAuth_accepts_None_uri_by_default(self):
conn_method = self.get_openAuth_curry_func()
self._test_connect_method_accepts_None_uri_by_default(conn_method)
def _test_connect_method_can_refuse_None_uri(self, conn_method):
libvirt.allow_default_uri_connection = False
self.assertRaises(ValueError, conn_method, None)
def test_openReadOnly_can_refuse_None_uri(self):
conn_method = self.get_openReadOnly_curry_func()
self._test_connect_method_can_refuse_None_uri(conn_method)
def test_openAuth_can_refuse_None_uri(self):
conn_method = self.get_openAuth_curry_func()
self._test_connect_method_can_refuse_None_uri(conn_method)
def _test_connect_method_refuses_invalid_URI(self, conn_method):
self.assertRaises(libvirt.libvirtError, conn_method, 'blah')
def test_openReadOnly_refuses_invalid_URI(self):
conn_method = self.get_openReadOnly_curry_func()
self._test_connect_method_refuses_invalid_URI(conn_method)
def test_openAuth_refuses_invalid_URI(self):
conn_method = self.get_openAuth_curry_func()
self._test_connect_method_refuses_invalid_URI(conn_method)
def test_getInfo(self):
conn = libvirt.openReadOnly(None)
res = conn.getInfo()
self.assertIn(res[0], ('i686', 'x86_64'))
self.assertTrue(1024 <= res[1] <= 16384,
"Memory unusually high or low.")
self.assertTrue(1 <= res[2] <= 32,
"Active CPU count unusually high or low.")
self.assertTrue(800 <= res[3] <= 4500,
"CPU speed unusually high or low.")
self.assertTrue(res[2] <= (res[5] * res[6]),
"More active CPUs than num_sockets*cores_per_socket")
def test_createXML_detects_invalid_xml(self):
self._test_XML_func_detects_invalid_xml('createXML', [0])
def test_defineXML_detects_invalid_xml(self):
self._test_XML_func_detects_invalid_xml('defineXML', [])
def _test_XML_func_detects_invalid_xml(self, xmlfunc_name, args):
conn = self.get_openAuth_curry_func()('qemu:///system')
try:
getattr(conn, xmlfunc_name)("this is not valid </xml>", *args)
        except libvirt.libvirtError as e:
self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_XML_DETAIL)
self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_DOMAIN)
return
raise self.failureException("Invalid XML didn't raise libvirtError")
def test_defineXML_defines_domain(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
conn.defineXML(get_vm_xml())
dom = conn.lookupByName('testname')
self.assertEqual('testname', dom.name())
self.assertEqual(0, dom.isActive())
dom.undefine()
self.assertRaises(libvirt.libvirtError,
conn.lookupByName,
'testname')
def test_blockStats(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
conn.createXML(get_vm_xml(), 0)
dom = conn.lookupByName('testname')
blockstats = dom.blockStats('vda')
self.assertEqual(len(blockstats), 5)
for x in blockstats:
self.assertTrue(type(x) in [int, long])
def test_attach_detach(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
conn.createXML(get_vm_xml(), 0)
dom = conn.lookupByName('testname')
xml = '''<disk type='block'>
<driver name='qemu' type='raw'/>
<source dev='/dev/nbd0'/>
<target dev='/dev/vdc' bus='virtio'/>
</disk>'''
self.assertTrue(dom.attachDevice(xml))
self.assertTrue(dom.detachDevice(xml))
def test_info(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
conn.createXML(get_vm_xml(), 0)
dom = conn.lookupByName('testname')
info = dom.info()
self.assertEqual(info[0], libvirt.VIR_DOMAIN_RUNNING)
self.assertEqual(info[1], 128000)
self.assertTrue(info[2] <= 128000)
self.assertEqual(info[3], 1)
self.assertTrue(type(info[4]) in [int, long])
def test_createXML_runs_domain(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
conn.createXML(get_vm_xml(), 0)
dom = conn.lookupByName('testname')
self.assertEqual('testname', dom.name())
self.assertEqual(1, dom.isActive())
dom.destroy()
try:
dom = conn.lookupByName('testname')
except libvirt.libvirtError as e:
self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_DOMAIN)
self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_QEMU)
return
self.fail("lookupByName succeeded for destroyed non-defined VM")
def test_defineXML_remembers_uuid(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
uuid = 'b21f957d-a72f-4b93-b5a5-45b1161abb02'
conn.defineXML(get_vm_xml(uuid=uuid))
dom = conn.lookupByName('testname')
self.assertEquals(dom.UUIDString(), uuid)
def test_createWithFlags(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
conn.defineXML(get_vm_xml())
dom = conn.lookupByName('testname')
self.assertFalse(dom.isActive(), 'Defined domain was running.')
dom.createWithFlags(0)
self.assertTrue(dom.isActive(),
'Domain wasn\'t running after createWithFlags')
def test_managedSave(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
conn.defineXML(get_vm_xml())
dom = conn.lookupByName('testname')
self.assertFalse(dom.isActive(), 'Defined domain was running.')
dom.createWithFlags(0)
self.assertEquals(dom.hasManagedSaveImage(0), 0)
dom.managedSave(0)
self.assertEquals(dom.hasManagedSaveImage(0), 1)
dom.managedSaveRemove(0)
self.assertEquals(dom.hasManagedSaveImage(0), 0)
def test_listDomainsId_and_lookupById(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
self.assertEquals(conn.listDomainsID(), [])
conn.defineXML(get_vm_xml())
dom = conn.lookupByName('testname')
dom.createWithFlags(0)
self.assertEquals(len(conn.listDomainsID()), 1)
dom_id = conn.listDomainsID()[0]
self.assertEquals(conn.lookupByID(dom_id), dom)
dom_id = conn.listDomainsID()[0]
try:
conn.lookupByID(dom_id + 1)
except libvirt.libvirtError as e:
self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_DOMAIN)
self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_QEMU)
return
raise self.failureException("Looking up an invalid domain ID didn't "
"raise libvirtError")
def test_define_and_retrieve(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
self.assertEquals(conn.listDomainsID(), [])
conn.defineXML(get_vm_xml())
dom = conn.lookupByName('testname')
xml = dom.XMLDesc(0)
etree.fromstring(xml)
def _test_accepts_source_type(self, source_type):
conn = self.get_openAuth_curry_func()('qemu:///system')
self.assertEquals(conn.listDomainsID(), [])
conn.defineXML(get_vm_xml(source_type=source_type))
dom = conn.lookupByName('testname')
xml = dom.XMLDesc(0)
tree = etree.fromstring(xml)
elem = tree.find('./devices/disk/source')
self.assertEquals(elem.get('file'), '/somefile')
def test_accepts_source_dev(self):
self._test_accepts_source_type('dev')
def test_accepts_source_path(self):
self._test_accepts_source_type('path')
def test_network_type_bridge_sticks(self):
self._test_network_type_sticks('bridge')
def test_network_type_network_sticks(self):
self._test_network_type_sticks('network')
def _test_network_type_sticks(self, network_type):
conn = self.get_openAuth_curry_func()('qemu:///system')
self.assertEquals(conn.listDomainsID(), [])
conn.defineXML(get_vm_xml(interface_type=network_type))
dom = conn.lookupByName('testname')
xml = dom.XMLDesc(0)
tree = etree.fromstring(xml)
elem = tree.find('./devices/interface')
self.assertEquals(elem.get('type'), network_type)
elem = elem.find('./source')
self.assertEquals(elem.get(network_type), 'br100')
def test_getType(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
self.assertEquals(conn.getType(), 'QEMU')
def test_getVersion(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
self.assertTrue(type(conn.getVersion()) is int)
def test_getCapabilities(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
etree.fromstring(conn.getCapabilities())
def test_nwfilter_define_undefine(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
# Will raise an exception if it's not valid XML
xml = '''<filter name='nova-instance-instance-789' chain='root'>
<uuid>946878c6-3ad3-82b2-87f3-c709f3807f58</uuid>
</filter>'''
conn.nwfilterDefineXML(xml)
nwfilter = conn.nwfilterLookupByName('nova-instance-instance-789')
nwfilter.undefine()
try:
conn.nwfilterLookupByName('nova-instance-instance-789320334')
except libvirt.libvirtError as e:
self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_NWFILTER)
self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_NWFILTER)
return
raise self.failureException("Invalid NWFilter name didn't"
" raise libvirtError")
def test_compareCPU_compatible(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
xml = '''<cpu>
<arch>%s</arch>
<model>%s</model>
<vendor>%s</vendor>
<topology sockets="%d" cores="%d" threads="%d"/>
</cpu>''' % (libvirt.node_arch,
libvirt.node_cpu_model,
libvirt.node_cpu_vendor,
libvirt.node_sockets,
libvirt.node_cores,
libvirt.node_threads)
self.assertEqual(conn.compareCPU(xml, 0),
libvirt.VIR_CPU_COMPARE_IDENTICAL)
def test_compareCPU_incompatible_vendor(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
xml = '''<cpu>
<arch>%s</arch>
<model>%s</model>
<vendor>%s</vendor>
<topology sockets="%d" cores="%d" threads="%d"/>
</cpu>''' % (libvirt.node_arch,
libvirt.node_cpu_model,
"AnotherVendor",
libvirt.node_sockets,
libvirt.node_cores,
libvirt.node_threads)
self.assertEqual(conn.compareCPU(xml, 0),
libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
def test_compareCPU_incompatible_arch(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
xml = '''<cpu>
<arch>%s</arch>
<model>%s</model>
<vendor>%s</vendor>
<topology sockets="%d" cores="%d" threads="%d"/>
</cpu>''' % ('not-a-valid-arch',
libvirt.node_cpu_model,
libvirt.node_cpu_vendor,
libvirt.node_sockets,
libvirt.node_cores,
libvirt.node_threads)
self.assertEqual(conn.compareCPU(xml, 0),
libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
def test_compareCPU_incompatible_model(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
xml = '''<cpu>
<arch>%s</arch>
<model>%s</model>
<vendor>%s</vendor>
<topology sockets="%d" cores="%d" threads="%d"/>
</cpu>''' % (libvirt.node_arch,
"AnotherModel",
libvirt.node_cpu_vendor,
libvirt.node_sockets,
libvirt.node_cores,
libvirt.node_threads)
self.assertEqual(conn.compareCPU(xml, 0),
libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
def test_compareCPU_compatible_unspecified_model(self):
conn = self.get_openAuth_curry_func()('qemu:///system')
xml = '''<cpu>
<arch>%s</arch>
<vendor>%s</vendor>
<topology sockets="%d" cores="%d" threads="%d"/>
</cpu>''' % (libvirt.node_arch,
libvirt.node_cpu_vendor,
libvirt.node_sockets,
libvirt.node_cores,
libvirt.node_threads)
self.assertEqual(conn.compareCPU(xml, 0),
libvirt.VIR_CPU_COMPARE_IDENTICAL)
|
|
##for Raleigh & Grant
##who contributed more than they know
################################################################################
############################## WHEEL OF FORTUNE ################################
################################################################################
import random
import string
import time
import pickle
PUZZLE_FILENAME = "puzzles_and_clues.txt"
inFile = open(PUZZLE_FILENAME, "r")
puzzles_and_clues = pickle.load(inFile)
inFile.close()
def get_puzzle_and_clue(puzzles_and_clues):
"""
puzzles_and_clues: dictionary of puzzles and clues,
whose keys are clues and values are puzzles.
Returns tuple of length two, where first element is
clue and second element is puzzle.
"""
clue = puzzles_and_clues.keys()[random.randint(0, len(puzzles_and_clues.keys()) - 1)]
puzzle = puzzles_and_clues[clue][random.randint(0, len(puzzles_and_clues[clue]) - 1)]
puzzle_and_clue = (clue, string.upper(puzzle))
return puzzle_and_clue
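# Illustrative sketch only (not called by the game): get_puzzle_and_clue expects a
# dict mapping clue strings to *lists* of puzzle strings, which is the structure
# assumed to live in the pickled puzzle file. The data below is made up for the example.
def _demo_puzzle_lookup():
    demo_puzzles = {"Event": ["earthquake", "block party"],
                    "Phrase": ["piece of cake"]}
    # Returns e.g. ("Event", "EARTHQUAKE") -- the puzzle comes back upper-cased.
    return get_puzzle_and_clue(demo_puzzles)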
def start():
"""
Starts game, initializes important variables, and calls function:
gameSetup(playerNames_hum, playerNames_comp, playerOrder_val, rounds)
"""
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
print string.center((("*" * 5) + (" " * 70) + ("*" * 5)), 80)
print string.center((("*" * 5) + (" " * 21) + "Welcome to WHEEL OF FORTUNE!" + (" " * 21) + ("*" * 5)), 80)
print string.center((("*" * 5) + (" " * 7) + "I'm your host, Pat Sajak, with your hostess Vanna White." + (" " * 7) + ("*" * 5)), 80)
print string.center((("*" * 5) + (" " * 70) + ("*" * 5)), 80)
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
playerNames_hum = ["Player 1", "Player 2", "Player 3"]
playerNames_comp = ["Chad Ledouche", "Braxton Beauregard"]
playerOrder_val = [[0, 0], [0, 0], [0, 0]]
rounds = ["first", "second", "third", "fourth"]
gameSetup(playerNames_hum, playerNames_comp, playerOrder_val, rounds)
def gameSetup(playerNames_hum, playerNames_comp, playerOrder_val, rounds):
"""
Calls game setup functions: get_numPlayers() and
get_playerNames(numPlayers, playerNames_hum, playerNames_comp)
Also calls function: game(players, playerOrder_val)
"""
numPlayers = get_numPlayers()
players = get_playerNames(numPlayers, playerNames_hum, playerNames_comp)
game(players, playerOrder_val)
def disp_scores(playerOrder_val):
print "playerOrder_val in disp_scores is:", playerOrder_val
playerOrder_val_round = playerOrder_val[:]
playerOrder_val_round.sort(reverse = True)
print "playerOrder_val in disp_scores is:", playerOrder_val
first = playerOrder_val[0]
second = playerOrder_val[1]
third = playerOrder_val[2]
if first[0] != second[0] and second[0] != third[0]:
print first[1], "in first place with $" + str(first[0]) + "."
print second[1], "in second place with $" + str(second[0]) + "."
print third[1], "in third place with $" + str(third[0]) + "."
if first[0] > second[0] and second[0] == third[0]:
print first[1], "in first place with $" + str(first[0]) + "."
print second[1], "and", third[1], "tied for second with $" + str(third[0]) + " each."
if first[0] == second[0] and second[0] > third[0]:
print second[1], "and", first[1], "tied for the lead with $" + str(first[0]) + " each."
print third[1], "in second place with $" + str(third[0]) + "."
if first[0] == second[0] and second[0] == third[0]:
print second[1] + ", " + third[1] + ", and,", first[1], "tied for the lead with $" + str(third[0]) + " each ."
print "Surely, this is more improbable than the Big Bang (Theory's merciful cancellation.)"
def game(players, playerOrder_val):
"""
Calls function: get_playerOrder(players, playerOrder_val) and saves
result to variable: playerOrder
Calls function: game_round(players, playerOrder_val) and saves
result to variable: playerOrder_val
Iterates through function: game_round(playerOrder, playerOrder_val)
four times, each time returning variable: playerOrder_val
"""
# sets the number of rounds in the game
num_rounds = 4
# tracks the game's round number
round_num = 1
# list that tracks the starting order of players throughout game
playerOrder = get_playerOrder(players, playerOrder_val)
while round_num <= num_rounds:
## print "playerOrder_val is:", playerOrder_val
## if round_num == 1:
## playerOrder_val = [[200, 'Grant'], [3100, 'Raleigh'], [0, 'Eric']]
playerOrder_val = game_round(playerOrder, playerOrder_val, round_num)
## print "playerOrder_val:", playerOrder_val
print ""
print "At the end of ROUND", round_num, "the scores are:"
disp_scores(playerOrder_val)
print ""
round_num += 1
end_game(players)
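# disp_puzzle_init builds the spaced, player-facing board (e.g. "_ _ _ ..."), while
# incom_puzzle_init (below) builds the unspaced tracking string that is compared
# against the real puzzle to detect when it has been solved.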
def disp_puzzle_init(puzzle_and_clue):
disp_puzzle = ""
for i in range(len(puzzle_and_clue[1])):
if puzzle_and_clue[1][i] in string.punctuation or puzzle_and_clue[1][i] == " ":
disp_puzzle += puzzle_and_clue[1][i] + " "
else:
disp_puzzle += "_ "
return disp_puzzle
def incom_puzzle_init(puzzle_and_clue):
incom_puzzle = ""
for i in range(len(puzzle_and_clue[1])):
if puzzle_and_clue[1][i] in string.punctuation or puzzle_and_clue[1][i] == " ":
incom_puzzle += puzzle_and_clue[1][i]
else:
incom_puzzle += "_"
return incom_puzzle
def disp_remaining_letters(alpha):
vowels = ["A", "E", "I", "O", "U"]
consonants = ['B', 'C', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z']
disp_vowels = ""
disp_consonants = ""
for i in range(len(vowels)):
if vowels[i] in alpha:
disp_vowels += vowels[i] + " "
for i in range(len(consonants)):
if consonants[i] in alpha:
disp_consonants += consonants[i] + " "
print ""
print "Remaining letters:"
print "Vowels: " + disp_vowels
print "Consonants: " + disp_consonants
print ""
def disp_puzzle_and_clue(puzzle_and_clue, disp_puzzle):
print ""
print 'Clue: "' + puzzle_and_clue[0] + '"'
print "Puzzle: " + disp_puzzle
def game_round(playerOrder, playerOrder_val, round_num):
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
print ""
print string.center("ROUND " + str(round_num), 80)
print ""
print string.center(("*" * 80), 80)
print string.center(("*" * 80), 80)
## playerOrder_val = playerOrder_val
playerOrder_val_round = [[0, 0], [0, 0], [0, 0]]
# retrieves and stores tuple, length two, whose first element
# is round clue and second element is puzzle
puzzle_and_clue = get_puzzle_and_clue(puzzles_and_clues)
## puzzle_and_clue = ("Event", "EARTHQUAKE")
# retrieves and stores string of empty puzzle
disp_puzzle = disp_puzzle_init(puzzle_and_clue)
# fills out as letters are guessed; not meant to be printed
incom_puzzle = incom_puzzle_init(puzzle_and_clue)
# stores uppercase alphabet in variable alpha
alpha = string.ascii_uppercase
turn_num = 1
num_turns = 100
print "The puzzle and clue for ROUND", round_num, "are:"
## print puzzle_and_clue[1]
player_turn = (round_num - 1) % 3
while incom_puzzle != puzzle_and_clue[1]:
if turn_num == num_turns:
break
while num_turns >= turn_num:
turn_num += 1
if turn_num == num_turns:
break
disp_puzzle_and_clue(puzzle_and_clue, disp_puzzle)
disp_remaining_letters(alpha)
player_selection = get_player_selection(playerOrder, player_turn, playerOrder_val_round)
if player_selection == 0:
print ""
print "You chose to solve the puzzle."
guess = string.upper(get_guessWord())
if guess == puzzle_and_clue[1]:
incom_puzzle = puzzle_and_clue[1]
else:
print ""
print "Sorry, " + playerOrder[player_turn] + ", that is not the solution to the puzzle."
print "Possession of the Wheel passes to " + playerOrder[(player_turn + 1) % 3] + "."
print ""
print string.center(("-" * 80), 80)
time.sleep(1)
player_turn = (player_turn + 1) % 3
if player_selection == 1:
print ""
print "You chose to spin The Wheel."
prize = get_prize(round_num)
subPrize = prize
if prize == "bankrupt":
playerOrder_val = bankrupt(player_turn, playerOrder, playerOrder_val_round)
player_turn = (player_turn + 1) % 3
if prize == "loseATurn":
lose_a_turn(player_turn, playerOrder)
player_turn = (player_turn + 1) % 3
if prize == "freePlay":
freePlay_choice = 0
print ""
print playerOrder[player_turn], "spun for a FREE PLAY!"
print playerOrder[player_turn] + ", you may solve or guess a letter (including vowels) without penalty."
print ""
selection_freePlay = get_freePlayChoice(playerOrder[player_turn])
subPrize = 500
if selection_freePlay == 1:
guess = string.upper(get_guessfreePlay())
print string.center(("-" * 80), 80)
print ""
print string.center(("Vanna, does the puzzle contain any '" + guess + "'s?"), 80)
print ""
print string.center(("-" * 80), 80)
time.sleep(0.7)
letter_app = 0
print ""
print disp_puzzle
for i in range(len(puzzle_and_clue[1])):
if puzzle_and_clue[1][i] == guess:
time.sleep(0.7)
disp_puzzle = disp_puzzle[0:(i * 2)] + guess + disp_puzzle[((i * 2) + 1):]
incom_puzzle = incom_puzzle[0:i] + guess + incom_puzzle[(i + 1):]
print ""
print disp_puzzle
letter_app += 1
playerOrder_val_round[player_turn][0] = guess_result(player_turn, playerOrder, playerOrder_val_round, guess, subPrize, letter_app)
if incom_puzzle == puzzle_and_clue[1]:
break
if selection_freePlay == 2:
guess_word = get_guessWord()
guess_word = string.upper(guess_word)
if guess_word == puzzle_and_clue[1]:
incom_puzzle = guess_word
break
else:
print ""
print "Sorry, that is not the solution to the puzzle."
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
print string.center(("-" * 80), 80)
if type(prize) is int:
print ""
print playerOrder[player_turn] + " spun for $" + str(prize) + "!"
print ""
guess = get_guessConsonant()
if guess in alpha:
alpha = alpha.replace(guess, "")
print string.center(("-" * 80), 80)
print ""
print string.center(("Vanna, does the puzzle contain any '" + guess + "'s?"), 80)
print ""
print string.center(("-" * 80), 80)
time.sleep(0.7)
letter_app = 0
print ""
print disp_puzzle
time.sleep(0.7)
if guess in puzzle_and_clue[1]:
for i in range(len(puzzle_and_clue[1])):
if puzzle_and_clue[1][i] == guess:
## print disp_puzzle
disp_puzzle = disp_puzzle[0:(i * 2)] + guess + disp_puzzle[((i * 2) + 1):]
incom_puzzle = incom_puzzle[0:i] + guess + incom_puzzle[(i + 1):]
print ""
print disp_puzzle
time.sleep(0.7)
for i in range(len(puzzle_and_clue[1])):
if puzzle_and_clue[1][i] == guess:
letter_app += 1
playerOrder_val_round[player_turn][0] = guess_result(player_turn, playerOrder, playerOrder_val_round, guess, subPrize, letter_app)
if incom_puzzle == puzzle_and_clue[1]:
break
else:
print ""
print string.center(("-" * 80), 80)
print ""
print "I'm sorry", playerOrder[player_turn] + ", but there are no '" + guess + "'s in the puzzle."
print ""
print "Possession of The Wheel passes to " + playerOrder[(player_turn + 1) % 3] + "."
print ""
print string.center(("-" * 80), 80)
time.sleep(1.5)
player_turn = (player_turn + 1) % 3
else:
guess_previously_called(player_turn, playerOrder, guess)
if player_selection == 2:
print ""
print "You chose to buy a vowel."
print ""
playerOrder_val_round[player_turn][0] = (playerOrder_val_round[player_turn][0] - 250)
guess = string.upper(get_guessVowel())
if guess in alpha:
alpha = alpha.replace(guess, "")
else:
guess_previously_called(player_turn, playerOrder, guess)
player_turn = (player_turn + 1) % 3
break
print ""
print string.center(("-" * 80), 80)
print ""
print string.center(("Vanna, does the puzzle contain any '" + guess + "'s?"), 80)
print ""
print string.center(("-" * 80), 80)
time.sleep(0.7)
print ""
print disp_puzzle
letter_app = 0
if guess in puzzle_and_clue[1]:
for i in range(len(puzzle_and_clue[1])):
if puzzle_and_clue[1][i] == guess:
time.sleep(0.7)
## print disp_puzzle
disp_puzzle = disp_puzzle[0:(i * 2)] + guess + disp_puzzle[((i * 2) + 1):]
incom_puzzle = incom_puzzle[0:i] + guess + incom_puzzle[(i + 1):]
print ""
print disp_puzzle
letter_app += 1
if letter_app == 0:
print ""
print string.center(("-" * 80), 80)
print ""
print "I'm sorry", playerOrder[player_turn] + ", but there are no '" + guess + "'s in the puzzle."
print ""
print "Possession of The Wheel passes to " + playerOrder[(player_turn + 1) % 3] + "."
print ""
print string.center(("-" * 80), 80)
time.sleep(1.5)
player_turn = (player_turn + 1) % 3
break
if letter_app == 1:
print ""
print "Good guess,", playerOrder[player_turn] + "! There is 1", guess, "in the puzzle!"
print ""
print string.center(("-" * 80), 80)
print ""
if letter_app >= 2:
print ""
print "Good guess,", playerOrder[player_turn] + "! There are", letter_app, "'" + guess + "'s in the puzzle!"
print ""
print string.center(("-" * 80), 80)
print ""
if incom_puzzle == puzzle_and_clue[1]:
playerOrder_val[player_turn][0] = playerOrder_val_round[player_turn][0] + playerOrder_val[player_turn][0]
print string.center(("-" * 80), 80)
time.sleep(2.5)
print ""
print "Congratulations,", playerOrder[player_turn] + ". You correctly solved the puzzle:"
print string.upper(puzzle_and_clue[1])
print ""
break
print "playerOrder_val right before func return:", playerOrder_val
return playerOrder_val
def end_game(players):
print "----------------------"
print "GAME OVER!"
print "----------------------"
print "Would you like to play again? (y/n)"
selection = string.lower(raw_input())
if selection == "y" or selection == "yes":
playerOrder_val = [[0, 0], [0, 0], [0, 0]]
game(players, playerOrder_val)
def get_numPlayers():
numPlayers = 0
while numPlayers <= 0 or numPlayers > 3:
print ""
print "How many contestants (max: 3) will be playing today?"
numPlayers = raw_input("Number of players: ",)
if numPlayers == "One" or numPlayers == "one" or numPlayers == "ONE" or numPlayers == "1":
numPlayers = 1
print "You have selected play for 1 player."
if numPlayers == "Two" or numPlayers == "two" or numPlayers == "TWO" or numPlayers == "2":
numPlayers = 2
print "You have selected play for 2 players."
if numPlayers == "Three" or numPlayers == "three" or numPlayers == "THREE" or numPlayers == "3":
numPlayers = 3
print "You have selected play for 3 players."
if not isinstance(numPlayers, int) or numPlayers < 1 or numPlayers > 3:
print ""
print string.center(("-" * 80), 80)
print "ERROR: INVALID PLAYER NUMBER"
print string.center(("-" * 80), 80)
return numPlayers
def get_playerNames(numPlayers, playerNames_hum, playerNames_comp):
players = ["Player 1", "Player 2", "Player 3"]
print ""
## print string.center(("-" * 80), 80)
## print string.center(("-" * 80), 80)
for i in range(numPlayers):
name = ""
while name == "":
name = raw_input(players[i] + ", what is your name? ")
name = name.title()
if name == "":
print ""
print string.center(("-" * 80), 80)
print string.expandtabs("ERROR, FIELD EMPTY")
print string.expandtabs("Please try again.")
print string.center(("-" * 80), 80)
print ""
players[i] = name
if numPlayers == 3:
print ""
## print string.center(("-" * 80), 80)
print string.center(("-" * 80), 80)
print ""
print "Welcome", players[0] + ",", players[1] + ", and", players[2] + "!"
print ""
if numPlayers == 2:
players[2] = playerNames_comp[0]
print ""
## print string.center(("-" * 80), 80)
print "Welcome", players[0] + " and", players[1] + "! Today you will be playing against", players[2] + "."
if numPlayers == 1:
players[1] = playerNames_comp[0]
players[2] = playerNames_comp[1]
print ""
## print string.center(("-" * 80), 80)
print "Welcome", players[0] + "! Today you will be playing against", players[1], "and", players[2] + "."
return players
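# get_playerOrder runs the Toss-Up: each player spins once, the dollar amounts are
# recorded in playerOrder_val, and the order of play for the first round is derived
# from those amounts (highest spin takes first possession of The Wheel).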
def get_playerOrder(players, playerOrder_val):
playerOrder = []
print "We will now play the Toss-Up Puzzle for possession of The Wheel in the first"
print "round."
print ""
print players[0] + " will spin first."
print ""
print string.center(("-" * 80), 80)
raw_input ("Press 'ENTER' to continue: ")
for i in (0, 1, 2):
print ""
print players[i] + ", get ready. You're up next!"
print players[i] + " prepares to spin The Wheel."
print ""
raw_input("Press 'ENTER' to spin The Wheel. ")
print ""
prize = get_prize(0)
print string.center(("-" * 80), 80)
print string.center((players[i] + " received $" + str(prize) + "."), 80)
print string.center(("-" * 80), 80)
for j in (0, 1):
if j == 0:
playerOrder_val[i][j] = prize
else:
playerOrder_val[i][j] = players[i]
playerOrder_val.sort(reverse=True)
for i in range(3):
if i == 0:
playerOrder.insert(0, playerOrder_val[i][1])
else:
if i == 1:
if playerOrder_val[i][0] > playerOrder_val[0][0]:
playerOrder.insert(0, playerOrder_val[i][1])
else:
playerOrder.insert(1, playerOrder_val[i][1])
else:
if playerOrder_val[i][0] > playerOrder_val[0][0] and playerOrder_val[i][0] > playerOrder_val[1][0]:
playerOrder.insert(0, playerOrder_val[i][1])
elif playerOrder_val[i][0] < playerOrder_val[0][0] and playerOrder_val[i][0] < playerOrder_val[1][0]:
playerOrder.insert(2, playerOrder_val[i][1])
else:
playerOrder.insert(1, playerOrder_val[i][1])
print ""
print "Congratulations,", playerOrder[0] + "! You have won the Toss-Up Spin and will take possession"
print "of The Wheel at the beginning of the first round."
print ""
print playerOrder[1] + " will take possession of The Wheel after", playerOrder[0] + ", followed by", playerOrder[2] + "."
print ""
print string.center(("-" * 80), 80)
raw_input ("Press 'ENTER' to begin the first round: ")
print ""
return playerOrder
def get_playerOrder_val(playerOrder_val):
for i in (0, 1):
if j == 0:
playerOrder_val[i][player_turn] = (i * 100)
def get_guessConsonant():
check = False
while check == False:
guess = string.upper(raw_input("Please guess a consonant: ",))
if len(guess) == 1 and guess in ['B', 'C', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z']:
check = True
if len(guess) != 1:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: INVALID ENTRY!"
print "Please enter one letter per guess."
print string.center(("-" * 80) , 80)
print ""
check = False
if guess in ["A", "E", "I", "O", "U"]:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: INVALID ENTRY!"
print "Entry must be a consonant."
print string.center(("-" * 80) , 80)
print ""
check = False
return guess
def get_guessfreePlay():
check = False
while check == False:
guess = string.upper(raw_input("Please guess a letter: ",))
if len(guess) == 1 and guess in string.ascii_uppercase:
check = True
if len(guess) != 1:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: INVALID ENTRY!"
print "Please enter one letter per guess."
print string.center(("-" * 80) , 80)
print ""
check = False
return guess
def get_guessVowel():
check = False
while check == False:
guess = string.upper(raw_input("Please guess a vowel: ",))
if len(guess) == 1 and guess in ["A", "E", "I", "O", "U"]:
check = True
if len(guess) != 1:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: INVALID ENTRY!"
print "Please enter one letter per guess."
print string.center(("-" * 80) , 80)
print ""
check = False
if guess in ['B', 'C', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z']:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: INVALID ENTRY!"
print "Entry must be a vowel."
print string.center(("-" * 80) , 80)
print ""
check = False
return guess
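# The Wheel itself is modelled as a weighted list of outcomes: get_prize(0) is the
# Toss-Up wheel (cash only), while rounds 1 and later mix cash with "bankrupt",
# "loseATurn" and "freePlay" wedges, with the top cash wedge growing in later rounds.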
def get_prize(game_round):
prize = 0
if game_round == 0:
prizes = [500, 500, 500, 500, 500, 500, 500, 500, 750, 750,
500, 500, 500, 500, 550, 550, 550, 600, 600, 600, 600, 600,
600, 650, 650, 650, 650, 650, 650, 700, 700, 700, 700, 700,
700, 700, 700, 700, 800, 800, 800, 800, 800, 800, 900, 900,
900, 900, 900, 900, 900, 900, 900, 2500, 2500, 2500, 750, 750]
prize = prizes[random.randint(0, 57)]
if game_round == 1:
prizes = ["bankrupt", "bankrupt", "bankrupt", "bankrupt", "bankrupt",
"bankrupt", "bankrupt", "bankrupt", 500, 500, 500, 500, 500, 500, 500, 500,
500, 500, 500, 500, 550, 550, 550, 600, 600, 600, 600, 600,
600, 650, 650, 650, 650, 650, 650, 700, 700, 700, 700, 700,
700, 700, 700, 700, 800, 800, 800, 800, 800, 800, 900, 900,
900, 900, 900, 900, 900, 900, 900, 2500, 2500, 2500, "loseATurn",
"loseATurn", "loseATurn", "freePlay", "freePlay", "freePlay", 750, 750,
750, 750]
prize = prizes[random.randint(0, 71)]
if game_round == 2:
prizes = ["bankrupt", "bankrupt", "bankrupt", "bankrupt", "bankrupt",
"bankrupt", "bankrupt", "bankrupt", 500, 500, 500, 500, 500, 500, 500, 500,
500, 500, 500, 500, 550, 550, 550, 600, 600, 600, 600, 600,
600, 650, 650, 650, 650, 650, 650, 700, 700, 700, 700, 700,
700, 700, 700, 700, 800, 800, 800, 800, 800, 800, 900, 900,
900, 900, 900, 900, 900, 900, 900, 3500, 3500, 3500, "loseATurn",
"loseATurn", "loseATurn", "freePlay", "freePlay", "freePlay", 750, 750,
750, 750]
prize = prizes[random.randint(0, 71)]
if game_round == 3:
prizes = ["bankrupt", "bankrupt", "bankrupt", "bankrupt", "bankrupt",
"bankrupt", "bankrupt", "bankrupt", 500, 500, 500, 500, 500, 500, 500, 500,
500, 500, 500, 500, 550, 550, 550, 600, 600, 600, 600, 600,
600, 650, 650, 650, 650, 650, 650, 700, 700, 700, 700, 700,
700, 700, 700, 700, 800, 800, 800, 800, 800, 800, 900, 900,
900, 900, 900, 900, 900, 900, 900, 3500, 3500, 3500, "loseATurn",
"loseATurn", "loseATurn", "freePlay", "freePlay", "freePlay", 750, 750,
750, 750]
prize = prizes[random.randint(0, 71)]
if game_round >= 4:
prizes = ["bankrupt", "bankrupt", "bankrupt", "bankrupt", "bankrupt",
"bankrupt", "bankrupt", "bankrupt", 500, 500, 500, 500, 500, 500, 500, 500,
500, 500, 500, 500, 550, 550, 550, 600, 600, 600, 600, 600,
600, 650, 650, 650, 650, 650, 650, 700, 700, 700, 700, 700,
700, 700, 700, 700, 800, 800, 800, 800, 800, 800, 900, 900,
900, 900, 900, 900, 900, 900, 900, 5000, 5000, 5000, "loseATurn",
"loseATurn", "loseATurn", "freePlay", "freePlay", "freePlay", 750, 750,
750, 750]
prize = prizes[random.randint(0, 71)]
return prize
def get_guessWord():
print ""
guess = string.lower(raw_input("Input puzzle solution: ",))
print ""
return guess
def get_freePlayChoice(player):
selection_freePlay = 0
choice = False
while choice is False:
while True:  # loop until a recognized command breaks out below
print string.center(("-" * 80), 80)
print ""
print player + ", would you like to solve the puzzle or choose a letter?"
selection_freePlay = raw_input("Selection: ")
selection_freePlay = selection_freePlay.lower()
if selection_freePlay == "letter" or selection_freePlay == "choose" or selection_freePlay == "s" or selection_freePlay == "solve the puzzle" or selection_freePlay == "solve" or selection_freePlay == "choose a letter" or selection_freePlay == "pick" or selection_freePlay == "pick a letter":
break
else:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: UNRECOGNIZED COMMAND."
print "Please select from the following and try again:"
print "'SOLVE'"
print "'LETTER'"
print "'CHOOSE'"
print "'CHOOSE A LETTER'"
print "'PICK'"
print "'PICK A LETTER'"
print string.center(("-" * 80) , 80)
print ""
if selection_freePlay == "pick a letter" or selection_freePlay == "pick" or selection_freePlay == "letter" or selection_freePlay == "choose":
selection_freePlay = 1
if selection_freePlay == "solve" or selection_freePlay == "solve the puzzle" or selection_freePlay == "s":
selection_freePlay = 2
return selection_freePlay
def get_player_selection(playerOrder, player_turn, playerOrder_val_round):
selection = 0
choice = False
while choice is False:
while True:  # loop until a recognized command breaks out below
print string.center(("-" * 80), 80)
if playerOrder_val_round[player_turn][0] >= 250:
print ""
print playerOrder[player_turn] + ", would you like to SPIN, BUY A VOWEL, or SOLVE THE PUZZLE?"
else:
print ""
print playerOrder[player_turn] + ", would you like to SPIN or SOLVE THE PUZZLE?"
selection = raw_input("Selection: ")
selection = selection.lower()
if selection == "solve" or selection == "pick" or selection == "spin" or selection == "solve the puzzle" or selection == "buy" or selection == "buy a vowel" or selection == "vowel" or selection == "v":
break
else:
print ""
print string.center(("-" * 80) , 80)
print "ERROR: UNRECOGNIZED COMMAND."
print "Please select from the following and try again:"
print "'SOLVE'"
print "'BUY A VOWEL'"
print "'SPIN'"
if selection == "pick a letter" or selection == "pick" or selection == "spin" or selection == "letter":
selection = 1
return selection
if selection == "buy" or selection == "buy a vowel" or selection == "vowel":
if playerOrder_val_round[player_turn][0] >= 250:
selection = 2
return selection
else:
print ""
print "You need a round prize of at least $250 in order to buy a vowel."
print "Please try again."
print ""
if selection == "solve" or selection == "solve the puzzle":
selection = 0
return selection
def bankrupt(player_turn, playerOrder, playerOrder_val_round):
print ""
print playerOrder[player_turn], "spun for BANKRUPT, bringing his total prize for this round to $0."
playerOrder_val_round[player_turn][0] = 0
print "Possession of The Wheel passes to", playerOrder[((player_turn + 1) % 3)] + "."
print ""
print string.center(("-" * 80), 80)
time.sleep(2.5)
return playerOrder_val_round
def lose_a_turn(player_turn, playerOrder):
print ""
print playerOrder[player_turn], "spun for LOSE A TURN!"
print ""
print "Sorry, " + playerOrder[player_turn] + ". Possession of The Wheel passes to " + playerOrder[(player_turn + 1) % 3] + "."
print string.center(("-" * 80), 80)
time.sleep(2.5)
def letter_app_sing(player_turn, playerOrder, playerOrder_val_round, guess, subPrize, letter_app):
time.sleep(0.7)
print ""
print "Good guess,", playerOrder[player_turn] + "! There is 1", guess, "in the puzzle!"
print "That adds $" + str(subPrize) + " to your total prize score!"
print ""
playerOrder_val_round[player_turn][0] = playerOrder_val_round[player_turn][0] + (subPrize * letter_app)
print string.center(("-" * 80), 80)
print ""
print string.center((playerOrder[player_turn] + "'s total prize score for this round is now $" + str(playerOrder_val_round[player_turn][0]) + "!"), 80)
print ""
print string.center(("-" * 80), 80)
time.sleep(2.5)
return playerOrder_val_round[player_turn][0]
def letter_app_plur(player_turn, playerOrder, playerOrder_val_round, guess, subPrize, letter_app):
time.sleep(0.7)
print ""
print "Good guess,", playerOrder[player_turn] + "! There are", letter_app, "'" + guess + "'s in the puzzle!"
print "That adds $" + str(subPrize * letter_app) + " to your total prize score!"
print ""
playerOrder_val_round[player_turn][0] = playerOrder_val_round[player_turn][0] + (subPrize * letter_app)
print string.center(("-" * 80), 80)
print ""
print string.center((playerOrder[player_turn] + "'s total prize score is now $" + str(playerOrder_val_round[player_turn][0]) + "!"), 80)
print ""
print string.center(("-" * 80), 80)
time.sleep(2.5)
return playerOrder_val_round[player_turn][0]
def guess_result(player_turn, playerOrder, playerOrder_val_round, guess, subPrize, letter_app):
if letter_app == 0:
print ""
print "I'm sorry", playerOrder[player_turn] + ", but there are no '" + guess + "s in the puzzle."
print "Your Free Play, however, means that you keep possession of The Wheel."
print ""
print string.center(("-" * 80), 80)
if letter_app == 1:
playerOrder_val_round[player_turn][0] = letter_app_sing(player_turn, playerOrder, playerOrder_val_round, guess, subPrize, letter_app)
if letter_app >= 2:
playerOrder_val_round[player_turn][0] = letter_app_plur(player_turn, playerOrder, playerOrder_val_round, guess, subPrize, letter_app)
return playerOrder_val_round[player_turn][0]
def guess_previously_called(player_turn, playerOrder, guess):
print ""
print "Sorry, '" + guess + "' has already been called in this round."
print playerOrder[(player_turn + 1) % 3] + " now takes possession of The Wheel."
print ""
print string.center(("-" * 80), 80)
time.sleep(1.5)
return playerOrder
start()
|
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset loading utilities.
Creates a thin wrapper around TensorFlow Datasets (TFDS) to enable seamless
CPU/GPU/TPU workloads. The main entry point is 'get_dataset' which takes a
dataset name and a random seed and returns the corresponding tf.data.Dataset
object.
Available datasets are defined in the DATASETS dictionary. To add any dataset
supported by TFDS, simply extend the ImageDatasetV2 class as shown below with
the MNIST example and add it to the DATASETS dictionary. Alternatively, you can
extend the ImageDatasetV2 class and load the datasets from another source.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
from absl import flags
from absl import logging
from compare_gan.tpu import tpu_random
import gin
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
FLAGS = flags.FLAGS
flags.DEFINE_string(
"tfds_data_dir", None,
"TFDS (TensorFlow Datasets) directory. If not set it will default to "
"'~/tensorflow_datasets'. If the directory does not contain the requested "
"dataset TFDS will download the dataset to this folder.")
flags.DEFINE_boolean(
"data_fake_dataset", False,
"If True don't load datasets from disk but create fake values.")
flags.DEFINE_integer(
"data_shuffle_buffer_size", 10000,
"Number of examples for the shuffle buffer.")
# Deprecated, only used for "replacing labels". TFDS will always use 64 threads.
flags.DEFINE_integer(
"data_reading_num_threads", 64,
"The number of threads used to read the dataset.")
class ImageDatasetV2(object):
"""Interface for Image datasets based on TFDS (TensorFlow Datasets).
This class handles both CPU/GPU and TPU data loading settings. If the flag
--data_fake_dataset is True the methods will create a small fake dataset from
in-memory NumPy arrays and not read from disk.
The pipeline of input operations is as follows:
1) Shuffle filenames (with seed).
2) Load file content from disk. Decode images.
Dataset content after this step is a dictionary.
3) Prefetch call here.
4) Filter examples (e.g. by size or label).
5) Parse example.
Dataset content after this step is a tuple of tensors (image, label).
6) train_only: Repeat dataset.
7) Transform (random cropping with seed, resizing).
8) Preprocess (adding sampled noise/labels with seed).
Dataset content after this step is a tuple (feature dictionary, label tensor).
9) train only: Shuffle examples (with seed).
10) Batch examples.
11) Prefetch examples.
Step 1-3 are done by _load_dataset() and wrap tfds.load().
Step 4-11 are done by train_input_fn() and eval_input_fn().
"""
def __init__(self,
name,
tfds_name,
resolution,
colors,
num_classes,
eval_test_samples,
seed):
logging.info("ImageDatasetV2(name=%s, tfds_name=%s, resolution=%d, "
"colors=%d, num_classes=%s, eval_test_samples=%s, seed=%s)",
name, tfds_name, resolution, colors, num_classes,
eval_test_samples, seed)
self._name = name
self._tfds_name = tfds_name
self._resolution = resolution
self._colors = colors
self._num_classes = num_classes
self._eval_test_sample = eval_test_samples
self._seed = seed
self._train_split = tfds.Split.TRAIN
self._eval_split = tfds.Split.TEST
@property
def name(self):
"""Name of the dataset."""
return self._name
@property
def num_classes(self):
return self._num_classes
@property
def eval_test_samples(self):
"""Number of examples in the "test" split of this dataset."""
if FLAGS.data_fake_dataset:
return 100
return self._eval_test_sample
@property
def image_shape(self):
"""Returns a tuple with the image shape."""
return (self._resolution, self._resolution, self._colors)
def _make_fake_dataset(self, split):
"""Returns a fake data set with the correct shapes."""
np.random.seed(self._seed)
num_samples_per_epoch = 100
num_epochs = self.eval_test_samples // 100 if split == "test" else None
images_shape = [num_samples_per_epoch] + list(self.image_shape)
images = np.random.uniform(size=images_shape).astype(np.float32)
labels = np.ones((num_samples_per_epoch,), dtype=np.int32)
ds = tf.data.Dataset.from_tensor_slices((images, labels))
return ds.repeat(num_epochs)
def _get_per_host_random_seed(self, tpu_context=None):
"""Returns the dataset seed for according to the TPUContext.
On CPU/GPU it returns the default seed. For TPUs the input_fn is executed
on every host machine (if per-host input is set, which is set by default).
We use a different (but deterministically computed) random seed on each host
to ensure each host machine sees a different stream of input data.
Args:
tpu_context: TPU execution context.
Returns:
The current seed if CPU/GPU and a host-specific seed for TPU.
"""
if self._seed is None:
logging.warning("Dataset seed not set.")
return None
if tpu_context is None:
logging.warning("No TPUContext, using unmodified dataset seed %s.",
self._seed)
return self._seed
seed = self._seed + tpu_context.current_host
logging.info("Running with %d hosts, modifying dataset seed for "
"host %d to %s.", tpu_context.num_hosts,
tpu_context.current_host, seed)
return seed
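# Example (assuming the default gin seed of 547): on CPU/GPU this returns 547, while
# on a TPU pod the host with index 2 would get 547 + 2 = 549.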
@gin.configurable("replace_labels", whitelist=["file_pattern"])
def _replace_labels(self, split, ds, file_pattern=None):
"""Replaces the labels in the dataset with labels from separate files.
This functionality is used if one wants to either replace the labels with
soft labels (i.e. softmax over the logits) or label the instances with
a new classifier.
Args:
split: Dataset split (e.g. train/test/validation).
ds: The underlying TFDS object.
file_pattern: Path to the replacement files.
Returns:
An instance of tf.data.Dataset with the updated labels.
"""
if not file_pattern:
return ds
file_pattern = file_pattern.format(split=split)
logging.warning("Using labels from %s for split %s.", file_pattern, split)
label_ds = tf.data.Dataset.list_files(file_pattern, shuffle=False)
label_ds = label_ds.interleave(
tf.data.TFRecordDataset,
cycle_length=FLAGS.data_reading_num_threads)
ds = tf.data.Dataset.zip((ds, label_ds)).map(self._replace_label)
return ds
def _replace_label(self, feature_dict, new_unparsed_label):
"""Replaces the label from the feature_dict with the new label.
Furthermore, if the feature_dict contains a key for the file_name which
identifies an instance, we double-check that we are replacing the label
of the correct instance.
Args:
feature_dict: A serialized TFRecord containing the old label.
new_unparsed_label: A serialized TFRecord containing the new label.
Returns:
The feature_dict with its label replaced by the new label.
"""
label_spec = {
"file_name": tf.FixedLenFeature((), tf.string),
"label": tf.FixedLenFeature((), tf.int64),
}
parsed_label = tf.parse_single_example(new_unparsed_label, label_spec)
with tf.control_dependencies([
tf.assert_equal(parsed_label["file_name"], feature_dict["file_name"])]):
feature_dict["label"] = tf.identity(parsed_label["label"])
return feature_dict
def _parse_fn(self, features):
image = tf.cast(features["image"], tf.float32) / 255.0
return image, features["label"]
def _load_dataset(self, split):
"""Loads the underlying dataset split from disk.
Args:
split: Name of the split to load.
Returns:
A `tf.data.Dataset` object with tuples of image and label tensors.
"""
if FLAGS.data_fake_dataset:
return self._make_fake_dataset(split)
ds = tfds.load(
self._tfds_name,
split=split,
data_dir=FLAGS.tfds_data_dir,
as_dataset_kwargs={"shuffle_files": False})
ds = self._replace_labels(split, ds)
ds = ds.map(self._parse_fn)
return ds.prefetch(tf.contrib.data.AUTOTUNE)
def _train_filter_fn(self, image, label):
del image, label
return True
def _train_transform_fn(self, image, label, seed):
del seed
return image, label
def _eval_transform_fn(self, image, label, seed):
del seed
return image, label
def train_input_fn(self, params=None, preprocess_fn=None):
"""Input function for reading data.
Args:
params: Python dictionary with parameters. Must contain the key
"batch_size". TPUEstimator will set this for you!
preprocess_fn: Function to process single examples. This is allowed to
have a `seed` argument.
Returns:
`tf.data.Dataset` with preprocessed and batched examples.
"""
if params is None:
params = {}
seed = self._get_per_host_random_seed(params.get("context", None))
logging.info("train_input_fn(): params=%s seed=%s", params, seed)
ds = self._load_dataset(split=self._train_split)
ds = ds.filter(self._train_filter_fn)
ds = ds.repeat()
ds = ds.map(functools.partial(self._train_transform_fn, seed=seed))
if preprocess_fn is not None:
if "seed" in inspect.getargspec(preprocess_fn).args:
preprocess_fn = functools.partial(preprocess_fn, seed=seed)
ds = ds.map(preprocess_fn)
# Add a feature for the random offset of operations in tpu_random.py.
ds = tpu_random.add_random_offset_to_features(ds)
ds = ds.shuffle(FLAGS.data_shuffle_buffer_size, seed=seed)
if "batch_size" in params:
ds = ds.batch(params["batch_size"], drop_remainder=True)
return ds.prefetch(tf.contrib.data.AUTOTUNE)
def eval_input_fn(self, params=None, split=None):
"""Input function for reading data.
Args:
params: Python dictionary with parameters. Must contain the key
"batch_size". TPUEstimator will set this for you!
split: Name of the split to use. If None will use the default eval split
of the dataset.
Returns:
`tf.data.Dataset` with preprocessed and batched examples.
"""
if params is None:
params = {}
if split is None:
split = self._eval_split
seed = self._get_per_host_random_seed(params.get("context", None))
logging.info("eval_input_fn(): params=%s seed=%s", params, seed)
ds = self._load_dataset(split=split)
# No filter, no repeat.
ds = ds.map(functools.partial(self._eval_transform_fn, seed=seed))
# No shuffle.
if "batch_size" in params:
ds = ds.batch(params["batch_size"], drop_remainder=True)
return ds.prefetch(tf.contrib.data.AUTOTUNE)
# For backwards compatibility with ImageDataset.
def input_fn(self, params, mode=tf.estimator.ModeKeys.TRAIN,
preprocess_fn=None):
assert mode == tf.estimator.ModeKeys.TRAIN, mode
return self.train_input_fn(params=params, preprocess_fn=preprocess_fn)
# For backwards compatibility with ImageDataset.
def load_dataset(self, split_name):
assert split_name == "test", split_name
return self.eval_input_fn()
class MnistDataset(ImageDatasetV2):
"""Wrapper for the MNIST dataset from TFDS."""
def __init__(self, seed):
super(MnistDataset, self).__init__(
name="mnist",
tfds_name="mnist",
resolution=28,
colors=1,
num_classes=10,
eval_test_samples=10000,
seed=seed)
class FashionMnistDataset(ImageDatasetV2):
"""Wrapper for the Fashion-MNIST dataset from TDFS."""
def __init__(self, seed):
super(FashionMnistDataset, self).__init__(
name="fashion_mnist",
tfds_name="fashion_mnist",
resolution=28,
colors=1,
num_classes=10,
eval_test_samples=10000,
seed=seed)
class Cifar10Dataset(ImageDatasetV2):
"""Wrapper for the CIFAR10 dataset from TDFS."""
def __init__(self, seed):
super(Cifar10Dataset, self).__init__(
name="cifar10",
tfds_name="cifar10",
resolution=32,
colors=3,
num_classes=10,
eval_test_samples=10000,
seed=seed)
class CelebaDataset(ImageDatasetV2):
"""Wrapper for the CelebA dataset from TFDS."""
def __init__(self, seed):
super(CelebaDataset, self).__init__(
name="celeb_a",
tfds_name="celeb_a",
resolution=64,
colors=3,
num_classes=None,
eval_test_samples=10000,
seed=seed)
def _parse_fn(self, features):
"""Returns 64x64x3 image and constant label."""
image = features["image"]
image = tf.image.resize_image_with_crop_or_pad(image, 160, 160)
# Note: possibly consider using NumPy's imresize(image, (64, 64))
image = tf.image.resize_images(image, [64, 64])
image.set_shape(self.image_shape)
image = tf.cast(image, tf.float32) / 255.0
label = tf.constant(0, dtype=tf.int32)
return image, label
class LsunBedroomDataset(ImageDatasetV2):
"""Wrapper from the LSUN Bedrooms dataset from TFDS."""
def __init__(self, seed):
super(LsunBedroomDataset, self).__init__(
name="lsun-bedroom",
tfds_name="lsun/bedroom",
resolution=128,
colors=3,
num_classes=None,
eval_test_samples=30000,
seed=seed)
# As the official LSUN validation set only contains 300 samples, which is
# insufficient for FID computation, we're splitting off some training
# samples. The smallest percentage selectable through TFDS is 1%, so we're
# going to use that (corresponding roughly to 30000 samples).
# If you want to use fewer eval samples, just modify eval_test_samples.
self._train_split, self._eval_split = \
tfds.Split.TRAIN.subsplit([99, 1])
def _parse_fn(self, features):
"""Returns a 128x128x3 Tensor with constant label 0."""
image = features["image"]
image = tf.image.resize_image_with_crop_or_pad(
image, target_height=128, target_width=128)
image = tf.cast(image, tf.float32) / 255.0
label = tf.constant(0, dtype=tf.int32)
return image, label
def _transform_imagnet_image(image, target_image_shape, crop_method, seed):
"""Preprocesses ImageNet images to have a target image shape.
Args:
image: 3-D tensor with a single image.
target_image_shape: List/Tuple with target image shape.
crop_method: Method for cropping the image:
One of: distorted, random, middle, none
seed: Random seed, only used for `crop_method=distorted`.
Returns:
Image tensor with shape `target_image_shape`.
"""
if crop_method == "distorted":
begin, size, _ = tf.image.sample_distorted_bounding_box(
tf.shape(image),
tf.zeros([0, 0, 4], tf.float32),
aspect_ratio_range=[1.0, 1.0],
area_range=[0.5, 1.0],
use_image_if_no_bounding_boxes=True,
seed=seed)
image = tf.slice(image, begin, size)
# Unfortunately, the above operation loses the depth-dimension. So we need
# to restore it the manual way.
image.set_shape([None, None, target_image_shape[-1]])
elif crop_method == "random":
tf.set_random_seed(seed)
shape = tf.shape(image)
h, w = shape[0], shape[1]
size = tf.minimum(h, w)
begin = [h - size, w - size] * tf.random.uniform([2], 0, 1)
begin = tf.cast(begin, tf.int32)
begin = tf.concat([begin, [0]], axis=0) # Add channel dimension.
image = tf.slice(image, begin, [size, size, 3])
elif crop_method == "middle":
shape = tf.shape(image)
h, w = shape[0], shape[1]
size = tf.minimum(h, w)
begin = tf.cast([h - size, w - size], tf.float32) / 2.0
begin = tf.cast(begin, tf.int32)
begin = tf.concat([begin, [0]], axis=0) # Add channel dimension.
image = tf.slice(image, begin, [size, size, 3])
elif crop_method != "none":
raise ValueError("Unsupported crop method: {}".format(crop_method))
image = tf.image.resize_images(
image, [target_image_shape[0], target_image_shape[1]])
image.set_shape(target_image_shape)
return image
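# Example: with crop_method="middle" a 300x200 image is centre-cropped to a 200x200
# square and then resized to target_image_shape (e.g. 128x128x3); "distorted" instead
# samples a random bounding box covering 50-100% of the image area before resizing.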
@gin.configurable("train_imagenet_transform", whitelist=["crop_method"])
def _train_imagenet_transform(image, target_image_shape, seed,
crop_method="distorted"):
return _transform_imagnet_image(
image,
target_image_shape=target_image_shape,
crop_method=crop_method,
seed=seed)
@gin.configurable("eval_imagenet_transform", whitelist=["crop_method"])
def _eval_imagenet_transform(image, target_image_shape, seed,
crop_method="middle"):
return _transform_imagnet_image(
image,
target_image_shape=target_image_shape,
crop_method=crop_method,
seed=seed)
class ImagenetDataset(ImageDatasetV2):
"""ImageNet2012 as defined by TF Datasets."""
def __init__(self, resolution, seed, filter_unlabeled=False):
if resolution not in [64, 128, 256, 512]:
raise ValueError("Unsupported resolution: {}".format(resolution))
super(ImagenetDataset, self).__init__(
name="imagenet_{}".format(resolution),
tfds_name="imagenet2012",
resolution=resolution,
colors=3,
num_classes=1000,
eval_test_samples=50000,
seed=seed)
self._eval_split = tfds.Split.VALIDATION
self._filter_unlabeled = filter_unlabeled
def _train_filter_fn(self, image, label):
del image
if not self._filter_unlabeled:
return True
logging.warning("Filtering unlabeled examples.")
return tf.math.greater_equal(label, 0)
def _train_transform_fn(self, image, label, seed):
image = _train_imagenet_transform(
image=image, target_image_shape=self.image_shape, seed=seed)
return image, label
def _eval_transform_fn(self, image, label, seed):
image = _eval_imagenet_transform(
image=image, target_image_shape=self.image_shape, seed=seed)
return image, label
class SizeFilteredImagenetDataset(ImagenetDataset):
"""ImageNet from TFDS filtered by image size."""
def __init__(self, resolution, threshold, seed):
super(SizeFilteredImagenetDataset, self).__init__(
resolution=resolution,
seed=seed)
self._name = "imagenet_{}_hq{}".format(resolution, threshold)
self._threshold = threshold
def _train_filter_fn(self, image, label):
"""The minimum image dimension has to be larger than the threshold."""
del label
size = tf.math.reduce_min(tf.shape(image)[:2])
return tf.greater_equal(size, self._threshold)
class SingleClassImagenetDataset(ImagenetDataset):
"""ImageNet from TFDS with all instances having a constant label 0.
It can be used to simulate the setting where no labels are provided.
"""
def __init__(self, resolution, seed):
super(SingleClassImagenetDataset, self).__init__(
resolution=resolution,
seed=seed)
self._name = "single_class_" + self._name
self._num_classes = 1
def _parse_fn(self, features):
image, _ = super(SingleClassImagenetDataset, self)._parse_fn(features)
label = tf.constant(0, dtype=tf.int32)
return image, label
class RandomClassImagenetDataset(ImagenetDataset):
"""ImageNet2012 dataset with random labels."""
def __init__(self, resolution, seed):
super(RandomClassImagenetDataset, self).__init__(
resolution=resolution,
seed=seed)
self._name = "random_class_" + self._name
self._num_classes = 1000
def _parse_fn(self, features):
image, _ = super(RandomClassImagenetDataset, self)._parse_fn(features)
label = tf.random.uniform([], minval=0, maxval=1000, dtype=tf.int32)
return image, label
class SoftLabeledImagenetDataset(ImagenetDataset):
"""ImageNet2012 dataset with soft labels."""
def __init__(self, resolution, seed):
super(SoftLabeledImagenetDataset, self).__init__(
resolution=resolution,
seed=seed)
self._name = "soft_labeled_" + self._name
def _replace_label(self, feature_dict, new_unparsed_label):
"""Replaces the label from the feature_dict with the new (soft) label.
The function assumes that the new_unparsed_label contains a list of logits
which will be converted to a soft label using the softmax.
Args:
feature_dict: A serialized TFRecord containing the old label.
new_unparsed_label: A serialized TFRecord containing the new label.
Returns:
The feature_dict with its label replaced by the new soft label.
"""
label_spec = {
"file_name": tf.FixedLenFeature((), tf.string),
"label": tf.FixedLenFeature([self._num_classes], tf.float32)
}
parsed_label = tf.parse_single_example(new_unparsed_label, label_spec)
with tf.control_dependencies([
tf.assert_equal(parsed_label["file_name"], feature_dict["file_name"])]):
feature_dict["label"] = tf.nn.softmax(logits=parsed_label["label"])
return feature_dict
DATASETS = {
"celeb_a": CelebaDataset,
"cifar10": Cifar10Dataset,
"fashion-mnist": FashionMnistDataset,
"lsun-bedroom": LsunBedroomDataset,
"mnist": MnistDataset,
"imagenet_64": functools.partial(ImagenetDataset, resolution=64),
"imagenet_128": functools.partial(ImagenetDataset, resolution=128),
"imagenet_256": functools.partial(ImagenetDataset, resolution=256),
"imagenet_512": functools.partial(ImagenetDataset, resolution=512),
"imagenet_512_hq400": (functools.partial(
SizeFilteredImagenetDataset, resolution=512, threshold=400)),
"soft_labeled_imagenet_128": functools.partial(
SoftLabeledImagenetDataset, resolution=128),
"single_class_imagenet_128": functools.partial(
SingleClassImagenetDataset, resolution=128),
"random_class_imagenet_128": functools.partial(
RandomClassImagenetDataset, resolution=128),
"labeled_only_imagenet_128": functools.partial(
ImagenetDataset, resolution=128, filter_unlabeled=True),
}
@gin.configurable("dataset")
def get_dataset(name, seed=547):
"""Instantiates a data set and sets the random seed."""
if name not in DATASETS:
raise ValueError("Dataset %s is not available." % name)
return DATASETS[name](seed=seed)
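# Hedged usage sketch (not part of the library): shows how the entry points above are
# typically wired together. The dataset name "cifar10" and batch size 64 are arbitrary
# example values, and `params` mimics what TPUEstimator would pass in.
def _demo_input_pipeline():
    dataset = get_dataset(name="cifar10", seed=547)
    train_ds = dataset.train_input_fn(params={"batch_size": 64})
    eval_ds = dataset.eval_input_fn(params={"batch_size": 64})
    # Both are tf.data.Dataset objects yielding batched (features, label) examples.
    return train_ds, eval_ds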
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
from magnum.api.controllers import link
from magnum.api.controllers.v1 import base as v1_base
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api.controllers.v1 import utils as api_utils
from magnum.api import expose
from magnum.api import validation
from magnum.common import exception
from magnum.common import k8s_manifest
from magnum.common import policy
from magnum import objects
class PodPatchType(v1_base.K8sPatchType):
pass
class Pod(v1_base.K8sResourceBase):
"""API representation of a pod.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of a pod.
"""
uuid = types.uuid
"""Unique UUID for this pod"""
desc = wtypes.text
"""Description of this pod"""
images = [wtypes.text]
"""A list of images used by containers in this pod."""
status = wtypes.text
"""Staus of this pod """
links = wsme.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated pod links"""
host = wtypes.text
"""The host of this pod"""
def __init__(self, **kwargs):
super(Pod, self).__init__()
self.fields = []
for field in objects.Pod.fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
@staticmethod
def _convert_with_links(pod, url, expand=True):
if not expand:
pod.unset_fields_except(['uuid', 'name', 'desc', 'bay_uuid',
'images', 'labels', 'status', 'host'])
pod.links = [link.Link.make_link('self', url,
'pods', pod.uuid),
link.Link.make_link('bookmark', url,
'pods', pod.uuid,
bookmark=True)
]
return pod
@classmethod
def convert_with_links(cls, rpc_pod, expand=True):
pod = Pod(**rpc_pod.as_dict())
return cls._convert_with_links(pod, pecan.request.host_url, expand)
@classmethod
def sample(cls, expand=True):
sample = cls(uuid='f978db47-9a37-4e9f-8572-804a10abc0aa',
name='MyPod',
desc='Pod - Description',
bay_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae',
images=['MyImage'],
labels={'name': 'foo'},
status='Running',
host='10.0.0.3',
manifest_url='file:///tmp/rc.yaml',
manifest = '''{
"metadata": {
"name": "name_of_pod"
},
"spec": {
"containers": [
{
"name": "test",
"image": "test"
}
]
}
}''',
created_at=datetime.datetime.utcnow(),
updated_at=datetime.datetime.utcnow())
return cls._convert_with_links(sample, 'http://localhost:9511', expand)
def parse_manifest(self):
try:
manifest = k8s_manifest.parse(self._get_manifest())
except ValueError as e:
raise exception.InvalidParameterValue(message=str(e))
try:
self.name = manifest["metadata"]["name"]
except (KeyError, TypeError):
raise exception.InvalidParameterValue(
"Field metadata['name'] can't be empty in manifest.")
images = []
try:
for container in manifest["spec"]["containers"]:
images.append(container["image"])
self.images = images
except (KeyError, TypeError):
raise exception.InvalidParameterValue(
"Field spec['containers'] can't be empty in manifest.")
if "labels" in manifest["metadata"]:
self.labels = manifest["metadata"]["labels"]
class PodCollection(collection.Collection):
"""API representation of a collection of pods."""
pods = [Pod]
"""A list containing pods objects"""
def __init__(self, **kwargs):
self._type = 'pods'
@staticmethod
def convert_with_links(rpc_pods, limit, url=None, expand=False, **kwargs):
collection = PodCollection()
collection.pods = [Pod.convert_with_links(p, expand)
for p in rpc_pods]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
@classmethod
def sample(cls):
sample = cls()
sample.pods = [Pod.sample(expand=False)]
return sample
class PodsController(rest.RestController):
"""REST controller for Pods."""
def __init__(self):
super(PodsController, self).__init__()
_custom_actions = {
'detail': ['GET'],
}
def _get_pods_collection(self, marker, limit,
sort_key, sort_dir, expand=False,
resource_url=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.Pod.get_by_uuid(pecan.request.context,
marker)
pods = pecan.request.rpcapi.pod_list(pecan.request.context, limit,
marker_obj, sort_key=sort_key,
sort_dir=sort_dir)
return PodCollection.convert_with_links(pods, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@policy.enforce_wsgi("pod")
@expose.expose(PodCollection, types.uuid,
types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, pod_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of pods.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
return self._get_pods_collection(marker, limit, sort_key,
sort_dir)
@policy.enforce_wsgi("pod")
@expose.expose(PodCollection, types.uuid,
types.uuid, int, wtypes.text, wtypes.text)
def detail(self, pod_uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of pods with detail.
:param pod_uuid: UUID of a pod, to get only pods for that pod.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
        # NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "pods":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['pods', 'detail'])
return self._get_pods_collection(marker, limit,
sort_key, sort_dir, expand,
resource_url)
@policy.enforce_wsgi("pod", "get")
@expose.expose(Pod, types.uuid_or_name)
def get_one(self, pod_ident):
"""Retrieve information about the given pod.
:param pod_ident: UUID of a pod or logical name of the pod.
"""
rpc_pod = api_utils.get_rpc_resource('Pod', pod_ident)
return Pod.convert_with_links(rpc_pod)
@policy.enforce_wsgi("pod", "create")
@expose.expose(Pod, body=Pod, status_code=201)
@validation.enforce_bay_types('kubernetes')
def post(self, pod):
"""Create a new pod.
:param pod: a pod within the request body.
"""
pod.parse_manifest()
pod_dict = pod.as_dict()
context = pecan.request.context
pod_dict['project_id'] = context.project_id
pod_dict['user_id'] = context.user_id
pod_obj = objects.Pod(context, **pod_dict)
new_pod = pecan.request.rpcapi.pod_create(pod_obj)
# Set the HTTP Location Header
pecan.response.location = link.build_url('pods', new_pod.uuid)
return Pod.convert_with_links(new_pod)
@policy.enforce_wsgi("pod", "update")
@wsme.validate(types.uuid, [PodPatchType])
@expose.expose(Pod, types.uuid_or_name, body=[PodPatchType])
def patch(self, pod_ident, patch):
"""Update an existing pod.
:param pod_ident: UUID or logical name of a pod.
:param patch: a json PATCH document to apply to this pod.
"""
rpc_pod = api_utils.get_rpc_resource('Pod', pod_ident)
# Init manifest and manifest_url field because we don't store them
# in database.
rpc_pod['manifest'] = None
rpc_pod['manifest_url'] = None
try:
pod_dict = rpc_pod.as_dict()
pod = Pod(**api_utils.apply_jsonpatch(pod_dict, patch))
if pod.manifest or pod.manifest_url:
pod.parse_manifest()
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Update only the fields that have changed
for field in objects.Pod.fields:
try:
patch_val = getattr(pod, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if rpc_pod[field] != patch_val:
rpc_pod[field] = patch_val
if pod.manifest or pod.manifest_url:
pecan.request.rpcapi.pod_update(rpc_pod)
else:
rpc_pod.save()
return Pod.convert_with_links(rpc_pod)
@policy.enforce_wsgi("pod")
@expose.expose(None, types.uuid_or_name, status_code=204)
def delete(self, pod_ident):
"""Delete a pod.
:param pod_ident: UUID of a pod or logical name of the pod.
"""
rpc_pod = api_utils.get_rpc_resource('Pod', pod_ident)
pecan.request.rpcapi.pod_delete(rpc_pod.uuid)
|
|
######################################################################
# CliNER - model.py #
# #
# Willie Boag #
# #
# Purpose: Define the model for clinical concept extraction. #
######################################################################
import sys
from sklearn.feature_extraction import DictVectorizer
import os
import random
import math
import io
import numpy as np
from time import localtime, strftime
from collections import defaultdict
from notes.documents import labels as tag2id, id2tag
from tools import flatten, save_list_structure, reconstruct_list
from tools import print_str, print_vec, print_files, write
cliner_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
tmp_dir = os.path.join(cliner_dir, 'data', 'tmp')
class ClinerModel:
def log(self, out, model_file=None):
'''
ClinerModel::log()
Log training information of model.
@param out. Either a filename or file channel to output the log string.
@param model_file. A path to optionally identify where the model was saved.
@return None
'''
if not self._log:
log = self.__log_str(model_file)
else:
log = self._log
# depending on whether it is already opened as a channel
if isinstance(out,type(sys.stdout)):
write(out, '%s\n' % log)
else:
with open(out, 'a') as f:
write(f, '%s\n' % log)
def __log_str_NEURAL(self,model_file=None):
""
def __log_str(self, model_file=None):
'''
ClinerModel::__log_str()
Build a string of information about training for the model's log file.
@param model_file. A path to optionally identify where the model was saved.
@return A string of the model's training information
'''
assert self._is_trained, 'ClinerModel not trained'
with io.StringIO() as f:
write(f, u'\n')
write(f, '-'*40)
write(f, u'\n\n')
if model_file:
write(f, 'model : %s\n' % os.path.abspath(model_file))
write(f, u'\n')
if self._use_lstm:
write(f, u'modeltype: LSTM\n')
else:
write(f, u'modeltype: CRF\n')
if 'hyperparams' in self._score:
for name,value in self._score['hyperparams'].items():
write(f, u'\t%-10s: %s\n' % (name,value))
write(f, u'\n')
print_str(f, 'features', self._features)
write(f, u'\n')
write(f, u'\n')
write(f, 'training began: %s\n' % self._time_train_begin)
write(f, 'training ended: %s\n' % self._time_train_end)
write(f, u'\n')
write(f, u'scores\n')
print_vec(f, 'train precision', self._score['train']['precision'])
print_vec(f, 'train recall ', self._score['train']['recall' ])
print_vec(f, 'train f1 ', self._score['train']['f1' ])
write(f, self._score['train']['conf'])
if 'dev' in self._score:
print_vec(f, u'dev precision ', self._score['dev']['precision'])
print_vec(f, u'dev recall ', self._score['dev']['recall' ])
print_vec(f, u'dev f1 ', self._score['dev']['f1' ])
write(f, self._score['dev']['conf'])
if 'test' in self._score:
print_vec(f, u'test precision ', self._score['test']['precision'])
print_vec(f, u'test recall ', self._score['test']['recall' ])
print_vec(f, u'test f1 ', self._score['test']['f1' ])
write(f, self._score['test']['conf'])
if 'history' in self._score:
for label,vec in self._score['history'].items():
print_vec(f, '%-16s'%label, vec)
write(f, u'\n')
if self._training_files:
write(f, u'\n')
write(f, u'Training Files\n')
if len(self._training_files) < 200:
print_files(f, self._training_files)
else:
write(f, '\t%d files\n'%len(self._training_files))
write(f, u'\n')
write(f, u'-'*40)
write(f, u'\n\n')
# get output as full string
contents = f.getvalue()
return contents
def __init__(self, use_lstm):
"""
ClinerModel::__init__()
Instantiate a ClinerModel object.
@param use_lstm. Bool indicating whether to train a CRF or LSTM.
"""
self._use_lstm = use_lstm
self._is_trained = False
        self._clf = None
self._vocab = None
self._training_files = None
self._log = None
self._text_feats = None
# Import the tools for either CRF or LSTM
if use_lstm:
# NEW
import DatasetCliner_experimental as Exp
import tensorflow as tf
import entity_lstm as entity_model
import training_predict_LSTM
import pickle
import copy
import helper_dataset as hd
import shutil
self._pretrained_dataset=None
self._pretrained_wordvectors=None
self._current_model=None
self._parameters=None
def train(self, train_notes, val=[], test=[]):
"""
ClinerModel::train()
Purpose: Train a Machine Learning model on annotated data
@param notes. A list of Note objects (containing text and annotations)
@return None
"""
# Extract formatted data
train_sents = flatten([n.getTokenizedSentences() for n in train_notes])
train_labels = flatten([n.getTokenLabels() for n in train_notes])
if test:
test_sents = flatten([n.getTokenizedSentences() for n in test])
test_labels = flatten([n.getTokenLabels() for n in test])
else:
test_sents = []
test_labels = []
if val:
print ("VAL")
val_sents = flatten([n.getTokenizedSentences() for n in val])
val_labels = flatten([n.getTokenLabels() for n in val])
self.train_fit(train_sents,train_labels,val_sents=val_sents,val_labels=val_labels,test_sents=test_sents,test_labels=test_labels)
else:
print ("NO DEV")
self.train_fit(train_sents, train_labels, dev_split=0.1,
test_sents=test_sents, test_labels=test_labels)
        self._training_files = [ n.getName() for n in train_notes+val ]
def train_fit(self, train_sents, train_labels, val_sents=None, val_labels=None,
test_sents=None, test_labels=None, dev_split=None):
"""
ClinerModel::train_fit()
Purpose: Train clinical concept extraction model using annotated data.
@param train_sents. A list of sentences, where each sentence is tokenized into words.
@param train_labels. Parallel to 'train_sents', 7-way labels for concept spans.
@param val_sents. Validation data. Same format as tokenized_sents
@param val_labels. Validation data. Same format as iob_nested_labels
@param dev_split A real number from 0 to 1
"""
# metadata
self._time_train_begin = strftime("%Y-%m-%d %H:%M:%S", localtime())
# train classifier
if self._use_lstm==False:
voc, clf, dev_score, enabled_features = generic_train('all',
train_sents ,
train_labels ,
self._use_lstm ,
val_sents=val_sents ,
val_labels=val_labels ,
test_sents=test_sents ,
test_labels=test_labels ,
dev_split=dev_split )
self._is_trained = True
self._vocab = voc
self._clf = clf
self._score = dev_score
self._features = enabled_features
# metadata
self._time_train_end = strftime("%Y-%m-%d %H:%M:%S", localtime())
else:
print ("IN ERROR CHECK")
print (dev_split)
parameters,dataset,best = generic_train('all',
train_sents ,
train_labels ,
self._use_lstm ,
val_sents=val_sents ,
val_labels=val_labels ,
test_sents=test_sents ,
test_labels=test_labels ,
dev_split=dev_split )
self._is_trained = True
self.pretrained_dataset=dataset
self.parameters=parameters
self._score=best
self._time_train_end = strftime("%Y-%m-%d %H:%M:%S", localtime())
print ("BEST EPOCH")
print (best)
#self._vocab = voc
#self._clf = clf
#self._score = dev_score
#self._features = enabled_features
# metadata
#self._time_train_end = strftime("%Y-%m-%d %H:%M:%S", localtime())
def predict_classes_from_document(self, document):
"""
ClinerModel::predict_classes_from_documents()
Predict concept annotations for a given document
@param note. A Document object (containing text and annotations)
@return List of predictions
"""
# Extract formatted data
tokenized_sents = document.getTokenizedSentences()
return self.predict_classes(tokenized_sents)
def predict_classes(self, tokenized_sents):
"""
ClinerModel::predict_classes()
Predict concept annotations for unlabeled, tokenized sentences
@param tokenized_sents. A list of sentences, where each sentence is tokenized
into words
@return List of predictions
"""
hyperparams = {}
# Predict labels for prose
if self._use_lstm:
if self.parameters==None:
                hyperparams['parameters'] = hd.load_parameters_from_file("LSTM_parameters.txt")
if self._pretrained_dataset==None:
temp_pretrained_dataset = os.path.join(hyperparams['parameters']['model_folder'],
"dataset.pickle")
                hyperparams['pretrained_dataset'] = pickle.load(open(temp_pretrained_dataset, 'rb'))
vectorized_pred = generic_predict('all' ,
tokenized_sents ,
vocab = self._vocab ,
clf = self._clf ,
use_lstm = self._use_lstm,
hyperparams = hyperparams)
#pretrained_dataset=self._pretrained_dataset,
#tokens_to_vec=self._pretrained_wordvector,
#current_model=self._current_model,
#parameters=self.parameters)
#self._current_model=model
if self._use_lstm:
iob_pred = vectorized_pred
else:
iob_pred = [ [id2tag[p] for p in seq] for seq in vectorized_pred ]
return iob_pred
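# A minimal usage sketch for the CRF path (hypothetical variables
# `train_sents` / `train_labels`, formatted as described in train_fit above):
#
#   model = ClinerModel(use_lstm=False)
#   model.train_fit(train_sents, train_labels, dev_split=0.1)
#   predictions = model.predict_classes([['Patient', 'denies', 'chest', 'pain', '.']])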
############################################################################
### Lowest-level (interfaces to ML modules) ###
############################################################################
def generic_train(p_or_n, train_sents, train_labels, use_lstm, val_sents=None, val_labels=None, test_sents=None, test_labels=None, dev_split=None):
'''
generic_train()
Train a model that works for both prose and nonprose
@param p_or_n. A string that indicates "prose", "nonprose", or "all"
@param train_sents. A list of sentences; each sentence is tokenized into words
@param train_labels. Parallel to `train_sents`, 7-way labels for concept spans
@param use_lstm Bool indicating whether to train CRF or LSTM.
@param val_sents. Validation data. Same format as train_sents
@param val_labels. Validation data. Same format as train_labels
@param dev_split. A real number from 0 to 1
'''
# Must have data to train on:
if len(train_sents) == 0:
raise Exception('Training must have %s training examples' % p_or_n)
# if you should split the data into train/dev yourself
    if (not val_sents) and (dev_split is not None) and (dev_split > 0.0) and (len(train_sents) > 10):
p = int(dev_split*100)
sys.stdout.write('\tCreating %d/%d train/dev split\n' % (100-p,p))
perm = list(range(len(train_sents)))
random.shuffle(perm)
train_sents = [ train_sents[i] for i in perm ]
train_labels = [ train_labels[i] for i in perm ]
ind = int(dev_split*len(train_sents))
val_sents = train_sents[:ind ]
train_sents = train_sents[ ind:]
val_labels = train_labels[:ind ]
train_labels = train_labels[ ind:]
else:
sys.stdout.write('\tUsing existing validation data\n')
sys.stdout.write('\tvectorizing words %s\n' % p_or_n)
if use_lstm:
print ("TESTING NEW DATSET OBJECT")
dataset = Exp.Dataset()
parameters=hd.load_parameters_from_file("LSTM_parameters.txt")
parameters['use_pretrained_model']=False
Datasets_tokens={}
Datasets_labels={}
Datasets_tokens['train']=train_sents
Datasets_labels['train']=train_labels
if val_sents!=None:
Datasets_tokens['valid']=val_sents
Datasets_labels['valid']=val_labels
if test_sents!=None:
Datasets_tokens['test']=test_sents
Datasets_labels['test']=test_labels
dataset.load_dataset(Datasets_tokens,Datasets_labels,"",parameters)
pickle.dump(dataset, open(os.path.join(parameters['model_folder'], 'dataset.pickle'), 'wb'))
print (Datasets_tokens['valid'][0])
print (Datasets_tokens['test'][0])
parameters['Feature_vector_length']=dataset.feature_vector_size
parameters['use_features_before_final_lstm']=False
parameters['learning_rate']=0.005
sess = tf.Session()
number_of_sent=list(range(len(dataset.token_indices['train'])))
with sess.as_default():
model=entity_model.EntityLSTM(dataset,parameters)
sess.run(tf.global_variables_initializer())
model.load_pretrained_token_embeddings(sess, dataset,parameters)
epoch_number = -1
transition_params_trained = np.random.rand(5+2,5+2)
values={}
values["best"]=0
f1_dictionary={}
f1_dictionary['best']=0
model_saver = tf.train.Saver(max_to_keep=100)
print ("START TRAINING")
            eval_dir = os.path.join(tmp_dir, 'cliner_eval_%d' % random.randint(0,256)+os.sep)
parameters['conll_like_result_folder']=eval_dir
test_temp = os.path.join(parameters['conll_like_result_folder'], 'test/')
train_temp = os.path.join(parameters['conll_like_result_folder'], 'train/')
valid_temp = os.path.join(parameters['conll_like_result_folder'], 'valid/')
os.mkdir(parameters['conll_like_result_folder'])
os.mkdir(test_temp)
os.mkdir(train_temp)
os.mkdir(valid_temp)
while epoch_number<90:
average_loss_per_phrase=0
accuracy_per_phase=0
step = 0
epoch_number += 1
if epoch_number != 0:
sequence_numbers=list(range(len(dataset.token_indices['train'])))
random.shuffle(sequence_numbers)
for sequence_number in sequence_numbers:
loss,accuracy,transition_params_trained=training_predict_LSTM.train_step(sess, dataset, sequence_number, model)
average_loss_per_phrase+=loss
accuracy_per_phase+=accuracy
step += 1
if step % 10 == 0:
print('Training {0:.2f}% done\n'.format(step/len(sequence_numbers)*100))
model_saver.save(sess, os.path.join(parameters['model_folder'], 'model_{0:05d}.ckpt'.format(epoch_number)))
total_loss=average_loss_per_phrase
total_accuracy=accuracy_per_phase
average_loss_per_phrase=average_loss_per_phrase/len(number_of_sent)
accuracy_per_phase=accuracy_per_phase/len(number_of_sent)
if epoch_number>0:
""
f1,predictions=training_predict_LSTM.prediction_step(sess,dataset,"test",model,epoch_number,parameters['conll_like_result_folder'],transition_params_trained)
f1_train,_=training_predict_LSTM.prediction_step(sess,dataset,"train", model,epoch_number,parameters['conll_like_result_folder'],transition_params_trained)
f1_valid,_=training_predict_LSTM.prediction_step(sess,dataset,"valid", model,epoch_number,parameters['conll_like_result_folder'],transition_params_trained)
correctly_predicted_tokens=training_predict_LSTM.compute_train_accuracy(parameters['conll_like_result_folder']+"valid"+os.sep+"epoche_"+str(epoch_number)+".txt")
if f1_dictionary['best']<float(f1_valid):
f1_dictionary['epoche']=epoch_number
f1_dictionary['best']=float(f1_valid)
if values["best"]<correctly_predicted_tokens:
values["epoche"]=epoch_number
values["best"]=correctly_predicted_tokens
#print ("Number of correctly predicted tokens -test "+str(correctly_predicted_tokens))
print ("NEW EPOCHE"+" "+str(epoch_number))
print ("Current F1 on train"+" "+str(f1_train))
print ("Current F1 on valid"+" "+str(f1_valid))
print ("Current F1 on test"+" "+str(f1))
print ("Current F1 best (validation): ")
print (f1_dictionary)
shutil.rmtree(parameters['conll_like_result_folder'])
return parameters, dataset,f1_dictionary['best']
else:
########
# CRF
########
from feature_extraction.features import extract_features
# vectorize tokenized sentences
text_features = extract_features(train_sents)
# type(text_features): <type 'list'>
# Collect list of feature types
enabled_features = set()
for sf in text_features:
for wf in sf:
for (feature_type,instance),value in wf.items():
if feature_type.startswith('prev'):
feature_type = 'PREV*'
if feature_type.startswith('next'):
feature_type = 'NEXT*'
enabled_features.add(feature_type)
enabled_features = sorted(enabled_features)
# Vectorize features
vocab = DictVectorizer()
flat_X_feats = vocab.fit_transform( flatten(text_features) )
X_feats = reconstruct_list(flat_X_feats, save_list_structure(text_features))
# vectorize IOB labels
Y_labels = [ [tag2id[y] for y in y_seq] for y_seq in train_labels ]
assert len(X_feats) == len(Y_labels)
for i in range(len(X_feats)):
assert X_feats[i].shape[0] == len(Y_labels[i])
# if there is specified validation data, then vectorize it
if val_sents:
# vectorize validation X
val_text_features = extract_features(val_sents)
flat_val_X_feats = vocab.transform( flatten(val_text_features) )
val_X = reconstruct_list(flat_val_X_feats,
save_list_structure(val_text_features))
# vectorize validation Y
val_Y = [ [tag2id[y] for y in y_seq] for y_seq in val_labels ]
# if there is specified test data, then vectorize it
if test_sents:
# vectorize test X
test_text_features = extract_features(test_sents)
flat_test_X_feats = vocab.transform( flatten(test_text_features) )
test_X = reconstruct_list(flat_test_X_feats,
save_list_structure(test_text_features))
# vectorize test Y
test_Y = [ [tag2id[y] for y in y_seq] for y_seq in test_labels ]
else:
test_X = None
test_Y = None
sys.stdout.write('\ttraining classifiers %s\n' % p_or_n)
if use_lstm:
# train using lstm
clf, dev_score = keras_ml.train(X_seq_ids, Y_labels, tag2id, len(vocab),
val_X_ids=val_X, val_Y_ids=val_Y,
test_X_ids=test_X, test_Y_ids=test_Y)
else:
# train using crf
from machine_learning import crf
clf, dev_score = crf.train(X_feats, Y_labels, val_X=val_X, val_Y=val_Y,
test_X=test_X, test_Y=test_Y)
return vocab, clf, dev_score, enabled_features
#def generic_predict(p_or_n, tokenized_sents, vocab, clf, use_lstm, pretrained_dataset=None,tokens_to_vec=None, current_model=None, parameters=None):
def generic_predict(p_or_n, tokenized_sents, vocab, clf, use_lstm, hyperparams):
'''
generic_predict()
Train a model that works for both prose and nonprose
@param p_or_n. A string that indicates "prose", "nonprose", or "all"
@param tokenized_sents. A list of sentences, where each sentence is tokenized
into words
@param vocab. A dictionary mapping word tokens to numeric indices.
@param clf. An encoding of the trained keras model.
@param use_lstm. Bool indicating whether clf is a CRF or LSTM.
'''
# use_lstm=self._use_lstm
if use_lstm:
        # These values come from the hyperparams dict; they are not defined
        # anywhere else in this scope.
        parameters = hyperparams.get('parameters') or hd.load_parameters_from_file("LSTM_parameters.txt")
        pretrained_dataset = hyperparams.get('pretrained_dataset')
        tokens_to_vec = hyperparams.get('tokens_to_vec')
        parameters['use_pretrained_model']=True
        predictions=[]
sys.stdout.write('\n use_lstm \n')
dataset = Exp.Dataset()
fictional_labels= copy.deepcopy(tokenized_sents)
for idx,x in enumerate(fictional_labels):
for val_id,value in enumerate(x):
fictional_labels[idx][val_id]='O'
Datasets_tokens={}
Datasets_labels={}
Datasets_tokens['deploy']=tokenized_sents
Datasets_labels['deploy']=fictional_labels
token_to_vector=dataset.load_dataset(Datasets_tokens, Datasets_labels, "", parameters,token_to_vector=tokens_to_vec, pretrained_dataset=pretrained_dataset)
print (dataset.token_indices.keys())
parameters['Feature_vector_length']=dataset.feature_vector_size
parameters['use_features_before_final_lstm']=False
dataset.update_dataset("", ['deploy'],Datasets_tokens,Datasets_labels)
del Datasets_tokens
del Datasets_labels
#model=current_model
model=entity_model.EntityLSTM(dataset,parameters)
os.mkdir(parameters['conll_like_result_folder'])
test_temp = os.path.join(parameters['conll_like_result_folder'], 'test/')
train_temp = os.path.join(parameters['conll_like_result_folder'], 'train/')
valid_temp = os.path.join(parameters['conll_like_result_folder'], 'valid/')
os.mkdir(test_temp)
os.mkdir(train_temp)
os.mkdir(valid_temp)
sess = tf.Session()
with sess.as_default():
#model=entity_model.EntityLSTM(dataset,parameters)
transition_params_trained=model.restore_from_pretrained_model(parameters, dataset, sess, token_to_vector=token_to_vector,pretrained_dataset=pretrained_dataset)
del token_to_vector
predictions=training_predict_LSTM.prediction_step(sess,dataset,"deploy",model,0,parameters['conll_like_result_folder'],transition_params_trained)
sess.close()
tf.reset_default_graph()
shutil.rmtree(parameters['conll_like_result_folder'])
return predictions, model
# If nothing to predict, skip actual prediction
if len(tokenized_sents) == 0:
sys.stdout.write('\tnothing to predict %s\n' % p_or_n)
return []
sys.stdout.write('\tvectorizing words %s\n' % p_or_n)
if use_lstm:
print('todo: incorporate lstm')
# vectorize tokenized sentences
#X = []
#for sent in tokenized_sents:
# id_seq = []
# for w in sent:
# if w in vocab:
# id_seq.append(vocab[w])
# else:
# id_seq.append(vocab['oov'])
# X.append(id_seq)
else:
from feature_extraction.features import extract_features
# vectorize validation X
text_features = extract_features(tokenized_sents)
flat_X_feats = vocab.transform( flatten(text_features) )
X = reconstruct_list(flat_X_feats, save_list_structure(text_features))
sys.stdout.write('\tpredicting labels %s\n' % p_or_n)
# Predict labels
if use_lstm:
print ("TEST_PREDICT")
exit()
else:
from machine_learning import crf
predictions = crf.predict(clf, X)
# Format labels from output
return predictions
|
|
import operator # Used to sort by an element of a class
import collections # Used to get a dictionary with .append()
try:
    from enum import Enum # Used to make the algorithm a bit more readable
class PointType(Enum):
Nothing = -1
Peak = 0
Valley = 1
except ImportError:
class PointType(object):
Nothing = -1
Peak = 0
Valley = 1
try:
from numpy import std
except ImportError:
import math
def std(ll):
n = float(len(ll))
if n < 1:
            raise ValueError('Standard deviation requires at least one data point!')
avg = float(0)
for el in ll:
avg += el
avg /= n
sigma_sq = float(0)
for el in ll:
sigma_sq += (el - avg)**2
sigma_sq /= n
return math.sqrt(sigma_sq)
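# For example, std([2, 4, 4, 4, 5, 5, 7, 9]) == 2.0; this is the population
# standard deviation, matching numpy.std with its default ddof=0.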
class MageDataPoint(object):
'''
This class defines a single data point,
consisting of a time in minutes, glucose
value in mg/dL, and a standard deviation in
mg/dL. Additionally, we define useful operations
on the class, like -, >, >=, <, <=, and print()
'''
def __init__(self, newTime=-1, newGlucose=-1):
if newTime == -1 or newGlucose == -1:
raise ValueError('Mage Data Points need both time and glucose!')
self.t = newTime
self.g = newGlucose
self.stdev = 0
def gluc(self):
# Access glucose value of the current object
return self.g
def __str__(self):
return "Time: {},\tGlucose: {},\tStDev: {}"\
.format(self.t, self.g, self.stdev)
def plusSigma(self):
return self.g + self.stdev
def minusSigma(self):
return self.g - self.stdev
def __sub__(self, other):
if isinstance(other, MageDataPoint):
return self.g - other.g
elif isinstance(other, (int, float)):
return self.g - other
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, MageDataPoint):
return self.g < other.g
elif isinstance(other, (int, float)):
return self.g < other
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, MageDataPoint):
return self.g <= other.g
elif isinstance(other, (int, float)):
return self.g <= other
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, MageDataPoint):
return self.g > other.g
elif isinstance(other, (int, float)):
return self.g > other
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, MageDataPoint):
return self.g >= other.g
elif isinstance(other, (int, float)):
return self.g >= other
else:
return NotImplemented
class MageDataSet(object):
'''
This class defines a dataset. The default constructor
takes as its argument a list of times and glucose values.
The .calculate() method can be used to calculate MAGE, and
the getMAGE() method can be used to get the most recently
calculated MAGE without redoing the entire calculation.
'''
def __init__(self, newTimes=None, newGlucoses=None):
self.OneDay = 1440 # length of a day in minutes
self.MAGE = -1
self.NUM_MAGE_PTS = 1
self.printPoints = False
if newTimes is None or newGlucoses is None:
self.points = list()
else:
'''
            Make sure you don't overstep your bounds when indexing
the incoming arrays, because they may be of unequal sizes
'''
self.points = [MageDataPoint(newTimes[i], newGlucoses[i])
for i in range(min(len(newTimes),
len(newGlucoses)))]
def __str__(self):
if self.printPoints:
for point in self.points:
print(point)
return "MAGE score: {}".format(self.getMAGE())
def printEverything(self, booly):
if isinstance(booly, bool):
self.printPoints = booly
def sortByTime(self):
self.points.sort(key=operator.attrgetter('t'))
def prepareValues(self):
self.sortByTime()
self.pointsByDay = collections.defaultdict(list)
# Group all points by day, using a dictionary
for point in self.points:
self.pointsByDay[point.t // self.OneDay].append(point)
'''
Once grouped, calculate the standard deviation
for that day, and set the corresponding point in the
actual object up with that standard deviation
'''
        for dailyData in self.pointsByDay.values():
            stddev = std([point.gluc() for point in dailyData])
            # The grouped points are references to the objects already stored
            # in self.points, so setting stdev here updates them in place.
            for point in dailyData:
                point.stdev = stddev
def current(self):
if self.pointIndex >= 0:
return self.points[self.pointIndex]
else:
return self.points[0]
def findFirstPeakAndValley(self):
'''
Can't even start until we have 3 points.
Makes sense, since MAGE is a Peak finding
algorithm at its core.
'''
if len(self.points) < 3:
return
self.pointIndex = 1
found = False
self.lastFound = PointType.Nothing
'''
Forgive me, this next bit is O(n^2)
'''
while(found is False):
for point in self.points[:self.pointIndex]:
if self.current() >= point.plusSigma():
found = True
self.lastFound = PointType.Peak
self.currentPeak = self.current()
self.currentValley = point
break #return#break
elif self.current() <= point.minusSigma():
found = True
self.lastFound = PointType.Valley
self.currentPeak = point
self.currentValley = self.current()
break #return#break
self.pointIndex += 1
def findOtherPeaksAndValleys(self):
'''
again, we constrain the number
of points since it doesn't make
sense to move on if the algorithm
doesn't have enough data.
'''
if len(self.points) < 3:
return
self.mage = 0
self.num_mage_pts = 0
for point in self.points[self.pointIndex:]:
'''
print("pp: {}, vv: {}, cc: {}".format\
(self.currentPeak, self.currentValley, point))
'''
if self.lastFound == PointType.Valley:
if point >= self.currentValley.plusSigma():
'''
                    we found a Peak! now safe to add the previous two
to our running mage sum
'''
print("found Peak!\t", point)
self.lastFound = PointType.Peak
self.mage += abs(self.currentPeak - self.currentValley)
self.num_mage_pts += 1
self.currentPeak = point
else:
# check for smaller Valley
#print("checking for smaller Valley,\t", self.currentValley)
self.currentValley = point if point < self.currentValley else self.currentValley
#print("maybe new Valley?\t\t\t", self.currentValley)
elif self.lastFound == PointType.Peak:
if point <= self.currentPeak.minusSigma():
'''
we found a Valley! now safe to add the previous two
to our running mage sum
'''
print("found Valley!\t", point)
self.lastFound = PointType.Valley
self.mage += abs(self.currentPeak - self.currentValley)
self.num_mage_pts += 1
self.currentValley = point
else:
#print("checking for larger Peak,\t", self.currentPeak)
self.currentPeak = point if point > self.currentPeak else self.currentPeak
#print("maybe new Peak?\t\t\t", self.currentPeak)
            else:
                raise ValueError('Attempted to find next peak/valley '
                                 'without an initial! Exiting.')
top = self.currentPeak
bottom = self.currentValley
if self.lastFound == PointType.Peak:
#print("last found was a Peak")
if point <= self.currentPeak.minusSigma():
top = point
else:
top = self.currentValley
bottom = self.currentPeak
elif self.lastFound == PointType.Valley:
#print("last found was a Valley")
if point >= self.currentValley.plusSigma():
top = point
else:
top = self.currentPeak
bottom = self.currentValley
self.mage += abs(top - bottom)
self.num_mage_pts += 1
# self.pointIndex += 1 # do this so we can use .current() properly
    def getMAGE(self):
        if self.MAGE < 0:
            self.calculate()
            # calculate() stores its results in self.mage / self.num_mage_pts.
            if getattr(self, 'num_mage_pts', 0) > 0:
                self.MAGE = self.mage
                self.NUM_MAGE_PTS = self.num_mage_pts
        return self.MAGE / self.NUM_MAGE_PTS
def calculate(self):
self.prepareValues()
self.findFirstPeakAndValley()
self.findOtherPeaksAndValleys()
if __name__ == '__main__':
times = [60*i for i in range(1000)]
'''glucs = [200, 180, 160, 140, 120, 100, 70, 114, 80, 95, 120, 140, 160, \
140, 100, 150, 170, 220, 215, 210, 170, 140, 200, 60, 65, 75, 85, 95, \
140, 80, 60, 80, 100, 120, 160, 180, 240, 220, 170, 250, 300, 240, 200, \
150, 125, 100, 140, 180]
'''
glucs = [100, 150, 100, 130, 100, 150, 100, 130, 100, 150, 100, 130, \
100, 150, 100, 130, 100, 150, 100, 130, 100, 150, 100, 130]
m = MageDataSet(times, glucs)#[0, 500, 1400, 2000, 2400],[200,250,120, 180, 160])
m.printEverything(True)
m.getMAGE()
print(m)
|
|
# stdlib
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from utils.platform import Platform
from tests.checks.common import AgentCheckTest
@attr(requires='mysql')
class TestMySql(AgentCheckTest):
CHECK_NAME = 'mysql'
METRIC_TAGS = ['tag1', 'tag2']
SC_TAGS = ['server:localhost', 'port:unix_socket']
MYSQL_MINIMAL_CONFIG = [{
'server': 'localhost',
'user': 'dog',
'pass': 'dog'
}]
MYSQL_COMPLEX_CONFIG = [{
'server': 'localhost',
'user': 'dog',
'pass': 'dog',
'options': {
'replication': True,
'extra_status_metrics': True,
'extra_innodb_metrics': True,
'extra_performance_metrics': True,
'schema_size_metrics': True,
},
'tags': METRIC_TAGS,
'queries': [
{
'query': "SELECT * from testdb.users where name='Alice' limit 1;",
'metric': 'alice.age',
'type': 'gauge',
'field': 'age'
},
{
'query': "SELECT * from testdb.users where name='Bob' limit 1;",
'metric': 'bob.age',
'type': 'gauge',
'field': 'age'
}
]
}]
CONNECTION_FAILURE = [{
'server': 'localhost',
'user': 'unknown',
'pass': 'dog',
}]
STATUS_VARS = [
# Command Metrics
'mysql.performance.slow_queries',
'mysql.performance.questions',
'mysql.performance.queries',
'mysql.performance.com_select',
'mysql.performance.com_insert',
'mysql.performance.com_update',
'mysql.performance.com_delete',
'mysql.performance.com_replace',
'mysql.performance.com_load',
'mysql.performance.com_insert_select',
'mysql.performance.com_update_multi',
'mysql.performance.com_delete_multi',
'mysql.performance.com_replace_select',
# Connection Metrics
'mysql.net.connections',
'mysql.net.max_connections',
'mysql.net.aborted_clients',
'mysql.net.aborted_connects',
# Table Cache Metrics
'mysql.performance.open_files',
'mysql.performance.open_tables',
# Network Metrics
'mysql.performance.bytes_sent',
'mysql.performance.bytes_received',
# Query Cache Metrics
'mysql.performance.qcache_hits',
'mysql.performance.qcache_inserts',
'mysql.performance.qcache_lowmem_prunes',
# Table Lock Metrics
'mysql.performance.table_locks_waited',
'mysql.performance.table_locks_waited.rate',
# Temporary Table Metrics
'mysql.performance.created_tmp_tables',
'mysql.performance.created_tmp_disk_tables',
'mysql.performance.created_tmp_files',
# Thread Metrics
'mysql.performance.threads_connected',
'mysql.performance.threads_running',
# MyISAM Metrics
'mysql.myisam.key_buffer_bytes_unflushed',
'mysql.myisam.key_buffer_bytes_used',
'mysql.myisam.key_read_requests',
'mysql.myisam.key_reads',
'mysql.myisam.key_write_requests',
'mysql.myisam.key_writes',
]
# Possibly from SHOW GLOBAL VARIABLES
VARIABLES_VARS = [
'mysql.myisam.key_buffer_size',
'mysql.performance.key_cache_utilization',
'mysql.net.max_connections_available',
'mysql.performance.qcache_size',
'mysql.performance.table_open_cache',
'mysql.performance.thread_cache_size'
]
INNODB_VARS = [
# InnoDB metrics
'mysql.innodb.data_reads',
'mysql.innodb.data_writes',
'mysql.innodb.os_log_fsyncs',
'mysql.innodb.mutex_spin_waits',
'mysql.innodb.mutex_spin_rounds',
'mysql.innodb.mutex_os_waits',
'mysql.innodb.row_lock_waits',
'mysql.innodb.row_lock_time',
'mysql.innodb.row_lock_current_waits',
# 'mysql.innodb.current_row_locks', MariaDB status
'mysql.innodb.buffer_pool_dirty',
'mysql.innodb.buffer_pool_free',
'mysql.innodb.buffer_pool_used',
'mysql.innodb.buffer_pool_total',
'mysql.innodb.buffer_pool_read_requests',
'mysql.innodb.buffer_pool_reads',
'mysql.innodb.buffer_pool_utilization',
]
# Calculated from "SHOW MASTER LOGS;"
BINLOG_VARS = [
# 'mysql.binlog.disk_use', Only collected if log_bin is true
]
SYSTEM_METRICS = [
'mysql.performance.user_time',
'mysql.performance.kernel_time',
'mysql.performance.cpu_time',
]
OPTIONAL_REPLICATION_METRICS = [
'mysql.replication.slave_running',
'mysql.replication.seconds_behind_master',
'mysql.replication.slaves_connected',
]
# Additional Vars found in "SHOW STATUS;"
# Will collect if [FLAG NAME] is True
OPTIONAL_STATUS_VARS = [
'mysql.binlog.cache_disk_use',
'mysql.binlog.cache_use',
'mysql.performance.handler_commit',
'mysql.performance.handler_delete',
'mysql.performance.handler_prepare',
'mysql.performance.handler_read_first',
'mysql.performance.handler_read_key',
'mysql.performance.handler_read_next',
'mysql.performance.handler_read_prev',
'mysql.performance.handler_read_rnd',
'mysql.performance.handler_read_rnd_next',
'mysql.performance.handler_rollback',
'mysql.performance.handler_update',
'mysql.performance.handler_write',
'mysql.performance.opened_tables',
'mysql.performance.qcache_total_blocks',
'mysql.performance.qcache_free_blocks',
'mysql.performance.qcache_free_memory',
'mysql.performance.qcache_not_cached',
'mysql.performance.qcache_queries_in_cache',
'mysql.performance.select_full_join',
'mysql.performance.select_full_range_join',
'mysql.performance.select_range',
'mysql.performance.select_range_check',
'mysql.performance.select_scan',
'mysql.performance.sort_merge_passes',
'mysql.performance.sort_range',
'mysql.performance.sort_rows',
'mysql.performance.sort_scan',
'mysql.performance.table_locks_immediate',
'mysql.performance.table_locks_immediate.rate',
'mysql.performance.threads_cached',
'mysql.performance.threads_created'
]
OPTIONAL_STATUS_VARS_5_6_6 = [
'mysql.performance.table_cache_hits',
'mysql.performance.table_cache_misses',
]
# Will collect if [FLAG NAME] is True
OPTIONAL_INNODB_VARS = [
'mysql.innodb.active_transactions',
'mysql.innodb.buffer_pool_data',
'mysql.innodb.buffer_pool_pages_data',
'mysql.innodb.buffer_pool_pages_dirty',
'mysql.innodb.buffer_pool_pages_flushed',
'mysql.innodb.buffer_pool_pages_free',
'mysql.innodb.buffer_pool_pages_total',
'mysql.innodb.buffer_pool_read_ahead',
'mysql.innodb.buffer_pool_read_ahead_evicted',
'mysql.innodb.buffer_pool_read_ahead_rnd',
'mysql.innodb.buffer_pool_wait_free',
'mysql.innodb.buffer_pool_write_requests',
'mysql.innodb.checkpoint_age',
'mysql.innodb.current_transactions',
'mysql.innodb.data_fsyncs',
'mysql.innodb.data_pending_fsyncs',
'mysql.innodb.data_pending_reads',
'mysql.innodb.data_pending_writes',
'mysql.innodb.data_read',
'mysql.innodb.data_written',
'mysql.innodb.dblwr_pages_written',
'mysql.innodb.dblwr_writes',
'mysql.innodb.hash_index_cells_total',
'mysql.innodb.hash_index_cells_used',
'mysql.innodb.history_list_length',
'mysql.innodb.ibuf_free_list',
'mysql.innodb.ibuf_merged',
'mysql.innodb.ibuf_merged_delete_marks',
'mysql.innodb.ibuf_merged_deletes',
'mysql.innodb.ibuf_merged_inserts',
'mysql.innodb.ibuf_merges',
'mysql.innodb.ibuf_segment_size',
'mysql.innodb.ibuf_size',
'mysql.innodb.lock_structs',
'mysql.innodb.locked_tables',
'mysql.innodb.locked_transactions',
'mysql.innodb.log_waits',
'mysql.innodb.log_write_requests',
'mysql.innodb.log_writes',
'mysql.innodb.lsn_current',
'mysql.innodb.lsn_flushed',
'mysql.innodb.lsn_last_checkpoint',
'mysql.innodb.mem_adaptive_hash',
'mysql.innodb.mem_additional_pool',
'mysql.innodb.mem_dictionary',
'mysql.innodb.mem_file_system',
'mysql.innodb.mem_lock_system',
'mysql.innodb.mem_page_hash',
'mysql.innodb.mem_recovery_system',
'mysql.innodb.mem_thread_hash',
'mysql.innodb.mem_total',
'mysql.innodb.os_file_fsyncs',
'mysql.innodb.os_file_reads',
'mysql.innodb.os_file_writes',
'mysql.innodb.os_log_pending_fsyncs',
'mysql.innodb.os_log_pending_writes',
'mysql.innodb.os_log_written',
'mysql.innodb.pages_created',
'mysql.innodb.pages_read',
'mysql.innodb.pages_written',
'mysql.innodb.pending_aio_log_ios',
'mysql.innodb.pending_aio_sync_ios',
'mysql.innodb.pending_buffer_pool_flushes',
'mysql.innodb.pending_checkpoint_writes',
'mysql.innodb.pending_ibuf_aio_reads',
'mysql.innodb.pending_log_flushes',
'mysql.innodb.pending_log_writes',
'mysql.innodb.pending_normal_aio_reads',
'mysql.innodb.pending_normal_aio_writes',
'mysql.innodb.queries_inside',
'mysql.innodb.queries_queued',
'mysql.innodb.read_views',
'mysql.innodb.rows_deleted',
'mysql.innodb.rows_inserted',
'mysql.innodb.rows_read',
'mysql.innodb.rows_updated',
'mysql.innodb.s_lock_os_waits',
'mysql.innodb.s_lock_spin_rounds',
'mysql.innodb.s_lock_spin_waits',
'mysql.innodb.semaphore_wait_time',
'mysql.innodb.semaphore_waits',
'mysql.innodb.tables_in_use',
'mysql.innodb.x_lock_os_waits',
'mysql.innodb.x_lock_spin_rounds',
'mysql.innodb.x_lock_spin_waits',
]
PERFORMANCE_VARS = [
'mysql.performance.query_run_time.avg',
'mysql.performance.digest_95th_percentile.avg_us',
]
SCHEMA_VARS = [
'mysql.info.schema.size'
]
SYNTHETIC_VARS = [
'mysql.performance.qcache.utilization',
'mysql.performance.qcache.utilization.instant',
]
def _test_optional_metrics(self, optional_metrics, at_least):
"""
Check optional metrics - there should be at least `at_least` matches
"""
before = len(filter(lambda m: m[3].get('tested'), self.metrics))
for mname in optional_metrics:
self.assertMetric(mname, tags=self.METRIC_TAGS, at_least=0)
# Compute match rate
after = len(filter(lambda m: m[3].get('tested'), self.metrics))
self.assertTrue(after - before > at_least)
def test_minimal_config(self):
config = {'instances': self.MYSQL_MINIMAL_CONFIG}
self.run_check_twice(config)
# Test service check
self.assertServiceCheck('mysql.can_connect', status=AgentCheck.OK,
tags=self.SC_TAGS, count=1)
# Test metrics
testable_metrics = (self.STATUS_VARS + self.VARIABLES_VARS + self.INNODB_VARS
+ self.BINLOG_VARS + self.SYSTEM_METRICS + self.SYNTHETIC_VARS)
for mname in testable_metrics:
self.assertMetric(mname, count=1)
def test_complex_config(self):
config = {'instances': self.MYSQL_COMPLEX_CONFIG}
self.run_check_twice(config)
# Test service check
self.assertServiceCheck('mysql.can_connect', status=AgentCheck.OK,
tags=self.SC_TAGS, count=1)
# Travis MySQL not running replication - FIX in flavored test.
self.assertServiceCheck('mysql.replication.slave_running', status=AgentCheck.CRITICAL,
tags=self.SC_TAGS, count=1)
ver = map(lambda x: int(x), self.service_metadata[0]['version'].split("."))
ver = tuple(ver)
testable_metrics = (self.STATUS_VARS + self.VARIABLES_VARS + self.INNODB_VARS
+ self.BINLOG_VARS + self.SYSTEM_METRICS + self.SCHEMA_VARS + self.SYNTHETIC_VARS)
if ver >= (5, 6, 0):
testable_metrics.extend(self.PERFORMANCE_VARS)
# Test metrics
for mname in testable_metrics:
# These two are currently not guaranteed outside of a Linux
# environment.
if mname == 'mysql.performance.user_time' and not Platform.is_linux():
continue
if mname == 'mysql.performance.kernel_time' and not Platform.is_linux():
continue
if mname == 'mysql.performance.cpu_time' and Platform.is_windows():
continue
if mname == 'mysql.performance.query_run_time.avg':
self.assertMetric(mname, tags=self.METRIC_TAGS+['schema:testdb'], count=1)
elif mname == 'mysql.info.schema.size':
self.assertMetric(mname, tags=self.METRIC_TAGS+['schema:testdb'], count=1)
self.assertMetric(mname, tags=self.METRIC_TAGS+['schema:information_schema'], count=1)
self.assertMetric(mname, tags=self.METRIC_TAGS+['schema:performance_schema'], count=1)
else:
self.assertMetric(mname, tags=self.METRIC_TAGS, count=1)
# Assert service metadata
self.assertServiceMetadata(['version'], count=1)
# test custom query metrics
self.assertMetric('alice.age', value=25)
self.assertMetric('bob.age', value=20)
# test optional metrics
self._test_optional_metrics((self.OPTIONAL_REPLICATION_METRICS
+ self.OPTIONAL_INNODB_VARS
+ self.OPTIONAL_STATUS_VARS
+ self.OPTIONAL_STATUS_VARS_5_6_6), 1)
# Raises when COVERAGE=true and coverage < 100%
self.coverage_report()
def test_connection_failure(self):
"""
Service check reports connection failure
"""
config = {'instances': self.CONNECTION_FAILURE}
self.assertRaises(
Exception,
lambda: self.run_check(config)
)
self.assertServiceCheck('mysql.can_connect', status=AgentCheck.CRITICAL,
tags=self.SC_TAGS, count=1)
self.coverage_report()
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions related to the movement of the FFN FoV."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
import json
import weakref
import numpy as np
from scipy.special import logit
import tensorflow as tf
from ..training.import_util import import_symbol
# Unless stated otherwise, all shape/coordinate triples in this file are in zyx
# order.
# TODO(mjanusz): This has the potential problem that when an 'Y'-like or more
# complex fork is present, the model could fail to follow one of the branches.
# Doesn't seem to matter in practice though, at least with the current
# FoVs/datasets.
#
# For larger FoVs, we would need to threshold the probability map for every
# face, and look at the max probability point in every connected component
# within a face. Probably best to implement this in C++ and just use a Python
# wrapper.
def get_scored_move_offsets(deltas, prob_map, threshold=0.9):
"""Looks for potential moves for a FFN.
The possible moves are determined by extracting probability map values
corresponding to cuboid faces at +/- deltas, and considering the highest
probability value for every face.
Args:
deltas: (z,y,x) tuple of base move offsets for the 3 axes
prob_map: current probability map as a (z,y,x) numpy array
threshold: minimum score required at the new FoV center for a move to be
considered valid
Yields:
tuples of:
score (probability at the new FoV center),
position offset tuple (z,y,x) relative to center of prob_map
The order of the returned tuples is arbitrary and should not be depended
upon. In particular, the tuples are not necessarily sorted by score.
"""
center = np.array(prob_map.shape) // 2
assert center.size == 3
# Selects a working subvolume no more than +/- delta away from the current
# center point.
subvol_sel = [slice(c - dx, c + dx + 1) for c, dx
in zip(center, deltas)]
done = set()
for axis, axis_delta in enumerate(deltas):
if axis_delta == 0:
continue
for axis_offset in (-axis_delta, axis_delta):
# Move exactly by the delta along the current axis, and select the face
# of the subvolume orthogonal to the current axis.
face_sel = subvol_sel[:]
face_sel[axis] = axis_offset + center[axis]
      face_prob = prob_map[tuple(face_sel)]
shape = face_prob.shape
# Find voxel with maximum activation.
face_pos = np.unravel_index(face_prob.argmax(), shape)
score = face_prob[face_pos]
# Only move if activation crosses threshold.
if score < threshold:
continue
# Convert within-face position to be relative vs the center of the face.
relative_pos = [face_pos[0] - shape[0] // 2, face_pos[1] - shape[1] // 2]
relative_pos.insert(axis, axis_offset)
ret = (score, tuple(relative_pos))
if ret not in done:
done.add(ret)
yield ret
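# A small illustration (synthetic data, not from the original code): with
#   prob_map = np.zeros((5, 5, 5)); prob_map[4, 2, 2] = 0.95
# and deltas = (2, 2, 2), only the +z face crosses the 0.9 threshold, so the
# generator yields a single candidate: (0.95, (2, 0, 0)).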
class BaseMovementPolicy(object):
"""Base class for movement policy queues.
The principal usage is to initialize once with the policy's parameters and
set up a queue for candidate positions. From this queue candidates can be
iteratively consumed and the scores should be updated in the FFN
segmentation loop.
"""
def __init__(self, canvas, scored_coords, deltas):
"""Initializes the policy.
Args:
canvas: Canvas object for FFN inference
scored_coords: mutable container of tuples (score, zyx coord)
deltas: step sizes as (z,y,x)
"""
# TODO(mjanusz): Remove circular reference between Canvas and seed policies.
self.canvas = weakref.proxy(canvas)
self.scored_coords = scored_coords
self.deltas = np.array(deltas)
def __len__(self):
return len(self.scored_coords)
def __iter__(self):
return self
def next(self):
raise StopIteration()
def append(self, item):
self.scored_coords.append(item)
def update(self, prob_map, position):
"""Updates the state after an FFN inference call.
Args:
prob_map: object probability map returned by the FFN (in logit space)
      position: position of the center of the FoV where inference was performed
(z, y, x)
"""
raise NotImplementedError()
def get_state(self):
"""Returns the state of this policy as a pickable Python object."""
raise NotImplementedError()
def restore_state(self, state):
raise NotImplementedError()
def reset_state(self, start_pos):
"""Resets the policy.
Args:
start_pos: starting position of the current object as z, y, x
"""
raise NotImplementedError()
class FaceMaxMovementPolicy(BaseMovementPolicy):
"""Selects candidates from maxima on prediction cuboid faces."""
def __init__(self, canvas, deltas=(4, 8, 8), score_threshold=0.9):
self.done_rounded_coords = set()
self.score_threshold = score_threshold
self._start_pos = None
super(FaceMaxMovementPolicy, self).__init__(canvas, deque([]), deltas)
def reset_state(self, start_pos):
self.scored_coords = deque([])
self.done_rounded_coords = set()
self._start_pos = start_pos
def get_state(self):
return [(self.scored_coords, self.done_rounded_coords)]
def restore_state(self, state):
self.scored_coords, self.done_rounded_coords = state[0]
def __next__(self):
"""Pops positions from queue until a valid one is found and returns it."""
while self.scored_coords:
_, coord = self.scored_coords.popleft()
coord = tuple(coord)
if self.quantize_pos(coord) in self.done_rounded_coords:
continue
if self.canvas.is_valid_pos(coord):
break
else: # Else goes with while, not with if!
raise StopIteration()
return tuple(coord)
def next(self):
return self.__next__()
def quantize_pos(self, pos):
"""Quantizes the positions symmetrically to a grid downsampled by deltas."""
# Compute offset relative to the origin of the current segment and
# shift by half delta size. This ensures that all directions are treated
# approximately symmetrically -- i.e. the origin point lies in the middle of
# a cell of the quantized lattice, as opposed to a corner of that cell.
rel_pos = (np.array(pos) - self._start_pos)
coord = (rel_pos + self.deltas // 2) // np.maximum(self.deltas, 1)
return tuple(coord)
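  # Worked example (hypothetical values): with deltas = (4, 8, 8) and
  # _start_pos = (10, 20, 30), pos = (13, 27, 30) gives rel_pos = (3, 7, 0),
  # which quantizes to ((3+2)//4, (7+4)//8, (0+4)//8) = (1, 1, 0).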
def update(self, prob_map, position):
"""Adds movements to queue for the cuboid face maxima of ``prob_map``."""
qpos = self.quantize_pos(position)
self.done_rounded_coords.add(qpos)
scored_coords = get_scored_move_offsets(self.deltas, prob_map,
threshold=self.score_threshold)
scored_coords = sorted(scored_coords, reverse=True)
for score, rel_coord in scored_coords:
# convert to whole cube coordinates
coord = [rel_coord[i] + position[i] for i in range(3)]
self.scored_coords.append((score, coord))
def get_policy_fn(request, ffn_model):
"""Returns a policy class based on the InferenceRequest proto."""
if request.movement_policy_name:
movement_policy_class = globals().get(request.movement_policy_name, None)
if movement_policy_class is None:
movement_policy_class = import_symbol(request.movement_policy_name)
else: # Default / fallback.
movement_policy_class = FaceMaxMovementPolicy
if request.movement_policy_args:
kwargs = json.loads(request.movement_policy_args)
else:
kwargs = {}
if 'deltas' not in kwargs:
kwargs['deltas'] = ffn_model.deltas[::-1]
if 'score_threshold' not in kwargs:
kwargs['score_threshold'] = logit(request.inference_options.move_threshold)
return lambda canvas: movement_policy_class(canvas, **kwargs)
class MovementRestrictor(object):
"""Restricts the movement of the FFN FoV."""
def __init__(self, mask=None, shift_mask=None, shift_mask_fov=None,
shift_mask_threshold=4, shift_mask_scale=1, seed_mask=None):
"""Initializes the restrictor.
Args:
mask: 3d ndarray-like of shape (z, y, x); positive values indicate voxels
that are not going to be segmented
shift_mask: 4d ndarray-like of shape (2, z, y, x) representing a 2d shift
vector field
shift_mask_fov: bounding_box.BoundingBox around large shifts in which to
restrict movement. BoundingBox specified as XYZ, start can be
negative.
shift_mask_threshold: if any component of the shift vector exceeds this
value within the FoV, the location will not be segmented
shift_mask_scale: an integer factor specifying how much larger the pixels
        of the shift mask are compared to the data set processed by the FFN
      seed_mask: 3d ndarray-like of shape (z, y, x); positive values indicate
        voxels that are not valid seed locations
    """
self.mask = mask
self.seed_mask = seed_mask
self._shift_mask_scale = shift_mask_scale
self.shift_mask = None
if shift_mask is not None:
self.shift_mask = (np.max(np.abs(shift_mask), axis=0) >=
shift_mask_threshold)
assert shift_mask_fov is not None
self._shift_mask_fov_pre_offset = shift_mask_fov.start[::-1]
self._shift_mask_fov_post_offset = shift_mask_fov.end[::-1] - 1
def is_valid_seed(self, pos):
"""Checks whether a given position is a valid seed point.
Args:
pos: position within the dataset as (z, y, x)
Returns:
True iff location is a valid seed
"""
if self.seed_mask is not None and self.seed_mask[pos]:
return False
return True
def is_valid_pos(self, pos):
"""Checks whether a given position should be segmented.
Args:
pos: position within the dataset as (z, y, x)
Returns:
True iff location should be segmented
"""
# Location masked?
if self.mask is not None and self.mask[pos]:
return False
if self.shift_mask is not None:
np_pos = np.array(pos)
fov_low = np.maximum(np_pos + self._shift_mask_fov_pre_offset, 0)
fov_high = np_pos + self._shift_mask_fov_post_offset
start = fov_low // self._shift_mask_scale
end = fov_high // self._shift_mask_scale
# Do not allow movement through highly distorted areas, which often
# result in merge errors. In the simplest case, the distortion magnitude
# is quantified with a patch-based cross-correlation map.
if np.any(self.shift_mask[fov_low[0]:(fov_high[0] + 1),
start[1]:(end[1] + 1),
start[2]:(end[2] + 1)]):
return False
return True
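# A minimal usage sketch (hypothetical mask): voxels flagged in `mask` are
# rejected by is_valid_pos, everything else is allowed.
#
#   mask = np.zeros((4, 4, 4), dtype=bool)
#   mask[1, 1, 1] = True
#   restrictor = MovementRestrictor(mask=mask)
#   restrictor.is_valid_pos((1, 1, 1))  # -> False
#   restrictor.is_valid_pos((0, 0, 0))  # -> True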
|
|
"""BST data structure."""
import timeit
class Node(object):
"""Node class used for the bst."""
def __init__(self, data=None):
"""Init node."""
self.data = data
self.left = None
self.right = None
self.parent = None
self.depth = 1
def _set_child(self, child):
"""Set child to the parent(leaf node)."""
if self.parent:
if self.parent.left is self:
self.parent.left = child
else:
self.parent.right = child
def children(self):
"""Return all children node of the current node."""
return [child for child in [self.right, self.left] if child]
class BinarySearchTree(object):
"""Binary search tree class."""
def __init__(self, iterable=()):
"""Init the bst class."""
self.root = None
self.count = 0
self.rotation = 0
if isinstance(iterable, (str, list, tuple)):
for val in iterable:
if isinstance(val, int) or isinstance(val, float):
self.insert(val)
else:
raise ValueError('value(s) must be a number!')
else:
raise ValueError('must be a list, str or tuple')
def insert(self, item):
"""Insert a value/node into the tree."""
if self.root is None:
self.root = Node(item)
self.count += 1
elif self.search(item):
return
elif isinstance(item, int) or isinstance(item, float):
curr_data = self.root
new_node = None
while curr_data is not None:
if item < curr_data.data:
if curr_data.left is None:
curr_data.left = Node(item)
curr_data.left.parent = curr_data
self.count += 1
new_node = curr_data.left
break
else:
curr_data = curr_data.left
else: # greater than current node
if curr_data.right is None:
curr_data.right = Node(item)
curr_data.right.parent = curr_data
self.count += 1
new_node = curr_data.right
break
else:
curr_data = curr_data.right
root = self._find_unbalanced(new_node)
if root:
self._rebalance(root)
else:
raise ValueError('can only insert number')
def search(self, item):
"""Search a value in the tree."""
node = self.root
while node is not None:
if node.data == int(item): # in py2.6 item is str & node.data is int
return node
elif node.data < int(item):
node = node.right
continue
else:
node = node.left
continue
return
def delete(self, item):
"""Delete a node from the tree."""
target = self.search(item)
if target is None:
raise ValueError('val not in bst')
elif target.left is None and target.right is None:
if target is self.root:
self.root = None
self.count = 0
return
elif item > target.parent.data:
target.parent.right = None
else:
target.parent.left = None
self.count -= 1
return
elif target.left is None or target.right is None:
if target.left:
replacer = target.left
else:
replacer = target.right
if target.parent:
if target.parent.left is target:
target.parent.left = replacer
else:
target.parent.right = replacer
replacer.parent = target.parent
else:
self.root = replacer
self.root.parent = None
self.count -= 1
return
else:
replacer = self._delete_helper(target.right)
target.data = replacer.data
if replacer is target.right:
replacer.parent.right = None
else:
replacer.parent.left = None
self.count -= 1
return
def _delete_helper(self, node):
"""Return the smallest node on right side of bst from the biggest right side node(passed in)."""
if node.left is None:
return node
return self._delete_helper(node.left)
def size(self):
"""Return the size of the current tree."""
return self.count
def contains(self, item):
"""Return a boolean value if a node is in the tree."""
return isinstance(self.search(item), Node)
def depth(self, root):
"""Return the depth of the current tree (0 for root only tree as required by assignment)."""
return max(0, self._depth(root) - 1)
def _depth(self, root):
"""Return the depth of the current tree internal use only."""
if root is None:
return 0
else:
return max(self._depth(root.left), self._depth(root.right)) + 1
def balance(self):
"""Return the current balance of the tree."""
if self.root is None:
return 0
else:
left_depth = self._depth(self.root.left)
right_depth = self._depth(self.root.right)
return left_depth - right_depth
def _balance(self, node='root'):
"""Ck balance of the node to decide rotation."""
if node == 'roto':
node = self.root
if node is None:
return 0
left_depth = self._depth(node.left)
right_depth = self._depth(node.right)
return left_depth - right_depth
def in_order(self, root):
"""Return val of tree in-order traversal one at a time."""
if root is not None:
for val in self.in_order(root.left):
yield val
yield root.data
for val in self.in_order(root.right):
yield val
def post_order(self, root):
"""Return val of tree post-order traversal one at a time."""
if root is not None:
for val in self.post_order(root.left):
yield val
for val in self.post_order(root.right):
yield val
yield root.data
def pre_order(self, root):
"""Return val of tree pre-order traversal one at a time."""
if root is not None:
yield root.data
for val in self.pre_order(root.left):
yield val
for val in self.pre_order(root.right):
yield val
def _rotate_left(self, root):
"""Rotate left.."""
new_root = root.right
root.right = new_root.left
if new_root.left:
new_root.left.parent = root
if root.parent is None:
self.root = new_root
root._set_child(new_root)
new_root.parent = root.parent
new_root.left = root
root.parent = new_root
return new_root
def _rotate_right(self, root):
"""Rotate right."""
new_root = root.left
root.left = new_root.right
if new_root.right:
new_root.right.parent = root
if root.parent is None:
self.root = new_root
root._set_child(new_root)
new_root.right = root
new_root.parent = root.parent
root.parent = new_root
return new_root
def _rotate_lr(self, root):
"""Rotate left and right."""
left_root = root.left
new_root = left_root.right
left_root.right = new_root.left
root.left = new_root.right
if new_root.left:
new_root.left.parent = left_root
if new_root.right:
new_root.right.parent = root
root._set_child(new_root)
if root.parent is None:
self.root = new_root
new_root.parent = root.parent
new_root.right = root
new_root.left = left_root
root.parent = new_root
left_root.parent = new_root
return new_root
def _rotate_rl(self, root):
""""Rotate right and left."""
right_root = root.right
new_root = right_root.left
right_root.left = new_root.right
root.right = new_root.left
if new_root.right:
new_root.right.parent = right_root
if new_root.left:
new_root.left.parent = root
root._set_child(new_root)
if root.parent is None:
self.root = new_root
new_root.parent = root.parent
new_root.left = root
new_root.right = right_root
root.parent = new_root
right_root.parent = new_root
return new_root
def _rebalance(self, node):
"""Rebalance the tree from self.root."""
self.rotation += 1
if self._balance(node) < 0:
if self._balance(node.right) <= 0:
root = self._rotate_left(node)
else:
root = self._rotate_rl(node)
else:
if self._balance(node.left) >= 0:
root = self._rotate_right(node)
else:
root = self._rotate_lr(node)
self._traverse_depth(root)
next_ = self._find_unbalanced(node)
if next_:
self._rebalance(next_)
def _traverse_depth(self, node):
"""Traverse up to find the depth."""
children = node.children()
node.depth = 1
if children:
node.depth += max(child.depth for child in children)
if node.parent:
self._traverse_depth(node.parent)
def _find_unbalanced(self, node):
"""Ck if tree is unbalanced."""
while node:
if abs(self._balance(node)) > 1:
return node
node = node.parent
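# Illustrative example (not part of the original module): inserting an
# ascending sequence would normally produce a right-leaning chain, but the
# rotations above should keep the tree much shallower, e.g.
#   bst = BinarySearchTree()
#   for val in range(7):
#       bst.insert(val)
#   bst.depth(bst.root)  # expected to be close to log2(7), not 6
#   bst.rotation         # > 0 once rebalancing rotations have been performed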
if __name__ == '__main__': # pragma: no cover
b = BinarySearchTree([20, 10, 5, 15, 3, 7, 13, 17, 30, 25, 23, 27, 35, 37, 23]) # balanced tree depth 3
a = BinarySearchTree() # all right node tree
for num in range(0, 15):
a.insert(num)
t_s = timeit.timeit('b.search(30) ', setup='from __main__ import b')
print('shortest search time for my balanced tree of size 15 is ' + str(t_s) + ' seconds')
t_l = timeit.timeit('a.search(14)', setup='from __main__ import a')
print('longest search time for my unbalanced tree of size 15 is ' + str(t_l) + ' seconds')
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
def find_all(srcstr, substr):
'''
find all occurrences of a substring in the source string
and return their starting indices as a list
Args:
srcstr(str): the parent string
substr(str): the substring to search for
Returns:
list: a list of the indices of the substrings
found
'''
indices = []
gotone = srcstr.find(substr)
while (gotone != -1):
indices.append(gotone)
gotone = srcstr.find(substr, gotone + 1)
return indices
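# Illustrative example (not from the original file):
#   find_all("hello world, hello", "hello")  ->  [0, 13]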
def check_indent(cdline):
'''
to check the indent of a given code line
by counting its leading blank chars,
i.e. blankspaces and \t.
\t is interpreted as 4 single blankspaces,
i.e. '\t' == '    '
Args:
cdline(str) : a single line of code from the source file
Returns:
int : the indent expressed as the number of interpreted
blankspaces
'''
indent = 0
for c in cdline:
if c == '\t':
indent += 4
elif c == ' ':
indent += 1
if c != ' ' and c != '\t':
break
return indent
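# Illustrative example (not from the original file): a line starting with one
# tab and two blankspaces has an interpreted indent of 4 + 2 = 6:
#   check_indent("\t  x = 1")  ->  6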
#srccom: raw comments in the source,including ''' and original indent
def sampcd_extract_and_run(srccom,
name,
logf,
htype="def",
hname="",
show_details=False):
'''
Extract and run sample codes from source comment and
the result will be returned.
Ultimately, this function returns a list of
status codes, one per sample code (in top-down order)
found in srccom.
status code deciphering:
3:error sample code
2:have sample code but format is wrong
1:no sample code
0:successful
-1:no comments found
-2:in white list
A source comment may contain several examples,
so the status is returned as a list.
For instance, if an API has three example codes and
code 1 succeeds, code 2 errors and code 3 succeeds,
the list to return is [0,3,0]
Args:
srccom(str): the source comment of some API whose
example codes will be extracted and run.
name(str): the name of the API.
logf(file): for logging the output in case they are
flushed.
htype(str): the type of hint banners, def/class/method.
hname(str): the name shown in the hint banner, e.g. the def/class/method name.
show_details(bool): Set it to False to print wrong sample
codes only.
Returns:
list: the status code of all the sample codes found in srccom.
'''
def sampcd_header_print(name, sampcd, htype, hname, logf):
'''
print hint banner headers.
Args:
name(str): the name of the API.
sampcd(str): sample code string
htype(str): the type of hint banners, def/class/method.
hname(str): the name shown in the hint banner, e.g. the def/class/method name.
logf(file): for logging the output in case they are
flushed.
'''
print_header(logf, htype, hname)
print "Sample code " + str(y) + " extracted for " + name + " :"
print "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
print(sampcd)
print "----example code check----\n"
print "executing sample code ....."
print "execution result:"
logf.write("\nSample code extracted for " + name + " :\n")
logf.write("\n" + sampcd + "\n")
logf.write("\n----example code check----\n")
logf.write("\nexecuting sample code .....\n")
logf.write("\nexecution result:\n")
sampcd_begins = find_all(srccom, " code-block:: python")
status = []
if (len(sampcd_begins) == 0):
print_header(logf, htype, hname)
'''
detect sample codes using >>> to format
and consider this situation as wrong
'''
if (srccom.find("Examples:") != -1):
print "----example code check----\n"
logf.write("\n----example code check----\n")
if (srccom.find(">>>") != -1):
logf.write(
"Deprecated sample code style:\n\n Examples:\n\n >>>codeline\n >>>codeline\n\n\n "
+ "Please use '.. code-block:: python' to " +
"format sample code.\n")
print(
"Deprecated sample code style:\n\n Examples:\n\n >>>codeline\n >>>codeline\n\n\n "
+ "Please use '.. code-block:: python' to " +
"format sample code.\n")
status.append(2)
print "status code for all sample codes in " + name + " : " + str(
status)
else:
print "No sample code!\n"
logf.write("\nNo sample code!\n")
status.append(1)
print "status code for all sample codes in " + name + " : " + str(
status)
for y in range(1, len(sampcd_begins) + 1):
sampcd_begin = sampcd_begins[y - 1]
sampcd = srccom[sampcd_begin + len(" code-block:: python") + 1:]
sampcd = sampcd.split("\n")
#remove starting empty lines
while sampcd[0].replace(' ', '').replace('\t', '') == '':
sampcd.pop(0)
#the minimum indent, which is the indent of the first
#non-empty line
min_indent = check_indent(sampcd[0])
sampcd_to_write = []
for i in range(0, len(sampcd)):
cdline = sampcd[i]
#handle empty lines or those only with spaces/tabs
if cdline.strip() == '':
continue
this_indent = check_indent(cdline)
if (this_indent < min_indent):
break
else:
cdline = cdline.replace('\t', ' ')
sampcd_to_write.append(cdline[min_indent:])
sampcd = '\n'.join(sampcd_to_write)
if sys.argv[1] == "cpu":
sampcd = '\nimport os\n' + 'os.environ["CUDA_VISIBLE_DEVICES"] = ""\n' + sampcd
if sys.argv[1] == "gpu":
sampcd = '\nimport os\n' + 'os.environ["CUDA_VISIBLE_DEVICES"] = "0"\n' + sampcd
sampcd += '\nprint ' + '\"' + name + ' sample code is executed successfully!\"\n'
if (len(sampcd_begins) > 1):
tfname = name + "_example_" + str(y) + ".py"
else:
tfname = name + "_example" + ".py"
tempf = open("samplecode_temp/" + tfname, 'w')
tempf.write(sampcd)
tempf.close()
cmd = ["python", "samplecode_temp/" + tfname]
subprc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = subprc.communicate()
msg = "".join(output)
err = "".join(error)
if (subprc.returncode != 0):
print("\nSample code error found in " + name + ":\n")
sampcd_header_print(name, sampcd, htype, hname, logf)
print "subprocess return code: " + str(subprc.returncode)
print("Error Raised from Sample Code " + name + " :\n")
print err
print msg
logf.write("\nError Raised from Sample Code " + name + " :\n")
logf.write("\n" + msg + "\n")
status.append(3)
print "status code for all sample codes in " + name + str(status)
#It works!
else:
status.append(0)
if show_details:
sampcd_header_print(name, sampcd, htype, hname, logf)
print "subprocess return code: " + str(subprc.returncode)
print msg
logf.write("\n" + msg + "\n")
print "status code for all sample codes in " + name + " : " + str(
status)
#msg is the returned code execution report
os.remove("samplecode_temp/" + tfname)
return status
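# Illustrative note (hypothetical docstring excerpt, not from the original
# file): a sample that this checker accepts looks like
#
#   Examples:
#       .. code-block:: python
#
#           import paddle.fluid as fluid
#           data = fluid.layers.data(name='x', shape=[1], dtype='float32')
#
# The " code-block:: python" marker is what find_all() searches for above;
# the indented lines below it are dedented and written to
# samplecode_temp/<name>_example.py (or _example_<n>.py when there are
# several) before being executed in a subprocess.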
def single_defcom_extract(start_from, srcls, is_class_begin=False):
'''
to extract a def function/class/method comments body
Args:
start_from(int): the line num of "def" header
srcls(list): the source file in lines
is_class_begin(bool): whether start_from is the beginning of a class. \
A class body without its own docstring may be terminated by its first \
method, while the body of \
a plain def function is only terminated by a non-indented def/class
Returns:
string : the extracted comment body (the lines between the opening and closing quote marks).
'''
i = start_from
fcombody = "" #def comment body
comstart = -1 # the starting line index of the comment mark (''' or """);
#if it is not -1, it indicates the loop is inside the comment body
comstyle = 0 # comment mark style: comments quoted with ''' are coded as 1,
# comments quoted with """ are coded as 2
for x in range(i + 1, len(srcls)):
if is_class_begin:
if (srcls[x].replace('\t', ' ').startswith(' def ')):
break
if ((srcls[x].startswith('def ') or srcls[x].startswith('class '))):
break
else:
if (comstart == -1 and srcls[x].replace(" ", '').replace(
"\t", '').replace("\n", '').startswith("\"\"\"")):
comstart = x
comstyle = 2
continue
if (comstyle == 2 and comstart != -1 and
srcls[x].replace(" ", '').replace("\t", '').replace(
"\n", '').startswith("\"\"\"")):
break
if (comstart == -1 and srcls[x].replace(" ", '').replace(
"\t", '').replace("\n", '').startswith("\'\'\'")):
comstart = x
comstyle = 1
continue
if (comstyle == 1 and comstart != -1 and
srcls[x].replace(" ", '').replace("\t", '').replace(
"\n", '').startswith("\'\'\'")):
break
if (comstart !=
-1): #when the comments start, begin to add line to fcombody
fcombody += srcls[x]
return fcombody
def print_header(logf, htype, name):
print htype + " name:" + name
print "-----------------------"
logf.write("\n\n" + htype + " name:" + name + "\n")
logf.write("-----------------------\n")
def srcf_print(srcfile):
print "source file name:" + srcfile.name
print "---------------------------------------------------"
logf.write("source file name:" + srcfile.name + "\n")
logf.write("---------------------------------------------------\n\n")
def show_alllist(alllist):
print "__all__:" + str(alllist) + "\n"
logf.write("__all__:" + str(alllist) + "\n\n")
def srccoms_extract(srcfile, logf, status_all, wlist, show_details):
'''
Given a source file ``srcfile``, this function will
extract its API(doc comments) and run sample codes in the
API.
Args:
srcfile(file): the source file
logf(file): log recording file
status_all(dict): record all the sample code execution states.
wlist(list): white list
show_details(bool): if show_details is True, the whole process will be printed for you
to debug it locally
Returns:
string: the length of __all__ list in srcfile versus the exact number of
analysed API to make sure no API is missed in this srcfile and it
is useful for statistic practices.
'''
srcc = srcfile.read()
#2. get defs and classes header line number
#set file pointer to its beginning
srcfile.seek(0, 0)
srcls = srcfile.readlines() #source lines
if show_details:
srcf_print(srcfile)
#1. fetch__all__ list
allidx = srcc.find("__all__")
if (allidx != -1):
alllist = []
#get all list for layers/ops.py
if (srcfile.name.find("ops.py") != -1):
for ai in range(0, len(srcls)):
if (srcls[ai].startswith("__all__")):
lb = srcls[ai].find('[')
rb = srcls[ai].find(']')
if (lb == -1):
continue
allele = srcls[ai][lb + 1:rb].replace("'", '').replace(
" ", '').replace("\"", '')
alllist.append(allele)
if '' in alllist:
alllist.remove('')
if show_details:
show_alllist(alllist)
else:
alllist_b = allidx + len("__all__")
allstr = srcc[alllist_b + srcc[alllist_b:].find("[") + 1:alllist_b +
srcc[alllist_b:].find("]")]
allstr = allstr.replace("\n", '').replace(" ", '').replace(
"'", '').replace("\"", '')
alllist = allstr.split(',')
if '' in alllist:
alllist.remove('')
if show_details:
show_alllist(alllist)
api_alllist_count = len(alllist)
api_count = 0
handled = []
#get src contents in layers/ops.py
if (srcfile.name.find("ops.py") != -1):
for i in range(0, len(srcls)):
if srcls[i].find("__doc__") != -1:
opname = srcls[i][:srcls[i].find("__doc__") - 1]
if opname in wlist:
status_all[srcfile.name + '/' + opname] = [-2]
if show_details:
print_header(logf, "def", opname)
print opname + " is in white list, thus skipped"
logf.write("\n" + opname +
" is in white list, thus skipped\n")
print status_all[srcfile.name + '/' + opname]
logf.write("\n" + "execution status" + str(
status_all[srcfile.name + '/' + opname]) + "\n")
continue
comstart = i
for j in range(i, len(srcls)):
if (srcls[j].find("\"\"\"") != -1):
comstart = i
opcom = ""
for j in range(comstart + 1, len(srcls)):
opcom += srcls[j]
if (srcls[j].find("\"\"\"") != -1):
break
status = sampcd_extract_and_run(opcom, opname, logf, "def",
opname, show_details)
api_count += 1
status_all[srcfile.name + '/' + opname] = status
handled.append(
opname) #ops.py also has normal formatted functions
#use list 'handled' to mark the functions have been handled here
#which will be ignored in the following step
for i in range(0, len(srcls)):
if srcls[i].startswith(
'def '): #a function header is detected in line i
f_header = srcls[i].replace(" ", '')
fn = f_header[len('def'):f_header.find('(')] #function name
if fn in handled:
continue
if fn in alllist:
api_count += 1
if fn in wlist or fn + "@" + srcfile.name in wlist:
status_all[srcfile.name + '/' + fn] = [-2]
if show_details:
print_header(logf, "def", fn)
print fn + " is in white list, thus skipped"
logf.write("\n" + fn +
" is in white list, thus skipped\n")
print status_all[srcfile.name + '/' + fn]
logf.write("\n" + "execution status" + str(
status_all[srcfile.name + '/' + fn]) + "\n")
continue
fcombody = single_defcom_extract(i, srcls)
if (fcombody == ""): #if no comment
print_header(logf, "def", fn)
print "WARNING: no comments in function " + fn + ", but it deserves."
logf.write("no comments in function " + fn + "\n\n")
status_all[srcfile.name + '/' + fn] = [-1]
print status_all[srcfile.name + '/' + fn]
logf.write("\n" + "execution status" + str(status_all[
srcfile.name + '/' + fn]) + "\n")
continue
else:
status = sampcd_extract_and_run(fcombody, fn, logf,
"def", fn, show_details)
status_all[srcfile.name + '/' + fn] = status
else:
if show_details:
print_header(logf, "def", fn)
print fn + " not in __all__ list"
logf.write(fn + " not in __all__ list\n\n")
if srcls[i].startswith('class '):
c_header = srcls[i].replace(" ", '')
cn = c_header[len('class'):c_header.find('(')] #class name
if cn in handled:
continue
if cn in alllist:
api_count += 1
if cn in wlist or cn + "@" + srcfile.name in wlist:
status_all[srcfile.name + '/' + cn] = [-2]
if show_details:
print cn + " is in white list, thus skipped"
logf.write("\n" + cn +
" is in white list, thus skipped\n")
print status_all[srcfile.name + '/' + cn]
logf.write("\n" + "execution status" + str(
status_all[srcfile.name + '/' + cn]) + "\n")
continue
#class comment
classcom = single_defcom_extract(i, srcls, True)
if (classcom != ""):
status = sampcd_extract_and_run(
classcom, cn, logf, "class", cn, show_details)
status_all[srcfile.name + '/' + cn] = status
else:
print "WARNING: no comments in class itself " + cn + ", but it deserves.\n"
logf.write("no comments in class itself " + cn +
"\n\n\n")
status_all[srcfile.name + '/' + cn] = [-1]
print status_all[srcfile.name + '/' + cn]
logf.write("\n" + "execution status" + str(status_all[
srcfile.name + '/' + cn]) + "\n")
#handling methods in class bodies
for x in range(
i + 1,
len(srcls)): #from the next line of class header
if (srcls[x].startswith('def ') or
srcls[x].startswith('class ')):
break
else:
#member method def header
srcls[x] = srcls[x].replace('\t', ' ')
if (srcls[x].startswith(
' def ')): #detect a method header
thisl = srcls[x]
indent = len(thisl) - len(thisl.lstrip())
mn = thisl[indent + len('def '):thisl.find(
'(')] #method name
name = cn + "." + mn #full name
if mn.startswith('_'):
if show_details:
print mn + " is hidden, not visible to users\n"
logf.write(
"\n" + mn +
" is hidden, not visible to users\n")
continue
if name in wlist or name + "@" + srcfile.name in wlist:
status_all[srcfile.name + '/' + name] = [-2]
if show_details:
print name + " is in white list, thus skipped"
logf.write(
"\n" + name +
" is in white list, thus skipped\n")
print status_all[srcfile.name + '/' +
name]
logf.write(
"\n" + "execution status" + str(
status_all[srcfile.name + '/' +
name]) + "\n")
continue
thismethod = [] #method body lines
thismethod.append(thisl[indent:])
#get all the lines of a single method body
#into thismethod(list)
#and send it to single_defcom_extract
for y in range(x + 1, len(srcls)):
srcls[y] = srcls[y].replace('\t', ' ')
if (srcls[y].startswith('def ') or
srcls[y].startswith('class ')):
#end of method
break
elif (srcls[y].startswith(' def ')):
#end of method
break
else:
thismethod.append(srcls[y][indent:])
thismtdcom = single_defcom_extract(0,
thismethod)
if (thismtdcom != ""):
status = sampcd_extract_and_run(
thismtdcom, name, logf, "method", name,
show_details)
status_all[srcfile.name + '/' +
name] = status
else:
if show_details:
print "no comments in method " + name + "\n"
logf.write("no comments in method " +
name + "\n\n\n")
status_all[srcfile.name + '/' +
name] = [-1]
print status_all[srcfile.name + '/' +
name]
logf.write(
"\n" + "execution status" + str(
status_all[srcfile.name + '/' +
name]) + "\n")
else:
if show_details:
print cn + " is not in __all__ list"
logf.write(cn + " is not in __all__ list\n\n")
return [
srcfile.name + " all list length: " + str(api_alllist_count),
"analysed api count: " + str(api_count)
]
'''
Important constant lists:
filenames : the modules pending for check .
wlist : a list of API that should not trigger the example check .
It is composed of wlist_temp + wlist_inneed + wlist_ignore.
show_details: a boolean value to indicate whether it should be run
in debugging mode.
status_all: a status list containing all the execution status of all
APIs
srcfile: the source .py code file
'''
filenames = [
"layers/control_flow.py", "layers/io.py", "layers/nn.py", "layers/ops.py",
"layers/tensor.py", "layers/learning_rate_scheduler.py",
"layers/detection.py", "layers/metric_op.py"
]
filenames += [
"dygraph/layers.py", "dygraph/base.py", "dygraph/nn.py",
"dygraph/tracer.py", "dygraph/profiler.py", "dygraph/parallel.py",
"dygraph/checkpoint.py", "dygraph/learning_rate_scheduler.py",
"dygraph/backward_strategy.py"
]
filenames += [
"data_feeder.py", "dataset.py", "clip.py", "metrics.py", "executor.py",
"initializer.py", "io.py", "nets.py", "optimizer.py", "profiler.py",
"regularizer.py", "backward.py", "average.py", "unique_name.py",
"framework.py", "evaluator.py", "param_attr.py"
]
wlist_inneed = [
"append_LARS", "BuildStrategy.debug_graphviz_path",
"BuildStrategy.enable_sequential_execution",
"BuildStrategy.fuse_elewise_add_act_ops",
"BuildStrategy.fuse_relu_depthwise_conv",
"BuildStrategy.gradient_scale_strategy", "BuildStrategy.reduce_strategy",
"BuildStrategy.remove_unnecessary_lock", "BuildStrategy.sync_batch_norm",
"DynamicRNN.step_input", "DynamicRNN.static_input", "DynamicRNN.block",
"DynamicRNN.update_memory", "DynamicRNN.output",
"transpiler.DistributeTranspilerConfig",
"transpiler.DistributeTranspilerConfig.slice_var_up",
"transpiler.DistributeTranspilerConfig.split_method",
"transpiler.DistributeTranspilerConfig.min_block_size",
"DistributeTranspilerConfig.slice_var_up",
"DistributeTranspilerConfig.split_method", "ModelAverage.apply",
"ModelAverage.restore", "DistributeTranspilerConfig",
"DistributeTranspilerConfig.min_block_size",
"ExecutionStrategy.allow_op_delay", "load", "Accuracy.update",
"ChunkEvaluator.update", "ExecutionStrategy.num_iteration_per_drop_scope",
"ExecutionStrategy.num_threads", "CompiledProgram._with_inference_optimize",
"CompositeMetric.add_metric", "CompositeMetric.update",
"CompositeMetric.eval", "DetectionMAP.get_map_var", "MetricBase",
"MetricBase.reset", "MetricBase.get_config", "MetricBase.update",
"MetricBase.eval", "Accuracy.eval", "Auc.update", "Auc.eval",
"EditDistance.update", "EditDistance.eval",
"ExponentialMovingAverage.apply", "ExponentialMovingAverage.restore",
"ExponentialMovingAverage.update", "StaticRNN.step", "StaticRNN.step_input",
"StaticRNN.step_output", "StaticRNN.update_memory", "DetectionMAP.reset",
'StaticRNN.output', "cuda_places", "CUDAPinnedPlace", "CUDAPlace",
"Program.parse_from_string"
]
wlist_temp = [
'ChunkEvaluator',
'EditDistance',
'ErrorClipByValue',
'Program.clone',
'cuda_pinned_places',
'DataFeeder',
'elementwise_floordiv',
'Layer',
'Layer.create_parameter',
'Layer.create_variable',
'Layer.sublayers',
'Layer.add_parameter',
'Layer.add_sublayer',
'Layer.parameters',
'Tracer',
'Layer.full_name',
'InMemoryDataset',
'layer_norm',
'bipartite_match',
'double_buffer',
'cumsum',
'thresholded_relu',
'group_norm',
'random_crop',
'py_func',
'row_conv',
'hard_shrink',
'ssd_loss',
'retinanet_target_assign',
'InMemoryDataset.global_shuffle',
'InMemoryDataset.get_memory_data_size',
'DetectionMAP',
'hash',
'InMemoryDataset.set_queue_num',
'LayerNorm',
'Preprocessor',
'chunk_eval',
'GRUUnit',
'ExponentialMovingAverage',
'QueueDataset.global_shuffle',
'NumpyArrayInitializer',
'create_py_reader_by_data',
'InMemoryDataset.local_shuffle',
'InMemoryDataset.get_shuffle_data_size',
'size',
'edit_distance',
'nce',
'BilinearInitializer',
'NaturalExpDecay',
'noam_decay',
'retinanet_detection_output',
'Pool2D',
'PipelineOptimizer',
'generate_mask_labels',
'isfinite',
'InMemoryDataset.set_fleet_send_batch_size',
'cuda_profiler',
'unfold',
'Executor',
'InMemoryDataset.load_into_memory',
'ExponentialDecay',
'BatchNorm',
'deformable_conv',
'InMemoryDataset.preload_into_memory',
'py_reader',
'linear_lr_warmup',
'InMemoryDataset.wait_preload_done',
'CosineDecay',
'roi_perspective_transform',
'unique',
'ones_like',
'LambOptimizer',
'InMemoryDataset.release_memory',
'Conv2DTranspose',
'QueueDataset.local_shuffle',
# wrong in dygraph/checkpoint.py ok in io.py [duplicated name]
'save_persistables@dygraph/checkpoint.py',
'load_persistables@dygraph/checkpoint.py'
]
'''
white list of private API/ redundant API
'''
wlist_ignore = [
'elementwise_pow', 'WeightedAverage.reset', 'ChunkEvaluator.eval',
'NCE.forward', 'elementwise_div', 'BilinearTensorProduct.forward',
'NoamDecay.step', 'elementwise_min', 'PiecewiseDecay.step',
'Conv3DTranspose.forward', 'elementwise_add', 'IfElse.output',
'IfElse.true_block', 'InverseTimeDecay.step', 'PolynomialDecay.step',
'Precision.eval', 'enabled', 'elementwise_max', 'stop_gperf_profiler',
'IfElse.false_block', 'WeightedAverage.add', 'Auc.trapezoid_area',
'elementwise_mul', 'GroupNorm.forward', 'SpectralNorm.forward',
'elementwise_sub', 'Switch.case', 'IfElse.input', 'prepare_context',
'PRelu.forward', 'Recall.update', 'start_gperf_profiler',
'TreeConv.forward', 'Conv2D.forward', 'Switch.default', 'elementwise_mod',
'Precision.update', 'WeightedAverage.eval', 'Conv3D.forward',
'Embedding.forward', 'Recall.eval', 'FC.forward', 'While.block'
]
# only white on CPU
gpu_not_white = [
"deformable_conv", "cuda_places", "CUDAPinnedPlace", "CUDAPlace",
"cuda_profiler"
]
wlist = wlist_temp + wlist_inneed + wlist_ignore
if len(sys.argv) < 2:
print "Error: inadequate number of arguments"
print('''If you are going to run it on
CPU: >>> python sampcd_processor.py cpu
GPU: >>> python sampcd_processor.py gpu
''')
sys.exit("lack arguments")
else:
show_details = False
if sys.argv[1] == "gpu":
for _gnw in gpu_not_white:
wlist.remove(_gnw)
elif sys.argv[1] != "cpu":
print("Unrecognized argument:'" + sys.argv[1] + "' , 'cpu' or 'gpu' is "
+ "desired\n")
sys.exit("Invalid arguments")
if len(sys.argv) == 3:
if sys.argv[2] == "sd":
show_details = True
else:
print("Unrecognized argument:'" + sys.argv[2] + "' , 'sd' is " +
"desired\n")
sys.exit("Invalid arguments")
print("* * * * * * * * * * * * * * * * * * * * * * * *\n" +
"* *\n" +
"* API check -- Example Code Cheker *\n" +
"* *\n" +
"* *\n" +
"* This process is meant to check *\n" +
"* all example codes per CI to ensure *\n" +
"* the example codes can be run successfully *\n" +
"* *\n" +
"* *\n" +
"* Refer to the comments for detailed *\n" +
"* introduction *\n" +
"* *\n" +
"* *\n" +
"* * * * * * * * * * * * * * * * * * * * * * * *\n")
status_all = {}
#a file to record the terminal output
logf = open("example-code-check-log.txt", 'w')
# a temp directory to store temporary sample code file
# subprocess needs a single file to run the code
if not os.path.isdir("./samplecode_temp"):
os.mkdir("./samplecode_temp")
to_check = filenames
for filename in to_check:
srcfile = open(filename, 'r')
counts = srccoms_extract(srcfile, logf, status_all, wlist, show_details)
if show_details:
logf.write("\n\n" + str(counts) + "\n\n")
srcfile.close()
# clear temp files
for root, dirs, files in os.walk("./samplecode_temp"):
for fntemp in files:
os.remove("./samplecode_temp/" + fntemp)
os.rmdir("./samplecode_temp")
status_groups = {-2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
ci_pass = True
for key in status_all:
statusl = status_all[key]
for ele in statusl:
if (ele != 0 and ele != -2 and ele != -1):
ci_pass = False
break
if len(statusl) == 1:
status_groups[statusl[0]].append(key)
else:
for u in range(0, len(statusl)):
status_groups[statusl[u]].append(key + '_' + str(u + 1))
logf.close()
print(
"\n\n------------------End of the Check-------------------------------------------\n\n"
)
errorapisl = status_groups[1] + status_groups[2] + status_groups[3]
if len(errorapisl) > 0:
print "Error raised from: " + str(errorapisl)
if not ci_pass:
print(
"\nOh no.. Mistakes found in sample codes, refer to the log for details\n\n"
)
print('''
- How to run it locally?
Simply put this script under directory:
Paddle/python/paddle/fluid/
and run in python 2.7 (as some interfaces of subprocess may
not work in python 3)
You must specify the device type to run the sample code on:
CPU: >>> python sampcd_processor.py cpu
GPU: >>> python sampcd_processor.py gpu
- How to debug?
This script has an option for showing the details of
the execution status:
>>> python sampcd_processor.py cpu sd
- NOTE:
Please ensure you are using
.. code-block:: python
[sample code starts here]
ONLY 1 BLANKSPACE between '::' and 'python'
''')
exit(1)
else:
print "Sample code check is successful!"
|
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension of
N, a hidden layer dimension of H, and performs classification over C classes.
We train the network with a softmax loss function and L2 regularization on the
weight matrices. The network uses a ReLU nonlinearity after the first fully
connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values and
biases are initialized to zero. Weights and biases are stored in the
variable self.params, which is a dictionary with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] < C. This parameter is optional; if it
is not passed then we only return scores, and if it is passed then we
instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of training
samples.
- grads: Dictionary mapping parameter names to gradients of those parameters
with respect to the loss function; has the same keys as self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
H = W1.shape[1]
# Compute the forward pass
scores = None
#############################################################################
# TODO: Perform the forward pass, computing the class scores for the input. #
# Store the result in the scores variable, which should be an array of #
# shape (N, C). #
#############################################################################
a1 = X.dot(W1) + b1
h1 = np.maximum(0, a1)
scores = (h1).dot(W2) + b2
#############################################################################
# END OF YOUR CODE #
#############################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
# Compute the loss
loss = None
#############################################################################
# TODO: Finish the forward pass, and compute the loss. This should include #
# both the data loss and L2 regularization for W1 and W2. Store the result #
# in the variable loss, which should be a scalar. Use the Softmax #
# classifier loss. #
#############################################################################
exp_scores = np.exp(scores)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
loss_vec = -np.log(probs[np.arange(N), y])
loss = np.sum(loss_vec) / N
loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
#############################################################################
# END OF YOUR CODE #
#############################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
# gradient of the loss w.r.t. the scores (softmax derivative)
dscores = probs.copy()
dscores[np.arange(N), y] -= 1
dscores /= N
grads['W2'] = np.dot(h1.T, dscores)
grads['b2'] = np.sum(dscores, axis=0)
# backprop into the hidden layer and through the ReLU
dh1 = np.dot(dscores, W2.T)
da1 = dh1.copy()
da1[a1 <= 0] = 0
grads['W1'] = np.dot(X.T, da1)
grads['b1'] = np.sum(da1, axis=0)
# gradient of the L2 regularization term (the loss uses reg * sum(W**2))
grads['W2'] += 2 * reg * W2
grads['W1'] += 2 * reg * W1
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
- y: A numpy array of shape (N,) giving training labels; y[i] = c means that
X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning rate
after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train // batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: Create a random minibatch of training data and labels, storing #
# them in X_batch and y_batch respectively. #
#########################################################################
sample_indices = np.random.choice(np.arange(num_train), batch_size)
X_batch = X[sample_indices]
y_batch = y[sample_indices]
#########################################################################
# END OF YOUR CODE #
#########################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
#########################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params) #
# using stochastic gradient descent. You'll need to use the gradients #
# stored in the grads dictionary defined above. #
#########################################################################
self.params['W1'] += -learning_rate * grads['W1']
self.params['W2'] += -learning_rate * grads['W2']
self.params['b1'] += -learning_rate * grads['b1']
self.params['b2'] += -learning_rate * grads['b2']
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate.
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points to
classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each of
the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
to have class c, where 0 <= c < C.
"""
y_pred = None
###########################################################################
# TODO: Implement this function; it should be VERY simple! #
###########################################################################
z1 = X.dot(self.params['W1']) + self.params['b1']
a1 = np.maximum(0, z1)
scores = a1.dot(self.params['W2']) + self.params['b2']
y_pred = np.argmax(scores, axis=1)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
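# A minimal usage sketch (illustrative only; the toy data and hyperparameters
# below are made up and not part of the original file):
#
#   np.random.seed(0)
#   N, D, H, C = 100, 5, 10, 3
#   X = np.random.randn(N, D)
#   y = np.random.randint(C, size=N)
#   net = TwoLayerNet(input_size=D, hidden_size=H, output_size=C)
#   stats = net.train(X, y, X, y, learning_rate=1e-1, reg=1e-3,
#                     num_iters=200, batch_size=50)
#   train_acc = (net.predict(X) == y).mean()
#   plt.plot(stats['loss_history'])  # the loss should trend downward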
|
|
"""Tests for HTMLParser.py."""
import html.parser
import pprint
import unittest
from test import support
class EventCollector(html.parser.HTMLParser):
def __init__(self, *args, **kw):
self.events = []
self.append = self.events.append
html.parser.HTMLParser.__init__(self, *args, **kw)
def get_events(self):
# Normalize the list of events so that buffer artefacts don't
# separate runs of contiguous characters.
L = []
prevtype = None
for event in self.events:
type = event[0]
if type == prevtype == "data":
L[-1] = ("data", L[-1][1] + event[1])
else:
L.append(event)
prevtype = type
self.events = L
return L
# structure markup
def handle_starttag(self, tag, attrs):
self.append(("starttag", tag, attrs))
def handle_startendtag(self, tag, attrs):
self.append(("startendtag", tag, attrs))
def handle_endtag(self, tag):
self.append(("endtag", tag))
# all other markup
def handle_comment(self, data):
self.append(("comment", data))
def handle_charref(self, data):
self.append(("charref", data))
def handle_data(self, data):
self.append(("data", data))
def handle_decl(self, data):
self.append(("decl", data))
def handle_entityref(self, data):
self.append(("entityref", data))
def handle_pi(self, data):
self.append(("pi", data))
def unknown_decl(self, decl):
self.append(("unknown decl", decl))
class EventCollectorExtra(EventCollector):
def handle_starttag(self, tag, attrs):
EventCollector.handle_starttag(self, tag, attrs)
self.append(("starttag_text", self.get_starttag_text()))
class TestCaseBase(unittest.TestCase):
def get_collector(self):
raise NotImplementedError
def _run_check(self, source, expected_events, collector=None):
if collector is None:
collector = self.get_collector()
parser = collector
for s in source:
parser.feed(s)
parser.close()
events = parser.get_events()
if events != expected_events:
self.fail("received events did not match expected events\n"
"Expected:\n" + pprint.pformat(expected_events) +
"\nReceived:\n" + pprint.pformat(events))
def _run_check_extra(self, source, events):
self._run_check(source, events, EventCollectorExtra())
def _parse_error(self, source):
def parse(source=source):
parser = self.get_collector()
parser.feed(source)
parser.close()
self.assertRaises(html.parser.HTMLParseError, parse)
class HTMLParserStrictTestCase(TestCaseBase):
def get_collector(self):
return EventCollector(strict=True)
def test_processing_instruction_only(self):
self._run_check("<?processing instruction>", [
("pi", "processing instruction"),
])
self._run_check("<?processing instruction ?>", [
("pi", "processing instruction ?"),
])
def test_simple_html(self):
self._run_check("""
<!DOCTYPE html PUBLIC 'foo'>
<HTML>&entity; 
<!--comment1a
-></foo><bar><<?pi?></foo<bar
comment1b-->
<Img sRc='Bar' isMAP>sample
text
“
<!--comment2a-- --comment2b-->
</Html>
""", [
("data", "\n"),
("decl", "DOCTYPE html PUBLIC 'foo'"),
("data", "\n"),
("starttag", "html", []),
("entityref", "entity"),
("charref", "32"),
("data", "\n"),
("comment", "comment1a\n-></foo><bar><<?pi?></foo<bar\ncomment1b"),
("data", "\n"),
("starttag", "img", [("src", "Bar"), ("ismap", None)]),
("data", "sample\ntext\n"),
("charref", "x201C"),
("data", "\n"),
("comment", "comment2a-- --comment2b"),
("data", "\n"),
("endtag", "html"),
("data", "\n"),
])
def test_malformatted_charref(self):
self._run_check("<p>&#bad;</p>", [
("starttag", "p", []),
("data", "&#bad;"),
("endtag", "p"),
])
def test_unclosed_entityref(self):
self._run_check("&entityref foo", [
("entityref", "entityref"),
("data", " foo"),
])
def test_bad_nesting(self):
# Strangely, this *is* supposed to test that overlapping
# elements are allowed. HTMLParser is more geared toward
# lexing the input than parsing the structure.
self._run_check("<a><b></a></b>", [
("starttag", "a", []),
("starttag", "b", []),
("endtag", "a"),
("endtag", "b"),
])
def test_bare_ampersands(self):
self._run_check("this text & contains & ampersands &", [
("data", "this text & contains & ampersands &"),
])
def test_bare_pointy_brackets(self):
self._run_check("this < text > contains < bare>pointy< brackets", [
("data", "this < text > contains < bare>pointy< brackets"),
])
def test_illegal_declarations(self):
self._parse_error('<!spacer type="block" height="25">')
def test_starttag_end_boundary(self):
self._run_check("""<a b='<'>""", [("starttag", "a", [("b", "<")])])
self._run_check("""<a b='>'>""", [("starttag", "a", [("b", ">")])])
def test_buffer_artefacts(self):
output = [("starttag", "a", [("b", "<")])]
self._run_check(["<a b='<'>"], output)
self._run_check(["<a ", "b='<'>"], output)
self._run_check(["<a b", "='<'>"], output)
self._run_check(["<a b=", "'<'>"], output)
self._run_check(["<a b='<", "'>"], output)
self._run_check(["<a b='<'", ">"], output)
output = [("starttag", "a", [("b", ">")])]
self._run_check(["<a b='>'>"], output)
self._run_check(["<a ", "b='>'>"], output)
self._run_check(["<a b", "='>'>"], output)
self._run_check(["<a b=", "'>'>"], output)
self._run_check(["<a b='>", "'>"], output)
self._run_check(["<a b='>'", ">"], output)
output = [("comment", "abc")]
self._run_check(["", "<!--abc-->"], output)
self._run_check(["<", "!--abc-->"], output)
self._run_check(["<!", "--abc-->"], output)
self._run_check(["<!-", "-abc-->"], output)
self._run_check(["<!--", "abc-->"], output)
self._run_check(["<!--a", "bc-->"], output)
self._run_check(["<!--ab", "c-->"], output)
self._run_check(["<!--abc", "-->"], output)
self._run_check(["<!--abc-", "->"], output)
self._run_check(["<!--abc--", ">"], output)
self._run_check(["<!--abc-->", ""], output)
def test_starttag_junk_chars(self):
self._parse_error("</>")
self._parse_error("</$>")
self._parse_error("</")
self._parse_error("</a")
self._parse_error("<a<a>")
self._parse_error("</a<a>")
self._parse_error("<!")
self._parse_error("<a")
self._parse_error("<a foo='bar'")
self._parse_error("<a foo='bar")
self._parse_error("<a foo='>'")
self._parse_error("<a foo='>")
def test_valid_doctypes(self):
# from http://www.w3.org/QA/2002/04/valid-dtd-list.html
dtds = ['HTML', # HTML5 doctype
('HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
'"http://www.w3.org/TR/html4/strict.dtd"'),
('HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" '
'"http://www.w3.org/TR/html4/loose.dtd"'),
('html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"'),
('html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd"'),
('math PUBLIC "-//W3C//DTD MathML 2.0//EN" '
'"http://www.w3.org/Math/DTD/mathml2/mathml2.dtd"'),
('html PUBLIC "-//W3C//DTD '
'XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" '
'"http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"'),
('svg PUBLIC "-//W3C//DTD SVG 1.1//EN" '
'"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"'),
'html PUBLIC "-//IETF//DTD HTML 2.0//EN"',
'html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"']
for dtd in dtds:
self._run_check("<!DOCTYPE %s>" % dtd,
[('decl', 'DOCTYPE ' + dtd)])
def test_declaration_junk_chars(self):
self._parse_error("<!DOCTYPE foo $ >")
def test_startendtag(self):
self._run_check("<p/>", [
("startendtag", "p", []),
])
self._run_check("<p></p>", [
("starttag", "p", []),
("endtag", "p"),
])
self._run_check("<p><img src='foo' /></p>", [
("starttag", "p", []),
("startendtag", "img", [("src", "foo")]),
("endtag", "p"),
])
def test_get_starttag_text(self):
s = """<foo:bar \n one="1"\ttwo=2 >"""
self._run_check_extra(s, [
("starttag", "foo:bar", [("one", "1"), ("two", "2")]),
("starttag_text", s)])
def test_cdata_content(self):
contents = [
'<!-- not a comment --> ¬-an-entity-ref;',
"<not a='start tag'>",
'<a href="" /> <p> <span></span>',
'foo = "</scr" + "ipt>";',
'foo = "</SCRIPT" + ">";',
'foo = <\n/script> ',
'<!-- document.write("</scr" + "ipt>"); -->',
('\n//<![CDATA[\n'
'document.write(\'<s\'+\'cript type="text/javascript" '
'src="http://www.example.org/r=\'+new '
'Date().getTime()+\'"><\\/s\'+\'cript>\');\n//]]>'),
'\n<!-- //\nvar foo = 3.14;\n// -->\n',
'foo = "</sty" + "le>";',
'<!-- \u2603 -->',
# these two should be invalid according to the HTML 5 spec,
# section 8.1.2.2
#'foo = </\nscript>',
#'foo = </ script>',
]
elements = ['script', 'style', 'SCRIPT', 'STYLE', 'Script', 'Style']
for content in contents:
for element in elements:
element_lower = element.lower()
s = '<{element}>{content}</{element}>'.format(element=element,
content=content)
self._run_check(s, [("starttag", element_lower, []),
("data", content),
("endtag", element_lower)])
def test_cdata_with_closing_tags(self):
# see issue #13358
# make sure that HTMLParser calls handle_data only once for each CDATA.
# The normal event collector normalizes the events in get_events,
# so we override it to return the original list of events.
class Collector(EventCollector):
def get_events(self):
return self.events
content = """<!-- not a comment --> ¬-an-entity-ref;
<a href="" /> </p><p> <span></span></style>
'</script' + '>'"""
for element in [' script', 'script ', ' script ',
'\nscript', 'script\n', '\nscript\n']:
element_lower = element.lower().strip()
s = '<script>{content}</{element}>'.format(element=element,
content=content)
self._run_check(s, [("starttag", element_lower, []),
("data", content),
("endtag", element_lower)],
collector=Collector())
def test_comments(self):
html = ("<!-- I'm a valid comment -->"
'<!--me too!-->'
'<!------>'
'<!---->'
'<!----I have many hyphens---->'
'<!-- I have a > in the middle -->'
'<!-- and I have -- in the middle! -->')
expected = [('comment', " I'm a valid comment "),
('comment', 'me too!'),
('comment', '--'),
('comment', ''),
('comment', '--I have many hyphens--'),
('comment', ' I have a > in the middle '),
('comment', ' and I have -- in the middle! ')]
self._run_check(html, expected)
def test_condcoms(self):
html = ('<!--[if IE & !(lte IE 8)]>aren\'t<![endif]-->'
'<!--[if IE 8]>condcoms<![endif]-->'
'<!--[if lte IE 7]>pretty?<![endif]-->')
expected = [('comment', "[if IE & !(lte IE 8)]>aren't<![endif]"),
('comment', '[if IE 8]>condcoms<![endif]'),
('comment', '[if lte IE 7]>pretty?<![endif]')]
self._run_check(html, expected)
class HTMLParserTolerantTestCase(HTMLParserStrictTestCase):
def get_collector(self):
return EventCollector(strict=False)
def test_tolerant_parsing(self):
self._run_check('<html <html>te>>xt&a<<bc</a></html>\n'
'<img src="URL><//img></html</html>', [
('starttag', 'html', [('<html', None)]),
('data', 'te>>xt'),
('entityref', 'a'),
('data', '<<bc'),
('endtag', 'a'),
('endtag', 'html'),
('data', '\n<img src="URL>'),
('comment', '/img'),
('endtag', 'html<')])
def test_starttag_junk_chars(self):
self._run_check("</>", [])
self._run_check("</$>", [('comment', '$')])
self._run_check("</", [('data', '</')])
self._run_check("</a", [('data', '</a')])
# XXX this might be wrong
self._run_check("<a<a>", [('data', '<a'), ('starttag', 'a', [])])
self._run_check("</a<a>", [('endtag', 'a<a')])
self._run_check("<!", [('data', '<!')])
self._run_check("<a", [('data', '<a')])
self._run_check("<a foo='bar'", [('data', "<a foo='bar'")])
self._run_check("<a foo='bar", [('data', "<a foo='bar")])
self._run_check("<a foo='>'", [('data', "<a foo='>'")])
self._run_check("<a foo='>", [('data', "<a foo='>")])
def test_slashes_in_starttag(self):
self._run_check('<a foo="var"/>', [('startendtag', 'a', [('foo', 'var')])])
html = ('<img width=902 height=250px '
'src="/sites/default/files/images/homepage/foo.jpg" '
'/*what am I doing here*/ />')
expected = [(
'startendtag', 'img',
[('width', '902'), ('height', '250px'),
('src', '/sites/default/files/images/homepage/foo.jpg'),
('*what', None), ('am', None), ('i', None),
('doing', None), ('here*', None)]
)]
self._run_check(html, expected)
html = ('<a / /foo/ / /=/ / /bar/ / />'
'<a / /foo/ / /=/ / /bar/ / >')
expected = [
('startendtag', 'a', [('foo', None), ('=', None), ('bar', None)]),
('starttag', 'a', [('foo', None), ('=', None), ('bar', None)])
]
self._run_check(html, expected)
def test_declaration_junk_chars(self):
self._run_check("<!DOCTYPE foo $ >", [('decl', 'DOCTYPE foo $ ')])
def test_illegal_declarations(self):
self._run_check('<!spacer type="block" height="25">',
[('comment', 'spacer type="block" height="25"')])
def test_with_unquoted_attributes(self):
# see #12008
html = ("<html><body bgcolor=d0ca90 text='181008'>"
"<table cellspacing=0 cellpadding=1 width=100% ><tr>"
"<td align=left><font size=-1>"
"- <a href=/rabota/><span class=en> software-and-i</span></a>"
"- <a href='/1/'><span class=en> library</span></a></table>")
expected = [
('starttag', 'html', []),
('starttag', 'body', [('bgcolor', 'd0ca90'), ('text', '181008')]),
('starttag', 'table',
[('cellspacing', '0'), ('cellpadding', '1'), ('width', '100%')]),
('starttag', 'tr', []),
('starttag', 'td', [('align', 'left')]),
('starttag', 'font', [('size', '-1')]),
('data', '- '), ('starttag', 'a', [('href', '/rabota/')]),
('starttag', 'span', [('class', 'en')]), ('data', ' software-and-i'),
('endtag', 'span'), ('endtag', 'a'),
('data', '- '), ('starttag', 'a', [('href', '/1/')]),
('starttag', 'span', [('class', 'en')]), ('data', ' library'),
('endtag', 'span'), ('endtag', 'a'), ('endtag', 'table')
]
self._run_check(html, expected)
def test_comma_between_attributes(self):
self._run_check('<form action="/xxx.php?a=1&b=2&", '
'method="post">', [
('starttag', 'form',
[('action', '/xxx.php?a=1&b=2&'),
(',', None), ('method', 'post')])])
def test_weird_chars_in_unquoted_attribute_values(self):
self._run_check('<form action=bogus|&#()value>', [
('starttag', 'form',
[('action', 'bogus|&#()value')])])
def test_invalid_end_tags(self):
# A collection of broken end tags. <br> is used as separator.
# see http://www.w3.org/TR/html5/tokenization.html#end-tag-open-state
# and #13993
html = ('<br></label</p><br></div end tmAd-leaderBoard><br></<h4><br>'
'</li class="unit"><br></li\r\n\t\t\t\t\t\t</ul><br></><br>')
expected = [('starttag', 'br', []),
# < is part of the name, / is discarded, p is an attribute
('endtag', 'label<'),
('starttag', 'br', []),
# text and attributes are discarded
('endtag', 'div'),
('starttag', 'br', []),
# comment because the first char after </ is not a-zA-Z
('comment', '<h4'),
('starttag', 'br', []),
# attributes are discarded
('endtag', 'li'),
('starttag', 'br', []),
# everything till ul (included) is discarded
('endtag', 'li'),
('starttag', 'br', []),
# </> is ignored
('starttag', 'br', [])]
self._run_check(html, expected)
def test_broken_invalid_end_tag(self):
# This is technically wrong (the "> shouldn't be included in the 'data'),
# but it is probably not worth fixing (in addition to all the cases of
# the previous test, it would require full attribute parsing).
# see #13993
html = '<b>This</b attr=">"> confuses the parser'
expected = [('starttag', 'b', []),
('data', 'This'),
('endtag', 'b'),
('data', '"> confuses the parser')]
self._run_check(html, expected)
def test_correct_detection_of_start_tags(self):
# see #13273
html = ('<div style="" ><b>The <a href="some_url">rain</a> '
'<br /> in <span>Spain</span></b></div>')
expected = [
('starttag', 'div', [('style', '')]),
('starttag', 'b', []),
('data', 'The '),
('starttag', 'a', [('href', 'some_url')]),
('data', 'rain'),
('endtag', 'a'),
('data', ' '),
('startendtag', 'br', []),
('data', ' in '),
('starttag', 'span', []),
('data', 'Spain'),
('endtag', 'span'),
('endtag', 'b'),
('endtag', 'div')
]
self._run_check(html, expected)
html = '<div style="", foo = "bar" ><b>The <a href="some_url">rain</a>'
expected = [
('starttag', 'div', [('style', ''), (',', None), ('foo', 'bar')]),
('starttag', 'b', []),
('data', 'The '),
('starttag', 'a', [('href', 'some_url')]),
('data', 'rain'),
('endtag', 'a'),
]
self._run_check(html, expected)
def test_unescape_function(self):
p = self.get_collector()
self.assertEqual(p.unescape('&#bad;'),'&#bad;')
        self.assertEqual(p.unescape('&#0038;'),'&')
# see #12888
self.assertEqual(p.unescape('{ ' * 1050), '{ ' * 1050)
def test_broken_comments(self):
html = ('<! not really a comment >'
'<! not a comment either -->'
'<! -- close enough -->'
'<!><!<-- this was an empty comment>'
'<!!! another bogus comment !!!>')
expected = [
('comment', ' not really a comment '),
('comment', ' not a comment either --'),
('comment', ' -- close enough --'),
('comment', ''),
('comment', '<-- this was an empty comment'),
('comment', '!! another bogus comment !!!'),
]
self._run_check(html, expected)
def test_broken_condcoms(self):
# these condcoms are missing the '--' after '<!' and before the '>'
html = ('<![if !(IE)]>broken condcom<![endif]>'
'<![if ! IE]><link href="favicon.tiff"/><![endif]>'
'<![if !IE 6]><img src="firefox.png" /><![endif]>'
'<![if !ie 6]><b>foo</b><![endif]>'
'<![if (!IE)|(lt IE 9)]><img src="mammoth.bmp" /><![endif]>')
# According to the HTML5 specs sections "8.2.4.44 Bogus comment state"
# and "8.2.4.45 Markup declaration open state", comment tokens should
# be emitted instead of 'unknown decl', but calling unknown_decl
# provides more flexibility.
# See also Lib/_markupbase.py:parse_declaration
expected = [
('unknown decl', 'if !(IE)'),
('data', 'broken condcom'),
('unknown decl', 'endif'),
('unknown decl', 'if ! IE'),
('startendtag', 'link', [('href', 'favicon.tiff')]),
('unknown decl', 'endif'),
('unknown decl', 'if !IE 6'),
('startendtag', 'img', [('src', 'firefox.png')]),
('unknown decl', 'endif'),
('unknown decl', 'if !ie 6'),
('starttag', 'b', []),
('data', 'foo'),
('endtag', 'b'),
('unknown decl', 'endif'),
('unknown decl', 'if (!IE)|(lt IE 9)'),
('startendtag', 'img', [('src', 'mammoth.bmp')]),
('unknown decl', 'endif')
]
self._run_check(html, expected)
class AttributesStrictTestCase(TestCaseBase):
def get_collector(self):
return EventCollector(strict=True)
def test_attr_syntax(self):
output = [
("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)])
]
self._run_check("""<a b='v' c="v" d=v e>""", output)
self._run_check("""<a b = 'v' c = "v" d = v e>""", output)
self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)
def test_attr_values(self):
self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
[("starttag", "a", [("b", "xxx\n\txxx"),
("c", "yyy\t\nyyy"),
("d", "\txyz\n")])])
self._run_check("""<a b='' c="">""",
[("starttag", "a", [("b", ""), ("c", "")])])
# Regression test for SF patch #669683.
self._run_check("<e a=rgb(1,2,3)>",
[("starttag", "e", [("a", "rgb(1,2,3)")])])
# Regression test for SF bug #921657.
self._run_check(
"<a href=mailto:xyz@example.com>",
[("starttag", "a", [("href", "mailto:xyz@example.com")])])
def test_attr_nonascii(self):
# see issue 7311
self._run_check(
"<img src=/foo/bar.png alt=\u4e2d\u6587>",
[("starttag", "img", [("src", "/foo/bar.png"),
("alt", "\u4e2d\u6587")])])
self._run_check(
"<a title='\u30c6\u30b9\u30c8' href='\u30c6\u30b9\u30c8.html'>",
[("starttag", "a", [("title", "\u30c6\u30b9\u30c8"),
("href", "\u30c6\u30b9\u30c8.html")])])
self._run_check(
'<a title="\u30c6\u30b9\u30c8" href="\u30c6\u30b9\u30c8.html">',
[("starttag", "a", [("title", "\u30c6\u30b9\u30c8"),
("href", "\u30c6\u30b9\u30c8.html")])])
def test_attr_entity_replacement(self):
self._run_check(
"<a b='&><"''>",
[("starttag", "a", [("b", "&><\"'")])])
def test_attr_funky_names(self):
self._run_check(
"<a a.b='v' c:d=v e-f=v>",
[("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")])])
def test_entityrefs_in_attributes(self):
self._run_check(
"<html foo='€&aa&unsupported;'>",
[("starttag", "html", [("foo", "\u20AC&aa&unsupported;")])])
class AttributesTolerantTestCase(AttributesStrictTestCase):
def get_collector(self):
return EventCollector(strict=False)
def test_attr_funky_names2(self):
self._run_check(
"<a $><b $=%><c \=/>",
[("starttag", "a", [("$", None)]),
("starttag", "b", [("$", "%")]),
("starttag", "c", [("\\", "/")])])
def test_entities_in_attribute_value(self):
# see #1200313
        for entity in ['&', '&amp;', '&#38;', '&#x26;']:
self._run_check('<a href="%s">' % entity,
[("starttag", "a", [("href", "&")])])
self._run_check("<a href='%s'>" % entity,
[("starttag", "a", [("href", "&")])])
self._run_check("<a href=%s>" % entity,
[("starttag", "a", [("href", "&")])])
def test_malformed_attributes(self):
# see #13357
html = (
"<a href=test'style='color:red;bad1'>test - bad1</a>"
"<a href=test'+style='color:red;ba2'>test - bad2</a>"
"<a href=test' style='color:red;bad3'>test - bad3</a>"
"<a href = test' style='color:red;bad4' >test - bad4</a>"
)
expected = [
('starttag', 'a', [('href', "test'style='color:red;bad1'")]),
('data', 'test - bad1'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'+style='color:red;ba2'")]),
('data', 'test - bad2'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'\xa0style='color:red;bad3'")]),
('data', 'test - bad3'), ('endtag', 'a'),
('starttag', 'a', [('href', "test'\xa0style='color:red;bad4'")]),
('data', 'test - bad4'), ('endtag', 'a')
]
self._run_check(html, expected)
def test_malformed_adjacent_attributes(self):
# see #12629
self._run_check('<x><y z=""o"" /></x>',
[('starttag', 'x', []),
('startendtag', 'y', [('z', ''), ('o""', None)]),
('endtag', 'x')])
self._run_check('<x><y z="""" /></x>',
[('starttag', 'x', []),
('startendtag', 'y', [('z', ''), ('""', None)]),
('endtag', 'x')])
# see #755670 for the following 3 tests
def test_adjacent_attributes(self):
self._run_check('<a width="100%"cellspacing=0>',
[("starttag", "a",
[("width", "100%"), ("cellspacing","0")])])
self._run_check('<a id="foo"class="bar">',
[("starttag", "a",
[("id", "foo"), ("class","bar")])])
def test_missing_attribute_value(self):
self._run_check('<a v=>',
[("starttag", "a", [("v", "")])])
def test_javascript_attribute_value(self):
self._run_check("<a href=javascript:popup('/popup/help.html')>",
[("starttag", "a",
[("href", "javascript:popup('/popup/help.html')")])])
def test_end_tag_in_attribute_value(self):
# see #1745761
self._run_check("<a href='http://www.example.org/\">;'>spam</a>",
[("starttag", "a",
[("href", "http://www.example.org/\">;")]),
("data", "spam"), ("endtag", "a")])
def test_main():
support.run_unittest(HTMLParserStrictTestCase, HTMLParserTolerantTestCase,
AttributesStrictTestCase, AttributesTolerantTestCase)
if __name__ == "__main__":
test_main()
|
|
import unittest
import unishark
import os
import shutil
from unishark.util import get_interpreter
class TestProgramTestCase(unittest.TestCase):
def setUp(self):
super(TestProgramTestCase, self).setUp()
self.dest = 'results'
if os.path.exists(self.dest):
shutil.rmtree(self.dest)
def tearDown(self):
if os.path.exists(self.dest):
shutil.rmtree(self.dest)
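# The configs below all follow the same shape: 'suites' describes what to load
# (a package plus groups of modules), 'reporters' describes how results are
# written, and 'test' selects which suites/reporters to use and the concurrency
# settings.  Runs that execute the mock suites assert an exit code of 1,
# presumably because the tests.mock1/tests.mock2 packages contain deliberately
# failing tests; runs that execute no tests assert an exit code of 0.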
class DefaultTestProgramTestCase(TestProgramTestCase):
def test_sequential_run(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit']
}
}
program = unishark.DefaultTestProgram(dict_conf)
self.assertDictEqual(program.concurrency, {'type': 'threads', 'max_workers': 1, 'timeout': None})
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_multithreading_on_suites(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 2},
}
}
program = unishark.DefaultTestProgram(dict_conf)
self.assertEqual(program.concurrency, {'max_workers': 2, 'type': 'threads', 'timeout': None})
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_multithreading_on_classes(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 0},
}
}
program = unishark.DefaultTestProgram(dict_conf)
self.assertEqual(program.concurrency, {'max_workers': 0, 'type': 'threads', 'timeout': None})
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_multithreading_on_suites_and_within_suite(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 2, 'level': 'module'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 8, 'level': 'method'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 2, 'type': 'threads'},
}
}
program = unishark.DefaultTestProgram(dict_conf)
self.assertEqual(program.concurrency, {'max_workers': 2, 'type': 'threads', 'timeout': None})
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_multiprocessing_on_suites(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 2, 'level': 'module'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 8, 'level': 'method'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 2, 'type': 'processes'},
}
}
if get_interpreter().startswith('jython'):
with self.assertRaises(ValueError) as cm:
unishark.DefaultTestProgram(dict_conf)
self.assertEqual(cm.exception.message, 'Jython does not support multiprocessing.')
else:
program = unishark.DefaultTestProgram(dict_conf)
self.assertEqual(program.concurrency, {'max_workers': 2, 'type': 'processes', 'timeout': None})
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_illegal_suites_concurrency_type(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 2, 'level': 'module'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 8, 'level': 'method'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 2, 'type': 'processing'},
}
}
with self.assertRaises(ValueError):
unishark.DefaultTestProgram(dict_conf)
self.assertFalse(os.path.exists(self.dest))
def test_program_with_no_suites(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': [],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 2},
}
}
program = unishark.DefaultTestProgram(dict_conf)
exit_code = program.run()
self.assertEqual(exit_code, 0)
exp_filenames = ['index.html', 'overview.html', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_program_with_no_reporters_1(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': [],
'concurrency': {'max_workers': 2},
}
}
program = unishark.DefaultTestProgram(dict_conf)
exit_code = program.run()
self.assertEqual(exit_code, 1)
self.assertFalse(os.path.exists(self.dest))
def test_program_with_no_reporters_2(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'concurrency': {'max_workers': 2},
}
}
program = unishark.DefaultTestProgram(dict_conf)
exit_code = program.run()
self.assertEqual(exit_code, 1)
self.assertFalse(os.path.exists(self.dest))
def test_program_with_name_pattern(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'concurrency': {'max_workers': 2},
                'name_pattern': r'^no_such_prefix\w*'
}
}
program = unishark.DefaultTestProgram(dict_conf)
exit_code = program.run()
self.assertEqual(exit_code, 0)
def test_default_suites_concurrency(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
}
}
program = unishark.DefaultTestProgram(dict_conf)
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_missing_max_workers(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {}
}
}
with self.assertRaises(KeyError):
program = unishark.DefaultTestProgram(dict_conf)
program.run()
self.assertFalse(os.path.exists(self.dest))
def test_illegal_max_workers_type(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 'a'}
}
}
with self.assertRaises(ValueError):
program = unishark.DefaultTestProgram(dict_conf)
program.run()
self.assertFalse(os.path.exists(self.dest))
def test_misplacing_max_workers(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'max_workers': 1
}
}
with self.assertRaises(KeyError) as cm:
program = unishark.DefaultTestProgram(dict_conf)
program.run()
self.assertEqual(cm.exception.message, 'Please set "max_workers" in the "concurrency" sub-dict instead.')
self.assertFalse(os.path.exists(self.dest))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
import sys
import platform
from decimal import Decimal
import numpy as np
from numpy.core import *
from numpy.random import rand, randint, randn
from numpy.testing import *
from numpy.testing.utils import WarningManager
from numpy.core.multiarray import dot as dot_
import warnings
class Vec:
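    """Minimal vector wrapper used by TestDot.test_vecobject to exercise
    dot() on object arrays; it only supports +, - and scalar multiplication.
    """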
def __init__(self,sequence=None):
if sequence is None:
sequence=[]
self.array=array(sequence)
def __add__(self,other):
out=Vec()
out.array=self.array+other.array
return out
def __sub__(self,other):
out=Vec()
out.array=self.array-other.array
return out
def __mul__(self,other): # with scalar
out=Vec(self.array.copy())
out.array*=other
return out
def __rmul__(self,other):
return self*other
class TestDot(TestCase):
def setUp(self):
self.A = rand(10,8)
self.b1 = rand(8,1)
self.b2 = rand(8)
self.b3 = rand(1,8)
self.b4 = rand(10)
self.N = 14
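    # Each test compares dot() as exposed by ``from numpy.core import *``
    # against numpy.core.multiarray.dot (imported above as ``dot_``),
    # checking agreement to self.N = 14 decimal places.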
def test_matmat(self):
A = self.A
c1 = dot(A.transpose(), A)
c2 = dot_(A.transpose(), A)
assert_almost_equal(c1, c2, decimal=self.N)
def test_matvec(self):
A, b1 = self.A, self.b1
c1 = dot(A, b1)
c2 = dot_(A, b1)
assert_almost_equal(c1, c2, decimal=self.N)
def test_matvec2(self):
A, b2 = self.A, self.b2
c1 = dot(A, b2)
c2 = dot_(A, b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecmat(self):
A, b4 = self.A, self.b4
c1 = dot(b4, A)
c2 = dot_(b4, A)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecmat2(self):
b3, A = self.b3, self.A
c1 = dot(b3, A.transpose())
c2 = dot_(b3, A.transpose())
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecmat3(self):
A, b4 = self.A, self.b4
c1 = dot(A.transpose(),b4)
c2 = dot_(A.transpose(),b4)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecvecouter(self):
b1, b3 = self.b1, self.b3
c1 = dot(b1, b3)
c2 = dot_(b1, b3)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecvecinner(self):
b1, b3 = self.b1, self.b3
c1 = dot(b3, b1)
c2 = dot_(b3, b1)
assert_almost_equal(c1, c2, decimal=self.N)
def test_columnvect1(self):
b1 = ones((3,1))
b2 = [5.3]
c1 = dot(b1,b2)
c2 = dot_(b1,b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_columnvect2(self):
b1 = ones((3,1)).transpose()
b2 = [6.2]
c1 = dot(b2,b1)
c2 = dot_(b2,b1)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecscalar(self):
b1 = rand(1,1)
b2 = rand(1,8)
c1 = dot(b1,b2)
c2 = dot_(b1,b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecscalar2(self):
b1 = rand(8,1)
b2 = rand(1,1)
c1 = dot(b1,b2)
c2 = dot_(b1,b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_all(self):
dims = [(),(1,),(1,1)]
for dim1 in dims:
for dim2 in dims:
arg1 = rand(*dim1)
arg2 = rand(*dim2)
c1 = dot(arg1, arg2)
c2 = dot_(arg1, arg2)
assert_(c1.shape == c2.shape)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecobject(self):
U_non_cont = transpose([[1.,1.],[1.,2.]])
U_cont = ascontiguousarray(U_non_cont)
x = array([Vec([1.,0.]),Vec([0.,1.])])
zeros = array([Vec([0.,0.]),Vec([0.,0.])])
zeros_test = dot(U_cont,x) - dot(U_non_cont,x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
class TestResize(TestCase):
def test_copies(self):
A = array([[1,2],[3,4]])
Ar1 = array([[1,2,3,4],[1,2,3,4]])
assert_equal(resize(A, (2,4)), Ar1)
Ar2 = array([[1,2],[3,4],[1,2],[3,4]])
assert_equal(resize(A, (4,2)), Ar2)
Ar3 = array([[1,2,3],[4,1,2],[3,4,1],[2,3,4]])
assert_equal(resize(A, (4,3)), Ar3)
def test_zeroresize(self):
A = array([[1,2],[3,4]])
Ar = resize(A, (0,))
assert_equal(Ar, array([]))
class TestNonarrayArgs(TestCase):
# check that non-array arguments to functions wrap them in arrays
def test_squeeze(self):
A = [[[1,1,1],[2,2,2],[3,3,3]]]
assert_(squeeze(A).shape == (3,3))
def test_cumproduct(self):
A = [[1,2,3],[4,5,6]]
assert_(all(cumproduct(A) == array([1,2,6,24,120,720])))
def test_size(self):
A = [[1,2,3],[4,5,6]]
assert_(size(A) == 6)
assert_(size(A,0) == 2)
assert_(size(A,1) == 3)
def test_mean(self):
A = [[1,2,3],[4,5,6]]
assert_(mean(A) == 3.5)
assert_(all(mean(A,0) == array([2.5,3.5,4.5])))
assert_(all(mean(A,1) == array([2.,5.])))
def test_std(self):
A = [[1,2,3],[4,5,6]]
assert_almost_equal(std(A), 1.707825127659933)
assert_almost_equal(std(A,0), array([1.5, 1.5, 1.5]))
assert_almost_equal(std(A,1), array([0.81649658, 0.81649658]))
def test_var(self):
A = [[1,2,3],[4,5,6]]
assert_almost_equal(var(A), 2.9166666666666665)
assert_almost_equal(var(A,0), array([2.25, 2.25, 2.25]))
assert_almost_equal(var(A,1), array([0.66666667, 0.66666667]))
class TestBoolScalar(TestCase):
def test_logical(self):
f = False_
t = True_
s = "xyz"
self.assertTrue((t and s) is s)
self.assertTrue((f and s) is f)
def test_bitwise_or(self):
f = False_
t = True_
self.assertTrue((t | t) is t)
self.assertTrue((f | t) is t)
self.assertTrue((t | f) is t)
self.assertTrue((f | f) is f)
def test_bitwise_and(self):
f = False_
t = True_
self.assertTrue((t & t) is t)
self.assertTrue((f & t) is f)
self.assertTrue((t & f) is f)
self.assertTrue((f & f) is f)
def test_bitwise_xor(self):
f = False_
t = True_
self.assertTrue((t ^ t) is f)
self.assertTrue((f ^ t) is t)
self.assertTrue((t ^ f) is t)
self.assertTrue((f ^ f) is f)
class TestSeterr(TestCase):
def test_default(self):
err = geterr()
self.assertEqual(err, dict(
divide='warn',
invalid='warn',
over='warn',
under='ignore',
))
def test_set(self):
err = seterr()
try:
old = seterr(divide='print')
self.assertTrue(err == old)
new = seterr()
self.assertTrue(new['divide'] == 'print')
seterr(over='raise')
self.assertTrue(geterr()['over'] == 'raise')
self.assertTrue(new['divide'] == 'print')
seterr(**old)
self.assertTrue(geterr() == old)
finally:
seterr(**err)
def test_divide_err(self):
err = seterr(divide='raise')
try:
try:
array([1.]) / array([0.])
except FloatingPointError:
pass
else:
self.fail()
seterr(divide='ignore')
array([1.]) / array([0.])
finally:
seterr(**err)
class TestFloatExceptions(TestCase):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
except FloatingPointError, exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
"""Check that fpe exception is raised.
Given a floating operation `flop` and two scalar values, check that
the operation raises the floating point exception specified by
`fpeerr`. Tests all variants with 0-d array scalars as well.
"""
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
@dec.knownfailureif((sys.platform == "darwin") and
("powerpc" in platform.processor()),
"See ticket 1755")
def test_floating_exceptions(self):
"""Test basic arithmetic function errors"""
oldsettings = np.seterr(all='raise')
try:
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
ft_tiny = fi.tiny
ft_max = fi.max
ft_eps = fi.eps
underflow = 'underflow'
divbyzero = 'divide by zero'
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
ft_tiny = ftype(fi.tiny)
ft_max = ftype(fi.max)
ft_eps = ftype(fi.eps)
# The complex types raise different exceptions
underflow = ''
divbyzero = ''
overflow = 'overflow'
invalid = 'invalid'
self.assert_raises_fpe(underflow,
lambda a,b:a/b, ft_tiny, ft_max)
self.assert_raises_fpe(underflow,
lambda a,b:a*b, ft_tiny, ft_tiny)
self.assert_raises_fpe(overflow,
lambda a,b:a*b, ft_max, ftype(2))
self.assert_raises_fpe(overflow,
lambda a,b:a/b, ft_max, ftype(0.5))
self.assert_raises_fpe(overflow,
lambda a,b:a+b, ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
lambda a,b:a-b, -ft_max, ft_max*ft_eps)
self.assert_raises_fpe(divbyzero,
lambda a,b:a/b, ftype(1), ftype(0))
self.assert_raises_fpe(invalid,
lambda a,b:a/b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a,b:a/b, ftype(0), ftype(0))
self.assert_raises_fpe(invalid,
lambda a,b:a-b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a,b:a+b, ftype(np.inf), ftype(-np.inf))
self.assert_raises_fpe(invalid,
lambda a,b:a*b, ftype(0), ftype(np.inf))
finally:
np.seterr(**oldsettings)
@dec.knownfailureif(sys.platform.startswith('win') or
(sys.platform == "darwin" and "powerpc" in platform.processor()),
"See ticket 1755")
def test_floating_exceptions_power(self):
"""Test basic arithmetic function errors"""
oldsettings = np.seterr(all='raise')
try:
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
overflow = 'overflow'
self.assert_raises_fpe(overflow,
np.power, ftype(2), ftype(2**fi.nexp))
finally:
np.seterr(**oldsettings)
class TestTypes(TestCase):
def check_promotion_cases(self, promote_func):
"""Tests that the scalars get coerced correctly."""
b = np.bool_(0)
i8, i16, i32, i64 = int8(0), int16(0), int32(0), int64(0)
u8, u16, u32, u64 = uint8(0), uint16(0), uint32(0), uint64(0)
f32, f64, fld = float32(0), float64(0), longdouble(0)
c64, c128, cld = complex64(0), complex128(0), clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8,i16), np.dtype(int16))
assert_equal(promote_func(i32,i8), np.dtype(int32))
assert_equal(promote_func(i16,i64), np.dtype(int64))
assert_equal(promote_func(u8,u32), np.dtype(uint32))
assert_equal(promote_func(f32,f64), np.dtype(float64))
assert_equal(promote_func(fld,f32), np.dtype(longdouble))
assert_equal(promote_func(f64,fld), np.dtype(longdouble))
assert_equal(promote_func(c128,c64), np.dtype(complex128))
assert_equal(promote_func(cld,c128), np.dtype(clongdouble))
assert_equal(promote_func(c64,fld), np.dtype(clongdouble))
# coercion between kinds
assert_equal(promote_func(b,i32), np.dtype(int32))
assert_equal(promote_func(b,u8), np.dtype(uint8))
assert_equal(promote_func(i8,u8), np.dtype(int16))
assert_equal(promote_func(u8,i32), np.dtype(int32))
assert_equal(promote_func(i64,u32), np.dtype(int64))
assert_equal(promote_func(u64,i32), np.dtype(float64))
assert_equal(promote_func(i32,f32), np.dtype(float64))
assert_equal(promote_func(i64,f32), np.dtype(float64))
assert_equal(promote_func(f32,i16), np.dtype(float32))
assert_equal(promote_func(f32,u32), np.dtype(float64))
assert_equal(promote_func(f32,c64), np.dtype(complex64))
assert_equal(promote_func(c128,f32), np.dtype(complex128))
assert_equal(promote_func(cld,f64), np.dtype(clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(array([b]),i8), np.dtype(int8))
assert_equal(promote_func(array([b]),u8), np.dtype(uint8))
assert_equal(promote_func(array([b]),i32), np.dtype(int32))
assert_equal(promote_func(array([b]),u32), np.dtype(uint32))
assert_equal(promote_func(array([i8]),i64), np.dtype(int8))
assert_equal(promote_func(u64,array([i32])), np.dtype(int32))
assert_equal(promote_func(i64,array([u32])), np.dtype(uint32))
assert_equal(promote_func(int32(-1),array([u64])), np.dtype(float64))
assert_equal(promote_func(f64,array([f32])), np.dtype(float32))
assert_equal(promote_func(fld,array([f32])), np.dtype(float32))
assert_equal(promote_func(array([f64]),fld), np.dtype(float64))
assert_equal(promote_func(fld,array([c64])), np.dtype(complex64))
assert_equal(promote_func(c64,array([f64])), np.dtype(complex128))
assert_equal(promote_func(complex64(3j),array([f64])),
np.dtype(complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(array([b]),f64), np.dtype(float64))
assert_equal(promote_func(array([b]),i64), np.dtype(int64))
assert_equal(promote_func(array([b]),u64), np.dtype(uint64))
assert_equal(promote_func(array([i8]),f64), np.dtype(float64))
assert_equal(promote_func(array([u16]),f64), np.dtype(float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(array([u16]), i32), np.dtype(uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(array([f32]),c128), np.dtype(complex64))
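    # check_promotion_cases is run twice: test_coercion feeds it the dtype
    # actually produced by np.add(a, b), while test_result_type feeds it
    # np.result_type, so the promotion table above must hold for both.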
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True,False]), np.array([-3,12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.longdouble(1.234) * a
assert_equal(b.dtype, np.dtype(np.longdouble),
"array type %s" % a.dtype)
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
b = 1.234j * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.clongdouble(1.234j) * a
assert_equal(b.dtype, np.dtype(np.clongdouble),
"array type %s" % a.dtype)
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
## Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
## a float32, shouldn't promote to float64
#a = np.array([1.0, 1.5], dtype=np.float32)
#t = np.array([True, False])
#b = t*a
#assert_equal(b, [1.0, 0.0])
#assert_equal(b.dtype, np.dtype('f4'))
#b = (1-t)*a
#assert_equal(b, [0.0, 1.5])
#assert_equal(b.dtype, np.dtype('f4'))
## Probably ~t (bitwise negation) is more proper to use here,
## but this is arguably less intuitive to understand at a glance, and
## would fail if 't' is actually an integer array instead of boolean:
#b = (~t)*a
#assert_equal(b, [0.0, 1.5])
#assert_equal(b.dtype, np.dtype('f4'))
def test_result_type(self):
self.check_promotion_cases(np.result_type)
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, np.complex))
assert_(not np.can_cast(np.complex, np.float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
assert_(np.can_cast('i4', 'S4'))
assert_(np.can_cast('i8', 'i8', 'no'))
assert_(not np.can_cast('<i8', '>i8', 'no'))
assert_(np.can_cast('<i8', '>i8', 'equiv'))
assert_(not np.can_cast('<i4', '>i8', 'equiv'))
assert_(np.can_cast('<i4', '>i8', 'safe'))
assert_(not np.can_cast('<i8', '>i4', 'safe'))
assert_(np.can_cast('<i8', '>i4', 'same_kind'))
assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
assert_(np.can_cast('<i8', '>u4', 'unsafe'))
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
class TestFromiter(TestCase):
def makegen(self):
for x in xrange(24):
yield x**2
def test_types(self):
ai32 = fromiter(self.makegen(), int32)
ai64 = fromiter(self.makegen(), int64)
af = fromiter(self.makegen(), float)
self.assertTrue(ai32.dtype == dtype(int32))
self.assertTrue(ai64.dtype == dtype(int64))
self.assertTrue(af.dtype == dtype(float))
def test_lengths(self):
expected = array(list(self.makegen()))
a = fromiter(self.makegen(), int)
a20 = fromiter(self.makegen(), int, 20)
self.assertTrue(len(a) == len(expected))
self.assertTrue(len(a20) == 20)
try:
fromiter(self.makegen(), int, len(expected) + 10)
except ValueError:
pass
else:
self.fail()
def test_values(self):
expected = array(list(self.makegen()))
a = fromiter(self.makegen(), int)
a20 = fromiter(self.makegen(), int, 20)
self.assertTrue(alltrue(a == expected,axis=0))
self.assertTrue(alltrue(a20 == expected[:20],axis=0))
class TestNonzero(TestCase):
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(array([])), 0)
assert_equal(np.nonzero(array([])), ([],))
assert_equal(np.count_nonzero(array(0)), 0)
assert_equal(np.nonzero(array(0)), ([],))
assert_equal(np.count_nonzero(array(1)), 1)
assert_equal(np.nonzero(array(1)), ([0],))
def test_nonzero_onedim(self):
x = array([1,0,2,-1,0,0,8])
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
x = array([(1,2),(0,0),(1,1),(-1,3),(0,7)],
dtype=[('a','i4'),('b','i2')])
assert_equal(np.count_nonzero(x['a']), 3)
assert_equal(np.count_nonzero(x['b']), 4)
assert_equal(np.nonzero(x['a']), ([0,2,3],))
assert_equal(np.nonzero(x['b']), ([0,2,3,4],))
def test_nonzero_twodim(self):
x = array([[0,1,0],[2,0,3]])
assert_equal(np.count_nonzero(x), 3)
assert_equal(np.nonzero(x), ([0,1,1],[1,0,2]))
x = np.eye(3)
assert_equal(np.count_nonzero(x), 3)
assert_equal(np.nonzero(x), ([0,1,2],[0,1,2]))
x = array([[(0,1),(0,0),(1,11)],
[(1,1),(1,0),(0,0)],
[(0,0),(1,5),(0,1)]], dtype=[('a','f4'),('b','u1')])
assert_equal(np.count_nonzero(x['a']), 4)
assert_equal(np.count_nonzero(x['b']), 5)
assert_equal(np.nonzero(x['a']), ([0,1,1,2],[2,0,1,1]))
assert_equal(np.nonzero(x['b']), ([0,0,1,2,2],[0,2,0,1,2]))
assert_equal(np.count_nonzero(x['a'].T), 4)
assert_equal(np.count_nonzero(x['b'].T), 5)
assert_equal(np.nonzero(x['a'].T), ([0,1,1,2],[1,1,2,0]))
assert_equal(np.nonzero(x['b'].T), ([0,0,1,2,2],[0,1,2,0,2]))
class TestIndex(TestCase):
def test_boolean(self):
a = rand(3,5,8)
V = rand(5,8)
g1 = randint(0,5,size=15)
g2 = randint(0,8,size=15)
V[g1,g2] = -V[g1,g2]
assert_((array([a[0][V>0],a[1][V>0],a[2][V>0]]) == a[:,V>0]).all())
class TestBinaryRepr(TestCase):
def test_zero(self):
assert_equal(binary_repr(0),'0')
def test_large(self):
assert_equal(binary_repr(10736848),'101000111101010011010000')
def test_negative(self):
assert_equal(binary_repr(-1), '-1')
assert_equal(binary_repr(-1, width=8), '11111111')
class TestBaseRepr(TestCase):
def test_base3(self):
assert_equal(base_repr(3**5, 3), '100000')
def test_positive(self):
assert_equal(base_repr(12, 10), '12')
assert_equal(base_repr(12, 10, 4), '000012')
assert_equal(base_repr(12, 4), '30')
assert_equal(base_repr(3731624803700888, 36), '10QR0ROFCEW')
def test_negative(self):
assert_equal(base_repr(-12, 10), '-12')
assert_equal(base_repr(-12, 10, 4), '-000012')
assert_equal(base_repr(-12, 4), '-30')
class TestArrayComparisons(TestCase):
def test_array_equal(self):
res = array_equal(array([1,2]), array([1,2]))
assert_(res)
assert_(type(res) is bool)
res = array_equal(array([1,2]), array([1,2,3]))
assert_(not res)
assert_(type(res) is bool)
res = array_equal(array([1,2]), array([3,4]))
assert_(not res)
assert_(type(res) is bool)
res = array_equal(array([1,2]), array([1,3]))
assert_(not res)
assert_(type(res) is bool)
def test_array_equiv(self):
res = array_equiv(array([1,2]), array([1,2]))
assert_(res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([1,2,3]))
assert_(not res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([3,4]))
assert_(not res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([1,3]))
assert_(not res)
assert_(type(res) is bool)
res = array_equiv(array([1,1]), array([1]))
assert_(res)
assert_(type(res) is bool)
res = array_equiv(array([1,1]), array([[1],[1]]))
assert_(res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([2]))
assert_(not res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([[1],[2]]))
assert_(not res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([[1,2,3],[4,5,6],[7,8,9]]))
assert_(not res)
assert_(type(res) is bool)
def assert_array_strict_equal(x, y):
assert_array_equal(x, y)
# Check flags
assert_(x.flags == y.flags)
# check endianness
assert_(x.dtype.isnative == y.dtype.isnative)
class TestClip(TestCase):
def setUp(self):
self.nr = 5
self.nc = 3
def fastclip(self, a, m, M, out=None):
if out is None:
return a.clip(m,M)
else:
return a.clip(m,M,out)
def clip(self, a, m, M, out=None):
# use slow-clip
selector = less(a, m)+2*greater(a, M)
return selector.choose((a, m, M), out=out)
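    # The "slow clip" above builds a selector of 0 (keep a), 1 (a < m) or
    # 2 (a > M) per element and lets choose() pick from (a, m, M); roughly
    # equivalent to a.clip(m, M), with illustrative names only:
    #   sel = less(a, m) + 2 * greater(a, M)
    #   clipped = sel.choose((a, m, M))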
# Handy functions
def _generate_data(self, n, m):
return randn(n, m)
def _generate_data_complex(self, n, m):
return randn(n, m) + 1.j *rand(n, m)
def _generate_flt_data(self, n, m):
return (randn(n, m)).astype(float32)
def _neg_byteorder(self, a):
a = asarray(a)
if sys.byteorder == 'little':
a = a.astype(a.dtype.newbyteorder('>'))
else:
a = a.astype(a.dtype.newbyteorder('<'))
return a
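    # _neg_byteorder returns ``a`` converted (via astype) to the dtype with
    # the byte order opposite to the host's, so the tests can exercise clip()
    # on non-native-endian data.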
def _generate_non_native_data(self, n, m):
data = randn(n, m)
data = self._neg_byteorder(data)
assert_(not data.dtype.isnative)
return data
def _generate_int_data(self, n, m):
return (10 * rand(n, m)).astype(int64)
def _generate_int32_data(self, n, m):
return (10 * rand(n, m)).astype(int32)
# Now the real test cases
def test_simple_double(self):
"""Test native double input with scalar min/max."""
a = self._generate_data(self.nr, self.nc)
m = 0.1
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_int(self):
"""Test native int input with scalar min/max."""
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_array_double(self):
"""Test native double input with array min/max."""
a = self._generate_data(self.nr, self.nc)
m = zeros(a.shape)
M = m + 0.5
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_nonnative(self):
"""Test non native double input with scalar min/max.
Test native double input with non native double scalar min/max."""
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
"Test native double input with non native double scalar min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_complex(self):
"""Test native complex input with native double scalar min/max.
Test native input with complex double scalar min/max.
"""
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
"Test native input with complex double scalar min/max."
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.j
M = 1. + 2.j
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_clip_non_contig(self):
"""Test clip for non contiguous native input and native scalar min/max."""
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = self.fastclip(a, -1.6, 1.7)
act = self.clip(a, -1.6, 1.7)
assert_array_strict_equal(ac, act)
def test_simple_out(self):
"""Test native double input with scalar min/max."""
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = zeros(a.shape)
act = zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_inout(self):
"""Test native int32 input with double min/max and int32 out."""
a = self._generate_int32_data(self.nr, self.nc)
m = float64(0)
M = float64(2)
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
"""Test native int32 input with int32 scalar min/max and int64 out."""
a = self._generate_int32_data(self.nr, self.nc)
m = int32(-1)
M = int32(1)
ac = zeros(a.shape, dtype = int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_inout(self):
"""Test native int32 input with double array min/max and int32 out."""
a = self._generate_int32_data(self.nr, self.nc)
m = zeros(a.shape, float64)
M = float64(1)
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_out(self):
"""Test native double input with scalar min/max and int out."""
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_inplace_01(self):
"""Test native double input with array min/max in-place."""
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_simple_inplace_02(self):
"""Test native double input with scalar min/max in-place."""
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
"""Test non contiguous double input with double scalar min/max in-place."""
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
"Test native double input with scalar min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_02(self):
"Test native int32 input with int32 scalar min/max."
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int32)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_03(self):
"Test native int32 input with float64 scalar min/max."
a = self._generate_int32_data(self.nr, self.nc)
m = -2
M = 4
ac = self.fastclip(a, float64(m), float64(M))
act = self.clip(a, float64(m), float64(M))
assert_array_strict_equal(ac, act)
def test_type_cast_04(self):
"Test native int32 input with float32 scalar min/max."
a = self._generate_int32_data(self.nr, self.nc)
m = float32(-2)
M = float32(4)
act = self.fastclip(a,m,M)
ac = self.clip(a,m,M)
assert_array_strict_equal(ac, act)
def test_type_cast_05(self):
"Test native int32 with double arrays min/max."
a = self._generate_int_data(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m * zeros(a.shape), M)
act = self.clip(a, m * zeros(a.shape), M)
assert_array_strict_equal(ac, act)
def test_type_cast_06(self):
"Test native with NON native scalar min/max."
a = self._generate_data(self.nr, self.nc)
m = 0.5
m_s = self._neg_byteorder(m)
M = 1.
act = self.clip(a, m_s, M)
ac = self.fastclip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_07(self):
"Test NON native with native array min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5 * ones(a.shape)
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
act = a_s.clip(m, M)
ac = self.fastclip(a_s, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_08(self):
"Test NON native with native scalar min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
ac = self.fastclip(a_s, m , M)
act = a_s.clip(m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_09(self):
"Test native with NON native array min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5 * ones(a.shape)
M = 1.
m_s = self._neg_byteorder(m)
assert_(not m_s.dtype.isnative)
ac = self.fastclip(a, m_s , M)
act = self.clip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_10(self):
"""Test native int32 with float min/max and float out for output argument."""
a = self._generate_int_data(self.nr, self.nc)
b = zeros(a.shape, dtype = float32)
m = float32(-0.5)
M = float32(1)
act = self.clip(a, m, M, out = b)
ac = self.fastclip(a, m , M, out = b)
assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
"Test non native with native scalar, min/max, out non native"
a = self._generate_non_native_data(self.nr, self.nc)
b = a.copy()
b = b.astype(b.dtype.newbyteorder('>'))
bt = b.copy()
m = -0.5
M = 1.
self.fastclip(a, m , M, out = b)
self.clip(a, m, M, out = bt)
assert_array_strict_equal(b, bt)
def test_type_cast_12(self):
"Test native int32 input and min/max and float out"
a = self._generate_int_data(self.nr, self.nc)
b = zeros(a.shape, dtype = float32)
m = int32(0)
M = int32(1)
act = self.clip(a, m, M, out = b)
ac = self.fastclip(a, m , M, out = b)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
"Test native double input with scalar min/max"
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = zeros(a.shape)
act = zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple2(self):
"Test native int32 input with double min/max and int32 out"
a = self._generate_int32_data(self.nr, self.nc)
m = float64(0)
M = float64(2)
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple_int32(self):
"Test native int32 input with int32 scalar min/max and int64 out"
a = self._generate_int32_data(self.nr, self.nc)
m = int32(-1)
M = int32(1)
ac = zeros(a.shape, dtype = int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_int32(self):
"Test native int32 input with double array min/max and int32 out"
a = self._generate_int32_data(self.nr, self.nc)
m = zeros(a.shape, float64)
M = float64(1)
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_outint32(self):
"Test native double input with scalar min/max and int out"
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_inplace_array(self):
"Test native double input with array min/max"
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_inplace_simple(self):
"Test native double input with scalar min/max"
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_func_takes_out(self):
""" Ensure that the clip() function takes an out= argument.
"""
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
a2 = clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a2, ac)
self.assertTrue(a2 is a)
class TestAllclose:
rtol = 1e-5
atol = 1e-8
def tst_allclose(self,x,y):
assert_(allclose(x,y), "%s and %s not close" % (x,y))
def tst_not_allclose(self,x,y):
assert_(not allclose(x,y), "%s and %s shouldn't be close" % (x,y))
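    # The test_ip_* methods below are nose-style test generators: they yield
    # (check, x, y) tuples instead of asserting directly, which is presumably
    # why this class does not inherit from TestCase.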
def test_ip_allclose(self):
"""Parametric test factory."""
arr = array([100,1000])
aran = arange(125).reshape((5,5,5))
atol = self.atol
rtol = self.rtol
data = [([1,0], [1,0]),
([atol], [0]),
([1], [1+rtol+atol]),
(arr, arr + arr*rtol),
(arr, arr + arr*rtol + atol*2),
(aran, aran + aran*rtol),
(inf, inf),
(inf, [inf])]
for (x,y) in data:
yield (self.tst_allclose,x,y)
def test_ip_not_allclose(self):
"""Parametric test factory."""
aran = arange(125).reshape((5,5,5))
atol = self.atol
rtol = self.rtol
data = [([inf,0], [1,inf]),
([inf,0], [1,0]),
([inf,inf], [1,inf]),
([inf,inf], [1,0]),
([-inf, 0], [inf, 0]),
([nan,0], [nan,0]),
([atol*2], [0]),
([1], [1+rtol+atol*2]),
(aran, aran + aran*atol + atol*2),
(array([inf,1]), array([0,inf]))]
for (x,y) in data:
yield (self.tst_not_allclose,x,y)
def test_no_parameter_modification(self):
x = array([inf,1])
y = array([0,inf])
allclose(x,y)
assert_array_equal(x,array([inf,1]))
assert_array_equal(y,array([0,inf]))
class TestStdVar(TestCase):
def setUp(self):
self.A = array([1,-1,1,-1])
self.real_var = 1
def test_basic(self):
assert_almost_equal(var(self.A),self.real_var)
assert_almost_equal(std(self.A)**2,self.real_var)
def test_ddof1(self):
assert_almost_equal(var(self.A,ddof=1),
self.real_var*len(self.A)/float(len(self.A)-1))
assert_almost_equal(std(self.A,ddof=1)**2,
self.real_var*len(self.A)/float(len(self.A)-1))
def test_ddof2(self):
assert_almost_equal(var(self.A,ddof=2),
self.real_var*len(self.A)/float(len(self.A)-2))
assert_almost_equal(std(self.A,ddof=2)**2,
self.real_var*len(self.A)/float(len(self.A)-2))
class TestStdVarComplex(TestCase):
def test_basic(self):
A = array([1,1.j,-1,-1.j])
real_var = 1
assert_almost_equal(var(A),real_var)
assert_almost_equal(std(A)**2,real_var)
class TestLikeFuncs(TestCase):
'''Test ones_like, zeros_like, and empty_like'''
def setUp(self):
self.data = [
# Array scalars
(array(3.), None),
(array(3), 'f8'),
# 1D arrays
(arange(6, dtype='f4'), None),
(arange(6), 'c16'),
# 2D C-layout arrays
(arange(6).reshape(2,3), None),
(arange(6).reshape(3,2), 'i1'),
# 2D F-layout arrays
(arange(6).reshape((2,3), order='F'), None),
(arange(6).reshape((3,2), order='F'), 'i1'),
# 3D C-layout arrays
(arange(24).reshape(2,3,4), None),
(arange(24).reshape(4,3,2), 'f4'),
# 3D F-layout arrays
(arange(24).reshape((2,3,4), order='F'), None),
(arange(24).reshape((4,3,2), order='F'), 'f4'),
# 3D non-C/F-layout arrays
(arange(24).reshape(2,3,4).swapaxes(0,1), None),
(arange(24).reshape(4,3,2).swapaxes(0,1), '?'),
]
def check_like_function(self, like_function, value):
for d, dtype in self.data:
# default (K) order, dtype
dz = like_function(d, dtype=dtype)
assert_equal(dz.shape, d.shape)
assert_equal(array(dz.strides)*d.dtype.itemsize,
array(d.strides)*dz.dtype.itemsize)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
            if value is not None:
assert_(all(dz == value))
# C order, default dtype
dz = like_function(d, order='C', dtype=dtype)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
            if value is not None:
assert_(all(dz == value))
# F order, default dtype
dz = like_function(d, order='F', dtype=dtype)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
            if value is not None:
assert_(all(dz == value))
# A order
dz = like_function(d, order='A', dtype=dtype)
assert_equal(dz.shape, d.shape)
if d.flags.f_contiguous:
assert_(dz.flags.f_contiguous)
else:
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
            if value is not None:
assert_(all(dz == value))
# Test the 'subok' parameter'
a = np.matrix([[1,2],[3,4]])
b = like_function(a)
assert_(type(b) is np.matrix)
b = like_function(a, subok=False)
assert_(not (type(b) is np.matrix))
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
def test_zeros_like(self):
self.check_like_function(np.zeros_like, 0)
def test_empty_like(self):
self.check_like_function(np.empty_like, None)
class _TestCorrelate(TestCase):
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.y = np.array([-1, -2, -3], dtype=dt)
self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt)
self.z2 = np.array([ -5., -14., -26., -20., -14., -8., -3.], dtype=dt)
def test_float(self):
self._setup(np.float)
z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, self.z2)
def test_object(self):
self._setup(Decimal)
z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, self.z2)
class TestCorrelate(_TestCorrelate):
old_behavior = True
def _setup(self, dt):
# correlate uses an unconventional definition so that correlate(a, b)
# == correlate(b, a), so force the corresponding outputs to be the same
# as well
_TestCorrelate._setup(self, dt)
self.z2 = self.z1
@dec.deprecated()
def test_complex(self):
x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
y = np.array([-1, -2j, 3+1j], dtype=np.complex)
r_z = np.array([3+1j, 6, 8-1j, 9+1j, -1-8j, -4-1j], dtype=np.complex)
z = np.correlate(x, y, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, r_z)
@dec.deprecated()
def test_float(self):
_TestCorrelate.test_float(self)
@dec.deprecated()
def test_object(self):
_TestCorrelate.test_object(self)
class TestCorrelateNew(_TestCorrelate):
old_behavior = False
def test_complex(self):
x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
y = np.array([-1, -2j, 3+1j], dtype=np.complex)
r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex)
#z = np.acorrelate(x, y, 'full')
#assert_array_almost_equal(z, r_z)
r_z = r_z[::-1].conjugate()
z = np.correlate(y, x, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, r_z)
class TestArgwhere:
def test_2D(self):
x = np.arange(6).reshape((2, 3))
assert_array_equal(np.argwhere(x > 1),
[[0, 2],
[1, 0],
[1, 1],
[1, 2]])
def test_list(self):
assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
class TestStringFunction:
def test_set_string_function(self):
a = np.array([1])
np.set_string_function(lambda x: "FOO", repr=True)
assert_equal(repr(a), "FOO")
np.set_string_function(None, repr=True)
assert_equal(repr(a), "array([1])")
np.set_string_function(lambda x: "FOO", repr=False)
assert_equal(str(a), "FOO")
np.set_string_function(None, repr=False)
assert_equal(str(a), "[1]")
if __name__ == "__main__":
run_module_suite()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.code import If, Switch, SwitchLogic
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.stream import HStream
from hwt.hdl.types.struct import HStruct
from hwt.hdl.value import HValue
from hwt.interfaces.std import Signal, HandshakeSync, VectSignal
from hwt.interfaces.utils import propagateClkRstn
from hwt.math import log2ceil
from hwt.serializer.mode import serializeParamsUniq
from hwt.synthesizer.param import Param
from hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal
from hwtLib.amba.axis_comp.frame_join import AxiS_FrameJoin
from hwtLib.amba.constants import RESP_OKAY
from hwtLib.amba.datapump.base import AxiDatapumpBase
from hwtLib.amba.datapump.intf import AxiRDatapumpIntf
from hwtLib.handshaked.fifo import HandshakedFifo
from hwtLib.handshaked.streamNode import StreamNode
from pyMathBitPrecise.bit_utils import mask
from hwt.hdl.types.defs import BIT
class TransEndInfo(HandshakeSync):
"""
.. hwt-autodoc::
"""
def _config(self):
self.ID_WIDTH = Param(0)
self.DATA_WIDTH = Param(64)
self.HAS_PROPAGATE_LAST = Param(True)
self.SHIFT_OPTIONS = Param((0,))
def _declr(self):
if self.ID_WIDTH:
self.id = VectSignal(self.ID_WIDTH)
        # rem is the number of valid bytes in the last word
        # (rem == 0 means that all bytes of the last word are valid)
self.rem = VectSignal(log2ceil(self.DATA_WIDTH // 8))
if self.SHIFT_OPTIONS != (0,):
self.shift = VectSignal(log2ceil(len(self.SHIFT_OPTIONS)))
if self.HAS_PROPAGATE_LAST:
self.propagateLast = Signal()
HandshakeSync._declr(self)
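# Illustrative note (editorial, not part of the original source): for
# DATA_WIDTH=64 the `rem` field above is log2ceil(64 // 8) == 3 bits wide;
# rem == 5 encodes "5 valid bytes in the last word" and rem == 0 encodes
# "all 8 bytes valid".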
@serializeParamsUniq
class Axi_rDatapump(AxiDatapumpBase):
"""
    Forward requests to the AXI address read channel
    and collect data from the AXI read data channel onto the driver data channel.
    * Blocks the data channel while there is no request pending.
    * If the request length is too large for a single AXI transaction, the transaction
      is internally split into multiple AXI transactions, but the returned read data
      is a single packet as originally requested.
    * errorRead stays high once there was an error on the AXI read channel;
      it does not affect the functionality of the unit.
    * The id used by the driver is different from the id used on AXI:
      on the driver side the id distinguishes between transactions, while on the
      AXI side it has to stay the same to guarantee that the transactions
      finish in order.
:see: :class:`hwtLib.amba.datapump.base.AxiDatapumpBase`
.. hwt-autodoc::
"""
def _declr(self):
super()._declr() # add clk, rst, axi addr channel and req channel
self.errorRead = Signal()._m()
if self.ALIGNAS != 8:
self.errorAlignment = Signal()._m()
with self._paramsShared():
self.axi.HAS_W = False
d = self.driver = AxiRDatapumpIntf()
d.ID_WIDTH = 0
d.MAX_BYTES = self.MAX_CHUNKS * (self.CHUNK_WIDTH // 8)
f = self.sizeRmFifo = HandshakedFifo(TransEndInfo)
f.ID_WIDTH = 0
f.DEPTH = self.MAX_TRANS_OVERLAP
f.SHIFT_OPTIONS = self.getShiftOptions()
def storeTransInfo(self, transInfo: TransEndInfo, isLast: bool):
if isLast:
rem = self.driver.req.rem
else:
rem = 0
offset = self.driver.req.addr[self.getSizeAlignBits():]
return [
transInfo.rem(rem),
transInfo.propagateLast(int(isLast)),
*([]
if self.isAlwaysAligned() else
[self.encodeShiftValue(transInfo.SHIFT_OPTIONS, offset, transInfo.shift), ]),
]
def remSizeToStrb(self, remSize: RtlSignal, strb: RtlSignal, isFirstWord, isLastWord):
sizeRm = self.sizeRmFifo.dataOut
STRB_W = strb._dtype.bit_length()
if self.isAlwaysAligned():
STRB_ALL = mask(STRB_W)
strbSwitch = Switch(remSize)\
.Case(0,
strb(STRB_ALL)
).add_cases(
[(i + 1, strb(mask(i + 1)))
for i in range(STRB_W - 1)]
).Default(
strb(None)
)
if isinstance(isLastWord, (bool, int, HValue)):
if isLastWord:
return strbSwitch
else:
return strb(STRB_ALL)
else:
return If(isLastWord,
strbSwitch
).Else(
strb(STRB_ALL)
)
else:
CHUNK = self.CHUNK_WIDTH // 8
MAX_BYTES = CHUNK * self.MAX_CHUNKS
STRB_ALL = mask(min(STRB_W, MAX_BYTES))
ALIGNAS = self.ALIGNAS
possibleBytesInLastWord = set()
assert self.DATA_WIDTH % ALIGNAS == 0, ("Required to resolve number of bytes in last word", self.DATA_WIDTH, ALIGNAS)
for CHUNK_CNT in range(1, min(self.MAX_CHUNKS, max(3, self.DATA_WIDTH // CHUNK * 3)) + 1):
for o in range(0, STRB_W, ALIGNAS // 8):
bytesInLastWord = (o + CHUNK * CHUNK_CNT) % (self.DATA_WIDTH // 8)
if bytesInLastWord in possibleBytesInLastWord:
break
possibleBytesInLastWord.add(bytesInLastWord)
possibleBytesInLastWord = sorted(possibleBytesInLastWord)
offsetsAlignmentCombinations = set([
            # (bytesInLastWord, byte offset of the value in the last word, index of shift option)
(min(bytesInLastWord, MAX_BYTES), sh // 8, sh_i)
for bytesInLastWord in possibleBytesInLastWord
for sh_i, sh in enumerate(sizeRm.SHIFT_OPTIONS)
if bytesInLastWord <= MAX_BYTES
])
offsetsAlignmentCombinations = sorted(offsetsAlignmentCombinations)
t = strb._dtype.from_py
# :attention: last word can be first word as well
MASK_ALL = mask(STRB_W)
WORD_W = strb._dtype.bit_length()
return \
SwitchLogic([
(remSize._eq(0 if bytesInLastWord == STRB_W else bytesInLastWord) & sizeRm.shift._eq(shift_i),
strb(
                    # disable prefix bytes if this is the first word
isFirstWord._ternary(t((MASK_ALL << shift) & MASK_ALL), t(MASK_ALL)) &
                    # disable suffix bytes if this is the last word
isLastWord._ternary(t(MASK_ALL >> ((WORD_W - bytesInLastWord - shift) % WORD_W)), t(MASK_ALL))
)
)
for bytesInLastWord, shift, shift_i in offsetsAlignmentCombinations
],
default=strb(None)
)
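    # Illustrative note (editorial): for DATA_WIDTH=64 the aligned branch above
    # maps remSize -> strb as 0 -> 0xFF (all 8 bytes valid), 1 -> 0x01,
    # 2 -> 0x03, ..., 7 -> 0x7F, i.e. mask(remSize) with 0 meaning "all bytes".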
def dataHandler(self, rErrFlag: RtlSignal, rmSizeOut: TransEndInfo):
rIn = self.axi.r
rOut = self.driver.r
if self.axi.LEN_WIDTH:
last = rIn.last
else:
last = BIT.from_py(1)
rInLast = last
if self.useTransSplitting():
last = rmSizeOut.propagateLast & last
if self.isAlwaysAligned():
# without shift logic
            if self.USE_STRB:
                self.remSizeToStrb(rmSizeOut.rem, rOut.strb, False, rIn.valid & last)
rOut.data(rIn.data)
rOut.last(last)
StreamNode(
masters=[rIn, rmSizeOut],
slaves=[rOut],
extraConds={
rmSizeOut: rInLast,
rOut:~rErrFlag
}
).sync()
else:
# align shifted incoming read data and optionally merge frames
aligner = AxiS_FrameJoin()
aligner.T = HStruct(
(HStream(Bits(self.CHUNK_WIDTH),
start_offsets=[i // 8 for i in self.getShiftOptions()],
frame_len=(1, self.MAX_CHUNKS)
), "f0"),
)
aligner.USE_STRB = False
aligner.DATA_WIDTH = self.DATA_WIDTH
self.aligner = aligner
isSingleWordOnly = self.CHUNK_WIDTH * self.MAX_CHUNKS <= self.DATA_WIDTH and self.ALIGNAS % (self.CHUNK_WIDTH * self.MAX_CHUNKS) == 0
if isSingleWordOnly:
first = BIT.from_py(1)
else:
                # first beat of the output frame (not necessarily of an input frame,
                # as multiple input frames may be merged into a single output frame)
first = self._reg(f"first", def_val=1)
If(StreamNode([rIn, rmSizeOut], [aligner.dataIn[0], ]).ack(),
first(last),
)
aligner.dataIn[0].data(rIn.data)
aligner.dataIn[0].last(last)
self.remSizeToStrb(rmSizeOut.rem, aligner.dataIn[0].keep, first, last)
StreamNode(
[rIn, rmSizeOut],
[aligner.dataIn[0], ],
extraConds={
rmSizeOut:~rErrFlag & rInLast,
}
).sync()
if self.USE_STRB:
rOut.strb(aligner.dataOut.keep)
rOut.data(aligner.dataOut.data)
rOut.last(aligner.dataOut.last)
StreamNode(
masters=[aligner.dataOut, ],
slaves=[rOut],
extraConds={
rOut:~rErrFlag
}
).sync()
def _impl(self):
r = self.axi.r
errorRead = self._reg("errorRead", def_val=0)
If(r.valid & (r.resp != RESP_OKAY),
errorRead(1)
)
self.errorRead(errorRead)
err = errorRead
if self.ALIGNAS != 8:
req = self.driver.req
errorAlignment = self._reg("errorAlignment", def_val=0)
If(req.vld & (req.addr[log2ceil(self.ALIGNAS // 8):] != 0),
errorAlignment(1)
)
self.errorAlignment(errorAlignment)
err = err | errorAlignment
self.addrHandler(self.driver.req, self.axi.ar, self.sizeRmFifo.dataIn, err)
self.dataHandler(err, self.sizeRmFifo.dataOut)
propagateClkRstn(self)
if __name__ == "__main__":
from hwt.synthesizer.utils import to_rtl_str
# import sys
# sys.setrecursionlimit(5000)
u = Axi_rDatapump()
u.DATA_WIDTH = 512
u.MAX_CHUNKS = 1
u.CHUNK_WIDTH = 32
u.ALIGNAS = 32
print(to_rtl_str(u))
|
|
from __future__ import division, absolute_import
import importlib
import PyDSTool as dst
import math, numpy, scipy
# for convenience and compatibility
import numpy as np
import scipy as sp
class workspace(dst.args):
# override to ensure name and simpler repr
def __init__(self, name, **kw):
self.__dict__ = kw
self._name = name
def __repr__(self):
return self._infostr(0, 'Workspace ' + self._name,
ignore_underscored=True)
__str__ = __repr__
class calc_context(object):
"""
    The __init__ method of a concrete sub-class should insert any core
    parameters that are needed into the 'shared' attribute.
    The _update_order list attribute can be edited later to ensure the correct
    order of calculations in case of inter-dependence.
"""
def __init__(self, sim, name, *args, **kwargs):
self.name = name
self.sim = sim
self._update_order = []
self.workspace = workspace(name)
self._refresh_init_args = []
# one function to one workspace variable
self._functions_to_workspace = {}
try:
self.local_init(*args, **kwargs)
        except Exception as exc:
            print("local_init could not complete at initialization: %s" % exc)
def __call__(self, *args, **kwargs):
"""
Refresh workspace after update in attached simulator.
Option to pass positional or keyword arguments that will be used
to refresh the local_init() method for this context.
Returns the updated workspace.
"""
if len(args) + len(kwargs) == 0:
self.local_init(*self._refresh_init_args)
else:
self.local_init(*args, **kwargs)
for fn_name in self._update_order:
f = getattr(self, fn_name)
# discard result but keep side-effects on workspace update
f()
return self.workspace
def local_init(self, *args, **kwargs):
"""
Optionally override in concrete sub-class
"""
pass
def declare(self, module_name, alias):
"""
Inject module into global namespace for later reference.
Equivalent to "import <module_name> as <alias>"
"""
mod = importlib.import_module(module_name)
globals()[alias] = mod
def attach(self, fn_seq):
"""Expect each function to have been decorated using
@prep(<attr_name>)
"""
if callable(fn_seq):
# make a singleton list, for simplicity
fn_seq = [fn_seq]
for fn in fn_seq:
self._attach(fn)
def _attach(self, fn):
"""
        Functions need to be wrapped individually in their own closure
        to avoid unintended sharing of wrapped_fn between attached functions
"""
def wrapped_fn():
val = fn(self)
self.workspace[fn.attr_name] = val
#print("Set workspace for value of %s is"%fn.attr_name, val)
return val
self.__setattr__(fn.__name__, wrapped_fn)
self._functions_to_workspace[fn.__name__] = (wrapped_fn, fn.attr_name)
# default to adding new function to end of update order
self._update_order.append(fn.__name__)
try:
val = getattr(self, fn.__name__)() #fn(self)
except Exception as e:
print("Could not compute value at attachment time for function %s"%fn.__name__)
print(" Problem was: %s" % str(e))
# initialize with None now, to declare in the meantime
self.workspace[fn.attr_name] = None
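# Illustrative sketch (editorial, not part of PyDSTool): a minimal concrete
# context. The simulator attribute `pars`, the point field 'x' and the names
# below are hypothetical and only illustrate prep()/attach() usage.
#
#   class speed_context(calc_context):
#       def local_init(self, pt=None):
#           self.workspace.pt = pt
#
#   @prep('speed')
#   def _speed(con):
#       return con.sim.pars['k'] * con.workspace.pt['x']
#
#   con = speed_context(sim, 'speeds')
#   con.attach(_speed)   # evaluates _speed and stores con.workspace.speed
#   con(pt=some_point)   # re-runs local_init, then refreshes attached measures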
class general_context(calc_context):
"""
General purpose context
"""
pass
def make_measure(fn_name, fn_spec, **defs):
"""Dynamically create a python function for use with calculation
context.
"""
all_defs = defs.copy()
q = dst.QuantSpec('_dummy_', fn_spec, treatMultiRefs=False)
import PyDSTool.parseUtils as pu
mapping = pu.symbolMapClass()
assumed_modules = []
tokens = q.parser.tokenized
for sym in q.freeSymbols:
# Hack, for now: if first (therefore, assumed all)
# occurrences of symbol are in quotes, then don't convert.
# Better solution would be to make parser create "x" as a single
# symbol, at least with a detect quote option
first_ix = tokens.index(sym)
if first_ix == 0 or (first_ix > 0 and tokens[first_ix-1] not in ['"', "'"]):
if pu.isHierarchicalName(sym):
parts = sym.split('.')
if parts[0] == 'sim':
mapping[sym] = 'con.'+sym
## elif parts[0] == 'bombardier':
## # special case as this factory function is defined in that
## # module so that reference will fail at runtime: remove
## # 'bombardier' prefix
## rest_sym = '.'.join(parts[1:])
## mapping[sym] = rest_sym
## scope = globals()
## # locals override
## scope.update(locals())
## if parts[1] in scope:
## all_defs[parts[1]] = scope[parts[1]]
## else:
## raise ValueError("Cannot resolve scope of symbol '%s'"%sym)
else:
# assume module reference
assumed_modules.append(parts[0])
# record here to ensure inclusion in dyn_dummy
mapping[sym] = 'self.'+sym
else:
mapping[sym] = 'con.workspace.'+sym
elif first_ix > 0 and tokens[first_ix-1] in ['"', "'"]:
# put this symbol in the mapping values to ensure not included
# as an argument to the function
mapping[sym] = sym
q.mapNames(mapping)
import types
for module_name in assumed_modules:
global_scope = globals()
# test if module name in scope
if module_name in global_scope:
_mod = global_scope[module_name]
if isinstance(_mod, types.ModuleType):
all_defs[module_name] = _mod
    # dyn_dummy contains dummy mappings but declares symbols whose evaluation
    # is deferred until runtime
dyn_dummy = dict(zip(mapping.values(), ['']*len(mapping)))
funq = dst.expr2fun(q, ensure_args=['con'], ensure_dynamic=dyn_dummy,
for_funcspec=False, fn_name=fn_name,
**all_defs)
# decorate output
funq.attr_name = fn_name
return funq
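# Illustrative usage (editorial; the symbol names are hypothetical): free
# symbols in the spec that are not module references resolve against
# `con.workspace`, and names prefixed with 'sim.' resolve against `con.sim`.
#
#   KE = make_measure('KE', '0.5 * mass * vel**2')
#   con.attach(KE)   # con.workspace.KE computed from con.workspace.mass / .vel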
# deprecated in favour of make_measure
def prep(attr_name):
"""
Create decorator of a new measure [DEPRECATED -- see make_measure]
"""
def decorator(fn):
fn.attr_name = attr_name
return fn
return decorator
def map_workspace(con, pts, *args):
"""
Returns a list of dictionaries, each representing the state of the
calc_context workspace for each of the points given. Optional
positional arguments will be passed first to the calc_context when
calling it.
This assumes the calc_context local_init accepts `pt` as an argument.
"""
wseq = []
for pt in pts:
con(*args, pt=pt)
wseq.append(dst.filteredDict(con.workspace.__dict__, ['_name'], neg=True))
return wseq
def extract_variable_from_wseq(varname, wseq):
return [w[varname] for w in wseq]
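# Illustrative usage (editorial; `con` and the names below are hypothetical):
#
#   wseq = map_workspace(con, traj.sample())            # one dict per point
#   speeds = extract_variable_from_wseq('speed', wseq)  # pull out one variable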
|
|
##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import imath
import IECore
import Gaffer
import GafferUI
from GafferUI.PlugValueWidget import sole
from Qt import QtCore
from Qt import QtWidgets
from Qt import QtCompat
from . import _Algo
from . import _ClipboardAlgo
from . import _ProxyModels
from ._CellPlugValueWidget import _CellPlugValueWidget
from ._PlugTableDelegate import _PlugTableDelegate
from ._PlugTableModel import _PlugTableModel
from ._ProxySelectionModel import _ProxySelectionModel
from ._SectionChooser import _SectionChooser
from .._TableView import _TableView
class _PlugTableView( GafferUI.Widget ) :
Mode = IECore.Enum.create( "RowNames", "Defaults", "Cells" )
def __init__( self, selectionModel, mode, **kw ) :
tableView = _NavigableTable()
GafferUI.Widget.__init__( self, tableView, **kw )
self.__mode = mode
self.__setupModels( selectionModel )
# Headers and column sizing
QtCompat.setSectionResizeMode( tableView.verticalHeader(), QtWidgets.QHeaderView.Fixed )
tableView.verticalHeader().setDefaultSectionSize( 25 )
tableView.verticalHeader().setVisible( False )
self.__horizontalHeader = GafferUI.Widget( QtWidgets.QHeaderView( QtCore.Qt.Horizontal, tableView ) )
self.__horizontalHeader._qtWidget().setDefaultAlignment( QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter )
tableView.setHorizontalHeader( self.__horizontalHeader._qtWidget() )
self.__horizontalHeader.buttonPressSignal().connect( Gaffer.WeakMethod( self.__headerButtonPress ), scoped = False )
if mode in ( self.Mode.Cells, self.Mode.Defaults ) :
self.__applyColumnWidthMetadata()
self.__applyColumnOrderMetadata()
tableView.horizontalHeader().sectionResized.connect( Gaffer.WeakMethod( self.__columnResized ) )
tableView.horizontalHeader().sectionMoved.connect( Gaffer.WeakMethod( self.__columnMoved ) )
self.__ignoreColumnResized = False
self.__ignoreColumnMoved = False
else : # RowNames mode
if self.__canReorderRows() :
tableView.verticalHeader().setVisible( True )
tableView.verticalHeader().sectionMoved.connect( Gaffer.WeakMethod( self.__rowMoved ) )
self.__ignoreRowMoved = False
tableView.horizontalHeader().resizeSection( 1, 22 )
self.__applyRowNamesWidth()
# Style the row enablers as toggles rather than checkboxes.
## \todo Do the same for cells containing NameValuePlugs with enablers. This is tricky
# because we need to do it on a per-cell basis, so will need to use `_CellPlugItemDelegate.paint()`
# instead.
tableView.setProperty( "gafferToggleIndicator", True )
self.__plugMetadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal( tableView.model().rowsPlug().node() ).connect(
Gaffer.WeakMethod( self.__plugMetadataChanged ), scoped = False
)
Gaffer.Metadata.nodeValueChangedSignal().connect( Gaffer.WeakMethod( self.__nodeMetadataChanged ), scoped = False )
self.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ), scoped = False )
self.dragMoveSignal().connect( Gaffer.WeakMethod( self.__dragMove ), scoped = False )
self.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__dragLeave ), scoped = False )
self.dropSignal().connect( Gaffer.WeakMethod( self.__drop ), scoped = False )
if mode != self.Mode.Defaults :
tableView.horizontalHeader().setVisible( False )
self.__applyReadOnlyMetadata()
# Column visibility
self.__visibleSection = None
tableView.model().modelReset.connect( Gaffer.WeakMethod( self.__modelReset ) )
# Selection and editing. We disable all edit triggers so that
# the QTableView itself won't edit anything, and we then implement
# our own editing via PlugValueWidgets in _EditWindow.
tableView.setEditTriggers( tableView.NoEditTriggers )
tableView.setSelectionMode( tableView.ExtendedSelection )
tableView.setSelectionBehavior( tableView.SelectItems )
self.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ), scoped = False )
self.buttonDoubleClickSignal().connect( Gaffer.WeakMethod( self.__buttonDoubleClick ), scoped = False )
self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ), scoped = False )
# Drawing
tableView.setItemDelegate( _PlugTableDelegate( tableView ) )
# Size and scrolling
tableView.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
tableView.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
tableView.setHorizontalScrollMode( tableView.ScrollPerPixel )
tableView.setSizePolicy(
QtWidgets.QSizePolicy.Fixed if mode == self.Mode.RowNames else QtWidgets.QSizePolicy.Maximum,
QtWidgets.QSizePolicy.Fixed if mode == self.Mode.Defaults else QtWidgets.QSizePolicy.Maximum,
)
def plugAt( self, position ) :
point = self._qtWidget().viewport().mapFrom(
self._qtWidget(),
QtCore.QPoint( position.x, position.y )
)
index = self._qtWidget().indexAt( point )
return self._qtWidget().model().plugForIndex( index )
def selectedPlugs( self ) :
selection = self._qtWidget().selectionModel().selectedIndexes()
model = self._qtWidget().model()
return [ model.plugForIndex( i ) for i in selection ]
def editPlugs( self, plugs, scrollTo = True, allowDirectEditing = True, position = None ) :
tableView = self._qtWidget()
selectionModel = tableView.selectionModel()
indexes = [ tableView.model().indexForPlug( plug ) for plug in plugs ]
assert( all( [ index.isValid() for index in indexes ] ) )
if not all( [ index.flags() & ( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable ) for index in indexes ] ) :
return
if scrollTo :
tableView.scrollTo( indexes[ -1 ] )
self.__selectIndexes( indexes )
if position is None :
visibleRect = tableView.visualRect( selectionModel.currentIndex() )
rect = QtCore.QRect(
tableView.viewport().mapToGlobal( visibleRect.topLeft() ),
tableView.viewport().mapToGlobal( visibleRect.bottomRight() )
)
bound = imath.Box2i( imath.V2i( rect.left(), rect.bottom() ), imath.V2i( rect.right(), rect.top() ) )
else :
bound = imath.Box2i( position, position )
self.__showEditor( plugs, bound, allowDirectEditing )
def setVisibleSection( self, sectionName ) :
if self.__visibleSection == sectionName :
return
self.__visibleSection = sectionName
self.__applyColumnVisibility()
def getVisibleSection( self ) :
return self.__visibleSection
def __setupModels( self, selectionModel ) :
tableView = self._qtWidget()
if self.__mode == self.Mode.RowNames :
viewProxy = _ProxyModels.RowNamesProxyModel( tableView )
elif self.__mode == self.Mode.Cells :
viewProxy = _ProxyModels.CellsProxyModel( tableView )
else :
viewProxy = _ProxyModels.DefaultsProxyModel( tableView )
viewProxy.setSourceModel( selectionModel.model() )
tableView.setModel( viewProxy )
selectionProxy = _ProxySelectionModel( viewProxy, selectionModel, tableView )
tableView.setSelectionModel( selectionProxy )
def __columnMoved( self, logicalIndex, oldVisualIndex, newVisualIndex ) :
if self.__ignoreColumnMoved :
return
model = self._qtWidget().model()
header = self._qtWidget().horizontalHeader()
with Gaffer.UndoScope( model.rowsPlug().ancestor( Gaffer.ScriptNode ) ) :
with Gaffer.Signals.BlockedConnection( self.__plugMetadataChangedConnection ) :
for logicalIndex in range( 0, header.count() ) :
plug = model.plugForIndex( model.index( 0, logicalIndex ) )
Gaffer.Metadata.registerValue( plug, "spreadsheet:columnIndex", header.visualIndex( logicalIndex ) )
def __columnResized( self, logicalIndex, oldSize, newSize ) :
if self.__ignoreColumnResized :
return
model = self._qtWidget().model()
plug = model.plugForIndex( model.index( 0, logicalIndex ) )
with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ), mergeGroup = "_PlugTableView{}{}".format( id( self ), logicalIndex ) ) :
with Gaffer.Signals.BlockedConnection( self.__plugMetadataChangedConnection ) :
Gaffer.Metadata.registerValue( plug, "spreadsheet:columnWidth", newSize )
def __applyReadOnlyMetadata( self ) :
readOnly = Gaffer.MetadataAlgo.readOnly( self._qtWidget().model().rowsPlug() )
if self.__mode in ( self.Mode.Cells, self.Mode.Defaults ) :
self._qtWidget().horizontalHeader().setSectionsMovable( not readOnly )
QtCompat.setSectionResizeMode(
self._qtWidget().horizontalHeader(),
QtWidgets.QHeaderView.Fixed if readOnly else QtWidgets.QHeaderView.Interactive
)
else :
# Rows mode
self._qtWidget().verticalHeader().setSectionsMovable( not readOnly )
def __applyColumnWidthMetadata( self, cellPlug = None ) :
if self.__mode == self.Mode.RowNames :
return
defaultCells = self._qtWidget().model().rowsPlug().defaultRow()["cells"]
if cellPlug is not None :
indicesAndPlugs = [ ( defaultCells.children().index( cellPlug ), cellPlug ) ]
else :
indicesAndPlugs = enumerate( defaultCells )
try :
self.__ignoreColumnResized = True
for index, plug in indicesAndPlugs :
width = Gaffer.Metadata.value( plug, "spreadsheet:columnWidth" )
if width is None :
width = self._qtWidget().horizontalHeader().defaultSectionSize()
self._qtWidget().horizontalHeader().resizeSection( index, width )
finally :
self.__ignoreColumnResized = False
def __applyColumnOrderMetadata( self ) :
if self.__mode == self.Mode.RowNames :
return
rowsPlug = self._qtWidget().model().rowsPlug()
header = self._qtWidget().horizontalHeader()
for index, plug in enumerate( rowsPlug.defaultRow()["cells"] ) :
visualIndex = Gaffer.Metadata.value( plug, "spreadsheet:columnIndex" )
self.__ignoreColumnMoved = True
header.moveSection( header.visualIndex( index ), visualIndex if visualIndex is not None else index )
self.__ignoreColumnMoved = False
def __applyColumnVisibility( self ) :
if self.__mode == self.Mode.RowNames :
return
# Changing column visibility seems to cause the
# `sectionResized()` signal to be emitted unnecessarily,
# so we suppress the slot we've attached to it.
self.__ignoreColumnResized = True
try :
rowsPlug = self._qtWidget().model().rowsPlug()
for i, plug in enumerate( rowsPlug.defaultRow()["cells"].children() ) :
if self.__visibleSection is not None :
visible = _SectionChooser.getSection( plug ) == self.__visibleSection
else :
visible = True
if visible :
self._qtWidget().showColumn( i )
else :
self._qtWidget().hideColumn( i )
finally :
self.__ignoreColumnResized = False
def __canReorderRows( self ) :
rowsPlug = self._qtWidget().model().rowsPlug()
if isinstance( rowsPlug.node(), Gaffer.Reference ) :
reference = rowsPlug.node()
# Default row (`[0]`) is irrelevant because it is always
# referenced and we won't try to reorder it anyway.
for row in rowsPlug.children()[1:] :
if not reference.isChildEdit( row ) :
return False
return True
def __rowMoved( self, logicalIndex, oldVisualIndex, newVisualIndex ) :
if self.__ignoreRowMoved :
return
# Qt implements row moves as a visual transform on top of the model.
# We want to implement them as edits to the order of the underlying
# RowPlugs. So we translate the change in visual transform to a call to
# `reorderChildren()`, and then reset the visual transform.
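        # Editorial example: with logical rows [r0, r1, r2] (default row excluded),
        # dragging r2 to visual position 0 yields `rows == [default, r2, r0, r1]`
        # below, which `reorderChildren()` then makes the real plug order.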
assert( oldVisualIndex == logicalIndex ) # Otherwise a previous visual transform reset failed
# Reorder rows
rowsPlug = self._qtWidget().model().rowsPlug()
rows = list( rowsPlug.children() )
header = self._qtWidget().verticalHeader()
assert( len( rows ) == header.count() + 1 ) # Header doesn't know about the default row
rows = [ rows[0] ] + [ rows[header.logicalIndex(i)+1] for i in range( 0, header.count() ) ]
with Gaffer.UndoScope( rowsPlug.ancestor( Gaffer.ScriptNode ) ) :
rowsPlug.reorderChildren( rows )
# Reset visual transform
self.__ignoreRowMoved = True
for i in range( min( oldVisualIndex, newVisualIndex ), max( oldVisualIndex, newVisualIndex ) + 1 ) :
header.moveSection( header.visualIndex( i ), i )
self.__ignoreRowMoved = False
def __applyRowNamesWidth( self ) :
if self.__mode != self.Mode.RowNames :
return
width = self.__getRowNameWidth()
self._qtWidget().horizontalHeader().resizeSection( 0, width )
@GafferUI.LazyMethod()
def __applyColumnOrderLazily( self ) :
self.__applyColumnOrderMetadata()
def __nodeMetadataChanged( self, nodeTypeId, key, node ) :
if Gaffer.MetadataAlgo.readOnlyAffectedByChange( self._qtWidget().model().rowsPlug(), nodeTypeId, key, node ) :
self.__applyReadOnlyMetadata()
def __plugMetadataChanged( self, plug, key, reason ) :
rowsPlug = self._qtWidget().model().rowsPlug()
if Gaffer.MetadataAlgo.readOnlyAffectedByChange( rowsPlug, plug, key ) :
self.__applyReadOnlyMetadata()
if self.__mode == self.Mode.RowNames :
if plug.isSame( rowsPlug.defaultRow() ) and key == "spreadsheet:rowNameWidth" :
self.__applyRowNamesWidth()
return
else :
if not rowsPlug.isAncestorOf( plug ) :
return
if key == "spreadsheet:columnWidth" :
if plug.parent() == rowsPlug.defaultRow()["cells"] :
self.__applyColumnWidthMetadata( cellPlug = plug )
elif key == "spreadsheet:columnIndex" :
# Typically we get a flurry of edits to columnIndex at once,
# so we use a lazy method to update the order once everything
# has been done.
self.__applyColumnOrderLazily()
elif key == "spreadsheet:section" :
self.__applyColumnVisibility()
def __dragEnter( self, widget, event ) :
if Gaffer.MetadataAlgo.readOnly( self._qtWidget().model().rowsPlug() ) :
return False
if not isinstance( event.data, ( Gaffer.Plug, IECore.Data ) ) :
return False
self.__currentDragDestinationPlug = None
return True
def __dragMove( self, widget, event ) :
destinationPlug = self.plugAt( event.line.p0 )
if self.__currentDragDestinationPlug == destinationPlug :
return
self.__currentDragDestinationPlug = destinationPlug
selectionModel = self._qtWidget().selectionModel()
selectionModel.clear()
if destinationPlug is None:
return
select = False
if isinstance( event.data, IECore.Data ) :
select = _ClipboardAlgo.canPasteCells( event.data, [ [ destinationPlug ] ] )
else :
sourcePlug, targetPlug = self.__connectionPlugs( event.data, destinationPlug )
select = self.__canConnect( sourcePlug, targetPlug )
if select :
selectionModel.select(
self._qtWidget().model().indexForPlug( destinationPlug ),
QtCore.QItemSelectionModel.SelectCurrent
)
def __dragLeave( self, widget, event ) :
self.__currentDragDestinationPlug = None
self._qtWidget().selectionModel().clear()
def __drop( self, widget, event ) :
self.__currentDragDestinationPlug = None
destinationPlug = self.plugAt( event.line.p0 )
if isinstance( event.data, IECore.Data ) :
if not _ClipboardAlgo.canPasteCells( event.data, [ [ destinationPlug ] ] ) :
return False
with Gaffer.UndoScope( destinationPlug.ancestor( Gaffer.ScriptNode ) ) :
context = self.ancestor( GafferUI.PlugValueWidget ).getContext()
_ClipboardAlgo.pasteCells( event.data, [ [ destinationPlug ] ], context.getTime() )
else :
sourcePlug, targetPlug = self.__connectionPlugs( event.data, destinationPlug )
if not self.__canConnect( sourcePlug, targetPlug ) :
return False
with Gaffer.UndoScope( targetPlug.ancestor( Gaffer.ScriptNode ) ) :
targetPlug.setInput( sourcePlug )
index = self._qtWidget().model().indexForPlug( destinationPlug )
selectionModel = self._qtWidget().selectionModel()
selectionModel.select( index, QtCore.QItemSelectionModel.ClearAndSelect )
selectionModel.setCurrentIndex( index, QtCore.QItemSelectionModel.ClearAndSelect )
# People regularly have spreadsheets in separate windows. Ensure the
# sheet has focus after drop has concluded. It will have returned to
# the origin of the drag.
def focusOnIdle() :
if not self._qtWidget().isActiveWindow() :
self._qtWidget().activateWindow()
self._qtWidget().setFocus()
return False
GafferUI.EventLoop.addIdleCallback( focusOnIdle )
return True
def __connectionPlugs( self, sourcePlug, targetPlug ) :
if isinstance( targetPlug, Gaffer.Spreadsheet.CellPlug ) :
targetPlug = targetPlug[ "value" ]
if isinstance( targetPlug, Gaffer.NameValuePlug ) :
if not isinstance( sourcePlug, Gaffer.NameValuePlug ) :
targetPlug = targetPlug[ "value" ]
else :
if isinstance( sourcePlug, Gaffer.NameValuePlug ) :
sourcePlug = sourcePlug[ "value" ]
return sourcePlug, targetPlug
def __canConnect( self, sourcePlug, targetPlug ) :
if targetPlug is None :
return False
if Gaffer.MetadataAlgo.readOnly( targetPlug ) :
return False
if any( Gaffer.MetadataAlgo.getReadOnly( p ) for p in Gaffer.Plug.RecursiveRange( targetPlug ) ) :
return False
with self.ancestor( GafferUI.PlugValueWidget ).getContext() :
if not targetPlug.ancestor( Gaffer.Spreadsheet.RowPlug )["enabled"].getValue() :
return False
if not targetPlug.acceptsInput( sourcePlug ) :
return False
return True
def __positionInCellGrid( self, position ) :
# The event coordinate origin includes the header view.
# Queries to indexAt etc... need the origin to be in the
# table view itself.
cellPosition = imath.V3f( position )
if self._qtWidget().verticalHeader().isVisible() :
cellPosition.x -= self._qtWidget().verticalHeader().frameRect().width()
if self._qtWidget().horizontalHeader().isVisible() :
cellPosition.y -= self._qtWidget().horizontalHeader().frameRect().height()
return cellPosition
def __buttonPress( self, widget, event ) :
if event.buttons != event.Buttons.Right :
return False
point = self._qtWidget().viewport().mapFrom(
self._qtWidget(),
QtCore.QPoint( event.line.p0.x, event.line.p0.y )
)
index = self._qtWidget().indexAt( point )
# Disabled items won't show up in the selection model
if not index.flags() & QtCore.Qt.ItemIsEnabled :
return True
# Ensure the cell that was clicked is selected. This avoids any ambiguity
# as to whether the menu is operating on the cell that was right-clicked, or
# the cells that are selected. As double-click to edit also resets selection
# (Qt default) then this offers the most consistent experience.
selectionModel = self._qtWidget().selectionModel()
if not selectionModel.isSelected( index ) :
selectionModel.select( index, selectionModel.ClearAndSelect )
selectedPlugs = self.selectedPlugs()
if len( selectedPlugs ) == 1 :
plug = next( iter( selectedPlugs ) )
if isinstance( plug, Gaffer.Spreadsheet.CellPlug ) :
plug = plug["value"]
## \todo We need to make this temporary PlugValueWidget just so we
# can show a plug menu. We should probably refactor so we can do it
# without the widget, but this would touch `PlugValueWidget.popupMenuSignal()`
# and all connected client code.
self.__menuPlugValueWidget = GafferUI.PlugValueWidget.create( plug )
definition = self.__menuPlugValueWidget._popupMenuDefinition()
else :
definition = IECore.MenuDefinition()
if self.__mode == self.Mode.RowNames :
self.__prependRowMenuItems( definition, selectedPlugs )
else :
self.__prependCellMenuItems( definition, selectedPlugs )
self.__plugMenu = GafferUI.Menu( definition )
self.__plugMenu.popup()
return True
def __buttonDoubleClick( self, widget, event ) :
if event.buttons != event.Buttons.Left :
return False
# Consistency is a little tricky here. Ideally we'd have the same
# interaction for all plug types, without adding unnecessary steps. We
# standardise 'return/double click opens edit window'. But in the
# interest of simplifying common steps, there are the following
# exceptions.
#
# - Bools: Return/double-click toggles the value. Requires right-click
# to display the edit window.
#
# - Presets: Return/Double click displays the popup menu, requires right-click
# to display the edit window.
point = self._qtWidget().viewport().mapFrom(
self._qtWidget(),
QtCore.QPoint( event.line.p0.x, event.line.p0.y )
)
index = self._qtWidget().indexAt( point )
if not index.flags() & QtCore.Qt.ItemIsEnabled :
return True
plug = self._qtWidget().model().plugForIndex( index )
if plug is None :
return False
if self._qtWidget().model().presentsCheckstate( index ) :
valuePlug = plug["value"] if isinstance( plug, Gaffer.Spreadsheet.CellPlug ) else plug
self.__toggleBooleans( [ valuePlug ] )
else :
self.editPlugs( [ plug ], scrollTo = False, position = GafferUI.Widget.mousePosition() )
return True
def __keyPress( self, widget, event ) :
forRows = self.__mode == self.Mode.RowNames
if event.key == "Space" :
self.__spaceBarPressed()
return True
if event.modifiers == event.Modifiers.None_ :
if event.key == "Return" :
self.__returnKeyPress()
return True
if event.key == "D" :
# We don't have a shortcut for managing the enabled state of rows, because when a
# row is disabled (via Qt.ItemIsEnabled flag), those indices are no longer reported
                # from the selection model. So you could use the key to turn them off, but it's
# hard to turn them back on again with it.
if not forRows :
self.__toggleCellEnabledState()
return True
elif event.modifiers == event.Modifiers.Control :
if event.key in ( "C", "V", "X" ) :
if event.key == "C" :
self.__copyRows() if forRows else self.__copyCells()
elif event.key == "V" :
self.__pasteRows() if forRows else self.__pasteCells()
return True
return False
def __returnKeyPress( self ) :
# If the selection is presented as a checkbox, toggle rather than
# opening the edit window. This matches the single-click toggle mouse
# interaction.
selectionModel = self._qtWidget().selectionModel()
selectedIndexes = selectionModel.selectedIndexes()
if not selectedIndexes :
return
model = selectionModel.model()
if all( [ model.presentsCheckstate( i ) for i in selectedIndexes ] ) :
valuePlugs = [ model.valuePlugForIndex( i ) for i in selectedIndexes ]
self.__toggleBooleans( valuePlugs )
else :
self.__editSelectedPlugs()
def __spaceBarPressed( self ) :
# Qt has the odd behaviour that space will toggle the selection of the
# focused cell, unless it has a checked state, and then it'll toggle
# that. As we support `return` to do that, then make sure space only
# ever toggles the selection state of the focused cell.
currentIndex = self._qtWidget().selectionModel().currentIndex()
if currentIndex.isValid() :
self._qtWidget().selectionModel().select( currentIndex, QtCore.QItemSelectionModel.Toggle )
def __headerButtonPress( self, header, event ) :
if event.buttons != event.Buttons.Right :
return False
column = self._qtWidget().columnAt( event.line.p0.x )
cellPlug = self._qtWidget().model().plugForIndex( self._qtWidget().model().index( 0, column ) )
assert( cellPlug.ancestor( Gaffer.Spreadsheet.RowPlug ) == cellPlug.ancestor( Gaffer.Spreadsheet.RowsPlug ).defaultRow() )
menuDefinition = IECore.MenuDefinition()
menuDefinition.append(
"/Set Label...",
{
"command" : functools.partial( Gaffer.WeakMethod( self.__setColumnLabel ), cellPlug ),
"active" : not Gaffer.MetadataAlgo.readOnly( cellPlug ),
}
)
menuDefinition.append(
"/Set Description...",
{
"command" : functools.partial( Gaffer.WeakMethod( self.__setColumnDescription ), cellPlug ),
"active" : not Gaffer.MetadataAlgo.readOnly( cellPlug ),
}
)
sectionNames = _SectionChooser.sectionNames( self._qtWidget().model().rowsPlug() )
currentSection = _SectionChooser.getSection( cellPlug )
for sectionName in sectionNames :
menuDefinition.append(
"/Move to Section/{}".format( sectionName ),
{
"command" : functools.partial( Gaffer.WeakMethod( self.__moveToSection ), cellPlug, sectionName = sectionName ),
"active" : not Gaffer.MetadataAlgo.readOnly( cellPlug ) and sectionName != currentSection,
}
)
if sectionNames :
menuDefinition.append( "/Move to Section/__divider__", { "divider" : True } )
menuDefinition.append(
"/Move to Section/New...",
{
"command" : functools.partial( Gaffer.WeakMethod( self.__moveToSection ), cellPlug ),
"active" : not Gaffer.MetadataAlgo.readOnly( cellPlug ),
}
)
menuDefinition.append( "/DeleteDivider", { "divider" : True } )
menuDefinition.append(
"/Delete Column",
{
"command" : functools.partial( Gaffer.WeakMethod( self.__deleteColumn ), cellPlug ),
"active" : self.__canDeleteColumn( cellPlug )
}
)
self.__headerMenu = GafferUI.Menu( menuDefinition )
self.__headerMenu.popup()
return True
def __prependRowMenuItems( self, menuDefinition, plugs ) :
rowPlugs = { p.ancestor( Gaffer.Spreadsheet.RowPlug ) for p in plugs }
pluralSuffix = "" if len( rowPlugs ) == 1 else "s"
targetDivider = "/__SpreadsheetRowAndCellDivider__"
if menuDefinition.item( targetDivider ) is None :
menuDefinition.prepend( targetDivider, { "divider" : True } )
items = []
rowsPlug = next( iter( rowPlugs ) ).ancestor( Gaffer.Spreadsheet.RowsPlug )
widths = [
( "Half", GafferUI.PlugWidget.labelWidth() * 0.5 ),
( "Single", GafferUI.PlugWidget.labelWidth() ),
( "Double", GafferUI.PlugWidget.labelWidth() * 2 ),
]
currentWidth = self.__getRowNameWidth()
for label, width in widths :
items.append( (
"/Width/{}".format( label ),
{
"command" : functools.partial( Gaffer.WeakMethod( self.__setRowNameWidth ), width ),
"active" : not Gaffer.MetadataAlgo.readOnly( rowsPlug ),
"checkBox" : width == currentWidth,
}
) )
clipboard = self.__getClipboard()
pasteRowsPluralSuffix = "" if _ClipboardAlgo.isValueMatrix( clipboard ) and len( clipboard ) == 1 else "s"
canChangeEnabledState, currentEnabledState = self.__canChangeRowEnabledState( rowPlugs )
enabledPlugs = [ row["enabled"] for row in rowPlugs ]
items.extend( (
(
"/__DisableRowsDivider__", { "divider" : True }
),
(
( "/Disable Row%s" if currentEnabledState else "/Enable Row%s" ) % pluralSuffix,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__setRowEnabledState ), enabledPlugs, not currentEnabledState ),
"active" : canChangeEnabledState
}
),
(
"/__CopyPasteRowsDivider__", { "divider" : True }
),
(
"Copy Row%s" % pluralSuffix,
{
"command" : Gaffer.WeakMethod( self.__copyRows ),
"shortCut" : "Ctrl+C"
}
),
(
"Paste Row%s" % pasteRowsPluralSuffix,
{
"command" : Gaffer.WeakMethod( self.__pasteRows ),
"active" : _ClipboardAlgo.canPasteRows( self.__getClipboard(), rowsPlug ),
"shortCut" : "Ctrl+V"
}
),
(
"/__DeleteRowDivider__", { "divider" : True }
),
(
"/Delete Row%s" % pluralSuffix,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__deleteRows ), rowPlugs ),
"active" : self.__canDeleteRows( rowPlugs )
}
)
) )
for path, args in reversed( items ) :
menuDefinition.prepend( path, args )
def __prependCellMenuItems( self, menuDefinition, cellPlugs ) :
targetDivider = "/__SpreadsheetRowAndCellDivider__"
if menuDefinition.item( targetDivider ) is None :
menuDefinition.prepend( targetDivider, { "divider" : True } )
pluralSuffix = "" if len( cellPlugs ) == 1 else "s"
canChangeEnabledState, currentEnabledState = self.__canChangeCellEnabledState( cellPlugs )
enabledPlugs = [ cell.enabledPlug() for cell in cellPlugs ]
plugMatrix = _ClipboardAlgo.createPlugMatrixFromCells( cellPlugs )
items = [
(
( "/Disable Cell%s" if currentEnabledState else "/Enable Cell%s" ) % pluralSuffix,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__setPlugValues ), enabledPlugs, not currentEnabledState ),
"active" : canChangeEnabledState,
"shortCut" : "D"
}
),
( "/__EditCellsDivider__", { "divider" : True } ),
(
"/Edit Cell%s" % pluralSuffix,
{
"active" : _CellPlugValueWidget.canEdit( cellPlugs ),
"command" : functools.partial( Gaffer.WeakMethod( self.__editSelectedPlugs ), False )
}
),
( "/__CopyPasteCellsDivider__", { "divider" : True } ),
(
"Copy Cell%s" % pluralSuffix,
{
"command" : Gaffer.WeakMethod( self.__copyCells ),
"active" : _ClipboardAlgo.canCopyPlugs( plugMatrix ),
"shortCut" : "Ctrl+C"
}
),
(
"Paste Cell%s" % pluralSuffix,
{
"command" : Gaffer.WeakMethod( self.__pasteCells ),
"active" : _ClipboardAlgo.canPasteCells( self.__getClipboard(), plugMatrix ),
"shortCut" : "Ctrl+V"
}
)
]
for path, args in reversed( items ) :
menuDefinition.prepend( path, args )
def __getClipboard( self ) :
appRoot = self._qtWidget().model().rowsPlug().ancestor( Gaffer.ApplicationRoot )
return appRoot.getClipboardContents()
def __setClipboard( self, data ) :
appRoot = self._qtWidget().model().rowsPlug().ancestor( Gaffer.ApplicationRoot )
return appRoot.setClipboardContents( data )
def __copyCells( self ) :
selection = self.selectedPlugs()
plugMatrix = _ClipboardAlgo.createPlugMatrixFromCells( selection )
if not plugMatrix or not _ClipboardAlgo.canCopyPlugs( plugMatrix ) :
return
with self.ancestor( GafferUI.PlugValueWidget ).getContext() :
clipboardData = _ClipboardAlgo.valueMatrix( plugMatrix )
self.__setClipboard( clipboardData )
def __pasteCells( self ) :
plugMatrix = _ClipboardAlgo.createPlugMatrixFromCells( self.selectedPlugs() )
clipboard = self.__getClipboard()
if not plugMatrix or not _ClipboardAlgo.canPasteCells( clipboard, plugMatrix ) :
return
context = self.ancestor( GafferUI.PlugValueWidget ).getContext()
with Gaffer.UndoScope( plugMatrix[0][0].ancestor( Gaffer.ScriptNode ) ) :
_ClipboardAlgo.pasteCells( clipboard, plugMatrix, context.getTime() )
def __copyRows( self ) :
rowPlugs = _PlugTableView.__orderedRowsPlugs( self.selectedPlugs() )
with self.ancestor( GafferUI.PlugValueWidget ).getContext() :
clipboardData = _ClipboardAlgo.copyRows( rowPlugs )
self.__setClipboard( clipboardData )
def __pasteRows( self ) :
rowsPlug = self._qtWidget().model().rowsPlug()
clipboard = self.__getClipboard()
if not _ClipboardAlgo.canPasteRows( clipboard, rowsPlug ) :
return
with Gaffer.UndoScope( rowsPlug.ancestor( Gaffer.ScriptNode ) ) :
_ClipboardAlgo.pasteRows( clipboard, rowsPlug )
def __setRowNameWidth( self, width, *unused ) :
rowsPlug = self._qtWidget().model().rowsPlug()
with Gaffer.UndoScope( rowsPlug.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( rowsPlug.defaultRow(), "spreadsheet:rowNameWidth", width )
def __getRowNameWidth( self ) :
rowsPlug = self._qtWidget().model().rowsPlug()
width = Gaffer.Metadata.value( rowsPlug.defaultRow(), "spreadsheet:rowNameWidth" )
return width if width is not None else GafferUI.PlugWidget.labelWidth()
def __editSelectedPlugs( self, allowDirectEditing = True ) :
selectedPlugs = self.selectedPlugs()
if self.__mode == self.Mode.RowNames :
# Multi-editing row names makes no sense, so pick the first one.
# It will also be muddled up with the value cells.
rows = _PlugTableView.__orderedRowsPlugs( selectedPlugs )
selectedPlugs = { rows[0]["name"] }
self.editPlugs( selectedPlugs, allowDirectEditing = allowDirectEditing )
def __showEditor( self, plugs, plugBound, allowDirectEditing ) :
self.__editorWidget = None
if allowDirectEditing :
# Show a presets menu directly to avoid an unnecessary interaction step
# This sadly leaks the widget, but there isn't a lot we can do at present.
plugValueWidget = GafferUI.PlugValueWidget.create( plugs )
if isinstance( plugValueWidget, _CellPlugValueWidget ) :
valuePlugValueWidget = plugValueWidget.childPlugValueWidget( next( iter( plugs ) )["value"] )
if isinstance( valuePlugValueWidget, GafferUI.PresetsPlugValueWidget ) :
if not Gaffer.Metadata.value( next( iter( valuePlugValueWidget.getPlugs() ) ), "presetsPlugValueWidget:isCustom" ) :
self.__editorWidget = plugValueWidget
valuePlugValueWidget.menu().popup( position = plugBound.center() )
return
self.__editorWidget = GafferUI.PlugPopup( plugs, title = "" )
self.__editorWidget.popup( plugBound.center() )
# Clears and selects a non-contiguous list of indexes if they're not already selected.
def __selectIndexes( self, indexes ) :
selectionModel = self._qtWidget().selectionModel()
if set( selectionModel.selectedIndexes() ) != set( indexes ) :
selection = QtCore.QItemSelection()
for index in indexes :
selection.select( index, index )
selectionModel.select( selection, QtCore.QItemSelectionModel.ClearAndSelect )
if not selectionModel.isSelected( selectionModel.currentIndex() ) :
selectionModel.setCurrentIndex( indexes[ -1 ], QtCore.QItemSelectionModel.ClearAndSelect )
def __setColumnLabel( self, cellPlug ) :
label = GafferUI.TextInputDialogue(
title = "Set Label",
confirmLabel = "Set",
initialText = Gaffer.Metadata.value( cellPlug, "spreadsheet:columnLabel" ) or cellPlug.getName()
).waitForText( parentWindow = self.ancestor( GafferUI.Window ) )
if label is not None :
with Gaffer.UndoScope( cellPlug.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( cellPlug, "spreadsheet:columnLabel", label )
def __setColumnDescription( self, cellPlug ) :
description = GafferUI.TextInputDialogue(
title = "Set Description",
confirmLabel = "Set",
initialText = Gaffer.Metadata.value( cellPlug["value"], "description" ) or "",
multiLine = True,
).waitForText( parentWindow = self.ancestor( GafferUI.Window ) )
if description is not None :
with Gaffer.UndoScope( cellPlug.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( cellPlug["value"], "description", description )
def __canDeleteColumn( self, cellPlug ) :
if Gaffer.MetadataAlgo.readOnly( cellPlug ) :
return False
if isinstance( cellPlug.node(), Gaffer.Reference ) :
return False
return True
def __deleteColumn( self, cellPlug ) :
rowsPlug = cellPlug.ancestor( Gaffer.Spreadsheet.RowsPlug )
with Gaffer.UndoScope( rowsPlug.ancestor( Gaffer.ScriptNode ) ) :
rowsPlug.removeColumn( cellPlug.parent().children().index( cellPlug ) )
def __canDeleteRows( self, rowPlugs ) :
if not rowPlugs :
return False
rowsPlug = next( iter( rowPlugs ) ).ancestor( Gaffer.Spreadsheet.RowsPlug )
if rowsPlug.defaultRow() in rowPlugs :
return False
if any( [ Gaffer.MetadataAlgo.readOnly( row ) for row in rowPlugs ] ) :
return False
if isinstance( rowsPlug.node(), Gaffer.Reference ) :
# Can't delete rows unless they have been added as edits
# on top of the reference. Otherwise they will be recreated
# when the reference is reloaded anyway.
reference = rowsPlug.node()
for row in rowPlugs :
if not reference.isChildEdit( row ) :
return False
return True
def __deleteRows( self, rowPlugs ) :
with Gaffer.UndoScope( next( iter( rowPlugs ) ).ancestor( Gaffer.ScriptNode ) ) :
for row in rowPlugs :
row.parent().removeChild( row )
def __canChangeRowEnabledState( self, rowPlugs ) :
enabledPlugs = [ row["enabled"] for row in rowPlugs ]
return self.__canChangeEnabledState( enabledPlugs )
def __setRowEnabledState( self, enabledPlugs, enabled ) :
self.__setPlugValues( enabledPlugs, enabled )
# Clear the row name column selection if rows have been disabled.
# They don't show up in selectionModel.selection().
if not enabled :
selectionModel = self._qtWidget().selectionModel()
nameColumnIndex = self._qtWidget().model().index( 0, 0 )
flags = QtCore.QItemSelectionModel.Columns | QtCore.QItemSelectionModel.Deselect
selectionModel.select( nameColumnIndex, flags )
if selectionModel.currentIndex().column() == 0 :
selectionModel.clearCurrentIndex()
def __canChangeCellEnabledState( self, cellPlugs ) :
enabledPlugs = [ cell.enabledPlug() for cell in cellPlugs ]
allSettable, enabled = self.__canChangeEnabledState( enabledPlugs )
return ( _Algo.cellsCanBeDisabled( cellPlugs ) and allSettable, enabled )
def __canChangeEnabledState( self, enabledPlugs ) :
anyReadOnly = any( [ Gaffer.MetadataAlgo.readOnly( plug ) for plug in enabledPlugs ] )
allSettable = all( [ plug.settable() for plug in enabledPlugs ] )
enabled = True
with self.ancestor( GafferUI.PlugValueWidget ).getContext() :
with IECore.IgnoredExceptions( Gaffer.ProcessException ) :
enabled = all( [ plug.getValue() for plug in enabledPlugs ] )
return ( allSettable and not anyReadOnly, enabled )
def __toggleBooleans( self, plugs ) :
if not plugs or any( Gaffer.MetadataAlgo.readOnly( p ) for p in plugs ) :
return
with self.ancestor( GafferUI.PlugValueWidget ).getContext() :
checked = sole( [ plug.getValue() for plug in plugs ] )
with Gaffer.UndoScope( next( iter( plugs ) ).ancestor( Gaffer.ScriptNode ) ) :
self.__setPlugValues( plugs, not checked )
def __toggleCellEnabledState( self ) :
cellPlugs = [ p for p in self.selectedPlugs() if isinstance( p, Gaffer.Spreadsheet.CellPlug ) ]
canChange, currentState = self.__canChangeCellEnabledState( cellPlugs )
if not canChange :
return
self.__setPlugValues( [ cell.enabledPlug() for cell in cellPlugs ], not currentState )
def __setPlugValues( self, plugs, value ) :
with Gaffer.UndoScope( next( iter( plugs ) ).ancestor( Gaffer.ScriptNode ) ) :
for plug in plugs :
plug.setValue( value )
def __modelReset( self ) :
self.__applyColumnVisibility()
self.__applyColumnWidthMetadata()
self.__applyRowNamesWidth()
def __moveToSection( self, cellPlug, sectionName = None ) :
if sectionName is None :
sectionName = GafferUI.TextInputDialogue(
initialText = "New Section",
title = "Move to Section",
confirmLabel = "Move"
).waitForText( parentWindow = self.ancestor( GafferUI.Window ) )
if not sectionName :
return
with Gaffer.UndoScope( cellPlug.ancestor( Gaffer.ScriptNode ) ) :
_SectionChooser.setSection( cellPlug, sectionName )
@staticmethod
def __orderedRowsPlugs( plugs ) :
rowPlugs = { p.ancestor( Gaffer.Spreadsheet.RowPlug ) for p in plugs }
if rowPlugs :
allRows = next( iter( rowPlugs ) ).parent().children()
return sorted( rowPlugs, key = allRows.index )
return []
# Ensures navigation key presses aren't stolen by any application-level actions.
class _NavigableTable( _TableView ) :
__protectedKeys = (
QtCore.Qt.Key_Left,
QtCore.Qt.Key_Right,
QtCore.Qt.Key_Up,
QtCore.Qt.Key_Down
)
def event( self, event ) :
if event.type() == QtCore.QEvent.ShortcutOverride and event.key() in self.__protectedKeys :
event.accept()
return True
else :
return _TableView.event( self, event )
|
|
# -*- coding: utf-8 -*-
# Author: Tommy Clausner <Tommy.Clausner@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import pytest
import numpy as np
from numpy.testing import (assert_array_less, assert_allclose,
assert_array_equal)
from scipy.spatial.distance import cdist
import mne
from mne import (SourceEstimate, VolSourceEstimate, VectorSourceEstimate,
read_evokeds, SourceMorph, compute_source_morph,
read_source_morph, read_source_estimate,
read_forward_solution, grade_to_vertices,
setup_volume_source_space, make_forward_solution,
make_sphere_model, make_ad_hoc_cov, VolVectorSourceEstimate,
read_freesurfer_lut)
from mne.datasets import testing
from mne.fixes import _get_img_fdata
from mne.minimum_norm import (apply_inverse, read_inverse_operator,
make_inverse_operator)
from mne.source_space import (get_volume_labels_from_aseg, _get_mri_info_data,
_get_atlas_values, _add_interpolator)
from mne.utils import (run_tests_if_main, requires_nibabel, check_version,
requires_dipy, requires_h5py)
from mne.fixes import _get_args
# Setup paths
data_path = testing.data_path(download=False)
sample_dir = op.join(data_path, 'MEG', 'sample')
subjects_dir = op.join(data_path, 'subjects')
fname_evoked = op.join(sample_dir, 'sample_audvis-ave.fif')
fname_trans = op.join(sample_dir, 'sample_audvis_trunc-trans.fif')
fname_inv_vol = op.join(sample_dir,
'sample_audvis_trunc-meg-vol-7-meg-inv.fif')
fname_fwd_vol = op.join(sample_dir,
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_vol_w = op.join(sample_dir,
'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w')
fname_inv_surf = op.join(sample_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_fmorph = op.join(data_path, 'MEG', 'sample',
'fsaverage_audvis_trunc-meg')
fname_smorph = op.join(sample_dir, 'sample_audvis_trunc-meg')
fname_t1 = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
fname_vol = op.join(subjects_dir, 'sample', 'bem', 'sample-volume-7mm-src.fif')
fname_brain = op.join(subjects_dir, 'sample', 'mri', 'brain.mgz')
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
fname_fs_vol = op.join(subjects_dir, 'fsaverage', 'bem',
'fsaverage-vol7-nointerp-src.fif.gz')
fname_aseg_fs = op.join(subjects_dir, 'fsaverage', 'mri', 'aseg.mgz')
fname_stc = op.join(sample_dir, 'fsaverage_audvis_trunc-meg')
def _real_vec_stc():
inv = read_inverse_operator(fname_inv_surf)
evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0].crop(0, 0.01)
return apply_inverse(evoked, inv, pick_ori='vector')
def test_sourcemorph_consistency():
"""Test SourceMorph class consistency."""
assert _get_args(SourceMorph.__init__)[1:] == \
mne.morph._SOURCE_MORPH_ATTRIBUTES
@testing.requires_testing_data
def test_sparse_morph():
"""Test sparse morphing."""
rng = np.random.RandomState(0)
vertices_fs = [np.sort(rng.permutation(np.arange(10242))[:4]),
np.sort(rng.permutation(np.arange(10242))[:6])]
data = rng.randn(10, 1)
stc_fs = SourceEstimate(data, vertices_fs, 1, 1, 'fsaverage')
spheres_fs = [mne.read_surface(op.join(
subjects_dir, 'fsaverage', 'surf', '%s.sphere.reg' % hemi))[0]
for hemi in ('lh', 'rh')]
spheres_sample = [mne.read_surface(op.join(
subjects_dir, 'sample', 'surf', '%s.sphere.reg' % hemi))[0]
for hemi in ('lh', 'rh')]
morph_fs_sample = compute_source_morph(
stc_fs, 'fsaverage', 'sample', sparse=True, spacing=None,
subjects_dir=subjects_dir)
stc_sample = morph_fs_sample.apply(stc_fs)
offset = 0
orders = list()
for v1, s1, v2, s2 in zip(stc_fs.vertices, spheres_fs,
stc_sample.vertices, spheres_sample):
dists = cdist(s1[v1], s2[v2])
order = np.argmin(dists, axis=-1)
assert_array_less(dists[np.arange(len(order)), order], 1.5) # mm
orders.append(order + offset)
offset += len(order)
assert_allclose(stc_fs.data, stc_sample.data[np.concatenate(orders)])
# Return
morph_sample_fs = compute_source_morph(
stc_sample, 'sample', 'fsaverage', sparse=True, spacing=None,
subjects_dir=subjects_dir)
stc_fs_return = morph_sample_fs.apply(stc_sample)
offset = 0
orders = list()
for v1, s, v2 in zip(stc_fs.vertices, spheres_fs, stc_fs_return.vertices):
dists = cdist(s[v1], s[v2])
order = np.argmin(dists, axis=-1)
assert_array_less(dists[np.arange(len(order)), order], 1.5) # mm
orders.append(order + offset)
offset += len(order)
assert_allclose(stc_fs.data, stc_fs_return.data[np.concatenate(orders)])
@testing.requires_testing_data
def test_xhemi_morph():
"""Test cross-hemisphere morphing."""
stc = read_source_estimate(fname_stc, subject='sample')
# smooth 1 for speed where possible
smooth = 4
spacing = 4
n_grade_verts = 2562
stc = compute_source_morph(
stc, 'sample', 'fsaverage_sym', smooth=smooth, warn=False,
spacing=spacing, subjects_dir=subjects_dir).apply(stc)
morph = compute_source_morph(
stc, 'fsaverage_sym', 'fsaverage_sym', smooth=1, xhemi=True,
warn=False, spacing=[stc.vertices[0], []],
subjects_dir=subjects_dir)
stc_xhemi = morph.apply(stc)
assert stc_xhemi.data.shape[0] == n_grade_verts
assert stc_xhemi.rh_data.shape[0] == 0
assert len(stc_xhemi.vertices[1]) == 0
assert stc_xhemi.lh_data.shape[0] == n_grade_verts
assert len(stc_xhemi.vertices[0]) == n_grade_verts
# complete reversal mapping
morph = compute_source_morph(
stc, 'fsaverage_sym', 'fsaverage_sym', smooth=smooth, xhemi=True,
warn=False, spacing=stc.vertices, subjects_dir=subjects_dir)
mm = morph.morph_mat
assert mm.shape == (n_grade_verts * 2,) * 2
assert mm.size > n_grade_verts * 2
assert mm[:n_grade_verts, :n_grade_verts].size == 0 # L to L
assert mm[n_grade_verts:, n_grade_verts:].size == 0 # R to L
assert mm[n_grade_verts:, :n_grade_verts].size > n_grade_verts # L to R
assert mm[:n_grade_verts, n_grade_verts:].size > n_grade_verts # R to L
# more complicated reversal mapping
vertices_use = [stc.vertices[0], np.arange(10242)]
n_src_verts = len(vertices_use[1])
assert vertices_use[0].shape == (n_grade_verts,)
assert vertices_use[1].shape == (n_src_verts,)
    # ensure it's sufficiently different to manifest round-trip errors
assert np.in1d(vertices_use[1], stc.vertices[1]).mean() < 0.3
morph = compute_source_morph(
stc, 'fsaverage_sym', 'fsaverage_sym', smooth=smooth, xhemi=True,
warn=False, spacing=vertices_use, subjects_dir=subjects_dir)
mm = morph.morph_mat
assert mm.shape == (n_grade_verts + n_src_verts, n_grade_verts * 2)
assert mm[:n_grade_verts, :n_grade_verts].size == 0
assert mm[n_grade_verts:, n_grade_verts:].size == 0
assert mm[:n_grade_verts, n_grade_verts:].size > n_grade_verts
assert mm[n_grade_verts:, :n_grade_verts].size > n_src_verts
# morph forward then back
stc_xhemi = morph.apply(stc)
morph = compute_source_morph(
stc_xhemi, 'fsaverage_sym', 'fsaverage_sym', smooth=smooth,
xhemi=True, warn=False, spacing=stc.vertices,
subjects_dir=subjects_dir)
stc_return = morph.apply(stc_xhemi)
for hi in range(2):
assert_array_equal(stc_return.vertices[hi], stc.vertices[hi])
correlation = np.corrcoef(stc.data.ravel(), stc_return.data.ravel())[0, 1]
assert correlation > 0.9 # not great b/c of sparse grade + small smooth
@testing.requires_testing_data
@pytest.mark.parametrize('smooth, lower, upper, n_warn', [
(None, 0.959, 0.963, 0),
(3, 0.968, 0.971, 2),
('nearest', 0.98, 0.99, 0),
])
def test_surface_source_morph_round_trip(smooth, lower, upper, n_warn):
"""Test round-trip morphing yields similar STCs."""
kwargs = dict(smooth=smooth, warn=True, subjects_dir=subjects_dir)
stc = mne.read_source_estimate(fname_smorph)
if smooth == 'nearest' and not check_version('scipy', '1.3'):
with pytest.raises(ValueError, match='required to use nearest'):
morph = compute_source_morph(stc, 'sample', 'fsaverage', **kwargs)
return
with pytest.warns(None) as w:
morph = compute_source_morph(stc, 'sample', 'fsaverage', **kwargs)
w = [ww for ww in w if 'vertices not included' in str(ww.message)]
assert len(w) == n_warn
assert morph.morph_mat.shape == (20484, len(stc.data))
stc_fs = morph.apply(stc)
morph_back = compute_source_morph(
stc_fs, 'fsaverage', 'sample', spacing=stc.vertices, **kwargs)
assert morph_back.morph_mat.shape == (len(stc.data), 20484)
stc_back = morph_back.apply(stc_fs)
corr = np.corrcoef(stc.data.ravel(), stc_back.data.ravel())[0, 1]
assert lower <= corr <= upper
# check the round-trip power
assert_power_preserved(stc, stc_back)
def assert_power_preserved(orig, new, limits=(1., 1.05)):
"""Assert that the power is preserved during a round-trip morph."""
__tracebackhide__ = True
power_ratio = np.linalg.norm(orig.data) / np.linalg.norm(new.data)
min_, max_ = limits
assert min_ < power_ratio < max_, 'Power ratio'
@requires_h5py
@testing.requires_testing_data
def test_surface_vector_source_morph(tmpdir):
"""Test surface and vector source estimate morph."""
inverse_operator_surf = read_inverse_operator(fname_inv_surf)
stc_surf = read_source_estimate(fname_smorph, subject='sample')
stc_surf.crop(0.09, 0.1) # for faster computation
stc_vec = _real_vec_stc()
source_morph_surf = compute_source_morph(
inverse_operator_surf['src'], subjects_dir=subjects_dir,
smooth=1, warn=False) # smooth 1 for speed
assert source_morph_surf.subject_from == 'sample'
assert source_morph_surf.subject_to == 'fsaverage'
assert source_morph_surf.kind == 'surface'
assert isinstance(source_morph_surf.src_data, dict)
assert isinstance(source_morph_surf.src_data['vertices_from'], list)
assert isinstance(source_morph_surf, SourceMorph)
stc_surf_morphed = source_morph_surf.apply(stc_surf)
assert isinstance(stc_surf_morphed, SourceEstimate)
stc_vec_morphed = source_morph_surf.apply(stc_vec)
with pytest.raises(ValueError, match="Invalid value for the 'output'"):
source_morph_surf.apply(stc_surf, output='nifti1')
# check if correct class after morphing
assert isinstance(stc_surf_morphed, SourceEstimate)
assert isinstance(stc_vec_morphed, VectorSourceEstimate)
# check __repr__
assert 'surface' in repr(source_morph_surf)
# check loading and saving for surf
source_morph_surf.save(tmpdir.join('42.h5'))
source_morph_surf_r = read_source_morph(tmpdir.join('42.h5'))
assert (all([read == saved for read, saved in
zip(sorted(source_morph_surf_r.__dict__),
sorted(source_morph_surf.__dict__))]))
# check wrong subject correction
stc_surf.subject = None
assert isinstance(source_morph_surf.apply(stc_surf), SourceEstimate)
# degenerate
stc_vol = read_source_estimate(fname_vol_w, 'sample')
with pytest.raises(TypeError, match='stc_from must be an instance'):
source_morph_surf.apply(stc_vol)
@requires_h5py
@requires_nibabel()
@requires_dipy()
@pytest.mark.slowtest
@testing.requires_testing_data
def test_volume_source_morph(tmpdir):
"""Test volume source estimate morph, special cases and exceptions."""
import nibabel as nib
inverse_operator_vol = read_inverse_operator(fname_inv_vol)
stc_vol = read_source_estimate(fname_vol_w, 'sample')
# check for invalid input type
with pytest.raises(TypeError, match='src must be'):
compute_source_morph(src=42)
# check for raising an error if neither
# inverse_operator_vol['src'][0]['subject_his_id'] nor subject_from is set,
# but attempting to perform a volume morph
src = inverse_operator_vol['src']
assert src._subject is None # already None on disk (old!)
with pytest.raises(ValueError, match='subject_from could not be inferred'):
with pytest.warns(RuntimeWarning, match='recommend regenerating'):
compute_source_morph(src=src, subjects_dir=subjects_dir)
# check infer subject_from from src[0]['subject_his_id']
src[0]['subject_his_id'] = 'sample'
with pytest.raises(ValueError, match='Inter-hemispheric morphing'):
compute_source_morph(src=src, subjects_dir=subjects_dir, xhemi=True)
with pytest.raises(ValueError, match='Only surface.*sparse morph'):
compute_source_morph(src=src, sparse=True, subjects_dir=subjects_dir)
# terrible quality but fast
zooms = 20
kwargs = dict(zooms=zooms, niter_sdr=(1,), niter_affine=(1,))
source_morph_vol = compute_source_morph(
subjects_dir=subjects_dir, src=fname_inv_vol,
subject_from='sample', **kwargs)
shape = (13,) * 3 # for the given zooms
assert source_morph_vol.subject_from == 'sample'
# the brain used in sample data has shape (255, 255, 255)
assert tuple(source_morph_vol.sdr_morph.domain_shape) == shape
assert tuple(source_morph_vol.pre_affine.domain_shape) == shape
    # proves the above
assert_array_equal(source_morph_vol.zooms, (zooms,) * 3)
# assure proper src shape
mri_size = (src[0]['mri_height'], src[0]['mri_depth'], src[0]['mri_width'])
assert source_morph_vol.src_data['src_shape_full'] == mri_size
fwd = read_forward_solution(fname_fwd_vol)
fwd['src'][0]['subject_his_id'] = 'sample' # avoid further warnings
source_morph_vol = compute_source_morph(
fwd['src'], 'sample', 'sample', subjects_dir=subjects_dir,
**kwargs)
# check wrong subject_to
with pytest.raises(IOError, match='cannot read file'):
compute_source_morph(fwd['src'], 'sample', '42',
subjects_dir=subjects_dir)
# two different ways of saving
source_morph_vol.save(tmpdir.join('vol'))
# check loading
source_morph_vol_r = read_source_morph(tmpdir.join('vol-morph.h5'))
    # check for invalid file name handling
with pytest.raises(IOError, match='not found'):
read_source_morph(tmpdir.join('42'))
# check morph
stc_vol_morphed = source_morph_vol.apply(stc_vol)
# old way, verts do not match
assert not np.array_equal(stc_vol_morphed.vertices[0], stc_vol.vertices[0])
# vector
stc_vol_vec = VolVectorSourceEstimate(
np.tile(stc_vol.data[:, np.newaxis], (1, 3, 1)),
stc_vol.vertices, 0, 1)
stc_vol_vec_morphed = source_morph_vol.apply(stc_vol_vec)
assert isinstance(stc_vol_vec_morphed, VolVectorSourceEstimate)
for ii in range(3):
assert_allclose(stc_vol_vec_morphed.data[:, ii], stc_vol_morphed.data)
# check output as NIfTI
assert isinstance(source_morph_vol.apply(stc_vol_vec, output='nifti2'),
nib.Nifti2Image)
# check for subject_from mismatch
source_morph_vol_r.subject_from = '42'
with pytest.raises(ValueError, match='subject_from must match'):
source_morph_vol_r.apply(stc_vol_morphed)
# check if nifti is in grid morph space with voxel_size == spacing
img_morph_res = source_morph_vol.apply(stc_vol, output='nifti1')
# assure morph spacing
assert isinstance(img_morph_res, nib.Nifti1Image)
assert img_morph_res.header.get_zooms()[:3] == (zooms,) * 3
# assure src shape
img_mri_res = source_morph_vol.apply(stc_vol, output='nifti1',
mri_resolution=True)
assert isinstance(img_mri_res, nib.Nifti1Image)
assert (img_mri_res.shape == (src[0]['mri_height'], src[0]['mri_depth'],
src[0]['mri_width']) +
(img_mri_res.shape[3],))
    # check if nifti is at the requested resolution with voxel_size == (5., 5., 5.)
img_any_res = source_morph_vol.apply(stc_vol, output='nifti1',
mri_resolution=(5., 5., 5.))
assert isinstance(img_any_res, nib.Nifti1Image)
assert img_any_res.header.get_zooms()[:3] == (5., 5., 5.)
# check if morph outputs correct data
assert isinstance(stc_vol_morphed, VolSourceEstimate)
# check if loaded and saved objects contain the same
assert (all([read == saved for read, saved in
zip(sorted(source_morph_vol_r.__dict__),
sorted(source_morph_vol.__dict__))]))
# check __repr__
assert 'volume' in repr(source_morph_vol)
# check Nifti2Image
assert isinstance(
source_morph_vol.apply(stc_vol, mri_resolution=True,
mri_space=True, output='nifti2'),
nib.Nifti2Image)
# Degenerate conditions
with pytest.raises(TypeError, match='output must be'):
source_morph_vol.apply(stc_vol, output=1)
with pytest.raises(ValueError, match='subject_from does not match'):
compute_source_morph(src=src, subject_from='42')
with pytest.raises(ValueError, match='output'):
source_morph_vol.apply(stc_vol, output='42')
with pytest.raises(ValueError, match='subject_to cannot be None'):
compute_source_morph(src, 'sample', None,
subjects_dir=subjects_dir)
    # If the data are not morphed but the voxel size is not a boolean,
    # a ValueError should be raised. Note this check requires dipy so that
    # the dipy ImportError is not raised before the actual voxel size error
    # can be checked.
with pytest.raises(ValueError, match='Cannot infer original voxel size'):
stc_vol.as_volume(inverse_operator_vol['src'], mri_resolution=4)
stc_surf = read_source_estimate(fname_stc, 'sample')
with pytest.raises(TypeError, match='stc_from must be an instance'):
source_morph_vol.apply(stc_surf)
# src_to
# zooms=20 does not match src_to zooms (7)
with pytest.raises(ValueError, match='If src_to is provided, zooms shoul'):
source_morph_vol = compute_source_morph(
fwd['src'], subject_from='sample', src_to=fwd['src'],
subject_to='sample', subjects_dir=subjects_dir, **kwargs)
# hack the src_to "zooms" to make it seem like a pos=20. source space
fwd['src'][0]['src_mri_t']['trans'][:3, :3] = 0.02 * np.eye(3)
source_morph_vol = compute_source_morph(
fwd['src'], subject_from='sample', src_to=fwd['src'],
subject_to='sample', subjects_dir=subjects_dir, **kwargs)
stc_vol_2 = source_morph_vol.apply(stc_vol)
# new way, verts match
assert_array_equal(stc_vol.vertices[0], stc_vol_2.vertices[0])
stc_vol_bad = VolSourceEstimate(
stc_vol.data[:-1], [stc_vol.vertices[0][:-1]],
stc_vol.tmin, stc_vol.tstep)
match = (
'vertices do not match between morph \\(4157\\) and stc \\(4156\\).*'
'\n.*\n.*\n.*Vertices were likely excluded during forward computatio.*'
)
with pytest.raises(ValueError, match=match):
source_morph_vol.apply(stc_vol_bad)
@requires_h5py
@requires_nibabel()
@requires_dipy()
@pytest.mark.slowtest
@testing.requires_testing_data
@pytest.mark.parametrize('subject_from, subject_to, lower, upper', [
('sample', 'fsaverage', 8.5, 9),
('fsaverage', 'fsaverage', 7, 7.5),
('sample', 'sample', 6, 7),
])
def test_volume_source_morph_round_trip(
tmpdir, subject_from, subject_to, lower, upper):
"""Test volume source estimate morph round-trips well."""
import nibabel as nib
from nibabel.processing import resample_from_to
src = dict()
if 'sample' in (subject_from, subject_to):
src['sample'] = mne.read_source_spaces(fname_vol)
src['sample'][0]['subject_his_id'] = 'sample'
assert src['sample'][0]['nuse'] == 4157
if 'fsaverage' in (subject_from, subject_to):
# Created to save space with:
#
# bem = op.join(op.dirname(mne.__file__), 'data', 'fsaverage',
# 'fsaverage-inner_skull-bem.fif')
# src_fsaverage = mne.setup_volume_source_space(
# 'fsaverage', pos=7., bem=bem, mindist=0,
# subjects_dir=subjects_dir, add_interpolator=False)
# mne.write_source_spaces(fname_fs_vol, src_fsaverage, overwrite=True)
#
# For speed we do it without the interpolator because it's huge.
src['fsaverage'] = mne.read_source_spaces(fname_fs_vol)
src['fsaverage'][0].update(
vol_dims=np.array([23, 29, 25]), seg_name='brain')
_add_interpolator(src['fsaverage'], True)
assert src['fsaverage'][0]['nuse'] == 6379
src_to, src_from = src[subject_to], src[subject_from]
del src
# No SDR just for speed once everything works
kwargs = dict(niter_sdr=(), niter_affine=(1,),
subjects_dir=subjects_dir, verbose=True)
morph_from_to = compute_source_morph(
src=src_from, src_to=src_to, subject_to=subject_to, **kwargs)
morph_to_from = compute_source_morph(
src=src_to, src_to=src_from, subject_to=subject_from, **kwargs)
use = np.linspace(0, src_from[0]['nuse'] - 1, 10).round().astype(int)
stc_from = VolSourceEstimate(
np.eye(src_from[0]['nuse'])[:, use], [src_from[0]['vertno']], 0, 1)
stc_from_rt = morph_to_from.apply(morph_from_to.apply(stc_from))
maxs = np.argmax(stc_from_rt.data, axis=0)
src_rr = src_from[0]['rr'][src_from[0]['vertno']]
dists = 1000 * np.linalg.norm(src_rr[use] - src_rr[maxs], axis=1)
mu = np.mean(dists)
assert lower <= mu < upper # fsaverage=7.97; 25.4 without src_ras_t fix
# check that pre_affine is close to identity when subject_to==subject_from
if subject_to == subject_from:
for morph in (morph_to_from, morph_from_to):
assert_allclose(
morph.pre_affine.affine, np.eye(4), atol=1e-2)
# check that power is more or less preserved
ratio = stc_from.data.size / stc_from_rt.data.size
limits = ratio * np.array([1, 1.2])
stc_from.crop(0, 0)._data.fill(1.)
stc_from_rt = morph_to_from.apply(morph_from_to.apply(stc_from))
assert_power_preserved(stc_from, stc_from_rt, limits=limits)
# before and after morph, check the proportion of vertices
# that are inside and outside the brainmask.mgz
brain = nib.load(op.join(subjects_dir, subject_from, 'mri', 'brain.mgz'))
mask = _get_img_fdata(brain) > 0
if subject_from == subject_to == 'sample':
for stc in [stc_from, stc_from_rt]:
img = stc.as_volume(src_from, mri_resolution=True)
img = nib.Nifti1Image(_get_img_fdata(img)[:, :, :, 0], img.affine)
img = _get_img_fdata(resample_from_to(img, brain, order=1))
assert img.shape == mask.shape
in_ = img[mask].astype(bool).mean()
out = img[~mask].astype(bool).mean()
assert 0.97 < in_ < 0.98
assert out < 0.02
@pytest.mark.slowtest
@testing.requires_testing_data
def test_morph_stc_dense():
"""Test morphing stc."""
subject_from = 'sample'
subject_to = 'fsaverage'
stc_from = read_source_estimate(fname_smorph, subject='sample')
stc_to = read_source_estimate(fname_fmorph)
# make sure we can specify grade
stc_from.crop(0.09, 0.1) # for faster computation
stc_to.crop(0.09, 0.1) # for faster computation
assert_array_equal(stc_to.time_as_index([0.09, 0.1], use_rounding=True),
[0, len(stc_to.times) - 1])
    # After the deprecation period, change this to:
stc_to1 = compute_source_morph(
subject_to=subject_to, spacing=3, smooth=12, src=stc_from,
subjects_dir=subjects_dir).apply(stc_from)
assert_allclose(stc_to.data, stc_to1.data, atol=1e-5)
mean_from = stc_from.data.mean(axis=0)
mean_to = stc_to1.data.mean(axis=0)
assert np.corrcoef(mean_to, mean_from).min() > 0.999
vertices_to = grade_to_vertices(subject_to, grade=3,
subjects_dir=subjects_dir)
# make sure we can fill by morphing
with pytest.warns(RuntimeWarning, match='consider increasing'):
morph = compute_source_morph(
stc_from, subject_from, subject_to, spacing=None, smooth=1,
subjects_dir=subjects_dir)
stc_to5 = morph.apply(stc_from)
assert stc_to5.data.shape[0] == 163842 + 163842
# Morph vector data
stc_vec = _real_vec_stc()
stc_vec_to1 = compute_source_morph(
stc_vec, subject_from, subject_to, subjects_dir=subjects_dir,
spacing=vertices_to, smooth=1, warn=False).apply(stc_vec)
assert stc_vec_to1.subject == subject_to
assert stc_vec_to1.tmin == stc_vec.tmin
assert stc_vec_to1.tstep == stc_vec.tstep
assert len(stc_vec_to1.lh_vertno) == 642
assert len(stc_vec_to1.rh_vertno) == 642
# Degenerate conditions
# Morphing to a density that is too high should raise an informative error
# (here we need to push to grade=6, but for some subjects even grade=5
# will break)
with pytest.raises(ValueError, match='Cannot use icosahedral grade 6 '):
compute_source_morph(
stc_to1, subject_from=subject_to, subject_to=subject_from,
spacing=6, subjects_dir=subjects_dir)
del stc_to1
with pytest.raises(ValueError, match='smooth.* has to be at least 1'):
compute_source_morph(
stc_from, subject_from, subject_to, spacing=5, smooth=-1,
subjects_dir=subjects_dir)
# subject from mismatch
with pytest.raises(ValueError, match="subject_from does not match"):
compute_source_morph(stc_from, subject_from='foo',
subjects_dir=subjects_dir)
# only one set of vertices
with pytest.raises(ValueError, match="grade.*list must have two elements"):
compute_source_morph(
stc_from, subject_from=subject_from, spacing=[vertices_to[0]],
subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_morph_stc_sparse():
"""Test morphing stc with sparse=True."""
subject_from = 'sample'
subject_to = 'fsaverage'
# Morph sparse data
# Make a sparse stc
stc_from = read_source_estimate(fname_smorph, subject='sample')
stc_from.vertices[0] = stc_from.vertices[0][[100, 500]]
stc_from.vertices[1] = stc_from.vertices[1][[200]]
stc_from._data = stc_from._data[:3]
stc_to_sparse = compute_source_morph(
stc_from, subject_from=subject_from, subject_to=subject_to,
spacing=None, sparse=True, subjects_dir=subjects_dir).apply(stc_from)
assert_allclose(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert len(stc_from.rh_vertno) == len(stc_to_sparse.rh_vertno)
assert len(stc_from.lh_vertno) == len(stc_to_sparse.lh_vertno)
assert stc_to_sparse.subject == subject_to
assert stc_from.tmin == stc_from.tmin
assert stc_from.tstep == stc_from.tstep
stc_from.vertices[0] = np.array([], dtype=np.int64)
stc_from._data = stc_from._data[:1]
stc_to_sparse = compute_source_morph(
stc_from, subject_from, subject_to, spacing=None, sparse=True,
subjects_dir=subjects_dir).apply(stc_from)
assert_allclose(np.sort(stc_from.data.sum(axis=1)),
np.sort(stc_to_sparse.data.sum(axis=1)))
assert len(stc_from.rh_vertno) == len(stc_to_sparse.rh_vertno)
assert len(stc_from.lh_vertno) == len(stc_to_sparse.lh_vertno)
assert stc_to_sparse.subject == subject_to
assert stc_from.tmin == stc_from.tmin
assert stc_from.tstep == stc_from.tstep
# Degenerate cases
with pytest.raises(ValueError, match='spacing must be set to None'):
compute_source_morph(
stc_from, subject_from=subject_from, subject_to=subject_to,
spacing=5, sparse=True, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='xhemi=True can only be used with'):
compute_source_morph(
stc_from, subject_from=subject_from, subject_to=subject_to,
spacing=None, sparse=True, xhemi=True, subjects_dir=subjects_dir)
@requires_nibabel()
@testing.requires_testing_data
@pytest.mark.parametrize('sl, n_real, n_mri, n_orig', [
    # First and last should add up; middle can have overlap, so should be <= sum
(slice(0, 1), 37, 123, 8),
(slice(1, 2), 51, 225, 12),
(slice(0, 2), 88, 330, 20),
])
def test_volume_labels_morph(tmpdir, sl, n_real, n_mri, n_orig):
"""Test generating a source space from volume label."""
import nibabel as nib
n_use = (sl.stop - sl.start) // (sl.step or 1)
# see gh-5224
evoked = mne.read_evokeds(fname_evoked)[0].crop(0, 0)
evoked.pick_channels(evoked.ch_names[:306:8])
evoked.info.normalize_proj()
n_ch = len(evoked.ch_names)
aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
lut, _ = read_freesurfer_lut()
label_names = sorted(get_volume_labels_from_aseg(aseg_fname))
use_label_names = label_names[sl]
src = setup_volume_source_space(
'sample', subjects_dir=subjects_dir, volume_label=use_label_names,
mri=aseg_fname)
assert len(src) == n_use
assert src.kind == 'volume'
n_src = sum(s['nuse'] for s in src)
sphere = make_sphere_model('auto', 'auto', evoked.info)
fwd = make_forward_solution(evoked.info, fname_trans, src, sphere)
assert fwd['sol']['data'].shape == (n_ch, n_src * 3)
inv = make_inverse_operator(evoked.info, fwd, make_ad_hoc_cov(evoked.info),
loose=1.)
stc = apply_inverse(evoked, inv)
assert stc.data.shape == (n_src, 1)
img = stc.as_volume(src, mri_resolution=True)
assert img.shape == (86, 86, 86, 1)
n_on = np.array(img.dataobj).astype(bool).sum()
aseg_img = _get_img_fdata(nib.load(fname_aseg))
n_got_real = np.in1d(
aseg_img.ravel(), [lut[name] for name in use_label_names]).sum()
assert n_got_real == n_real
# - This was 291 on `master` before gh-5590
# - Refactoring transforms it became 279 with a < 1e-8 change in vox_mri_t
# - Dropped to 123 once nearest-voxel was used in gh-7653
# - Jumped back up to 330 with morphing fixes actually correctly
# interpolating across all volumes
assert aseg_img.shape == img.shape[:3]
assert n_on == n_mri
for ii in range(2):
# should work with (ii=0) or without (ii=1) the interpolator
if ii:
src[0]['interpolator'] = None
img = stc.as_volume(src, mri_resolution=False)
n_on = np.array(img.dataobj).astype(bool).sum()
# was 20 on `master` before gh-5590
# then 44 before gh-7653, which took it back to 20
assert n_on == n_orig
# without the interpolator, this should fail
assert src[0]['interpolator'] is None
with pytest.raises(RuntimeError, match=r'.*src\[0\], .* mri_resolution'):
stc.as_volume(src, mri_resolution=True)
@pytest.fixture(scope='session', params=[testing._pytest_param()])
def _mixed_morph_srcs():
# create a mixed source space
labels_vol = ['Left-Cerebellum-Cortex', 'Right-Cerebellum-Cortex']
src = mne.setup_source_space('sample', spacing='oct3',
add_dist=False, subjects_dir=subjects_dir)
src += mne.setup_volume_source_space(
'sample', mri=fname_aseg, pos=10.0,
volume_label=labels_vol, subjects_dir=subjects_dir,
add_interpolator=True, verbose=True)
# create the destination space
src_fs = mne.read_source_spaces(
op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif'))
src_fs += mne.setup_volume_source_space(
'fsaverage', pos=7., volume_label=labels_vol,
subjects_dir=subjects_dir, add_interpolator=False, verbose=True)
del labels_vol
with pytest.raises(ValueError, match='src_to must be provided .* mixed'):
mne.compute_source_morph(
src=src, subject_from='sample', subject_to='fsaverage',
subjects_dir=subjects_dir)
with pytest.warns(RuntimeWarning, match='not included in smoothing'):
morph = mne.compute_source_morph(
src=src, subject_from='sample', subject_to='fsaverage',
subjects_dir=subjects_dir, niter_affine=[1, 0, 0],
niter_sdr=[1, 0, 0], src_to=src_fs, smooth=5, verbose=True)
return morph, src, src_fs
@requires_nibabel()
@requires_dipy()
@pytest.mark.parametrize('vector', (False, True))
def test_mixed_source_morph(_mixed_morph_srcs, vector):
"""Test mixed source space morphing."""
import nibabel as nib
morph, src, src_fs = _mixed_morph_srcs
# Test some basic properties in the subject's own space
lut, _ = read_freesurfer_lut()
ids = [lut[s['seg_name']] for s in src[2:]]
del lut
vertices = [s['vertno'] for s in src]
n_vertices = sum(len(v) for v in vertices)
data = np.zeros((n_vertices, 3, 1))
data[:, 1] = 1.
klass = mne.MixedVectorSourceEstimate
if not vector:
data = data[:, 1]
klass = klass._scalar_class
stc = klass(data, vertices, 0, 1, 'sample')
vol_info = _get_mri_info_data(fname_aseg, data=True)
rrs = np.concatenate([src[2]['rr'][sp['vertno']] for sp in src[2:]])
n_want = np.in1d(_get_atlas_values(vol_info, rrs), ids).sum()
img = _get_img_fdata(stc.volume().as_volume(src, mri_resolution=False))
assert img.astype(bool).sum() == n_want
img_res = nib.load(fname_aseg)
n_want = np.in1d(_get_img_fdata(img_res), ids).sum()
img = _get_img_fdata(stc.volume().as_volume(src, mri_resolution=True))
assert img.astype(bool).sum() > n_want # way more get interpolated into
with pytest.raises(TypeError, match='stc_from must be an instance'):
morph.apply(1.)
# Now actually morph
stc_fs = morph.apply(stc)
img = stc_fs.volume().as_volume(src_fs, mri_resolution=False)
vol_info = _get_mri_info_data(fname_aseg_fs, data=True)
rrs = np.concatenate([src_fs[2]['rr'][sp['vertno']] for sp in src_fs[2:]])
n_want = np.in1d(_get_atlas_values(vol_info, rrs), ids).sum()
with pytest.raises(ValueError, match=r'stc\.subject does not match src s'):
stc_fs.volume().as_volume(src, mri_resolution=False)
img = _get_img_fdata(
stc_fs.volume().as_volume(src_fs, mri_resolution=False))
assert img.astype(bool).sum() == n_want # correct number of voxels
# Morph separate parts and compare to morphing the entire one
stc_fs_surf = morph.apply(stc.surface())
stc_fs_vol = morph.apply(stc.volume())
stc_fs_2 = stc_fs.__class__(
np.concatenate([stc_fs_surf.data, stc_fs_vol.data]),
stc_fs_surf.vertices + stc_fs_vol.vertices, stc_fs.tmin, stc_fs.tstep,
stc_fs.subject)
assert_allclose(stc_fs.data, stc_fs_2.data)
run_tests_if_main()
# Copyright (c) 2011-2014 Greg Holt
# Copyright (c) 2012-2013 John Dickinson
# Copyright (c) 2012 Felipe Reyes
# Copyright (c) 2012 Peter Portante
# Copyright (c) 2012 Victor Rodionov
# Copyright (c) 2013-2014 Samuel Merritt
# Copyright (c) 2013 Chuck Thier
# Copyright (c) 2013 David Goetz
# Copyright (c) 2013 Dirk Mueller
# Copyright (c) 2013 Donagh McCabe
# Copyright (c) 2013 Fabien Boucher
# Copyright (c) 2013 Greg Lange
# Copyright (c) 2013 Kun Huang
# Copyright (c) 2013 Richard Hawkins
# Copyright (c) 2013 Tong Li
# Copyright (c) 2013 ZhiQiang Fan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
TempURL Middleware
Allows the creation of URLs to provide temporary access to objects.
For example, a website may wish to provide a link to download a large
object in Swift, but the Swift account has no public access. The
website can generate a URL that will provide GET access for a limited
time to the resource. When the web browser user clicks on the link,
the browser will download the object directly from Swift, obviating
the need for the website to act as a proxy for the request.
If the user were to share the link with all his friends, or
accidentally post it on a forum, etc. the direct access would be
limited to the expiration time set when the website created the link.
Beyond that, the middleware provides the ability to create URLs
containing signatures that are valid for all objects sharing a
common prefix. These prefix-based URLs are useful for sharing a set
of objects.
------------
Client Usage
------------
To create temporary URLs, first an ``X-Account-Meta-Temp-URL-Key``
header must be set on the Swift account. Then, an HMAC (RFC 2104)
signature is generated using the HTTP method to allow (``GET``, ``PUT``,
``DELETE``, etc.), the Unix timestamp until which the access should be allowed,
the full path to the object, and the key set on the account.
The digest algorithm to be used may be configured by the operator. By default,
HMAC-SHA1, HMAC-SHA256, and HMAC-SHA512 are supported. Check the
``tempurl.allowed_digests`` entry in the cluster's capabilities response to
see which algorithms are supported by your deployment; see
:doc:`api/discoverability` for more information. On older clusters,
the ``tempurl`` key may be present while the ``allowed_digests`` subkey
is not; in this case, only HMAC-SHA1 is supported.
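For example, a rough sketch of such a check (this assumes the ``requests``
package is available and uses the example cluster URL from below; any HTTP
client would do)::
    import requests
    # the /info endpoint returns the cluster's capabilities document
    caps = requests.get('https://swift-cluster.example.com/info').json()
    digests = caps.get('tempurl', {}).get('allowed_digests', ['sha1'])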
For example, here is code generating the signature for a ``GET`` for 60
seconds on ``/v1/AUTH_account/container/object``::
import hmac
from hashlib import sha1
from time import time
method = 'GET'
expires = int(time() + 60)
path = '/v1/AUTH_account/container/object'
key = 'mykey'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
Be certain to use the full path, from the ``/v1/`` onward.
Let's say ``sig`` ends up equaling
``da39a3ee5e6b4b0d3255bfef95601890afd80709`` and ``expires`` ends up
``1323479485``. Then, for example, the website could provide a link to::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
temp_url_expires=1323479485
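A rough sketch of assembling that link in code (reusing ``sig`` and
``expires`` from above and the same example host)::
    from six.moves.urllib.parse import urlencode
    url = ('https://swift-cluster.example.com'
           '/v1/AUTH_account/container/object?' +
           urlencode({'temp_url_sig': sig, 'temp_url_expires': expires}))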
For longer hashes, a hex encoding becomes unwieldy. Base64 encoding is also
supported, and indicated by prefixing the signature with ``"<digest name>:"``.
This is *required* for HMAC-SHA512 signatures. For example, comparable code
for generating a HMAC-SHA512 signature would be::
import base64
import hmac
from hashlib import sha512
from time import time
method = 'GET'
expires = int(time() + 60)
path = '/v1/AUTH_account/container/object'
key = 'mykey'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = 'sha512:' + base64.urlsafe_b64encode(hmac.new(
key, hmac_body, sha512).digest())
Supposing that ``sig`` ends up equaling
``sha512:ZrSijn0GyDhsv1ltIj9hWUTrbAeE45NcKXyBaz7aPbSMvROQ4jtYH4nRAmm
5ErY2X11Yc1Yhy2OMCyN3yueeXg==`` and ``expires`` ends up
``1516741234``, then the website could provide a link to::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
temp_url_sig=sha512:ZrSijn0GyDhsv1ltIj9hWUTrbAeE45NcKXyBaz7aPbSMvRO
Q4jtYH4nRAmm5ErY2X11Yc1Yhy2OMCyN3yueeXg==&
temp_url_expires=1516741234
You may also use ISO 8601 UTC timestamps with the format
``"%Y-%m-%dT%H:%M:%SZ"`` instead of UNIX timestamps in the URL
(but NOT in the code above for generating the signature!).
So, the above HMAC-SHA1 URL could also be formulated as::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
temp_url_expires=2011-12-10T01:11:25Z
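Such an ISO 8601 value can be derived from the Unix ``expires`` computed in
the earlier examples, for instance::
    from time import gmtime, strftime
    expires_iso = strftime('%Y-%m-%dT%H:%M:%SZ', gmtime(expires))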
If a prefix-based signature with the prefix ``pre`` is desired, set path to::
path = 'prefix:/v1/AUTH_account/container/pre'
The generated signature would be valid for all objects starting
with ``pre``. The middleware detects a prefix-based temporary URL by
a query parameter called ``temp_url_prefix``. So, if ``sig`` and ``expires``
end up as above, the following URL would be valid::
https://swift-cluster.example.com/v1/AUTH_account/container/pre/object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
temp_url_expires=1323479485&
temp_url_prefix=pre
Another valid URL::
https://swift-cluster.example.com/v1/AUTH_account/container/pre/
subfolder/another_object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
temp_url_expires=1323479485&
temp_url_prefix=pre
Any alteration of the resource path or query arguments of a temporary URL
would result in ``401 Unauthorized``. Similarly, a ``PUT`` where ``GET`` was
the allowed method would be rejected with ``401 Unauthorized``.
However, ``HEAD`` is allowed if ``GET``, ``PUT``, or ``POST`` is allowed.
Using this in combination with browser form post translation
middleware could also allow direct-from-browser uploads to specific
locations in Swift.
TempURL supports both account and container level keys. Each allows up to two
keys to be set, allowing key rotation without invalidating all existing
temporary URLs. Account keys are specified by ``X-Account-Meta-Temp-URL-Key``
and ``X-Account-Meta-Temp-URL-Key-2``, while container keys are specified by
``X-Container-Meta-Temp-URL-Key`` and ``X-Container-Meta-Temp-URL-Key-2``.
Signatures are checked against account and container keys, if
present.
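For example, a minimal sketch of setting an account-level key (this assumes
``requests`` is available and that ``storage_url`` and ``token`` hold the
account's storage URL and a valid auth token; container keys work the same
way against the container path with the container header names)::
    import requests
    # storage_url and token are placeholders for the account's storage URL
    # and a valid auth token
    requests.post(storage_url, headers={
        'X-Auth-Token': token,
        'X-Account-Meta-Temp-URL-Key': 'mykey'})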
With ``GET`` TempURLs, a ``Content-Disposition`` header will be set on the
response so that browsers will interpret this as a file attachment to
be saved. The filename chosen is based on the object name, but you
can override this with a filename query parameter. Modifying the
above example::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
temp_url_expires=1323479485&filename=My+Test+File.pdf
If you do not want the object to be downloaded, you can cause
``Content-Disposition: inline`` to be set on the response by adding the
``inline`` parameter to the query string, like so::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
temp_url_expires=1323479485&inline
In some cases, the client might not be able to present the content of the
object inline, but you may still want the content to be saved locally under a
specific filename. You can cause ``Content-Disposition: inline; filename=...``
to be set on the response by adding the ``inline&filename=...`` parameter to
the query string, like so::
https://swift-cluster.example.com/v1/AUTH_account/container/object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
temp_url_expires=1323479485&inline&filename=My+Test+File.pdf
---------------------
Cluster Configuration
---------------------
This middleware understands the following configuration settings:
``incoming_remove_headers``
A whitespace-delimited list of the headers to remove from
incoming requests. Names may optionally end with ``*`` to
indicate a prefix match. ``incoming_allow_headers`` is a
list of exceptions to these removals.
Default: ``x-timestamp``
``incoming_allow_headers``
A whitespace-delimited list of the headers allowed as
exceptions to ``incoming_remove_headers``. Names may
optionally end with ``*`` to indicate a prefix match.
Default: None
``outgoing_remove_headers``
A whitespace-delimited list of the headers to remove from
outgoing responses. Names may optionally end with ``*`` to
indicate a prefix match. ``outgoing_allow_headers`` is a
list of exceptions to these removals.
Default: ``x-object-meta-*``
``outgoing_allow_headers``
A whitespace-delimited list of the headers allowed as
exceptions to ``outgoing_remove_headers``. Names may
optionally end with ``*`` to indicate a prefix match.
Default: ``x-object-meta-public-*``
``methods``
A whitespace delimited list of request methods that are
allowed to be used with a temporary URL.
Default: ``GET HEAD PUT POST DELETE``
``allowed_digests``
A whitespace delimited list of digest algorithms that are allowed
to be used when calculating the signature for a temporary URL.
Default: ``sha1 sha256 sha512``
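As a rough illustration (not how paste.deploy normally wires the filter), the
factory in this module can also be driven directly from Python, with any of
the settings above supplied as keyword overrides::
    # ``app`` stands in for the next WSGI application in the pipeline
    tempurl_filter = filter_factory({}, allowed_digests='sha256 sha512')
    wrapped_app = tempurl_filter(app)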
"""
__all__ = ['TempURL', 'filter_factory',
'DEFAULT_INCOMING_REMOVE_HEADERS',
'DEFAULT_INCOMING_ALLOW_HEADERS',
'DEFAULT_OUTGOING_REMOVE_HEADERS',
'DEFAULT_OUTGOING_ALLOW_HEADERS']
import binascii
from calendar import timegm
import functools
import hashlib
from os.path import basename
from time import time, strftime, strptime, gmtime
from six.moves.urllib.parse import parse_qs
from six.moves.urllib.parse import urlencode
from swift.proxy.controllers.base import get_account_info, get_container_info
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import header_to_environ_key, HTTPUnauthorized, \
HTTPBadRequest
from swift.common.utils import split_path, get_valid_utf8_str, \
register_swift_info, get_hmac, streq_const_time, quote, get_logger, \
strict_b64decode
DISALLOWED_INCOMING_HEADERS = 'x-object-manifest x-symlink-target'
#: Default headers to remove from incoming requests. Simply a whitespace
#: delimited list of header names and names can optionally end with '*' to
#: indicate a prefix match. DEFAULT_INCOMING_ALLOW_HEADERS is a list of
#: exceptions to these removals.
DEFAULT_INCOMING_REMOVE_HEADERS = 'x-timestamp'
#: Default headers as exceptions to DEFAULT_INCOMING_REMOVE_HEADERS. Simply a
#: whitespace delimited list of header names and names can optionally end with
#: '*' to indicate a prefix match.
DEFAULT_INCOMING_ALLOW_HEADERS = ''
#: Default headers to remove from outgoing responses. Simply a whitespace
#: delimited list of header names and names can optionally end with '*' to
#: indicate a prefix match. DEFAULT_OUTGOING_ALLOW_HEADERS is a list of
#: exceptions to these removals.
DEFAULT_OUTGOING_REMOVE_HEADERS = 'x-object-meta-*'
#: Default headers as exceptions to DEFAULT_OUTGOING_REMOVE_HEADERS. Simply a
#: whitespace delimited list of header names and names can optionally end with
#: '*' to indicate a prefix match.
DEFAULT_OUTGOING_ALLOW_HEADERS = 'x-object-meta-public-*'
DEFAULT_ALLOWED_DIGESTS = 'sha1 sha256 sha512'
SUPPORTED_DIGESTS = set(DEFAULT_ALLOWED_DIGESTS.split())
CONTAINER_SCOPE = 'container'
ACCOUNT_SCOPE = 'account'
EXPIRES_ISO8601_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def get_tempurl_keys_from_metadata(meta):
"""
Extracts the tempurl keys from metadata.
:param meta: account metadata
:returns: list of keys found (possibly empty if no keys set)
Example:
meta = get_account_info(...)['meta']
keys = get_tempurl_keys_from_metadata(meta)
"""
return [get_valid_utf8_str(value) for key, value in meta.items()
if key.lower() in ('temp-url-key', 'temp-url-key-2')]
def disposition_format(disposition_type, filename):
# Content-Disposition in HTTP is defined in
# https://tools.ietf.org/html/rfc6266 and references
# https://tools.ietf.org/html/rfc5987#section-3.2
# to explain the filename*= encoding format. The summary
# is that it's the charset, then an optional (and empty) language
# then the filename. Looks funny, but it's right.
return '''%s; filename="%s"; filename*=UTF-8''%s''' % (
disposition_type, quote(filename, safe=' /'), quote(filename))
def authorize_same_account(account_to_match):
def auth_callback_same_account(req):
try:
_ver, acc, _rest = req.split_path(2, 3, True)
except ValueError:
return HTTPUnauthorized(request=req)
if acc == account_to_match:
return None
else:
return HTTPUnauthorized(request=req)
return auth_callback_same_account
def authorize_same_container(account_to_match, container_to_match):
def auth_callback_same_container(req):
try:
_ver, acc, con, _rest = req.split_path(3, 4, True)
except ValueError:
return HTTPUnauthorized(request=req)
if acc == account_to_match and con == container_to_match:
return None
else:
return HTTPUnauthorized(request=req)
return auth_callback_same_container
class TempURL(object):
"""
WSGI Middleware to grant temporary URLs specific access to Swift
resources. See the overview for more information.
The proxy logs created for any subrequests made will have swift.source set
to "TU".
:param app: The next WSGI filter or app in the paste.deploy
chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf):
#: The next WSGI application/filter in the paste.deploy pipeline.
self.app = app
#: The filter configuration dict.
self.conf = conf
self.allowed_digests = conf.get(
'allowed_digests', DEFAULT_ALLOWED_DIGESTS.split())
self.disallowed_headers = set(
header_to_environ_key(h)
for h in DISALLOWED_INCOMING_HEADERS.split())
headers = [header_to_environ_key(h)
for h in conf.get('incoming_remove_headers',
DEFAULT_INCOMING_REMOVE_HEADERS.split())]
#: Headers to remove from incoming requests. Uppercase WSGI env style,
#: like `HTTP_X_PRIVATE`.
self.incoming_remove_headers = \
[h for h in headers if not h.endswith('*')]
#: Header with match prefixes to remove from incoming requests.
#: Uppercase WSGI env style, like `HTTP_X_SENSITIVE_*`.
self.incoming_remove_headers_startswith = \
[h[:-1] for h in headers if h.endswith('*')]
headers = [header_to_environ_key(h)
for h in conf.get('incoming_allow_headers',
DEFAULT_INCOMING_ALLOW_HEADERS.split())]
#: Headers to allow in incoming requests. Uppercase WSGI env style,
#: like `HTTP_X_MATCHES_REMOVE_PREFIX_BUT_OKAY`.
self.incoming_allow_headers = \
[h for h in headers if not h.endswith('*')]
#: Header with match prefixes to allow in incoming requests. Uppercase
#: WSGI env style, like `HTTP_X_MATCHES_REMOVE_PREFIX_BUT_OKAY_*`.
self.incoming_allow_headers_startswith = \
[h[:-1] for h in headers if h.endswith('*')]
headers = [h.title()
for h in conf.get('outgoing_remove_headers',
DEFAULT_OUTGOING_REMOVE_HEADERS.split())]
#: Headers to remove from outgoing responses. Lowercase, like
#: `x-account-meta-temp-url-key`.
self.outgoing_remove_headers = \
[h for h in headers if not h.endswith('*')]
#: Header with match prefixes to remove from outgoing responses.
#: Lowercase, like `x-account-meta-private-*`.
self.outgoing_remove_headers_startswith = \
[h[:-1] for h in headers if h.endswith('*')]
headers = [h.title()
for h in conf.get('outgoing_allow_headers',
DEFAULT_OUTGOING_ALLOW_HEADERS.split())]
#: Headers to allow in outgoing responses. Lowercase, like
#: `x-matches-remove-prefix-but-okay`.
self.outgoing_allow_headers = \
[h for h in headers if not h.endswith('*')]
#: Header with match prefixes to allow in outgoing responses.
#: Lowercase, like `x-matches-remove-prefix-but-okay-*`.
self.outgoing_allow_headers_startswith = \
[h[:-1] for h in headers if h.endswith('*')]
#: HTTP user agent to use for subrequests.
self.agent = '%(orig)s TempURL'
def __call__(self, env, start_response):
"""
Main hook into the WSGI paste.deploy filter/app pipeline.
:param env: The WSGI environment dict.
:param start_response: The WSGI start_response hook.
:returns: Response as per WSGI.
"""
if env['REQUEST_METHOD'] == 'OPTIONS':
return self.app(env, start_response)
info = self._get_temp_url_info(env)
temp_url_sig, temp_url_expires, temp_url_prefix, filename,\
inline_disposition = info
if temp_url_sig is None and temp_url_expires is None:
return self.app(env, start_response)
if not temp_url_sig or not temp_url_expires:
return self._invalid(env, start_response)
if ':' in temp_url_sig:
hash_algorithm, temp_url_sig = temp_url_sig.split(':', 1)
if ('-' in temp_url_sig or '_' in temp_url_sig) and not (
'+' in temp_url_sig or '/' in temp_url_sig):
temp_url_sig = temp_url_sig.replace('-', '+').replace('_', '/')
try:
temp_url_sig = binascii.hexlify(strict_b64decode(
temp_url_sig + '=='))
except ValueError:
return self._invalid(env, start_response)
elif len(temp_url_sig) == 40:
hash_algorithm = 'sha1'
elif len(temp_url_sig) == 64:
hash_algorithm = 'sha256'
else:
return self._invalid(env, start_response)
if hash_algorithm not in self.allowed_digests:
return self._invalid(env, start_response)
account, container, obj = self._get_path_parts(env)
if not account:
return self._invalid(env, start_response)
keys = self._get_keys(env)
if not keys:
return self._invalid(env, start_response)
if temp_url_prefix is None:
path = '/v1/%s/%s/%s' % (account, container, obj)
else:
if not obj.startswith(temp_url_prefix):
return self._invalid(env, start_response)
path = 'prefix:/v1/%s/%s/%s' % (account, container,
temp_url_prefix)
if env['REQUEST_METHOD'] == 'HEAD':
hmac_vals = [
hmac for method in ('HEAD', 'GET', 'POST', 'PUT')
for hmac in self._get_hmacs(
env, temp_url_expires, path, keys, hash_algorithm,
request_method=method)]
else:
hmac_vals = self._get_hmacs(
env, temp_url_expires, path, keys, hash_algorithm)
is_valid_hmac = False
hmac_scope = None
for hmac, scope in hmac_vals:
# While it's true that we short-circuit, this doesn't affect the
# timing-attack resistance since the only way this will
# short-circuit is when a valid signature is passed in.
if streq_const_time(temp_url_sig, hmac):
is_valid_hmac = True
hmac_scope = scope
break
if not is_valid_hmac:
return self._invalid(env, start_response)
# disallowed headers prevent accidentally allowing upload of a pointer
# to data that the PUT tempurl would not otherwise allow access for.
# It should be safe to provide a GET tempurl for data that an
# untrusted client just uploaded with a PUT tempurl.
resp = self._clean_disallowed_headers(env, start_response)
if resp:
return resp
self._clean_incoming_headers(env)
if hmac_scope == ACCOUNT_SCOPE:
env['swift.authorize'] = authorize_same_account(account)
else:
env['swift.authorize'] = authorize_same_container(account,
container)
env['swift.authorize_override'] = True
env['REMOTE_USER'] = '.wsgi.tempurl'
qs = {'temp_url_sig': temp_url_sig,
'temp_url_expires': temp_url_expires}
if temp_url_prefix is not None:
qs['temp_url_prefix'] = temp_url_prefix
if filename:
qs['filename'] = filename
env['QUERY_STRING'] = urlencode(qs)
def _start_response(status, headers, exc_info=None):
headers = self._clean_outgoing_headers(headers)
if env['REQUEST_METHOD'] in ('GET', 'HEAD') and status[0] == '2':
# figure out the right value for content-disposition
# 1) use the value from the query string
# 2) use the value from the object metadata
# 3) use the object name (default)
out_headers = []
existing_disposition = None
for h, v in headers:
if h.lower() != 'content-disposition':
out_headers.append((h, v))
else:
existing_disposition = v
if inline_disposition:
if filename:
disposition_value = disposition_format('inline',
filename)
else:
disposition_value = 'inline'
elif filename:
disposition_value = disposition_format('attachment',
filename)
elif existing_disposition:
disposition_value = existing_disposition
else:
name = basename(env['PATH_INFO'].rstrip('/'))
disposition_value = disposition_format('attachment',
name)
# this is probably just paranoia, I couldn't actually get a
# newline into existing_disposition
value = disposition_value.replace('\n', '%0A')
out_headers.append(('Content-Disposition', value))
# include Expires header for better cache-control
out_headers.append(('Expires', strftime(
"%a, %d %b %Y %H:%M:%S GMT",
gmtime(temp_url_expires))))
headers = out_headers
return start_response(status, headers, exc_info)
return self.app(env, _start_response)
def _get_path_parts(self, env):
"""
Return the account, container and object name for the request,
if it's an object request and one of the configured methods;
otherwise, None is returned.
:param env: The WSGI environment for the request.
:returns: (Account str, container str, object str) or
(None, None, None).
"""
if env['REQUEST_METHOD'] in self.conf['methods']:
try:
ver, acc, cont, obj = split_path(env['PATH_INFO'], 4, 4, True)
except ValueError:
return (None, None, None)
if ver == 'v1' and obj.strip('/'):
return (acc, cont, obj)
return (None, None, None)
def _get_temp_url_info(self, env):
"""
Returns the provided temporary URL parameters (sig, expires, prefix),
if given and syntactically valid. Either sig, expires or prefix could
be None if not provided. If provided, expires is also
converted to an int if possible or 0 if not, and checked for
expiration (returns 0 if expired).
:param env: The WSGI environment for the request.
:returns: (sig, expires, prefix, filename, inline) as described above.
"""
temp_url_sig = temp_url_expires = temp_url_prefix = filename =\
inline = None
qs = parse_qs(env.get('QUERY_STRING', ''), keep_blank_values=True)
if 'temp_url_sig' in qs:
temp_url_sig = qs['temp_url_sig'][0]
if 'temp_url_expires' in qs:
try:
temp_url_expires = int(qs['temp_url_expires'][0])
except ValueError:
try:
temp_url_expires = timegm(strptime(
qs['temp_url_expires'][0],
EXPIRES_ISO8601_FORMAT))
except ValueError:
temp_url_expires = 0
if temp_url_expires < time():
temp_url_expires = 0
if 'temp_url_prefix' in qs:
temp_url_prefix = qs['temp_url_prefix'][0]
if 'filename' in qs:
filename = qs['filename'][0]
if 'inline' in qs:
inline = True
return (temp_url_sig, temp_url_expires, temp_url_prefix, filename,
inline)
def _get_keys(self, env):
"""
Returns the X-[Account|Container]-Meta-Temp-URL-Key[-2] header values
for the account or container, or an empty list if none are set. Each
value comes as a 2-tuple (key, scope), where scope is either
CONTAINER_SCOPE or ACCOUNT_SCOPE.
Returns 0-4 elements depending on how many keys are set in the
account's or container's metadata.
:param env: The WSGI environment for the request.
:returns: [
            (X-Account-Meta-Temp-URL-Key str value, ACCOUNT_SCOPE) if set,
            (X-Account-Meta-Temp-URL-Key-2 str value, ACCOUNT_SCOPE) if set,
            (X-Container-Meta-Temp-URL-Key str value, CONTAINER_SCOPE) if set,
            (X-Container-Meta-Temp-URL-Key-2 str value, CONTAINER_SCOPE) if set,
]
"""
account_info = get_account_info(env, self.app, swift_source='TU')
account_keys = get_tempurl_keys_from_metadata(account_info['meta'])
container_info = get_container_info(env, self.app, swift_source='TU')
container_keys = get_tempurl_keys_from_metadata(
container_info.get('meta', []))
return ([(ak, ACCOUNT_SCOPE) for ak in account_keys] +
[(ck, CONTAINER_SCOPE) for ck in container_keys])
def _get_hmacs(self, env, expires, path, scoped_keys, hash_algorithm,
request_method=None):
"""
:param env: The WSGI environment for the request.
:param expires: Unix timestamp as an int for when the URL
expires.
:param path: The path which is used for hashing.
:param scoped_keys: (key, scope) tuples like _get_keys() returns
:param hash_algorithm: The hash algorithm to use.
:param request_method: Optional override of the request in
the WSGI env. For example, if a HEAD
does not match, you may wish to
override with GET to still allow the
HEAD.
:returns: a list of (hmac, scope) 2-tuples
"""
if not request_method:
request_method = env['REQUEST_METHOD']
digest = functools.partial(hashlib.new, hash_algorithm)
return [
(get_hmac(request_method, path, expires, key, digest), scope)
for (key, scope) in scoped_keys]
def _invalid(self, env, start_response):
"""
Performs the necessary steps to indicate a WSGI 401
Unauthorized response to the request.
:param env: The WSGI environment for the request.
:param start_response: The WSGI start_response hook.
:returns: 401 response as per WSGI.
"""
if env['REQUEST_METHOD'] == 'HEAD':
body = None
else:
body = '401 Unauthorized: Temp URL invalid\n'
return HTTPUnauthorized(body=body)(env, start_response)
def _clean_disallowed_headers(self, env, start_response):
"""
Validate the absence of disallowed headers for "unsafe" operations.
:returns: None for safe operations or swob.HTTPBadResponse if the
request includes disallowed headers.
"""
if env['REQUEST_METHOD'] in ('GET', 'HEAD', 'OPTIONS'):
return
for h in env:
if h in self.disallowed_headers:
return HTTPBadRequest(
body='The header %r is not allowed in this tempurl' %
h[len('HTTP_'):].title().replace('_', '-'))(
env, start_response)
def _clean_incoming_headers(self, env):
"""
Removes any headers from the WSGI environment as per the
middleware configuration for incoming requests.
:param env: The WSGI environment for the request.
"""
        # iterate over a static list of keys since we may delete from env
        for h in list(env.keys()):
if h in self.incoming_allow_headers:
continue
for p in self.incoming_allow_headers_startswith:
if h.startswith(p):
break
else:
if h in self.incoming_remove_headers:
del env[h]
continue
for p in self.incoming_remove_headers_startswith:
if h.startswith(p):
del env[h]
break
def _clean_outgoing_headers(self, headers):
"""
Removes any headers as per the middleware configuration for
outgoing responses.
        :param headers: A WSGI start_response style list of headers,
                        [('header1', 'value'), ('header2', 'value'),
                        ...]
        :returns: The same headers list, but with some headers
                  removed as per the middleware configuration for
                  outgoing responses.
"""
headers = HeaderKeyDict(headers)
        for h in list(headers.keys()):
if h in self.outgoing_allow_headers:
continue
for p in self.outgoing_allow_headers_startswith:
if h.startswith(p):
break
else:
if h in self.outgoing_remove_headers:
del headers[h]
continue
for p in self.outgoing_remove_headers_startswith:
if h.startswith(p):
del headers[h]
break
return headers.items()
def filter_factory(global_conf, **local_conf):
"""Returns the WSGI filter for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
defaults = {
'methods': 'GET HEAD PUT POST DELETE',
'incoming_remove_headers': DEFAULT_INCOMING_REMOVE_HEADERS,
'incoming_allow_headers': DEFAULT_INCOMING_ALLOW_HEADERS,
'outgoing_remove_headers': DEFAULT_OUTGOING_REMOVE_HEADERS,
'outgoing_allow_headers': DEFAULT_OUTGOING_ALLOW_HEADERS,
'allowed_digests': DEFAULT_ALLOWED_DIGESTS,
}
info_conf = {k: conf.get(k, v).split() for k, v in defaults.items()}
allowed_digests = set(digest.lower()
for digest in info_conf['allowed_digests'])
not_supported = allowed_digests - SUPPORTED_DIGESTS
if not_supported:
logger = get_logger(conf, log_route='tempurl')
logger.warning('The following digest algorithms are configured but '
'not supported: %s', ', '.join(not_supported))
allowed_digests -= not_supported
if not allowed_digests:
raise ValueError('No valid digest algorithms are configured '
'for tempurls')
info_conf['allowed_digests'] = sorted(allowed_digests)
register_swift_info('tempurl', **info_conf)
conf.update(info_conf)
return lambda app: TempURL(app, conf)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for compute_gradient."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import \
gradient_checker_v2 as gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
# needs this to register gradient for SoftmaxCrossEntropyWithLogits:
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _random_complex(shape, dtype):
data = np.random.random_sample(shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
data.imag = np.random.random_sample(shape)
return data
@test_util.run_all_in_graph_and_eager_modes
class GradientCheckerTest(test.TestCase):
def testWithStaticShape(self):
size = (2, 3)
constant = constant_op.constant(2.0, shape=size, name="const")
def add_constant_with_static_shape_check(x):
self.assertAllEqual(x.shape.as_list(), constant.shape.as_list())
return x + constant
x = constant_op.constant(3.0, shape=size, name="x")
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
add_constant_with_static_shape_check, [x]))
self.assertLess(error, 1e-4)
def testAddSimple(self):
size = (2, 3)
x1 = constant_op.constant(2.0, shape=size, name="x1")
x2 = constant_op.constant(3.0, shape=size, name="x2")
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
lambda x1: math_ops.add(x1, x2), [x1]))
tf_logging.info("x1 error = %f", error)
self.assertLess(error, 1e-4)
def testAddCustomized(self):
size = (2, 3)
x1 = constant_op.constant(2.0, shape=size, dtype=dtypes.float64, name="x1")
x2 = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3))
    # checking gradients for x2 using a special delta
error = gradient_checker.max_error(*gradient_checker.compute_gradient(
lambda x2: math_ops.add(x1, x2), [x2], delta=1e-2))
tf_logging.info("x2 error = %f", error)
self.assertLess(error, 1e-10)
def testGather(self):
def f(params):
index_values = [1, 3]
indices = constant_op.constant(index_values, name="i")
return array_ops.gather(params, indices, name="y")
p_shape = (4, 2)
p_size = 8
params = constant_op.constant(
        np.arange(p_size).astype(np.float64), shape=p_shape, name="p")
error = gradient_checker.max_error(
*gradient_checker.compute_gradient(f, [params]))
tf_logging.info("gather error = %f", error)
self.assertLess(error, 1e-4)
def testNestedGather(self):
def f(params):
index_values = [1, 3, 5, 6]
indices = constant_op.constant(index_values, name="i")
y = array_ops.gather(params, indices, name="y")
index_values2 = [0, 2]
indices2 = constant_op.constant(index_values2, name="i2")
return array_ops.gather(y, indices2, name="y2")
p_shape = (8, 2)
p_size = 16
params = constant_op.constant(
        np.arange(p_size).astype(np.float64), shape=p_shape, name="p")
error = gradient_checker.max_error(
*gradient_checker.compute_gradient(f, [params]))
tf_logging.info("nested gather error = %f", error)
self.assertLess(error, 1e-4)
def testComplexMul(self):
c = constant_op.constant(5 + 7j, dtype=dtypes.complex64)
def f(x):
return c * x
x_shape = c.shape
x_dtype = c.dtype
x = constant_op.constant(_random_complex(x_shape, x_dtype))
analytical, numerical = gradient_checker.compute_gradient(f, [x])
correct = np.array([[5, -7], [7, 5]])
self.assertAllEqual(correct, analytical[0])
self.assertAllClose(correct, numerical[0], rtol=1e-4)
x = constant_op.constant(_random_complex(x_shape, x_dtype))
self.assertLess(
gradient_checker.max_error(*gradient_checker.compute_gradient(f, [x])),
3e-4)
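  # Why the expected Jacobian above is [[5, -7], [7, 5]]: multiplying by
  # c = a + bi acts on (Re x, Im x) as the real 2x2 matrix [[a, -b], [b, a]],
  # here with a = 5 and b = 7. The conjugation test below expects
  # [[1, 0], [0, -1]] for the same reason.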
def testComplexConj(self):
def f(x):
return math_ops.conj(x)
x_shape = ()
x_dtype = dtypes.complex64
x = constant_op.constant(_random_complex(x_shape, x_dtype))
analytical, numerical = gradient_checker.compute_gradient(f, [x])
correct = np.array([[1, 0], [0, -1]])
self.assertAllEqual(correct, analytical[0])
self.assertAllClose(correct, numerical[0], rtol=2e-5)
x = constant_op.constant(_random_complex(x_shape, x_dtype))
self.assertLess(
gradient_checker.max_error(*gradient_checker.compute_gradient(f, [x])),
2e-5)
def testEmptySucceeds(self):
def f(x):
return array_ops.identity(x)
x = constant_op.constant(
np.random.random_sample((0, 3)), dtype=dtypes.float32)
for grad in gradient_checker.compute_gradient(f, [x]):
self.assertEqual(grad[0].shape, (0, 0))
error = gradient_checker.max_error(
*gradient_checker.compute_gradient(f, [x]))
self.assertEqual(error, 0)
def testEmptyMatMul(self):
def f(x, y):
return math_ops.matmul(x, y)
x = constant_op.constant(
np.random.random_sample((0, 3)), dtype=dtypes.float32)
y = constant_op.constant(
np.random.random_sample((3, 4)), dtype=dtypes.float32)
for grad in gradient_checker.compute_gradient(f, [x, y]):
self.assertEqual(grad[0].shape, (0, 0))
self.assertEqual(grad[1].shape, (0, 12))
error = gradient_checker.max_error(
*gradient_checker.compute_gradient(f, [x, y]))
self.assertEqual(error, 0)
def testEmptyFails(self):
@custom_gradient.custom_gradient
def id_bad_grad(x):
y = array_ops.identity(x)
def grad_fn(dy):
# dx = constant_op.constant(np.zeros((1, 4)), dtype=dtypes.float32)
dx = array_ops.transpose(dy)
return dx
return y, grad_fn
def f(x):
return id_bad_grad(x)
x = constant_op.constant(
np.random.random_sample((0, 3)), dtype=dtypes.float32)
bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
with self.assertRaisesRegexp(ValueError, bad):
gradient_checker.compute_gradient(f, [x])
def testNaNGradFails(self):
@custom_gradient.custom_gradient
def id_nan_grad(x):
y = array_ops.identity(x)
def grad_fn(dy):
dx = np.nan * dy
# dx = dy
return dx
return y, grad_fn
def f(x):
return id_nan_grad(x)
x = constant_op.constant(
np.random.random_sample((1, 1)), dtype=dtypes.float32)
error = gradient_checker.max_error(
*gradient_checker.compute_gradient(f, [x]))
# Typical test would assert error < max_err, so assert this test would
# raise AssertionError, since NaN is not < 1.0.
with self.assertRaisesRegexp(AssertionError, "nan not less than 1.0"):
self.assertLess(error, 1.0)
def testGradGrad(self):
def f(x):
with backprop.GradientTape() as tape:
tape.watch(x)
y = math_ops.square(x)
z = math_ops.square(y)
return tape.gradient(z, x)
analytical, numerical = gradient_checker.compute_gradient(f, [2.0])
self.assertAllEqual([[[48.]]], analytical)
self.assertAllClose([[[48.]]], numerical, rtol=1e-4)
@test_util.run_all_in_graph_and_eager_modes
class MiniMNISTTest(test.TestCase):
# Gradient checker for MNIST.
def _BuildAndTestMiniMNIST(self, param_index, tag):
# Fix seed to avoid occasional flakiness
np.random.seed(6)
# Hyperparameters
batch = 3
inputs = 16
features = 32
classes = 10
# Define the parameters
inp_data = np.random.random_sample(inputs * batch)
hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs)
hidden_bias_data = np.random.random_sample(features)
sm_weight_data = np.random.randn(features * classes) / np.sqrt(features)
sm_bias_data = np.random.random_sample(classes)
# special care for labels since they need to be normalized per batch
label_data = np.random.random(batch * classes).reshape((batch, classes))
s = label_data.sum(axis=1)
label_data /= s[:, None]
# We treat the inputs as "parameters" here
inp = constant_op.constant(
inp_data.tolist(),
shape=[batch, inputs],
dtype=dtypes.float64,
name="inp")
hidden_weight = constant_op.constant(
hidden_weight_data.tolist(),
shape=[inputs, features],
dtype=dtypes.float64,
name="hidden_weight")
hidden_bias = constant_op.constant(
hidden_bias_data.tolist(),
shape=[features],
dtype=dtypes.float64,
name="hidden_bias")
softmax_weight = constant_op.constant(
sm_weight_data.tolist(),
shape=[features, classes],
dtype=dtypes.float64,
name="softmax_weight")
softmax_bias = constant_op.constant(
sm_bias_data.tolist(),
shape=[classes],
dtype=dtypes.float64,
name="softmax_bias")
    # List all the parameters so that we can test them one at a time
all_params = [inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias]
    # Now, build the mini MNIST network
def f(inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias):
features = nn_ops.relu(
nn_ops.xw_plus_b(inp, hidden_weight, hidden_bias), name="features")
logits = nn_ops.xw_plus_b(
features, softmax_weight, softmax_bias, name="logits")
labels = constant_op.constant(
label_data.tolist(),
shape=[batch, classes],
dtype=dtypes.float64,
name="labels")
cost = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name="cost")
return cost
def f_restricted(x):
xs = all_params
i = param_index
# use x for the i-th parameter
xs = xs[0:i] + [x] + xs[i + 1:]
return f(*xs)
# Test the gradients.
err = gradient_checker.max_error(*gradient_checker.compute_gradient(
f_restricted, [all_params[param_index]], delta=1e-5))
tf_logging.info("Mini MNIST: %s gradient error = %g", tag, err)
return err
def testInputGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(0, "input"), 1e-8)
def testHiddenWeightGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(1, "hidden_weight"), 1e-8)
def testHiddenBiasGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(2, "hidden_bias"), 1e-8)
def testSoftmaxWeightGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(3, "softmax_weight"), 1e-8)
def testSoftmaxBiasGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(4, "softmax_bias"), 1e-8)
if __name__ == "__main__":
test.main()
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import chain
import torch
from fairseq import optim, utils
from .dynamic_loss_scaler import DynamicLossScaler
class _FP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
        # forward __init__ call to the next class in MRO (method resolution order)
super().__init__(*args, **kwargs)
@property
def has_flat_params(self):
return torch.is_tensor(self.fp32_params)
@classmethod
def build_fp32_params(cls, params, flatten=True):
# create FP32 copy of parameters and grads
if flatten:
total_param_size = sum(p.data.numel() for p in params)
fp32_params = torch.zeros(total_param_size, dtype=torch.float, device=params[0].device)
offset = 0
for p in params:
numel = p.data.numel()
fp32_params[offset:offset+numel].copy_(p.data.view(-1))
offset += numel
fp32_params = torch.nn.Parameter(fp32_params)
fp32_params.grad = fp32_params.data.new(total_param_size)
return fp32_params
else:
fp32_params = []
for p in params:
p32 = torch.nn.Parameter(p.data.float())
p32.grad = torch.zeros_like(p32.data)
fp32_params.append(p32)
return fp32_params
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.fp32_optimizer.state_dict()
if self.scaler is not None:
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if 'loss_scale' in state_dict and self.scaler is not None:
self.scaler.loss_scale = state_dict['loss_scale']
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
if self.scaler is not None:
loss = self.scaler.scale(loss)
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self, multiply_grads=1.):
if self._needs_sync:
if self.scaler is not None:
# correct for dynamic loss scaler
multiply_grads /= self.scaler.loss_scale
# copy FP16 grads to FP32
if self.has_flat_params:
offset = 0
for p in self.fp16_params:
if not p.requires_grad:
continue
grad_data = p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape)
numel = grad_data.numel()
self.fp32_params.grad.data[offset:offset+numel].copy_(grad_data.view(-1))
offset += numel
self.fp32_params.grad.data.mul_(multiply_grads)
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
if p.grad is not None:
p32.grad.data.copy_(p.grad.data)
p32.grad.data.mul_(multiply_grads)
else:
p32.grad = torch.zeros_like(p.data, dtype=torch.float)
self._needs_sync = False
def _sync_fp32_grads_to_fp16(self):
# copy FP32 params back into FP16 model
if self.has_flat_params:
offset = 0
for p in self.fp16_params:
if not p.requires_grad:
continue
numel = p.data.numel()
p.data.copy_(self.fp32_params.data[offset:offset+numel].view_as(p.data))
offset += numel
else:
for p, p32 in zip(self.fp16_params, self.fp32_params):
if not p.requires_grad:
continue
p.data.copy_(p32.data)
def multiply_grads(self, c):
"""Multiplies grads by a constant ``c``."""
if self._needs_sync:
self._sync_fp16_grads_to_fp32(c)
elif self.has_flat_params:
self.fp32_params.grad.data.mul_(c)
else:
for p32 in self.fp32_params:
p32.grad.data.mul_(c)
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm and updates dynamic loss scaler."""
self._sync_fp16_grads_to_fp32()
grad_norm = utils.clip_grad_norm_(self.fp32_params, max_norm, aggregate_norm_fn)
# detect overflow and adjust loss scale
if self.scaler is not None:
self.scaler.check_overflow(grad_norm)
return grad_norm
def step(self, closure=None):
"""Performs a single optimization step."""
self._sync_fp16_grads_to_fp32()
self.fp32_optimizer.step(closure)
if self.scaler is not None:
self.scaler.update()
self._sync_fp32_grads_to_fp16()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
self.fp32_params.grad.zero_()
else:
for p32 in self.fp32_params:
p32.grad.zero_()
self._needs_sync = False
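# Illustrative sketch (not part of fairseq): what the flattened FP32 copy
# produced by build_fp32_params(flatten=True) looks like for two small FP16
# parameters. It is never called by the library; it only documents the layout.
def _example_flat_fp32_copy():
    params = [
        torch.nn.Parameter(torch.ones(2, 2).half()),
        torch.nn.Parameter(torch.zeros(3).half()),
    ]
    fp32 = _FP16OptimizerMixin.build_fp32_params(params, flatten=True)
    # A single flat FP32 parameter holding all 4 + 3 = 7 elements, plus a
    # preallocated (uninitialized) grad buffer of the same size.
    assert fp32.shape == torch.Size([7])
    assert fp32.grad is not None
    return fp32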
class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
"""
def __init__(self, args, params, fp32_optimizer, fp32_params):
super().__init__(args)
self.fp16_params = params
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
if getattr(args, 'fp16_scale_window', None) is None:
if len(args.update_freq) > 1:
raise ValueError(
'--fp16-scale-window must be given explicitly when using a '
'custom --update-freq schedule'
)
data_parallel_size = int(args.distributed_world_size / args.model_parallel_size)
scale_window = int(2**14 / data_parallel_size / args.update_freq[0])
else:
scale_window = args.fp16_scale_window
if not getattr(args, 'bf16', False):
self.scaler = DynamicLossScaler(
init_scale=args.fp16_init_scale,
scale_window=scale_window,
tolerance=args.fp16_scale_tolerance,
threshold=args.threshold_loss_scale,
min_loss_scale=args.min_loss_scale
)
else:
# disable loss scaling for bfloat16
self.scaler = None
@classmethod
def build_optimizer(cls, args, params):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
flatten = not getattr(args, 'fp16_no_flatten_grads', False)
if getattr(args, 'bf16', False):
flatten = False # mixed precision is faster on TPUs without flat grads
fp32_params = cls.build_fp32_params(params, flatten=flatten)
if flatten:
fp32_optimizer = optim.build_optimizer(args, [fp32_params])
else:
fp32_optimizer = optim.build_optimizer(args, fp32_params)
if flatten and not fp32_optimizer.supports_flat_params:
raise RuntimeError(
'chosen optimizer does not support flat params, '
'please set --fp16-no-flatten-grads'
)
return cls(args, params, fp32_optimizer, fp32_params)
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
class _MemoryEfficientFP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
# forward __init__ call to the next class in MRO (method resolution order)
super().__init__(*args, **kwargs)
@property
def has_flat_params(self):
return False
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.wrapped_optimizer.state_dict()
if self.scaler is not None:
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if 'loss_scale' in state_dict and self.scaler is not None:
self.scaler.loss_scale = state_dict['loss_scale']
self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
# Hack: PyTorch automatically casts the optimizer state to match the
# type of the current parameters. But with --memory-efficient-fp16 the
# params are FP16 while the optimizer state is FP32 and we don't want
# to cast. A workaround is to manually copy back the original state
# after the optimizer has been loaded.
groups = self.optimizer.param_groups
saved_groups = state_dict['param_groups']
id_map = {
old_id: p
for old_id, p in zip(
chain(*(g['params'] for g in saved_groups)),
chain(*(g['params'] for g in groups))
)
}
for k, v in state_dict['state'].items():
if k in id_map:
param = id_map[k]
self.optimizer.state[param] = v
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
if self.scaler is not None:
loss = self.scaler.scale(loss)
loss.backward()
def _unscale_grads(self):
if self._multiply_factor != 1.:
self.wrapped_optimizer.multiply_grads(self._multiply_factor)
self._multiply_factor = 1.
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
self._multiply_factor *= c
def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
"""Clips gradient norm and updates dynamic loss scaler."""
max_norm = float(max_norm)
grad_norm = self._multiply_factor * self.wrapped_optimizer.clip_grad_norm(0, aggregate_norm_fn)
if self.scaler is not None:
grad_norm_cpu = float(grad_norm)
if grad_norm_cpu > max_norm > 0.:
self._multiply_factor *= max_norm / grad_norm_cpu
# detect overflow and adjust loss scale
self.scaler.check_overflow(grad_norm_cpu)
else:
clip_coef = (max_norm / (grad_norm + 1e-6)).clamp_(max=1)
self._multiply_factor *= clip_coef
return grad_norm
def step(self, closure=None):
"""Performs a single optimization step."""
if self.supports_step_with_scale:
# NOTE(msb) optimizer divides by scale factor
self.wrapped_optimizer.step(closure, scale=(1. / self._multiply_factor))
else:
self._unscale_grads()
self.wrapped_optimizer.step(closure)
if self.scaler is not None:
self.scaler.update()
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self.wrapped_optimizer.zero_grad()
if self.scaler is not None:
self._multiply_factor = 1. / float(self.scaler.loss_scale)
class MemoryEfficientFP16Optimizer(_MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not
maintain an FP32 copy of the model. We instead expect the optimizer to
convert the gradients to FP32 internally and sync the results back to the
FP16 model params. This significantly reduces memory usage but slightly
increases the time spent in the optimizer.
Since this wrapper depends on specific functionality in the wrapped
optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
optimizers can be wrapped. This is determined by the
*supports_memory_efficient_fp16* property.
"""
def __init__(self, args, params, optimizer):
if not optimizer.supports_memory_efficient_fp16:
raise ValueError(
'Unsupported optimizer: {}'.format(optimizer.__class__.__name__)
)
super().__init__(args)
self.wrapped_optimizer = optimizer
if getattr(args, 'fp16_scale_window', None) is None:
if len(args.update_freq) > 1:
raise ValueError(
'--fp16-scale-window must be given explicitly when using a '
'custom --update-freq schedule'
)
data_parallel_size = int(args.distributed_world_size / args.model_parallel_size)
            scale_window = int(2**14 / data_parallel_size / args.update_freq[0])
else:
scale_window = args.fp16_scale_window
if not getattr(args, 'bf16', False):
self.scaler = DynamicLossScaler(
init_scale=args.fp16_init_scale,
scale_window=scale_window,
tolerance=args.fp16_scale_tolerance,
threshold=args.threshold_loss_scale,
min_loss_scale=args.min_loss_scale
)
else:
# disable loss scaling for bfloat16
self.scaler = None
@classmethod
def build_optimizer(cls, args, params):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
fp16_optimizer = optim.build_optimizer(args, params)
return cls(args, params, fp16_optimizer)
@property
def optimizer(self):
return self.wrapped_optimizer.optimizer
@property
def optimizer_config(self):
return self.wrapped_optimizer.optimizer_config
def get_lr(self):
return self.wrapped_optimizer.get_lr()
def set_lr(self, lr):
self.wrapped_optimizer.set_lr(lr)
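# Illustrative selection sketch (not part of fairseq): how a caller might
# choose between the two wrappers. The `memory_efficient_fp16` attribute on
# `args` is an assumption here, mirroring the usual fairseq flag name.
def _example_build_fp16_wrapper(args, params):
    if getattr(args, 'memory_efficient_fp16', False):
        return MemoryEfficientFP16Optimizer.build_optimizer(args, params)
    return FP16Optimizer.build_optimizer(args, params)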
|
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# This code was written with funding by http://prometheusresearch.com
"""
Upload Progress Monitor
This is a WSGI middleware component which monitors the status of files
being uploaded. It includes a small query application which will return
a list of all files being uploaded by a particular session/user.
>>> from paste.httpserver import serve
>>> from paste.urlmap import URLMap
>>> from paste.auth.basic import AuthBasicHandler
>>> from paste.debug.debugapp import SlowConsumer, SimpleApplication
>>> # from paste.progress import *
>>> realm = 'Test Realm'
>>> def authfunc(username, password):
... return username == password
>>> map = URLMap({})
>>> ups = UploadProgressMonitor(map, threshold=1024)
>>> map['/upload'] = SlowConsumer()
>>> map['/simple'] = SimpleApplication()
>>> map['/report'] = UploadProgressReporter(ups)
>>> serve(AuthBasicHandler(ups, realm, authfunc))
serving on...
.. note::
This is experimental, and will change in the future.
"""
import time
from paste.wsgilib import catch_errors
DEFAULT_THRESHOLD = 1024 * 1024 # one megabyte
DEFAULT_TIMEOUT = 60*5 # five minutes
ENVIRON_RECEIVED = 'paste.bytes_received'
REQUEST_STARTED = 'paste.request_started'
REQUEST_FINISHED = 'paste.request_finished'
class _ProgressFile(object):
"""
This is the input-file wrapper used to record the number of
``paste.bytes_received`` for the given request.
"""
def __init__(self, environ, rfile):
self._ProgressFile_environ = environ
self._ProgressFile_rfile = rfile
self.flush = rfile.flush
self.write = rfile.write
self.writelines = rfile.writelines
def __iter__(self):
environ = self._ProgressFile_environ
riter = iter(self._ProgressFile_rfile)
def iterwrap():
for chunk in riter:
environ[ENVIRON_RECEIVED] += len(chunk)
yield chunk
        return iter(iterwrap())
def read(self, size=-1):
chunk = self._ProgressFile_rfile.read(size)
self._ProgressFile_environ[ENVIRON_RECEIVED] += len(chunk)
return chunk
def readline(self):
chunk = self._ProgressFile_rfile.readline()
self._ProgressFile_environ[ENVIRON_RECEIVED] += len(chunk)
return chunk
def readlines(self, hint=None):
chunk = self._ProgressFile_rfile.readlines(hint)
        self._ProgressFile_environ[ENVIRON_RECEIVED] += sum(
            len(line) for line in chunk)
return chunk
class UploadProgressMonitor(object):
"""
monitors and reports on the status of uploads in progress
Parameters:
``application``
This is the next application in the WSGI stack.
``threshold``
This is the size in bytes that is needed for the
upload to be included in the monitor.
``timeout``
        This is the amount of time (in seconds) that an upload
remains in the monitor after it has finished.
Methods:
``uploads()``
This returns a list of ``environ`` dict objects for each
upload being currently monitored, or finished but whose time
has not yet expired.
For each request ``environ`` that is monitored, there are several
variables that are stored:
``paste.bytes_received``
This is the total number of bytes received for the given
request; it can be compared with ``CONTENT_LENGTH`` to
build a percentage complete. This is an integer value.
``paste.request_started``
This is the time (in seconds) when the request was started
as obtained from ``time.time()``. One would want to format
this for presentation to the user, if necessary.
``paste.request_finished``
This is the time (in seconds) when the request was finished,
canceled, or otherwise disconnected. This is None while
the given upload is still in-progress.
TODO: turn monitor into a queue and purge queue of finished
requests that have passed the timeout period.
"""
def __init__(self, application, threshold=None, timeout=None):
self.application = application
self.threshold = threshold or DEFAULT_THRESHOLD
self.timeout = timeout or DEFAULT_TIMEOUT
self.monitor = []
def __call__(self, environ, start_response):
length = environ.get('CONTENT_LENGTH', 0)
if length and int(length) > self.threshold:
# replace input file object
self.monitor.append(environ)
environ[ENVIRON_RECEIVED] = 0
environ[REQUEST_STARTED] = time.time()
environ[REQUEST_FINISHED] = None
environ['wsgi.input'] = \
_ProgressFile(environ, environ['wsgi.input'])
def finalizer(exc_info=None):
environ[REQUEST_FINISHED] = time.time()
return catch_errors(self.application, environ,
start_response, finalizer, finalizer)
return self.application(environ, start_response)
def uploads(self):
return self.monitor
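# Illustrative helper (not part of the original module): compute a rough
# percent-complete figure for one monitored request from the variables
# documented in UploadProgressMonitor. Assumes the request carried a
# CONTENT_LENGTH header; returns None otherwise.
def _example_percent_complete(environ):
    total = int(environ.get('CONTENT_LENGTH') or 0)
    if not total:
        return None
    return 100.0 * environ[ENVIRON_RECEIVED] / total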
class UploadProgressReporter(object):
"""
reports on the progress of uploads for a given user
This reporter returns a JSON file (for use in AJAX) listing the
uploads in progress for the given user. By default, this reporter
uses the ``REMOTE_USER`` environment to compare between the current
request and uploads in-progress. If they match, then a response
record is formed.
``match()``
        This member function can be overridden to provide alternative
matching criteria. It takes two environments, the first
is the current request, the second is a current upload.
``report()``
This member function takes an environment and builds a
``dict`` that will be used to create a JSON mapping for
the given upload. By default, this just includes the
percent complete and the request url.
"""
def __init__(self, monitor):
self.monitor = monitor
def match(self, search_environ, upload_environ):
if search_environ.get('REMOTE_USER', None) == \
upload_environ.get('REMOTE_USER', 0):
return True
return False
def report(self, environ):
retval = { 'started': time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime(environ[REQUEST_STARTED])),
'finished': '',
'content_length': environ.get('CONTENT_LENGTH'),
'bytes_received': environ[ENVIRON_RECEIVED],
'path_info': environ.get('PATH_INFO',''),
'query_string': environ.get('QUERY_STRING','')}
finished = environ[REQUEST_FINISHED]
if finished:
            retval['finished'] = time.strftime("%Y-%m-%d %H:%M:%S",
                                               time.gmtime(finished))
return retval
def __call__(self, environ, start_response):
body = []
for map in [self.report(env) for env in self.monitor.uploads()
if self.match(environ, env)]:
parts = []
for k, v in map.items():
v = str(v).replace("\\", "\\\\").replace('"', '\\"')
parts.append('%s: "%s"' % (k, v))
body.append("{ %s }" % ", ".join(parts))
body = "[ %s ]" % ", ".join(body)
start_response("200 OK", [('Content-Type', 'text/plain'),
                                  ('Content-Length', str(len(body)))])
return [body]
__all__ = ['UploadProgressMonitor', 'UploadProgressReporter']
if "__main__" == __name__:
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
|
#!/usr/bin/env python
import numpy as np
import cv2
import rospy
import tf
from std_msgs.msg import Header
from sensor_msgs.msg import CameraInfo, PointCloud
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
from geometry_msgs.msg import Point32
from ocular.msg import KeypointMotion
def make_empty_pose():
return Pose(
Point(0, 0, 0),
Quaternion(0, 0, 0, 1)
)
class InterframeCameraEstimator(object):
"""
    Takes interframe keypoint motion and generates a camera matrix from it.
    Uses the fundamental and essential matrices (assuming a pre-calibrated
    camera from the Tango), then decomposes the essential matrix via SVD to
    recover the camera rotation and translation between frames.
    Publishes the estimated pose to the 'interframe_pose' topic and the
    triangulated points to 'interframe_point_cloud'.
"""
def __init__(self):
super(InterframeCameraEstimator, self).__init__()
rospy.init_node('interframe_camera_estimator')
self.interframe_keypoint_motion_topic = rospy.Subscriber(
'keypoint_motion',
KeypointMotion,
self.new_motion_callback
)
self.camera_intrinsics_topic = rospy.Subscriber(
'tango/camera/fisheye_1/camera_info',
CameraInfo,
self.new_camera_intrinsics_callback
)
self.pub_pose = rospy.Publisher(
'interframe_pose',
PoseStamped,
queue_size=10
)
self.pub_point_cloud = rospy.Publisher(
'interframe_point_cloud',
PointCloud,
queue_size=10
)
self.camera_intrinsics = None
self.accumulated_pose = make_empty_pose()
        # Base camera at the origin: the canonical 3x4 projection [I | 0]
        self.base_transformation_mat = np.matrix([
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, 0]
        ], np.float32)
def new_camera_intrinsics_callback(self, new_camera_info):
"""
Store the camera intrinsics.
We need this for the calibration matrices from the Tango
"""
self.camera_intrinsics = new_camera_info
self.k_mat = np.matrix(
np.array(self.camera_intrinsics.K).reshape((3, 3))
)
self.k_inv = self.k_mat.I
def new_motion_callback(self, new_motion_msg):
"""
Take keypoint motion data from other node and process it
"""
# we can't do anything until we have the camera calibration
if self.camera_intrinsics is None:
# TOmaybeDO: use a wait_for_message instead of missing a frame?
return
previous_kp = np.stack(
(new_motion_msg.prev_x, new_motion_msg.prev_y),
axis=1
)
current_kp = np.stack(
(new_motion_msg.cur_x, new_motion_msg.cur_y),
axis=1
)
f_mat = self.calculate_fundamental_matrix(previous_kp, current_kp)
camera_matrix, R_mat, t_mat = self.manually_calculate_pose(f_mat)
error_amount, triangulated = self.triangulation(
previous_kp, current_kp,
self.base_transformation_mat, camera_matrix
)
# print np.linalg.norm(np.array(error_amount))
for p in triangulated:
print p
self.pub_point_cloud.publish(
header=Header(
stamp=rospy.Time.now(), # TODO: use camera image time
frame_id='map'
),
points=[Point32(p[0], p[1], p[2]) for p in triangulated]
)
# get quaternion from rotation matrix
tf_rot = np.identity(4)
tf_rot[0:3, 0:3] = R_mat
quat = tf.transformations.quaternion_from_matrix(tf_rot)
old_quat = self.accumulated_pose.orientation
new_quat = tf.transformations.quaternion_multiply(
[old_quat.x, old_quat.y, old_quat.z, old_quat.w],
quat
)
normalized_new_quat = tf.transformations.quaternion_from_euler(
*tf.transformations.euler_from_quaternion(new_quat)
)
print normalized_new_quat
self.accumulated_pose.orientation = Quaternion(
*normalized_new_quat
)
self.pub_pose.publish(
header=Header(
stamp=rospy.Time.now(), # TODO: use camera image time
frame_id='map'
),
pose=Pose(
Point(
0, 0, 0
),
self.accumulated_pose.orientation
)
)
def triangulation(self, kp_a, kp_b, cam_a, cam_b):
"""
Returns a point cloud
"""
reproj_error = []
point_cloud = []
for i in range(len(kp_a)):
# convert to normalized homogeneous coordinates
kp = kp_a[i]
u = np.array([kp[0], kp[1], 1.0])
mat_um = self.k_inv * np.matrix(u).T
u = np.array(mat_um[:, 0])
kp_ = kp_b[i]
u_ = np.array([kp_[0], kp_[1], 1.0])
mat_um_ = self.k_inv * np.matrix(u_).T
u_ = np.array(mat_um_[:, 0])
# now we triangulate!
x = self.linear_ls_triangulation(
u, cam_a, u_, cam_b
)
point_cloud.append(x.flatten())
# calculate reprojection error
# reproject to other img
x_for_camera = np.matrix(
np.append(x, [[1.0]], axis=0)
)
x_pt_img = np.array(self.k_mat * cam_b * x_for_camera).flatten()
x_pt_img_ = np.array([
x_pt_img[0] / x_pt_img[2],
x_pt_img[1] / x_pt_img[2]
])
# check error in matched keypoint
reproj_error.append(
np.linalg.norm(x_pt_img_ - kp_)
)
return reproj_error, point_cloud
def linear_ls_triangulation(self, point_a, cam_a, point_b, cam_b):
"""
Python version of
Mastering Opencv With Practical Computer Vision Projects'
LST implementation on page 144
"""
# build A matrix
# import pdb; pdb.set_trace()
point_a = point_a.flatten()
point_b = point_b.flatten()
mat_a = np.matrix([
[point_a[0]*cam_a[2, 0]-cam_a[0, 0], point_a[0]*cam_a[2, 1]-cam_a[0, 1], point_a[0]*cam_a[2, 2]-cam_a[0, 2]],
[point_a[1]*cam_a[2, 0]-cam_a[1, 0], point_a[1]*cam_a[2, 1]-cam_a[1, 1], point_a[1]*cam_a[2, 2]-cam_a[1, 2]],
[point_b[0]*cam_b[2, 0]-cam_b[0, 0], point_b[0]*cam_b[2, 1]-cam_b[0, 1], point_b[0]*cam_b[2, 2]-cam_b[0, 2]],
[point_b[1]*cam_b[2, 0]-cam_b[1, 0], point_b[1]*cam_b[2, 1]-cam_b[1, 1], point_b[1]*cam_b[2, 2]-cam_b[1, 2]]
])
# build B vector
mat_b = np.matrix([
[-(point_a[0]*cam_a[2, 3]-cam_a[0, 3])],
[-(point_a[1]*cam_a[2, 3]-cam_a[1, 3])],
[-(point_b[0]*cam_b[2, 3]-cam_b[0, 3])],
[-(point_b[1]*cam_b[2, 3]-cam_b[1, 3])]
])
# solve for X
_, x = cv2.solve(mat_a, mat_b, None, cv2.DECOMP_SVD)
return x
def calculate_fundamental_matrix(self, previous_pts, current_pts):
fundamental_matrix, mask = cv2.findFundamentalMat(
previous_pts,
current_pts,
cv2.FM_RANSAC
)
if fundamental_matrix is None or fundamental_matrix.shape == (1, 1):
# dang, no fundamental matrix found
raise Exception('No fundamental matrix found')
elif fundamental_matrix.shape[0] > 3:
# more than one matrix found, just pick the first
fundamental_matrix = fundamental_matrix[0:3, 0:3]
return np.matrix(fundamental_matrix)
def manually_calculate_pose(self, f_mat):
# get essential matrix from the fundamental
# I am assuming that only one calibration matrix is fine here, because
# only one type of camera is being used.
e_mat = self.k_mat.T * f_mat * self.k_mat
singular_values, u_mat, vt = cv2.SVDecomp(e_mat)
# reconstruction from SVD:
# np.dot(u_mat, np.dot(np.diag(singular_values.T[0]), vt))
u_mat = np.matrix(u_mat)
vt = np.matrix(vt)
# from Epipolar Geometry and the Fundamental Matrix 9.13
w_mat = np.matrix([
[0, -1, 0],
[1, 0, 0],
[0, 0, 1]
], np.float32)
R_mat = u_mat * w_mat * vt # HZ 9.19
t_mat = u_mat[:, 2] # get third column of u
# check rotation matrix for validity
        if abs(np.linalg.det(R_mat) - 1.0) > 1e-07:
print('{}\nDoes not appear to be a valid rotation matrix'.format(
R_mat
))
camera_matrix = np.column_stack((R_mat, t_mat))
return camera_matrix, R_mat, t_mat
def run(self):
""" The main run loop"""
r = rospy.Rate(100)
while not rospy.is_shutdown():
r.sleep()
if __name__ == '__main__':
cam_estimator = InterframeCameraEstimator()
cam_estimator.run()
|
|
import hashlib
import io
import json
import os
import zipfile
from base64 import b64decode, b64encode
from django.db import transaction
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.utils.encoding import force_bytes, force_str
import requests
import waffle
from django_statsd.clients import statsd
from requests_hawk import HawkAuth
from asn1crypto import cms
import olympia.core.logger
from olympia import amo
log = olympia.core.logger.getLogger('z.crypto')
SIGN_FOR_APPS = (amo.FIREFOX.id, amo.ANDROID.id)
class SigningError(Exception):
pass
def supports_firefox(file_obj):
"""Return True if the file supports Firefox or Firefox for Android.
We only sign files that are at least compatible with Firefox/Firefox for
Android.
"""
apps = file_obj.version.apps.all()
return apps.filter(max__application__in=SIGN_FOR_APPS)
def get_id(addon):
"""Return the addon GUID if <= 64 chars, or its sha256 hash otherwise.
We don't want GUIDs longer than 64 chars: bug 1203365.
"""
guid = force_bytes(addon.guid)
if len(guid) <= 64:
# Return guid as original unicode string.
return addon.guid
return force_str(hashlib.sha256(guid).hexdigest())
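# Illustrative sketch (not part of the original module): the same <=64
# character rule applied to a bare GUID string, to show what get_id()
# returns in each case. `_example_guid_to_id` is a hypothetical helper.
def _example_guid_to_id(guid):
    encoded = force_bytes(guid)
    if len(encoded) <= 64:
        return guid
    return force_str(hashlib.sha256(encoded).hexdigest())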
def use_promoted_signer(file_obj, promo_group):
return (
file_obj.version.channel == amo.RELEASE_CHANNEL_LISTED
and promo_group.autograph_signing_states
)
def add_guid(file_obj):
with storage.open(file_obj.current_file_path) as fobj:
# Get the file data and add the guid to the manifest if waffle switch is enabled
if waffle.switch_is_active('add-guid-to-manifest'):
with zipfile.ZipFile(fobj, mode='r') as existing_zip:
manifest_json = json.loads(existing_zip.read('manifest.json'))
if (
'browser_specific_settings' not in manifest_json
and manifest_json.get('applications', {}).get('gecko')
):
gecko_root = manifest_json['applications']['gecko']
else:
if 'browser_specific_settings' not in manifest_json:
manifest_json['browser_specific_settings'] = {}
if 'gecko' not in manifest_json['browser_specific_settings']:
manifest_json['browser_specific_settings']['gecko'] = {}
gecko_root = manifest_json['browser_specific_settings']['gecko']
if 'id' not in gecko_root:
gecko_root['id'] = file_obj.version.addon.guid
new_zip_buffer = io.BytesIO()
with zipfile.ZipFile(new_zip_buffer, mode='w') as new_zip:
for info in existing_zip.filelist:
if info.filename == 'manifest.json':
new_zip.writestr(
'manifest.json',
json.dumps(manifest_json, indent=2).encode('utf-8'),
)
else:
with new_zip.open(info.filename, mode='w') as new_file:
new_file.write(existing_zip.read(info))
return new_zip_buffer.getvalue()
else:
# we don't need to add a guid, so just return fobj as normal
fobj.seek(0)
return fobj.read()
def call_signing(file_obj):
"""Sign `file_obj` via autographs /sign/file endpoint.
:returns: The certificates serial number.
"""
conf = settings.AUTOGRAPH_CONFIG
input_data = force_str(b64encode(add_guid(file_obj)))
signing_data = {
'input': input_data,
'keyid': conf['signer'],
'options': {
'id': get_id(file_obj.version.addon),
# "Add-on variant A params (PKCS7 SHA1 and COSE ES256) work in
# Fx <57, so we can switch to that without breaking backwards
# compatibility"
# https://github.com/mozilla/addons-server/issues/9308
# This means, the pkcs7 sha1 signature is used for backwards
# compatibility and cose sha256 will be used for newer
# Firefox versions.
# The relevant pref in Firefox is
# "security.signed_app_signatures.policy"
# where it's set to COSEAndPKCS7WithSHA1OrSHA256 to match
# these settings.
'pkcs7_digest': 'SHA1',
'cose_algorithms': ['ES256'],
},
}
hawk_auth = HawkAuth(id=conf['user_id'], key=conf['key'])
# We are using a separate signer that adds the mozilla-recommendation.json
# file.
promo_group = file_obj.addon.promoted_group(currently_approved=False)
if use_promoted_signer(file_obj, promo_group):
signing_states = {
promo_group.autograph_signing_states.get(app.short)
for app in file_obj.addon.promotedaddon.all_applications
}
signing_data['keyid'] = conf['recommendation_signer']
signing_data['options']['recommendations'] = list(signing_states)
hawk_auth = HawkAuth(
id=conf['recommendation_signer_user_id'],
key=conf['recommendation_signer_key'],
)
with statsd.timer('services.sign.addon.autograph'):
response = requests.post(
'{server}/sign/file'.format(server=conf['server_url']),
json=[signing_data],
auth=hawk_auth,
)
if response.status_code != requests.codes.CREATED:
msg = f'Posting to add-on signing failed ({response.status_code})'
log.error(msg, extra={'reason': response.reason, 'text': response.text})
raise SigningError(msg)
# Save the returned file in our storage.
with storage.open(file_obj.current_file_path, 'wb') as fobj:
fobj.write(b64decode(response.json()[0]['signed_file']))
    # Now fetch the certificate's serial number. Future versions of
# autograph may return this in the response.
# https://github.com/mozilla-services/autograph/issues/214
# Now extract the file and fetch the pkcs signature
with zipfile.ZipFile(file_obj.current_file_path, mode='r') as zip_fobj:
return get_signer_serial_number(
zip_fobj.read(os.path.join('META-INF', 'mozilla.rsa'))
)
def sign_file(file_obj):
"""Sign a File if necessary.
If it's not necessary (file exists but it's a mozilla signed one) then
return the file directly.
If there's no endpoint (signing is not enabled) or isn't reviewed yet,
or there was an error while signing, raise an exception - it
shouldn't happen.
Otherwise proceed with signing and return the signed file.
"""
from olympia.git.utils import create_git_extraction_entry
if not settings.ENABLE_ADDON_SIGNING:
raise SigningError(f'Not signing file {file_obj.pk}: no active endpoint')
# No file? No signature.
if not os.path.exists(file_obj.current_file_path):
raise SigningError(f"File {file_obj.current_file_path} doesn't exist on disk")
# Don't sign Mozilla signed extensions (they're already signed).
if file_obj.is_mozilla_signed_extension:
# Don't raise an exception here, just log and return file_obj even
# though we didn't sign, it's not an error - we just don't need to do
# anything in this case.
log.info(
'Not signing file {}: mozilla signed extension is already '
'signed'.format(file_obj.pk)
)
return file_obj
# We only sign files that are compatible with Firefox.
if not supports_firefox(file_obj):
raise SigningError(
'Not signing version {}: not for a Firefox version we support'.format(
file_obj.version.pk
)
)
# Sign the file. If there's any exception, we skip the rest.
cert_serial_num = str(call_signing(file_obj))
size = storage.size(file_obj.current_file_path)
# Save the certificate serial number for revocation if needed, and re-hash
# the file now that it's been signed.
file_obj.update(
cert_serial_num=cert_serial_num,
hash=file_obj.generate_hash(),
is_signed=True,
size=size,
)
log.info(f'Signing complete for file {file_obj.pk}')
if waffle.switch_is_active('enable-uploads-commit-to-git-storage'):
# Schedule this version for git extraction.
transaction.on_commit(
lambda: create_git_extraction_entry(version=file_obj.version)
)
return file_obj
def is_signed(file_path):
"""Return True if the file has been signed.
This utility function will help detect if a XPI file has been signed by
mozilla (if we can't trust the File.is_signed field).
It will simply check the signature filenames, and assume that if they're
named "mozilla.*" then the xpi has been signed by us.
This is in no way a perfect or correct solution, it's just the way we
do it until we decide to inspect/walk the certificates chain to
validate it comes from Mozilla.
"""
try:
with zipfile.ZipFile(file_path, mode='r') as zf:
filenames = set(zf.namelist())
except (zipfile.BadZipFile, OSError):
filenames = set()
return {
'META-INF/mozilla.rsa',
'META-INF/mozilla.sf',
'META-INF/manifest.mf',
}.issubset(filenames)
class SignatureInfo:
def __init__(self, pkcs7):
if isinstance(pkcs7, SignatureInfo):
# Allow passing around SignatureInfo objects to avoid
# re-reading the signature every time.
self.content = pkcs7.content
else:
self.content = cms.ContentInfo.load(pkcs7).native['content']
@property
def signer_serial_number(self):
return self.signer_info['sid']['serial_number']
@property
def signer_info(self):
"""There should be only one SignerInfo for add-ons,
nss doesn't support multiples
See ttps://bugzilla.mozilla.org/show_bug.cgi?id=1357815#c4 for a few
more details.
"""
return self.content['signer_infos'][0]
@property
def issuer(self):
return self.signer_info['sid']['issuer']
@property
def signer_certificate(self):
for certificate in self.content['certificates']:
info = certificate['tbs_certificate']
is_signer_certificate = (
info['issuer'] == self.issuer
and info['serial_number'] == self.signer_serial_number
)
if is_signer_certificate:
return info
def get_signer_serial_number(pkcs7):
"""Return the signer serial number of the signature."""
return SignatureInfo(pkcs7).signer_serial_number
def get_signer_organizational_unit_name(pkcs7):
"""Return the OU of the signer certificate."""
cert = SignatureInfo(pkcs7).signer_certificate
return cert['subject']['organizational_unit_name']
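# Illustrative usage sketch (not part of the original module), assuming
# 'signed.xpi' is an add-on package that has already been signed:
def _example_read_serial(path='signed.xpi'):
    with zipfile.ZipFile(path, mode='r') as zf:
        return get_signer_serial_number(zf.read('META-INF/mozilla.rsa'))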
|
|
from clue.game.character import Colonel, Scarlet, Professor, Green, White, Peacock
from clue.game.location import (
    Study, Hall, Lounge, Library, BilliardRoom, DiningRoom, Conservatory,
    Ballroom, Kitchen, HallwayStudyToHall, HallwayHallToLounge,
    HallwayStudyToLibrary, HallwayHallToBilliardRoom,
    HallwayLoungeToDiningRoom, HallwayLibraryToBilliardRoom,
    HallwayBilliardRoomToDiningRoom, HallwayLibraryToConservatory,
    HallwayBilliardRoomToBallroom, HallwayDiningRoomToKitchen,
    HallwayConservatoryToBallroom, HallwayBallroomToKitchen,
)
from clue.game.card import Weapon, Suspect, Room
from clue.game.casefile import CaseFile
from clue.game.suggestion import Suggestion
from clue.game.error import GameError
from twisted.python import log
from random import randint, shuffle, choice
TURN_STATE_WAITING_TO_MOVE = "waiting_to_move"
TURN_STATE_WAITING_TO_SUGGEST = "waiting_to_suggest"
TURN_STATE_IS_SUGGESTING = "suggesting"
TURN_STATE_WAITING_TO_ACCUSE = "waiting_to_accuse"
TURN_STATE_IS_ACCUSING = "accusing"
class Game:
def __init__(self):
self.players = {}
self.in_progress = False
self.num_turns = 0
self.active_turn = None
self.active_turn_state = None
self.case_file = None
self.active_suggestion = None
self.active_suggestion_turn = None
self.winner = None
self.game_over = False
self.creator = None
self.grid = [
[Study(), HallwayStudyToHall(), Hall(), HallwayHallToLounge(), Lounge()],
[HallwayStudyToLibrary(), None, HallwayHallToBilliardRoom(), None, HallwayLoungeToDiningRoom()],
[Library(), HallwayLibraryToBilliardRoom(), BilliardRoom(), HallwayBilliardRoomToDiningRoom(), DiningRoom()],
[HallwayLibraryToConservatory(), None, HallwayBilliardRoomToBallroom(), None, HallwayDiningRoomToKitchen()],
[Conservatory(), HallwayConservatoryToBallroom(), Ballroom(), HallwayBallroomToKitchen(), Kitchen()]
]
def is_in_progress(self):
return self.in_progress
def num_players(self):
return len(self.players)
def add_player(self, player):
if player.id in self.players:
raise Exception("Player is already part of this game.")
# What character should this player be?
characters = [Colonel, Scarlet, Professor, Green, White, Peacock]
for option in characters:
unique = True
for playerId in self.players:
if self.players[playerId].character.color == option.color:
unique = False
break
if unique:
player.character = option(player)
break
if player.character is None:
raise Exception("Player has no character.")
self.players[player.id] = player
self.broadcast_game_state()
# Start the game if we have enough players.
if self.num_players() >= 6: self.start()
def remove_player(self, player):
try:
del self.players[player.id]
self.broadcast_game_state()
except KeyError: pass
def start(self):
if self.in_progress:
raise Exception("Game has already started.")
# Build the case file
# Select one random suspect, one random weapon, and one random room
deck = self.build_deck()
suspects = [card for card in deck if card.card_type == "suspect"]
rooms = [card for card in deck if card.card_type == "room"]
weapons = [card for card in deck if card.card_type == "weapon"]
self.case_file = CaseFile(choice(suspects), choice(weapons), choice(rooms))
# Remove these cards from the deck
deck.remove(self.case_file.suspect)
deck.remove(self.case_file.weapon)
deck.remove(self.case_file.location)
# Shuffle and deal the remaining cards to each player
shuffle(deck)
while len(deck) > 0:
for playerId in self.players:
if len(deck) == 0: break
self.players[playerId].give_card(deck.pop())
# Place all the characters in a random room
for playerId in self.players:
player = self.players[playerId]
while player.location is None:
possible_location = self.grid[randint(0,4)][randint(0,4)]
if possible_location is None: continue
if possible_location.num_players() > 0: continue
if not possible_location.loc_type == "room": continue
player.location = possible_location
player.location.add_player(player)
        # Set the first turn (Miss Scarlet always goes first)
self.active_turn = next(playerId for playerId in self.players if self.players[playerId].character.color == "red")
self.active_turn_state = TURN_STATE_WAITING_TO_MOVE
# Mark the game as in progress
self.in_progress = True
# Broadcast the state
self.broadcast_game_state()
def move_player(self, player, direction):
row = None
col = None
for i in range(0, len(self.grid)):
for j in range(0, len(self.grid[i])):
if player.location == self.grid[i][j]:
row = i
col = j
break
if row is None or col is None:
return
new_row = row
new_col = col
if direction == "up":
new_row += -1
elif direction == "down":
new_row += 1
elif direction == "left":
new_col += -1
elif direction == "right":
new_col += 1
elif direction == "up_left":
new_row = 0
new_col = 0
elif direction == "up_right":
new_row = 0
new_col = len(self.grid[new_row]) - 1
elif direction == "down_left":
new_row = len(self.grid) - 1
new_col = 0
elif direction == "down_right":
new_row = len(self.grid) - 1
new_col = len(self.grid[new_row]) - 1
log.msg("Moving player from [%d,%d] to [%d,%d]" % (row, col, new_row, new_col))
# If new row/col is valid, move player to that position.
if new_row >= 0 and new_row < len(self.grid):
if new_col >= 0 and new_col < len(self.grid[new_row]):
new_location = self.grid[new_row][new_col]
if new_location is not None:
# Is there room in the new location?
if new_location.is_hallway() and new_location.num_players() == 1:
raise GameError("Hallway is blocked by another player.")
player.location.remove_player(player)
new_location.add_player(player)
# Broadcast the new game state
self.broadcast_game_state()
def next_turn(self):
original_turn = self.active_turn
        player_id_list = list(self.players.keys())
idx = player_id_list.index(self.active_turn)
self.active_turn = None
while self.active_turn is None:
idx = idx + 1
next_idx = idx % len(player_id_list)
# Did we loop back around...
if player_id_list[next_idx] == original_turn:
self.game_over = True
self.broadcast_game_state()
raise GameError("No remaining active players. Game over.")
# Is the next player out?
if self.players[player_id_list[next_idx]].did_fail_accusation:
continue
# If we made it here, then we have the next turn
self.active_turn = player_id_list[next_idx]
# # Wrap around to the front of the list if have reached the end
# while i < 6:
# if idx + 1 >= len(player_id_list):
# self.active_turn = player_id_list[0]
# else:
# self.active_turn = player_id_list[idx + 1]
self.active_turn_state = TURN_STATE_WAITING_TO_MOVE
self.broadcast_game_state()
def broadcast_game_state(self):
state = self.to_dict()
for playerId in self.players:
self.players[playerId].send_player_data()
self.players[playerId].connection.sendOperation("update_game_state", state)
def build_deck(self):
deck = [
Weapon("Rope"),
Weapon("Lead Pipe"),
Weapon("Knife"),
Weapon("Wrench"),
Weapon("Candlestick"),
Weapon("Revolver"),
Room("Study"),
Room("Hall"),
Room("Lounge"),
Room("Library"),
Room("Billiard Room"),
Room("Dining Room"),
Room("Conservatory"),
Room("Ballroom"),
Room("Kitchen"),
Suspect(Colonel.name),
Suspect(Scarlet.name),
Suspect(Professor.name),
Suspect(Green.name),
Suspect(White.name),
Suspect(Peacock.name)
]
return deck
def make_suggestion(self, player, suspect, weapon, location):
if self.active_suggestion is not None:
raise Exception("There is already an active suggestion.")
# Move the suspect into the location
did_move = False
new_location = next(loc for row in self.grid for loc in row if loc is not None and loc.name == location)
for playerId in self.players:
suspect_player = self.players[playerId]
if suspect_player.character.name == suspect:
suspect_player.location.remove_player(suspect_player)
new_location.add_player(suspect_player)
did_move = True
break
if did_move is False:
log.msg("Failed to move suspect %s into location %s" % (suspect, location))
raise GameError("Unrecognized suspect and location.")
# Store suggestion data
self.active_suggestion = Suggestion(
player,
suspect,
weapon,
location
)
# Each subsequent player must now try to refuse the suggestion.
self.active_turn_state = TURN_STATE_IS_SUGGESTING
        player_id_list = list(self.players.keys())
idx = player_id_list.index(self.active_suggestion.player.id)
next_idx = (idx + 1) % len(player_id_list)
self.active_suggestion_turn = player_id_list[next_idx]
self.broadcast_game_state()
def pass_suggestion(self):
if self.active_suggestion is None or self.active_suggestion_turn is None or not self.active_turn_state == TURN_STATE_IS_SUGGESTING:
raise Exception("There is no active suggestion.")
        player_id_list = list(self.players.keys())
idx = player_id_list.index(self.active_suggestion_turn)
next_idx = (idx + 1) % len(player_id_list)
        # If we wrapped back around to the suggesting player, nobody could refute the suggestion.
if player_id_list[next_idx] == self.active_suggestion.player.id:
self.end_suggestion()
# Otherwise, pass the suggestion onto the next player.
else:
self.active_suggestion_turn = player_id_list[next_idx]
self.broadcast_game_state()
def refute_suggestion(self, player, card_name):
if self.active_suggestion is None or not self.active_turn_state == TURN_STATE_IS_SUGGESTING:
raise Exception("There is no active suggestion.")
options = [ self.active_suggestion.suspect, self.active_suggestion.location, self.active_suggestion.weapon ]
if card_name in options:
# Show the refuted card to the author of the suggestion.
suggestion_player = self.active_suggestion.player
suggestion_player.connection.sendOperation("suggestion_was_refuted", {
"card_name": card_name,
"refuting_player": str(player.id),
"suggestion": self.active_suggestion.to_dict()
})
# End the active suggestion
self.end_suggestion()
def end_suggestion(self):
if self.active_suggestion is None or not self.active_turn_state == TURN_STATE_IS_SUGGESTING:
raise Exception("There is no active suggestion.")
self.active_turn_state = TURN_STATE_WAITING_TO_ACCUSE
self.active_suggestion = None
self.active_suggestion_turn = None
self.broadcast_game_state()
def make_accusation(self, player, suspect, weapon, location):
if player.did_fail_accusation:
raise GameError("Player already failed accusastion.")
for playerId in self.players:
if not playerId == player.id:
self.players[playerId].send_message("%s accuses %s of committing the crime in %s with a %s" % (player.character.name, suspect, location, weapon))
        suspect_match = self.case_file.suspect.name == suspect
        weapon_match = self.case_file.weapon.name == weapon
        location_match = self.case_file.location.name == location
if suspect_match and weapon_match and location_match:
# Let everyone know
for playerId in self.players:
self.players[playerId].connection.sendOperation("accusation_did_succeed", {
"case_file": self.case_file.to_dict(),
"player": str(player.id)
})
# Player is correct!
self.winner = player.id
self.broadcast_game_state()
else:
# Player failed the accusation. He/she is out.
player.did_fail_accusation = True
# Move the player to a room
while player.location.is_hallway():
if player.location.move_up:
self.move_player(player, "up")
continue
if player.location.move_down:
self.move_player(player, "down")
continue
if player.location.move_left:
self.move_player(player, "left")
continue
if player.location.move_right:
self.move_player(player, "right")
continue
                # Default to the study
                player.location.remove_player(player)
                player.location = self.grid[0][0]
                player.location.add_player(player)
# Send the player a message
player.connection.sendOperation("accusation_did_fail", {
"case_file": self.case_file.to_dict()
})
# Let everyone else know
for playerId in self.players:
if not playerId == player.id:
self.players[playerId].send_message("%s failed the accusation. He is out." % (player.character.name))
# Update the game state for all clients
self.next_turn()
self.broadcast_game_state()
def to_dict(self):
return {
"players": [self.players[player].to_dict() for player in self.players],
"num_turns": self.num_turns,
"in_progress": self.in_progress,
"active_turn": str(self.active_turn),
"active_turn_state": self.active_turn_state,
"locations": [ location.to_dict() for row in self.grid for location in row if location is not None ],
"case_file": self.case_file.to_dict() if self.case_file is not None else None,
"active_suggestion": self.active_suggestion.to_dict() if self.active_suggestion is not None else None,
"active_suggestion_turn": str(self.active_suggestion_turn) if self.active_suggestion_turn is not None else None,
"winner": str(self.winner) if self.winner is not None else None,
"game_over": self.game_over,
"creator": str(self.creator) if self.creator is not None else None
}
|
|
from __future__ import absolute_import
from typing import Any, List, Dict, Optional, Callable, Tuple
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib.auth import authenticate, login, get_backends
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse, HttpRequest
from django.shortcuts import redirect
from django.template import RequestContext, loader
from django.utils.timezone import now
from django.utils.cache import patch_cache_control
from django.core.exceptions import ValidationError
from django.core import validators
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.forms.models import model_to_dict
from django.core.mail import send_mail
from django.middleware.csrf import get_token
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmAlias, \
RealmFilter, \
PreregistrationUser, get_client, MitUser, UserActivity, PushDeviceToken, \
get_stream, UserPresence, get_recipient, \
split_email_to_domain, resolve_email_to_domain, email_to_username, get_realm, \
completely_open, get_unique_open_realm, remote_user_to_email, email_allowed_for_realm
from zerver.lib.actions import do_change_password, do_change_full_name, do_change_is_admin, \
do_activate_user, do_create_user, \
internal_send_message, update_user_presence, do_events_register, \
get_status_dict, do_change_enable_offline_email_notifications, \
do_change_enable_digest_emails, do_set_realm_name, do_set_realm_restricted_to_domain, \
do_set_realm_invite_required, do_set_realm_invite_by_admins_only, \
do_set_realm_create_stream_by_admins_only, get_default_subs, \
user_email_is_unique, do_invite_users, do_refer_friend, compute_mit_user_fullname, \
do_set_muted_topics, clear_followup_emails_queue, do_update_pointer, realm_user_count
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.forms import RegistrationForm, HomepageForm, ToSForm, \
CreateUserForm, is_inactive, OurAuthenticationForm
from django.views.decorators.csrf import csrf_exempt
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib import bugdown
from zerver.lib.validator import check_string, check_list, check_bool
from zerver.decorator import require_post, authenticated_json_post_view, \
has_request_variables, authenticated_json_view, to_non_negative_int, \
JsonableError, get_user_profile_by_email, REQ, require_realm_admin, \
zulip_login_required
from zerver.lib.avatar import avatar_url
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import statsd, generate_random_token
from zproject.backends import password_auth_enabled, dev_auth_enabled
from confirmation.models import Confirmation
import requests
import subprocess
import calendar
import datetime
import ujson
import simplejson
import re
from six import text_type
from six.moves import urllib
import base64
import time
import logging
import jwt
import hashlib
import hmac
from zproject.jinja2 import render_to_response
from zerver.lib.rest import rest_dispatch as _rest_dispatch
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
def name_changes_disabled(realm):
# type: (Realm) -> bool
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
@require_post
def accounts_register(request):
# type: (HttpRequest) -> HttpResponse
key = request.POST['key']
confirmation = Confirmation.objects.get(confirmation_key=key)
prereg_user = confirmation.content_object
email = prereg_user.email
mit_beta_user = isinstance(confirmation.content_object, MitUser)
try:
existing_user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
existing_user_profile = None
validators.validate_email(email)
unique_open_realm = get_unique_open_realm()
if unique_open_realm:
realm = unique_open_realm
domain = realm.domain
elif not mit_beta_user and prereg_user.referred_by:
# If someone invited you, you are joining their realm regardless
# of your e-mail address.
#
# MitUsers can't be referred and don't have a referred_by field.
realm = prereg_user.referred_by.realm
domain = realm.domain
if not email_allowed_for_realm(email, realm):
return render_to_response("zerver/closed_realm.html", {"closed_domain_name": realm.name})
elif not mit_beta_user and prereg_user.realm:
# You have a realm set, even though nobody referred you. This
# happens if you sign up through a special URL for an open
# realm.
domain = prereg_user.realm.domain
realm = get_realm(domain)
else:
domain = resolve_email_to_domain(email)
realm = get_realm(domain)
if realm and realm.deactivated:
# The user is trying to register for a deactivated realm. Advise them to
# contact support.
return render_to_response("zerver/deactivated.html",
{"deactivated_domain_name": realm.name,
"zulip_administrator": settings.ZULIP_ADMINISTRATOR})
try:
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
# Mirror dummy users to be activated must be inactive
is_inactive(email)
else:
# Other users should not already exist at all.
user_email_is_unique(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.parse.quote_plus(email))
name_validated = False
full_name = None
if request.POST.get('from_confirmation'):
try:
del request.session['authenticated_full_name']
except KeyError:
pass
if domain == "mit.edu":
hesiod_name = compute_mit_user_fullname(email)
form = RegistrationForm(
initial={'full_name': hesiod_name if "@" not in hesiod_name else ""})
name_validated = True
elif settings.POPULATE_PROFILE_VIA_LDAP:
for backend in get_backends():
if isinstance(backend, LDAPBackend):
ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
try:
request.session['authenticated_full_name'] = ldap_attrs[settings.AUTH_LDAP_USER_ATTR_MAP['full_name']][0]
name_validated = True
# We don't use initial= here, because if the form is
# complete (that is, no additional fields need to be
# filled out by the user) we want the form to validate,
# so they can be directly registered without having to
# go through this interstitial.
form = RegistrationForm(
{'full_name': request.session['authenticated_full_name']})
# FIXME: This will result in the user getting
# validation errors if they have to enter a password.
# Not relevant for ONLY_SSO, though.
break
except TypeError:
# Let the user fill out a name and/or try another backend
form = RegistrationForm()
elif 'full_name' in request.POST:
form = RegistrationForm(
initial={'full_name': request.POST.get('full_name')}
)
else:
form = RegistrationForm()
else:
postdata = request.POST.copy()
if name_changes_disabled(realm):
# If we populate profile information via LDAP and we have a
# verified name from you on file, use that. Otherwise, fall
# back to the full name in the request.
try:
postdata.update({'full_name': request.session['authenticated_full_name']})
name_validated = True
except KeyError:
pass
form = RegistrationForm(postdata)
if not password_auth_enabled(realm):
form['password'].field.required = False
if form.is_valid():
if password_auth_enabled(realm):
password = form.cleaned_data['password']
else:
# SSO users don't need no passwords
password = None
full_name = form.cleaned_data['full_name']
short_name = email_to_username(email)
first_in_realm = len(UserProfile.objects.filter(realm=realm, is_bot=False)) == 0
# FIXME: sanitize email addresses and fullname
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
try:
user_profile = existing_user_profile
do_activate_user(user_profile)
do_change_password(user_profile, password)
do_change_full_name(user_profile, full_name)
except UserProfile.DoesNotExist:
user_profile = do_create_user(email, password, realm, full_name, short_name,
prereg_user=prereg_user,
newsletter_data={"IP": request.META['REMOTE_ADDR']})
else:
user_profile = do_create_user(email, password, realm, full_name, short_name,
prereg_user=prereg_user,
newsletter_data={"IP": request.META['REMOTE_ADDR']})
# This logs you in using the ZulipDummyBackend, since honestly nothing
# more fancy than this is required.
login(request, authenticate(username=user_profile.email, use_dummy_backend=True))
if first_in_realm:
do_change_is_admin(user_profile, True)
return HttpResponseRedirect(reverse('zerver.views.initial_invite_page'))
else:
return HttpResponseRedirect(reverse('zerver.views.home'))
return render_to_response('zerver/register.html',
{'form': form,
'company_name': domain,
'email': email,
'key': key,
'full_name': request.session.get('authenticated_full_name', None),
'lock_name': name_validated and name_changes_disabled(realm),
# password_auth_enabled is normally set via our context processor,
# but for the registration form, there is no logged in user yet, so
# we have to set it here.
'password_auth_enabled': password_auth_enabled(realm),
},
request=request)
@zulip_login_required
def accounts_accept_terms(request):
email = request.user.email
domain = resolve_email_to_domain(email)
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
full_name = form.cleaned_data['full_name']
send_mail('Terms acceptance for ' + full_name,
loader.render_to_string('zerver/tos_accept_body.txt',
{'name': full_name,
'email': email,
'ip': request.META['REMOTE_ADDR'],
'browser': request.META.get('HTTP_USER_AGENT', "Unspecified")}),
settings.EMAIL_HOST_USER,
["all@zulip.com"])
do_change_full_name(request.user, full_name)
return redirect(home)
else:
form = ToSForm()
return render_to_response('zerver/accounts_accept_terms.html',
{ 'form': form, 'company_name': domain, 'email': email },
request=request)
from zerver.lib.ccache import make_ccache
@authenticated_json_view
@has_request_variables
def webathena_kerberos_login(request, user_profile,
cred=REQ(default=None)):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
if cred is None:
return json_error(_("Could not find Kerberos credential"))
if not user_profile.realm.domain == "mit.edu":
return json_error(_("Webathena login only for mit.edu realm"))
try:
parsed_cred = ujson.loads(cred)
user = parsed_cred["cname"]["nameString"][0]
if user == "golem":
# Hack for an mit.edu user whose Kerberos username doesn't
# match what he zephyrs as
user = "ctl"
assert(user == user_profile.email.split("@")[0])
ccache = make_ccache(parsed_cred)
except Exception:
return json_error(_("Invalid Kerberos cache"))
# TODO: Send these data via (say) rabbitmq
try:
subprocess.check_call(["ssh", "zulip@zmirror2.zulip.net", "--",
"/home/zulip/zulip/bots/process_ccache",
user,
user_profile.api_key,
base64.b64encode(ccache)])
except Exception:
logging.exception("Error updating the user's ccache")
return json_error(_("We were unable to setup mirroring for you"))
return json_success()
def api_endpoint_docs(request):
# type: (HttpRequest) -> HttpResponse
raw_calls = open('templates/zerver/api_content.json', 'r').read()
calls = ujson.loads(raw_calls)
langs = set()
for call in calls:
call["endpoint"] = "%s/v1/%s" % (settings.EXTERNAL_API_URI, call["endpoint"])
call["example_request"]["curl"] = call["example_request"]["curl"].replace("https://api.zulip.com", settings.EXTERNAL_API_URI)
response = call['example_response']
if '\n' not in response:
# For 1-line responses, pretty-print them
extended_response = response.replace(", ", ",\n ")
else:
extended_response = response
call['rendered_response'] = bugdown.convert("~~~ .py\n" + extended_response + "\n~~~\n", "default")
for example_type in ('request', 'response'):
for lang in call.get('example_' + example_type, []):
langs.add(lang)
return render_to_response(
'zerver/api_endpoints.html', {
'content': calls,
'langs': langs,
},
request=request)
@authenticated_json_post_view
@has_request_variables
def json_invite_users(request, user_profile, invitee_emails_raw=REQ("invitee_emails")):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
if not invitee_emails_raw:
return json_error(_("You must specify at least one email address."))
invitee_emails = set(re.split(r'[, \n]', invitee_emails_raw))
stream_names = request.POST.getlist('stream')
if not stream_names:
return json_error(_("You must specify at least one stream for invitees to join."))
# We unconditionally sub you to the notifications stream if it
# exists and is public.
notifications_stream = user_profile.realm.notifications_stream
if notifications_stream and not notifications_stream.invite_only:
stream_names.append(notifications_stream.name)
streams = []
for stream_name in stream_names:
stream = get_stream(stream_name, user_profile.realm)
if stream is None:
return json_error(_("Stream does not exist: %s. No invites were sent.") % (stream_name,))
streams.append(stream)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
return json_success()
def create_homepage_form(request, user_info=None):
# type: (HttpRequest, Optional[Dict[str, Any]]) -> HomepageForm
if user_info:
return HomepageForm(user_info, domain=request.session.get("domain"))
# An empty fields dict is not treated the same way as not
# providing it.
return HomepageForm(domain=request.session.get("domain"))
def maybe_send_to_registration(request, email, full_name=''):
# type: (HttpRequest, text_type, text_type) -> HttpResponse
form = create_homepage_form(request, user_info={'email': email})
request.verified_email = None
if form.is_valid():
# Construct a PreregistrationUser object and send the user over to
# the confirmation view.
prereg_user = None
if settings.ONLY_SSO:
try:
prereg_user = PreregistrationUser.objects.filter(email__iexact=email).latest("invited_at")
except PreregistrationUser.DoesNotExist:
prereg_user = create_preregistration_user(email, request)
else:
prereg_user = create_preregistration_user(email, request)
return redirect("".join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
"/",
# Split this so we only get the part after the /
Confirmation.objects.get_link_for_object(prereg_user).split("/", 3)[3],
'?full_name=',
# urllib does not handle Unicode, so coerce to an encoded byte string
# Explanation: http://stackoverflow.com/a/5605354/90777
urllib.parse.quote_plus(full_name.encode('utf8')))))
else:
return render_to_response('zerver/accounts_home.html', {'form': form},
request=request)
def login_or_register_remote_user(request, remote_username, user_profile, full_name=''):
# type: (HttpRequest, str, UserProfile, text_type) -> HttpResponse
if user_profile is None or user_profile.is_mirror_dummy:
# Since execution has reached here, the client specified a remote user
# but no associated user account exists. Send them over to the
# PreregistrationUser flow.
return maybe_send_to_registration(request, remote_user_to_email(remote_username), full_name)
else:
login(request, user_profile)
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
def remote_user_sso(request):
# type: (HttpRequest) -> HttpResponse
try:
remote_user = request.META["REMOTE_USER"]
except KeyError:
raise JsonableError(_("No REMOTE_USER set."))
user_profile = authenticate(remote_user=remote_user)
return login_or_register_remote_user(request, remote_user, user_profile)
@csrf_exempt
def remote_user_jwt(request):
# type: (HttpRequest) -> HttpResponse
try:
json_web_token = request.POST["json_web_token"]
payload, signing_input, header, signature = jwt.load(json_web_token)
except KeyError:
raise JsonableError(_("No JSON web token passed in request"))
except jwt.DecodeError:
raise JsonableError(_("Bad JSON web token"))
remote_user = payload.get("user", None)
if remote_user is None:
raise JsonableError(_("No user specified in JSON web token claims"))
domain = payload.get('realm', None)
if domain is None:
raise JsonableError(_("No domain specified in JSON web token claims"))
email = "%s@%s" % (remote_user, domain)
try:
jwt.verify_signature(payload, signing_input, header, signature,
settings.JWT_AUTH_KEYS[domain])
# We do all the authentication we need here (otherwise we'd have to
# duplicate work), but we need to call authenticate with some backend so
# that the request.backend attribute gets set.
user_profile = authenticate(username=email, use_dummy_backend=True)
except (jwt.DecodeError, jwt.ExpiredSignature):
raise JsonableError(_("Bad JSON web token signature"))
except KeyError:
raise JsonableError(_("Realm not authorized for JWT login"))
except UserProfile.DoesNotExist:
user_profile = None
return login_or_register_remote_user(request, email, user_profile, remote_user)
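# A hedged illustration (not exercised by the view above) of the token shape
# remote_user_jwt expects: claims carrying "user" and "realm", signed with the
# key configured for that realm in settings.JWT_AUTH_KEYS. The encode call is a
# sketch using the generic PyJWT API, and the realm/user values are assumptions
# for illustration only; the view itself uses the older jwt.load /
# jwt.verify_signature interface.
#
#   example_token = jwt.encode({"user": "hamlet", "realm": "example.com"},
#                              settings.JWT_AUTH_KEYS["example.com"])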
def google_oauth2_csrf(request, value):
# type: (HttpRequest, str) -> HttpResponse
return hmac.new(get_token(request).encode('utf-8'), value, hashlib.sha256).hexdigest()
def start_google_oauth2(request):
# type: (HttpRequest) -> HttpResponse
uri = 'https://accounts.google.com/o/oauth2/auth?'
cur_time = str(int(time.time()))
csrf_state = '{}:{}'.format(
cur_time,
google_oauth2_csrf(request, cur_time),
)
params = {
'response_type': 'code',
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.finish_google_oauth2'),
)),
'scope': 'profile email',
'state': csrf_state,
}
return redirect(uri + urllib.parse.urlencode(params))
# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp):
# type: (HttpResponse) -> Dict[str, Any]
if requests_json_is_function:
return resp.json()
else:
return resp.json
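# Hedged usage sketch of the shim above (the endpoint URL is illustrative
# only): with either requests 0.x, where .json is a property, or 1.x+, where
# it is a method, the call resolves to a parsed dict.
#
#   data = extract_json_response(requests.get('https://www.googleapis.com/oauth2/v3/tokeninfo'))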
def finish_google_oauth2(request):
# type: (HttpRequest) -> HttpResponse
error = request.GET.get('error')
if error == 'access_denied':
return redirect('/')
elif error is not None:
logging.warning('Error from google oauth2 login %r', request.GET)
return HttpResponse(status=400)
value, hmac_value = request.GET.get('state').split(':')
if hmac_value != google_oauth2_csrf(request, value):
logging.warning('Google oauth2 CSRF error')
return HttpResponse(status=400)
resp = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'code': request.GET.get('code'),
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.finish_google_oauth2'),
)),
'grant_type': 'authorization_code',
},
)
if resp.status_code == 400:
logging.warning('User error converting Google oauth2 login to token: %r' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
raise Exception('Could not convert google oauth2 code to access_token\r%r' % (resp.text,))
access_token = extract_json_response(resp)['access_token']
resp = requests.get(
'https://www.googleapis.com/plus/v1/people/me',
params={'access_token': access_token}
)
if resp.status_code == 400:
logging.warning('Google login failed making info API call: %r' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
raise Exception('Google login failed making API call\r%r' % (resp.text,))
body = extract_json_response(resp)
try:
full_name = body['name']['formatted']
except KeyError:
# Only Google+ users have a formatted name. I am ignoring i18n here.
full_name = u'{} {}'.format(
body['name']['givenName'], body['name']['familyName']
)
for email in body['emails']:
if email['type'] == 'account':
break
else:
raise Exception('Google oauth2 account email not found %r' % (body,))
email_address = email['value']
user_profile = authenticate(username=email_address, use_dummy_backend=True)
return login_or_register_remote_user(request, email_address, user_profile, full_name)
def login_page(request, **kwargs):
# type: (HttpRequest, **Any) -> HttpResponse
extra_context = kwargs.pop('extra_context', {})
if dev_auth_enabled():
# Development environments usually have only a few users, but
# it still makes sense to limit how many users we render to
# support performance testing with DevAuthBackend.
MAX_DEV_BACKEND_USERS = 100
users_query = UserProfile.objects.select_related().filter(is_bot=False, is_active=True)
users = users_query.order_by('email')[0:MAX_DEV_BACKEND_USERS]
extra_context['direct_admins'] = [u.email for u in users if u.is_realm_admin]
extra_context['direct_users'] = [u.email for u in users if not u.is_realm_admin]
template_response = django_login_page(
request, authentication_form=OurAuthenticationForm,
extra_context=extra_context, **kwargs)
try:
template_response.context_data['email'] = request.GET['email']
except KeyError:
pass
return template_response
def dev_direct_login(request, **kwargs):
# type: (HttpRequest, **Any) -> HttpResponse
# This function allows logging in without a password and should only be called in development environments.
# It may be called if the DevAuthBackend is included in settings.AUTHENTICATION_BACKENDS
if (not dev_auth_enabled()) or settings.PRODUCTION:
# This check is probably not required, since authenticate would fail without an enabled DevAuthBackend.
raise Exception('Direct login not supported.')
email = request.POST['direct_email']
user_profile = authenticate(username=email)
if user_profile is None:
raise Exception("User cannot login")
login(request, user_profile)
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
@authenticated_json_post_view
@has_request_variables
def json_bulk_invite_users(request, user_profile,
invitee_emails_list=REQ('invitee_emails',
validator=check_list(check_string))):
# type: (HttpRequest, UserProfile, List[str]) -> HttpResponse
invitee_emails = set(invitee_emails_list)
streams = get_default_subs(user_profile)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
# Report bulk invites to internal Zulip.
invited = PreregistrationUser.objects.filter(referred_by=user_profile)
internal_message = "%s <`%s`> invited %d people to Zulip." % (
user_profile.full_name, user_profile.email, invited.count())
internal_send_message(settings.NEW_USER_BOT, "stream", "signups",
user_profile.realm.domain, internal_message)
return json_success()
@zulip_login_required
def initial_invite_page(request):
# type: (HttpRequest) -> HttpResponse
user = request.user
# Only show the bulk-invite page for the first user in a realm
domain_count = len(UserProfile.objects.filter(realm=user.realm))
if domain_count > 1:
return redirect('zerver.views.home')
params = {'company_name': user.realm.domain}
if (user.realm.restricted_to_domain):
params['invite_suffix'] = user.realm.domain
return render_to_response('zerver/initial_invite_page.html', params,
request=request)
@require_post
def logout_then_login(request, **kwargs):
# type: (HttpRequest, **Any) -> HttpResponse
return django_logout_then_login(request, **kwargs)
def create_preregistration_user(email, request):
# type: (text_type, HttpRequest) -> HttpResponse
domain = request.session.get("domain")
if completely_open(domain):
# Clear the "domain" from the session object; it's no longer needed
request.session["domain"] = None
# The user is trying to sign up for a completely open realm,
# so create them a PreregistrationUser for that realm
return PreregistrationUser.objects.create(email=email,
realm=get_realm(domain))
# MIT users who are not explicitly signing up for an open realm
# require special handling (They may already have an (inactive)
# account, for example)
if split_email_to_domain(email) == "mit.edu":
return MitUser.objects.get_or_create(email=email)[0]
return PreregistrationUser.objects.create(email=email)
def accounts_home_with_domain(request, domain):
# type: (HttpRequest, str) -> HttpResponse
if completely_open(domain):
# You can sign up for a completely open realm through a
# special registration path that contains the domain in the
# URL. We store this information in the session rather than
# elsewhere because we don't have control over URL or form
# data for folks registering through OpenID.
request.session["domain"] = domain
return accounts_home(request)
else:
return HttpResponseRedirect(reverse('zerver.views.accounts_home'))
def send_registration_completion_email(email, request):
# type: (str, HttpRequest) -> HttpResponse
"""
Send an email with a confirmation link to the provided e-mail so the user
can complete their registration.
"""
prereg_user = create_preregistration_user(email, request)
context = {'support_email': settings.ZULIP_ADMINISTRATOR,
'voyager': settings.VOYAGER}
Confirmation.objects.send_confirmation(prereg_user, email,
additional_context=context)
def accounts_home(request):
# type: (HttpRequest) -> HttpResponse
if request.method == 'POST':
form = create_homepage_form(request, user_info=request.POST)
if form.is_valid():
email = form.cleaned_data['email']
send_registration_completion_email(email, request)
return HttpResponseRedirect(reverse('send_confirm', kwargs={'email': email}))
try:
email = request.POST['email']
# Note: We don't check for uniqueness
is_inactive(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.parse.quote_plus(email))
else:
form = create_homepage_form(request)
return render_to_response('zerver/accounts_home.html',
{'form': form, 'current_url': request.get_full_path},
request=request)
def approximate_unread_count(user_profile):
# type: (UserProfile) -> int
not_in_home_view_recipients = [sub.recipient.id for sub in \
Subscription.objects.filter(
user_profile=user_profile, in_home_view=False)]
muted_topics = ujson.loads(user_profile.muted_topics)
# If muted_topics is empty, it looks like []. If it is non-empty, it looks
# like [[u'devel', u'test']]. We should switch to a consistent envelope, but
# until we do we still have both in the database.
if muted_topics:
muted_topics = muted_topics[0]
return UserMessage.objects.filter(
user_profile=user_profile, message_id__gt=user_profile.pointer).exclude(
message__recipient__type=Recipient.STREAM,
message__recipient__id__in=not_in_home_view_recipients).exclude(
message__subject__in=muted_topics).exclude(
flags=UserMessage.flags.read).count()
def sent_time_in_epoch_seconds(user_message):
# type: (UserMessage) -> float
# user_message is a UserMessage object.
if not user_message:
return None
# We have USE_TZ = True, so our datetime objects are timezone-aware.
# Return the epoch seconds in UTC.
return calendar.timegm(user_message.message.pub_date.utctimetuple())
@zulip_login_required
def home(request):
# type: (HttpRequest) -> HttpResponse
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
user_profile = request.user
request._email = request.user.email
request.client = get_client("website")
narrow = [] # type: List[List[str]]
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
narrow_stream = get_stream(request.GET.get("stream"), user_profile.realm)
assert(narrow_stream is not None)
assert(narrow_stream.is_public())
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.exception("Narrow parsing")
if narrow_topic is not None:
narrow.append(["topic", narrow_topic])
register_ret = do_events_register(user_profile, request.client,
apply_markdown=True, narrow=narrow)
user_has_messages = (register_ret['max_message_id'] != -1)
# Reset our don't-spam-users-with-email counter since the
# user has since logged in
if user_profile.last_reminder is not None:
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
# Brand new users get the tutorial
needs_tutorial = settings.TUTORIAL_ENABLED and \
user_profile.tutorial_status != UserProfile.TUTORIAL_FINISHED
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = first_in_realm and \
not PreregistrationUser.objects.filter(referred_by=user_profile).count()
if user_profile.pointer == -1 and user_has_messages:
# Put the new user's pointer at the bottom
#
# This improves performance, because we limit backfilling of messages
# before the pointer. It's also likely that someone joining an
# organization is interested in recent messages more than the very
# first messages on the system.
register_ret['pointer'] = register_ret['max_message_id']
user_profile.last_pointer_updater = request.session.session_key
if user_profile.pointer == -1:
latest_read = None
else:
try:
latest_read = UserMessage.objects.get(user_profile=user_profile,
message__id=user_profile.pointer)
except UserMessage.DoesNotExist:
# Don't completely fail if your saved pointer ID is invalid
logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
latest_read = None
desktop_notifications_enabled = user_profile.enable_desktop_notifications
if narrow_stream is not None:
desktop_notifications_enabled = False
if user_profile.realm.notifications_stream:
notifications_stream = user_profile.realm.notifications_stream.name
else:
notifications_stream = ""
# Pass parameters to the client-side JavaScript code.
# These end up in a global JavaScript Object named 'page_params'.
page_params = dict(
voyager = settings.VOYAGER,
debug_mode = settings.DEBUG,
test_suite = settings.TEST_SUITE,
poll_timeout = settings.POLL_TIMEOUT,
login_page = settings.HOME_NOT_LOGGED_IN,
maxfilesize = settings.MAX_FILE_UPLOAD_SIZE,
password_auth_enabled = password_auth_enabled(user_profile.realm),
have_initial_messages = user_has_messages,
subbed_info = register_ret['subscriptions'],
unsubbed_info = register_ret['unsubscribed'],
email_dict = register_ret['email_dict'],
people_list = register_ret['realm_users'],
bot_list = register_ret['realm_bots'],
initial_pointer = register_ret['pointer'],
initial_presences = register_ret['presences'],
initial_servertime = time.time(), # Used for calculating relative presence age
fullname = user_profile.full_name,
email = user_profile.email,
domain = user_profile.realm.domain,
realm_name = register_ret['realm_name'],
realm_invite_required = register_ret['realm_invite_required'],
realm_invite_by_admins_only = register_ret['realm_invite_by_admins_only'],
realm_create_stream_by_admins_only = register_ret['realm_create_stream_by_admins_only'],
realm_restricted_to_domain = register_ret['realm_restricted_to_domain'],
enter_sends = user_profile.enter_sends,
left_side_userlist = register_ret['left_side_userlist'],
referrals = register_ret['referrals'],
realm_emoji = register_ret['realm_emoji'],
needs_tutorial = needs_tutorial,
first_in_realm = first_in_realm,
prompt_for_invites = prompt_for_invites,
notifications_stream = notifications_stream,
# Stream message notification settings:
stream_desktop_notifications_enabled =
user_profile.enable_stream_desktop_notifications,
stream_sounds_enabled = user_profile.enable_stream_sounds,
# Private message and @-mention notification settings:
desktop_notifications_enabled = desktop_notifications_enabled,
sounds_enabled =
user_profile.enable_sounds,
enable_offline_email_notifications =
user_profile.enable_offline_email_notifications,
enable_offline_push_notifications =
user_profile.enable_offline_push_notifications,
twenty_four_hour_time = register_ret['twenty_four_hour_time'],
enable_digest_emails = user_profile.enable_digest_emails,
event_queue_id = register_ret['queue_id'],
last_event_id = register_ret['last_event_id'],
max_message_id = register_ret['max_message_id'],
unread_count = approximate_unread_count(user_profile),
furthest_read_time = sent_time_in_epoch_seconds(latest_read),
staging = settings.ZULIP_COM_STAGING or settings.DEVELOPMENT,
alert_words = register_ret['alert_words'],
muted_topics = register_ret['muted_topics'],
realm_filters = register_ret['realm_filters'],
is_admin = user_profile.is_realm_admin,
can_create_streams = user_profile.can_create_streams(),
name_changes_disabled = name_changes_disabled(user_profile.realm),
has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
autoscroll_forever = user_profile.autoscroll_forever,
default_desktop_notifications = user_profile.default_desktop_notifications,
avatar_url = avatar_url(user_profile),
mandatory_topics = user_profile.realm.mandatory_topics,
show_digest_email = user_profile.realm.show_digest_email,
)
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
try:
initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
except IndexError:
initial_pointer = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = initial_pointer
page_params["initial_pointer"] = initial_pointer
page_params["have_initial_messages"] = (initial_pointer != -1)
statsd.incr('views.home')
show_invites = True
# Some realms only allow admins to invite users
if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
show_invites = False
product_name = "Zulip"
page_params['product_name'] = product_name
request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
response = render_to_response('zerver/index.html',
{'user_profile': user_profile,
'page_params' : simplejson.encoder.JSONEncoderForHTML().encode(page_params),
'nofontface': is_buggy_ua(request.META.get("HTTP_USER_AGENT", "Unspecified")),
'avatar_url': avatar_url(user_profile),
'show_debug':
settings.DEBUG and ('show_debug' in request.GET),
'pipeline': settings.PIPELINE,
'show_invites': show_invites,
'is_admin': user_profile.is_realm_admin,
'show_webathena': user_profile.realm.domain == "mit.edu",
'enable_feedback': settings.ENABLE_FEEDBACK,
'embedded': narrow_stream is not None,
'product_name': product_name
},
request=request)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@zulip_login_required
def desktop_home(request):
# type: (HttpRequest) -> HttpResponse
return HttpResponseRedirect(reverse('zerver.views.home'))
def is_buggy_ua(agent):
# type: (str) -> bool
"""Discrimiate CSS served to clients based on User Agent
Due to QTBUG-3467, @font-face is not supported in QtWebKit.
This may get fixed in the future, but for right now we can
just serve the more conservative CSS to all our desktop apps.
"""
return ("Humbug Desktop/" in agent or "Zulip Desktop/" in agent or "ZulipDesktop/" in agent) and \
"Mac" not in agent
def get_pointer_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request, user_profile,
pointer=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
if pointer <= user_profile.pointer:
return json_success()
try:
UserMessage.objects.get(
user_profile=user_profile,
message__id=pointer
)
except UserMessage.DoesNotExist:
raise JsonableError(_("Invalid message ID"))
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, pointer, update_flags=update_flags)
return json_success()
def generate_client_id():
# type: () -> str
return generate_random_token(32)
# The order of creation of the various dictionaries is important.
# We filter on {userprofile,stream,subscription_recipient}_ids.
@require_realm_admin
def export(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
if (Message.objects.filter(sender__realm=user_profile.realm).count() > 1000000 or
UserMessage.objects.filter(user_profile__realm=user_profile.realm).count() > 3000000):
return json_error(_("Realm has too much data for non-batched export."))
response = {}
response['zerver_realm'] = [model_to_dict(x)
for x in Realm.objects.select_related().filter(id=user_profile.realm.id)]
response['zerver_userprofile'] = [model_to_dict(x, exclude=["password", "api_key"])
for x in UserProfile.objects.select_related().filter(realm=user_profile.realm)]
userprofile_ids = set(userprofile["id"] for userprofile in response['zerver_userprofile'])
response['zerver_stream'] = [model_to_dict(x, exclude=["email_token"])
for x in Stream.objects.select_related().filter(realm=user_profile.realm, invite_only=False)]
stream_ids = set(x["id"] for x in response['zerver_stream'])
response['zerver_usermessage'] = [model_to_dict(x) for x in UserMessage.objects.select_related()
if x.user_profile_id in userprofile_ids]
user_recipients = [model_to_dict(x)
for x in Recipient.objects.select_related().filter(type=1)
if x.type_id in userprofile_ids]
stream_recipients = [model_to_dict(x)
for x in Recipient.objects.select_related().filter(type=2)
if x.type_id in stream_ids]
stream_recipient_ids = set(x["id"] for x in stream_recipients)
# only check for subscriptions to streams
response['zerver_subscription'] = [model_to_dict(x) for x in Subscription.objects.select_related()
if x.user_profile_id in userprofile_ids
and x.recipient_id in stream_recipient_ids]
subscription_recipient_ids = set(x["recipient"] for x in response['zerver_subscription'])
huddle_recipients = [model_to_dict(r)
for r in Recipient.objects.select_related().filter(type=3)
if r.type_id in subscription_recipient_ids]
huddle_ids = set(x["type_id"] for x in huddle_recipients)
response["zerver_recipient"] = user_recipients + stream_recipients + huddle_recipients
response['zerver_huddle'] = [model_to_dict(h)
for h in Huddle.objects.select_related()
if h.id in huddle_ids]
recipient_ids = set(x["id"] for x in response['zerver_recipient'])
response["zerver_message"] = [model_to_dict(m) for m in Message.objects.select_related()
if m.recipient_id in recipient_ids
and m.sender_id in userprofile_ids]
for (table, model) in [("defaultstream", DefaultStream),
("realmemoji", RealmEmoji),
("realmalias", RealmAlias),
("realmfilter", RealmFilter)]:
response["zerver_"+table] = [model_to_dict(x) for x in
model.objects.select_related().filter(realm_id=user_profile.realm.id)] # type: ignore
return json_success(response)
def get_profile_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1)
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
@require_realm_admin
@has_request_variables
def update_realm(request, user_profile, name=REQ(validator=check_string, default=None),
restricted_to_domain=REQ(validator=check_bool, default=None),
invite_required=REQ(validator=check_bool, default=None),
invite_by_admins_only=REQ(validator=check_bool, default=None),
create_stream_by_admins_only=REQ(validator=check_bool, default=None)):
# type: (HttpRequest, UserProfile, Optional[str], Optional[bool], Optional[bool], Optional[bool], Optional[bool]) -> HttpResponse
realm = user_profile.realm
data = {} # type: Dict[str, Any]
if name is not None and realm.name != name:
do_set_realm_name(realm, name)
data['name'] = 'updated'
if restricted_to_domain is not None and realm.restricted_to_domain != restricted_to_domain:
do_set_realm_restricted_to_domain(realm, restricted_to_domain)
data['restricted_to_domain'] = restricted_to_domain
if invite_required is not None and realm.invite_required != invite_required:
do_set_realm_invite_required(realm, invite_required)
data['invite_required'] = invite_required
if invite_by_admins_only is not None and realm.invite_by_admins_only != invite_by_admins_only:
do_set_realm_invite_by_admins_only(realm, invite_by_admins_only)
data['invite_by_admins_only'] = invite_by_admins_only
if create_stream_by_admins_only is not None and realm.create_stream_by_admins_only != create_stream_by_admins_only:
do_set_realm_create_stream_by_admins_only(realm, create_stream_by_admins_only)
data['create_stream_by_admins_only'] = create_stream_by_admins_only
return json_success(data)
@csrf_exempt
@require_post
@has_request_variables
def api_fetch_api_key(request, username=REQ(), password=REQ()):
# type: (HttpRequest, str, str) -> HttpResponse
return_data = {} # type: Dict[str, bool]
if username == "google-oauth2-token":
user_profile = authenticate(google_oauth2_token=password, return_data=return_data)
else:
user_profile = authenticate(username=username, password=password, return_data=return_data)
if return_data.get("inactive_user") == True:
return json_error(_("Your account has been disabled."), data={"reason": "user disable"}, status=403)
if return_data.get("inactive_realm") == True:
return json_error(_("Your realm has been deactivated."), data={"reason": "realm deactivated"}, status=403)
if return_data.get("password_auth_disabled") == True:
return json_error(_("Password auth is disabled in your team."), data={"reason": "password auth disabled"}, status=403)
if user_profile is None:
if return_data.get("valid_attestation") == True:
# We can leak that the user is unregistered iff they present a valid authentication string for the user.
return json_error(_("This user is not registered; do so from a browser."), data={"reason": "unregistered"}, status=403)
return json_error(_("Your username or password is incorrect."), data={"reason": "incorrect_creds"}, status=403)
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@authenticated_json_post_view
@has_request_variables
def json_fetch_api_key(request, user_profile, password=REQ(default='')):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
if password_auth_enabled(user_profile.realm) and not user_profile.check_password(password):
return json_error(_("Your username or password is incorrect."))
return json_success({"api_key": user_profile.api_key})
@csrf_exempt
def api_fetch_google_client_id(request):
# type: (HttpRequest) -> HttpResponse
if not settings.GOOGLE_CLIENT_ID:
return json_error(_("GOOGLE_CLIENT_ID is not configured"), status=400)
return json_success({"google_client_id": settings.GOOGLE_CLIENT_ID})
def get_status_list(requesting_user_profile):
# type: (UserProfile) -> Dict[str, Any]
return {'presences': get_status_dict(requesting_user_profile),
'server_timestamp': time.time()}
@has_request_variables
def update_active_status_backend(request, user_profile, status=REQ(),
new_user_input=REQ(validator=check_bool, default=False)):
# type: (HttpRequest, UserProfile, str, bool) -> HttpResponse
status_val = UserPresence.status_from_string(status)
if status_val is None:
raise JsonableError(_("Invalid presence status: %s") % (status,))
else:
update_user_presence(user_profile, request.client, now(), status_val,
new_user_input)
ret = get_status_list(user_profile)
if user_profile.realm.domain == "mit.edu":
try:
activity = UserActivity.objects.get(user_profile = user_profile,
query="get_events_backend",
client__name="zephyr_mirror")
ret['zephyr_mirror_active'] = \
(activity.last_visit.replace(tzinfo=None) >
datetime.datetime.utcnow() - datetime.timedelta(minutes=5))
except UserActivity.DoesNotExist:
ret['zephyr_mirror_active'] = False
return json_success(ret)
@authenticated_json_post_view
def json_get_active_statuses(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return json_success(get_status_list(user_profile))
# Does not need to be authenticated because it's called from rest_dispatch
@has_request_variables
def api_events_register(request, user_profile,
apply_markdown=REQ(default=False, validator=check_bool),
all_public_streams=REQ(default=None, validator=check_bool)):
# type: (HttpRequest, UserProfile, bool, Optional[bool]) -> HttpResponse
return events_register_backend(request, user_profile,
apply_markdown=apply_markdown,
all_public_streams=all_public_streams)
def _default_all_public_streams(user_profile, all_public_streams):
# type: (UserProfile, Optional[bool]) -> bool
if all_public_streams is not None:
return all_public_streams
else:
return user_profile.default_all_public_streams
def _default_narrow(user_profile, narrow):
# type: (UserProfile, List[List[text_type]]) -> List[List[text_type]]
default_stream = user_profile.default_events_register_stream
if not narrow and user_profile.default_events_register_stream is not None:
narrow = [['stream', default_stream.name]]
return narrow
@has_request_variables
def events_register_backend(request, user_profile, apply_markdown=True,
all_public_streams=None,
event_types=REQ(validator=check_list(check_string), default=None),
narrow=REQ(validator=check_list(check_list(check_string, length=2)), default=[]),
queue_lifespan_secs=REQ(converter=int, default=0)):
# type: (HttpRequest, UserProfile, bool, Optional[bool], Optional[List[str]], List[List[text_type]], int) -> HttpResponse
all_public_streams = _default_all_public_streams(user_profile, all_public_streams)
narrow = _default_narrow(user_profile, narrow)
ret = do_events_register(user_profile, request.client, apply_markdown,
event_types, queue_lifespan_secs, all_public_streams,
narrow=narrow)
return json_success(ret)
@authenticated_json_post_view
@has_request_variables
def json_refer_friend(request, user_profile, email=REQ()):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
if not email:
return json_error(_("No email address specified"))
if user_profile.invites_granted - user_profile.invites_used <= 0:
return json_error(_("Insufficient invites"))
do_refer_friend(user_profile, email)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_set_muted_topics(request, user_profile,
muted_topics=REQ(validator=check_list(check_list(check_string, length=2)), default=[])):
# type: (HttpRequest, UserProfile, List[List[text_type]]) -> HttpResponse
do_set_muted_topics(user_profile, muted_topics)
return json_success()
def add_push_device_token(request, user_profile, token_str, kind, ios_app_id=None):
# type: (HttpRequest, UserProfile, str, int, Optional[str]) -> HttpResponse
if token_str == '' or len(token_str) > 4096:
return json_error(_('Empty or invalid length token'))
# If another user was previously logged in on the same device and didn't
# properly log out, the token will still be registered to the wrong account
PushDeviceToken.objects.filter(token=token_str).delete()
# Overwrite with the latest value
token, created = PushDeviceToken.objects.get_or_create(user=user_profile,
token=token_str,
kind=kind,
ios_app_id=ios_app_id)
if not created:
token.last_updated = now()
token.save(update_fields=['last_updated'])
return json_success()
@has_request_variables
def add_apns_device_token(request, user_profile, token=REQ(), appid=REQ(default=settings.ZULIP_IOS_APP_ID)):
# type: (HttpRequest, UserProfile, str, str) -> HttpResponse
return add_push_device_token(request, user_profile, token, PushDeviceToken.APNS, ios_app_id=appid)
@has_request_variables
def add_android_reg_id(request, user_profile, token_str=REQ("token")):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
return add_push_device_token(request, user_profile, token_str, PushDeviceToken.GCM)
def remove_push_device_token(request, user_profile, token_str, kind):
# type: (HttpRequest, UserProfile, str, int) -> HttpResponse
if token_str == '' or len(token_str) > 4096:
return json_error(_('Empty or invalid length token'))
try:
token = PushDeviceToken.objects.get(token=token_str, kind=kind)
token.delete()
except PushDeviceToken.DoesNotExist:
return json_error(_("Token does not exist"))
return json_success()
@has_request_variables
def remove_apns_device_token(request, user_profile, token=REQ()):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
return remove_push_device_token(request, user_profile, token, PushDeviceToken.APNS)
@has_request_variables
def remove_android_reg_id(request, user_profile, token=REQ()):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
return remove_push_device_token(request, user_profile, token, PushDeviceToken.GCM)
def generate_204(request):
# type: (HttpRequest) -> HttpResponse
return HttpResponse(content=None, status=204)
def process_unsubscribe(token, subscription_type, unsubscribe_function):
# type: (str, str, Callable[[UserProfile], None]) -> HttpResponse
try:
confirmation = Confirmation.objects.get(confirmation_key=token)
except Confirmation.DoesNotExist:
return render_to_response('zerver/unsubscribe_link_error.html')
user_profile = confirmation.content_object
unsubscribe_function(user_profile)
return render_to_response('zerver/unsubscribe_success.html',
{"subscription_type": subscription_type,
"external_host": settings.EXTERNAL_HOST})
# Email unsubscribe functions. All have the function signature
# processor(user_profile).
def do_missedmessage_unsubscribe(user_profile):
# type: (UserProfile) -> None
do_change_enable_offline_email_notifications(user_profile, False)
def do_welcome_unsubscribe(user_profile):
# type: (UserProfile) -> None
clear_followup_emails_queue(user_profile.email)
def do_digest_unsubscribe(user_profile):
# type: (UserProfile) -> None
do_change_enable_digest_emails(user_profile, False)
# The keys are part of the URL for the unsubscribe link and must be valid
# without encoding.
# The values are a tuple of (display name, unsubscribe function), where the
# display name is what we call this class of email in user-visible text.
email_unsubscribers = {
"missed_messages": ("missed messages", do_missedmessage_unsubscribe),
"welcome": ("welcome", do_welcome_unsubscribe),
"digest": ("digest", do_digest_unsubscribe)
}
# Login NOT required. These are for one-click unsubscribes.
def email_unsubscribe(request, type, token):
# type: (HttpRequest, str, str) -> HttpResponse
if type in email_unsubscribers:
display_name, unsubscribe_function = email_unsubscribers[type]
return process_unsubscribe(token, display_name, unsubscribe_function)
return render_to_response('zerver/unsubscribe_link_error.html', {},
request=request)
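# A hedged illustration of the dispatch above: an unsubscribe link of type
# "digest" resolves to ("digest", do_digest_unsubscribe), so the view performs,
# in effect:
#
#   process_unsubscribe(token, "digest", do_digest_unsubscribe)
#
# The URL pattern that supplies `type` and `token` lives in the URL
# configuration and is not shown here.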
|
|
"""
GPU op for Stochastic max pooling as defined in:
Stochastic Pooling for Regularization of Deep Convolutional Neural Networks
Matthew D. Zeiler, Rob Fergus, ICLR 2013
The code is written around Alex Krizhevsky's cuda-convnet
"""
__authors__ = "Mehdi Mirza"
__copyright__ = "Copyright 2010-2013, Universite de Montreal"
__credits__ = ["Mehdi Mirza", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "Mehdi Mirza"
__email__ = "mirzamom@iro"
import warnings
import numpy
from theano import shared
from theano.gof import Apply
from theano.sandbox.cuda import CudaNdarrayType
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from theano.sandbox.cuda import GpuOp
from theano.tensor import get_scalar_constant_value, NotScalarConstantError, zeros_like
from pylearn2.sandbox.cuda_convnet.base_acts import UnimplementedError
from pylearn2.sandbox.cuda_convnet.convnet_compile import convnet_available
from pylearn2.sandbox.cuda_convnet.convnet_compile import cuda_convnet_loc
from pylearn2.sandbox.cuda_convnet.shared_code import this_dir
from pylearn2.sandbox.cuda_convnet.pool import MaxPoolGrad
def stochastic_max_pool_c01b(c01b, pool_shape, pool_stride, start=0, seed=1234):
"""
Stochastic max pooling of a (channel, row, col, batch) tensor on the GPU.
`pool_shape` and `pool_stride` must be square; see the StochasticMaxPool
docstring below for the meaning of `start` and `seed`.
"""
assert pool_shape[0] == pool_shape[1]
assert pool_stride[0] == pool_stride[1]
op = StochasticMaxPool(pool_shape[0], pool_stride[0], start, seed)
c01b = gpu_contiguous(c01b)
return op(c01b)
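# A minimal CPU reference sketch (not used by the GPU op) of the stochastic
# pooling rule from Zeiler & Fergus: the non-negative activations in each
# pooling window are normalized into a multinomial distribution and a single
# value is sampled. The 2D single-channel layout and the helper name are
# assumptions made only for illustration; the real op below works on c01b
# CudaNdarrays.
def _stochastic_max_pool_reference(image, ds, stride, rng=None):
    """Sample one activation per (ds x ds) window of a 2D non-negative array."""
    rng = numpy.random.RandomState(1234) if rng is None else rng
    out_rows = (image.shape[0] - ds) // stride + 1
    out_cols = (image.shape[1] - ds) // stride + 1
    out = numpy.zeros((out_rows, out_cols), dtype='float64')
    for i in range(out_rows):
        for j in range(out_cols):
            window = image[i * stride:i * stride + ds,
                           j * stride:j * stride + ds].ravel().astype('float64')
            total = window.sum()
            if total == 0:
                continue  # all-zero window: leave the output at 0
            # Normalize the window into a multinomial and sample one index.
            idx = rng.multinomial(1, window / total).argmax()
            out[i, j] = window[idx]
    return out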
def weighted_max_pool_c01b(c01b, pool_shape, pool_stride, start=0):
"""
.. todo::
WRITEME
"""
assert pool_shape[0] == pool_shape[1]
assert pool_stride[0] == pool_stride[1]
op = WeightedMaxPool(pool_shape[0], pool_stride[0], start)
c01b = gpu_contiguous(c01b)
return op(c01b)
class StochasticMaxPool(GpuOp):
"""
Stochastic MaxPool op code on the GPU.
The input is in the order (channel, image rows, image cols, batch)
Works only on square images and the grad works only when
channel % 16 == 0.
Parameters
----------
ds : int
defines the size of the pooling region in the x (equivalently, y)
dimension. Squares of size (ds)^2 get reduced to one value by this
layer. There are no restrictions on the value of this parameter. It's
fine for a pooling square to fall off the boundary of the image. Named
SizeX in Alex's code.
stride : int
defines the stride size between successive pooling squares. Setting
this parameter smaller than sizeX produces overlapping pools. Setting
it equal to sizeX gives the usual, non-overlapping pools. Values
greater than sizeX are not allowed.
start : int, optional
tells the net where in the input image to start the pooling (in x,y
coordinates). In principle, you can start anywhere you want. Setting
this to a positive number will cause the net to discard some pixels at
the top and at the left of the image. Setting this to a negative number
will cause it to include pixels that don't exist (which is fine).
start=0 is the usual setting.
outputs : int, optional
allows you to control how many output values in the x (equivalently, y)
dimension this operation will produce. This parameter is analogous to
the start parameter, in that it allows you to discard some portion of
the image by setting it to a value small enough to leave part of the
image uncovered. Setting it to zero instructs the net to produce as
many outputs as is necessary to ensure that the whole image is covered.
default 0
seed : WRITEME
"""
def __init__(self, ds, stride, start=0, outputs=0, seed=1234):
self.ds = ds
self.stride = stride
self.start = start
self.copy_non_contiguous = 0
self.seed_state = shared(numpy.asarray(seed).astype('float32'))
self.seed_state.default_update = self.seed_state + 1
assert stride > 0 and stride <= ds, (stride, ds)
assert ds > 0, ds # We check in the code if ds <= imgSizeX
def __eq__(self, other):
"""
.. todo::
WRITEME
"""
# Don't include copy_non_contiguous, as it doesn't change the output
return (type(self) == type(other) and
self.ds == other.ds and
self.stride == other.stride and
self.start == other.start)
def __hash__(self):
"""
.. todo::
WRITEME
"""
# Don't include copy_non_contiguous, as it doesn't change the output
return (hash(type(self)) ^ hash(self.ds) ^
hash(self.stride) ^ hash(self.start))
def c_header_dirs(self):
"""
.. todo::
WRITEME
"""
return [this_dir]
def c_headers(self):
"""
.. todo::
WRITEME
"""
return ['nvmatrix.cuh', 'conv_util.cuh']
def c_lib_dirs(self):
"""
.. todo::
WRITEME
"""
return [cuda_convnet_loc]
def c_libraries(self):
"""
.. todo::
WRITEME
"""
return ['cuda_convnet']
def c_code_cache_version(self):
"""
.. todo::
WRITEME
"""
return (1,)
def _argument_contiguity_check(self, arg_name):
"""
.. todo::
WRITEME
"""
return """
if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))
{
if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {
PyErr_SetString(PyExc_ValueError,
"%(class)s: %(arg_name)s must be C contiguous");
%%(fail)s;
}
}
""" % {
'class': self.__class__.__name__,
'arg_name': arg_name,
'class_name_caps': self.__class__.__name__.upper(),
}
def make_node(self, images):
"""
.. todo::
WRITEME
"""
images = as_cuda_ndarray_variable(images)
assert images.ndim == 4
channels_broadcastable = images.type.broadcastable[0]
batch_broadcastable = images.type.broadcastable[3]
rows_broadcastable = False
cols_broadcastable = False
targets_broadcastable = (channels_broadcastable, rows_broadcastable,
cols_broadcastable, batch_broadcastable)
targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
targets = targets_type()
seed = self.seed_state
seed = as_cuda_ndarray_variable(seed)
return Apply(self, [images, seed], [targets])
def c_code(self, node, name, inputs, outputs, sub):
"""
.. todo::
WRITEME
"""
images, seed = inputs
targets, = outputs
fail = sub['fail']
# The number of braces that must be closed at the end
num_braces = 0
if self.copy_non_contiguous:
raise UnimplementedError()
else:
basic_setup = "#define STOCHASTICMAXPOOL_COPY_NON_CONTIGUOUS 0\n"
# Convert images in nv_images, an NVMatrix, for compatibility
# with the cuda-convnet functions
setup_nv_images = self._argument_contiguity_check("images") + """
if (%(images)s->nd != 4)
{
PyErr_Format(PyExc_ValueError,
"images must have nd=4, got nd=%%i", %(images)s->nd);
%(fail)s;
}
{ //setup_nv_images brace 1
const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);
const int img_channels = images_dims[0];
const int imgSizeY = images_dims[1];
const int imgSizeX = images_dims[2];
const int batch_size = images_dims[3];
if(imgSizeY != imgSizeX){
PyErr_Format(PyExc_ValueError,
"images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",
img_channels, imgSizeY, imgSizeX, batch_size);
%(fail)s;
}
if(%(ds)s > imgSizeY){
PyErr_Format(PyExc_ValueError,
"ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",
%(ds)s, imgSizeX, imgSizeY);
%(fail)s;
}
if(%(start)s >= imgSizeX){
PyErr_Format(PyExc_ValueError,
"start is %%d but must be smaller then the images size of %%d x %%d.",
%(start)s, imgSizeX, imgSizeY);
%(fail)s;
}
NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,
"MaxPool:nv_images");
//int * seed = CudaNdarray_HOST_DIMS%(seed)s;
float * seed = CudaNdarray_DEV_DATA(%(seed)s);
//int * seed = %(seed)s;
"""
num_braces += 1
setup_nv_targets = """
//int _outputsX = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1;
int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;
int target_dims [] = {
img_channels,
_outputsX,
_outputsX,
batch_size };
if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))
{
%(fail)s;
}
{ // setup_nv_target brace # 1
NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1] * target_dims[2],
target_dims[3], "MaxPool:nv_targets");
"""
num_braces += 1
do_pool = """
convLocalStochasticMaxPool(nv_images, nv_targets, img_channels, %(ds)s,
%(start)s, %(stride)s, _outputsX, MaxPooler(), seed);
"""
braces = '}' * num_braces
rval = (basic_setup +
setup_nv_images +
setup_nv_targets +
do_pool +
braces)
start = self.start
stride = self.stride
ds = self.ds
rval = rval % locals()
return rval
def grad(self, inp, grads):
"""
.. todo::
WRITEME
"""
x, seed = inp
gz, = grads
gz = gpu_contiguous(gz)
maxout = self(x)
return [MaxPoolGrad(self.ds, self.stride, self.start)(x, maxout, gz), zeros_like(seed)]
# Make sure the cuda_convnet library is compiled and up-to-date
def make_thunk(self, *args, **kwargs):
"""
.. todo::
WRITEME
"""
if not convnet_available():
raise RuntimeError('Could not compile cuda_convnet')
return super(StochasticMaxPool, self).make_thunk(*args, **kwargs)
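# Minimal sketch (not part of the original op): the pooled output width that the
# generated C code computes as _outputsX. The helper name below is hypothetical
# and only mirrors the formula ceil((imgSize - start - ds) / stride) + 1.
def _pool_output_width_sketch(img_size, ds, stride, start=0):
    import math
    return int(math.ceil((img_size - start - ds) / float(stride))) + 1
# For example, a 32x32 image with ds=3, stride=2, start=0 gives
# ceil((32 - 0 - 3) / 2.0) + 1 = 16 output positions per dimension.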
class WeightedMaxPool(GpuOp):
"""
This op wraps Alex's MaxPool code on the GPU.
The inputs are in the order (channel, image rows, image cols, batch).
Works only on square images and the grad works only when
channel % 16 == 0.
Parameters
----------
ds : int
defines the size of the pooling region in the x (equivalently, y)
dimension. Squares of size (ds)^2 get reduced to one value by this
layer. There are no restrictions on the value of this parameter. It's
fine for a pooling square to fall off the boundary of the image. Named
SizeX in Alex's code.
stride : int
defines the stride size between successive pooling squares. Setting
this parameter smaller than sizeX produces overlapping pools. Setting
it equal to sizeX gives the usual, non-overlapping pools. Values
greater than sizeX are not allowed.
start : int, optional
tells the net where in the input image to start the pooling (in x,y
coordinates). In principle, you can start anywhere you want. Setting
this to a positive number will cause the net to discard some pixels at
the top and at the left of the image. Setting this to a negative number
will cause it to include pixels that don't exist (which is fine).
start=0 is the usual setting.
outputs : int, optional
allows you to control how many output values in the x (equivalently, y)
dimension this operation will produce. This parameter is analogous to
the start parameter, in that it allows you to discard some portion of
the image by setting it to a value small enough to leave part of the
image uncovered. Setting it to zero instructs the net to produce as
many outputs as is necessary to ensure that the whole image is covered.
default 0
"""
def __init__(self, ds, stride, start=0, outputs=0):
self.ds = ds
self.stride = stride
self.start = start
self.copy_non_contiguous = 0
assert stride > 0 and stride <= ds, (stride, ds)
assert ds > 0, ds # We check in the code if ds <= imgSizeX
def __eq__(self, other):
"""
.. todo::
WRITEME
"""
# Don't put copy_non_contiguous here as it doesn't change the output
return (type(self) == type(other) and
self.ds == other.ds and
self.stride == other.stride and
self.start == other.start)
def __hash__(self):
"""
.. todo::
WRITEME
"""
# Don't put copy_non_contiguous here as it doesn't change the output
return (hash(type(self)) ^ hash(self.ds) ^
hash(self.stride) ^ hash(self.start))
def c_header_dirs(self):
"""
.. todo::
WRITEME
"""
return [this_dir]
def c_headers(self):
"""
.. todo::
WRITEME
"""
return ['nvmatrix.cuh', 'conv_util.cuh']
def c_lib_dirs(self):
"""
.. todo::
WRITEME
"""
return [cuda_convnet_loc]
def c_libraries(self):
"""
.. todo::
WRITEME
"""
return ['cuda_convnet']
def c_code_cache_version(self):
"""
.. todo::
WRITEME
"""
return (1,)
def _argument_contiguity_check(self, arg_name):
"""
.. todo::
WRITEME
"""
return """
if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))
{
if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {
PyErr_SetString(PyExc_ValueError,
"%(class)s: %(arg_name)s must be C contiguous");
%%(fail)s;
}
}
""" % {
'class': self.__class__.__name__,
'arg_name': arg_name,
'class_name_caps': self.__class__.__name__.upper(),
}
def make_node(self, images):
"""
.. todo::
WRITEME
"""
images = as_cuda_ndarray_variable(images)
assert images.ndim == 4
channels_broadcastable = images.type.broadcastable[0]
batch_broadcastable = images.type.broadcastable[3]
rows_broadcastable = False
cols_broadcastable = False
targets_broadcastable = (channels_broadcastable, rows_broadcastable,
cols_broadcastable, batch_broadcastable)
targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
targets = targets_type()
return Apply(self, [images], [targets])
def c_code(self, node, name, inputs, outputs, sub):
"""
.. todo::
WRITEME
"""
images, = inputs
targets, = outputs
fail = sub['fail']
# The number of braces that must be closed at the end
num_braces = 0
if self.copy_non_contiguous:
raise NotImplementedError()
else:
basic_setup = "#define WEIGHTEDMAXPOOL_COPY_NON_CONTIGUOUS 0\n"
# Convert images into nv_images, an NVMatrix, for compatibility
# with the cuda-convnet functions
setup_nv_images = self._argument_contiguity_check("images") + """
if (%(images)s->nd != 4)
{
PyErr_Format(PyExc_ValueError,
"images must have nd=4, got nd=%%i", %(images)s->nd);
%(fail)s;
}
{ //setup_nv_images brace 1
const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);
const int img_channels = images_dims[0];
const int imgSizeY = images_dims[1];
const int imgSizeX = images_dims[2];
const int batch_size = images_dims[3];
if(imgSizeY != imgSizeX){
PyErr_Format(PyExc_ValueError,
"images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",
img_channels, imgSizeY, imgSizeX, batch_size);
%(fail)s;
}
if(%(ds)s > imgSizeY){
PyErr_Format(PyExc_ValueError,
"ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",
%(ds)s, imgSizeX, imgSizeY);
%(fail)s;
}
if(%(start)s >= imgSizeX){
PyErr_Format(PyExc_ValueError,
"start is %%d but must be smaller then the images size of %%d x %%d.",
%(start)s, imgSizeX, imgSizeY);
%(fail)s;
}
NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,
"MaxPool:nv_images");
"""
num_braces += 1
setup_nv_targets = """
//int _outputsX = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1;
int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;
int target_dims [] = {
img_channels,
_outputsX,
_outputsX,
batch_size };
if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))
{
%(fail)s;
}
{ // setup_nv_target brace # 1
NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1] * target_dims[2],
target_dims[3], "MaxPool:nv_targets");
"""
num_braces += 1
do_pool = """
convLocalWeightedPool(nv_images, nv_targets, img_channels, %(ds)s,
%(start)s, %(stride)s, _outputsX, MaxPooler());
"""
braces = '}' * num_braces
rval = (basic_setup +
setup_nv_images +
setup_nv_targets +
do_pool +
braces)
start = self.start
stride = self.stride
ds = self.ds
rval = rval % locals()
return rval
def grad(self, inp, grads):
"""
.. todo::
WRITEME
"""
raise NotImplementedError()
# Make sure the cuda_convnet library is compiled and up-to-date
def make_thunk(self, node, storage_map, compute_map, no_recycling):
"""
.. todo::
WRITEME
"""
if not convnet_available():
raise RuntimeError('Could not compile cuda_convnet')
return super(WeightedMaxPool, self).make_thunk(
node, storage_map, compute_map, no_recycling)
|
|
# Copyright (C) 2016 Antoine Carme <Antoine.Carme@Laposte.net>
# All rights reserved.
# This file is part of the Python Automatic Forecasting (PyAF) library and is made available under
# the terms of the 3 Clause BSD license
import pandas as pd
import numpy as np
from . import Time as tsti
from . import DateTime_Functions as dtfunc
from . import Perf as tsperf
from . import Plots as tsplot
from . import Utils as tsutil
# for timing
import time
class cAbstractCycle:
def __init__(self , trend):
self.mTimeInfo = tsti.cTimeInfo()
self.mTrendFrame = pd.DataFrame()
self.mCycleFrame = pd.DataFrame()
self.mTrend = trend;
self.mTrend_residue_name = self.mTrend.mOutName + '_residue'
self.mFormula = None;
self.mComplexity = None;
def getCycleResidueName(self):
return self.getCycleName() + "_residue";
def plot(self):
tsplot.decomp_plot(self.mCycleFrame, self.mTimeInfo.mNormalizedTimeColumn,
self.mTrend_residue_name, self.getCycleName() , self.getCycleResidueName(), horizon = self.mTimeInfo.mHorizon);
def check_not_nan(self, sig , name):
#print("check_not_nan");
if(np.isnan(sig).any() or np.isinf(sig).any() ):
logger = tsutil.get_pyaf_logger();
logger.error("CYCLE_RESIDUE_WITH_NAN_IN_SIGNAL" + str(sig));
raise tsutil.Internal_PyAF_Error("CYCLE_COLUMN _FOR_TREND_RESIDUE ['" + name + "'");
pass
def compute_cycle_residue(self, df):
target = df[self.mTrend_residue_name].values
lSignal = df[self.mSignal].values
lTrend = df[self.mTrend.mOutName]
lCycle = df[self.getCycleName()]
if(self.mDecompositionType in ['T+S+R']):
df[self.getCycleResidueName()] = lSignal - lTrend - lCycle
elif(self.mDecompositionType in ['TS+R']):
df[self.getCycleResidueName()] = lSignal - lTrend * lCycle
else:
lTrendCycle = lTrend * lCycle
# This is questionable. But if only a few values are zero, it is the safest.
lTrendCycle = lTrendCycle.apply(lambda x : x if(abs(x) > 1e-8) else 1e-8)
df[self.getCycleResidueName()] = lSignal / lTrendCycle
df[self.getCycleResidueName()] = df[self.getCycleResidueName()].astype(target.dtype)
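# Illustrative summary of the residue computed above:
# - 'T+S+R'   : residue = signal - trend - cycle
# - 'TS+R'    : residue = signal - trend * cycle
# - otherwise (e.g. 'TSR'): residue = signal / (trend * cycle), with
#   near-zero trend*cycle products clamped to 1e-8 before dividing.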
def compute_target_means_by_cycle_value(self , iCycleFrame, iCycleName):
# we encode only using estimation
lCycleFrameEstim = self.mSplit.getEstimPart(iCycleFrame);
lGroupBy = lCycleFrameEstim.groupby(by=[iCycleName] , sort=False)[self.mTrend_residue_name]
lEncodedValueDict = None
if(self.mOptions.mCycle_Encoding_Scheme == "Target_Mean"):
lEncodedValueDict = lGroupBy.mean().to_dict();
else:
lEncodedValueDict = lGroupBy.median().to_dict();
for x in lEncodedValueDict.keys():
lEncodedValueDict[ x ] = np.float64(lEncodedValueDict[ x ])
return lEncodedValueDict
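# For example (hypothetical values): grouping a day-of-week cycle column over
# the estimation part could yield {0: 1.2, 1: -0.3, ...}, i.e. one mean (or
# median, depending on mCycle_Encoding_Scheme) of the trend residue per
# distinct cycle value.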
def compute_target_means_default_value(self):
# we encode only using estimation
lCycleFrameEstim = self.mSplit.getEstimPart(self.mCycleFrame);
if(self.mOptions.mCycle_Encoding_Scheme == "Target_Mean"):
return np.float64(lCycleFrameEstim[self.mTrend_residue_name].mean());
return np.float64(lCycleFrameEstim[self.mTrend_residue_name].median());
def computePerf(self):
if(self.mOptions.mDebug):
self.check_not_nan(self.mCycleFrame[self.getCycleResidueName()], self.getCycleResidueName())
# self.mCycleFrame.to_csv(self.getCycleResidueName() + ".csv");
self.mCycleFitPerf = tsperf.cPerf();
self.mCycleForecastPerf = tsperf.cPerf();
# self.mCycleFrame[[self.mTrend_residue_name, self.getCycleName()]].to_csv(self.getCycleName() + ".csv");
(lFrameFit, lFrameForecast, lFrameTest) = self.mSplit.cutFrame(self.mCycleFrame);
self.mCycleFitPerf.computeCriterion(
lFrameFit[self.mTrend_residue_name], lFrameFit[self.getCycleName()],
self.mOptions.mCycle_Criterion, self.getCycleName())
self.mCycleForecastPerf.computeCriterion(
lFrameForecast[self.mTrend_residue_name], lFrameForecast[self.getCycleName()],
self.mOptions.mCycle_Criterion, self.getCycleName())
class cZeroCycle(cAbstractCycle):
def __init__(self , trend):
super().__init__(trend);
self.mFormula = "NoCycle"
self.mComplexity = 0;
self.mConstantValue = 0.0
def getCycleName(self):
return self.mTrend_residue_name + "_zeroCycle[" + str(self.mConstantValue) + "]";
def dump_values(self):
logger = tsutil.get_pyaf_logger();
lDict = {}
logger.info("ZERO_CYCLE_MODEL_VALUES " + self.getCycleName() + " " + str(self.mConstantValue) + " {}");
def fit(self):
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
self.mConstantValue = 0.0
if(self.mDecompositionType in ['TS+R', 'TSR']):
# multiplicative models
self.mConstantValue = 1.0
self.mCycleFrame[self.mTrend.mOutName] = self.mTrendFrame[self.mTrend.mOutName]
self.mCycleFrame[self.mTrend_residue_name] = self.mTrendFrame[self.mTrend_residue_name]
self.mCycleFrame[self.getCycleName()] = self.mConstantValue
self.compute_cycle_residue(self.mCycleFrame)
self.mOutName = self.getCycleName()
def transformDataset(self, df):
target = df[self.mTrend_residue_name]
df[self.getCycleName()] = self.mConstantValue;
self.compute_cycle_residue(df)
return df;
class cSeasonalPeriodic(cAbstractCycle):
def __init__(self , trend, date_part):
super().__init__(trend);
self.mDatePart = date_part;
self.mEncodedValueDict = {}
self.mFormula = "Seasonal_" + self.mDatePart.name;
def getCycleName(self):
return self.mTrend_residue_name + "_Seasonal_" + self.mDatePart.name;
def dump_values(self):
logger = tsutil.get_pyaf_logger();
lDict = {}
logger.info("SEASONAL_MODEL_VALUES " + self.getCycleName() + " " + str(self.mDefaultValue) + " " + str(self.mEncodedValueDict));
def hasEnoughData(self, iTimeMin, iTimeMax):
lTimeDelta = iTimeMax - iTimeMin;
lDays = lTimeDelta / np.timedelta64(1,'D');
lSeconds = lTimeDelta / np.timedelta64(1,'s');
# these are just guesses of how much data is needed to get valid signal stats/means of each seasonal unit.
# TODO : add these in the options. (None, None) => no limit
lThresholds = {
dtfunc.eDatePart.Hour : (1 * 10 , None), # 10 days
dtfunc.eDatePart.Minute : (None , 3600 * 10), # 10 hours
dtfunc.eDatePart.Second : (None , 360 * 10), # 10 minutes
dtfunc.eDatePart.DayOfMonth : (30 * 10 , None), # 10 months
dtfunc.eDatePart.DayOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.MonthOfYear : (360 * 10 , None), # 10 years
dtfunc.eDatePart.WeekOfYear : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.DayOfYear : (360 * 10 , None), # 10 years
dtfunc.eDatePart.HourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.TwoHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.ThreeHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.FourHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.SixHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.EightHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.TwelveHourOfWeek : (7 * 10 , None), # 10 weeks
dtfunc.eDatePart.WeekOfMonth : (30 * 10 , None), # 10 months
dtfunc.eDatePart.DayOfNthWeekOfMonth : (30 * 10 , None) # 10 months
}
lThreshold = lThresholds.get(self.mDatePart)
if(lThreshold[0] is not None):
return (lDays >= lThreshold[0]);
elif(lThreshold[1] is not None):
return (lSeconds >= lThreshold[1]);
return False;
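# Example reading of the table above: DayOfWeek requires lDays >= 7 * 10,
# i.e. roughly ten full weeks of data, while Minute is checked in seconds
# (lSeconds >= 3600 * 10, about ten hours).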
def compute_date_parts(self, iTimeValues):
lHelper = dtfunc.cDateTime_Helper()
return lHelper.apply_date_time_computer(self.mDatePart, iTimeValues);
def fit(self):
assert(self.mTimeInfo.isPhysicalTime());
lHor = self.mTimeInfo.mHorizon;
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
lName = self.getCycleName();
self.mCycleFrame[self.mTrend_residue_name] = self.mTrendFrame[self.mTrend_residue_name]
self.mCycleFrame[self.mTrend.mOutName] = self.mTrendFrame[self.mTrend.mOutName]
self.mCycleFrame[lName] = self.compute_date_parts(self.mTrendFrame[self.mTime])
self.mDefaultValue = self.compute_target_means_default_value()
self.mEncodedValueDict = self.compute_target_means_by_cycle_value(self.mCycleFrame, self.getCycleName())
self.mCycleFrame[lName + '_enc'] = self.mCycleFrame[lName].apply(lambda x : self.mEncodedValueDict.get(x , self.mDefaultValue))
self.mCycleFrame[lName + '_enc'].fillna(self.mDefaultValue, inplace=True);
self.compute_cycle_residue(self.mCycleFrame)
self.mCycleFrame[lName + '_NotEncoded'] = self.mCycleFrame[lName];
self.mCycleFrame[lName] = self.mCycleFrame[lName + '_enc'];
self.mOutName = self.getCycleName()
#print("encoding '" + lName + "' " + str(self.mEncodedValueDict));
# The longer the seasonal, the more complex it is.
self.mComplexity = len(self.mEncodedValueDict.keys())
def transformDataset(self, df):
target = df[self.mTrend_residue_name]
lDateParts = self.compute_date_parts(df[self.mTime])
df[self.getCycleName()] = lDateParts.apply(lambda x : self.mEncodedValueDict.get(x , self.mDefaultValue))
self.compute_cycle_residue(df)
return df;
class cBestCycleForTrend(cAbstractCycle):
def __init__(self , trend, criterion):
super().__init__(trend);
self.mCycleFrame = pd.DataFrame()
self.mCyclePerfByLength = {}
self.mBestCycleValueDict = {}
self.mBestCycleLength = None
self.mCriterion = criterion
self.mFormula = "BestCycle"
def getCycleName(self):
return self.mTrend_residue_name + "_bestCycle_by" + self.mCriterion;
def dump_values(self):
logger = tsutil.get_pyaf_logger();
lDict = {} if(self.mBestCycleLength is None) else self.mBestCycleValueDict[self.mBestCycleLength]
logger.info("BEST_CYCLE_LENGTH_VALUES " + self.getCycleName() + " " + str(self.mBestCycleLength) + " " + str(self.mDefaultValue) + " " + str(lDict));
def dumpCyclePerfs(self):
print(self.mCyclePerfByLength);
def computeBestCycle(self):
# self.dumpCyclePerfs();
self.mBestCycleLength = None;
lData = self.mCyclePerfByLength.items()
if(len(lData) == 0):
return
lPerf = tsperf.cPerf();
# lower MAPE is better, fewer categories are better; the last element is the length, to give a total order.
lSortingMethod_By_MAPE = lambda x : (x[1][0], x[0])
lData = sorted(lData, key = lSortingMethod_By_MAPE)
assert(len(lData) > 0)
lBestCriterion = lData[0][1]
lData_smallest = [x for x in lData if lPerf.is_close_criterion_value(self.mOptions.mCycle_Criterion,
x[1][0],
iTolerance = 0.05, iRefValue = lBestCriterion[0])]
lSortingMethod_By_Complexity = lambda x : (x[1][1], x[0])
lData_smallest = sorted(lData_smallest, key = lSortingMethod_By_Complexity)
assert(len(lData_smallest) > 0)
self.mBestCycleLength = lData_smallest[0][0]
self.transformDataset(self.mCycleFrame);
pass
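# Selection sketch (hypothetical data): cycles are first ranked by criterion
# value (lower MAPE is better), then every length whose criterion is close to
# the best one (iTolerance = 0.05) is kept, and among those the one with the
# fewest distinct values (then the smallest length) wins. With
# {4: (0.10, 4), 12: (0.10, 12)}, length 4 would be selected.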
def generate_cycles(self):
self.mTimeInfo.addVars(self.mCycleFrame);
self.mCycleFrame[self.mTrend_residue_name] = self.mTrendFrame[self.mTrend_residue_name]
self.mCycleFrame[self.mTrend.mOutName] = self.mTrendFrame[self.mTrend.mOutName]
self.mDefaultValue = self.compute_target_means_default_value();
self.mCyclePerfByLength = {}
lMaxRobustCycleLength = self.mTrendFrame.shape[0]//12;
# print("MAX_ROBUST_CYCLE_LENGTH", self.mTrendFrame.shape[0], lMaxRobustCycleLength);
lCycleLengths = self.mOptions.mCycleLengths or range(2,lMaxRobustCycleLength + 1)
lCycleFrame = pd.DataFrame();
lCycleFrame[self.mTrend_residue_name ] = self.mTrendFrame[self.mTrend_residue_name]
for lLength in lCycleLengths:
if ((lLength > 1) and (lLength <= lMaxRobustCycleLength)):
name_length = self.mTrend_residue_name + '_Cycle';
lCycleFrame[name_length] = self.mCycleFrame[self.mTimeInfo.mRowNumberColumn] % lLength
lEncodedValueDict = self.compute_target_means_by_cycle_value(lCycleFrame, name_length)
lCycleFrame[name_length + '_enc'] = lCycleFrame[name_length].apply(
lambda x : lEncodedValueDict.get(x , self.mDefaultValue))
self.mBestCycleValueDict[lLength] = lEncodedValueDict;
lPerf = tsperf.cPerf();
# validate the cycles on the validation part
lValidFrame = self.mSplit.getValidPart(lCycleFrame);
lCritValue = lPerf.computeCriterion(lValidFrame[self.mTrend_residue_name],
lValidFrame[name_length + "_enc"],
self.mCriterion,
"Validation")
if(lPerf.is_acceptable_criterion_value(self.mOptions.mCycle_Criterion, iRefValue = lCritValue)):
self.mCyclePerfByLength[lLength] = (round(lCritValue, 3) , len(lEncodedValueDict))
if(self.mOptions.mDebugCycles):
logger = tsutil.get_pyaf_logger();
logger.debug("CYCLE_INTERNAL_CRITERION " + name_length + " " + str(lLength) + \
" " + self.mCriterion +" " + str(lCritValue))
pass
def fit(self):
# print("cycle_fit" , self.mTrend_residue_name);
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.generate_cycles();
self.computeBestCycle();
self.mOutName = self.getCycleName()
self.mFormula = "Cycle_None"
if(self.mBestCycleLength is not None):
self.mFormula = "Cycle_" + str(self.mBestCycleLength);
self.transformDataset(self.mCycleFrame);
self.mComplexity = 0
if(self.mBestCycleLength is not None):
lDict = self.mBestCycleValueDict[self.mBestCycleLength];
self.mComplexity = len(lDict.keys())
def transformDataset(self, df):
if(self.mBestCycleLength is not None):
lValueCol = df[self.mTimeInfo.mRowNumberColumn].apply(lambda x : x % self.mBestCycleLength);
df['cycle_internal'] = lValueCol;
# print("BEST_CYCLE" , self.mBestCycleLength)
# print(self.mBestCycleValueDict);
lDict = self.mBestCycleValueDict[self.mBestCycleLength];
df[self.getCycleName()] = lValueCol.apply(lambda x : lDict.get(x , self.mDefaultValue));
else:
df[self.getCycleName()] = np.zeros_like(df[self.mTimeInfo.mRowNumberColumn]);
target = df[self.mTrend_residue_name]
self.compute_cycle_residue(df)
if(self.mOptions.mDebug):
self.check_not_nan(self.mCycleFrame[self.getCycleName()].values , self.getCycleName());
return df;
class cCycleEstimator:
def __init__(self):
self.mTimeInfo = tsti.cTimeInfo()
self.mTrendFrame = pd.DataFrame()
self.mCycleFrame = pd.DataFrame()
self.mCycleList = {}
def addSeasonal(self, trend, seas_type, resolution):
if(resolution >= self.mTimeInfo.mResolution):
lSeasonal = cSeasonalPeriodic(trend, seas_type);
if(self.mOptions.mActivePeriodics[lSeasonal.mFormula]):
if(lSeasonal.hasEnoughData(self.mTimeInfo.mTimeMin, self.mTimeInfo.mTimeMax)):
self.mCycleList[trend] = self.mCycleList[trend] + [lSeasonal];
else:
if(self.mOptions.mDebugCycles):
lTimeDelta = self.mTimeInfo.mTimeMax - self.mTimeInfo.mTimeMin
lDays = lTimeDelta / np.timedelta64(1,'D');
logger = tsutil.get_pyaf_logger();
logger.debug("NOT_ENOUGH_DATA_TO_ANAYLSE_SEASONAL_PATTERN " + str((trend.__class__.__name__, seas_type, resolution, lDays)))
pass
def defineCycles(self):
for trend in self.mTrendList:
self.mCycleList[trend] = [];
if(self.mOptions.mActivePeriodics['NoCycle']):
self.mCycleList[trend] = [cZeroCycle(trend)];
if(self.mOptions.mActivePeriodics['BestCycle']):
self.mCycleList[trend] = self.mCycleList[trend] + [
cBestCycleForTrend(trend, self.mOptions.mCycle_Criterion)];
if(self.mTimeInfo.isPhysicalTime()):
# The order used here is mandatory. see filterSeasonals before changing this order.
self.addSeasonal(trend, dtfunc.eDatePart.MonthOfYear, dtfunc.eTimeResolution.MONTH);
self.addSeasonal(trend, dtfunc.eDatePart.WeekOfYear, dtfunc.eTimeResolution.DAY);
self.addSeasonal(trend, dtfunc.eDatePart.DayOfMonth, dtfunc.eTimeResolution.DAY);
self.addSeasonal(trend, dtfunc.eDatePart.DayOfWeek, dtfunc.eTimeResolution.DAY);
self.addSeasonal(trend, dtfunc.eDatePart.DayOfYear, dtfunc.eTimeResolution.DAY);
self.addSeasonal(trend, dtfunc.eDatePart.Hour, dtfunc.eTimeResolution.HOUR);
self.addSeasonal(trend, dtfunc.eDatePart.Minute, dtfunc.eTimeResolution.MINUTE);
self.addSeasonal(trend, dtfunc.eDatePart.Second, dtfunc.eTimeResolution.SECOND);
self.addSeasonal(trend, dtfunc.eDatePart.HourOfWeek, dtfunc.eTimeResolution.HOUR);
self.addSeasonal(trend, dtfunc.eDatePart.TwoHourOfWeek, dtfunc.eTimeResolution.HOUR);
self.addSeasonal(trend, dtfunc.eDatePart.ThreeHourOfWeek, dtfunc.eTimeResolution.HOUR);
self.addSeasonal(trend, dtfunc.eDatePart.FourHourOfWeek, dtfunc.eTimeResolution.HOUR);
self.addSeasonal(trend, dtfunc.eDatePart.SixHourOfWeek, dtfunc.eTimeResolution.HOUR);
self.addSeasonal(trend, dtfunc.eDatePart.EightHourOfWeek, dtfunc.eTimeResolution.HOUR);
self.addSeasonal(trend, dtfunc.eDatePart.TwelveHourOfWeek, dtfunc.eTimeResolution.HOUR);
self.addSeasonal(trend, dtfunc.eDatePart.WeekOfMonth, dtfunc.eTimeResolution.DAY);
self.addSeasonal(trend, dtfunc.eDatePart.DayOfNthWeekOfMonth, dtfunc.eTimeResolution.DAY);
for trend in self.mTrendList:
if(len(self.mCycleList[trend]) == 0):
self.mCycleList[trend] = [cZeroCycle(trend)];
for cycle in self.mCycleList[trend]:
cycle.mTrendFrame = self.mTrendFrame;
cycle.mTimeInfo = self.mTimeInfo;
cycle.mSplit = self.mSplit;
cycle.mOptions = self.mOptions;
cycle.mDecompositionType = self.mDecompositionType
def plotCycles(self):
for trend in self.mTrendList:
for cycle in self.mCycleList[trend]:
cycle.plot()
def dumpCyclePerf(self, cycle):
if(self.mOptions.mDebugCycles):
logger = tsutil.get_pyaf_logger();
logger.debug("CYCLE_PERF_DETAIL_COUNT_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mCount) + " %.3f" % (cycle.mCycleForecastPerf.mCount));
logger.debug("CYCLE_PERF_DETAIL_MAPE_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mMAPE)+ " %.3f" % (cycle.mCycleForecastPerf.mMAPE));
logger.debug("CYCLE_PERF_DETAIL_L2_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mL2) + " %.3f" % (cycle.mCycleForecastPerf.mL2));
logger.debug("CYCLE_PERF_DETAIL_R2_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mR2) + " %.3f" % (cycle.mCycleForecastPerf.mR2));
logger.debug("CYCLE_PERF_DETAIL_PEARSONR_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mPearsonR) + " %.3f" % (cycle.mCycleForecastPerf.mPearsonR));
def estimateCycles(self):
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
for trend in self.mTrendList:
lTrend_residue_name = trend.mOutName + '_residue'
self.mCycleFrame[lTrend_residue_name] = self.mTrendFrame[lTrend_residue_name]
for cycle in self.mCycleList[trend]:
start_time = time.time()
cycle.fit();
if(self.mOptions.mDebugPerformance):
cycle.computePerf();
self.dumpCyclePerf(cycle)
self.mCycleFrame[cycle.getCycleName()] = cycle.mCycleFrame[cycle.getCycleName()]
self.mCycleFrame[cycle.getCycleResidueName()] = cycle.mCycleFrame[cycle.getCycleResidueName()]
if(self.mOptions.mDebug):
cycle.check_not_nan(self.mCycleFrame[cycle.getCycleResidueName()].values ,
cycle.getCycleResidueName())
end_time = time.time()
lTrainingTime = round(end_time - start_time , 2);
if(self.mOptions.mDebugProfile):
logger = tsutil.get_pyaf_logger();
logger.info("CYCLE_TRAINING_TIME_IN_SECONDS '" + cycle.mOutName + "' " + str(lTrainingTime))
# Avoid dataframe fragmentation warnings.
self.mCycleFrame = self.mCycleFrame.copy()
pass
def filterSeasonals(self):
logger = tsutil.get_pyaf_logger();
logger.debug("CYCLE_TRAINING_FILTER_SEASONALS_START")
for trend in self.mTrendList:
lPerfs = {}
lTrend_residue_name = trend.mOutName + '_residue'
lCycleList = []
lSeasonals = {}
for cycle in self.mCycleList[trend]:
if(isinstance(cycle , cSeasonalPeriodic)):
cycle.computePerf();
# check that the MAPE is not above 1.0
if(cycle.mCycleForecastPerf.is_acceptable_criterion_value(self.mOptions.mCycle_Criterion)):
lCritValue = cycle.mCycleForecastPerf.getCriterionValue(self.mOptions.mCycle_Criterion)
lCategories = len(cycle.mEncodedValueDict.keys())
lPerfs[cycle.mOutName] = (round(lCritValue, 3), lCategories)
lSeasonals[cycle.mOutName] = cycle
else:
lCycleList = lCycleList + [cycle]
if(len(lSeasonals) == 0):
return
lData = lPerfs.items()
# lower MAPE is better, fewer categories are better; the last element is the name of the seasonal, to give a total order.
lSortingMethod_By_MAPE = lambda x : (x[1][0], x[0])
lData = sorted(lData, key = lSortingMethod_By_MAPE)
assert(len(lData) > 0)
lBestPerf = lSeasonals[ lData[0][0] ].mCycleForecastPerf
lBestCriterion = lData[0][1]
lData_smallest = [x for x in lData if lBestPerf.is_close_criterion_value(self.mOptions.mCycle_Criterion,
x[1][0],
iTolerance = 0.05)]
lSortingMethod_By_Complexity = lambda x : (x[1][1], x[0])
lData_smallest = sorted(lData_smallest, key = lSortingMethod_By_Complexity)
assert(len(lData_smallest) > 0)
lBestSeasonal = lSeasonals[ lData_smallest[0][0] ]
lBestCriterion = lData_smallest[0][1]
lCycleList = lCycleList + [lBestSeasonal]
self.mCycleList[trend] = lCycleList
if(self.mOptions.mDebugCycles):
logger.info("CYCLE_TRAINING_FILTER_SEASONALS_DATA " + trend.mOutName + " " + str(lData_smallest))
logger.info("CYCLE_TRAINING_FILTER_SEASONALS_BEST " + trend.mOutName + " " + lBestSeasonal.mOutName + " " + str(lBestCriterion))
logger.debug("CYCLE_TRAINING_FILTER_SEASONALS_END")
pass
def estimateAllCycles(self):
self.defineCycles();
self.estimateCycles()
if(self.mOptions.mFilterSeasonals):
self.filterSeasonals()
|
|
"""Config flow to configure Xiaomi Miio."""
import logging
from re import search
from micloud import MiCloud
from micloud.micloudexception import MiCloudAccessDenied
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.core import callback
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_CLOUD_COUNTRY,
CONF_CLOUD_PASSWORD,
CONF_CLOUD_SUBDEVICES,
CONF_CLOUD_USERNAME,
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MAC,
CONF_MANUAL,
CONF_MODEL,
DEFAULT_CLOUD_COUNTRY,
DOMAIN,
MODELS_ALL,
MODELS_ALL_DEVICES,
MODELS_GATEWAY,
SERVER_COUNTRY_CODES,
AuthException,
SetupException,
)
from .device import ConnectXiaomiDevice
_LOGGER = logging.getLogger(__name__)
DEVICE_SETTINGS = {
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
}
DEVICE_CONFIG = vol.Schema({vol.Required(CONF_HOST): str}).extend(DEVICE_SETTINGS)
DEVICE_MODEL_CONFIG = vol.Schema({vol.Required(CONF_MODEL): vol.In(MODELS_ALL)})
DEVICE_CLOUD_CONFIG = vol.Schema(
{
vol.Optional(CONF_CLOUD_USERNAME): str,
vol.Optional(CONF_CLOUD_PASSWORD): str,
vol.Optional(CONF_CLOUD_COUNTRY, default=DEFAULT_CLOUD_COUNTRY): vol.In(
SERVER_COUNTRY_CODES
),
vol.Optional(CONF_MANUAL, default=False): bool,
}
)
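# A filled-in cloud form could look like this (all values hypothetical):
# {CONF_CLOUD_USERNAME: "user@example.com", CONF_CLOUD_PASSWORD: "secret",
#  CONF_CLOUD_COUNTRY: DEFAULT_CLOUD_COUNTRY, CONF_MANUAL: False}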
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Init object."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
errors = {}
if user_input is not None:
use_cloud = user_input.get(CONF_CLOUD_SUBDEVICES, False)
cloud_username = self.config_entry.data.get(CONF_CLOUD_USERNAME)
cloud_password = self.config_entry.data.get(CONF_CLOUD_PASSWORD)
cloud_country = self.config_entry.data.get(CONF_CLOUD_COUNTRY)
if use_cloud and (
not cloud_username or not cloud_password or not cloud_country
):
errors["base"] = "cloud_credentials_incomplete"
# trigger re-auth flow
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data=self.config_entry.data,
)
)
if not errors:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_CLOUD_SUBDEVICES,
default=self.config_entry.options.get(CONF_CLOUD_SUBDEVICES, False),
): bool
}
)
return self.async_show_form(
step_id="init", data_schema=settings_schema, errors=errors
)
class XiaomiMiioFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Xiaomi Miio config flow."""
VERSION = 1
def __init__(self):
"""Initialize."""
self.host = None
self.mac = None
self.token = None
self.model = None
self.name = None
self.cloud_username = None
self.cloud_password = None
self.cloud_country = None
self.cloud_devices = {}
@staticmethod
@callback
def async_get_options_flow(config_entry) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def async_step_reauth(self, user_input=None):
"""Perform reauth upon an authentication error or missing cloud credentials."""
self.host = user_input[CONF_HOST]
self.token = user_input[CONF_TOKEN]
self.mac = user_input[CONF_MAC]
self.model = user_input.get(CONF_MODEL)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input=None):
"""Dialog that informs the user that reauth is required."""
if user_input is not None:
return await self.async_step_cloud()
return self.async_show_form(
step_id="reauth_confirm", data_schema=vol.Schema({})
)
async def async_step_import(self, conf: dict):
"""Import a configuration from config.yaml."""
self.host = conf[CONF_HOST]
self.token = conf[CONF_TOKEN]
self.name = conf.get(CONF_NAME)
self.model = conf.get(CONF_MODEL)
self.context.update(
{"title_placeholders": {"name": f"YAML import {self.host}"}}
)
return await self.async_step_connect()
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
return await self.async_step_cloud()
async def async_step_zeroconf(self, discovery_info):
"""Handle zeroconf discovery."""
name = discovery_info.get("name")
self.host = discovery_info.get("host")
self.mac = discovery_info.get("properties", {}).get("mac")
if self.mac is None:
poch = discovery_info.get("properties", {}).get("poch", "")
result = search(r"mac=\w+", poch)
if result is not None:
self.mac = result.group(0).split("=")[1]
if not name or not self.host or not self.mac:
return self.async_abort(reason="not_xiaomi_miio")
self.mac = format_mac(self.mac)
# Check which device is discovered.
for gateway_model in MODELS_GATEWAY:
if name.startswith(gateway_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"Gateway {self.host}"}}
)
return await self.async_step_cloud()
for device_model in MODELS_ALL_DEVICES:
if name.startswith(device_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"{device_model} {self.host}"}}
)
return await self.async_step_cloud()
# Discovered device is not yet supported
_LOGGER.debug(
"Not yet supported Xiaomi Miio device '%s' discovered with host %s",
name,
self.host,
)
return self.async_abort(reason="not_xiaomi_miio")
def extract_cloud_info(self, cloud_device_info):
"""Extract the cloud info."""
if self.host is None:
self.host = cloud_device_info["localip"]
if self.mac is None:
self.mac = format_mac(cloud_device_info["mac"])
if self.model is None:
self.model = cloud_device_info["model"]
if self.name is None:
self.name = cloud_device_info["name"]
self.token = cloud_device_info["token"]
async def async_step_cloud(self, user_input=None):
"""Configure a xiaomi miio device through the Miio Cloud."""
errors = {}
if user_input is not None:
if user_input[CONF_MANUAL]:
return await self.async_step_manual()
cloud_username = user_input.get(CONF_CLOUD_USERNAME)
cloud_password = user_input.get(CONF_CLOUD_PASSWORD)
cloud_country = user_input.get(CONF_CLOUD_COUNTRY)
if not cloud_username or not cloud_password or not cloud_country:
errors["base"] = "cloud_credentials_incomplete"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
miio_cloud = MiCloud(cloud_username, cloud_password)
try:
if not await self.hass.async_add_executor_job(miio_cloud.login):
errors["base"] = "cloud_login_error"
except MiCloudAccessDenied:
errors["base"] = "cloud_login_error"
if errors:
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
devices_raw = await self.hass.async_add_executor_job(
miio_cloud.get_devices, cloud_country
)
if not devices_raw:
errors["base"] = "cloud_no_devices"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
self.cloud_devices = {}
for device in devices_raw:
if not device.get("parent_id"):
name = device["name"]
model = device["model"]
list_name = f"{name} - {model}"
self.cloud_devices[list_name] = device
self.cloud_username = cloud_username
self.cloud_password = cloud_password
self.cloud_country = cloud_country
if self.host is not None:
for device in self.cloud_devices.values():
cloud_host = device.get("localip")
if cloud_host == self.host:
self.extract_cloud_info(device)
return await self.async_step_connect()
if len(self.cloud_devices) == 1:
self.extract_cloud_info(list(self.cloud_devices.values())[0])
return await self.async_step_connect()
return await self.async_step_select()
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
async def async_step_select(self, user_input=None):
"""Handle multiple cloud devices found."""
errors = {}
if user_input is not None:
cloud_device = self.cloud_devices[user_input["select_device"]]
self.extract_cloud_info(cloud_device)
return await self.async_step_connect()
select_schema = vol.Schema(
{vol.Required("select_device"): vol.In(list(self.cloud_devices))}
)
return self.async_show_form(
step_id="select", data_schema=select_schema, errors=errors
)
async def async_step_manual(self, user_input=None):
"""Configure a xiaomi miio device Manually."""
errors = {}
if user_input is not None:
self.token = user_input[CONF_TOKEN]
if user_input.get(CONF_HOST):
self.host = user_input[CONF_HOST]
return await self.async_step_connect()
if self.host:
schema = vol.Schema(DEVICE_SETTINGS)
else:
schema = DEVICE_CONFIG
return self.async_show_form(step_id="manual", data_schema=schema, errors=errors)
async def async_step_connect(self, user_input=None):
"""Connect to a xiaomi miio device."""
errors = {}
if self.host is None or self.token is None:
return self.async_abort(reason="incomplete_info")
if user_input is not None:
self.model = user_input[CONF_MODEL]
# Try to connect to a Xiaomi Device.
connect_device_class = ConnectXiaomiDevice(self.hass)
try:
await connect_device_class.async_connect_device(self.host, self.token)
except AuthException:
if self.model is None:
errors["base"] = "wrong_token"
except SetupException:
if self.model is None:
errors["base"] = "cannot_connect"
device_info = connect_device_class.device_info
if self.model is None and device_info is not None:
self.model = device_info.model
if self.model is None and not errors:
errors["base"] = "cannot_connect"
if errors:
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
if self.mac is None and device_info is not None:
self.mac = format_mac(device_info.mac_address)
unique_id = self.mac
existing_entry = await self.async_set_unique_id(
unique_id, raise_on_progress=False
)
if existing_entry:
data = existing_entry.data.copy()
data[CONF_HOST] = self.host
data[CONF_TOKEN] = self.token
if (
self.cloud_username is not None
and self.cloud_password is not None
and self.cloud_country is not None
):
data[CONF_CLOUD_USERNAME] = self.cloud_username
data[CONF_CLOUD_PASSWORD] = self.cloud_password
data[CONF_CLOUD_COUNTRY] = self.cloud_country
self.hass.config_entries.async_update_entry(existing_entry, data=data)
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
if self.name is None:
self.name = self.model
flow_type = None
for gateway_model in MODELS_GATEWAY:
if self.model.startswith(gateway_model):
flow_type = CONF_GATEWAY
if flow_type is None:
for device_model in MODELS_ALL_DEVICES:
if self.model.startswith(device_model):
flow_type = CONF_DEVICE
if flow_type is not None:
return self.async_create_entry(
title=self.name,
data={
CONF_FLOW_TYPE: flow_type,
CONF_HOST: self.host,
CONF_TOKEN: self.token,
CONF_MODEL: self.model,
CONF_MAC: self.mac,
CONF_CLOUD_USERNAME: self.cloud_username,
CONF_CLOUD_PASSWORD: self.cloud_password,
CONF_CLOUD_COUNTRY: self.cloud_country,
},
)
errors["base"] = "unknown_device"
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
|
|
# -*- coding: utf-8 -*-
"""
@file
@brief Classes which defines column for class @see cl IterRow
"""
from inspect import isfunction
from .iter_exceptions import IterException, NotAllowedOperation
from .others_types import long, NA, EmptyGroup, GroupByContainer
from .column_operator import OperatorId, OperatorMul, ColumnOperator, OperatorAdd
from .column_operator import OperatorDiv, OperatorPow, OperatorSub, OperatorDivN, OperatorMod
from .column_operator import OperatorEq, OperatorNe, OperatorGe, OperatorLe, OperatorGt, OperatorLt
from .column_operator import OperatorNot, OperatorOr, OperatorAnd
from .column_operator import OperatorFunc
from .column_group_operator import OperatorGroupLen, OperatorGroupAvg
def private_function_type():
"no documentation"
pass
class ColumnType:
"""
Defines a column of a table.
"""
_default_name = "__unk__"
_str_type = {int: 'int', long: 'long', NA: 'NA',
float: 'float', str: 'str',
type(private_function_type): 'func',
}
def IsColumnType(self):
"""
Checks that this is a column type which can be used by an operator.
"""
return True
@property
def ShortName(self):
"""
a short name (tells the column type)
"""
return "any"
@property
def Name(self):
"""
property
"""
return self._name
@property
def Type(self):
"""
property
"""
return self._type
@property
def Parent(self):
"""
property
"""
return self._parent
@property
def Func(self):
"""
property
"""
return self._func
def __init__(
self, name, typ, func=None, parent=tuple(), op=None, owner=None):
"""
Initializes the column.
@param name name of the column
@param typ type of the data it will contain (can be None)
@param func a function; if None, it will be the identity
@param parent a tuple of parent columns if this column is part of a formula
@param op operator to apply between the parent columns
@param owner table which contains the column (only for further validation)
``func`` is a function: ``f: x --> y``
"""
self._name = name
self._type = typ
self._value = None
self._parent = parent
self._op = op
self._owner = owner
if not isinstance(op, ColumnOperator):
raise IterException(
"op should be a ColumnOperator not: {0}".format(
type(op)))
if not isinstance(parent, tuple):
raise TypeError("we expect a tuple for parameter parent")
for p in parent:
p.IsColumnType()
if typ not in [int, float, long, str, None, NA,
type(private_function_type)]:
raise IterException(
"type should in [int,float,str,long,function]: " +
str(typ))
if isfunction(func):
self._func = func
elif func is None:
self._func = None
else:
raise IterException(
"type of func should in [int,float,str,long,function]: {0}".format(
str(func)))
if "_func" not in self.__dict__:
raise IterException("this column is missing a function")
def __str__(self):
"""
usual
"""
ps = "|".join([_.ShortName for _ in self._parent])
if self._value is not None:
return "CT({0},<{1}>,op:{2},#P={3})={4}".format(
self._name, ColumnType._str_type[self._type], str(self._op), ps, self())
else:
return "CT({0},<{1}>,op:{2},#P={3}) [no loop started]".format(
self._name, ColumnType._str_type[self._type], str(self._op), ps)
def __call__(self):
"""
returns func(value)
"""
if self._func is None:
if len(self._parent) == 0:
if self._value is None:
raise ValueError(
"method set must be called before for column {0}".format(
str(self)))
else:
res = self._value
elif self._op is None:
raise ValueError(
"there are parents but no operator for column {0}\nParents:\n{1}".format(
str(self),
self.print_parent()))
else:
try:
res = self._op(self._parent)
except TypeError as e:
raise IterException(
"unable(1) to apply an operator for column op=<{0}>, col={1}, TYPE={2} TYPE_OP={3} TYPE_PARENT={4}".format(
str(
self._op), str(self), type(self), type(
self._op), type(
self._parent))) from e
except AttributeError as ee:
raise IterException(
"unable(2) to apply an operator for column op=<{0}>, col={1}, TYPE={2} TYPE_OP={3} TYPE_PARENT={4}".format(
str(
self._op), str(self), type(self), type(
self._op), type(
self._parent))) from ee
if isinstance(res, ColumnType):
raise IterException(
"this evaluation(*) cannot return a ColumnType for this column: {0}".format(str(self)))
else:
# we use a shortcut
try:
res = self._func(self._value)
except TypeError as e:
raise IterException(
"unable to compute the value of {0}\n{1}".format(
str(self),
self.print_parent())) from e
if isinstance(res, ColumnType):
raise IterException(
"this evaluation cannot return a ColumnType for this column: {0}".format(
str(self)))
self.set(res)
return res
def set(self, value):
"""
Sets a value for this column.
@param value anything in ``[int, float, long, str, function]``
"""
if isinstance(value, (int, str, float, long, NA)):
self._value = value
elif isinstance(value, EmptyGroup):
# for an empty group
self._value = value
elif isinstance(value, list):
# for a group
self._value = value
else:
raise IterException(
"type of value should be in [int,float,str,long] not {0} for the column {1}".format(
type(value),
str(self)))
def set_none(self):
"""
After a loop on a database, we should put None back as a value.
"""
for p in self._parent:
p.set_none()
self._value = None
def set_name(self, new_name):
"""
Changes the name of the column.
@param newname new name
"""
self._name = new_name
def set_owner(self, new_owner):
"""
Changes the owner of the column.
@param new_owner new owner
"""
self._owner = new_owner
def print_parent(self):
"""
Returns a string showing the dependencies of this columns.
Example:
@code
this_columns
parent1
parent11
parent12
parent2
@endcode
"""
if self._parent is None:
return self.__str__()
else:
rows = [self.__str__()]
for p in self._parent:
rs = [" " + _ for _ in p.print_parent().split("\n")]
rows.extend(rs)
return "\n".join(rows)
######################################
# functions which create other columns
######################################
def copy(self, new_owner):
"""
Returns a copy of this class.
@param new_owner new owner
@return ColumnType
"""
return ColumnType(self._name, self._type, func=None, parent=(
self,), op=OperatorId(), owner=new_owner)
#######################################
# operations
#######################################
def __mul__(self, column):
"""
These operators should be able to translate an expression
into function operating on the values.
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorMul())
else:
return self.__mul__(ColumnConstantType(column))
def __add__(self, column):
"""
These operators should be able to translate an expression
into function operating on the values.
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorAdd())
else:
return self.__add__(ColumnConstantType(column))
def __sub__(self, column):
"""
These operators should be able to translate an expression
into function operating on the values.
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorSub())
else:
return self.__sub__(ColumnConstantType(column))
def __truediv__(self, column):
"""
These operators should be able to translate an expression
into function operating on the values.
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorDiv())
else:
return self.__truediv__(ColumnConstantType(column))
def __floordiv__(self, column):
"""
These operators should be able to translate an expression
into function operating on the values.
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorDivN())
else:
return self.__floordiv__(ColumnConstantType(column))
def __mod__(self, column):
"""
these operators should be able to translate an expression
into function operating on the values
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorMod())
else:
return self.__mod__(ColumnConstantType(column))
def __pow__(self, column):
"""
these operators should be able to translate an expression
into function operating on the values
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorPow())
else:
return self.__pow__(ColumnConstantType(column))
#######################################
# test
#######################################
def __eq__(self, column):
"""
these operators should be able to translate an expression
into function operating on the values
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorEq())
else:
return self.__eq__(ColumnConstantType(column))
def __lt__(self, column):
"""
these operators should be able to translate an expression
into function operating on the values
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorLt())
else:
return self.__lt__(ColumnConstantType(column))
def __le__(self, column):
"""
these operators should be able to translate an expression
into function operating on the values
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorLe())
else:
return self.__le__(ColumnConstantType(column))
def __gt__(self, column):
"""
these operators should be able to translate an expression
into function operating on the values
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorGt())
else:
return self.__gt__(ColumnConstantType(column))
def __ge__(self, column):
"""
these operators should be able to translate an expression
into function operating on the values
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorGe())
else:
return self.__ge__(ColumnConstantType(column))
def __ne__(self, column):
"""
these operators should be able to translate an expression
into function operating on the values
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorNe())
else:
return self.__ne__(ColumnConstantType(column))
#######################################
# logical
#######################################
def Not(self):
"""
``not`` cannot be overridden
"""
return self.__not__()
def __not__(self):
"""
these operators should be able to translate an expression
into function operating on the values
@return a ColumnType
"""
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self,), op=OperatorNot())
def Or(self, column):
"""
``or`` cannot be overridden
"""
return self.__or__(column)
def __or__(self, column):
"""
these operators should be able to translate an expression
into function operating on the values
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorOr())
else:
return self.__or__(ColumnConstantType(column))
def And(self, column):
"""
``and`` cannot be overridden
"""
return self.__and__(column)
def __and__(self, column):
"""
these operators should be able to translate an expression
into function operating on the values
@param column a function or an int or a float or a long or a str or a ColumnType
@return a ColumnType
"""
if isinstance(column, ColumnType):
return ColumnType(ColumnType._default_name, self._type, func=None, parent=(
self, column), op=OperatorAnd())
else:
return self.__and__(ColumnConstantType(column))
#######################################
# group function
#######################################
def len(self):
"""
returns a group columns to count the number of observations
"""
return ColumnGroupType(
ColumnType._default_name, int, parent=(self,), op=OperatorGroupLen())
def count(self):
"""
returns a group columns to count the number of observations
"""
return self.len()
def avg(self):
"""
returns a group columns to return an average
"""
return ColumnGroupType(
ColumnType._default_name, float, parent=(self,), op=OperatorGroupAvg())
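# Sketch of how the operators above compose (hypothetical usage): given two
# ColumnType instances a and b, the expression a * 2 + b builds a small tree.
# a * 2 first wraps 2 in a ColumnConstantType and returns a ColumnType with
# op=OperatorMul() and parents (a, cst(2)); adding b then returns another
# ColumnType with op=OperatorAdd() and parents (that product, b). Calling the
# resulting column evaluates the tree bottom-up via __call__.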
class ColumnConstantType(ColumnType):
"""
defines a constant as a column
"""
def __init__(self, const):
self._value = const
self._func = lambda x, c=self._value: c
self._parent = None
self._op = None
self._type = type(const)
self._const = const
self._owner = None
if isinstance(const, (int, float, long, str, NA)):
pass
else:
raise ValueError(
"this value is not a constant: {0}".format(
str(const)))
@property
def ShortName(self):
"""
a short name (tells the column type)
"""
return "cst"
def set_none(self):
"""
do nothing (it is a constant)
"""
pass
def set(self, value):
"""
do nothing (it is a constant)
@param value anything in [int,float,long,str, function ]
"""
pass
def __call__(self):
"""
return the constant
"""
return self._const
def __str__(self):
"""
usual
"""
return "cst({0})".format(self())
class ColumnTableType(ColumnType):
"""
defines a table column (not coming from an expression)
"""
def __init__(self, name, typ, owner):
"""
constructor
@param name name of the column
@param typ type of the column
@param owner owner of this column
"""
self._name = name
self._func = None
self._parent = None
self._op = None
self._type = typ
self._owner = owner
@property
def ShortName(self):
"""
a short name (tells the column type)
"""
return "col"
def set_none(self):
"""
after a loop on a database, we should put None back as a value
"""
self._value = None
def __call__(self):
"""
returns the content
"""
if self._value is None:
raise IterException(
"this column should contain a value: {0}".format(
str(self)))
return self._value
def __str__(self):
"""
usual
"""
return "col({0},{1})".format(
self._name, ColumnType._str_type[self._type])
class ColumnGroupType(ColumnType):
"""
defines a column which processes a group of rows (after a groupby)
"""
def __init__(self, name, typ, parent, op):
"""
constructor
@param name name of the column
@param typ type of the column
@param owner owner of this column
@param op operator
"""
self._name = name
self._value = None
self._parent = parent
self._opgr = op
self._op = OperatorId()
self._type = typ
self._owner = None
self._func = None
@property
def ShortName(self):
"""
a short name (tells the column type)
"""
return "group"
def set_none(self):
"""
after a loop on a database, we should put None back as a value
"""
self._value = None
def __call__(self):
"""
returns the content
"""
if isinstance(self._value, GroupByContainer):
try:
return self._opgr(self._value)
except TypeError as e:
raise IterException(
"unable(1) to apply an operator for column op=<{0}>, col={1}, TYPE={2} TYPE_OP={3}".format(
str(
self._op), str(self), type(self), type(
self._op))) from e
except AttributeError as ee:
raise IterException(
"unable(2) to apply an operator for column op=<{0}>, col={1}, TYPE={2} TYPE_OP={3}".format(
str(
self._op), str(self), type(self), type(
self._op))) from ee
else:
return super().__call__()
def __str__(self):
"""
usual
"""
return "CGT[{0}]({1})".format(str(self._opgr), self._name)
def set(self, value):
"""
sets a value for this column
@param value anything in [int,float,long,str, function ]
"""
self._value = value
if hasattr(value, "__iter__") and \
not isinstance(value, str) and \
not isinstance(value, GroupByContainer):
raise IterException(
"type of value should be GroupByContainer not {0} for the column {1}".format(
type(value),
str(self)))
def __mul__(self, column):
"""
forbidden
"""
raise NotAllowedOperation()
def __add__(self, column):
"""
forbidden
"""
raise NotAllowedOperation()
def __sub__(self, column):
"""
forbidden
"""
raise NotAllowedOperation()
def __truediv__(self, column):
"""
forbidden
"""
raise NotAllowedOperation()
def __floordiv__(self, column):
"""
forbidden
"""
raise NotAllowedOperation()
def __mod__(self, column):
"""
forbidden
"""
raise NotAllowedOperation()
def __pow__(self, column):
"""
forbidden
"""
raise NotAllowedOperation()
class CFT(ColumnType):
"""
defines a function
"""
def __init__(self, func, *args):
"""
constructor (a function cannot accept keywords)
@param func contained function
@param args list of @see cl ColumnType
"""
self._name = None
self._func = None
self._parent = None
self._op = OperatorFunc(func)
self._type = type(private_function_type)
self._owner = None
self._thisfunc = func
self._parent = tuple(args)
for _ in args:
if not isinstance(_, ColumnType):
raise TypeError(
"Expecting a column type, not {}".format(type(_)))
@property
def ShortName(self):
"""
a short name (tells the column type)
"""
return "func"
def set_none(self):
"""
after a loop on a database, we should put None back as a value
"""
self._value = None
def __str__(self):
"""
usual
"""
return "func({0},{1})".format(
self._name, ColumnType._str_type[self._type])
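# Hedged sketch of how a CFT might be assembled (hypothetical column and
# function; the evaluation itself is performed by the table machinery
# elsewhere in this module):
#
#     col = ColumnTableType("age", int, owner=None)
#     doubled = CFT(lambda x: x * 2, col)   # OperatorFunc wraps the lambda
#     doubled.ShortName                     # -> "func"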
|
|
from vt_manager.communication.utils.XmlHelper import XmlHelper
from vt_manager.models.Action import Action
from vt_manager.models.VirtualMachine import VirtualMachine
from vt_manager.controller.drivers.VTDriver import VTDriver
from vt_manager.communication.XmlRpcClient import XmlRpcClient
from vt_manager.controller.actions.ActionController import ActionController
#from vt_manager.communication.sfa.vm_utils.SfaCommunicator import SfaCommunicator
#from vt_manager.common.middleware.thread_local import thread_locals, pull
from vt_manager.models.reservation import Reservation
from vt_manager.models.VirtualMachineKeys import VirtualMachineKeys
from vt_manager.utils.contextualization.vm_contextualize import VMContextualize
import logging
class ProvisioningResponseDispatcher():
'''
Handles the Agent responses when action status changes
'''
@staticmethod
def processResponse(rspec):
logging.debug("PROCESSING RESPONSE processResponse() STARTED...")
for action in rspec.response.provisioning.action:
try:
actionModel = ActionController.getAction(action.id)
except Exception as e:
logging.error("No action in DB with the incoming uuid\n%s", e)
return
'''
Only process the response if the action is still in QUEUED or ONGOING status; actions already in SUCCESS or FAILED status are finished.
'''
#if str(actionModel.callBackUrl) == str(SfaCommunicator.SFAUrl): #Avoiding unicodes
# event = pull(str(action.id))
# event.send('continue')
# return
if actionModel.getStatus() in (Action.QUEUED_STATUS, Action.ONGOING_STATUS):
logging.debug("The incoming response has id: %s and NEW status: %s" % (actionModel.uuid,actionModel.status))
was_creating = False
was_created = False
actionModel.status = action.status
actionModel.description = action.description
actionModel.save()
#Complete information required for the Plugin: action type and VM
ActionController.completeActionRspec(action, actionModel)
#XXX:Implement this method or some other doing this job
vm = VTDriver.getVMbyUUID(actionModel.getObjectUUID())
if vm.state == "creating...":
was_creating = True
elif vm.state == "starting...":
was_created = True
controller = VTDriver.getDriver(vm.Server.get().getVirtTech())
failedOnCreate = 0
# Update VM model with new status from OXAD
if actionModel.getStatus() == Action.SUCCESS_STATUS:
ProvisioningResponseDispatcher.__updateVMafterSUCCESS(actionModel, vm)
elif actionModel.getStatus() == Action.ONGOING_STATUS:
ProvisioningResponseDispatcher.__updateVMafterONGOING(actionModel, vm)
elif actionModel.getStatus() == Action.FAILED_STATUS:
failedOnCreate = ProvisioningResponseDispatcher.__updateVMafterFAILED(actionModel, vm)
else:
vm.setState(VirtualMachine.UNKNOWN_STATE)
try:
created = False
vm_started = False
if vm.state == "created (stopped)":
created = True
elif vm.state == "running":
vm_started = True
logging.debug("Sending response to plug-in in sendAsync")
if str(vm.callBackURL) == 'SFA.OCF.VTM':
logging.debug("callback: %s" % vm.callBackURL)
print "-------------->PRD: Created:", created, "Was_creating:", was_creating, "vm_started:", vm_started
print "-------------->PRD Action:", action, action.server.__dict__
# Start VM just after creating sliver/VM
if created and was_creating:
from vt_manager.communication.sfa.drivers.VTSfaDriver import VTSfaDriver
driver = VTSfaDriver(None)
driver.crud_slice(vm.sliceName,vm.projectName, "start_slice")
ProvisioningResponseDispatcher.__clean_up_reservations(vm.uuid)
return
#if was_created and vm_started:
if vm_started:
ifaces = vm.getNetworkInterfaces()
for iface in ifaces:
if iface.isMgmt:
ip = iface.ip4s.all()[0].ip
# Contextualize VMs
ProvisioningResponseDispatcher.__contextualize_vm(vm, ip)
# Cleaning up reservation objects
return
XmlRpcClient.callRPCMethod(vm.getCallBackURL(), "sendAsync", XmlHelper.craftXmlClass(rspec))
if failedOnCreate == 1:
controller.deleteVM(vm)
# Keep actions table up-to-date after each deletion
actionModel.delete()
except Exception as e:
logging.error("Could not connect to Plugin in sendAsync\n%s",e)
return
#If response is for a finished action
else:
try:
#XXX: What should be done if this happen?
logging.error("Received response for an action in wrong state\n")
XmlRpcClient.callRPCMethod(vm.getCallBackURL(), "sendAsync", XmlHelper.getProcessingResponse(Action.ACTION_STATUS_FAILED_TYPE, action, "Received response for an action in wrong state"))
except Exception as e:
logging.error(e)
return
@staticmethod
def processresponseSync(rspec):
"""
Thread-free implementation.
"""
logging.debug("PROCESSING RESPONSE processResponseSync() STARTED...")
for action in rspec.response.provisioning.action:
try:
actionModel = ActionController.getAction(action.id)
except Exception as e:
logging.error("No action in DB with the incoming uuid\n%s", e)
return
"""
Only process the response if the action is still in QUEUED or ONGOING status; actions already in SUCCESS or FAILED status are finished.
"""
#if str(actionModel.callBackUrl) == str(SfaCommunicator.SFAUrl): #Avoiding unicodes
# event = pull(str(action.id))
# event.send('continue')
# return
logging.debug("................................ actionModel.getStatus(): %s ................." % str(actionModel.getStatus()))
if actionModel.getStatus() in (Action.QUEUED_STATUS, Action.ONGOING_STATUS):
logging.debug("The incoming response has id: %s and NEW status: %s" % (actionModel.uuid,actionModel.status))
actionModel.status = action.status
actionModel.description = action.description
actionModel.save()
#Complete information required for the Plugin: action type and VM
ActionController.completeActionRspec(action, actionModel)
#XXX:Implement this method or some other doing this job
vm = VTDriver.getVMbyUUID(actionModel.getObjectUUID())
controller = VTDriver.getDriver(vm.Server.get().getVirtTech())
failedOnCreate = 0
if actionModel.getStatus() == Action.SUCCESS_STATUS:
ProvisioningResponseDispatcher.__updateVMafterSUCCESS(actionModel, vm)
elif actionModel.getStatus() == Action.ONGOING_STATUS:
ProvisioningResponseDispatcher.__updateVMafterONGOING(actionModel, vm)
elif actionModel.getStatus() == Action.FAILED_STATUS:
failedOnCreate = ProvisioningResponseDispatcher.__updateVMafterFAILED(actionModel, vm)
else:
vm.setState(VirtualMachine.UNKNOWN_STATE)
try:
logging.debug("Sending response to Plugin in sendAsync")
if str(actionModel.callBackUrl) == 'SFA.OCF.VTM':
logging.debug(">>>>>>> SFA.OCF.VTM\n\n\n")
if failedOnCreate:
logging.debug("........... failedOnCreate.........")
expiring_slices = VirtualMachine.objects.filter(sliceName=vm.sliceName, projectName=vm.projectName)
logging.debug("........... expiring_slices: %s ..........." % str(expiring_slices))
if len(expiring_slices) == 1:
expiring_slices[0].delete()
# Cleaning up reservation objects
ProvisioningResponseDispatcher.__clean_up_reservations(vm.name)
XmlRpcClient.callRPCMethod(vm.getCallBackURL(), "sendSync", XmlHelper.craftXmlClass(rspec))
if failedOnCreate == 1:
controller.deleteVM(vm)
# Keep actions table up-to-date after each deletion
actionModel.delete()
except Exception as e:
logging.error("Could not connect to Plugin in sendSync. Exception: %s",e)
return
# If response is for a finished action
else:
try:
#XXX: What should be done if this happen?
logging.error("Received response for an action in wrong state")
XmlRpcClient.callRPCMethod(vm.getCallBackURL(), "sendSync", XmlHelper.getProcessingResponse(Action.ACTION_STATUS_FAILED_TYPE, action, "Received response for an action in wrong state"))
except Exception as e:
logging.error(e)
return
@staticmethod
def __updateVMafterSUCCESS(actionModel, vm):
if actionModel.getType() == Action.PROVISIONING_VM_CREATE_TYPE:
vm.setState(VirtualMachine.CREATED_STATE)
elif actionModel.getType() == Action.PROVISIONING_VM_START_TYPE or actionModel.getType() == Action.PROVISIONING_VM_REBOOT_TYPE:
vm.setState(VirtualMachine.RUNNING_STATE)
elif actionModel.getType() == Action.PROVISIONING_VM_STOP_TYPE:
vm.setState(VirtualMachine.STOPPED_STATE)
elif actionModel.getType() == Action.PROVISIONING_VM_DELETE_TYPE:
controller = VTDriver.getDriver(vm.Server.get().getVirtTech())
controller.deleteVM(vm)
# Keep actions table up-to-date after each deletion
actionModel.delete()
@staticmethod
def __updateVMafterONGOING(actionModel, vm):
if actionModel.getType() == Action.PROVISIONING_VM_CREATE_TYPE:
vm.setState(VirtualMachine.CREATING_STATE)
elif actionModel.getType() == Action.PROVISIONING_VM_START_TYPE:
vm.setState(VirtualMachine.STARTING_STATE)
elif actionModel.getType() == Action.PROVISIONING_VM_STOP_TYPE:
vm.setState(VirtualMachine.STOPPING_STATE)
elif actionModel.getType() == Action.PROVISIONING_VM_DELETE_TYPE:
vm.setState(VirtualMachine.DELETING_STATE)
elif actionModel.getType() == Action.PROVISIONING_VM_REBOOT_TYPE:
vm.setState(VirtualMachine.REBOOTING_STATE)
@staticmethod
def __updateVMafterFAILED(actionModel, vm):
if actionModel.getType() == Action.PROVISIONING_VM_START_TYPE:
vm.setState(VirtualMachine.STOPPED_STATE)
elif actionModel.getType() == Action.PROVISIONING_VM_STOP_TYPE:
vm.setState(VirtualMachine.RUNNING_STATE)
elif actionModel.getType() == Action.PROVISIONING_VM_REBOOT_TYPE:
vm.setState(VirtualMachine.STOPPED_STATE)
elif actionModel.getType() == Action.PROVISIONING_VM_CREATE_TYPE:
failedOnCreate = 1 #VM is deleted after sending response to the plug-in because callBackUrl is required
return failedOnCreate
else:
vm.setState(VirtualMachine.FAILED_STATE)
@staticmethod
def __clean_up_reservations(vm_id):
try:
logging.debug("ProvisioningResponseDispatcher.py.__clean_up_reservations...")
logging.debug("vm_uuid... %s" % vm_id)
logging.debug("reservation object: %s" % str(Reservation.objects.get(uuid = vm_id)))
Reservation.objects.get(uuid = vm_id).delete()
except Exception as e:
logging.debug("Failed to delete reservation for VM with name: %s. Exception: %s" % (str(vm_id), e))
return
@staticmethod
def __contextualize_vm(vm, ip):
import time
time.sleep(5)
# SSH keys for users are passed to the VM right after it is started
vm_keys = VirtualMachineKeys.objects.filter(slice_uuid=vm.sliceId, project_uuid=vm.projectId)
params = {
"vm_address": str(ip) ,
"vm_user": "root",
"vm_password": "openflow",
}
vm_context = VMContextualize(**params)
user_keys = {}
for vm_key in vm_keys:
user_name = str(vm_key.get_user_name())
if user_name not in user_keys:
user_keys[user_name] = [ vm_key.get_ssh_key() ]
else:
user_keys[user_name].append(vm_key.get_ssh_key())
logging.debug("Adding %s's public key(s) into VM. Key contents: %s" % (vm_key.get_user_name(), user_keys[str(vm_key.get_user_name())]))
# Placing a number of keys per user, multiple users
if len(user_keys[str(vm_key.get_user_name())]) > 0:
vm_context.contextualize_add_pub_keys(user_keys)
logging.debug("Contextualizing VM (%s)..." % str(ip))
|
|
import datetime
import logging
import os
import ujson
import shutil
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from bs4 import BeautifulSoup
from django.conf import settings
from django.db import connection
from django.db.models import Max
from django.utils.timezone import utc as timezone_utc, now as timezone_now
from typing import Any, Dict, List, Optional, Set, Tuple, \
Iterable, cast
from analytics.models import RealmCount, StreamCount, UserCount
from zerver.lib.actions import UserMessageLite, bulk_insert_ums, \
do_change_plan_type, do_change_avatar_fields
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.export import DATE_FIELDS, \
Record, TableData, TableName, Field, Path
from zerver.lib.message import do_render_markdown
from zerver.lib.bugdown import version as bugdown_version
from zerver.lib.actions import render_stream_description
from zerver.lib.upload import random_name, sanitize_name, \
guess_type, BadImageError
from zerver.lib.utils import generate_api_key, process_list_in_batches
from zerver.lib.parallel import run_parallel
from zerver.models import UserProfile, Realm, Client, Huddle, Stream, \
UserMessage, Subscription, Message, RealmEmoji, \
RealmDomain, Recipient, get_user_profile_by_id, \
UserPresence, UserActivity, UserActivityInterval, Reaction, \
CustomProfileField, CustomProfileFieldValue, RealmAuditLog, \
Attachment, get_system_bot, email_to_username, get_huddle_hash, \
UserHotspot, MutedTopic, Service, UserGroup, UserGroupMembership, \
BotStorageData, BotConfigData, DefaultStream, RealmFilter
realm_tables = [("zerver_defaultstream", DefaultStream, "defaultstream"),
("zerver_realmemoji", RealmEmoji, "realmemoji"),
("zerver_realmdomain", RealmDomain, "realmdomain"),
("zerver_realmfilter", RealmFilter, "realmfilter")] # List[Tuple[TableName, Any, str]]
# ID_MAP is a dictionary that maps table names to dictionaries
# that map old ids to new ids. We use this in
# re_map_foreign_keys and other places.
#
# We explicitly initialize ID_MAP with the tables that support
# id re-mapping.
#
# Code reviewers: give these tables extra scrutiny, as we need to
# make sure to reload related tables AFTER we re-map the ids.
ID_MAP = {
'client': {},
'user_profile': {},
'huddle': {},
'realm': {},
'stream': {},
'recipient': {},
'subscription': {},
'defaultstream': {},
'reaction': {},
'realmemoji': {},
'realmdomain': {},
'realmfilter': {},
'message': {},
'user_presence': {},
'useractivity': {},
'useractivityinterval': {},
'usermessage': {},
'customprofilefield': {},
'customprofilefieldvalue': {},
'attachment': {},
'realmauditlog': {},
'recipient_to_huddle_map': {},
'userhotspot': {},
'mutedtopic': {},
'service': {},
'usergroup': {},
'usergroupmembership': {},
'botstoragedata': {},
'botconfigdata': {},
'analytics_realmcount': {},
'analytics_streamcount': {},
'analytics_usercount': {},
} # type: Dict[str, Dict[int, int]]
id_map_to_list = {
'huddle_to_user_list': {},
} # type: Dict[str, Dict[int, List[int]]]
path_maps = {
'attachment_path': {},
} # type: Dict[str, Dict[str, str]]
def update_id_map(table: TableName, old_id: int, new_id: int) -> None:
if table not in ID_MAP:
raise Exception('''
Table %s is not initialized in ID_MAP, which could
mean that we have not thought through circular
dependencies.
''' % (table,))
ID_MAP[table][old_id] = new_id
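# Hedged sketch of how ID_MAP is used (hypothetical ids): an exported id is
# remapped once, and every later re_map_foreign_keys() call for that table
# rewrites foreign keys through the same mapping.
#
#     update_id_map('stream', old_id=5, new_id=101)
#     ID_MAP['stream'][5]   # -> 101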
def fix_datetime_fields(data: TableData, table: TableName) -> None:
for item in data[table]:
for field_name in DATE_FIELDS[table]:
if item[field_name] is not None:
item[field_name] = datetime.datetime.fromtimestamp(item[field_name], tz=timezone_utc)
def fix_upload_links(data: TableData, message_table: TableName) -> None:
"""
Because the URLs for uploaded files encode the realm ID of the
organization being imported (which is only determined at import
time), we need to rewrite the URLs of links to uploaded files
during the import process.
"""
for message in data[message_table]:
if message['has_attachment'] is True:
for key, value in path_maps['attachment_path'].items():
if key in message['content']:
message['content'] = message['content'].replace(key, value)
if message['rendered_content']:
message['rendered_content'] = message['rendered_content'].replace(key, value)
def create_subscription_events(data: TableData, realm_id: int) -> None:
"""
When the export data doesn't contain the table `zerver_realmauditlog`,
this function creates RealmAuditLog objects for `subscription_created`
type event for all the existing Stream subscriptions.
This is needed for all the export tools which do not include the
table `zerver_realmauditlog` (Slack, Gitter, etc.) because the appropriate
data about when a user was subscribed is not exported by the third-party
service.
"""
all_subscription_logs = []
# from bulk_add_subscriptions in lib/actions
event_last_message_id = Message.objects.aggregate(Max('id'))['id__max']
if event_last_message_id is None:
event_last_message_id = -1
event_time = timezone_now()
recipient_id_to_stream_id = {
d['id']: d['type_id']
for d in data['zerver_recipient']
if d['type'] == Recipient.STREAM
}
for sub in data['zerver_subscription']:
recipient_id = sub['recipient_id']
stream_id = recipient_id_to_stream_id.get(recipient_id)
if stream_id is None:
continue
user_id = sub['user_profile_id']
all_subscription_logs.append(RealmAuditLog(realm_id=realm_id,
acting_user_id=user_id,
modified_user_id=user_id,
modified_stream_id=stream_id,
event_last_message_id=event_last_message_id,
event_time=event_time,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED))
RealmAuditLog.objects.bulk_create(all_subscription_logs)
def fix_service_tokens(data: TableData, table: TableName) -> None:
"""
The tokens in the services are created by 'generate_api_key'.
As the tokens are unique, they should be re-created for the imports.
"""
for item in data[table]:
item['token'] = generate_api_key()
def process_huddle_hash(data: TableData, table: TableName) -> None:
"""
Build new huddle hashes with the updated ids of the users
"""
for huddle in data[table]:
user_id_list = id_map_to_list['huddle_to_user_list'][huddle['id']]
huddle['huddle_hash'] = get_huddle_hash(user_id_list)
def get_huddles_from_subscription(data: TableData, table: TableName) -> None:
"""
Extract the IDs of the user_profiles involved in a huddle from the subscription object
This helps to generate a unique huddle hash from the updated user_profile ids
"""
id_map_to_list['huddle_to_user_list'] = {
value: [] for value in ID_MAP['recipient_to_huddle_map'].values()}
for subscription in data[table]:
if subscription['recipient'] in ID_MAP['recipient_to_huddle_map']:
huddle_id = ID_MAP['recipient_to_huddle_map'][subscription['recipient']]
id_map_to_list['huddle_to_user_list'][huddle_id].append(subscription['user_profile_id'])
def fix_customprofilefield(data: TableData) -> None:
"""
In CustomProfileField with 'field_type' like 'USER', the IDs need to be
re-mapped.
"""
field_type_USER_id_list = []
for item in data['zerver_customprofilefield']:
if item['field_type'] == CustomProfileField.USER:
field_type_USER_id_list.append(item['id'])
for item in data['zerver_customprofilefieldvalue']:
if item['field_id'] in field_type_USER_id_list:
old_user_id_list = ujson.loads(item['value'])
new_id_list = re_map_foreign_keys_many_to_many_internal(
table='zerver_customprofilefieldvalue',
field_name='value',
related_table='user_profile',
old_id_list=old_user_id_list)
item['value'] = ujson.dumps(new_id_list)
class FakeMessage:
'''
We just need a stub object for do_render_markdown
to write stuff to.
'''
pass
def fix_message_rendered_content(realm: Realm,
sender_map: Dict[int, Record],
messages: List[Record]) -> None:
"""
This function sets the rendered_content of all the messages
after the messages have been imported from a non-Zulip platform.
"""
for message in messages:
if message['rendered_content'] is not None:
# For Zulip->Zulip imports, we use the original rendered
# markdown; this avoids issues where e.g. a mention can no
# longer render properly because a user has changed their
# name.
#
# However, we still need to update the data-user-id and
# similar values stored on mentions, stream mentions, and
# similar syntax in the rendered HTML.
soup = BeautifulSoup(message["rendered_content"], "html.parser")
user_mentions = soup.findAll("span", {"class": "user-mention"})
if len(user_mentions) != 0:
user_id_map = ID_MAP["user_profile"]
for mention in user_mentions:
if not mention.has_attr("data-user-id"):
# Legacy mentions don't have a data-user-id
# field; we should just import them
# unmodified.
continue
if mention['data-user-id'] == "*":
# No rewriting is required for wildcard mentions
continue
old_user_id = int(mention["data-user-id"])
if old_user_id in user_id_map:
mention["data-user-id"] = str(user_id_map[old_user_id])
message['rendered_content'] = str(soup)
stream_mentions = soup.findAll("a", {"class": "stream"})
if len(stream_mentions) != 0:
stream_id_map = ID_MAP["stream"]
for mention in stream_mentions:
old_stream_id = int(mention["data-stream-id"])
if old_stream_id in stream_id_map:
mention["data-stream-id"] = str(stream_id_map[old_stream_id])
message['rendered_content'] = str(soup)
user_group_mentions = soup.findAll("span", {"class": "user-group-mention"})
if len(user_group_mentions) != 0:
user_group_id_map = ID_MAP["usergroup"]
for mention in user_group_mentions:
old_user_group_id = int(mention["data-user-group-id"])
if old_user_group_id in user_group_id_map:
mention["data-user-group-id"] = str(user_group_id_map[old_user_group_id])
message['rendered_content'] = str(soup)
continue
message_object = FakeMessage()
try:
content = message['content']
sender_id = message['sender_id']
sender = sender_map[sender_id]
sent_by_bot = sender['is_bot']
translate_emoticons = sender['translate_emoticons']
# We don't handle alert words on import from third-party
# platforms, since they generally don't have an "alert
# words" type feature, and notifications aren't important anyway.
realm_alert_words_automaton = None
message_user_ids = set() # type: Set[int]
rendered_content = do_render_markdown(
message=cast(Message, message_object),
content=content,
realm=realm,
realm_alert_words_automaton=realm_alert_words_automaton,
message_user_ids=message_user_ids,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
)
assert(rendered_content is not None)
message['rendered_content'] = rendered_content
message['rendered_content_version'] = bugdown_version
except Exception:
# This generally happens with two possible causes:
# * rendering markdown throwing an uncaught exception
# * rendering markdown failing with the exception being
# caught in bugdown (which then returns None, causing the
# rendered_content assert above to fire).
logging.warning("Error in markdown rendering for message ID %s; continuing" % (message['id'],))
def current_table_ids(data: TableData, table: TableName) -> List[int]:
"""
Returns the ids present in the current table
"""
id_list = []
for item in data[table]:
id_list.append(item["id"])
return id_list
def idseq(model_class: Any) -> str:
if model_class == RealmDomain:
return 'zerver_realmalias_id_seq'
elif model_class == BotStorageData:
return 'zerver_botuserstatedata_id_seq'
elif model_class == BotConfigData:
return 'zerver_botuserconfigdata_id_seq'
return '{}_id_seq'.format(model_class._meta.db_table)
def allocate_ids(model_class: Any, count: int) -> List[int]:
"""
Increases the sequence number for a given table by the amount of objects being
imported into that table. Hence, this gives a reserved range of ids to import the
converted slack objects into the tables.
"""
conn = connection.cursor()
sequence = idseq(model_class)
conn.execute("select nextval('%s') from generate_series(1,%s)" %
(sequence, str(count)))
query = conn.fetchall() # Each element in the result is a tuple like (5,)
conn.close()
# convert List[Tuple[int]] to List[int]
return [item[0] for item in query]
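# Hedged sketch (hypothetical return values): allocate_ids() reserves a block
# of fresh ids from the table's sequence, which update_model_ids() then pairs
# with the old exported ids.
#
#     allocate_ids(Stream, 3)   # -> e.g. [11, 12, 13]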
def convert_to_id_fields(data: TableData, table: TableName, field_name: Field) -> None:
'''
When Django gives us dict objects via model_to_dict, the foreign
key fields are `foo`, but we want `foo_id` for the bulk insert.
This function handles the simple case where we simply rename
the fields. For cases where we need to munge ids in the
database, see re_map_foreign_keys.
'''
for item in data[table]:
item[field_name + "_id"] = item[field_name]
del item[field_name]
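# Minimal sketch of the rename (hypothetical row):
#
#     data = {'zerver_stream': [{'id': 3, 'realm': 7}]}
#     convert_to_id_fields(data, 'zerver_stream', 'realm')
#     data['zerver_stream'][0]   # -> {'id': 3, 'realm_id': 7}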
def re_map_foreign_keys(data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False,
id_field: bool=False,
recipient_field: bool=False,
reaction_field: bool=False) -> None:
"""
This is a wrapper function for all the realm data tables
and only avatar and attachment records need to be passed through the internal function
because of the difference in data format (TableData corresponding to realm data tables
and List[Record] corresponding to the avatar and attachment records)
"""
# See comments in bulk_import_user_message_data.
assert('usermessage' not in related_table)
re_map_foreign_keys_internal(data[table], table, field_name, related_table, verbose, id_field,
recipient_field, reaction_field)
def re_map_foreign_keys_internal(data_table: List[Record],
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False,
id_field: bool=False,
recipient_field: bool=False,
reaction_field: bool=False) -> None:
'''
We occasionally need to assign new ids to rows during the
import/export process, to accommodate things like existing rows
already being in tables. See bulk_import_client for more context.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this fixer function does
the re-mapping. (It also appends `_id` to the field.)
'''
lookup_table = ID_MAP[related_table]
for item in data_table:
old_id = item[field_name]
if recipient_field:
if related_table == "stream" and item['type'] == 2:
pass
elif related_table == "user_profile" and item['type'] == 1:
pass
elif related_table == "huddle" and item['type'] == 3:
# save the recipient id with the huddle id, so that we can extract
# the user_profile ids involved in a huddle with the help of the
# subscription object
# check function 'get_huddles_from_subscription'
ID_MAP['recipient_to_huddle_map'][item['id']] = lookup_table[old_id]
pass
else:
continue
old_id = item[field_name]
if reaction_field:
if item['reaction_type'] == Reaction.REALM_EMOJI:
old_id = int(old_id)
else:
continue
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info('Remapping %s %s from %s to %s' % (table,
field_name + '_id',
old_id,
new_id))
else:
new_id = old_id
if not id_field:
item[field_name + "_id"] = new_id
del item[field_name]
else:
if reaction_field:
item[field_name] = str(new_id)
else:
item[field_name] = new_id
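# Hedged sketch of the default (non-id_field, non-reaction) path with
# hypothetical ids: the foreign key is pushed through ID_MAP and renamed
# from `foo` to `foo_id`.
#
#     ID_MAP['realm'][4] = 90
#     rows = [{'id': 1, 'realm': 4}]
#     re_map_foreign_keys_internal(rows, 'zerver_stream', 'realm', related_table='realm')
#     rows[0]   # -> {'id': 1, 'realm_id': 90}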
def re_map_foreign_keys_many_to_many(data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False) -> None:
"""
We need to assign new ids to rows during the import/export
process.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this wrapper function does
the re-mapping only for ManyToMany fields.
"""
for item in data[table]:
old_id_list = item[field_name]
new_id_list = re_map_foreign_keys_many_to_many_internal(
table, field_name, related_table, old_id_list, verbose)
item[field_name] = new_id_list
del item[field_name]
def re_map_foreign_keys_many_to_many_internal(table: TableName,
field_name: Field,
related_table: TableName,
old_id_list: List[int],
verbose: bool=False) -> List[int]:
"""
This is an internal function for tables with ManyToMany fields,
which takes the old ID list of the ManyToMany relation and returns the
new updated ID list.
"""
lookup_table = ID_MAP[related_table]
new_id_list = []
for old_id in old_id_list:
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info('Remapping %s %s from %s to %s' % (table,
field_name + '_id',
old_id,
new_id))
else:
new_id = old_id
new_id_list.append(new_id)
return new_id_list
def fix_bitfield_keys(data: TableData, table: TableName, field_name: Field) -> None:
for item in data[table]:
item[field_name] = item[field_name + '_mask']
del item[field_name + '_mask']
def fix_realm_authentication_bitfield(data: TableData, table: TableName, field_name: Field) -> None:
"""Used to fixup the authentication_methods bitfield to be a string"""
for item in data[table]:
values_as_bitstring = ''.join(['1' if field[1] else '0' for field in
item[field_name]])
values_as_int = int(values_as_bitstring, 2)
item[field_name] = values_as_int
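# Worked sketch with a hypothetical export value: the exported field is a
# list of (backend_name, enabled) pairs, which is packed into a bit string
# and then parsed as a base-2 integer.
#
#     [('Email', True), ('Dev', False), ('GitHub', True)]  ->  '101'  ->  5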
def get_db_table(model_class: Any) -> str:
"""E.g. (RealmDomain -> 'zerver_realmdomain')"""
return model_class._meta.db_table
def update_model_ids(model: Any, data: TableData, related_table: TableName) -> None:
table = get_db_table(model)
# Important: remapping usermessage rows is
# not only unnecessary, it's expensive and can cause
# memory errors. We don't even use ids from ID_MAP.
assert('usermessage' not in table)
old_id_list = current_table_ids(data, table)
allocated_id_list = allocate_ids(model, len(data[table]))
for item in range(len(data[table])):
update_id_map(related_table, old_id_list[item], allocated_id_list[item])
re_map_foreign_keys(data, table, 'id', related_table=related_table, id_field=True)
def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:
model = UserMessage
table = 'zerver_usermessage'
lst = data[table]
# IMPORTANT NOTE: We do not use any primary id
# data from either the import itself or ID_MAP.
# We let the DB itself generate ids. Note that
# no tables use user_message.id as a foreign key,
# so we can safely avoid all re-mapping complexity.
def process_batch(items: List[Dict[str, Any]]) -> None:
ums = [
UserMessageLite(
user_profile_id = item['user_profile_id'],
message_id = item['message_id'],
flags=item['flags'],
)
for item in items
]
bulk_insert_ums(ums)
chunk_size = 10000
process_list_in_batches(
lst=lst,
chunk_size=chunk_size,
process_batch=process_batch,
)
logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))
def bulk_import_model(data: TableData, model: Any, dump_file_id: Optional[str]=None) -> None:
table = get_db_table(model)
# TODO, deprecate dump_file_id
model.objects.bulk_create(model(**item) for item in data[table])
if dump_file_id is None:
logging.info("Successfully imported %s from %s." % (model, table))
else:
logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))
# Client is a table shared by multiple realms, so in order to
# correctly import multiple realms into the same server, we need to
# check if a Client object already exists, and so we need to
# remap all Client IDs to the values in the new DB.
def bulk_import_client(data: TableData, model: Any, table: TableName) -> None:
for item in data[table]:
try:
client = Client.objects.get(name=item['name'])
except Client.DoesNotExist:
client = Client.objects.create(name=item['name'])
update_id_map(table='client', old_id=item['id'], new_id=client.id)
def import_uploads(import_dir: Path, processes: int, processing_avatars: bool=False,
processing_emojis: bool=False) -> None:
if processing_avatars and processing_emojis:
raise AssertionError("Cannot import avatars and emojis at the same time!")
if processing_avatars:
logging.info("Importing avatars")
elif processing_emojis:
logging.info("Importing emojis")
else:
logging.info("Importing uploaded files")
records_filename = os.path.join(import_dir, "records.json")
with open(records_filename) as records_file:
records = ujson.loads(records_file.read()) # type: List[Dict[str, Any]]
timestamp = datetime_to_timestamp(timezone_now())
re_map_foreign_keys_internal(records, 'records', 'realm_id', related_table="realm",
id_field=True)
if not processing_emojis:
re_map_foreign_keys_internal(records, 'records', 'user_profile_id',
related_table="user_profile", id_field=True)
s3_uploads = settings.LOCAL_UPLOADS_DIR is None
if s3_uploads:
if processing_avatars or processing_emojis:
bucket_name = settings.S3_AVATAR_BUCKET
else:
bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = conn.get_bucket(bucket_name, validate=True)
count = 0
for record in records:
count += 1
if count % 1000 == 0:
logging.info("Processed %s/%s uploads" % (count, len(records)))
if processing_avatars:
# For avatars, we need to rehash the user ID with the
# new server's avatar salt
relative_path = user_avatar_path_from_ids(record['user_profile_id'], record['realm_id'])
if record['s3_path'].endswith('.original'):
relative_path += '.original'
else:
# TODO: This really should be unconditional. However,
# until we fix the S3 upload backend to use the .png
# path suffix for its normal avatar URLs, we need to
# only do this for the LOCAL_UPLOADS_DIR backend.
if not s3_uploads:
relative_path += '.png'
elif processing_emojis:
# For emojis we follow the function 'upload_emoji_image'
relative_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=record['realm_id'],
emoji_file_name=record['file_name'])
record['last_modified'] = timestamp
else:
# Should be kept in sync with its equivalent in zerver/lib/uploads in the
# function 'upload_message_file'
relative_path = "/".join([
str(record['realm_id']),
random_name(18),
sanitize_name(os.path.basename(record['path']))
])
path_maps['attachment_path'][record['s3_path']] = relative_path
if s3_uploads:
key = Key(bucket)
key.key = relative_path
# Exported custom emoji from tools like Slack don't have
# the data for what user uploaded them in `user_profile_id`.
if not processing_emojis:
user_profile_id = int(record['user_profile_id'])
# Support email gateway bot and other cross-realm messages
if user_profile_id in ID_MAP["user_profile"]:
logging.info("Uploaded by ID mapped user: %s!" % (user_profile_id,))
user_profile_id = ID_MAP["user_profile"][user_profile_id]
user_profile = get_user_profile_by_id(user_profile_id)
key.set_metadata("user_profile_id", str(user_profile.id))
if 'last_modified' in record:
key.set_metadata("orig_last_modified", str(record['last_modified']))
key.set_metadata("realm_id", str(record['realm_id']))
# Zulip exports will always have a content-type, but third-party exports might not.
content_type = record.get("content_type")
if content_type is None:
content_type = guess_type(record['s3_path'])[0]
if content_type is None:
# This is the default for unknown data. Note that
# for `.original` files, this is the value we'll
# set; that is OK, because those are never served
# directly anyway.
content_type = 'application/octet-stream'
headers = {'Content-Type': content_type} # type: Dict[str, Any]
key.set_contents_from_filename(os.path.join(import_dir, record['path']), headers=headers)
else:
if processing_avatars or processing_emojis:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", relative_path)
else:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", relative_path)
orig_file_path = os.path.join(import_dir, record['path'])
os.makedirs(os.path.dirname(file_path), exist_ok=True)
shutil.copy(orig_file_path, file_path)
if processing_avatars:
from zerver.lib.upload import upload_backend
# Ensure that we have medium-size avatar images for every
# avatar. TODO: This implementation is hacky, both in that it
# does get_user_profile_by_id for each user, and in that it
# might be better to require the export to just have these.
def process_avatars(record: Dict[Any, Any]) -> int:
if record['s3_path'].endswith('.original'):
user_profile = get_user_profile_by_id(record['user_profile_id'])
if settings.LOCAL_UPLOADS_DIR is not None:
avatar_path = user_avatar_path_from_ids(user_profile.id, record['realm_id'])
medium_file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
avatar_path) + '-medium.png'
if os.path.exists(medium_file_path):
# We remove the image here primarily to deal with
# issues when running the import script multiple
# times in development (where one might reuse the
# same realm ID from a previous iteration).
os.remove(medium_file_path)
try:
upload_backend.ensure_medium_avatar_image(user_profile=user_profile)
if record.get("importer_should_thumbnail"):
upload_backend.ensure_basic_avatar_image(user_profile=user_profile)
except BadImageError:
logging.warning("Could not thumbnail avatar image for user %s; ignoring" % (
user_profile.id,))
# Delete the record of the avatar to avoid 404s.
do_change_avatar_fields(user_profile, UserProfile.AVATAR_FROM_GRAVATAR)
return 0
if processes == 1:
for record in records:
process_avatars(record)
else:
connection.close()
output = []
for (status, job) in run_parallel(process_avatars, records, processes):
output.append(job)
# Importing data suffers from a difficult ordering problem because of
# models that reference each other circularly. Here is a correct order.
#
# * Client [no deps]
# * Realm [-notifications_stream]
# * Stream [only depends on realm]
# * Realm's notifications_stream
# * Now can do all realm_tables
# * UserProfile, in order by ID to avoid bot loop issues
# * Huddle
# * Recipient
# * Subscription
# * Message
# * UserMessage
#
# Because the Python object => JSON conversion process is not fully
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and Foreign Keys) to do the import correctly.
def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm:
logging.info("Importing realm dump %s" % (import_dir,))
if not os.path.exists(import_dir):
raise Exception("Missing import directory!")
realm_data_filename = os.path.join(import_dir, "realm.json")
if not os.path.exists(realm_data_filename):
raise Exception("Missing realm.json file!")
logging.info("Importing realm data from %s" % (realm_data_filename,))
with open(realm_data_filename) as f:
data = ujson.load(f)
sort_by_date = data.get('sort_by_date', False)
bulk_import_client(data, Client, 'zerver_client')
# We don't import the Stream model yet, since it depends on Realm,
# which isn't imported yet. But we need the Stream model IDs for
# notifications_stream.
update_model_ids(Stream, data, 'stream')
re_map_foreign_keys(data, 'zerver_realm', 'notifications_stream', related_table="stream")
re_map_foreign_keys(data, 'zerver_realm', 'signup_notifications_stream', related_table="stream")
fix_datetime_fields(data, 'zerver_realm')
# Fix realm subdomain information
data['zerver_realm'][0]['string_id'] = subdomain
data['zerver_realm'][0]['name'] = subdomain
fix_realm_authentication_bitfield(data, 'zerver_realm', 'authentication_methods')
update_model_ids(Realm, data, 'realm')
realm = Realm(**data['zerver_realm'][0])
if realm.notifications_stream_id is not None:
notifications_stream_id = int(realm.notifications_stream_id) # type: Optional[int]
else:
notifications_stream_id = None
realm.notifications_stream_id = None
if realm.signup_notifications_stream_id is not None:
signup_notifications_stream_id = int(realm.signup_notifications_stream_id) # type: Optional[int]
else:
signup_notifications_stream_id = None
realm.signup_notifications_stream_id = None
realm.save()
# Email tokens will automatically be randomly generated when the
# Stream objects are created by Django.
fix_datetime_fields(data, 'zerver_stream')
re_map_foreign_keys(data, 'zerver_stream', 'realm', related_table="realm")
# Handle rendering of stream descriptions for import from non-Zulip
for stream in data['zerver_stream']:
if 'rendered_description' in stream:
continue
stream["rendered_description"] = render_stream_description(stream["description"])
bulk_import_model(data, Stream)
realm.notifications_stream_id = notifications_stream_id
realm.signup_notifications_stream_id = signup_notifications_stream_id
realm.save()
# Remap the user IDs for notification_bot and friends to their
# appropriate IDs on this server
for item in data['zerver_userprofile_crossrealm']:
if item['email'].startswith("emailgateway@"):
# The email gateway bot's email is customized to a
# different domain on some servers.
item['email'] = settings.EMAIL_GATEWAY_BOT
logging.info("Adding to ID map: %s %s" % (item['id'], get_system_bot(item['email']).id))
new_user_id = get_system_bot(item['email']).id
update_id_map(table='user_profile', old_id=item['id'], new_id=new_user_id)
new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id
update_id_map(table='recipient', old_id=item['recipient_id'], new_id=new_recipient_id)
# Merge in zerver_userprofile_mirrordummy
data['zerver_userprofile'] = data['zerver_userprofile'] + data['zerver_userprofile_mirrordummy']
del data['zerver_userprofile_mirrordummy']
data['zerver_userprofile'].sort(key=lambda r: r['id'])
# To remap foreign key for UserProfile.last_active_message_id
update_message_foreign_keys(import_dir=import_dir, sort_by_date=sort_by_date)
fix_datetime_fields(data, 'zerver_userprofile')
update_model_ids(UserProfile, data, 'user_profile')
re_map_foreign_keys(data, 'zerver_userprofile', 'realm', related_table="realm")
re_map_foreign_keys(data, 'zerver_userprofile', 'bot_owner', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_userprofile', 'default_sending_stream',
related_table="stream")
re_map_foreign_keys(data, 'zerver_userprofile', 'default_events_register_stream',
related_table="stream")
re_map_foreign_keys(data, 'zerver_userprofile', 'last_active_message_id',
related_table="message", id_field=True)
for user_profile_dict in data['zerver_userprofile']:
user_profile_dict['password'] = None
user_profile_dict['api_key'] = generate_api_key()
# Since Zulip doesn't use these permissions, drop them
del user_profile_dict['user_permissions']
del user_profile_dict['groups']
user_profiles = [UserProfile(**item) for item in data['zerver_userprofile']]
for user_profile in user_profiles:
user_profile.set_unusable_password()
UserProfile.objects.bulk_create(user_profiles)
re_map_foreign_keys(data, 'zerver_defaultstream', 'stream', related_table="stream")
re_map_foreign_keys(data, 'zerver_realmemoji', 'author', related_table="user_profile")
for (table, model, related_table) in realm_tables:
re_map_foreign_keys(data, table, 'realm', related_table="realm")
update_model_ids(model, data, related_table)
bulk_import_model(data, model)
if 'zerver_huddle' in data:
update_model_ids(Huddle, data, 'huddle')
# We don't import Huddle yet, since we don't have the data to
# compute huddle hashes until we've imported some of the
# tables below.
# TODO: double-check this.
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="stream",
recipient_field=True, id_field=True)
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="user_profile",
recipient_field=True, id_field=True)
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="huddle",
recipient_field=True, id_field=True)
update_model_ids(Recipient, data, 'recipient')
bulk_import_model(data, Recipient)
re_map_foreign_keys(data, 'zerver_subscription', 'user_profile', related_table="user_profile")
get_huddles_from_subscription(data, 'zerver_subscription')
re_map_foreign_keys(data, 'zerver_subscription', 'recipient', related_table="recipient")
update_model_ids(Subscription, data, 'subscription')
bulk_import_model(data, Subscription)
if 'zerver_realmauditlog' in data:
fix_datetime_fields(data, 'zerver_realmauditlog')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'realm', related_table="realm")
re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_user',
related_table='user_profile')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'acting_user',
related_table='user_profile')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_stream',
related_table="stream")
update_model_ids(RealmAuditLog, data, related_table="realmauditlog")
bulk_import_model(data, RealmAuditLog)
else:
logging.info('about to call create_subscription_events')
create_subscription_events(
data=data,
realm_id=realm.id,
)
logging.info('done with create_subscription_events')
if 'zerver_huddle' in data:
process_huddle_hash(data, 'zerver_huddle')
bulk_import_model(data, Huddle)
if 'zerver_userhotspot' in data:
fix_datetime_fields(data, 'zerver_userhotspot')
re_map_foreign_keys(data, 'zerver_userhotspot', 'user', related_table='user_profile')
update_model_ids(UserHotspot, data, 'userhotspot')
bulk_import_model(data, UserHotspot)
if 'zerver_mutedtopic' in data:
re_map_foreign_keys(data, 'zerver_mutedtopic', 'user_profile', related_table='user_profile')
re_map_foreign_keys(data, 'zerver_mutedtopic', 'stream', related_table='stream')
re_map_foreign_keys(data, 'zerver_mutedtopic', 'recipient', related_table='recipient')
update_model_ids(MutedTopic, data, 'mutedtopic')
bulk_import_model(data, MutedTopic)
if 'zerver_service' in data:
re_map_foreign_keys(data, 'zerver_service', 'user_profile', related_table='user_profile')
fix_service_tokens(data, 'zerver_service')
update_model_ids(Service, data, 'service')
bulk_import_model(data, Service)
if 'zerver_usergroup' in data:
re_map_foreign_keys(data, 'zerver_usergroup', 'realm', related_table='realm')
re_map_foreign_keys_many_to_many(data, 'zerver_usergroup',
'members', related_table='user_profile')
update_model_ids(UserGroup, data, 'usergroup')
bulk_import_model(data, UserGroup)
re_map_foreign_keys(data, 'zerver_usergroupmembership',
'user_group', related_table='usergroup')
re_map_foreign_keys(data, 'zerver_usergroupmembership',
'user_profile', related_table='user_profile')
update_model_ids(UserGroupMembership, data, 'usergroupmembership')
bulk_import_model(data, UserGroupMembership)
if 'zerver_botstoragedata' in data:
re_map_foreign_keys(data, 'zerver_botstoragedata', 'bot_profile', related_table='user_profile')
update_model_ids(BotStorageData, data, 'botstoragedata')
bulk_import_model(data, BotStorageData)
if 'zerver_botconfigdata' in data:
re_map_foreign_keys(data, 'zerver_botconfigdata', 'bot_profile', related_table='user_profile')
update_model_ids(BotConfigData, data, 'botconfigdata')
bulk_import_model(data, BotConfigData)
fix_datetime_fields(data, 'zerver_userpresence')
re_map_foreign_keys(data, 'zerver_userpresence', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_userpresence', 'client', related_table='client')
update_model_ids(UserPresence, data, 'user_presence')
bulk_import_model(data, UserPresence)
fix_datetime_fields(data, 'zerver_useractivity')
re_map_foreign_keys(data, 'zerver_useractivity', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_useractivity', 'client', related_table='client')
update_model_ids(UserActivity, data, 'useractivity')
bulk_import_model(data, UserActivity)
fix_datetime_fields(data, 'zerver_useractivityinterval')
re_map_foreign_keys(data, 'zerver_useractivityinterval', 'user_profile', related_table="user_profile")
update_model_ids(UserActivityInterval, data, 'useractivityinterval')
bulk_import_model(data, UserActivityInterval)
re_map_foreign_keys(data, 'zerver_customprofilefield', 'realm', related_table="realm")
update_model_ids(CustomProfileField, data, related_table="customprofilefield")
bulk_import_model(data, CustomProfileField)
re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'user_profile',
related_table="user_profile")
re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'field',
related_table="customprofilefield")
fix_customprofilefield(data)
update_model_ids(CustomProfileFieldValue, data, related_table="customprofilefieldvalue")
bulk_import_model(data, CustomProfileFieldValue)
# Import uploaded files and avatars
import_uploads(os.path.join(import_dir, "avatars"), processes, processing_avatars=True)
import_uploads(os.path.join(import_dir, "uploads"), processes)
# We need this check because the emoji files are only present in data produced
# by the Slack importer; for a Zulip export, this directory doesn't exist.
if os.path.exists(os.path.join(import_dir, "emoji")):
import_uploads(os.path.join(import_dir, "emoji"), processes, processing_emojis=True)
sender_map = {
user['id']: user
for user in data['zerver_userprofile']
}
# Import zerver_message and zerver_usermessage
import_message_data(realm=realm, sender_map=sender_map, import_dir=import_dir)
re_map_foreign_keys(data, 'zerver_reaction', 'message', related_table="message")
re_map_foreign_keys(data, 'zerver_reaction', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_reaction', 'emoji_code', related_table="realmemoji", id_field=True,
reaction_field=True)
update_model_ids(Reaction, data, 'reaction')
bulk_import_model(data, Reaction)
for user_profile in UserProfile.objects.filter(is_bot=False, realm=realm):
# Since we now unconditionally renumber message IDs, we need
# to reset the user's pointer to what will be a valid value.
#
# For zulip->zulip imports, we could do something clever, but
# it should always be safe to reset to first unread message.
#
# Longer-term, the plan is to eliminate pointer as a concept.
first_unread_message = UserMessage.objects.filter(user_profile=user_profile).extra(
where=[UserMessage.where_unread()]
).order_by("message_id").first()
if first_unread_message is not None:
user_profile.pointer = first_unread_message.message_id
else:
last_message = UserMessage.objects.filter(
user_profile=user_profile).order_by("message_id").last()
if last_message is not None:
user_profile.pointer = last_message.message_id
else:
# -1 is the guard value for new user accounts with no messages.
user_profile.pointer = -1
user_profile.save(update_fields=["pointer"])
# Similarly, we need to recalculate the first_message_id for stream objects.
for stream in Stream.objects.filter(realm=realm):
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
first_message = Message.objects.filter(recipient=recipient).first()
if first_message is None:
stream.first_message_id = None
else:
stream.first_message_id = first_message.id
stream.save(update_fields=["first_message_id"])
# Do attachments AFTER message data is loaded.
# TODO: de-dup how we read these json files.
fn = os.path.join(import_dir, "attachment.json")
if not os.path.exists(fn):
raise Exception("Missing attachment.json file!")
logging.info("Importing attachment data from %s" % (fn,))
with open(fn) as f:
data = ujson.load(f)
import_attachments(data)
# Import the analytics file.
import_analytics_data(realm=realm, import_dir=import_dir)
if settings.BILLING_ENABLED:
do_change_plan_type(realm, Realm.LIMITED)
else:
do_change_plan_type(realm, Realm.SELF_HOSTED)
return realm
# create_users and do_import_system_bots differ from their equivalent in
# zerver/management/commands/initialize_voyager_db.py because here we check
# whether the bots already exist and only create users for those that don't.
def do_import_system_bots(realm: Any) -> None:
internal_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
for bot in settings.INTERNAL_BOTS]
create_users(realm, internal_bots, bot_type=UserProfile.DEFAULT_BOT)
names = [(settings.FEEDBACK_BOT_NAME, settings.FEEDBACK_BOT)]
create_users(realm, names, bot_type=UserProfile.DEFAULT_BOT)
print("Finished importing system bots.")
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]],
bot_type: Optional[int]=None) -> None:
user_set = set()
for full_name, email in name_list:
short_name = email_to_username(email)
if not UserProfile.objects.filter(email=email):
user_set.add((email, full_name, short_name, True))
bulk_create_users(realm, user_set, bot_type)
def update_message_foreign_keys(import_dir: Path,
sort_by_date: bool) -> None:
old_id_list = get_incoming_message_ids(
import_dir=import_dir,
sort_by_date=sort_by_date,
)
count = len(old_id_list)
new_id_list = allocate_ids(model_class=Message, count=count)
for old_id, new_id in zip(old_id_list, new_id_list):
update_id_map(
table='message',
old_id=old_id,
new_id=new_id,
)
# We don't touch user_message keys here; that happens later when
# we actually read the files a second time to get the actual data.
def get_incoming_message_ids(import_dir: Path,
sort_by_date: bool) -> List[int]:
'''
This function reads in our entire collection of message
ids, which can be millions of integers for some installations.
And then we sort the list. This is necessary to ensure
that the sort order of incoming ids matches the sort order
of pub_date, which isn't always guaranteed by our
utilities that convert third party chat data. We also
need to move our ids to a new range if we're dealing
with a server that has data for other realms.
'''
if sort_by_date:
tups = list() # type: List[Tuple[int, int]]
else:
message_ids = [] # type: List[int]
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, "messages-%06d.json" % (dump_file_id,))
if not os.path.exists(message_filename):
break
with open(message_filename) as f:
data = ujson.load(f)
# Aggressively free up memory.
del data['zerver_usermessage']
for row in data['zerver_message']:
# We truncate pub_date to int to theoretically
# save memory and speed up the sort. For
# Zulip-to-Zulip imports, the
# message_id will generally be a good tiebreaker.
# If we occasionally mis-order the ids for two
# messages from the same second, it's not the
# end of the world, as it's likely those messages
# arrived to the original server in somewhat
# arbitrary order.
message_id = row['id']
if sort_by_date:
pub_date = int(row['pub_date'])
tup = (pub_date, message_id)
tups.append(tup)
else:
message_ids.append(message_id)
dump_file_id += 1
if sort_by_date:
tups.sort()
message_ids = [tup[1] for tup in tups]
return message_ids
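# Hedged sketch of the sort_by_date path (hypothetical values): tuples sort
# by pub_date first and message id second, and only the ids are kept.
#
#     tups = [(1550000000, 12), (1540000000, 7)]   # (pub_date, message_id)
#     tups.sort()
#     [t[1] for t in tups]   # -> [7, 12]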
def import_message_data(realm: Realm,
sender_map: Dict[int, Record],
import_dir: Path) -> None:
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, "messages-%06d.json" % (dump_file_id,))
if not os.path.exists(message_filename):
break
with open(message_filename) as f:
data = ujson.load(f)
logging.info("Importing message dump %s" % (message_filename,))
re_map_foreign_keys(data, 'zerver_message', 'sender', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_message', 'recipient', related_table="recipient")
re_map_foreign_keys(data, 'zerver_message', 'sending_client', related_table='client')
fix_datetime_fields(data, 'zerver_message')
# Parser to update message content with the updated attachment urls
fix_upload_links(data, 'zerver_message')
# We already create mappings for zerver_message ids
# in update_message_foreign_keys(), so here we simply
# apply them.
message_id_map = ID_MAP['message']
for row in data['zerver_message']:
row['id'] = message_id_map[row['id']]
for row in data['zerver_usermessage']:
assert(row['message'] in message_id_map)
fix_message_rendered_content(
realm=realm,
sender_map=sender_map,
messages=data['zerver_message'],
)
logging.info("Successfully rendered markdown for message batch")
# A LOT HAPPENS HERE.
# This is where we actually import the message data.
bulk_import_model(data, Message)
# Due to the structure of these message chunks, we're
# guaranteed to have already imported all the Message objects
# for this batch of UserMessage objects.
re_map_foreign_keys(data, 'zerver_usermessage', 'message', related_table="message")
re_map_foreign_keys(data, 'zerver_usermessage', 'user_profile', related_table="user_profile")
fix_bitfield_keys(data, 'zerver_usermessage', 'flags')
bulk_import_user_message_data(data, dump_file_id)
dump_file_id += 1
def import_attachments(data: TableData) -> None:
# Clean up the data in zerver_attachment that is not
# relevant to our many-to-many import.
fix_datetime_fields(data, 'zerver_attachment')
re_map_foreign_keys(data, 'zerver_attachment', 'owner', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_attachment', 'realm', related_table="realm")
# Configure ourselves. Django models many-to-many (m2m)
# relations asymmetrically. The parent here refers to the
# Model that has the ManyToManyField. It is assumed here
# the child models have been loaded, but we are in turn
# responsible for loading the parents and the m2m rows.
parent_model = Attachment
parent_db_table_name = 'zerver_attachment'
parent_singular = 'attachment'
child_singular = 'message'
child_plural = 'messages'
m2m_table_name = 'zerver_attachment_messages'
parent_id = 'attachment_id'
child_id = 'message_id'
update_model_ids(parent_model, data, 'attachment')
# We don't bulk_import_model yet, because we need to first compute
# the many-to-many for this table.
# First, build our list of many-to-many (m2m) rows.
# We do this in a slightly convoluted way to anticipate
# a future where we may need to call re_map_foreign_keys.
m2m_rows = [] # type: List[Record]
for parent_row in data[parent_db_table_name]:
for fk_id in parent_row[child_plural]:
m2m_row = {} # type: Record
m2m_row[parent_singular] = parent_row['id']
m2m_row[child_singular] = ID_MAP['message'][fk_id]
m2m_rows.append(m2m_row)
# Create our table data for insert.
m2m_data = {m2m_table_name: m2m_rows} # type: TableData
convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
m2m_rows = m2m_data[m2m_table_name]
# Next, delete out our child data from the parent rows.
for parent_row in data[parent_db_table_name]:
del parent_row[child_plural]
# Update 'path_id' for the attachments
for attachment in data[parent_db_table_name]:
attachment['path_id'] = path_maps['attachment_path'][attachment['path_id']]
# Next, load the parent rows.
bulk_import_model(data, parent_model)
# Now, go back to our m2m rows.
# TODO: Do this the kosher Django way. We may find a
# better way to do this in Django 1.9 particularly.
with connection.cursor() as cursor:
sql_template = '''
insert into %s (%s, %s) values(%%s, %%s);''' % (m2m_table_name,
parent_id,
child_id)
tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
cursor.executemany(sql_template, tups)
logging.info('Successfully imported M2M table %s' % (m2m_table_name,))
def import_analytics_data(realm: Realm, import_dir: Path) -> None:
analytics_filename = os.path.join(import_dir, "analytics.json")
if not os.path.exists(analytics_filename):
return
logging.info("Importing analytics data from %s" % (analytics_filename,))
with open(analytics_filename) as f:
data = ujson.load(f)
# Process the data through the fixer functions.
fix_datetime_fields(data, 'analytics_realmcount')
re_map_foreign_keys(data, 'analytics_realmcount', 'realm', related_table="realm")
update_model_ids(RealmCount, data, 'analytics_realmcount')
bulk_import_model(data, RealmCount)
fix_datetime_fields(data, 'analytics_usercount')
re_map_foreign_keys(data, 'analytics_usercount', 'realm', related_table="realm")
re_map_foreign_keys(data, 'analytics_usercount', 'user', related_table="user_profile")
update_model_ids(UserCount, data, 'analytics_usercount')
bulk_import_model(data, UserCount)
fix_datetime_fields(data, 'analytics_streamcount')
re_map_foreign_keys(data, 'analytics_streamcount', 'realm', related_table="realm")
re_map_foreign_keys(data, 'analytics_streamcount', 'stream', related_table="stream")
update_model_ids(StreamCount, data, 'analytics_streamcount')
bulk_import_model(data, StreamCount)
|
|
import sys
import os
sys.path.insert(0, '.')
sys.path.extend(os.environ.get('PYTHONPATH','').split(os.pathsep))
import imp
import traceback
__name__ = '__main__'
mainmodule = type(sys)('__main__')
sys.modules['__main__'] = mainmodule
import cffi
# this is a list holding objects we do not want to be freed (like callbacks and handlers)
uwsgi_gc = []
# the main ffi
ffi = cffi.FFI()
# the hooks we need to patch
hooks = '''
void free(void *);
ssize_t read(int, void *, size_t);
ssize_t write(int, const void *, size_t);
int close(int);
void (*uwsgi_pypy_hook_execute_source)(char *);
void (*uwsgi_pypy_hook_loader)(char *);
void (*uwsgi_pypy_hook_file_loader)(char *);
void (*uwsgi_pypy_hook_paste_loader)(char *);
void (*uwsgi_pypy_hook_pythonpath)(char *);
void (*uwsgi_pypy_hook_request)(struct wsgi_request *);
void (*uwsgi_pypy_post_fork_hook)(void);
'''
# here we load CFLAGS and uwsgi.h from the binary
defines0 = '''
char *uwsgi_get_cflags();
char *uwsgi_get_dot_h();
'''
ffi.cdef(defines0)
lib0 = ffi.verify(defines0)
# this is ugly, we should find a better approach
# basically it builds a list of #define directives from the binary's CFLAGS
uwsgi_cdef = []
uwsgi_defines = []
uwsgi_cflags = ffi.string(lib0.uwsgi_get_cflags()).split()
for cflag in uwsgi_cflags:
if cflag.startswith('-D'):
line = cflag[2:]
if '=' in line:
(key, value) = line.split('=', 1)
uwsgi_cdef.append('#define %s ...' % key)
uwsgi_defines.append('#define %s %s' % (key, value.replace('\\"','"').replace('""','"')))
else:
uwsgi_cdef.append('#define %s ...' % line)
uwsgi_defines.append('#define %s 1' % line)
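# For example (hypothetical flags): '-DUWSGI_VERSION=\"2.0\"' becomes
#     cdef:   #define UWSGI_VERSION ...
#     verify: #define UWSGI_VERSION "2.0"
# while a bare '-DUWSGI_SSL' becomes '#define UWSGI_SSL ...' / '#define UWSGI_SSL 1'.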
uwsgi_dot_h = ffi.string(lib0.uwsgi_get_dot_h())
# uwsgi definitions
cdefines = '''
%s
struct iovec {
void *iov_base;
size_t iov_len;
...;
};
struct uwsgi_header {
uint8_t modifier1;
...;
};
struct wsgi_request {
int fd;
int async_id;
uint16_t var_cnt;
struct iovec *hvec;
int async_ready_fd;
int async_last_ready_fd;
int suspended;
struct uwsgi_header *uh;
...;
};
struct uwsgi_opt {
char *key;
char *value;
...;
};
struct uwsgi_worker {
int id;
int pid;
uint64_t requests;
uint64_t delta_requests;
uint64_t signals;
int cheaped;
int suspended;
int sig;
uint8_t signum;
uint64_t running_time;
uint64_t avg_response_time;
uint64_t tx;
...;
};
struct uwsgi_plugin {
uint8_t modifier1;
void (*suspend) (struct wsgi_request *);
void (*resume) (struct wsgi_request *);
...;
};
struct uwsgi_buffer {
char *buf;
size_t pos;
...;
};
struct uwsgi_lock_item {
...;
};
struct uwsgi_cache {
struct uwsgi_lock_item *lock;
...;
};
struct uwsgi_cache_item {
uint64_t keysize;
...;
};
struct uwsgi_server {
char hostname[];
int mywid;
int muleid;
int master_process;
struct uwsgi_opt **exported_opts;
int exported_opts_cnt;
struct uwsgi_worker *workers;
int signal_socket;
int numproc;
int async;
void (*schedule_to_main) (struct wsgi_request *);
void (*schedule_to_req) (void);
struct wsgi_request *(*current_wsgi_req) (void);
struct wsgi_request *wsgi_req;
struct uwsgi_plugin *p[];
...;
};
struct uwsgi_server uwsgi;
struct uwsgi_plugin pypy_plugin;
const char *uwsgi_pypy_version;
char *uwsgi_binary_path();
void *uwsgi_malloc(size_t);
int uwsgi_response_prepare_headers(struct wsgi_request *, char *, size_t);
int uwsgi_response_add_header(struct wsgi_request *, char *, uint16_t, char *, uint16_t);
int uwsgi_response_write_body_do(struct wsgi_request *, char *, size_t);
int uwsgi_response_sendfile_do_can_close(struct wsgi_request *, int, size_t, size_t, int);
char *uwsgi_request_body_read(struct wsgi_request *, ssize_t , ssize_t *);
char *uwsgi_request_body_readline(struct wsgi_request *, ssize_t, ssize_t *);
void uwsgi_buffer_destroy(struct uwsgi_buffer *);
int uwsgi_is_again();
int uwsgi_register_rpc(char *, struct uwsgi_plugin *, uint8_t, void *);
int uwsgi_register_signal(uint8_t, char *, void *, uint8_t);
char *uwsgi_do_rpc(char *, char *, uint8_t, char **, uint16_t *, uint64_t *);
void uwsgi_set_processname(char *);
int uwsgi_signal_send(int, uint8_t);
uint64_t uwsgi_worker_exceptions(int);
int uwsgi_worker_is_busy(int);
char *uwsgi_cache_magic_get(char *, uint16_t, uint64_t *, uint64_t *, char *);
int uwsgi_cache_magic_set(char *, uint16_t, char *, uint64_t, uint64_t, uint64_t, char *);
int uwsgi_cache_magic_del(char *, uint16_t, char *);
int uwsgi_cache_magic_exists(char *, uint16_t, char *);
int uwsgi_cache_magic_clear(char *);
struct uwsgi_cache *uwsgi_cache_by_name(char *);
void uwsgi_cache_rlock(struct uwsgi_cache *);
void uwsgi_cache_rwunlock(struct uwsgi_cache *);
char *uwsgi_cache_item_key(struct uwsgi_cache_item *);
struct uwsgi_cache_item *uwsgi_cache_keys(struct uwsgi_cache *, uint64_t *, struct uwsgi_cache_item **);
int uwsgi_add_file_monitor(uint8_t, char *);
int uwsgi_add_timer(uint8_t, int);
int uwsgi_signal_add_rb_timer(uint8_t, int, int);
int uwsgi_user_lock(int);
int uwsgi_user_unlock(int);
int uwsgi_signal_registered(uint8_t);
int uwsgi_signal_add_cron(uint8_t, int, int, int, int, int);
void uwsgi_alarm_trigger(char *, char *, size_t);
void async_schedule_to_req_green(void);
void async_add_timeout(struct wsgi_request *, int);
int async_add_fd_write(struct wsgi_request *, int, int);
int async_add_fd_read(struct wsgi_request *, int, int);
int uwsgi_connect(char *, int, int);
int uwsgi_websocket_handshake(struct wsgi_request *, char *, uint16_t, char *, uint16_t, char *, uint16_t);
int uwsgi_websocket_send(struct wsgi_request *, char *, size_t);
struct uwsgi_buffer *uwsgi_websocket_recv(struct wsgi_request *);
struct uwsgi_buffer *uwsgi_websocket_recv_nb(struct wsgi_request *);
char *uwsgi_chunked_read(struct wsgi_request *, size_t *, int, int);
void uwsgi_disconnect(struct wsgi_request *);
int uwsgi_ready_fd(struct wsgi_request *);
void set_user_harakiri(struct wsgi_request *, int);
int uwsgi_metric_set(char *, char *, int64_t);
int uwsgi_metric_inc(char *, char *, int64_t);
int uwsgi_metric_dec(char *, char *, int64_t);
int uwsgi_metric_mul(char *, char *, int64_t);
int uwsgi_metric_div(char *, char *, int64_t);
int64_t uwsgi_metric_get(char *, char *);
%s
''' % ('\n'.join(uwsgi_cdef), hooks)
cverify = '''
%s
const char *uwsgi_pypy_version = UWSGI_VERSION;
%s
extern struct uwsgi_server uwsgi;
extern struct uwsgi_plugin pypy_plugin;
%s
''' % ('\n'.join(uwsgi_defines), uwsgi_dot_h, hooks)
ffi.cdef(cdefines)
lib = ffi.verify(cverify)
libc = ffi.dlopen(None)
"""
this is a global object pointing to the WSGI callable
it is ugly and should be fixed in the near future...
"""
wsgi_application = None
# fix argv if needed
if len(sys.argv) == 0:
sys.argv.insert(0, ffi.string(lib.uwsgi_binary_path()))
"""
execute source code; we expose it as a cffi callback to avoid deadlocks
after GIL initialization
"""
@ffi.callback("void(char *)")
def uwsgi_pypy_execute_source(s):
source = ffi.string(s)
exec(source)
"""
load a wsgi module
"""
@ffi.callback("void(char *)")
def uwsgi_pypy_loader(module):
global wsgi_application
m = ffi.string(module)
c = 'application'
if ':' in m:
m, c = m.split(':')
if '.' in m:
mod = __import__(m, None, None, '*')
else:
mod = __import__(m)
wsgi_application = getattr(mod, c)
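# Example (hypothetical module names): a module spec of 'myproject.wsgi:app'
# imports myproject.wsgi and uses its 'app' attribute, while a plain 'myapp'
# imports myapp and falls back to the default 'application' callable.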
"""
load a mod_wsgi compliant .wsgi file
"""
@ffi.callback("void(char *)")
def uwsgi_pypy_file_loader(filename):
global wsgi_application
w = ffi.string(filename)
c = 'application'
mod = imp.load_source('uwsgi_file_wsgi', w)
wsgi_application = getattr(mod, c)
"""
load a .ini paste app
"""
@ffi.callback("void(char *)")
def uwsgi_pypy_paste_loader(config):
global wsgi_application
c = ffi.string(config)
if c.startswith('config:'):
c = c[7:]
if c[0] != '/':
c = os.getcwd() + '/' + c
try:
from paste.script.util.logging_config import fileConfig
fileConfig(c)
except ImportError:
print "PyPy WARNING: unable to load paste.script.util.logging_config"
from paste.deploy import loadapp
wsgi_application = loadapp('config:%s' % c)
"""
invoke the application-defined uwsgi.post_fork_hook (if any) after fork()
"""
@ffi.callback("void()")
def uwsgi_pypy_post_fork_hook():
import uwsgi
if hasattr(uwsgi, 'post_fork_hook'):
uwsgi.post_fork_hook()
"""
add an item to the pythonpath
"""
@ffi.callback("void(char *)")
def uwsgi_pypy_pythonpath(item):
path = ffi.string(item)
sys.path.append(path)
print "added %s to pythonpath" % path
"""
class implementing wsgi.file_wrapper
"""
class WSGIfilewrapper(object):
def __init__(self, wsgi_req, f, chunksize=0):
self.wsgi_req = wsgi_req
self.f = f
self.chunksize = chunksize
if hasattr(f, 'close'):
self.close = f.close
def __iter__(self):
return self
def sendfile(self):
if hasattr(self.f, 'fileno'):
lib.uwsgi_response_sendfile_do_can_close(self.wsgi_req, self.f.fileno(), 0, 0, 0)
elif hasattr(self.f, 'read'):
if self.chunksize == 0:
chunk = self.f.read()
if len(chunk) > 0:
lib.uwsgi_response_write_body_do(self.wsgi_req, ffi.new("char[]", chunk), len(chunk))
return
while True:
chunk = self.f.read(self.chunksize)
if chunk is None or len(chunk) == 0:
break
lib.uwsgi_response_write_body_do(self.wsgi_req, ffi.new("char[]", chunk), len(chunk))
"""
class implementing wsgi.input
"""
class WSGIinput(object):
def __init__(self, wsgi_req):
self.wsgi_req = wsgi_req
def read(self, size=0):
rlen = ffi.new('ssize_t*')
chunk = lib.uwsgi_request_body_read(self.wsgi_req, size, rlen)
if chunk != ffi.NULL:
return ffi.string(chunk, rlen[0])
if rlen[0] < 0:
raise IOError("error reading wsgi.input")
raise IOError("error waiting for wsgi.input")
def getline(self, hint=0):
rlen = ffi.new('ssize_t*')
chunk = lib.uwsgi_request_body_readline(self.wsgi_req, hint, rlen)
if chunk != ffi.NULL:
return ffi.string(chunk, rlen[0])
if rlen[0] < 0:
raise IOError("error reading line from wsgi.input")
raise IOError("error waiting for line on wsgi.input")
def readline(self, hint=0):
return self.getline(hint)
def readlines(self, hint=0):
lines = []
while True:
chunk = self.getline(hint)
if len(chunk) == 0:
break
lines.append(chunk)
return lines
def __iter__(self):
return self
def __next__(self):
chunk = self.getline()
if len(chunk) == 0:
raise StopIteration
return chunk
next = __next__  # Python 2 iteration protocol calls next(); keep __next__ as well
"""
the WSGI request handler
"""
@ffi.callback("void(struct wsgi_request *)")
def uwsgi_pypy_wsgi_handler(wsgi_req):
import uwsgi
global wsgi_application
def writer(data):
lib.uwsgi_response_write_body_do(wsgi_req, ffi.new("char[]", data), len(data))
def start_response(status, headers, exc_info=None):
if exc_info:
traceback.print_exception(*exc_info)
lib.uwsgi_response_prepare_headers(wsgi_req, ffi.new("char[]", status), len(status))
for hh in headers:
lib.uwsgi_response_add_header(wsgi_req, ffi.new("char[]", hh[0]), len(hh[0]), ffi.new("char[]", hh[1]), len(hh[1]))
return writer
environ = {}
iov = wsgi_req.hvec
for i in range(0, wsgi_req.var_cnt, 2):
environ[ffi.string(ffi.cast("char*", iov[i].iov_base), iov[i].iov_len)] = ffi.string(ffi.cast("char*", iov[i+1].iov_base), iov[i+1].iov_len)
environ['wsgi.version'] = (1, 0)
scheme = 'http'
if 'HTTPS' in environ:
if environ['HTTPS'] in ('on', 'ON', 'On', '1', 'true', 'TRUE', 'True'):
scheme = 'https'
environ['wsgi.url_scheme'] = environ.get('UWSGI_SCHEME', scheme)
environ['wsgi.input'] = WSGIinput(wsgi_req)
environ['wsgi.errors'] = sys.stderr
environ['wsgi.run_once'] = False
environ['wsgi.file_wrapper'] = lambda f, chunksize=0: WSGIfilewrapper(wsgi_req, f, chunksize)
environ['wsgi.multithread'] = True
environ['wsgi.multiprocess'] = True
environ['uwsgi.core'] = wsgi_req.async_id
environ['uwsgi.node'] = uwsgi.hostname
response = wsgi_application(environ, start_response)
if type(response) is str:
writer(response)
else:
try:
if isinstance(response, WSGIfilewrapper):
response.sendfile()
else:
for chunk in response:
if isinstance(chunk, WSGIfilewrapper):
try:
chunk.sendfile()
finally:
chunk.close()
else:
writer(chunk)
finally:
if hasattr(response, 'close'):
response.close()
lib.uwsgi_pypy_hook_execute_source = uwsgi_pypy_execute_source
lib.uwsgi_pypy_hook_loader = uwsgi_pypy_loader
lib.uwsgi_pypy_hook_file_loader = uwsgi_pypy_file_loader
lib.uwsgi_pypy_hook_paste_loader = uwsgi_pypy_paste_loader
lib.uwsgi_pypy_hook_pythonpath = uwsgi_pypy_pythonpath
lib.uwsgi_pypy_hook_request = uwsgi_pypy_wsgi_handler
lib.uwsgi_pypy_post_fork_hook = uwsgi_pypy_post_fork_hook
"""
Here we define the "uwsgi" virtual module
"""
uwsgi = imp.new_module('uwsgi')
sys.modules['uwsgi'] = uwsgi
uwsgi.version = ffi.string(lib.uwsgi_pypy_version)
uwsgi.hostname = ffi.string(lib.uwsgi.hostname)
def uwsgi_pypy_uwsgi_register_signal(signum, kind, handler):
cb = ffi.callback('void(int)', handler)
uwsgi_gc.append(cb)
if lib.uwsgi_register_signal(signum, ffi.new("char[]", kind), cb, lib.pypy_plugin.modifier1) < 0:
raise Exception("unable to register signal %d" % signum)
uwsgi.register_signal = uwsgi_pypy_uwsgi_register_signal
class uwsgi_pypy_RPC(object):
def __init__(self, func):
self.func = func
def __call__(self, argc, argv, argvs, buf):
pargs = []
for i in range(0, argc):
pargs.append(ffi.string(argv[i], argvs[i]))
response = self.func(*pargs)
if len(response) > 0:
buf[0] = lib.uwsgi_malloc(len(response))
dst = ffi.buffer(buf[0], len(response))
dst[:len(response)] = response
return len(response)
def uwsgi_pypy_uwsgi_register_rpc(name, func, argc=0):
rpc_func = uwsgi_pypy_RPC(func)
cb = ffi.callback("int(int, char*[], int[], char**)", rpc_func)
uwsgi_gc.append(cb)
if lib.uwsgi_register_rpc(ffi.new("char[]", name), ffi.addressof(lib.pypy_plugin), argc, cb) < 0:
raise Exception("unable to register rpc func %s" % name)
uwsgi.register_rpc = uwsgi_pypy_uwsgi_register_rpc
def uwsgi_pypy_rpc(node, func, *args):
argc = 0
argv = ffi.new('char*[256]')
argvs = ffi.new('uint16_t[256]')
rsize = ffi.new('uint64_t*')
for arg in args:
if argc >= 255:
raise Exception('invalid number of rpc arguments')
if len(arg) >= 65535:
raise Exception('invalid rpc argument size (must be < 65535)')
argv[argc] = ffi.new('char[]', arg)
argvs[argc] = len(arg)
argc += 1
if node:
c_node = ffi.new("char[]", node)
else:
c_node = ffi.NULL
response = lib.uwsgi_do_rpc(c_node, ffi.new("char[]",func), argc, argv, argvs, rsize)
if response:
ret = ffi.string(response, rsize[0])
lib.free(response)
return ret
return None
uwsgi.rpc = uwsgi_pypy_rpc
def uwsgi_pypy_call(func, *args):
node = None
if '@' in func:
(func, node) = func.split('@')
return uwsgi_pypy_rpc(node, func, *args)
uwsgi.call = uwsgi_pypy_call
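# Example (hypothetical names/addresses): uwsgi.call('hello@127.0.0.1:3031', 'arg')
# splits the node from the function name and performs the RPC remotely, while
# uwsgi.call('hello', 'arg') runs the registered 'hello' function locally.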
uwsgi.signal = lambda x: lib.uwsgi_signal_send(lib.uwsgi.signal_socket, x)
uwsgi.metric_get = lambda x: lib.uwsgi_metric_get(x, ffi.NULL)
uwsgi.metric_set = lambda x, y: lib.uwsgi_metric_set(x, ffi.NULL, y)
uwsgi.metric_inc = lambda x, y=1: lib.uwsgi_metric_inc(x, ffi.NULL, y)
uwsgi.metric_dec = lambda x, y=1: lib.uwsgi_metric_dec(x, ffi.NULL, y)
uwsgi.metric_mul = lambda x, y=1: lib.uwsgi_metric_mul(x, ffi.NULL, y)
uwsgi.metric_div = lambda x, y=1: lib.uwsgi_metric_div(x, ffi.NULL, y)
def uwsgi_pypy_uwsgi_cache_get(key, cache=ffi.NULL):
vallen = ffi.new('uint64_t*')
value = lib.uwsgi_cache_magic_get(key, len(key), vallen, ffi.NULL, cache)
if value == ffi.NULL:
return None
ret = ffi.string(value, vallen[0])
libc.free(value)
return ret
uwsgi.cache_get = uwsgi_pypy_uwsgi_cache_get
def uwsgi_pypy_uwsgi_cache_set(key, value, expires=0, cache=ffi.NULL):
if lib.uwsgi_cache_magic_set(key, len(key), value, len(value), expires, 0, cache) < 0:
raise Exception('unable to store item in the cache')
uwsgi.cache_set = uwsgi_pypy_uwsgi_cache_set
def uwsgi_pypy_uwsgi_cache_update(key, value, expires=0, cache=ffi.NULL):
if lib.uwsgi_cache_magic_set(key, len(key), value, len(value), expires, 1 << 1, cache) < 0:
raise Exception('unable to store item in the cache')
uwsgi.cache_update = uwsgi_pypy_uwsgi_cache_update
def uwsgi_pypy_uwsgi_cache_del(key, cache=ffi.NULL):
if lib.uwsgi_cache_magic_del(key, len(key), cache) < 0:
raise Exception('unable to delete item from the cache')
uwsgi.cache_del = uwsgi_pypy_uwsgi_cache_del
def uwsgi_pypy_uwsgi_cache_keys(cache=ffi.NULL):
uc = lib.uwsgi_cache_by_name(cache)
if uc == ffi.NULL:
raise Exception('no local uWSGI cache available')
l = []
lib.uwsgi_cache_rlock(uc)
pos = ffi.new('uint64_t *')
uci = ffi.new('struct uwsgi_cache_item **')
while True:
uci[0] = lib.uwsgi_cache_keys(uc, pos, uci)
if uci[0] == ffi.NULL: break
l.append(ffi.string(lib.uwsgi_cache_item_key(uci[0]), uci[0].keysize))
lib.uwsgi_cache_rwunlock(uc)
return l
uwsgi.cache_keys = uwsgi_pypy_uwsgi_cache_keys
def uwsgi_pypy_uwsgi_add_timer(signum, secs):
if lib.uwsgi_add_timer(signum, secs) < 0:
raise Exception("unable to register timer")
uwsgi.add_timer = uwsgi_pypy_uwsgi_add_timer
def uwsgi_pypy_uwsgi_add_rb_timer(signum, secs):
if lib.uwsgi_signal_add_rb_timer(signum, secs, 0) < 0:
raise Exception("unable to register redblack timer")
uwsgi.add_rb_timer = uwsgi_pypy_uwsgi_add_rb_timer
def uwsgi_pypy_uwsgi_add_file_monitor(signum, filename):
if lib.uwsgi_add_file_monitor(signum, ffi.new("char[]", filename)) < 0:
raise Exception("unable to register file monitor")
uwsgi.add_file_monitor = uwsgi_pypy_uwsgi_add_file_monitor
def uwsgi_pypy_lock(num=0):
if lib.uwsgi_user_lock(num) < 0:
raise Exception("invalid lock")
uwsgi.lock = uwsgi_pypy_lock
def uwsgi_pypy_unlock(num=0):
if lib.uwsgi_user_unlock(num) < 0:
raise Exception("invalid lock")
uwsgi.unlock = uwsgi_pypy_unlock
def uwsgi_pypy_masterpid():
if lib.uwsgi.master_process:
return lib.uwsgi.workers[0].pid
return 0
uwsgi.masterpid = uwsgi_pypy_masterpid
uwsgi.worker_id = lambda: lib.uwsgi.mywid
uwsgi.mule_id = lambda: lib.uwsgi.muleid
def uwsgi_pypy_signal_registered(signum):
if lib.uwsgi_signal_registered(signum) > 0:
return True
return False
uwsgi.signal_registered = uwsgi_pypy_signal_registered
def uwsgi_pypy_alarm(alarm, msg):
lib.uwsgi_alarm_trigger(ffi.new('char[]', alarm), ffi.new('char[]', msg), len(msg))
uwsgi.alarm = uwsgi_pypy_alarm
uwsgi.setprocname = lambda name: lib.uwsgi_set_processname(ffi.new('char[]', name))
def uwsgi_pypy_add_cron(signum, minute, hour, day, month, week):
if lib.uwsgi_signal_add_cron(signum, minute, hour, day, month, week) < 0:
raise Exception("unable to register cron")
uwsgi.add_cron = uwsgi_pypy_add_cron
"""
populate uwsgi.opt
"""
uwsgi.opt = {}
for i in range(0, lib.uwsgi.exported_opts_cnt):
uo = lib.uwsgi.exported_opts[i]
k = ffi.string(uo.key)
if uo.value == ffi.NULL:
v = True
else:
v = ffi.string(uo.value)
if k in uwsgi.opt:
if type(uwsgi.opt[k]) is list:
uwsgi.opt[k].append(v)
else:
uwsgi.opt[k] = [uwsgi.opt[k], v]
else:
uwsgi.opt[k] = v
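# Example (hypothetical options): an option passed once without a value maps to
# True (e.g. uwsgi.opt['master'] is True), while an option repeated several times
# is collected into a list (e.g. uwsgi.opt['attach-daemon'] == ['cmd1', 'cmd2']).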
def uwsgi_pypy_current_wsgi_req():
wsgi_req = lib.uwsgi.current_wsgi_req()
if wsgi_req == ffi.NULL:
raise Exception("unable to get current wsgi_request, check your setup !!!")
return wsgi_req
"""
uwsgi.suspend()
"""
def uwsgi_pypy_suspend():
wsgi_req = uwsgi_pypy_current_wsgi_req()
if lib.uwsgi.schedule_to_main:
lib.uwsgi.schedule_to_main(wsgi_req);
uwsgi.suspend = uwsgi_pypy_suspend
"""
uwsgi.workers()
"""
def uwsgi_pypy_workers():
workers = []
for i in range(1, lib.uwsgi.numproc+1):
worker = {}
worker['id'] = lib.uwsgi.workers[i].id
worker['pid'] = lib.uwsgi.workers[i].pid
worker['requests'] = lib.uwsgi.workers[i].requests
worker['delta_requests'] = lib.uwsgi.workers[i].delta_requests
worker['signals'] = lib.uwsgi.workers[i].signals
worker['exceptions'] = lib.uwsgi_worker_exceptions(i);
worker['apps'] = []
if lib.uwsgi.workers[i].cheaped:
worker['status'] = 'cheap'
elif lib.uwsgi.workers[i].suspended and not lib.uwsgi_worker_is_busy(i):
worker['status'] = 'pause'
else:
if lib.uwsgi.workers[i].sig:
worker['status'] = 'sig%d' % lib.uwsgi.workers[i].signum
elif lib.uwsgi_worker_is_busy(i):
worker['status'] = 'busy'
else:
worker['status'] = 'idle'
worker['running_time'] = lib.uwsgi.workers[i].running_time
worker['avg_rt'] = lib.uwsgi.workers[i].avg_response_time
worker['tx'] = lib.uwsgi.workers[i].tx
workers.append(worker)
return workers
uwsgi.workers = uwsgi_pypy_workers
"""
uwsgi.async_sleep(timeout)
"""
def uwsgi_pypy_async_sleep(timeout):
if timeout > 0:
wsgi_req = uwsgi_pypy_current_wsgi_req();
lib.async_add_timeout(wsgi_req, timeout);
uwsgi.async_sleep = uwsgi_pypy_async_sleep
"""
uwsgi.async_connect(addr)
"""
def uwsgi_pypy_async_connect(addr):
fd = lib.uwsgi_connect(ffi.new('char[]', addr), 0, 1)
if fd < 0:
raise Exception("unable to connect to %s" % addr)
return fd
uwsgi.async_connect = uwsgi_pypy_async_connect
uwsgi.connection_fd = lambda: uwsgi_pypy_current_wsgi_req().fd
"""
uwsgi.wait_fd_read(fd, timeout=0)
"""
def uwsgi_pypy_wait_fd_read(fd, timeout=0):
wsgi_req = uwsgi_pypy_current_wsgi_req();
if lib.async_add_fd_read(wsgi_req, fd, timeout) < 0:
raise Exception("unable to add fd %d to the event queue" % fd)
uwsgi.wait_fd_read = uwsgi_pypy_wait_fd_read
"""
uwsgi.wait_fd_write(fd, timeout=0)
"""
def uwsgi_pypy_wait_fd_write(fd, timeout=0):
wsgi_req = uwsgi_pypy_current_wsgi_req();
if lib.async_add_fd_write(wsgi_req, fd, timeout) < 0:
raise Exception("unable to add fd %d to the event queue" % fd)
uwsgi.wait_fd_write = uwsgi_pypy_wait_fd_write
"""
uwsgi.ready_fd()
"""
def uwsgi_pypy_ready_fd():
wsgi_req = uwsgi_pypy_current_wsgi_req();
return lib.uwsgi_ready_fd(wsgi_req)
uwsgi.ready_fd = uwsgi_pypy_ready_fd
"""
uwsgi.send(fd=None,data)
"""
def uwsgi_pypy_send(*args):
if len(args) == 0:
raise ValueError("uwsgi.send() takes at least 1 argument")
elif len(args) == 1:
wsgi_req = uwsgi_pypy_current_wsgi_req();
fd = wsgi_req.fd
data = args[0]
else:
fd = args[0]
data = args[1]
rlen = libc.write(fd, data, len(data))
if rlen <= 0:
raise IOError("unable to send data")
return rlen
uwsgi.send = uwsgi_pypy_send
"""
uwsgi.recv(fd=None,len)
"""
def uwsgi_pypy_recv(*args):
if len(args) == 0:
raise ValueError("uwsgi.recv() takes at least 1 argument")
elif len(args) == 1:
wsgi_req = uwsgi_pypy_current_wsgi_req();
fd = wsgi_req.fd
l = args[0]
else:
fd = args[0]
l = args[1]
data = ffi.new('char[%d]' % l)
rlen = libc.read(fd, data, l)
if rlen <= 0:
raise IOError("unable to receive data")
return ffi.string(data[0:rlen])
uwsgi.recv = uwsgi_pypy_recv
"""
uwsgi.close(fd)
"""
uwsgi.close = lambda fd: lib.close(fd)
"""
uwsgi.disconnect()
"""
uwsgi.disconnect = lambda: lib.uwsgi_disconnect(uwsgi_pypy_current_wsgi_req())
"""
uwsgi.websocket_recv()
"""
def uwsgi_pypy_websocket_recv():
wsgi_req = uwsgi_pypy_current_wsgi_req();
ub = lib.uwsgi_websocket_recv(wsgi_req);
if ub == ffi.NULL:
raise IOError("unable to receive websocket message")
ret = ffi.string(ub.buf, ub.pos)
lib.uwsgi_buffer_destroy(ub)
return ret
uwsgi.websocket_recv = uwsgi_pypy_websocket_recv
"""
uwsgi.websocket_recv_nb()
"""
def uwsgi_pypy_websocket_recv_nb():
wsgi_req = uwsgi_pypy_current_wsgi_req();
ub = lib.uwsgi_websocket_recv_nb(wsgi_req);
if ub == ffi.NULL:
raise IOError("unable to receive websocket message")
ret = ffi.string(ub.buf, ub.pos)
lib.uwsgi_buffer_destroy(ub)
return ret
uwsgi.websocket_recv_nb = uwsgi_pypy_websocket_recv_nb
"""
uwsgi.websocket_handshake(key, origin)
"""
def uwsgi_pypy_websocket_handshake(key='', origin='', proto=''):
wsgi_req = uwsgi_pypy_current_wsgi_req();
c_key = ffi.new('char[]', key)
c_origin = ffi.new('char[]', origin)
c_proto = ffi.new('char[]', proto)
if lib.uwsgi_websocket_handshake(wsgi_req, c_key, len(key), c_origin, len(origin), c_proto, len(proto)) < 0:
raise IOError("unable to complete websocket handshake")
uwsgi.websocket_handshake = uwsgi_pypy_websocket_handshake
"""
uwsgi.websocket_send(msg)
"""
def uwsgi_pypy_websocket_send(msg):
wsgi_req = uwsgi_pypy_current_wsgi_req();
if lib.uwsgi_websocket_send(wsgi_req, ffi.new('char[]', msg), len(msg)) < 0:
raise IOError("unable to send websocket message")
uwsgi.websocket_send = uwsgi_pypy_websocket_send
"""
uwsgi.chunked_read(timeout=0)
"""
def uwsgi_pypy_chunked_read(timeout=0):
wsgi_req = uwsgi_pypy_current_wsgi_req();
rlen = ffi.new("size_t*")
chunk = lib.uwsgi_chunked_read(wsgi_req, rlen, timeout, 0)
if chunk == ffi.NULL:
raise IOError("unable to receive chunked part")
return ffi.string(chunk, rlen[0])
uwsgi.chunked_read = uwsgi_pypy_chunked_read
"""
uwsgi.chunked_read_nb()
"""
def uwsgi_pypy_chunked_read_nb():
wsgi_req = uwsgi_pypy_current_wsgi_req();
rlen = ffi.new("size_t*")
chunk = lib.uwsgi_chunked_read(wsgi_req, rlen, 0, 1)
if chunk == ffi.NULL:
if lib.uwsgi_is_again() > 0:
return None
raise IOError("unable to receive chunked part")
return ffi.string(chunk, rlen[0])
uwsgi.chunked_read_nb = uwsgi_pypy_chunked_read_nb
"""
uwsgi.set_user_harakiri(sec)
"""
def uwsgi_pypy_set_user_harakiri(x):
wsgi_req = uwsgi_pypy_current_wsgi_req()
lib.set_user_harakiri(wsgi_req, x)
uwsgi.set_user_harakiri = uwsgi_pypy_set_user_harakiri
print "Initialized PyPy with Python", sys.version
print "PyPy Home:", sys.prefix
"""
Continulets support
"""
# this is the dictionary of continulets (one per-core)
uwsgi_pypy_continulets = {}
def uwsgi_pypy_continulet_wrapper(cont):
lib.async_schedule_to_req_green()
@ffi.callback("void()")
def uwsgi_pypy_continulet_schedule():
id = lib.uwsgi.wsgi_req.async_id
modifier1 = lib.uwsgi.wsgi_req.uh.modifier1;
# generate a new continulet
if not lib.uwsgi.wsgi_req.suspended:
from _continuation import continulet
uwsgi_pypy_continulets[id] = continulet(uwsgi_pypy_continulet_wrapper)
lib.uwsgi.wsgi_req.suspended = 1
# this is called in the main stack
if lib.uwsgi.p[modifier1].suspend:
lib.uwsgi.p[modifier1].suspend(ffi.NULL)
# let's switch
uwsgi_pypy_continulets[id].switch()
# back to the main stack
if lib.uwsgi.p[modifier1].resume:
lib.uwsgi.p[modifier1].resume(ffi.NULL)
@ffi.callback("void(struct wsgi_request *)")
def uwsgi_pypy_continulet_switch(wsgi_req):
id = wsgi_req.async_id
modifier1 = wsgi_req.uh.modifier1;
# this is called in the current continulet
if lib.uwsgi.p[modifier1].suspend:
lib.uwsgi.p[modifier1].suspend(wsgi_req)
uwsgi_pypy_continulets[id].switch()
# back to the continulet
if lib.uwsgi.p[modifier1].resume:
lib.uwsgi.p[modifier1].resume(wsgi_req)
# update current running continulet
lib.uwsgi.wsgi_req = wsgi_req
def uwsgi_pypy_setup_continulets():
if lib.uwsgi.async < 1:
raise Exception("pypy continulets require async mode !!!")
lib.uwsgi.schedule_to_main = uwsgi_pypy_continulet_switch
lib.uwsgi.schedule_to_req = uwsgi_pypy_continulet_schedule
print "*** PyPy Continulets engine loaded ***"
|
|
"""
Unit tests for the Three Open311 API wrapper.
"""
import os
import json
import unittest
from datetime import date
from mock import Mock, MagicMock, patch
import three
import responses
from three import core, Three, CityNotFound
from three.core import requests as req
class ThreeInit(unittest.TestCase):
def test_uninitialized_api_key(self):
self.assertEqual(Three().api_key, '')
def test_global_api_key(self):
os.environ['OPEN311_API_KEY'] = 'OHAI'
self.assertEqual(Three().api_key, 'OHAI')
def test_default_format_is_json(self):
self.assertEqual(Three().format, 'json')
def test_format_can_be_set_to_xml(self):
t = Three(format='xml')
self.assertEqual(t.format, 'xml')
def test_first_argument_is_endpoint(self):
t = Three('api.city.gov')
self.assertEqual(t.endpoint, 'https://api.city.gov/')
def test_reset_method_reconfigures_defaults(self):
t = Three('foo.bar')
self.assertEqual(t.endpoint, 'https://foo.bar/')
t.configure(endpoint='bar.bar')
self.assertEqual(t.endpoint, 'https://bar.bar/')
t.configure(endpoint='http://baz.bar')
self.assertEqual(t.endpoint, 'http://baz.bar/')
t.reset()
self.assertEqual(t.endpoint, 'https://foo.bar/')
def test_ssl_version(self):
import ssl
t = Three('foo.bar', ssl_version=ssl.PROTOCOL_TLSv1)
poolmanager = t.session.adapters['https://'].poolmanager
self.assertEqual(poolmanager.connection_pool_kw['ssl_version'],
ssl.PROTOCOL_TLSv1)
def tearDown(self):
os.environ['OPEN311_API_KEY'] = ''
@patch.object(req, 'Session', Mock())
class ThreeDiscovery(unittest.TestCase):
def setUp(self):
core.json = Mock()
def test_default_discovery_method(self):
t = Three('api.city.gov')
t.discovery()
expected = 'https://api.city.gov/discovery.json'
t.session.get.assert_called_with(expected, params={})
def test_discovery_url_argument(self):
t = Three('api.city.gov')
t.discovery('http://testing.gov/discovery.json')
t.session.get.assert_called_with('http://testing.gov/discovery.json')
def test_city_discovery_keyword(self):
t = Three('api.chicago.city', discovery='http://chi.api.gov')
self.assertEqual(t.discovery_url, 'http://chi.api.gov')
@patch.object(req, 'Session', Mock())
class ThreeServices(unittest.TestCase):
def setUp(self):
core.json = Mock()
def test_empty_services_call(self):
t = Three('api.city.gov')
t.services()
expected = 'https://api.city.gov/services.json'
t.session.get.assert_called_with(expected, params={})
def test_specific_service_code(self):
t = Three('api.city.gov')
t.services('123')
expected = 'https://api.city.gov/services/123.json'
t.session.get.assert_called_with(expected, params={})
def test_keyword_arguments_become_parameters(self):
t = Three('api.city.gov')
t.services('123', foo='bar')
params = {'foo': 'bar'}
expected = 'https://api.city.gov/services/123.json'
t.session.get.assert_called_with(expected, params=params)
@patch.object(req, 'Session', Mock())
class ThreeRequests(unittest.TestCase):
def setUp(self):
core.json = Mock()
def test_empty_requests_call(self):
t = Three('api.city.gov')
t.requests()
expected = 'https://api.city.gov/requests.json'
t.session.get.assert_called_with(expected, params={})
def test_requests_call_with_service_code(self):
t = Three('api.city.gov')
t.requests('123')
params = {'service_code': '123'}
expected = 'https://api.city.gov/requests.json'
t.session.get.assert_called_with(expected, params=params)
def test_requests_with_additional_keyword_arguments(self):
t = Three('api.city.gov')
t.requests('123', status='open')
params = {'service_code': '123', 'status': 'open'}
expected = 'https://api.city.gov/requests.json'
t.session.get.assert_called_with(expected, params=params)
@patch.object(req, 'Session', Mock())
class ThreeRequest(unittest.TestCase):
def setUp(self):
core.json = Mock()
def test_getting_a_specific_request(self):
t = Three('api.city.gov')
t.request('123')
expected = 'https://api.city.gov/requests/123.json'
t.session.get.assert_called_with(expected, params={})
def test_start_and_end_keyword_arguments(self):
t = Three('api.city.gov')
t.request('456', start='03-01-2010', end='03-05-2010')
expected = 'https://api.city.gov/requests/456.json'
params = {
'start_date': '2010-03-01T00:00:00Z',
'end_date': '2010-03-05T00:00:00Z'
}
t.session.get.assert_called_with(expected, params=params)
def test_only_start_keyword_arguments(self):
t = Three('api.city.gov')
t.request('456', start='03-01-2010')
end_date = date.today().strftime('%Y-%m-%dT00:00:00Z')
expected = 'https://api.city.gov/requests/456.json'
params = {
'start_date': '2010-03-01T00:00:00Z',
'end_date': end_date
}
t.session.get.assert_called_with(expected, params=params)
def test_between_keyword_argument(self):
t = Three('api.city.gov')
t.request('789', between=['03-01-2010', '03-05-2010'])
expected = 'https://api.city.gov/requests/789.json'
params = {
'start_date': '2010-03-01T00:00:00Z',
'end_date': '2010-03-05T00:00:00Z'
}
t.session.get.assert_called_with(expected, params=params)
def test_shortened_between_keyword(self):
t = Three('api.city.gov')
dates = ('03-01-10', '03-05-10')
t.request('123', between=dates)
expected = 'https://api.city.gov/requests/123.json'
params = {
'start_date': '2010-03-01T00:00:00Z',
'end_date': '2010-03-05T00:00:00Z'
}
t.session.get.assert_called_with(expected, params=params)
def test_between_can_handle_datetimes(self):
t = Three('api.city.gov')
dates = (date(2010, 3, 10), date(2010, 3, 15))
t.request('123', between=dates)
expected = 'https://api.city.gov/requests/123.json'
params = {
'start_date': '2010-03-10T00:00:00Z',
'end_date': '2010-03-15T00:00:00Z'
}
t.session.get.assert_called_with(expected, params=params)
@responses.activate
@patch.object(req, 'Session', Mock())
class ThreePost(unittest.TestCase):
def setUp(self):
core.json = Mock()
def test_a_default_post(self):
responses.add(responses.POST, 'https://api.city.gov/requests.json',
body="""[
{
"service_request_id":293944,
"service_notice":"The City will inspect and require the responsible party to correct within 24 hours and/or issue a Correction Notice or Notice of Violation of the Public Works Code",
"account_id":null
}
]""",
status=201,
content_type='application/json')
t = Three('api.city.gov', api_key='my_api_key')
resp = t.post('123', name='Zach Williams', address='85 2nd Street')
params = {'first_name': 'Zach', 'last_name': 'Williams',
'service_code': '123', 'address_string': '85 2nd Street',
'api_key': 'my_api_key'}
assert resp.status_code == 201
def test_post_request_with_api_key_argument(self):
t = Three('http://seeclicktest.com/open311/v2')
t.post('1627', name='Zach Williams', address='120 Spring St',
description='Just a test post.', phone='555-5555',
api_key='my_api_key')
params = {
'first_name': 'Zach', 'last_name': 'Williams',
'description': 'Just a test post.', 'service_code': '1627',
'address_string': '120 Spring St', 'phone': '555-5555',
'api_key': 'my_api_key'
}
expected = 'http://seeclicktest.com/open311/v2/requests.json'
t.session.post.assert_called_with(expected, data=params, files=None)
@patch.object(req, 'Session', Mock())
class ThreeToken(unittest.TestCase):
def setUp(self):
core.json = Mock()
def test_a_default_token_call(self):
t = Three('api.city.gov')
t.token('12345')
expected = 'https://api.city.gov/tokens/12345.json'
t.session.get.assert_called_with(expected, params={})
class TopLevelFunctions(unittest.TestCase):
def setUp(self):
self.session = Mock()
self.patch = patch.object(req, 'Session',
Mock(return_value=self.session))
self.patch.start()
core.json = MagicMock()
def test_three_api(self):
three.key('my_api_key')
key = os.environ['OPEN311_API_KEY']
self.assertEqual(key, 'my_api_key')
def test_cities_function_returns_a_list(self):
cities = three.cities()
self.assertTrue(isinstance(cities, list))
def test_three_city_info(self):
three.city('sf')
info = os.environ['OPEN311_CITY_INFO']
self.assertTrue(info)
def test_three_city_error(self):
self.assertRaises(CityNotFound, three.city, 'this is made up')
def test_three_discovery(self):
three.city('new haven')
three.discovery()
self.assertTrue(self.session.get.called)
def test_three_requests(self):
three.city('macon')
three.requests()
self.assertTrue(self.session.get.called)
def test_three_request_specific_report(self):
three.city('macon')
three.request('123abc')
self.assertTrue(self.session.get.called)
def test_three_services(self):
three.city('sf')
three.services()
self.assertTrue(self.session.get.called)
def test_three_token(self):
three.token('123abc')
three.services()
self.assertTrue(self.session.get.called)
def test_three_dev_functionality(self):
three.dev('http://api.city.gov')
environ = os.environ['OPEN311_CITY_INFO']
expected = '{"endpoint": "http://api.city.gov"}'
self.assertEqual(environ, expected)
def test_three_dev_keyword_arguments(self):
three.dev('http://api.city.gov', format='xml')
environ = json.loads(os.environ['OPEN311_CITY_INFO'])
expected = {"endpoint": "http://api.city.gov", "format": "xml"}
self.assertEqual(environ, expected)
def tearDown(self):
self.patch.stop()
os.environ['OPEN311_API_KEY'] = ''
os.environ['OPEN311_CITY_INFO'] = ''
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
""" Sahana Eden Assessments Model
@copyright: 2012-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3Assess24HModel",
"S3AssessBuildingModel",
"S3AssessCanvassModel",
)
from gluon import *
from gluon.storage import Storage
from gluon.tools import callback
from ..s3 import *
T = current.T
# Common to both Building & Canvass
assess_property_type_opts = {
1 : T("Single Family"),
2 : T("Multi-Family/Apts"),
3 : T("Residence is Vacation Home"),
4 : T("Business"),
}
# =============================================================================
class S3Assess24HModel(S3Model):
"""
IFRC 24H Assessment form
"""
names = ("assess_24h",
)
def model(self):
T = current.T
s3 = current.response.s3
if s3.bulk:
# Don't default the Team leader name for Bulk Imports
default_person = None
else:
default_person = current.auth.s3_logged_in_person()
# ---------------------------------------------------------------------
# IFRC 24H Assessment
#
tablename = "assess_24h"
self.define_table(tablename,
self.pr_person_id(
default = default_person,
label = ("Name of Assessment Team Leader"),
),
s3_date(default = "now"),
self.gis_location_id(
widget = S3LocationSelector(show_map = False),
),
Field("inhabitants", "integer",
label = T("Approximate number of inhabitants"),
),
Field("inhabitants", "integer",
label = T("Approximate number of inhabitants"),
),
self.pr_person_id("contact_id",
comment = None,
label = ("Name of contact person in the community"),
requires = IS_ADD_PERSON_WIDGET2(),
widget = S3AddPersonWidget2(),
),
Field("injured", "integer",
label = T("# Injured"),
),
Field("dead", "integer",
label = T("# Dead"),
),
Field("missing", "integer",
label = T("# Missing"),
),
Field("minor_damage", "integer",
label = T("# Minor Damage"),
),
Field("moderate_damage", "integer",
label = T("# Moderate Damage"),
),
Field("destroyed", "integer",
label = T("# Destroyed"),
),
# further fields to be confirmed if useful
*s3_meta_fields())
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Create Assessment"),
title_display = T("Assessment Details"),
title_list = T("Assessments"),
title_update = T("Edit Assessment"),
label_list_button = T("List Assessments"),
label_delete_button = T("Delete Assessment"),
msg_record_created = T("Assessment added"),
msg_record_modified = T("Assessment updated"),
msg_record_deleted = T("Assessment deleted"),
msg_list_empty = T("No Assessments found")
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# =============================================================================
class S3AssessBuildingModel(S3Model):
"""
Building Damage Assessment form
"""
names = ("assess_building",
"assess_building_rheader",
)
def model(self):
T = current.T
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
# ---------------------------------------------------------------------
# Building Assessment
#
approved_opts = {
1 : T("Approved"),
2 : T("Rejected"),
}
assess_status_opts = {
1 : T("Assessed"),
2 : T("Ready to Start"),
3 : T("In-Progress"),
4 : T("Finished"),
}
assess_status2_opts = {
1 : T("N/A"),
2 : T("To be done"),
3 : T("In-Progress"),
4 : T("Finished"),
}
assess_priority_opts = {
1 : T("High"),
2 : T("Medium"),
3 : T("Low"),
}
building_status_opts = {
1 : T("Green"),
2 : T("Yellow"),
3 : T("Red (unsafe)"),
}
on_off_opts = {
1 : T("On"),
2 : T("Off"),
}
ownership_opts = {
1: T("Rent"),
2: T("Own"),
}
yes_no_opts = {
1: T("Yes"),
2: T("No"),
}
assess_additional_needs_opts = {
1 : T("Shelter"),
2 : T("Information -Mold -FEMA -Legal"),
3 : T("Other"),
4 : T("Food"),
}
assess_construction_type_opts = {
1 : T("Concrete"),
2 : T("Brick"),
3 : T("Wood Frame"),
4 : T("Metal Stud"),
4 : T("Other"),
}
assess_damage_opts = {
1 : T("Primarily Flood"),
2 : T("Wind/Wind driven rain"),
3 : T("Other"),
}
assess_insurance_opts = {
1 : T("Property"),
2 : T("Flood (Structure)"),
3 : T("Wind/Hurricane"),
4 : T("Sewer Back-up"),
5 : T("Flood (Contents)"),
6 : T("NONE"),
}
#assess_tools_opts = {
# 1 : T("Pump/Hoses"),
# 2 : T("Flat bar"),
# 3 : T("Dust pan"),
# 4 : T("Large pails"),
# 5 : T("Long pry bar"),
# 6 : T("Brooms"),
# 7 : T("Shovels"),
# 8 : T("Pick axe"),
# 9 : T("Trash bags"),
# 10 : T("Wheelbarrows"),
# 11 : T("Hammers"),
# 12 : T("Cleaning Supplies"),
# 13 : T("Crowbar"),
# 14 : T("Sledgehammer"),
# 15 : "",
# 16 : T("Nail pullers/cat's claws"),
# 17 : T("Generator"),
# 18 : T("Screwdrivers"),
# 19 : T("Chalk line"),
# 20 : T("Portable lights"),
# 21 : T("Wrench"),
# 22 : T("Sawzall"),
# 23 : T("Extension cords"),
# 24 : "",
# 25 : T("Utility knives"),
# 26 : T("Headlamps"),
# 21 : "",
# }
#assess_mold_removal_opts = {
# 1 : T("Wire brush"),
# 2 : T("Demolding solution"),
# 3 : T("Tyvek suits"),
# 4 : T("Grinder"),
# 5 : T("Paint brushes"),
# 6 : T("Goggles"),
# 7 : T("Shop vac + HEPA filter"),
# 8 : T("Cloth rags"),
# 9 : T("Rubber gloves"),
# 10 : T("Ladder/step stool"),
# 11 : T("Kllz"),
# #12 : T("Ladder/step stool""),
# }
#assess_personal_protection_opts = {
# 1 : T("N95 Dust masks"),
# 2 : T("Rubber Boots"),
# 4 : T("Respirators"),
# 5 : T("Work gloves"),
# 7 : T("Safety glasses"),
# 8 : T("Hard hats"),
# 10 : T("Boots"),
# 11 : T("Ear plugs"),
# }
#assess_skills_required_opts = {
# 1 : T("Pump equipment"),
# 2 : T("Mold removal"),
# 3 : T("Demolition/gutting"),
# 4 : T("Construction"),
# }
#assess_special_skills_opts = {
# 1 : T("Plumber"),
# 2 : T("Engineer"),
# 3 : T("Electrician"),
# 4 : T("Other"),
# }
assess_vulnerability_opts = {
1 : T("Elderly"),
2 : T("Disabled"),
3 : T("Small Children"),
4 : T("Single Female Head of Household"),
}
assess_work_requested_opts = {
1 : T("Pump out water"),
2 : T("Mud/sand removal"),
3 : T("Demolition/Gutting"),
4 : T("Clean up debris"),
5 : T("Mold removal"),
6 : T("Sanitization"),
}
tablename = "assess_building"
self.define_table(tablename,
Field("database_id", "integer",
label=T("Database ID")),
Field("status", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_status_opts)
),
represent = lambda opt:
assess_status_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=4, **attr),
label=T("Status")),
Field("status_gutting", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_status2_opts)
),
represent = lambda opt:
assess_status2_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=4, **attr),
label=T("Gutting Status")),
Field("status_mold", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_status2_opts)
),
represent = lambda opt:
assess_status2_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=4, **attr),
label=T("Mold Status")),
s3_comments("mold_notes",
comment=None,
label=T("Mold Notes")),
Field("priority", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_priority_opts)
),
represent = lambda opt:
assess_priority_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=3, **attr),
label=T("Priority")),
s3_date(label=T("Intake Date")),
Field("assessor1",
represent = lambda v: v or NONE,
label=T("Assessor 1")),
Field("assessor2",
represent = lambda v: v or NONE,
label=T("Assessor 2")),
Field("name",
represent = lambda v: v or NONE,
label=T("Name")),
Field("phone",
requires=IS_EMPTY_OR(s3_phone_requires),
represent = lambda v: v or NONE,
label=T("Phone Number")),
Field("contact_other",
represent = lambda v: v or NONE,
label=T("Other Contact Information")),
self.gis_location_id(),
Field("homeowner_availability",
represent = lambda v: v or NONE,
label=T("Homeowner Availability")),
Field("type_of_property", "list:integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_property_type_opts,
multiple=True)
),
represent = lambda ids: \
assess_multi_type_represent(ids,
assess_property_type_opts),
widget = lambda f, v, **attr: \
CheckboxesWidgetS3.widget(f, v, cols=4, **attr),
label=T("Type of Property")),
Field("inhabitants", "integer",
represent = lambda v: v or NONE,
label=T("# of Inhabitants")),
Field("year_built", "integer",
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1800, 2012)
),
represent = lambda v: v or NONE,
label=T("Year Built")),
Field("current_residence", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Current Residence")),
Field("ownership", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(ownership_opts)
),
represent = lambda opt: \
ownership_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Ownership")),
Field("intention",
represent = lambda v: v or NONE,
label=T("Intention to Stay Home")),
Field("vulnerability", "list:integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_vulnerability_opts,
multiple=True)
),
represent = lambda ids: \
assess_multi_type_represent(ids,
assess_vulnerability_opts),
widget = lambda f, v, **attr: \
CheckboxesWidgetS3.widget(f, v, cols=4, **attr),
label=T("Vulnerabilities")),
Field("building_status", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(building_status_opts)
),
represent = lambda opt:
building_status_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=3, **attr),
label=T("Based on the DOB/FEMA sticker, the property is")),
Field("insurance", "list:integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_insurance_opts,
multiple=True)
),
represent = lambda ids: \
assess_multi_type_represent(ids,
assess_insurance_opts),
widget = lambda f, v, **attr: \
CheckboxesWidgetS3.widget(f, v, cols=3, **attr),
label=T("Type of Insurance")),
Field("work_requested", "list:integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_work_requested_opts,
multiple=True)
),
represent = lambda ids: \
assess_multi_type_represent(ids,
assess_work_requested_opts),
widget = lambda f, v, **attr: \
CheckboxesWidgetS3.widget(f, v, cols=3, **attr),
label=T("Work Requested")),
Field("construction_type", "list:integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_construction_type_opts,
multiple=True)
),
represent = lambda ids: \
assess_multi_type_represent(ids,
assess_construction_type_opts),
widget = lambda f, v, **attr: \
CheckboxesWidgetS3.widget(f, v, cols=4, **attr),
label=T("Construction Type (Check all that apply)"),
),
Field("electricity", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(on_off_opts)
),
represent = lambda opt: \
on_off_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Electricity")),
Field("gas", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(on_off_opts)
),
represent = lambda opt: \
on_off_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Gas")),
Field("basement_flooding", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Basement Flooding")),
Field("basement_flooding_depth", "integer",
requires=IS_EMPTY_OR(
IS_INT_IN_RANGE(1, 99)
),
represent = lambda v: v or NONE,
label=T("Depth (feet)"),
),
Field("first_flooding", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("First Floor Flooding")),
Field("first_flooding_depth", "integer",
requires=IS_EMPTY_OR(
IS_INT_IN_RANGE(1, 99)
),
represent = lambda v: v or NONE,
label=T("Depth (feet)"),
),
Field("drywall", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Drywall")),
Field("floor", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Floor")),
Field("remove_loose_debris", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Remove Loose Debris")),
Field("remove_furniture", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Remove Furniture")),
Field("remove_water_heater", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Remove Water Heater")),
Field("remove_appliances", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Remove Major Appliances")),
Field("asbestos", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Asbestos")),
Field("damage_source", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_damage_opts)
),
represent = lambda opt: \
assess_damage_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=3, **attr),
label=T("Damage Source")),
Field("damage_source_other",
represent = lambda v: v or NONE,
label=T("Other")),
s3_comments("damage_details",
label=T("Additional Description of Damage"),
comment=None,
),
s3_comments("work_plan",
label=T("Work Plan"),
comment=T("Describe access points, advice for team leaders"),
),
#Field("tools_required", "list:integer",
# requires=IS_EMPTY_OR(
# IS_IN_SET(assess_tools_opts,
# multiple=True)
# ),
# represent = lambda ids: \
# assess_multi_type_represent(ids,
# assess_tools_opts),
# widget = lambda f, v, **attr: \
# CheckboxesWidgetS3.widget(f, v, cols=3, **attr),
# label=T("Tools and materials required"),
# ),
#s3_comments("tools_other",
# comment=None,
# label=T("Tools Other")),
#Field("mold_equipment", "list:integer",
# requires=IS_EMPTY_OR(
# IS_IN_SET(assess_mold_removal_opts,
# multiple=True)
# ),
# represent = lambda ids: \
# assess_multi_type_represent(ids,
# assess_mold_removal_opts),
# widget = lambda f, v, **attr: \
# CheckboxesWidgetS3.widget(f, v, cols=3, **attr),
# label=T("Mold removal equipment"),
# ),
#Field("personal_protectivity", "list:integer",
# requires=IS_EMPTY_OR(
# IS_IN_SET(assess_personal_protection_opts,
# multiple=True)
# ),
# represent = lambda ids: \
# assess_multi_type_represent(ids,
# assess_personal_protection_opts),
# widget = lambda f, v, **attr: \
# CheckboxesWidgetS3.widget(f, v, cols=2, **attr),
# label=T("All Teams Must Have Personal Protectivity Equipment"),
# ),
#Field("skills_required", "list:integer",
# requires=IS_EMPTY_OR(
# IS_IN_SET(assess_skills_required_opts,
# multiple=True)
# ),
# represent = lambda ids: \
# assess_multi_type_represent(ids,
# assess_skills_required_opts),
# widget = lambda f, v, **attr: \
# CheckboxesWidgetS3.widget(f, v, cols=2, **attr),
# label=T("Skills Required"),
# ),
#Field("special_skills_required", "list:integer",
# requires=IS_EMPTY_OR(
# IS_IN_SET(assess_special_skills_opts,
# multiple=True)
# ),
# represent = lambda ids: \
# assess_multi_type_represent(ids,
# assess_special_skills_opts),
# widget = lambda f, v, **attr: \
# CheckboxesWidgetS3.widget(f, v, cols=2, **attr),
# label=T("Special Skills Required"),
# ),
s3_comments("special_skills",
comment=None,
label=T("Special Tools and Skills")),
Field("estimated_volunteers",
represent = lambda v: v or NONE,
label=T("Estimated Volunteers"),
),
Field("estimated_days",
represent = lambda v: v or NONE,
label=T("Estimated Days"),
),
Field("additional_needs", "list:integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_additional_needs_opts,
multiple=True)
),
represent = lambda ids: \
assess_multi_type_represent(ids,
assess_additional_needs_opts),
widget = lambda f, v, **attr: \
CheckboxesWidgetS3.widget(f, v, cols=3, **attr),
label=T("Additional Needs"),
),
Field("approval", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(approved_opts)
),
represent = lambda opt:
approved_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Approved")),
s3_comments("approval_details",
comment=None,
label=T("Details")),
Field("permission", "integer",
requires=IS_EMPTY_OR(
IS_IN_SET(yes_no_opts)
),
represent = lambda opt: \
yes_no_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=2, **attr),
label=T("Permission from Owner to Work")),
s3_date("date_ready",
label=T("Date Ready")),
s3_comments(),
s3_comments("progress",
comment=None,
label=T("Progress and Notes")),
*s3_meta_fields())
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Assessment"),
title_display = T("Assessment Details"),
title_list = T("Assessments"),
title_update = T("Edit Assessment"),
label_list_button = T("List Assessments"),
label_delete_button = T("Delete Assessment"),
msg_record_created = T("Assessment added"),
msg_record_modified = T("Assessment updated"),
msg_record_deleted = T("Assessment deleted"),
msg_list_empty = T("No Assessments found")
)
# Filter Widgets
filter_widgets = [
S3TextFilter(["name",
"database_id",
],
label = T("Name, and/or ID"),
comment = T("To search for a building assessment, enter the name or ID. You may use % as wildcard. Press 'Search' without input to list all assessments."),
),
S3OptionsFilter("status",
label = T("Status"),
options = assess_status_opts,
cols = 4,
),
S3OptionsFilter("status_gutting",
label = T("Gutting Status"),
options = assess_status2_opts,
cols = 4,
),
S3OptionsFilter("status_mold",
label = T("Mold Status"),
options = assess_status2_opts,
cols = 4,
),
S3OptionsFilter("priority",
label = T("Priority"),
options = assess_priority_opts,
cols = 3,
),
]
# Configuration
self.configure(tablename,
onvalidation = self.assess_building_onvalidation,
filter_widgets = filter_widgets,
subheadings = {
T("Damages"): "electricity",
}
)
# Generate Work Order
self.set_method("assess", "building",
method="form",
action=self.assess_building_form)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(assess_building_rheader = self.assess_building_rheader,
)
# -------------------------------------------------------------------------
@staticmethod
def assess_building_onvalidation(form):
"""
Update the overall status from the Gutting/Mold status
"""
vars = form.vars
status = vars.status and int(vars.status) or None
if status < 3:
status_gutting = vars.status_gutting and int(vars.status_gutting) or None
status_mold = vars.status_mold and int(vars.status_mold) or None
if status_gutting in (3, 4) or \
status_mold in (3, 4):
vars.status = 3
return
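    # Rollup illustrated: a record whose overall status is below 3 but whose
    # gutting or mold status is 3 or 4 has its overall status raised to 3;
    # records already at status 3 or above are left untouched.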
# -------------------------------------------------------------------------
@staticmethod
def assess_building_rheader(r):
""" Resource Header """
if r.representation != "html" or r.method == "import" or not r.record:
# RHeaders only used in interactive views
return None
rheader = TABLE(A(T("Print Work Order"),
_href = URL(args = [r.record.id, "form"]
),
_class = "action-btn"
))
return rheader
# -------------------------------------------------------------------------
@staticmethod
def assess_building_form(r, **attr):
"""
Generate a PDF of a Work Order
@ToDo: Move this to Template?
"""
db = current.db
table = db.assess_building
gtable = db.gis_location
query = (table.id == r.id)
left = gtable.on(gtable.id == table.location_id)
record = db(query).select(left=left,
limitby=(0, 1)).first()
location = record.gis_location
record = record.assess_building
address = location.get("address", current.messages["NONE"])
header = TABLE(
TR(TD(),
TD(),
TD(),
TD("Rockaways", _align="right"),
),
TR(TD(),
TD(),
TD(),
TD("HOUSEHOLD ASSESSMENT"),
),
TR(TD(),
TD(),
TD(),
TD("Database ID: %s" % record.database_id),
),
TR(TD("Intake Date: %s" % table.date.represent(record.date)),
),
TR(TD("Assessor 1: %s" % record.assessor1,
_colspan=2,
),
TD("Assessor 2: %s" % record.assessor2,
_colspan=2,
),
),
TR(TD("Name: %s" % record.name),
TD("Phone Number: %s" % table.phone.represent(record.phone)),
TD("Other Contact: %s" % table.contact_other.represent(record.contact_other),
_colspan=2,
),
),
TR(TD("Address: %s" % address,
_colspan=4,
),
),
TR(TD("Homeowner Availability: %s" % record.homeowner_availability,
_colspan=4,
),
),
TR(TD("Type of Property: %s" % table.type_of_property.represent(record.type_of_property),
_colspan=4,
),
),
TR(TD("# of Inhabitants: %s" % table.inhabitants.represent(record.inhabitants)),
TD("Year Built: %s" % table.year_built.represent(record.year_built)),
TD("Ownership: %s" % table.ownership.represent(record.ownership)),
),
TR(TD("Current Residence: %s" % table.current_residence.represent(record.current_residence),
_colspan=4,
),
),
TR(TD("Intention to Stay Home: %s" % table.intention.represent(record.intention),
_colspan=2,
),
TD("Vulnerabilities: %s" % table.vulnerability.represent(record.vulnerability),
_colspan=2,
),
),
TR(TD("Based on the DOB/FEMA sticker, the property is: %s" % table.building_status.represent(record.building_status),
_colspan=2,
),
TD("Type of Insurance: %s" % table.insurance.represent(record.insurance),
_colspan=2,
),
),
TR(TD("Work Requested: %s" % table.work_requested.represent(record.work_requested),
_colspan=2),
TD("Construction Type: %s" % table.construction_type.represent(record.construction_type),
_colspan=2,
),
),
TR(TD("Damages"),
TD("Electricity: %s" % table.electricity.represent(record.electricity)),
TD("Gas: %s" % table.gas.represent(record.gas)),
TD("Basement Flooding: %s Depth: %s feet" % (table.basement_flooding.represent(record.basement_flooding),
table.basement_flooding_depth.represent(record.basement_flooding_depth))),
),
TR(TD(),
TD("Drywall: %s" % table.drywall.represent(record.drywall)),
TD("Floor: %s" % table.floor.represent(record.floor)),
TD("First Floor Flooding: %s Depth: %s feet" % (table.first_flooding.represent(record.first_flooding),
table.first_flooding_depth.represent(record.first_flooding_depth))),
),
TR(TD("Remove Loose Debris: %s" % table.remove_loose_debris.represent(record.remove_loose_debris)),
TD("Remove Furniture: %s" % table.remove_furniture.represent(record.remove_furniture)),
TD("Remove Water Heater: %s" % table.remove_water_heater.represent(record.remove_water_heater)),
TD("Remove Major Appliances: %s" % table.remove_appliances.represent(record.remove_appliances)),
),
TR(TD("Asbestos: %s" % table.asbestos.represent(record.asbestos)),
),
TR(TD("Source of Damages: %s" % table.damage_source.represent(record.damage_source),
_colspan=2,
),
TD("Other: %s" % table.damage_source_other.represent(record.damage_source_other),
_colspan=2,
),
),
TR(TD("Additional Description of Damage: %s" % table.damage_details.represent(record.damage_details),
_colspan=4,
),
),
TR(TD("Workplan: %s" % table.work_plan.represent(record.work_plan),
_colspan=4,
),
),
TR(TD("Special Skills: %s" % table.special_skills.represent(record.special_skills),
_colspan=4,
),
),
TR(TD("Estimated Volunteers: %s" % table.estimated_volunteers.represent(record.estimated_volunteers),
_colspan=2,
),
TD("Estimated Days: %s" % table.estimated_days.represent(record.estimated_days),
_colspan=2,
),
),
TR(TD("Additional Needs: %s" % table.additional_needs.represent(record.additional_needs),
_colspan=4,
),
),
TR(TD("Approval: %s" % table.approval.represent(record.approval)),
TD("Details: %s" % table.approval_details.represent(record.approval_details),
_colspan=3,
),
),
TR(TD("Permission from Owner to Work: %s" % table.permission.represent(record.permission),
_colspan=2,
),
TD("Date Ready: %s" % table.date_ready.represent(record.date_ready),
_colspan=2,
),
),
TR(TD("Comments: %s" % table.comments.represent(record.comments),
_colspan=4,
),
),
TR(TD("Progress and Notes: %s" % table.progress.represent(record.progress),
_colspan=4,
_rowspan=4,
),
),
)
WORK_ORDER = current.T("Work Order")
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
return exporter(r,
method = "read",
pdf_title = WORK_ORDER,
pdf_filename = "%s %s" % (WORK_ORDER,
record.database_id),
pdf_header = header,
pdf_header_padding = 12,
pdf_table_autogrow = "B",
**attr
)
# =============================================================================
class S3AssessCanvassModel(S3Model):
"""
Building Canvassing form
"""
names = ("assess_canvass",)
def model(self):
T = current.T
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
# ---------------------------------------------------------------------
# Canvassing
#
status_opts = {
1 : T("Not actioned"),
2 : T("Actioned"),
}
tablename = "assess_canvass"
self.define_table(tablename,
Field("status", "integer",
default = 1,
requires = IS_IN_SET(status_opts),
represent = lambda opt:
status_opts.get(opt,
UNKNOWN_OPT),
widget = lambda f, v, **attr: \
SQLFORM.widgets.radio.widget(f, v, cols=4, **attr),
label=T("Status")),
s3_date(),
self.gis_location_id(),
Field("type_of_property", "list:integer",
requires=IS_EMPTY_OR(
IS_IN_SET(assess_property_type_opts,
multiple=True)
),
represent = lambda ids: \
assess_multi_type_represent(ids,
assess_property_type_opts),
widget = lambda f, v, **attr: \
CheckboxesWidgetS3.widget(f, v, cols=4, **attr),
label=T("Type of Property")),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Create Assessment"),
title_display = T("Assessment Details"),
title_list = T("Assessments"),
title_update = T("Edit Assessment"),
label_list_button = T("List Assessments"),
label_delete_button = T("Delete Assessment"),
msg_record_created = T("Assessment added"),
msg_record_modified = T("Assessment updated"),
msg_record_deleted = T("Assessment deleted"),
msg_list_empty = T("No Assessments found")
)
filter_widgets = [
S3TextFilter(["location_id$name",
"location_id$addr_street",
],
label=T("Building Name or Address"),
comment=T("To search for a building canvass assessment, enter the Building Name or Addresss. You may use % as wildcard. Press 'Search' without input to list all assessments."),
),
S3OptionsFilter("status",
label=T("Status"),
options = status_opts,
cols = 3,
),
]
self.configure(tablename,
filter_widgets = filter_widgets,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
def assess_multi_type_represent(ids, opts):
"""
Represent Multiple Types (list:integer)
"""
if not ids:
return current.messages["NONE"]
ids = [ids] if type(ids) is not list else ids
strings = [str(opts.get(id)) for id in ids]
return ", ".join(strings)
# END =========================================================================
|
|
import datetime
from collections import defaultdict
import maya
import requests
from lxml import html
from fuzzywuzzy import process
from Database import Database
class Almanac(Database):
def __init__(self):
super().__init__()
self.validate_database()
def update(self, marketdata):
ocean_id = self.create_or_get_ocean(marketdata["ocean"])
island_name = self.validate_island(marketdata["ocean"], marketdata["island"])
island_id = self.create_or_get_island(island_name, ocean_id)
commodities = marketdata["goods"]
now = maya.now().datetime()
for commodity in commodities:
commodity_id = self.create_or_get_commodity(commodity)
for order in commodities[commodity]["buy_orders"]:
self.add_order(order, commodity_id, island_id, "buy", now)
for order in commodities[commodity]["sell_orders"]:
self.add_order(order, commodity_id, island_id, "sell", now)
self.conn.commit()
def validate_island(self, ocean_name, island_name):
page = requests.get("http://{}.puzzlepirates.com/yoweb/"
"island/info.wm?showAll=true".format(ocean_name))
tree = html.fromstring(page.content)
islands = tree.xpath("//font[@size='+1']/text()")
return process.extract(island_name, islands, limit=1)[0][0]
def create_or_get_ocean(self, ocean_name):
ocean = empty = object()
for ocean in self.conn.execute("SELECT rowid FROM oceans WHERE name=?", (ocean_name,)):
id_ = ocean["rowid"]
if ocean is empty:
cur = self.conn.execute(
"INSERT INTO oceans VALUES (?)", (ocean_name,))
id_ = cur.lastrowid
return id_
def create_or_get_island(self, island_name, ocean_id):
island = empty = object()
for island in self.conn.execute(
"SELECT rowid FROM islands WHERE name=? AND ocean_id=?",
(island_name, ocean_id,)):
id_ = island["rowid"]
if island is empty:
cur = self.conn.execute(
"INSERT INTO islands (name, ocean_id) VALUES (?, ?)",
(island_name, ocean_id,))
id_ = cur.lastrowid
return id_
def create_or_get_commodity(self, commodity_name):
commodity = empty = object()
for commodity in self.conn.execute(
"SELECT rowid FROM commodities WHERE name=?",
(commodity_name,)):
id_ = commodity["rowid"]
if commodity is empty:
cur = self.conn.execute(
"INSERT INTO commodities (name) VALUES (?)",
(commodity_name,))
id_ = cur.lastrowid
return id_
def add_order(self, order, commodity_id, island_id, type_, time):
self.conn.execute(
"""INSERT INTO orders
(commodity_id, island_id, shop, price, amount, order_type, time_reported)
VALUES
(?, ?, ?, ?, ?, ?, ?)""",
(commodity_id,
island_id,
order["shop"],
order["price"],
order["amount"],
type_,
time,))
@property
def oceans(self):
return {
row["name"]: Ocean.from_db(row)
for row in self.conn.execute("SELECT rowid, * FROM oceans")
}
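# Note on the create_or_get_* helpers above: `ocean = empty = object()` binds
# both names to one fresh sentinel. If the SELECT loop never yields a row, the
# name still points at the sentinel, so an INSERT is issued and its lastrowid
# returned; otherwise the rowid found in the loop is used.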
class Ocean(Database):
def __init__(self, id_, name):
super().__init__()
self.id_ = id_
self.name = name
@classmethod
def from_db(cls, db_row):
return cls(db_row["rowid"], db_row["name"])
@property
def islands(self):
return {
row["name"]: Island.from_db(row)
for row in self.conn.execute(
"SELECT rowid, * FROM islands WHERE ocean_id=?", (self.id_,))
}
@property
def commodities(self):
return {
row["name"]: Commodity.from_db(row, parent=self)
for row in self.conn.execute(
"SELECT rowid, * FROM commodities WHERE rowid in "
"(SELECT commodity_id FROM orders WHERE island_id in "
"(SELECT rowid FROM islands WHERE ocean_id=?))", (self.id_,))
}
def orders(self, commodity_id, type_, all_orders):
orders = [
Order.from_db(row) for row in
self.conn.execute(
("SELECT * FROM orders o "
"WHERE cast(commodity_id as text) like ? AND order_type like ?"),
(commodity_id, type_,))]
if all_orders:
return orders
else:
newest = max(orders, key=lambda o: o.time_reported)
return [order for order in orders if order.time_reported == newest.time_reported]
def buy_orders(self, commodity_id="%", all_orders=False):
return self.orders(commodity_id, "buy", all_orders)
def sell_orders(self, commodity_id="%", all_orders=False):
return self.orders(commodity_id, "sell", all_orders)
@property
def routes(self):
return []
orders = {
order.commodity_name: [
order
]
for order in self.orders("%", "%", False)
}
return orders
class Island(Database):
def __init__(self, id_, name, ocean_id):
super().__init__()
self.id_ = id_
self.name = name
self.ocean_id = ocean_id
@classmethod
def from_db(cls, db_row):
return cls(db_row["rowid"], db_row["name"], db_row["ocean_id"])
@property
def commodities(self):
return {
row["name"]: Commodity.from_db(row, parent=self)
for row in self.conn.execute(
"SELECT rowid, * FROM commodities WHERE rowid in "
"(SELECT commodity_id FROM orders WHERE island_id=?) ", (self.id_,)
)
}
@property
def parent(self):
cur = self.conn.execute("SELECT rowid, * from oceans WHERE rowid=?", (self.ocean_id,))
return Ocean.from_db(cur.fetchone())
def orders(self, commodity_id, type_, all_orders, sort):
orders = self.conn.execute(
("SELECT * FROM orders o "
"WHERE commodity_id = ? AND order_type = ? "
"AND island_id = ? AND "
"time_reported = (SELECT MAX(time_reported) "
"FROM orders WHERE o.commodity_id=orders.commodity_id "
"AND o.island_id=orders.island_id) ORDER BY price " + sort),
(commodity_id, type_, self.id_,))
return orders
def buy_orders(self, commodity_id="%", all_orders=False, sort="DESC"):
return self.orders(commodity_id, "buy", all_orders, sort)
def sell_orders(self, commodity_id="%", all_orders=False, sort="ASC"):
return self.orders(commodity_id, "sell", all_orders, sort)
@property
def routes(self):
routes = defaultdict(list)
islands = self.parent.islands
for island in islands:
commodities = islands[island].commodities
for commodity in commodities:
buy_orders = self.sell_orders(commodities[commodity].id_)
sell_orders = islands[island].buy_orders(commodities[commodity].id_)
try:
for orders in zip(buy_orders, sell_orders):
if orders[0]["price"] < orders[1]["price"]:
routes[islands[island].name].append(
Route(
self,
islands[island],
commodities[commodity],
Order.from_db(orders[0]),
Order.from_db(orders[1]))
)
else:
raise StopIteration
except StopIteration:
continue
return routes
class Route(object):
def __init__(self, start_island, end_island, commodity, buy_order, sell_order):
self.start_island = start_island
self.end_island = end_island
self.commodity = commodity
self.buy_order = buy_order
self.sell_order = sell_order
@property
def difference(self):
return self.sell_order.price - self.buy_order.price
@property
def amount(self):
if self.buy_order.amount >= self.sell_order.amount:
return self.sell_order.amount
else:
return self.buy_order.amount
@property
def profit(self):
return self.amount * self.difference
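# Worked example for Route (hypothetical numbers): buying 100 units at 10 and
# selling into an order for 80 units at 14 gives amount = min(100, 80) = 80,
# difference = 14 - 10 = 4, and therefore profit = 80 * 4 = 320.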
class Commodity(Database):
def __init__(self, id_, name, parent):
super().__init__()
self.id_ = id_
self.name = name
self.parent = parent
@classmethod
def from_db(cls, db_row, parent=None):
return cls(db_row["rowid"], db_row["name"], parent)
class Order(Database):
def __init__(self, shop, price, amount, order_type, time_reported,
island_id, commodity_id):
super().__init__()
self.shop = shop
self.price = price
self.amount = amount
self.order_type = order_type
self.time_reported = time_reported
self.island_id = island_id
self.commodity_id = commodity_id
@property
def time_reported_slang(self):
maya_dt = maya.MayaDT.from_datetime(self.time_reported)
return maya_dt.slang_time()
@property
def island_name(self):
cur = self.conn.execute(
"SELECT name FROM islands WHERE rowid=?", (self.island_id,)
)
return cur.fetchone()["name"]
@property
def commodity_name(self):
cur = self.conn.execute(
"SELECT name FROM commodities WHERE rowid=?", (self.commodity_id,)
)
return cur.fetchone()["name"]
@classmethod
def from_db(cls, db_row):
return cls(db_row["shop"], db_row["price"], db_row["amount"],
db_row["order_type"], db_row["time_reported"],
db_row["island_id"], db_row["commodity_id"])
@property
def parent(self):
cur = self.conn.execute("SELECT rowid, * from islands WHERE rowid=?", (self.island_id,))
        return Island.from_db(cur.fetchone())
if __name__ == "__main__":
al = Almanac()
routes = al.oceans["Obsidian"].islands["Port Venture"].routes
for route in routes:
print("Dock side" if routes[route][0].end_island.name == "Port Venture" else routes[route][0].end_island.name)
for trade in routes[route]:
            print(trade.start_island.name, trade.end_island.name, trade.buy_order.commodity_name, trade.difference)
|
|
# $Id$
#
# Copyright (C) 2007-2008 Greg Landrum
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Lipinski,Descriptors,Crippen
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbModule
import re
#set up the logger:
import rdkit.RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
def ProcessMol(mol,typeConversions,globalProps,nDone,nameProp='_Name',nameCol='compound_id',
redraw=False,keepHs=False,
skipProps=False,addComputedProps=False,
skipSmiles=False,
uniqNames=None,namesSeen=None):
if not mol:
raise ValueError('no molecule')
if keepHs:
Chem.SanitizeMol(mol)
try:
nm = mol.GetProp(nameProp)
except KeyError:
nm = None
if not nm:
nm = 'Mol_%d'%nDone
if uniqNames and nm in namesSeen:
logger.error('duplicate compound id (%s) encountered. second instance skipped.'%nm)
return None
namesSeen.add(nm)
row = [nm]
if not skipProps:
if addComputedProps:
nHD=Lipinski.NumHDonors(mol)
mol.SetProp('DonorCount',str(nHD))
nHA=Lipinski.NumHAcceptors(mol)
mol.SetProp('AcceptorCount',str(nHA))
nRot=Lipinski.NumRotatableBonds(mol)
mol.SetProp('RotatableBondCount',str(nRot))
MW=Descriptors.MolWt(mol)
mol.SetProp('AMW',str(MW))
logp=Crippen.MolLogP(mol)
mol.SetProp('MolLogP',str(logp))
pns = list(mol.GetPropNames())
pD={}
for pi,pn in enumerate(pns):
if pn.lower()==nameCol.lower(): continue
pv = mol.GetProp(pn).strip()
if pv.find('>')<0 and pv.find('<')<0:
colTyp = globalProps.get(pn,2)
while colTyp>0:
try:
tpi = typeConversions[colTyp][1](pv)
except Exception:
colTyp-=1
else:
break
globalProps[pn]=colTyp
pD[pn]=typeConversions[colTyp][1](pv)
else:
pD[pn]=pv
else:
pD={}
if redraw:
        AllChem.Compute2DCoords(mol)
if not skipSmiles:
row.append(Chem.MolToSmiles(mol,True))
row.append(DbModule.binaryHolder(mol.ToBinary()))
row.append(pD)
return row
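# Note on the property handling above: globalProps maps each property name to
# the widest column type seen so far (2 = int, 1 = float, 0 = varchar, per the
# typeConversions dict built in LoadDb). The while loop tries the current
# converter and falls back one level on failure, so a column that parsed as int
# for earlier molecules is widened to float or varchar when a later value
# requires it.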
def ConvertRows(rows,globalProps,defaultVal,skipSmiles):
for i,row in enumerate(rows):
newRow = [row[0],row[1]]
pD=row[-1]
for pn in globalProps:
pv = pD.get(pn,defaultVal)
newRow.append(pv)
newRow.append(row[2])
if not skipSmiles:
newRow.append(row[3])
rows[i] = newRow
def LoadDb(suppl,dbName,nameProp='_Name',nameCol='compound_id',silent=False,
redraw=False,errorsTo=None,keepHs=False,defaultVal='N/A',skipProps=False,
regName='molecules',skipSmiles=False,maxRowsCached=-1,
uniqNames=False,addComputedProps=False,lazySupplier=False,
startAnew=True):
if not lazySupplier:
nMols = len(suppl)
else:
nMols=-1
if not silent:
logger.info("Generating molecular database in file %s"%dbName)
if not lazySupplier:
logger.info(" Processing %d molecules"%nMols)
rows = []
globalProps = {}
namesSeen = set()
nDone = 0
typeConversions={0:('varchar',str),1:('float',float),2:('int',int)}
for m in suppl:
nDone +=1
if not m:
if errorsTo:
if hasattr(suppl,'GetItemText'):
d = suppl.GetItemText(nDone-1)
errorsTo.write(d)
else:
logger.warning('full error file support not complete')
continue
row=ProcessMol(m,typeConversions,globalProps,nDone,nameProp=nameProp,
nameCol=nameCol,redraw=redraw,
keepHs=keepHs,skipProps=skipProps,
addComputedProps=addComputedProps,skipSmiles=skipSmiles,
uniqNames=uniqNames,namesSeen=namesSeen)
if row is None: continue
rows.append([nDone]+row)
if not silent and not nDone%100:
logger.info(' done %d'%nDone)
if len(rows)==maxRowsCached:
break
nameDef='%s varchar not null'%nameCol
if uniqNames:
nameDef += ' unique'
typs = ['guid integer not null primary key',nameDef]
pns = []
for pn,v in globalProps.items():
addNm = re.sub(r'[\W]','_',pn)
typs.append('%s %s'%(addNm,typeConversions[v][0]))
pns.append(pn.lower())
if not skipSmiles:
if 'smiles' not in pns:
typs.append('smiles varchar')
else:
typs.append('cansmiles varchar')
typs.append('molpkl %s'%(DbModule.binaryTypeName))
conn = DbConnect(dbName)
curs = conn.GetCursor()
if startAnew:
try:
curs.execute('drop table %s'%regName)
except Exception:
pass
curs.execute('create table %s (%s)'%(regName,','.join(typs)))
else:
curs.execute('select * from %s limit 1'%(regName,))
ocolns = set([x[0] for x in curs.description])
ncolns = set([x.split()[0] for x in typs])
if ncolns != ocolns:
raise ValueError('Column names do not match: %s != %s'%(ocolns,ncolns))
curs.execute('select max(guid) from %s'%(regName,))
offset = curs.fetchone()[0]
for row in rows:
row[0] += offset
qs = ','.join([DbModule.placeHolder for x in typs])
ConvertRows(rows,globalProps,defaultVal,skipSmiles)
curs.executemany('insert into %s values (%s)'%(regName,qs),rows)
conn.Commit()
rows = []
while 1:
nDone +=1
try:
m = next(suppl)
except StopIteration:
break
if not m:
if errorsTo:
if hasattr(suppl,'GetItemText'):
d = suppl.GetItemText(nDone-1)
errorsTo.write(d)
else:
logger.warning('full error file support not complete')
continue
tmpProps={}
row=ProcessMol(m,typeConversions,globalProps,nDone,nameProp=nameProp,
nameCol=nameCol,redraw=redraw,
keepHs=keepHs,skipProps=skipProps,
addComputedProps=addComputedProps,skipSmiles=skipSmiles,
uniqNames=uniqNames,namesSeen=namesSeen)
if not row: continue
rows.append([nDone]+row)
if not silent and not nDone%100:
logger.info(' done %d'%nDone)
if len(rows)==maxRowsCached:
ConvertRows(rows,globalProps,defaultVal,skipSmiles)
curs.executemany('insert into %s values (%s)'%(regName,qs),rows)
conn.Commit()
rows = []
if len(rows):
ConvertRows(rows,globalProps,defaultVal,skipSmiles)
curs.executemany('insert into %s values (%s)'%(regName,qs),rows)
conn.Commit()
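# A minimal usage sketch (assumed file names, not part of the original module):
#
#   from rdkit import Chem
#   suppl = Chem.SDMolSupplier('mols.sdf')
#   LoadDb(suppl, 'mols.sqlt', nameCol='compound_id', addComputedProps=True)
#
# With startAnew=True (the default) this drops and recreates the 'molecules'
# table, whose columns are guid, compound_id, any SD properties encountered,
# smiles (unless skipSmiles is set) and the pickled molecule in molpkl.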
|
|
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from manilaclient.common.apiclient import exceptions as manila_ex
except ImportError:
from manilaclient.openstack.common.apiclient import exceptions as manila_ex
import mock
from oslo_utils import uuidutils
import testtools
from sahara import exceptions
from sahara.service import shares
from sahara.tests.unit import base
_NAMENODE_IPS = ['192.168.122.3', '192.168.122.4']
_DATANODE_IPS = ['192.168.122.5', '192.168.122.6', '192.168.122.7']
class _FakeShare(object):
def __init__(self, id='12345678-1234-1234-1234-123456789012',
share_proto='NFS',
export_location='192.168.122.1:/path',
access_list=None):
self.id = id
self.share_proto = share_proto
self.export_location = export_location
self.allow = mock.Mock()
self.deny = mock.Mock()
self.access_list = mock.Mock(return_value=access_list or [])
def _mock_node_group(ips, share_list):
# Returns a mocked node group and a list of mocked
# execute_command functions for its instances.
execute_mocks = [mock.Mock(return_value="centos") for ip in ips]
get_id = mock.Mock(return_value=uuidutils.generate_uuid())
instances = [
mock.Mock(
internal_ip=ip,
remote=mock.Mock(
return_value=mock.Mock(
__enter__=mock.Mock(
return_value=mock.Mock(
execute_command=execute_mocks[index],
get_os_distrib=execute_mocks[index])),
__exit__=mock.Mock())))
for index, ip in enumerate(ips)]
node_group = mock.Mock(instances=instances,
shares=share_list,
__getitem__=get_id)
return node_group, execute_mocks
def _setup_calls():
return [
mock.call('rpm -q nfs-utils || yum install -y nfs-utils',
run_as_root=True)]
def _expected_calls(local_path, remote_path, access_argument):
return [
mock.call('mkdir -p %s' % local_path, run_as_root=True),
mock.call("mount | grep '%(remote_path)s' | grep '%(local_path)s' | "
"grep nfs || mount -t nfs %(access_argument)s "
"%(remote_path)s %(local_path)s" %
{
"local_path": local_path,
"remote_path": remote_path,
"access_argument": access_argument
},
run_as_root=True)]
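# For reference, _expected_calls('/mnt/localpath', '192.168.122.1:/path', '-w')
# expands to a mkdir -p call followed by:
#   mount | grep '192.168.122.1:/path' | grep '/mnt/localpath' | grep nfs ||
#       mount -t nfs -w 192.168.122.1:/path /mnt/localpath
# i.e. the share is only mounted if it is not already present.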
class TestShares(base.SaharaTestCase):
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_mount_nfs_shares_to_ng(self, f_manilaclient, f_context):
share = _FakeShare()
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(return_value=share)))
namenode_group, namenode_executors = _mock_node_group(
_NAMENODE_IPS,
[{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'rw',
'path': '/mnt/localpath'
}])
datanode_group, datanode_executors = _mock_node_group(
_DATANODE_IPS, [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group], shares=[])
shares.mount_shares(cluster)
permissions = [mock.call('ip', ip, 'rw') for ip in _NAMENODE_IPS]
share.allow.assert_has_calls(permissions, any_order=True)
for executor in namenode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/localpath', '192.168.122.1:/path', '-w'))
for executor in datanode_executors:
self.assertEqual(0, executor.call_count)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_mount_nfs_shares_to_cluster(self, f_manilaclient, f_context):
global_share = _FakeShare()
namenode_only_share = _FakeShare(
id='DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF',
export_location='192.168.122.2:/path')
all_shares = {share.id: share for share in
(global_share, namenode_only_share)}
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(
side_effect=lambda x: all_shares[x])))
namenode_group, namenode_executors = _mock_node_group(
['192.168.122.3', '192.168.122.4'],
[
{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'rw',
'path': '/mnt/localpath'
},
{
'id': 'DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF'
}
])
datanode_group, datanode_executors = _mock_node_group(
['192.168.122.5', '192.168.122.6', '192.168.122.7'], [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group],
shares=[
{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'ro',
'path': '/mnt/somanylocalpaths'
}
])
shares.mount_shares(cluster)
all_permissions = [mock.call('ip', ip, 'ro')
for ip in _NAMENODE_IPS + _DATANODE_IPS]
global_share.allow.assert_has_calls(all_permissions, any_order=True)
namenode_permissions = [mock.call('ip', ip, 'rw')
for ip in _NAMENODE_IPS]
namenode_only_share.allow.assert_has_calls(namenode_permissions,
any_order=True)
for executor in namenode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/somanylocalpaths',
'192.168.122.1:/path', '-r') +
_expected_calls('/mnt/DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF',
'192.168.122.2:/path', '-w'),
any_order=True)
self.assertEqual(6, executor.call_count)
for executor in datanode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/somanylocalpaths',
'192.168.122.1:/path', '-r'))
self.assertEqual(4, executor.call_count)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_share_does_not_exist(self, f_manilaclient, f_context):
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(
side_effect=manila_ex.NotFound)))
namenode_group, namenode_executors = _mock_node_group(
['192.168.122.3', '192.168.122.4'],
[
{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'rw',
'path': '/mnt/localpath'
},
{
'id': 'DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF'
}
])
datanode_group, datanode_executors = _mock_node_group(
['192.168.122.5', '192.168.122.6', '192.168.122.7'], [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group],
shares=[
{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'ro',
'path': '/mnt/somanylocalpaths'
}
])
with testtools.ExpectedException(exceptions.NotFoundException):
shares.mount_shares(cluster)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_acl_exists_unexpected_type(self, f_manilaclient, f_context):
share = _FakeShare(access_list=[mock.Mock(
access_level='wat', access_to=ip, access_type='ip')
for ip in _NAMENODE_IPS])
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(return_value=share)))
namenode_group, namenode_executors = _mock_node_group(
_NAMENODE_IPS,
[{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'rw',
'path': '/mnt/localpath'
}])
datanode_group, datanode_executors = _mock_node_group(
_DATANODE_IPS, [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group], shares=[])
shares.mount_shares(cluster)
self.assertEqual(0, share.allow.call_count)
for executor in namenode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/localpath', '192.168.122.1:/path', '-w'))
for executor in datanode_executors:
self.assertEqual(0, executor.call_count)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_acl_exists_no_recreate(self, f_manilaclient, f_context):
share = _FakeShare(access_list=[mock.Mock(
access_level='rw', access_to=ip, access_type='ip')
for ip in _NAMENODE_IPS])
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(return_value=share)))
namenode_group, namenode_executors = _mock_node_group(
_NAMENODE_IPS,
[{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'ro',
'path': '/mnt/localpath'
}])
datanode_group, datanode_executors = _mock_node_group(
_DATANODE_IPS, [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group], shares=[])
shares.mount_shares(cluster)
self.assertEqual(0, share.allow.call_count)
for executor in namenode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/localpath', '192.168.122.1:/path', '-r'))
for executor in datanode_executors:
self.assertEqual(0, executor.call_count)
@mock.patch('sahara.context.set_current_instance_id')
@mock.patch('sahara.utils.openstack.manila.client')
def test_acl_exists_recreate(self, f_manilaclient, f_context):
share = _FakeShare(access_list=[mock.Mock(
access_level='ro', access_to=ip, access_type='ip', id="access_id")
for ip in _NAMENODE_IPS])
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(return_value=share)))
namenode_group, namenode_executors = _mock_node_group(
_NAMENODE_IPS,
[{
'id': '12345678-1234-1234-1234-123456789012',
'access_level': 'rw',
'path': '/mnt/localpath'
}])
datanode_group, datanode_executors = _mock_node_group(
_DATANODE_IPS, [])
cluster = mock.Mock(
node_groups=[namenode_group, datanode_group], shares=[])
shares.mount_shares(cluster)
namenode_denials = [mock.call('access_id')
for ip in _NAMENODE_IPS]
share.deny.assert_has_calls(namenode_denials)
namenode_permissions = [mock.call('ip', ip, 'rw')
for ip in _NAMENODE_IPS]
share.allow.assert_has_calls(namenode_permissions,
any_order=True)
for executor in namenode_executors:
executor.assert_has_calls(
_setup_calls() +
_expected_calls('/mnt/localpath', '192.168.122.1:/path', '-w'))
for executor in datanode_executors:
self.assertEqual(0, executor.call_count)
def test_get_share_path(self):
share_list = [
{'id': 'the_share_id',
'path': '/mnt/mymountpoint'},
{'id': 'the_share_id',
'path': '/mnt/othermountpoint'},
{'id': '123456',
'path': '/mnt/themountpoint'}
]
url = 'manila://the_share_id/the_path'
path = shares.get_share_path(url, share_list)
self.assertEqual("/mnt/mymountpoint/the_path", path)
share_list.pop(0)
path = shares.get_share_path(url, share_list)
self.assertEqual("/mnt/othermountpoint/the_path", path)
share_list.pop(0)
path = shares.get_share_path(url, share_list)
self.assertIsNone(path)
@mock.patch('sahara.utils.openstack.manila.client')
def test_get_share_path_default(self, f_manilaclient):
share_list = [
{'id': 'i_have_no_mnt'}
]
share = _FakeShare(share_list[0]['id'])
f_manilaclient.return_value = mock.Mock(
shares=mock.Mock(
get=mock.Mock(return_value=share)))
url = 'manila://i_have_no_mnt/the_path'
path = shares.get_share_path(url, share_list)
self.assertEqual("/mnt/i_have_no_mnt/the_path", path)
|
|
'''Testing numerical differentiation
Still some problems with the API (args tuple versus *args)
finite difference Hessian has some problems that I did not look at yet
Should Hessian also work per observation, if fun returns 2d
'''
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
import statsmodels.api as sm
from statsmodels.tools import numdiff
from statsmodels.tools.numdiff import (
approx_fprime,
approx_fprime_cs,
approx_hess_cs,
_approx_fprime_scalar,
_approx_fprime_cs_scalar
)
DEC3 = 3
DEC4 = 4
DEC5 = 5
DEC6 = 6
DEC8 = 8
DEC13 = 13
DEC14 = 14
def maxabs(x,y):
return np.abs(x-y).max()
def fun(beta, x):
return np.dot(x, beta).sum(0)
def fun1(beta, y, x):
#print(beta.shape, x.shape)
xb = np.dot(x, beta)
return (y-xb)**2 #(xb-xb.mean(0))**2
def fun2(beta, y, x):
#print(beta.shape, x.shape)
return fun1(beta, y, x).sum(0)
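# fun returns the summed linear predictor, fun1 the per-observation squared
# residuals (a 1d array) and fun2 their sum; the tests below compare numerical
# gradients and Hessians of these against closed-form derivatives.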
#ravel() added because of MNLogit 2d params
class CheckGradLoglikeMixin(object):
def test_score(self):
for test_params in self.params:
sc = self.mod.score(test_params)
scfd = numdiff.approx_fprime(test_params.ravel(),
self.mod.loglike)
assert_almost_equal(sc, scfd, decimal=1)
sccs = numdiff.approx_fprime_cs(test_params.ravel(),
self.mod.loglike)
assert_almost_equal(sc, sccs, decimal=11)
def test_hess(self):
for test_params in self.params:
he = self.mod.hessian(test_params)
hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
assert_almost_equal(he, hefd, decimal=DEC8)
#NOTE: notice the accuracy below
assert_almost_equal(he, hefd, decimal=7)
hefd = numdiff.approx_fprime(test_params, self.mod.score,
centered=True)
assert_allclose(he, hefd, rtol=1e-9)
hefd = numdiff.approx_fprime(test_params, self.mod.score,
centered=False)
assert_almost_equal(he, hefd, decimal=4)
hescs = numdiff.approx_fprime_cs(test_params.ravel(),
self.mod.score)
assert_allclose(he, hescs, rtol=1e-13)
hecs = numdiff.approx_hess_cs(test_params.ravel(),
self.mod.loglike)
assert_allclose(he, hecs, rtol=1e-9)
#NOTE: Look at the lack of precision - default epsilon not always
#best
grad = self.mod.score(test_params)
hecs, gradcs = numdiff.approx_hess1(test_params, self.mod.loglike,
1e-6, return_grad=True)
assert_almost_equal(he, hecs, decimal=1)
assert_almost_equal(grad, gradcs, decimal=1)
hecs, gradcs = numdiff.approx_hess2(test_params, self.mod.loglike,
1e-4, return_grad=True)
assert_almost_equal(he, hecs, decimal=3)
assert_almost_equal(grad, gradcs, decimal=1)
hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-5)
assert_almost_equal(he, hecs, decimal=4)
class TestGradMNLogit(CheckGradLoglikeMixin):
@classmethod
def setup_class(cls):
#from .results.results_discrete import Anes
data = sm.datasets.anes96.load()
data.exog = np.asarray(data.exog)
data.endog = np.asarray(data.endog)
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
cls.mod = sm.MNLogit(data.endog, exog)
#def loglikeflat(cls, params):
#reshapes flattened params
# return cls.loglike(params.reshape(6,6))
#cls.mod.loglike = loglikeflat #need instance method
#cls.params = [np.ones((6,6)).ravel()]
res = cls.mod.fit(disp=0)
cls.params = [res.params.ravel('F')]
def test_hess(self):
#NOTE: I had to overwrite this to lessen the tolerance
for test_params in self.params:
he = self.mod.hessian(test_params)
hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
assert_almost_equal(he, hefd, decimal=DEC8)
#NOTE: notice the accuracy below and the epsilon changes
# this does not work well for score -> hessian with non-cs step
# it's a little better around the optimum
assert_almost_equal(he, hefd, decimal=7)
hefd = numdiff.approx_fprime(test_params, self.mod.score,
centered=True)
assert_almost_equal(he, hefd, decimal=4)
hefd = numdiff.approx_fprime(test_params, self.mod.score, 1e-9,
centered=False)
assert_almost_equal(he, hefd, decimal=2)
hescs = numdiff.approx_fprime_cs(test_params, self.mod.score)
assert_almost_equal(he, hescs, decimal=DEC8)
hecs = numdiff.approx_hess_cs(test_params, self.mod.loglike)
assert_almost_equal(he, hecs, decimal=5)
#NOTE: these just do not work well
#hecs = numdiff.approx_hess1(test_params, self.mod.loglike, 1e-3)
#assert_almost_equal(he, hecs, decimal=1)
#hecs = numdiff.approx_hess2(test_params, self.mod.loglike, 1e-4)
#assert_almost_equal(he, hecs, decimal=0)
hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-4)
assert_almost_equal(he, hecs, decimal=0)
class TestGradLogit(CheckGradLoglikeMixin):
@classmethod
def setup_class(cls):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
#mod = sm.Probit(data.endog, data.exog)
cls.mod = sm.Logit(data.endog, data.exog)
#res = mod.fit(method="newton")
cls.params = [np.array([1,0.25,1.4,-7])]
##loglike = mod.loglike
##score = mod.score
##hess = mod.hessian
class CheckDerivativeMixin(object):
@classmethod
def setup_class(cls):
nobs = 200
#x = np.arange(nobs*3).reshape(nobs,-1)
np.random.seed(187678)
x = np.random.randn(nobs,3)
xk = np.array([1,2,3])
xk = np.array([1.,1.,1.])
#xk = np.zeros(3)
beta = xk
y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
xkols = np.dot(np.linalg.pinv(x),y)
cls.x = x
cls.y = y
cls.params = [np.array([1.,1.,1.]), xkols]
cls.init()
@classmethod
def init(cls):
pass
def test_grad_fun1_fd(self):
for test_params in self.params:
#gtrue = self.x.sum(0)
gtrue = self.gradtrue(test_params)
fun = self.fun()
epsilon = 1e-6
gfd = numdiff.approx_fprime(test_params, fun, epsilon=epsilon,
args=self.args)
gfd += numdiff.approx_fprime(test_params, fun, epsilon=-epsilon,
args=self.args)
gfd /= 2.
assert_almost_equal(gtrue, gfd, decimal=DEC6)
def test_grad_fun1_fdc(self):
for test_params in self.params:
#gtrue = self.x.sum(0)
gtrue = self.gradtrue(test_params)
fun = self.fun()
# default epsilon of 1e-6 is not precise enough here
gfd = numdiff.approx_fprime(test_params, fun, epsilon=1e-8,
args=self.args, centered=True)
assert_almost_equal(gtrue, gfd, decimal=DEC5)
def test_grad_fun1_cs(self):
for test_params in self.params:
#gtrue = self.x.sum(0)
gtrue = self.gradtrue(test_params)
fun = self.fun()
gcs = numdiff.approx_fprime_cs(test_params, fun, args=self.args)
assert_almost_equal(gtrue, gcs, decimal=DEC13)
def test_hess_fun1_fd(self):
for test_params in self.params:
#hetrue = 0
hetrue = self.hesstrue(test_params)
if hetrue is not None: #Hessian does not work for 2d return of fun
fun = self.fun()
#default works, epsilon 1e-6 or 1e-8 is not precise enough
hefd = numdiff.approx_hess1(test_params, fun, #epsilon=1e-8,
# TODO: should be kwds
args=self.args)
assert_almost_equal(hetrue, hefd, decimal=DEC3)
#TODO: I reduced precision to DEC3 from DEC4 because of
# TestDerivativeFun
hefd = numdiff.approx_hess2(test_params, fun, #epsilon=1e-8,
# TODO: should be kwds
args=self.args)
assert_almost_equal(hetrue, hefd, decimal=DEC3)
hefd = numdiff.approx_hess3(test_params, fun, #epsilon=1e-8,
# TODO: should be kwds
args=self.args)
assert_almost_equal(hetrue, hefd, decimal=DEC3)
def test_hess_fun1_cs(self):
for test_params in self.params:
#hetrue = 0
hetrue = self.hesstrue(test_params)
if hetrue is not None: #Hessian does not work for 2d return of fun
fun = self.fun()
hecs = numdiff.approx_hess_cs(test_params, fun, args=self.args)
assert_almost_equal(hetrue, hecs, decimal=DEC6)
class TestDerivativeFun(CheckDerivativeMixin):
@classmethod
def setup_class(cls):
super(TestDerivativeFun,cls).setup_class()
xkols = np.dot(np.linalg.pinv(cls.x), cls.y)
cls.params = [np.array([1.,1.,1.]), xkols]
cls.args = (cls.x,)
def fun(self):
return fun
def gradtrue(self, params):
return self.x.sum(0)
def hesstrue(self, params):
return np.zeros((3,3)) #make it (3,3), because test fails with scalar 0
#why is precision only DEC3
class TestDerivativeFun2(CheckDerivativeMixin):
@classmethod
def setup_class(cls):
super(TestDerivativeFun2,cls).setup_class()
xkols = np.dot(np.linalg.pinv(cls.x), cls.y)
cls.params = [np.array([1.,1.,1.]), xkols]
cls.args = (cls.y, cls.x)
def fun(self):
return fun2
def gradtrue(self, params):
y, x = self.y, self.x
return (-x*2*(y-np.dot(x, params))[:,None]).sum(0)
#2*(y-np.dot(x, params)).sum(0)
def hesstrue(self, params):
x = self.x
return 2*np.dot(x.T, x)
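# For fun2(beta, y, x) = sum_i (y_i - x_i'beta)**2 the closed forms used above
# follow directly: grad = -2 * x.T @ (y - x @ beta) (gradtrue sums the
# per-observation rows) and hess = 2 * x.T @ x.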
class TestDerivativeFun1(CheckDerivativeMixin):
@classmethod
def setup_class(cls):
super(TestDerivativeFun1, cls).setup_class()
xkols = np.dot(np.linalg.pinv(cls.x), cls.y)
cls.params = [np.array([1.,1.,1.]), xkols]
cls.args = (cls.y, cls.x)
def fun(self):
return fun1
def gradtrue(self, params):
y, x = self.y, self.x
return (-x*2*(y-np.dot(x, params))[:,None])
def hesstrue(self, params):
return None
y, x = self.y, self.x
return (-x*2*(y-np.dot(x, params))[:,None]) #TODO: check shape
def test_dtypes():
def f(x):
return 2*x
desired = np.array([[2, 0],
[0, 2]])
assert_allclose(approx_fprime(np.array([1, 2]), f), desired)
assert_allclose(approx_fprime(np.array([1., 2.]), f), desired)
assert_allclose(approx_fprime(np.array([1.+0j, 2.+0j]), f), desired)
def test_vectorized():
def f(x):
return 2*x
desired = np.array([2, 2])
# vectorized parameter, column vector
p = np.array([[1, 2]]).T
assert_allclose(_approx_fprime_scalar(p, f), desired[:, None], rtol=1e-8)
assert_allclose(_approx_fprime_scalar(p.squeeze(), f),
desired, rtol=1e-8)
assert_allclose(_approx_fprime_cs_scalar(p, f), desired[:, None],
rtol=1e-8)
assert_allclose(_approx_fprime_cs_scalar(p.squeeze(), f),
desired, rtol=1e-8)
# check 2-d row, see #7680
# not allowed/implemented for approx_fprime, raises broadcast ValueError
# assert_allclose(approx_fprime(p.T, f), desired, rtol=1e-8)
# similar as used in MarkovSwitching unit test
assert_allclose(approx_fprime_cs(p.T, f).squeeze(), desired, rtol=1e-8)
if __name__ == '__main__': # FIXME: turn into tests or move/remove
epsilon = 1e-6
nobs = 200
x = np.arange(nobs*3).reshape(nobs,-1)
x = np.random.randn(nobs,3)
xk = np.array([1,2,3])
xk = np.array([1.,1.,1.])
#xk = np.zeros(3)
beta = xk
y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
xkols = np.dot(np.linalg.pinv(x),y)
print(approx_fprime((1,2,3),fun,epsilon,x))
gradtrue = x.sum(0)
print(x.sum(0))
gradcs = approx_fprime_cs((1,2,3), fun, (x,), h=1.0e-20)
print(gradcs, maxabs(gradcs, gradtrue))
print(approx_hess_cs((1,2,3), fun, (x,), h=1.0e-20)) #this is correctly zero
print(approx_hess_cs((1,2,3), fun2, (y,x), h=1.0e-20)-2*np.dot(x.T, x))
print(numdiff.approx_hess(xk,fun2,1e-3, (y,x))[0] - 2*np.dot(x.T, x))
gt = (-x*2*(y-np.dot(x, [1,2,3]))[:,None])
g = approx_fprime_cs((1,2,3), fun1, (y,x), h=1.0e-20)#.T #this should not be transposed
gd = numdiff.approx_fprime((1,2,3),fun1,epsilon,(y,x))
print(maxabs(g, gt))
print(maxabs(gd, gt))
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
#mod = sm.Probit(data.endog, data.exog)
mod = sm.Logit(data.endog, data.exog)
#res = mod.fit(method="newton")
test_params = [1,0.25,1.4,-7]
loglike = mod.loglike
score = mod.score
hess = mod.hessian
#cs does not work for Probit because special.ndtr does not support complex
#maybe calculating ndtr for real and imag parts separately, if we need it
#and if it still works in this case
print('sm', score(test_params))
print('fd', numdiff.approx_fprime(test_params,loglike,epsilon))
print('cs', numdiff.approx_fprime_cs(test_params,loglike))
print('sm', hess(test_params))
print('fd', numdiff.approx_fprime(test_params,score,epsilon))
print('cs', numdiff.approx_fprime_cs(test_params, score))
hesscs = numdiff.approx_hess_cs(test_params, loglike)
print('cs', hesscs)
print(maxabs(hess(test_params), hesscs))
data = sm.datasets.anes96.load()
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
datap = sm.datasets.randhie.load()
nobs = len(datap.endog)
exogp = sm.add_constant(datap.exog.view(float).reshape(nobs,-1),
prepend=False)
modp = sm.Poisson(datap.endog, exogp)
resp = modp.fit(method='newton', disp=0)
|
|
import os
import numpy as np
from moviepy.audio.io.ffmpeg_audiowriter import ffmpeg_audiowrite
from moviepy.decorators import requires_duration
from moviepy.tools import (deprecated_version_of,
extensions_dict)
from moviepy.Clip import Clip
from tqdm import tqdm
class AudioClip(Clip):
""" Base class for audio clips.
    See ``AudioArrayClip`` and ``CompositeAudioClip`` for usable classes.
An AudioClip is a Clip with a ``make_frame`` attribute of
the form `` t -> [ f_t ]`` for mono sound and
``t-> [ f1_t, f2_t ]`` for stereo sound (the arrays are Numpy arrays).
The `f_t` are floats between -1 and 1. These bounds can be
    exceeded without problems (the program will put the
sound back into the bounds at conversion time, without much impact).
Parameters
-----------
make_frame
A function `t-> frame at time t`. The frame does not mean much
for a sound, it is just a float. What 'makes' the sound are
      the variations of that float over time.
nchannels
Number of channels (one or two for mono or stereo).
Examples
---------
    >>> # Plays the note A (a sine wave of frequency 440 Hz)
    >>> import numpy as np
    >>> make_frame = lambda t : 2*[ np.sin(440 * 2 * np.pi * t) ]
>>> clip = AudioClip(make_frame, duracion=5)
>>> clip.preview()
"""
def __init__(self, make_frame = None, duracion=None):
Clip.__init__(self)
if make_frame is not None:
self.make_frame = make_frame
frame0 = self.get_frame(0)
if hasattr(frame0, '__iter__'):
self.nchannels = len(list(frame0))
else:
self.nchannels = 1
if duracion is not None:
self.duracion = duracion
self.fin = duracion
@requires_duration
def iter_chunks(self, chunksize=None, chunk_duration=None, fps=None,
quantize=False, nbytes=2, progress_bar=False):
""" Iterator that returns the whole sound array of the clip by chunks
"""
if fps is None:
fps=self.fps
if chunk_duration is not None:
chunksize = int(chunk_duration*fps)
totalsize = int(fps*self.duracion)
if (totalsize % chunksize == 0):
nchunks = totalsize // chunksize
else:
nchunks = totalsize // chunksize + 1
pospos = list(range(0, totalsize, chunksize))+[totalsize]
def generator():
for i in range(nchunks):
tt = (1.0/fps)*np.arange(pospos[i],pospos[i+1])
yield self.to_soundarray(tt, nbytes= nbytes, quantize=quantize, fps=fps,
buffersize=chunksize)
if progress_bar:
return tqdm(generator(), total=nchunks)
else:
return generator()
@requires_duration
def to_soundarray(self,tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000):
"""
Transforms the sound into an array that can be played by pygame
or written in a wav file. See ``AudioClip.preview``.
Parameters
------------
fps
Frame rate of the sound for the conversion.
44100 for top quality.
nbytes
Number of bytes to encode the sound: 1 for 8bit sound,
2 for 16bit, 4 for 32bit sound.
"""
if fps is None:
fps = self.fps
stacker = np.vstack if self.nchannels==2 else np.hstack
max_duration = 1.0 * buffersize / fps
if (tt is None):
if self.duracion>max_duration:
return stacker(self.iter_chunks(fps=fps, quantize=quantize, nbytes=2,
chunksize=buffersize))
else:
tt = np.arange(0, self.duracion, 1.0/fps)
"""
elif len(tt)> 1.5*buffersize:
nchunks = int(len(tt)/buffersize+1)
tt_chunks = np.array_split(tt, nchunks)
return stacker([self.to_soundarray(tt=ttc, buffersize=buffersize, fps=fps,
quantize=quantize, nbytes=nbytes)
for ttc in tt_chunks])
"""
#print tt.max() - tt.min(), tt.min(), tt.max()
snd_array = self.get_frame(tt)
if quantize:
snd_array = np.maximum(-0.99, np.minimum(0.99,snd_array))
inttype = {1:'int8',2:'int16', 4:'int32'}[nbytes]
snd_array= (2**(8*nbytes-1)*snd_array).astype(inttype)
return snd_array
def max_volume(self, stereo=False, chunksize=50000, progress_bar=False):
stereo = stereo and (self.nchannels == 2)
maxi = np.array([0,0]) if stereo else 0
for chunk in self.iter_chunks(chunksize=chunksize, progress_bar=progress_bar):
maxi = np.maximum(maxi,abs(chunk).max(axis=0)) if stereo else max(maxi,abs(chunk).max())
return maxi
@requires_duration
def write_audiofile(self,filename, fps=44100, nbytes=2,
buffersize=2000, codec=None,
bitrate=None, ffmpeg_params=None,
write_logfile=False, verbose=True):
""" Writes an audio file from the AudioClip.
Parameters
-----------
filename
Name of the output file
fps
Frames per second
    nbytes
Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound)
codec
      Which audio codec should be used. If None is provided, the codec is
determined based on the extension of the filename. Choose
'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav.
bitrate
Audio bitrate, given as a string like '50k', '500k', '3000k'.
      Will determine the size and quality of the output file.
      Note that it is mainly an indicative goal; the bitrate won't
      necessarily match this value in the output file.
ffmpeg_params
Any additional parameters you would like to pass, as a list
of terms, like ['-option1', 'value1', '-option2', 'value2']
write_logfile
If true, produces a detailed logfile named filename + '.log'
when writing the file
verbose
      If True, displays information
"""
if codec is None:
name, ext = os.path.splitext(os.path.basename(filename))
try:
codec = extensions_dict[ext[1:]]['codec'][0]
except KeyError:
raise ValueError("MoviePy couldn't find the codec associated "
"with the filename. Provide the 'codec' parameter in "
"write_videofile.")
return ffmpeg_audiowrite(self, filename, fps, nbytes, buffersize,
codec=codec, bitrate=bitrate, write_logfile=write_logfile,
verbose=verbose, ffmpeg_params=ffmpeg_params)
###
#
# The to_audiofile method is replaced by the more explicit write_audiofile.
AudioClip.to_audiofile = deprecated_version_of(AudioClip.write_audiofile,
'to_audiofile')
###
class AudioArrayClip(AudioClip):
"""
An audio clip made from a sound array.
Parameters
-----------
array
      A Numpy array representing the sound, of size Nx1 for mono,
Nx2 for stereo.
fps
Frames per second : speed at which the sound is supposed to be
played.
"""
def __init__(self, array, fps):
Clip.__init__(self)
self.array = array
self.fps = fps
self.duracion = 1.0 * len(array) / fps
def make_frame(t):
""" complicated, but must be able to handle the case where t
is a list of the form sin(t) """
if isinstance(t, np.ndarray):
array_inds = (self.fps*t).astype(int)
in_array = (array_inds>0) & (array_inds < len(self.array))
result = np.zeros((len(t),2))
result[in_array] = self.array[array_inds[in_array]]
return result
else:
i = int(self.fps * t)
if i < 0 or i >= len(self.array):
return 0*self.array[0]
else:
return self.array[i]
self.make_frame = make_frame
self.nchannels = len(list(self.get_frame(0)))
class CompositeAudioClip(AudioClip):
""" Clip made by composing several AudioClips.
An audio clip made by putting together several audio clips.
Parameters
------------
clips
      List of audio clips, which may start playing at different times or
      together. If all have their ``duracion`` attribute set, the
      duration of the composite clip is computed automatically.
"""
def __init__(self, clips):
Clip.__init__(self)
self.clips = clips
ends = [c.fin for c in self.clips]
self.nchannels = max([c.nchannels for c in self.clips])
if not any([(e is None) for e in ends]):
self.duracion = max(ends)
self.fin = max(ends)
def make_frame(t):
played_parts = [c.is_playing(t) for c in self.clips]
sounds= [c.get_frame(t - c.inicia)*np.array([part]).T
for c,part in zip(self.clips, played_parts)
if (part is not False) ]
if isinstance(t,np.ndarray):
zero = np.zeros((len(t),self.nchannels))
else:
zero = np.zeros(self.nchannels)
return zero + sum(sounds)
self.make_frame = make_frame
def concatenate_audioclips(clips):
durations = [c.duracion for c in clips]
    tt = np.cumsum([0]+durations) # start times, and end time.
newclips= [c.set_start(t) for c,t in zip(clips, tt)]
return CompositeAudioClip(newclips).set_duracion(tt[-1])
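# A minimal usage sketch (hypothetical file name; assumes the Clip base class
# provides the set_start/set_duracion helpers used by concatenate_audioclips):
#
#   import numpy as np
#   tone = AudioClip(lambda t: np.sin(2 * np.pi * 440 * t), duracion=2)
#   tone.fps = 44100
#   song = concatenate_audioclips([tone, tone])   # 4 seconds in total
#   song.write_audiofile("tone.wav", fps=44100)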
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import vision_v1p3beta1
from google.cloud.vision_v1p3beta1.proto import product_search_service_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
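# The stubs above stand in for a real gRPC channel: every unary-unary call goes
# through MultiCallableStub.__call__, which records (method, request) on the
# ChannelStub and pops the next canned response, raising it instead if it is an
# Exception. The tests below then assert on channel.requests and on the value
# returned by the client call.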
class TestProductSearchClient(object):
def test_create_product_set(self):
# Setup Expected Response
name = 'name3373707'
display_name = 'displayName1615086568'
expected_response = {'name': name, 'display_name': display_name}
expected_response = product_search_service_pb2.ProductSet(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
parent = client.location_path('[PROJECT]', '[LOCATION]')
product_set = {}
product_set_id = 'productSetId4216680'
response = client.create_product_set(parent, product_set,
product_set_id)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.CreateProductSetRequest(
parent=parent,
product_set=product_set,
product_set_id=product_set_id)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_product_set_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
parent = client.location_path('[PROJECT]', '[LOCATION]')
product_set = {}
product_set_id = 'productSetId4216680'
with pytest.raises(CustomException):
client.create_product_set(parent, product_set, product_set_id)
def test_list_product_sets(self):
# Setup Expected Response
next_page_token = ''
product_sets_element = {}
product_sets = [product_sets_element]
expected_response = {
'next_page_token': next_page_token,
'product_sets': product_sets
}
expected_response = product_search_service_pb2.ListProductSetsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
parent = client.location_path('[PROJECT]', '[LOCATION]')
paged_list_response = client.list_product_sets(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.product_sets[0] == resources[0]
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.ListProductSetsRequest(
parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_product_sets_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
parent = client.location_path('[PROJECT]', '[LOCATION]')
paged_list_response = client.list_product_sets(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_product_set(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
display_name = 'displayName1615086568'
expected_response = {'name': name_2, 'display_name': display_name}
expected_response = product_search_service_pb2.ProductSet(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
name = client.product_set_path('[PROJECT]', '[LOCATION]',
'[PRODUCT_SET]')
response = client.get_product_set(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.GetProductSetRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_product_set_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
name = client.product_set_path('[PROJECT]', '[LOCATION]',
'[PRODUCT_SET]')
with pytest.raises(CustomException):
client.get_product_set(name)
def test_update_product_set(self):
# Setup Expected Response
name = 'name3373707'
display_name = 'displayName1615086568'
expected_response = {'name': name, 'display_name': display_name}
expected_response = product_search_service_pb2.ProductSet(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
product_set = {}
update_mask = {}
response = client.update_product_set(product_set, update_mask)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.UpdateProductSetRequest(
product_set=product_set, update_mask=update_mask)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_product_set_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
product_set = {}
update_mask = {}
with pytest.raises(CustomException):
client.update_product_set(product_set, update_mask)
def test_delete_product_set(self):
channel = ChannelStub()
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
name = client.product_set_path('[PROJECT]', '[LOCATION]',
'[PRODUCT_SET]')
client.delete_product_set(name)
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.DeleteProductSetRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_product_set_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
name = client.product_set_path('[PROJECT]', '[LOCATION]',
'[PRODUCT_SET]')
with pytest.raises(CustomException):
client.delete_product_set(name)
def test_create_product(self):
# Setup Expected Response
name = 'name3373707'
display_name = 'displayName1615086568'
description = 'description-1724546052'
product_category = 'productCategory-1607451058'
expected_response = {
'name': name,
'display_name': display_name,
'description': description,
'product_category': product_category
}
expected_response = product_search_service_pb2.Product(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
parent = client.location_path('[PROJECT]', '[LOCATION]')
product = {}
product_id = 'productId1753008747'
response = client.create_product(parent, product, product_id)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.CreateProductRequest(
parent=parent, product=product, product_id=product_id)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_product_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
parent = client.location_path('[PROJECT]', '[LOCATION]')
product = {}
product_id = 'productId1753008747'
with pytest.raises(CustomException):
client.create_product(parent, product, product_id)
def test_list_products(self):
# Setup Expected Response
next_page_token = ''
products_element = {}
products = [products_element]
expected_response = {
'next_page_token': next_page_token,
'products': products
}
expected_response = product_search_service_pb2.ListProductsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
parent = client.location_path('[PROJECT]', '[LOCATION]')
paged_list_response = client.list_products(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.products[0] == resources[0]
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.ListProductsRequest(
parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_products_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
parent = client.location_path('[PROJECT]', '[LOCATION]')
paged_list_response = client.list_products(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_product(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
display_name = 'displayName1615086568'
description = 'description-1724546052'
product_category = 'productCategory-1607451058'
expected_response = {
'name': name_2,
'display_name': display_name,
'description': description,
'product_category': product_category
}
expected_response = product_search_service_pb2.Product(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
name = client.product_path('[PROJECT]', '[LOCATION]', '[PRODUCT]')
response = client.get_product(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.GetProductRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_product_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
name = client.product_path('[PROJECT]', '[LOCATION]', '[PRODUCT]')
with pytest.raises(CustomException):
client.get_product(name)
def test_update_product(self):
# Setup Expected Response
name = 'name3373707'
display_name = 'displayName1615086568'
description = 'description-1724546052'
product_category = 'productCategory-1607451058'
expected_response = {
'name': name,
'display_name': display_name,
'description': description,
'product_category': product_category
}
expected_response = product_search_service_pb2.Product(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
product = {}
update_mask = {}
response = client.update_product(product, update_mask)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.UpdateProductRequest(
product=product, update_mask=update_mask)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_product_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
product = {}
update_mask = {}
with pytest.raises(CustomException):
client.update_product(product, update_mask)
def test_delete_product(self):
channel = ChannelStub()
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
name = client.product_path('[PROJECT]', '[LOCATION]', '[PRODUCT]')
client.delete_product(name)
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.DeleteProductRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_product_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
name = client.product_path('[PROJECT]', '[LOCATION]', '[PRODUCT]')
with pytest.raises(CustomException):
client.delete_product(name)
def test_create_reference_image(self):
# Setup Expected Response
name = 'name3373707'
uri = 'uri116076'
expected_response = {'name': name, 'uri': uri}
expected_response = product_search_service_pb2.ReferenceImage(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
parent = client.product_path('[PROJECT]', '[LOCATION]', '[PRODUCT]')
reference_image = {}
reference_image_id = 'referenceImageId1946713331'
response = client.create_reference_image(parent, reference_image,
reference_image_id)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.CreateReferenceImageRequest(
parent=parent,
reference_image=reference_image,
reference_image_id=reference_image_id)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_reference_image_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
parent = client.product_path('[PROJECT]', '[LOCATION]', '[PRODUCT]')
reference_image = {}
reference_image_id = 'referenceImageId1946713331'
with pytest.raises(CustomException):
client.create_reference_image(parent, reference_image,
reference_image_id)
def test_delete_reference_image(self):
channel = ChannelStub()
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
name = client.reference_image_path('[PROJECT]', '[LOCATION]',
'[PRODUCT]', '[REFERENCE_IMAGE]')
client.delete_reference_image(name)
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.DeleteReferenceImageRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_reference_image_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
name = client.reference_image_path('[PROJECT]', '[LOCATION]',
'[PRODUCT]', '[REFERENCE_IMAGE]')
with pytest.raises(CustomException):
client.delete_reference_image(name)
def test_list_reference_images(self):
# Setup Expected Response
page_size = 883849137
next_page_token = ''
reference_images_element = {}
reference_images = [reference_images_element]
expected_response = {
'page_size': page_size,
'next_page_token': next_page_token,
'reference_images': reference_images
}
expected_response = product_search_service_pb2.ListReferenceImagesResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
parent = client.product_path('[PROJECT]', '[LOCATION]', '[PRODUCT]')
paged_list_response = client.list_reference_images(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.reference_images[0] == resources[0]
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.ListReferenceImagesRequest(
parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_reference_images_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
parent = client.product_path('[PROJECT]', '[LOCATION]', '[PRODUCT]')
paged_list_response = client.list_reference_images(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_reference_image(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
uri = 'uri116076'
expected_response = {'name': name_2, 'uri': uri}
expected_response = product_search_service_pb2.ReferenceImage(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
name = client.reference_image_path('[PROJECT]', '[LOCATION]',
'[PRODUCT]', '[REFERENCE_IMAGE]')
response = client.get_reference_image(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.GetReferenceImageRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_reference_image_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
name = client.reference_image_path('[PROJECT]', '[LOCATION]',
'[PRODUCT]', '[REFERENCE_IMAGE]')
with pytest.raises(CustomException):
client.get_reference_image(name)
def test_add_product_to_product_set(self):
channel = ChannelStub()
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
name = client.product_set_path('[PROJECT]', '[LOCATION]',
'[PRODUCT_SET]')
product = 'product-309474065'
client.add_product_to_product_set(name, product)
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.AddProductToProductSetRequest(
name=name, product=product)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_add_product_to_product_set_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
name = client.product_set_path('[PROJECT]', '[LOCATION]',
'[PRODUCT_SET]')
product = 'product-309474065'
with pytest.raises(CustomException):
client.add_product_to_product_set(name, product)
def test_remove_product_from_product_set(self):
channel = ChannelStub()
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
name = client.product_set_path('[PROJECT]', '[LOCATION]',
'[PRODUCT_SET]')
product = 'product-309474065'
client.remove_product_from_product_set(name, product)
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.RemoveProductFromProductSetRequest(
name=name, product=product)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_remove_product_from_product_set_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
name = client.product_set_path('[PROJECT]', '[LOCATION]',
'[PRODUCT_SET]')
product = 'product-309474065'
with pytest.raises(CustomException):
client.remove_product_from_product_set(name, product)
def test_list_products_in_product_set(self):
# Setup Expected Response
next_page_token = ''
products_element = {}
products = [products_element]
expected_response = {
'next_page_token': next_page_token,
'products': products
}
expected_response = product_search_service_pb2.ListProductsInProductSetResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
name = client.product_set_path('[PROJECT]', '[LOCATION]',
'[PRODUCT_SET]')
paged_list_response = client.list_products_in_product_set(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.products[0] == resources[0]
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.ListProductsInProductSetRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_products_in_product_set_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup request
name = client.product_set_path('[PROJECT]', '[LOCATION]',
'[PRODUCT_SET]')
paged_list_response = client.list_products_in_product_set(name)
with pytest.raises(CustomException):
list(paged_list_response)
def test_import_product_sets(self):
# Setup Expected Response
expected_response = {}
expected_response = product_search_service_pb2.ImportProductSetsResponse(
**expected_response)
operation = operations_pb2.Operation(
name='operations/test_import_product_sets', done=True)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
parent = client.location_path('[PROJECT]', '[LOCATION]')
input_config = {}
response = client.import_product_sets(parent, input_config)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = product_search_service_pb2.ImportProductSetsRequest(
parent=parent, input_config=input_config)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_import_product_sets_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name='operations/test_import_product_sets_exception', done=True)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1p3beta1.ProductSearchClient()
# Setup Request
parent = client.location_path('[PROJECT]', '[LOCATION]')
input_config = {}
response = client.import_product_sets(parent, input_config)
exception = response.exception()
assert exception.errors[0] == error
|
|
from django.db.models import Q
from django.db import models
from django.contrib.contenttypes.models import ContentType
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey
from .generic import GFKOptimizedQuerySet
class RelatedResourceManager(models.Manager):
def get_content_type(self, content_type, **kwargs):
"""
Get all the items of the given content type related to this item.
"""
qs = self.get_queryset()
return qs.filter(content_type__name=content_type, **kwargs)
def get_relation_source(self, relation_source, **kwargs):
"""
Get all the items of the given relation source to this item
"""
qs = self.get_queryset()
return qs.filter(relation_source=relation_source, **kwargs)
class RelatedResource(models.Model):
"""
A resource related to another object
"""
# SOURCE OBJECT
source_type = models.ForeignKey(ContentType,
related_name="child_relatedobjects")
source_id = models.IntegerField(db_index=True)
source = GenericForeignKey(
ct_field="source_type",
fk_field="source_id")
# ACTUAL RELATED OBJECT:
object_type = models.ForeignKey(ContentType,
related_name="related_objects",)
object_id = models.IntegerField(db_index=True)
object = GenericForeignKey(
ct_field="object_type",
fk_field="object_id")
# METADATA
relation_source = models.IntegerField(
editable=False,
blank=True, null=True)
relation_type = models.CharField(
max_length=255,
blank=True,
null=True)
order = models.IntegerField(
blank=True, null=True)
objects = RelatedResourceManager()
class Meta:
ordering = ('order', )
def __unicode__(self):
out = u'%s related to %s' % (self.source, self.object)
if self.relation_type:
return "%s (%s)" % (out, self.relation_type)
else:
return out
class RelatedObjectsDescriptor(object):
def __init__(self, model=None, from_field='source', to_field='object'):
self.related_model = model or RelatedResource
self.from_field = self.get_related_model_field(from_field)
self.to_field = self.get_related_model_field(to_field)
def get_related_model_field(self, field_name):
opts = self.related_model._meta
for virtual_field in opts.virtual_fields:
if virtual_field.name == field_name:
return virtual_field
return opts.get_field(field_name)
def is_gfk(self, field):
return isinstance(field, GenericForeignKey)
def get_query_for_field(self, instance, field):
if self.is_gfk(field):
ctype = ContentType.objects.get_for_model(instance)
return {
field.ct_field: ctype,
field.fk_field: instance.pk
}
elif isinstance(instance, field.rel.to):
return {field.name: instance}
raise TypeError('Unable to query %s with %s' % (field, instance))
def get_query_from(self, instance):
return self.get_query_for_field(instance, self.from_field)
def get_query_to(self, instance):
return self.get_query_for_field(instance, self.to_field)
def set_attributes_from_name(self, name):
self.name = name
self.concrete = False
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, virtual_only=False):
self.set_attributes_from_name(name)
self.model = cls
# if virtual_only:
# cls._meta.add_virtual_field(self)
# else:
# cls._meta.add_field(self)
setattr(cls, self.name, self)
def __get__(self, instance, cls=None):
if instance is None:
return self
ManagerClass = type(self.related_model._default_manager) # NOQA
return self.create_manager(instance, ManagerClass)
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
manager = self.__get__(instance)
manager.add(*value)
def delete_manager(self, instance):
return self.create_manager(instance,
self.related_model._base_manager.__class__)
def create_manager(self, instance, superclass, cf_from=True):
rel_obj = self
if cf_from:
core_filters = self.get_query_from(instance)
rel_field = self.to_field
else:
core_filters = self.get_query_to(instance)
rel_field = self.from_field
uses_gfk = self.is_gfk(rel_field)
class RelatedManager(superclass):
def get_queryset(self):
if uses_gfk:
qs = GFKOptimizedQuerySet(self.model, gfk_field=rel_field)
return qs.filter(**(core_filters))
else:
return superclass.get_queryset(self).filter(**(core_filters))
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                    for (k, v) in core_filters.items():
setattr(obj, k, v)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update(core_filters)
return super(RelatedManager, self).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs.update(core_filters)
return super(RelatedManager, self).get_or_create(**kwargs)
get_or_create.alters_data = True
def remove(self, *objs):
for obj in objs:
# Is obj actually part of this descriptor set?
if obj in self.all():
obj.delete()
else:
                        raise rel_obj.related_model.DoesNotExist(
                            "%r is not related to %r." % (obj, instance))
remove.alters_data = True
def clear(self):
self.all().delete()
clear.alters_data = True
def connect(self, obj, **kwargs):
kwargs.update(rel_obj.get_query_to(obj))
connection, created = self.get_or_create(**kwargs)
return connection
def related_to(self):
mgr = rel_obj.create_manager(instance, superclass, False)
return mgr.filter(
**rel_obj.get_query_to(instance)
)
def symmetrical(self):
return superclass.get_queryset(self).filter(
Q(**rel_obj.get_query_from(instance)) |
Q(**rel_obj.get_query_to(instance))
).distinct()
manager = RelatedManager()
manager.core_filters = core_filters
manager.model = self.related_model
return manager
def all(self):
if self.is_gfk(self.from_field):
ctype = ContentType.objects.get_for_model(self.model)
query = {self.from_field.ct_field: ctype}
else:
query = {}
return self.related_model._default_manager.filter(**query)
class ReverseRelatedObjectsDescriptor(RelatedObjectsDescriptor):
def __init__(self, model=None, from_field='object', to_field='source'):
super(ReverseRelatedObjectsDescriptor, self).__init__(model, from_field, to_field)
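# A minimal usage sketch (illustration only; ``Article`` and ``photo`` are
# hypothetical, not part of this module).  Because the descriptor implements
# ``contribute_to_class``, it can be attached to a model like a field:
#
#     class Article(models.Model):
#         related = RelatedObjectsDescriptor()
#
#     article.related.connect(photo)    # get_or_create a RelatedResource row
#     article.related.all()             # rows whose source is this article
#     article.related.related_to()      # rows where this article is the object
#     Article.related.all()             # all rows whose source is an Article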
|
|
"""
pygments.util
~~~~~~~~~~~~~
Utility functions.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from io import TextIOWrapper
split_path_re = re.compile(r'[/\\ ]')
doctype_lookup_re = re.compile(r'''
<!DOCTYPE\s+(
[a-zA-Z_][a-zA-Z0-9]*
(?: \s+ # optional in HTML5
[a-zA-Z_][a-zA-Z0-9]*\s+
"[^"]*")?
)
[^>]*>
''', re.DOTALL | re.MULTILINE | re.VERBOSE)
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
re.UNICODE | re.IGNORECASE | re.DOTALL | re.MULTILINE)
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
class ClassNotFound(ValueError):
"""Raised if one of the lookup functions didn't find a matching class."""
class OptionError(Exception):
pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option %s must be one of %s' %
(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, str):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError('Invalid value %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except ValueError:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname))
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
if isinstance(val, str):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError('Invalid type %r for option %s; you '
'must give a list value' % (
val, optname))
def docstring_headline(obj):
if not obj.__doc__:
return ''
res = []
for line in obj.__doc__.strip().splitlines():
if line.strip():
res.append(" " + line.strip())
else:
break
return ''.join(res).lstrip()
def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
def shebang_matches(text, regex):
r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
def doctype_matches(text, regex):
"""Check if the doctype matches a regular expression (if present).
Note that this method only checks the first part of a DOCTYPE.
eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
"""
m = doctype_lookup_re.search(text)
if m is None:
return False
doctype = m.group(1)
return re.compile(regex, re.I).match(doctype.strip()) is not None
def html_doctype_matches(text):
"""Check if the file looks like it has a html doctype."""
return doctype_matches(text, r'html')
_looks_like_xml_cache = {}
def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.search(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
def surrogatepair(c):
"""Given a unicode character code with length greater than 16 bits,
return the two 16 bit surrogate pair.
"""
# From example D28 of:
# http://www.unicode.org/book/ch03.pdf
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
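# Worked example (illustration only): for U+1F600, 0x1F600 >> 10 == 0x7D and
# 0x1F600 & 0x3FF == 0x200, so surrogatepair(0x1F600) == (0xD83D, 0xDE00),
# matching the UTF-16 surrogate pair '\ud83d\ude00'.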
def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines)
def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst
class Future:
"""Generic class to defer some work.
Handled specially in RegexLexerMeta, to support regex string construction at
first use.
"""
def get(self):
raise NotImplementedError
def guess_decode(text):
"""Decode *text* with guessed encoding.
First try UTF-8; this should fail for non-UTF-8 encodings.
Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
try:
text = text.decode('utf-8')
return text, 'utf-8'
except UnicodeDecodeError:
try:
import locale
prefencoding = locale.getpreferredencoding()
text = text.decode()
return text, prefencoding
except (UnicodeDecodeError, LookupError):
text = text.decode('latin1')
return text, 'latin1'
def guess_decode_from_terminal(text, term):
"""Decode *text* coming from terminal *term*.
First try the terminal encoding, if given.
Then try UTF-8. Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
if getattr(term, 'encoding', None):
try:
text = text.decode(term.encoding)
except UnicodeDecodeError:
pass
else:
return text, term.encoding
return guess_decode(text)
def terminal_encoding(term):
"""Return our best guess of encoding for the given *term*."""
if getattr(term, 'encoding', None):
return term.encoding
import locale
return locale.getpreferredencoding()
class UnclosingTextIOWrapper(TextIOWrapper):
# Don't close underlying buffer on destruction.
def close(self):
self.flush()
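# A minimal, self-contained sketch of the option helpers above (illustration
# only; the ``opts`` dict is hypothetical and not part of Pygments itself).
if __name__ == '__main__':
    opts = {'stripnl': 'yes', 'tabsize': '8', 'filters': 'trailing-whitespace'}
    assert get_bool_opt(opts, 'stripnl', False) is True
    assert get_int_opt(opts, 'tabsize', 4) == 8
    assert get_list_opt(opts, 'filters', []) == ['trailing-whitespace']
    assert get_choice_opt(opts, 'missing', ['a', 'b'], default='a') == 'a'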
|
|
from .. project import data_maker, project
from .. util import data_file, deprecated, log, pid_context
"""Common command line arguments for run and demo."""
COMPONENTS = 'driver', 'layout', 'animation'
PRESET_LIBRARY_DEFAULT = '~/.bibliopixel'
ENABLE_PRESETS = False
NUMBER_TYPES = ('python',) + data_maker.NUMPY_TYPES
ADD_REDUNDANT_ARGUMENTS = True
def add_arguments(parser):
pid_context.add_arguments(parser)
if ADD_REDUNDANT_ARGUMENTS:
_add_redundant_arguments(parser)
parser.add_argument(
'-b', '--brightness', default=None,
help='Override project brightness value')
parser.add_argument(
'--dump', action='store_true',
help='Dump the full project as DATA_FILE after loading but ' +
'before running')
parser.add_argument(
'--dry_run', action='store_true',
help='Load projects but do not run them')
parser.add_argument(
'-f', '--fail_on_exception', action='store_true',
        help='If true, bp fails if any subanimation fails to construct')
if deprecated.allowed():
parser.add_argument(
'-g', '--gif', default='', nargs='?', help=GIF_HELP)
parser.add_argument(
'-i', '--ignore_exceptions', action='store_true',
help='If true, continue running the next project if one project fails')
parser.add_argument(
'-j', '--json', action='store_true',
help='Use Json when dumping description data')
parser.add_argument(
'-m', '--movie', default='', nargs='?', help=MOVIE_HELP)
parser.add_argument(
'--pause', default=0, help='Time to pause between running animations')
parser.add_argument(
'--project_lengths', '--pl', default=None, help=PROJECT_LENGTHS_HELP)
parser.add_argument(
'-s', action='store_true', help='Run SimPixel at the default URL')
parser.add_argument(
'--simpixel', help='Run SimPixel at a specific URL')
parser.add_argument(
'--animation_lengths', '--at', default=None,
help='Set run length for each animation')
parser.add_argument(
'-t', '--ledtype', default=None,
help='Default LED type if no LED type is specified')
def _add_redundant_arguments(parser):
"""
These arguments are redundant with just using a project, and we should
    encourage that, as you don't have to learn any dumb flags!
For example, instead of
bp foo.yml --animation=wombat --numbers=float
use
bp foo.yml + '{animation: wombat, numbers: float}'
"""
parser.add_argument(
'-a', '--animation', default=None,
help='Default animation type if no animation is specified')
if deprecated.allowed(): # pragma: no cover
parser.add_argument(
'--dimensions', '--dim', default=None,
help='DEPRECATED: x, (x, y) or (x, y, z) dimensions for project')
parser.add_argument(
'--shape', default=None,
help='x, (x, y) or (x, y, z) dimensions for project')
parser.add_argument(
'-l', '--layout', default=None,
help='Default layout class if no layout is specified')
parser.add_argument(
'--numbers', '-n', default='python', choices=NUMBER_TYPES,
help=NUMBERS_HELP)
parser.add_argument('-p', '--path', default=None, help=PATH_HELP)
def _make_project_flags(args):
def get_value(name):
value = getattr(args, name, None)
if not value:
return {}
if '{' in value:
return data_file.loads(value)
return {'typename': value}
project_flags = {name: get_value(name) for name in COMPONENTS}
if args.ledtype:
project_flags['driver']['ledtype'] = args.ledtype
if args.brightness:
project_flags['layout']['brightness'] = int(args.brightness)
if args.v4:
log.printer('*** Using v4 forward compatibility mode.')
project_flags['numbers'] = 'float'
elif args.numbers != 'python':
project_flags['numbers'] = args.numbers
if args.project_lengths is not None:
run = project_flags.setdefault('run', {})
run['seconds'] = float(args.project_lengths)
if args.animation_lengths is not None:
animation = project_flags.setdefault('animation', {})
length = [float(i) for i in args.animation_lengths.split(',')]
animation['length'] = length
if deprecated.allowed():
if args.dimensions is not None:
deprecated.deprecated('Use --shape instead of --dimensions')
shape = args.shape or args.dimensions
else:
shape = args.shape
if shape is not None:
shape = shape.split(',')
try:
project_flags['shape'] = [int(i) for i in shape]
        except ValueError:
raise ValueError('--shape must be one to three numbers '
'separated by commas.')
return project_flags
def make_project(args, *descs, root_file=None):
project_flags = _make_project_flags(args)
descs += (project_flags,)
return project.project(*descs, root_file=root_file)
# Help messages.
PATH_HELP = """\
A list of directories, separated by colons, which are added to the end of
`sys.path`.
You can also use loady-style paths which start with `//git/` to
dynamically load a library from a public git repository.
See https://github.com/ManiacalLabs/BiblioPixel/wiki/BiblioPixel-Paths
for more information.
"""
NUMBERS_HELP = """
The type of numbers that are used in color list calculations.
`python` means to use the classic Python lists of (r, g, b) tuples.
Anything else is a numpy type, which means that bp uses numpy arrays, which
use much faster arithmetic.
numpy types include:
""" + ' '.join(data_maker.NUMPY_TYPES)
PRESET_HELP = """Filenames for preset library"""
PROJECT_LENGTHS_HELP = """\
How long to run the animation (overrides runner.seconds)"""
MOVIE_HELP = """
Write a movie file (animated GIF or mp4).
If --gif has no argument, the name of the movie file is the same as the name of
the project, with a .gif added to the end.
If it has a single string argument, then it's the name of the GIF file.
Otherwise, the argument is read as JSON and used to construct a GifWriter class.
"""
# TODO: this should go somewhere else
"""
Set defaults for projects using YAML or JSON data files.
A Project is a data file or equivalently a Python dictionary that describes
a BiblioPixel installation. The top-level keys in a Project are called the
sections and a Project might have the following sections:
{sections}
Often some sections of your project correspond to hardware and thus rarely
change, so the `bp default` command allows you to set defaults so you
don't have to mention them in your project at all.
"""
GIF_HELP = """
--gif/-g is a deprecated name for the --movie/-m flag.
""" + MOVIE_HELP
|
|
from __future__ import division
from itertools import *
import math
import operator
import re
import xml.dom
import weakref
from xpath.exceptions import *
import xpath
#
# Data model functions.
#
def string_value(node):
"""Compute the string-value of a node."""
if (node.nodeType == node.DOCUMENT_NODE or
node.nodeType == node.ELEMENT_NODE):
s = u''
for n in axes['descendant'](node):
if n.nodeType == n.TEXT_NODE:
s += n.data
elif n.nodeType == n.CDATA_SECTION_NODE:
s += n.nodeValue
return s
elif node.nodeType == node.ATTRIBUTE_NODE:
return node.value
elif (node.nodeType == node.PROCESSING_INSTRUCTION_NODE or
node.nodeType == node.COMMENT_NODE or
node.nodeType == node.TEXT_NODE):
return node.data
elif node.nodeType == node.CDATA_SECTION_NODE:
return node.nodeValue
def document_order(node):
"""Compute a document order value for the node.
cmp(document_order(a), document_order(b)) will return -1, 0, or 1 if
a is before, identical to, or after b in the document respectively.
We represent document order as a list of sibling indexes. That is,
the third child of the document node has an order of [2]. The first
child of that node has an order of [2,0].
Attributes have a sibling index of -1 (coming before all children of
their node) and are further ordered by name--e.g., [2,0,-1,'href'].
"""
# Attributes: parent-order + [-1, attribute-name]
if node.nodeType == node.ATTRIBUTE_NODE:
order = document_order(node.ownerElement)
order.extend((-1, node.name))
return order
# The document root (hopefully): []
if node.parentNode is None:
return []
# Determine which child this is of its parent.
sibpos = 0
sib = node
while sib.previousSibling is not None:
sibpos += 1
sib = sib.previousSibling
# Order: parent-order + [sibling-position]
order = document_order(node.parentNode)
order.append(sibpos)
return order
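# Worked example (illustration only): in a document <a><b/><c d="x"/></a>,
# the element <a> is child 0 of the document node, so its order is [0];
# <b> is [0, 0], <c> is [0, 1], and the attribute d on <c> is
# [0, 1, -1, 'd'], which sorts after <c> itself but before any of its
# children.  Comparing these lists lexicographically gives document order.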
#
# Type functions, operating on the various XPath types.
#
# Internally, we use the following representations:
# nodeset - list of DOM tree nodes in document order
# string - str or unicode
# boolean - bool
# number - int or float
#
def nodeset(v):
"""Convert a value to a nodeset."""
if not nodesetp(v):
raise XPathTypeError, "value is not a node-set"
return v
def nodesetp(v):
"""Return true iff 'v' is a node-set."""
if isinstance(v, list):
return True
def string(v):
"""Convert a value to a string."""
if nodesetp(v):
if not v:
return u''
return string_value(v[0])
elif numberp(v):
if v == float('inf'):
return u'Infinity'
elif v == float('-inf'):
return u'-Infinity'
elif str(v) == 'nan':
return u'NaN'
elif int(v) == v and v <= 0xffffffff:
v = int(v)
return unicode(v)
elif booleanp(v):
return u'true' if v else u'false'
return v
def stringp(v):
"""Return true iff 'v' is a string."""
return isinstance(v, basestring)
def boolean(v):
"""Convert a value to a boolean."""
if nodesetp(v):
return len(v) > 0
elif numberp(v):
if v == 0 or v != v:
return False
return True
elif stringp(v):
return v != ''
return v
def booleanp(v):
"""Return true iff 'v' is a boolean."""
return isinstance(v, bool)
def number(v):
"""Convert a value to a number."""
if nodesetp(v):
v = string(v)
try:
return float(v)
except ValueError:
return float('NaN')
def numberp(v):
"""Return true iff 'v' is a number."""
return (not(isinstance(v, bool)) and
(isinstance(v, int) or isinstance(v, float)))
class Expr(object):
"""Abstract base class for XPath expressions."""
def evaluate(self, node, pos, size, context):
"""Evaluate the expression.
The context node, context position, and context size are passed as
arguments.
Returns an XPath value: a nodeset, string, boolean, or number.
"""
class BinaryOperatorExpr(Expr):
"""Base class for all binary operators."""
def __init__(self, op, left, right):
self.op = op
self.left = left
self.right = right
def evaluate(self, node, pos, size, context):
# Subclasses either override evaluate() or implement operate().
return self.operate(self.left.evaluate(node, pos, size, context),
self.right.evaluate(node, pos, size, context))
def __str__(self):
return '(%s %s %s)' % (self.left, self.op, self.right)
class AndExpr(BinaryOperatorExpr):
"""<x> and <y>"""
def evaluate(self, node, pos, size, context):
# Note that XPath boolean operations short-circuit.
        return (boolean(self.left.evaluate(node, pos, size, context)) and
                boolean(self.right.evaluate(node, pos, size, context)))
class OrExpr(BinaryOperatorExpr):
"""<x> or <y>"""
def evaluate(self, node, pos, size, context):
# Note that XPath boolean operations short-circuit.
        return (boolean(self.left.evaluate(node, pos, size, context)) or
                boolean(self.right.evaluate(node, pos, size, context)))
class EqualityExpr(BinaryOperatorExpr):
"""<x> = <y>, <x> != <y>, etc."""
operators = {
'=' : operator.eq,
'!=' : operator.ne,
'<=' : operator.le,
'<' : operator.lt,
'>=' : operator.ge,
'>' : operator.gt,
}
def operate(self, a, b):
if nodesetp(a):
for node in a:
if self.operate(string_value(node), b):
return True
return False
if nodesetp(b):
for node in b:
if self.operate(a, string_value(node)):
return True
return False
if self.op in ('=', '!='):
if booleanp(a) or booleanp(b):
convert = boolean
elif numberp(a) or numberp(b):
convert = number
else:
convert = string
else:
convert = number
a, b = convert(a), convert(b)
return self.operators[self.op](a, b)
def divop(x, y):
try:
return x / y
except ZeroDivisionError:
if x == 0 and y == 0:
return float('nan')
if x < 0:
return float('-inf')
return float('inf')
class ArithmeticalExpr(BinaryOperatorExpr):
"""<x> + <y>, <x> - <y>, etc."""
# Note that we must use math.fmod for the correct modulo semantics.
operators = {
'+' : operator.add,
'-' : operator.sub,
'*' : operator.mul,
'div' : divop,
'mod' : math.fmod
}
def operate(self, a, b):
return self.operators[self.op](number(a), number(b))
class UnionExpr(BinaryOperatorExpr):
"""<x> | <y>"""
def operate(self, a, b):
if not nodesetp(a) or not nodesetp(b):
raise XPathTypeError("union operand is not a node-set")
# Need to sort the result to preserve document order.
return sorted(set(chain(a, b)), key=document_order)
class NegationExpr(Expr):
"""- <x>"""
def __init__(self, expr):
self.expr = expr
def evaluate(self, node, pos, size, context):
return -number(self.expr.evaluate(node, pos, size, context))
def __str__(self):
return '(-%s)' % self.expr
class LiteralExpr(Expr):
"""Literals--either numbers or strings."""
def __init__(self, literal):
self.literal = literal
def evaluate(self, node, pos, size, context):
return self.literal
def __str__(self):
if stringp(self.literal):
if "'" in self.literal:
return '"%s"' % self.literal
else:
return "'%s'" % self.literal
return string(self.literal)
class VariableReference(Expr):
"""Variable references."""
def __init__(self, prefix, name):
self.prefix = prefix
self.name = name
def evaluate(self, node, pos, size, context):
try:
if self.prefix is not None:
try:
namespaceURI = context.namespaces[self.prefix]
except KeyError:
raise XPathUnknownPrefixError(self.prefix)
return context.variables[(namespaceURI, self.name)]
else:
return context.variables[self.name]
except KeyError:
raise XPathUnknownVariableError(str(self))
def __str__(self):
if self.prefix is None:
return '$%s' % self.name
else:
return '$%s:%s' % (self.prefix, self.name)
class Function(Expr):
"""Functions."""
def __init__(self, name, args):
self.name = name
self.args = args
self.evaluate = getattr(self, 'f_%s' % name.replace('-', '_'), None)
if self.evaluate is None:
            raise XPathUnknownFunctionError('unknown function "%s()"' % name)
if len(self.args) < self.evaluate.minargs:
            raise XPathTypeError('too few arguments for "%s()"' % name)
if (self.evaluate.maxargs is not None and
len(self.args) > self.evaluate.maxargs):
            raise XPathTypeError('too many arguments for "%s()"' % name)
#
# XPath functions are implemented by methods of the Function class.
#
# A method implementing an XPath function is decorated with the function
# decorator, and receives the evaluated function arguments as positional
# parameters.
#
def function(minargs, maxargs, implicit=False, first=False, convert=None):
"""Function decorator.
minargs -- Minimum number of arguments taken by the function.
maxargs -- Maximum number of arguments taken by the function.
implicit -- True for functions which operate on a nodeset consisting
of the current context node when passed no argument.
(e.g., string() and number().)
convert -- When non-None, a function used to filter function arguments.
"""
def decorator(f):
def new_f(self, node, pos, size, context):
if implicit and len(self.args) == 0:
args = [[node]]
else:
args = [x.evaluate(node, pos, size, context)
for x in self.args]
if first:
args[0] = nodeset(args[0])
if len(args[0]) > 0:
args[0] = args[0][0]
else:
args[0] = None
if convert is not None:
args = [convert(x) for x in args]
return f(self, node, pos, size, context, *args)
new_f.minargs = minargs
new_f.maxargs = maxargs
new_f.__name__ = f.__name__
new_f.__doc__ = f.__doc__
return new_f
return decorator
# Node Set Functions
@function(0, 0)
def f_last(self, node, pos, size, context):
return size
@function(0, 0)
def f_position(self, node, pos, size, context):
return pos
@function(1, 1, convert=nodeset)
def f_count(self, node, pos, size, context, nodes):
return len(nodes)
@function(1, 1)
def f_id(self, node, pos, size, context, arg):
if nodesetp(arg):
ids = (string_value(x) for x in arg)
else:
ids = [string(arg)]
if node.nodeType != node.DOCUMENT_NODE:
node = node.ownerDocument
return list(filter(None, (node.getElementById(id) for id in ids)))
@function(0, 1, implicit=True, first=True)
def f_local_name(self, node, pos, size, context, argnode):
if argnode is None:
return ''
if (argnode.nodeType == argnode.ELEMENT_NODE or
argnode.nodeType == argnode.ATTRIBUTE_NODE):
return argnode.localName
elif argnode.nodeType == argnode.PROCESSING_INSTRUCTION_NODE:
return argnode.target
return ''
@function(0, 1, implicit=True, first=True)
def f_namespace_uri(self, node, pos, size, context, argnode):
if argnode is None:
return ''
return argnode.namespaceURI
@function(0, 1, implicit=True, first=True)
def f_name(self, node, pos, size, context, argnode):
if argnode is None:
return ''
if argnode.nodeType == argnode.ELEMENT_NODE:
return argnode.tagName
elif argnode.nodeType == argnode.ATTRIBUTE_NODE:
return argnode.name
elif argnode.nodeType == argnode.PROCESSING_INSTRUCTION_NODE:
return argnode.target
return ''
# String Functions
@function(0, 1, implicit=True, convert=string)
def f_string(self, node, pos, size, context, arg):
return arg
@function(2, None, convert=string)
def f_concat(self, node, pos, size, context, *args):
return ''.join((x for x in args))
@function(2, 2, convert=string)
def f_starts_with(self, node, pos, size, context, a, b):
return a.startswith(b)
@function(2, 2, convert=string)
def f_contains(self, node, pos, size, context, a, b):
return b in a
@function(2, 2, convert=string)
def f_substring_before(self, node, pos, size, context, a, b):
try:
return a[0:a.index(b)]
except ValueError:
return ''
@function(2, 2, convert=string)
def f_substring_after(self, node, pos, size, context, a, b):
try:
return a[a.index(b)+len(b):]
except ValueError:
return ''
@function(2, 3)
def f_substring(self, node, pos, size, context, s, start, count=None):
s = string(s)
start = round(number(start))
if start != start:
# Catch NaN
return ''
if count is None:
end = len(s) + 1
else:
end = start + round(number(count))
if end != end:
# Catch NaN
return ''
if end > len(s):
end = len(s)+1
if start < 1:
start = 1
if start > len(s):
return ''
if end <= start:
return ''
return s[int(start)-1:int(end)-1]
@function(0, 1, implicit=True, convert=string)
def f_string_length(self, node, pos, size, context, s):
return len(s)
@function(0, 1, implicit=True, convert=string)
def f_normalize_space(self, node, pos, size, context, s):
return re.sub(r'\s+', ' ', s.strip())
@function(3, 3, convert=lambda x: unicode(string(x)))
def f_translate(self, node, pos, size, context, s, source, target):
# str.translate() and unicode.translate() are completely different.
# The translate() arguments are coerced to unicode.
table = {}
for schar, tchar in izip(source, target):
schar = ord(schar)
if schar not in table:
table[schar] = tchar
if len(source) > len(target):
for schar in source[len(target):]:
schar = ord(schar)
if schar not in table:
table[schar] = None
return s.translate(table)
# Boolean functions
@function(1, 1, convert=boolean)
def f_boolean(self, node, pos, size, context, b):
return b
@function(1, 1, convert=boolean)
def f_not(self, node, pos, size, context, b):
return not b
@function(0, 0)
def f_true(self, node, pos, size, context):
return True
@function(0, 0)
def f_false(self, node, pos, size, context):
return False
@function(1, 1, convert=string)
def f_lang(self, node, pos, size, context, s):
s = s.lower()
for n in axes['ancestor-or-self'](node):
if n.nodeType == n.ELEMENT_NODE and n.hasAttribute('xml:lang'):
lang = n.getAttribute('xml:lang').lower()
if s == lang or lang.startswith(s + u'-'):
return True
break
return False
# Number functions
@function(0, 1, implicit=True, convert=number)
def f_number(self, node, pos, size, context, n):
return n
@function(1, 1, convert=nodeset)
def f_sum(self, node, pos, size, context, nodes):
return sum((number(string_value(x)) for x in nodes))
@function(1, 1, convert=number)
def f_floor(self, node, pos, size, context, n):
return math.floor(n)
@function(1, 1, convert=number)
def f_ceiling(self, node, pos, size, context, n):
return math.ceil(n)
@function(1, 1, convert=number)
def f_round(self, node, pos, size, context, n):
# XXX round(-0.0) should be -0.0, not 0.0.
# XXX round(-1.5) should be -1.0, not -2.0.
return round(n)
def __str__(self):
return '%s(%s)' % (self.name, ', '.join((str(x) for x in self.args)))
#
# XPath axes.
#
# Dictionary of all axis functions.
axes = {}
def axisfn(reverse=False, principal_node_type=xml.dom.Node.ELEMENT_NODE):
"""Axis function decorator.
An axis function will take a node as an argument and return a sequence
over the nodes along an XPath axis. Axis functions have two extra
attributes indicating the axis direction and principal node type.
"""
def decorate(f):
f.__name__ = f.__name__.replace('_', '-')
f.reverse = reverse
f.principal_node_type = principal_node_type
return f
return decorate
def make_axes():
"""Define functions to walk each of the possible XPath axes."""
@axisfn()
def target(node):
return node.childNodes # TODO: return association targets
@axisfn()
def target_graph(node):
return node.childNodes # TODO: return association targets (recursive)
@axisfn()
def subject(node):
return node.parentNode # TODO: return association subject
@axisfn()
def subject_graph(node):
return node.parentNode # TODO: return association subject (recursive)
@axisfn()
def child(node):
return node.childNodes
@axisfn()
def descendant(node):
for child in node.childNodes:
for node in descendant_or_self(child):
yield node
@axisfn()
def parent(node):
if node.parentNode is not None:
yield node.parentNode
@axisfn(reverse=True)
def ancestor(node):
while node.parentNode is not None:
node = node.parentNode
yield node
@axisfn()
def following_sibling(node):
while node.nextSibling is not None:
node = node.nextSibling
yield node
@axisfn(reverse=True)
def preceding_sibling(node):
while node.previousSibling is not None:
node = node.previousSibling
yield node
@axisfn()
def following(node):
while node is not None:
while node.nextSibling is not None:
node = node.nextSibling
for n in descendant_or_self(node):
yield n
node = node.parentNode
@axisfn(reverse=True)
def preceding(node):
while node is not None:
while node.previousSibling is not None:
node = node.previousSibling
# Could be more efficient here.
for n in reversed(list(descendant_or_self(node))):
yield n
node = node.parentNode
@axisfn(principal_node_type=xml.dom.Node.ATTRIBUTE_NODE)
def attribute(node):
if node.attributes is not None:
return (node.attributes.item(i)
for i in xrange(node.attributes.length))
return ()
@axisfn()
def namespace(node):
raise XPathNotImplementedError("namespace axis is not implemented")
@axisfn()
def self(node):
yield node
@axisfn()
def descendant_or_self(node):
yield node
for child in node.childNodes:
for node in descendant_or_self(child):
yield node
@axisfn(reverse=True)
def ancestor_or_self(node):
return chain([node], ancestor(node))
# Place each axis function defined here into the 'axes' dict.
for axis in locals().values():
axes[axis.__name__] = axis
make_axes()
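# Illustrative sketch (not part of the original module, assumes the standard
# xml.dom.minidom parser): the axis functions can be driven directly through
# the 'axes' dictionary, e.g.
#
#   import xml.dom.minidom
#   doc = xml.dom.minidom.parseString('<a><b/><c/></a>')
#   names = [n.nodeName for n in axes['descendant-or-self'](doc.documentElement)]
#   # names == ['a', 'b', 'c']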
def merge_into_nodeset(target, source):
"""Place all the nodes from the source node-set into the target
node-set, preserving document order. Both node-sets must be in
document order to begin with.
"""
if len(target) == 0:
target.extend(source)
return
source = [n for n in source if n not in target]
if len(source) == 0:
return
# If the last node in the target set comes before the first node in the
# source set, then we can just concatenate the sets. Otherwise, we
# will need to sort. (We could also check to see if the last node in
# the source set comes before the first node in the target set, but this
# situation is very unlikely in practice.)
if document_order(target[-1]) < document_order(source[0]):
target.extend(source)
else:
target.extend(source)
target.sort(key=document_order)
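# Illustrative behaviour (document_order() is defined elsewhere in this
# module): merging the node-set [n1, n3] with [n2, n3] yields [n1, n2, n3];
# duplicates are dropped and the result remains in document order.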
class AbsolutePathExpr(Expr):
"""Absolute location paths."""
def __init__(self, path):
self.path = path
def evaluate(self, node, pos, size, context):
if node.nodeType != node.DOCUMENT_NODE:
node = node.ownerDocument
if self.path is None:
return [node]
return self.path.evaluate(node, 1, 1, context)
def __str__(self):
return '/%s' % (self.path or '')
class PathExpr(Expr):
"""Location path expressions."""
def __init__(self, steps):
self.steps = steps
def evaluate(self, node, pos, size, context):
# The first step in the path is evaluated in the current context.
# If this is the only step in the path, the return value is
# unimportant. If there are other steps, however, it must be a
# node-set.
result = self.steps[0].evaluate(node, pos, size, context)
if len(self.steps) > 1 and not nodesetp(result):
raise XPathTypeError("path step is not a node-set")
# Subsequent steps are evaluated for each node in the node-set
# resulting from the previous step.
for step in self.steps[1:]:
aggregate = []
for i in xrange(len(result)):
nodes = step.evaluate(result[i], i+1, len(result), context)
if not nodesetp(nodes):
raise XPathTypeError("path step is not a node-set")
merge_into_nodeset(aggregate, nodes)
result = aggregate
return result
def __str__(self):
return '/'.join((str(s) for s in self.steps))
class PredicateList(Expr):
"""A list of predicates.
Predicates are handled as an expression wrapping the expression
filtered by the predicates.
"""
def __init__(self, expr, predicates, axis='child'):
self.predicates = predicates
self.expr = expr
self.axis = axes[axis]
def evaluate(self, node, pos, size, context):
result = self.expr.evaluate(node, pos, size, context)
if not nodesetp(result):
raise XPathTypeError("predicate input is not a node-set")
if self.axis.reverse:
result.reverse()
for pred in self.predicates:
match = []
for i, node in izip(count(1), result):
r = pred.evaluate(node, i, len(result), context)
# If a predicate evaluates to a number, select the node
# with that position. Otherwise, select nodes for which
# the boolean value of the predicate is true.
if numberp(r):
if r == i:
match.append(node)
elif boolean(r):
match.append(node)
result = match
if self.axis.reverse:
result.reverse()
return result
def __str__(self):
s = str(self.expr)
if '/' in s:
s = '(%s)' % s
return s + ''.join(('[%s]' % x for x in self.predicates))
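# Illustrative note (not in the original source): a numeric predicate selects
# by position, so "item[2]" keeps only the node at position 2 along the axis,
# while a non-numeric predicate such as "item[@id]" keeps every node for
# which its boolean value is true.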
class AxisStep(Expr):
"""One step in a location path expression."""
def __init__(self, axis, test=None, predicates=None):
if test is None:
test = AnyKindTest()
self.axis = axes[axis]
self.test = test
def evaluate(self, node, pos, size, context):
match = []
for n in self.axis(node):
if self.test.match(n, self.axis, context):
match.append(n)
if self.axis.reverse:
match.reverse()
return match
def __str__(self):
return '%s::%s' % (self.axis.__name__, self.test)
#
# Node tests.
#
class ComponentTest(object):
def __init__(self, name=None):
self.name = name
def match(self, node, axis, context):
return (node.nodeType == node.PROCESSING_INSTRUCTION_NODE and # TODO: verify this node is type Component
(self.name is None or node.target == self.name))
def __str__(self):
if self.name is None:
name = ''
elif "'" in self.name:
name = '"%s"' % self.name
else:
name = "'%s'" % self.name
return 'component(%s)' % name
class ResourceTest(object):
def __init__(self, name=None):
self.name = name
def match(self, node, axis, context):
return (node.nodeType == node.PROCESSING_INSTRUCTION_NODE and # TODO: verify this node is type Resource
(self.name is None or node.target == self.name))
def __str__(self):
if self.name is None:
name = ''
elif "'" in self.name:
name = '"%s"' % self.name
else:
name = "'%s'" % self.name
return 'resource(%s)' % name
class EventTest(object):
def __init__(self, name=None):
self.name = name
def match(self, node, axis, context):
return (node.nodeType == node.PROCESSING_INSTRUCTION_NODE and # TODO: verify this node is type Event
(self.name is None or node.target == self.name))
def __str__(self):
if self.name is None:
name = ''
elif "'" in self.name:
name = '"%s"' % self.name
else:
name = "'%s'" % self.name
return 'event(%s)' % name
class AssociationTest(object):
def __init__(self, name=None):
self.name = name
def match(self, node, axis, context):
return (node.nodeType == node.PROCESSING_INSTRUCTION_NODE and # TODO: verify this node is type Association
(self.name is None or node.target == self.name))
def __str__(self):
if self.name is None:
name = ''
elif "'" in self.name:
name = '"%s"' % self.name
else:
name = "'%s'" % self.name
return 'association(%s)' % name
class Test(object):
"""Abstract base class for node tests."""
def match(self, node, axis, context):
"""Return True if 'node' matches the test along 'axis'."""
class NameTest(object):
def __init__(self, prefix, localpart):
self.prefix = prefix
self.localName = localpart
if self.prefix is None and self.localName == '*':
self.prefix = '*'
def match(self, node, axis, context):
if node.nodeType != axis.principal_node_type:
return False
if self.prefix != '*':
namespaceURI = None
if self.prefix is not None:
try:
namespaceURI = context.namespaces[self.prefix]
except KeyError:
raise XPathUnknownPrefixError(self.prefix)
elif axis.principal_node_type == node.ELEMENT_NODE:
namespaceURI = context.default_namespace
if namespaceURI != node.namespaceURI:
return False
if self.localName != '*':
if self.localName != node.localName:
return False
return True
def __str__(self):
if self.prefix is not None:
return '%s:%s' % (self.prefix, self.localName)
else:
return self.localName
class PITest(object):
def __init__(self, name=None):
self.name = name
def match(self, node, axis, context):
return (node.nodeType == node.PROCESSING_INSTRUCTION_NODE and
(self.name is None or node.target == self.name))
def __str__(self):
if self.name is None:
name = ''
elif "'" in self.name:
name = '"%s"' % self.name
else:
name = "'%s'" % self.name
return 'processing-instruction(%s)' % name
class CommentTest(object):
def match(self, node, axis, context):
return node.nodeType == node.COMMENT_NODE
def __str__(self):
return 'comment()'
class TextTest(object):
def match(self, node, axis, context):
return (node.nodeType == node.TEXT_NODE or
node.nodeType == node.CDATA_SECTION_NODE)
def __str__(self):
return 'text()'
class AnyKindTest(object):
def match(self, node, axis, context):
return True
def __str__(self):
return 'node()'
|
|
"""Support for AdGuard Home."""
from __future__ import annotations
import logging
from adguardhome import AdGuardHome, AdGuardHomeConnectionError, AdGuardHomeError
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_URL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import (
CONF_FORCE,
DATA_ADGUARD_CLIENT,
DATA_ADGUARD_VERSION,
DOMAIN,
SERVICE_ADD_URL,
SERVICE_DISABLE_URL,
SERVICE_ENABLE_URL,
SERVICE_REFRESH,
SERVICE_REMOVE_URL,
)
_LOGGER = logging.getLogger(__name__)
SERVICE_URL_SCHEMA = vol.Schema({vol.Required(CONF_URL): cv.url})
SERVICE_ADD_URL_SCHEMA = vol.Schema(
{vol.Required(CONF_NAME): cv.string, vol.Required(CONF_URL): cv.url}
)
SERVICE_REFRESH_SCHEMA = vol.Schema(
{vol.Optional(CONF_FORCE, default=False): cv.boolean}
)
PLATFORMS = ["sensor", "switch"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up AdGuard Home from a config entry."""
session = async_get_clientsession(hass, entry.data[CONF_VERIFY_SSL])
adguard = AdGuardHome(
entry.data[CONF_HOST],
port=entry.data[CONF_PORT],
username=entry.data[CONF_USERNAME],
password=entry.data[CONF_PASSWORD],
tls=entry.data[CONF_SSL],
verify_ssl=entry.data[CONF_VERIFY_SSL],
session=session,
)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {DATA_ADGUARD_CLIENT: adguard}
try:
await adguard.version()
except AdGuardHomeConnectionError as exception:
raise ConfigEntryNotReady from exception
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
async def add_url(call) -> None:
"""Service call to add a new filter subscription to AdGuard Home."""
await adguard.filtering.add_url(
allowlist=False, name=call.data.get(CONF_NAME), url=call.data.get(CONF_URL)
)
async def remove_url(call) -> None:
"""Service call to remove a filter subscription from AdGuard Home."""
await adguard.filtering.remove_url(allowlist=False, url=call.data.get(CONF_URL))
async def enable_url(call) -> None:
"""Service call to enable a filter subscription in AdGuard Home."""
await adguard.filtering.enable_url(allowlist=False, url=call.data.get(CONF_URL))
async def disable_url(call) -> None:
"""Service call to disable a filter subscription in AdGuard Home."""
await adguard.filtering.disable_url(
allowlist=False, url=call.data.get(CONF_URL)
)
async def refresh(call) -> None:
"""Service call to refresh the filter subscriptions in AdGuard Home."""
await adguard.filtering.refresh(
allowlist=False, force=call.data.get(CONF_FORCE)
)
hass.services.async_register(
DOMAIN, SERVICE_ADD_URL, add_url, schema=SERVICE_ADD_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_REMOVE_URL, remove_url, schema=SERVICE_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_ENABLE_URL, enable_url, schema=SERVICE_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_DISABLE_URL, disable_url, schema=SERVICE_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_REFRESH, refresh, schema=SERVICE_REFRESH_SCHEMA
)
return True
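# Illustrative sketch (not part of the integration): once the entry is set
# up, the filter services registered above can be invoked like any other
# Home Assistant service, e.g. from another coroutine:
#
#   await hass.services.async_call(
#       DOMAIN,
#       SERVICE_ADD_URL,
#       {CONF_NAME: "Example list", CONF_URL: "https://example.com/filter.txt"},
#       blocking=True,
#   )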
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload AdGuard Home config entry."""
hass.services.async_remove(DOMAIN, SERVICE_ADD_URL)
hass.services.async_remove(DOMAIN, SERVICE_REMOVE_URL)
hass.services.async_remove(DOMAIN, SERVICE_ENABLE_URL)
hass.services.async_remove(DOMAIN, SERVICE_DISABLE_URL)
hass.services.async_remove(DOMAIN, SERVICE_REFRESH)
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
del hass.data[DOMAIN]
return unload_ok
class AdGuardHomeEntity(Entity):
"""Defines a base AdGuard Home entity."""
def __init__(
self,
adguard: AdGuardHome,
entry: ConfigEntry,
name: str,
icon: str,
enabled_default: bool = True,
) -> None:
"""Initialize the AdGuard Home entity."""
self._available = True
self._enabled_default = enabled_default
self._icon = icon
self._name = name
self._entry = entry
self.adguard = adguard
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self) -> str:
"""Return the mdi icon of the entity."""
return self._icon
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_default
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
async def async_update(self) -> None:
"""Update AdGuard Home entity."""
if not self.enabled:
return
try:
await self._adguard_update()
self._available = True
except AdGuardHomeError:
if self._available:
_LOGGER.debug(
"An error occurred while updating AdGuard Home sensor",
exc_info=True,
)
self._available = False
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
raise NotImplementedError()
class AdGuardHomeDeviceEntity(AdGuardHomeEntity):
"""Defines a AdGuard Home device entity."""
@property
def device_info(self) -> DeviceInfo:
"""Return device information about this AdGuard Home instance."""
return {
"identifiers": {
(DOMAIN, self.adguard.host, self.adguard.port, self.adguard.base_path)
},
"name": "AdGuard Home",
"manufacturer": "AdGuard Team",
"sw_version": self.hass.data[DOMAIN][self._entry.entry_id].get(
DATA_ADGUARD_VERSION
),
"entry_type": "service",
}
|
|
from __future__ import absolute_import
import bisect
import functools
import itertools
import logging
import math
import operator
import zlib
from calendar import Calendar
from collections import OrderedDict, namedtuple
from datetime import datetime, timedelta
import pytz
from django.utils import dateformat, timezone
from sentry.app import tsdb
from sentry.models import (
Activity,
GroupStatus,
Organization,
OrganizationStatus,
Project,
Team,
User,
UserOption,
)
from sentry.tasks.base import instrumented_task
from sentry.utils import json, redis
from sentry.utils.dates import floor_to_utc_day, to_datetime, to_timestamp
from sentry.utils.email import MessageBuilder
from sentry.utils.iterators import chunked
from sentry.utils.math import mean
from six.moves import reduce
date_format = functools.partial(dateformat.format, format_string="F jS, Y")
logger = logging.getLogger(__name__)
BATCH_SIZE = 30000
def _get_organization_queryset():
return Organization.objects.filter(status=OrganizationStatus.VISIBLE)
def _fill_default_parameters(timestamp=None, rollup=None):
if timestamp is None:
timestamp = to_timestamp(floor_to_utc_day(timezone.now()))
if rollup is None:
rollup = 60 * 60 * 24 * 7
return (timestamp, rollup)
def _to_interval(timestamp, duration):
return (to_datetime(timestamp - duration), to_datetime(timestamp))
def change(value, reference):
"""
Calculate the relative change between a value and a reference point.
"""
if not reference: # handle both None and divide by zero case
return None
return ((value or 0) - reference) / float(reference)
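# Worked examples (illustrative): change(15, 10) == 0.5 (a 50% increase),
# change(5, 10) == -0.5, and change(5, 0) is None because there is no
# meaningful reference point to compare against.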
def safe_add(x, y):
"""
Adds two values which are either numeric types or None.
- If both values are numeric, the result is the sum of those values.
- If only one numeric value is provided, that value is returned.
- If both values are None, then None is returned.
"""
if x is not None and y is not None:
return x + y
elif x is not None:
return x
elif y is not None:
return y
else:
return None
def month_to_index(year, month):
"""
Convert a year and month to a single value: the number of months between
this month and 1 AD.
This mainly exists to simplify doing month-based arithmetic (e.g. "three
months ago") without having to manually handle wrapping around years, since
timedelta doesn't accept a "months" parameter.
"""
assert 12 >= month >= 1
return (year - 1) * 12 + month - 1
def index_to_month(index):
"""
The opposite companion to ``month_to_index``. Returns a (year, month)
tuple.
"""
return (index // 12) + 1, index % 12 + 1
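# Worked example (illustrative): month_to_index(2016, 1) == 24180, so "three
# months earlier" is simply index_to_month(24180 - 3) == (2015, 10), i.e.
# October 2015, with no year-wrapping arithmetic required.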
def clean_series(start, stop, rollup, series):
"""
Validate a series, ensuring that it follows the specified rollup and
boundaries. The start bound is inclusive, while the stop bound is
exclusive (similar to the slice operation).
"""
start_timestamp = to_timestamp(start)
stop_timestamp = to_timestamp(stop)
result = []
for i, (timestamp, value) in enumerate(series):
assert timestamp == start_timestamp + rollup * i
if timestamp >= stop_timestamp:
break
result.append((timestamp, value))
return result
def merge_sequences(target, other, function=operator.add):
"""
Merge two sequences into a single sequence. The length of the two
sequences must be equal.
"""
assert len(target) == len(other), "sequence lengths must match"
return type(target)([function(x, y) for x, y in zip(target, other)])
def merge_mappings(target, other, function=lambda x, y: x + y):
"""
Merge two mappings into a single mapping. The set of keys in both
mappings must be equal.
"""
assert set(target) == set(other), "keys must match"
return {k: function(v, other[k]) for k, v in target.items()}
def merge_series(target, other, function=operator.add):
"""
Merge two series into a single series. Both series must have the same
start and end points as well as the same resolution.
"""
missing = object()
results = []
for x, y in itertools.izip_longest(target, other, fillvalue=missing):
assert x is not missing and y is not missing, "series must be same length"
assert x[0] == y[0], "series timestamps must match"
results.append((x[0], function(x[1], y[1])))
return results
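# Illustrative example: merging [(0, 1), (60, 2)] with [(0, 10), (60, 20)]
# using the default operator.add yields [(0, 11), (60, 22)]; series of
# different lengths or with mismatched timestamps raise an AssertionError.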
def _query_tsdb_chunked(func, issue_ids, start, stop, rollup):
combined = {}
for chunk in chunked(issue_ids, BATCH_SIZE):
combined.update(func(tsdb.models.group, chunk, start, stop, rollup=rollup))
return combined
def prepare_project_series(start__stop, project, rollup=60 * 60 * 24):
start, stop = start__stop
resolution, series = tsdb.get_optimal_rollup_series(start, stop, rollup)
assert resolution == rollup, "resolution does not match requested value"
clean = functools.partial(clean_series, start, stop, rollup)
issue_ids = project.group_set.filter(
status=GroupStatus.RESOLVED, resolved_at__gte=start, resolved_at__lt=stop
).values_list("id", flat=True)
tsdb_range = _query_tsdb_chunked(tsdb.get_range, issue_ids, start, stop, rollup)
return merge_series(
reduce(
merge_series,
map(clean, tsdb_range.values()),
clean([(timestamp, 0) for timestamp in series]),
),
clean(
tsdb.get_range(tsdb.models.project, [project.id], start, stop, rollup=rollup)[
project.id
]
),
lambda resolved, total: (resolved, total - resolved), # unresolved
)
def prepare_project_aggregates(ignore__stop, project):
# TODO: This needs to return ``None`` for periods that don't have any data
# (because the project is not old enough) and possibly extrapolate for
# periods that only have partial data.
_, stop = ignore__stop
segments = 4
period = timedelta(days=7)
start = stop - (period * segments)
def get_aggregate_value(start, stop):
return tsdb.get_sums(tsdb.models.project, (project.id,), start, stop, rollup=60 * 60 * 24)[
project.id
]
return [
get_aggregate_value(start + (period * i), start + (period * (i + 1) - timedelta(seconds=1)))
for i in range(segments)
]
def prepare_project_issue_summaries(interval, project):
start, stop = interval
queryset = project.group_set.exclude(status=GroupStatus.IGNORED)
# Fetch all new issues.
new_issue_ids = set(
queryset.filter(first_seen__gte=start, first_seen__lt=stop).values_list("id", flat=True)
)
# Fetch all regressions. This is a little weird, since there's no way to
# tell *when* a group regressed using the Group model. Instead, we query
# all groups that have been seen in the last week and have ever regressed
# and query the Activity model to find out if they regressed within the
# past week. (In theory, the activity table *could* be used to answer this
# query without the subselect, but there are no suitable indexes to make its
# performance predictable.)
reopened_issue_ids = set(
Activity.objects.filter(
group__in=queryset.filter(
last_seen__gte=start,
last_seen__lt=stop,
resolved_at__isnull=False, # signals this has *ever* been resolved
),
type__in=(Activity.SET_REGRESSION, Activity.SET_UNRESOLVED),
datetime__gte=start,
datetime__lt=stop,
)
.distinct()
.values_list("group_id", flat=True)
)
rollup = 60 * 60 * 24
event_counts = _query_tsdb_chunked(
tsdb.get_sums, new_issue_ids | reopened_issue_ids, start, stop, rollup
)
new_issue_count = sum(event_counts[id] for id in new_issue_ids)
reopened_issue_count = sum(event_counts[id] for id in reopened_issue_ids)
existing_issue_count = max(
tsdb.get_sums(tsdb.models.project, [project.id], start, stop, rollup=rollup)[project.id]
- new_issue_count
- reopened_issue_count,
0,
)
return [new_issue_count, reopened_issue_count, existing_issue_count]
def prepare_project_usage_summary(start__stop, project):
start, stop = start__stop
return (
tsdb.get_sums(
tsdb.models.project_total_blacklisted, [project.id], start, stop, rollup=60 * 60 * 24
)[project.id],
tsdb.get_sums(
tsdb.models.project_total_rejected, [project.id], start, stop, rollup=60 * 60 * 24
)[project.id],
)
def get_calendar_range(ignore__stop_time, months):
_, stop_time = ignore__stop_time
assert (
stop_time.hour,
stop_time.minute,
stop_time.second,
stop_time.microsecond,
stop_time.tzinfo,
) == (0, 0, 0, 0, pytz.utc)
last_day = stop_time - timedelta(days=1)
stop_month_index = month_to_index(last_day.year, last_day.month)
start_month_index = stop_month_index - months + 1
return start_month_index, stop_month_index
def get_calendar_query_range(interval, months):
start_month_index, _ = get_calendar_range(interval, months)
start_time = datetime(day=1, tzinfo=pytz.utc, *index_to_month(start_month_index))
return start_time, interval[1]
def clean_calendar_data(project, series, start, stop, rollup, timestamp=None):
earliest = tsdb.get_earliest_timestamp(rollup, timestamp=timestamp)
def remove_invalid_values(item):
timestamp, value = item
if timestamp < earliest:
value = None
elif to_datetime(timestamp) < project.date_added:
value = None
return (timestamp, value)
return map(remove_invalid_values, clean_series(start, stop, rollup, series))
def prepare_project_calendar_series(interval, project):
start, stop = get_calendar_query_range(interval, 3)
rollup = 60 * 60 * 24
series = tsdb.get_range(tsdb.models.project, [project.id], start, stop, rollup=rollup)[
project.id
]
return clean_calendar_data(project, series, start, stop, rollup)
def build(name, fields):
names, prepare_fields, merge_fields = zip(*fields)
cls = namedtuple(name, names)
def prepare(*args):
return cls(*[f(*args) for f in prepare_fields])
def merge(target, other):
return cls(*[f(target[i], other[i]) for i, f in enumerate(merge_fields)])
return cls, prepare, merge
Report, prepare_project_report, merge_reports = build(
"Report",
[
(
"series",
prepare_project_series,
functools.partial(merge_series, function=merge_sequences),
),
(
"aggregates",
prepare_project_aggregates,
functools.partial(merge_sequences, function=safe_add),
),
("issue_summaries", prepare_project_issue_summaries, merge_sequences),
("usage_summary", prepare_project_usage_summary, merge_sequences),
(
"calendar_series",
prepare_project_calendar_series,
functools.partial(merge_series, function=safe_add),
),
],
)
class ReportBackend(object):
def build(self, timestamp, duration, project):
return prepare_project_report(_to_interval(timestamp, duration), project)
def prepare(self, timestamp, duration, organization):
"""
Build and store reports for all projects in the organization.
"""
raise NotImplementedError
def fetch(self, timestamp, duration, organization, projects):
"""
Fetch reports for a set of projects in the organization, returning
reports in the order that they were requested.
"""
raise NotImplementedError
class DummyReportBackend(ReportBackend):
def prepare(self, timestamp, duration, organization):
pass
def fetch(self, timestamp, duration, organization, projects):
assert all(project.organization_id == organization.id for project in projects)
return map(functools.partial(self.build, timestamp, duration), projects)
class RedisReportBackend(ReportBackend):
version = 1
def __init__(self, cluster, ttl, namespace="r"):
self.cluster = cluster
self.ttl = ttl
self.namespace = namespace
def __make_key(self, timestamp, duration, organization):
return u"{}:{}:{}:{}:{}".format(
self.namespace, self.version, organization.id, int(timestamp), int(duration)
)
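# Example (illustrative): for organization 42, timestamp 1500000000 and the
# weekly duration (604800 seconds) this produces the key
# "r:1:42:1500000000:604800", under which the per-project reports are stored
# as a Redis hash.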
def __encode(self, report):
return zlib.compress(json.dumps(list(report)))
def __decode(self, value):
if value is None:
return None
return Report(*json.loads(zlib.decompress(value)))
def prepare(self, timestamp, duration, organization):
reports = {}
for project in organization.project_set.all():
reports[project.id] = self.__encode(self.build(timestamp, duration, project))
if not reports:
# XXX: HMSET requires at least one key/value pair, so we need to
# protect ourselves here against organizations that were created
# but haven't set up any projects yet.
return
with self.cluster.map() as client:
key = self.__make_key(timestamp, duration, organization)
client.hmset(key, reports)
client.expire(key, self.ttl)
def fetch(self, timestamp, duration, organization, projects):
with self.cluster.map() as client:
result = client.hmget(
self.__make_key(timestamp, duration, organization),
[project.id for project in projects],
)
return list(map(self.__decode, result.value))
backend = RedisReportBackend(redis.clusters.get("default"), 60 * 60 * 3)
@instrumented_task(name="sentry.tasks.reports.prepare_reports", queue="reports.prepare")
def prepare_reports(dry_run=False, *args, **kwargs):
timestamp, duration = _fill_default_parameters(*args, **kwargs)
organization_ids = _get_organization_queryset().values_list("id", flat=True)
for organization_id in organization_ids:
prepare_organization_report.delay(timestamp, duration, organization_id, dry_run=dry_run)
@instrumented_task(name="sentry.tasks.reports.prepare_organization_report", queue="reports.prepare")
def prepare_organization_report(timestamp, duration, organization_id, dry_run=False):
try:
organization = _get_organization_queryset().get(id=organization_id)
except Organization.DoesNotExist:
logger.warning(
"reports.organization.missing",
extra={
"timestamp": timestamp,
"duration": duration,
"organization_id": organization_id,
},
)
return
backend.prepare(timestamp, duration, organization)
# If an OrganizationMember row doesn't have an associated user, this is
# actually a pending invitation, so no report should be delivered.
member_set = organization.member_set.filter(user_id__isnull=False, user__is_active=True)
for user_id in member_set.values_list("user_id", flat=True):
deliver_organization_user_report.delay(
timestamp, duration, organization_id, user_id, dry_run=dry_run
)
def fetch_personal_statistics(start__stop, organization, user):
start, stop = start__stop
resolved_issue_ids = set(
Activity.objects.filter(
project__organization_id=organization.id,
user_id=user.id,
type__in=(Activity.SET_RESOLVED, Activity.SET_RESOLVED_IN_RELEASE),
datetime__gte=start,
datetime__lt=stop,
group__status=GroupStatus.RESOLVED, # only count if the issue is still resolved
)
.distinct()
.values_list("group_id", flat=True)
)
if resolved_issue_ids:
users = tsdb.get_distinct_counts_union(
tsdb.models.users_affected_by_group, resolved_issue_ids, start, stop, 60 * 60 * 24
)
else:
users = {}
return {"resolved": len(resolved_issue_ids), "users": users}
Duration = namedtuple(
"Duration",
(
"adjective", # e.g. "daily" or "weekly",
"noun", # relative to today, e.g. "yesterday" or "this week"
"date_format", # date format used for large series x axis labeling
),
)
durations = {(60 * 60 * 24 * 7): Duration("weekly", "this week", "D")}
def build_message(timestamp, duration, organization, user, reports):
start, stop = interval = _to_interval(timestamp, duration)
duration_spec = durations[duration]
message = MessageBuilder(
subject=u"{} Report for {}: {} - {}".format(
duration_spec.adjective.title(),
organization.name,
date_format(start),
date_format(stop),
),
template="sentry/emails/reports/body.txt",
html_template="sentry/emails/reports/body.html",
type="report.organization",
context={
"duration": duration_spec,
"interval": {"start": date_format(start), "stop": date_format(stop)},
"organization": organization,
"personal": fetch_personal_statistics(interval, organization, user),
"report": to_context(organization, interval, reports),
"user": user,
},
)
message.add_users((user.id,))
return message
DISABLED_ORGANIZATIONS_USER_OPTION_KEY = "reports:disabled-organizations"
def user_subscribed_to_organization_reports(user, organization):
return organization.id not in UserOption.objects.get_value(
user=user, key=DISABLED_ORGANIZATIONS_USER_OPTION_KEY, default=[]
)
class Skipped(object):
NotSubscribed = object()
NoProjects = object()
NoReports = object()
def has_valid_aggregates(interval, project__report):
project, report = project__report
return any(bool(value) for value in report.aggregates)
@instrumented_task(
name="sentry.tasks.reports.deliver_organization_user_report", queue="reports.deliver"
)
def deliver_organization_user_report(timestamp, duration, organization_id, user_id, dry_run=False):
try:
organization = _get_organization_queryset().get(id=organization_id)
except Organization.DoesNotExist:
logger.warning(
"reports.organization.missing",
extra={
"timestamp": timestamp,
"duration": duration,
"organization_id": organization_id,
},
)
return
user = User.objects.get(id=user_id)
if not user_subscribed_to_organization_reports(user, organization):
logger.debug(
"Skipping report for %r to %r, user is not subscribed to reports.", organization, user
)
return Skipped.NotSubscribed
projects = set()
for team in Team.objects.get_for_user(organization, user):
projects.update(Project.objects.get_for_user(team, user, _skip_team_check=True))
if not projects:
logger.debug(
"Skipping report for %r to %r, user is not associated with any projects.",
organization,
user,
)
return Skipped.NoProjects
interval = _to_interval(timestamp, duration)
projects = list(projects)
inclusion_predicates = [
lambda interval, project__report: project__report[1] is not None,
has_valid_aggregates,
]
reports = dict(
filter(
lambda item: all(predicate(interval, item) for predicate in inclusion_predicates),
zip(projects, backend.fetch(timestamp, duration, organization, projects)),
)
)
if not reports:
logger.debug(
"Skipping report for %r to %r, no qualifying reports to deliver.", organization, user
)
return Skipped.NoReports
message = build_message(timestamp, duration, organization, user, reports)
if not dry_run:
message.send()
Point = namedtuple("Point", "resolved unresolved")
DistributionType = namedtuple("DistributionType", "label color")
def series_map(function, series):
return [(timestamp, function(value)) for timestamp, value in series]
colors = ["#696dc3", "#6288ba", "#59aca4", "#99d59a", "#daeca9"]
def build_project_breakdown_series(reports):
Key = namedtuple("Key", "label url color data")
def get_legend_data(report):
filtered, rate_limited = report.usage_summary
return {
"events": sum(sum(value) for timestamp, value in report.series),
"filtered": filtered,
"rate_limited": rate_limited,
}
# Find the reports with the most total events. (The number of reports to
# keep is the same as the number of colors available to use in the legend.)
instances = map(
operator.itemgetter(0),
sorted(
reports.items(),
key=lambda instance__report: sum(
sum(values) for timestamp, values in instance__report[1][0]
),
reverse=True,
),
)[: len(colors)]
# Start building the list of items to include in the report chart. This
# is a list of [Key, Report] pairs, in *ascending* order of the total sum
# of values in the series. (This is so when we render the series, the
# largest color blocks are at the bottom and it feels appropriately
# weighted.)
selections = map(
lambda instance__color: (
Key(
instance__color[0].slug,
instance__color[0].get_absolute_url(),
instance__color[1],
get_legend_data(reports[instance__color[0]]),
),
reports[instance__color[0]],
),
zip(instances, colors),
)[::-1]
# Collect any reports that weren't in the selection set, merge them
# together and add it at the top (front) of the stack.
overflow = set(reports) - set(instances)
if overflow:
overflow_report = reduce(merge_reports, [reports[instance] for instance in overflow])
selections.insert(
0, (Key("Other", None, "#f2f0fa", get_legend_data(overflow_report)), overflow_report)
)
def summarize(key, points):
total = sum(points)
return [(key, total)] if total else []
# Collect all of the independent series into a single series to make it
# easier to render, resulting in a series where each value is a sequence of
# (key, count) pairs.
series = reduce(
merge_series,
[series_map(functools.partial(summarize, key), report[0]) for key, report in selections],
)
legend = [key for key, value in reversed(selections)]
return {
"points": [(to_datetime(timestamp), value) for timestamp, value in series],
"maximum": max(sum(count for key, count in value) for timestamp, value in series),
"legend": {
"rows": legend,
"total": Key("Total", None, None, reduce(merge_mappings, [key.data for key in legend])),
},
}
def to_context(organization, interval, reports):
report = reduce(merge_reports, reports.values())
series = [(to_datetime(timestamp), Point(*values)) for timestamp, values in report.series]
return {
"series": {
"points": series,
"maximum": max(sum(point) for timestamp, point in series),
"all": sum([sum(point) for timestamp, point in series]),
"resolved": sum([point.resolved for timestamp, point in series]),
},
"distribution": {
"types": list(
zip(
(
DistributionType("New", "#8477e0"),
DistributionType("Reopened", "#6C5FC7"),
DistributionType("Existing", "#534a92"),
),
report.issue_summaries,
)
),
"total": sum(report.issue_summaries),
},
"comparisons": [
("last week", change(report.aggregates[-1], report.aggregates[-2])),
(
"four week average",
change(
report.aggregates[-1],
mean(report.aggregates)
if all(v is not None for v in report.aggregates)
else None,
),
),
],
"projects": {"series": build_project_breakdown_series(reports)},
"calendar": to_calendar(interval, report.calendar_series),
}
def get_percentile(values, percentile):
# XXX: ``values`` must be sorted.
assert 1 >= percentile > 0
if percentile == 1:
index = -1
else:
index = int(math.ceil(len(values) * percentile)) - 1
return values[index]
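# Worked example (illustrative): get_percentile([1, 2, 3, 4], 0.5) == 2
# (index ceil(4 * 0.5) - 1 == 1), while get_percentile([1, 2, 3, 4], 1) == 4
# (the last element).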
def colorize(spectrum, values):
calculate_percentile = functools.partial(get_percentile, sorted(values))
legend = OrderedDict()
width = 1.0 / len(spectrum)
for i, color in enumerate(spectrum, 1):
legend[color] = calculate_percentile(i * width)
find_index = functools.partial(bisect.bisect_left, legend.values())
results = []
for value in values:
results.append((value, spectrum[find_index(value)]))
return legend, results
def to_calendar(interval, series):
start, stop = get_calendar_range(interval, 3)
legend, values = colorize(
[
"#fae5cf",
"#f9ddc2",
"#f9d6b6",
"#f9cfaa",
"#f8c79e",
"#f8bf92",
"#f8b786",
"#f9a66d",
"#f99d60",
"#fa9453",
"#fb8034",
"#fc7520",
"#f9600c",
"#f75500",
],
[value for timestamp, value in series if value is not None],
)
value_color_map = dict(values)
value_color_map[None] = "#F2F2F2"
series_value_map = dict(series)
def get_data_for_date(date):
dt = datetime(date.year, date.month, date.day, tzinfo=pytz.utc)
ts = to_timestamp(dt)
value = series_value_map.get(ts, None)
return (dt, {"value": value, "color": value_color_map[value]})
calendar = Calendar(6)
sheets = []
for year, month in map(index_to_month, range(start, stop + 1)):
weeks = []
for week in calendar.monthdatescalendar(year, month):
weeks.append(map(get_data_for_date, week))
sheets.append((datetime(year, month, 1, tzinfo=pytz.utc), weeks))
return {"legend": list(legend.keys()), "sheets": sheets}
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from py2neo.core import Resource, Node, Rel, Rev, Relationship, Service, Path
from py2neo.error import BindError
def test_can_create_bindable_with_initial_uri():
uri = "http://localhost:7474/db/data/node/1"
bindable = Service()
bindable.bind(uri)
assert bindable.bound
assert bindable.uri == uri
def test_can_create_bindable_with_initial_uri_and_metadata():
uri = "http://localhost:7474/db/data/node/1"
metadata = {"foo": "bar"}
bindable = Service()
bindable.bind(uri, metadata)
assert bindable.bound
assert bindable.uri == uri
assert bindable.resource.metadata == metadata
def test_can_create_bindable_with_initial_uri_template():
uri = "http://localhost:7474/db/data/node/{node_id}"
bindable = Service()
bindable.bind(uri)
assert bindable.bound
assert bindable.uri == uri
def test_cannot_create_bindable_with_initial_uri_template_and_metadata():
uri = "http://localhost:7474/db/data/node/{node_id}"
metadata = {"foo": "bar"}
service = Service()
with pytest.raises(ValueError):
    service.bind(uri, metadata)
def test_default_state_for_node_is_unbound():
node = Node()
assert not node.bound
with pytest.raises(BindError):
_ = node.resource
def test_bound_path_is_bound(graph):
alice = Node(name="Alice")
bob = Node(name="Bob")
carol = Node(name="Carol")
dave = Node(name="Dave")
path = Path(alice, "LOVES", bob, Rev("HATES"), carol, "KNOWS", dave)
graph.create(path)
assert path.bound
def test_unbound_path_is_not_bound():
alice = Node(name="Alice")
bob = Node(name="Bob")
carol = Node(name="Carol")
dave = Node(name="Dave")
path = Path(alice, "LOVES", bob, Rev("HATES"), carol, "KNOWS", dave)
assert not path.bound
def test_can_bind_node_to_resource():
uri = "http://localhost:7474/db/data/node/1"
node = Node()
node.bind(uri)
assert node.bound
assert isinstance(node.resource, Resource)
assert node.resource.uri == uri
node.unbind()
assert not node.bound
with pytest.raises(BindError):
_ = node.resource
def test_can_bind_rel_to_resource():
uri = "http://localhost:7474/db/relationship/1"
rel = Rel()
rel.bind(uri)
assert rel.bound
assert isinstance(rel.resource, Resource)
assert rel.resource.uri == uri
rel.unbind()
assert not rel.bound
with pytest.raises(BindError):
_ = rel.resource
def test_can_bind_rev_to_resource():
uri = "http://localhost:7474/db/relationship/1"
rel = Rev()
rel.bind(uri)
assert rel.bound
assert isinstance(rel.resource, Resource)
assert rel.resource.uri == uri
rel.unbind()
assert not rel.bound
with pytest.raises(BindError):
_ = rel.resource
def test_can_bind_relationship_to_resource():
uri = "http://localhost:7474/db/relationship/1"
metadata = {
"start": "http://localhost:7474/db/node/1",
"end": "http://localhost:7474/db/node/2",
}
relationship = Relationship({}, "", {})
# Pass in metadata to avoid callback to server
relationship.bind(uri, metadata=metadata)
assert relationship.bound
assert isinstance(relationship.resource, Resource)
assert relationship.resource.uri == uri
relationship.unbind()
assert not relationship.bound
with pytest.raises(BindError):
_ = relationship.resource
def test_can_unbind_node_if_not_cached(graph):
node, = graph.create({})
Node.cache.clear()
node.unbind()
assert not node.bound
def test_can_unbind_rel_if_not_cached(graph):
a, b, ab = graph.create({}, {}, (0, "KNOWS", 1))
Rel.cache.clear()
ab.rel.unbind()
assert not ab.bound
def test_can_unbind_relationship_if_not_cached(graph):
a, b, ab = graph.create({}, {}, (0, "KNOWS", 1))
Relationship.cache.clear()
ab.unbind()
assert not ab.bound
def test_can_unbind_relationship_with_already_unbound_nodes(graph):
a, b, ab = graph.create({}, {}, (0, "KNOWS", 1))
a.unbind()
b.unbind()
assert not a.bound
assert not b.bound
ab.unbind()
assert not ab.bound
def test_can_unbind_bound_path(graph):
alice = Node(name="Alice")
bob = Node(name="Bob")
carol = Node(name="Carol")
dave = Node(name="Dave")
path = Path(alice, "LOVES", bob, Rev("HATES"), carol, "KNOWS", dave)
graph.create(path)
path.unbind()
assert not path.bound
def test_can_unbind_unbound_path_without_error():
alice = Node(name="Alice")
bob = Node(name="Bob")
carol = Node(name="Carol")
dave = Node(name="Dave")
path = Path(alice, "LOVES", bob, Rev("HATES"), carol, "KNOWS", dave)
path.unbind()
assert not path.bound
def test_unbinding_rel_also_unbinds_rev(graph):
a, b, ab = graph.create({}, {}, (0, "KNOWS", 1))
rel = ab.rel
assert rel.pair is None
rev = -rel
assert rel.pair is rev
assert rev.pair is rel
assert rel.bound
assert rev.bound
assert rel.resource is rev.resource
rel.unbind()
assert not rel.bound
assert not rev.bound
def test_unbinding_rev_also_unbinds_rel(graph):
a, b, ab = graph.create({}, {}, (0, Rev("KNOWS"), 1))
rev = ab.rel
#assert rev.pair is None
rel = -rev
assert rev.pair is rel
assert rel.pair is rev
assert rev.bound
assert rel.bound
assert rev.resource is rel.resource
rev.unbind()
assert not rev.bound
assert not rel.bound
|
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import struct
import inspect
from nose.tools import ok_, eq_, nottest, raises
from nose.plugins.skip import Skip, SkipTest
from ryu.ofproto import ether, inet
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.packet import Packet
from ryu.lib.packet import icmpv6
from ryu.lib.packet.ipv6 import ipv6
from ryu.lib.packet import packet_utils
from ryu.lib import addrconv
LOG = logging.getLogger(__name__)
def icmpv6_csum(prev, buf):
ph = struct.pack('!16s16sI3xB',
addrconv.ipv6.text_to_bin(prev.src),
addrconv.ipv6.text_to_bin(prev.dst),
prev.payload_length, prev.nxt)
h = bytearray(buf)
struct.pack_into('!H', h, 2, 0)
return packet_utils.checksum(ph + h)
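# The checksum above is computed over the IPv6 pseudo-header (source and
# destination addresses, upper-layer packet length and next-header value)
# followed by the ICMPv6 message with its checksum field zeroed out.
# Illustrative use, mirroring the tests below:
#
#   prev = ipv6(6, 0, 0, payload_length, 58, 255, src_ipv6, dst_ipv6)
#   expected_csum = icmpv6_csum(prev, icmpv6_bytes)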
class Test_icmpv6_header(unittest.TestCase):
type_ = 255
code = 0
csum = 207
buf = '\xff\x00\x00\xcf'
icmp = icmpv6.icmpv6(type_, code, 0)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_, self.icmp.type_)
eq_(self.code, self.icmp.code)
eq_(0, self.icmp.csum)
def test_parser(self):
msg, n, _ = self.icmp.parser(self.buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
eq_(msg.data, None)
eq_(n, None)
def test_serialize(self):
src_ipv6 = 'fe80::200:ff:fe00:ef'
dst_ipv6 = 'fe80::200:ff:fe00:1'
prev = ipv6(6, 0, 0, 4, 58, 255, src_ipv6, dst_ipv6)
buf = self.icmp.serialize(bytearray(), prev)
(type_, code, csum) = struct.unpack(self.icmp._PACK_STR, buffer(buf))
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, self.csum)
@raises(Exception)
def test_malformed_icmpv6(self):
m_short_buf = self.buf[1:self.icmp._MIN_LEN]
self.icmp.parser(m_short_buf)
class Test_icmpv6_echo_request(unittest.TestCase):
type_ = 128
code = 0
csum = 0xa572
id_ = 0x7620
seq = 0
data = '\x01\xc9\xe7\x36\xd3\x39\x06\x00'
buf = '\x80\x00\xa5\x72\x76\x20\x00\x00'
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
echo = icmpv6.echo(0, 0)
eq_(echo.id, 0)
eq_(echo.seq, 0)
eq_(echo.data, None)
def _test_parser(self, data=None):
buf = self.buf + str(data or '')
msg, n, _ = icmpv6.icmpv6.parser(buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
eq_(msg.data.id, self.id_)
eq_(msg.data.seq, self.seq)
eq_(msg.data.data, data)
eq_(n, None)
def test_parser_without_data(self):
self._test_parser()
def test_parser_with_data(self):
self._test_parser(self.data)
def _test_serialize(self, echo_data=None):
buf = self.buf + str(echo_data or '')
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
prev = ipv6(6, 0, 0, len(buf), 64, 255, src_ipv6, dst_ipv6)
echo_csum = icmpv6_csum(prev, buf)
echo = icmpv6.echo(self.id_, self.seq, echo_data)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, echo)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
(id_, seq) = struct.unpack_from(echo._PACK_STR, buf, icmp._MIN_LEN)
data = buf[(icmp._MIN_LEN + echo._MIN_LEN):]
data = data if len(data) != 0 else None
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, echo_csum)
eq_(id_, self.id_)
eq_(seq, self.seq)
eq_(data, echo_data)
def test_serialize_without_data(self):
self._test_serialize()
def test_serialize_with_data(self):
self._test_serialize(self.data)
def test_to_string(self):
ec = icmpv6.echo(self.id_, self.seq, self.data)
ic = icmpv6.icmpv6(self.type_, self.code, self.csum, ec)
echo_values = {'id': self.id_,
'seq': self.seq,
'data': self.data}
_echo_str = ','.join(['%s=%s' % (k, repr(echo_values[k]))
for k, v in inspect.getmembers(ec)
if k in echo_values])
echo_str = '%s(%s)' % (icmpv6.echo.__name__, _echo_str)
icmp_values = {'type_': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': echo_str}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _ic_str)
eq_(str(ic), ic_str)
eq_(repr(ic), ic_str)
class Test_icmpv6_echo_reply(Test_icmpv6_echo_request):
def setUp(self):
self.type_ = 129
self.csum = 0xa472
self.buf = '\x81\x00\xa4\x72\x76\x20\x00\x00'
class Test_icmpv6_neighbor_solicit(unittest.TestCase):
type_ = 135
code = 0
csum = 0x952d
res = 0
dst = '3ffe:507:0:1:200:86ff:fe05:80da'
nd_type = 1
nd_length = 1
nd_hw_src = '00:60:97:07:69:ea'
data = '\x01\x01\x00\x60\x97\x07\x69\xea'
buf = '\x87\x00\x95\x2d\x00\x00\x00\x00' \
+ '\x3f\xfe\x05\x07\x00\x00\x00\x01' \
+ '\x02\x00\x86\xff\xfe\x05\x80\xda'
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
nd = icmpv6.nd_neighbor(self.res, self.dst)
eq_(nd.res >> 29, self.res)
eq_(nd.dst, self.dst)
eq_(nd.type_, None)
eq_(nd.length, None)
eq_(nd.data, None)
def _test_parser(self, data=None):
buf = self.buf + str(data or '')
msg, n, _ = icmpv6.icmpv6.parser(buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
eq_(msg.data.res >> 29, self.res)
eq_(addrconv.ipv6.text_to_bin(msg.data.dst),
addrconv.ipv6.text_to_bin(self.dst))
eq_(n, None)
if data:
nd = msg.data
eq_(nd.type_, self.nd_type)
eq_(nd.length, self.nd_length)
eq_(nd.data.hw_src, self.nd_hw_src)
eq_(nd.data.data, None)
def test_parser_without_data(self):
self._test_parser()
def test_parser_with_data(self):
self._test_parser(self.data)
def test_serialize_without_data(self):
nd = icmpv6.nd_neighbor(self.res, self.dst)
prev = ipv6(6, 0, 0, 24, 64, 255, self.src_ipv6, self.dst_ipv6)
nd_csum = icmpv6_csum(prev, self.buf)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, nd)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
(res, dst) = struct.unpack_from(nd._PACK_STR, buf, icmp._MIN_LEN)
data = buf[(icmp._MIN_LEN + nd._MIN_LEN):]
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, nd_csum)
eq_(res >> 29, self.res)
eq_(dst, addrconv.ipv6.text_to_bin(self.dst))
eq_(data, '')
def test_serialize_with_data(self):
nd_opt = icmpv6.nd_option_la(self.nd_hw_src)
nd = icmpv6.nd_neighbor(
self.res, self.dst, self.nd_type, self.nd_length, nd_opt)
prev = ipv6(6, 0, 0, 32, 64, 255, self.src_ipv6, self.dst_ipv6)
nd_csum = icmpv6_csum(prev, self.buf + self.data)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, nd)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
(res, dst) = struct.unpack_from(nd._PACK_STR, buf, icmp._MIN_LEN)
(nd_type, nd_length, nd_hw_src) = struct.unpack_from(
'!BB6s', buf, icmp._MIN_LEN + nd._MIN_LEN)
data = buf[(icmp._MIN_LEN + nd._MIN_LEN + 8):]
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, nd_csum)
eq_(res >> 29, self.res)
eq_(dst, addrconv.ipv6.text_to_bin(self.dst))
eq_(nd_type, self.nd_type)
eq_(nd_length, self.nd_length)
eq_(nd_hw_src, addrconv.mac.text_to_bin(self.nd_hw_src))
def test_to_string(self):
nd_opt = icmpv6.nd_option_la(self.nd_hw_src)
nd = icmpv6.nd_neighbor(
self.res, self.dst, self.nd_type, self.nd_length, nd_opt)
ic = icmpv6.icmpv6(self.type_, self.code, self.csum, nd)
nd_opt_values = {'hw_src': self.nd_hw_src,
'data': None}
_nd_opt_str = ','.join(['%s=%s' % (k, repr(nd_opt_values[k]))
for k, v in inspect.getmembers(nd_opt)
if k in nd_opt_values])
nd_opt_str = '%s(%s)' % (icmpv6.nd_option_la.__name__, _nd_opt_str)
nd_values = {'res': repr(nd.res),
'dst': repr(self.dst),
'type_': repr(self.nd_type),
'length': repr(self.nd_length),
'data': nd_opt_str}
_nd_str = ','.join(['%s=%s' % (k, nd_values[k])
for k, v in inspect.getmembers(nd)
if k in nd_values])
nd_str = '%s(%s)' % (icmpv6.nd_neighbor.__name__, _nd_str)
icmp_values = {'type_': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': nd_str}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _ic_str)
eq_(str(ic), ic_str)
eq_(repr(ic), ic_str)
class Test_icmpv6_neighbor_advert(Test_icmpv6_neighbor_solicit):
def setUp(self):
self.type_ = 136
self.csum = 0xb8ba
self.res = 7
self.dst = '3ffe:507:0:1:260:97ff:fe07:69ea'
self.nd_type = 2
self.nd_length = 1
self.nd_data = None
self.nd_hw_src = '00:60:97:07:69:ea'
self.data = '\x02\x01\x00\x60\x97\x07\x69\xea'
self.buf = '\x88\x00\xb8\xba\xe0\x00\x00\x00' \
+ '\x3f\xfe\x05\x07\x00\x00\x00\x01' \
+ '\x02\x60\x97\xff\xfe\x07\x69\xea'
class Test_icmpv6_router_solicit(unittest.TestCase):
type_ = 133
code = 0
csum = 0x97d9
res = 0
nd_type = 1
nd_length = 1
nd_hw_src = '12:2d:a5:6d:bc:0f'
data = '\x00\x00\x00\x00\x01\x01\x12\x2d\xa5\x6d\xbc\x0f'
buf = '\x85\x00\x97\xd9'
src_ipv6 = '3ffe:507:0:1:200:86ff:fe05:80da'
dst_ipv6 = '3ffe:501:0:1001::2'
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
rs = icmpv6.nd_router_solicit(self.res)
eq_(rs.res, self.res)
eq_(rs.type_, None)
eq_(rs.length, None)
eq_(rs.data, None)
def _test_parser(self, data=None):
buf = self.buf + str(data or '')
msg, n, _ = icmpv6.icmpv6.parser(buf)
eq_(msg.type_, self.type_)
eq_(msg.code, self.code)
eq_(msg.csum, self.csum)
if data is not None:
eq_(msg.data.res[0], self.res)
eq_(n, None)
if data:
rs = msg.data
eq_(rs.type_, self.nd_type)
eq_(rs.length, self.nd_length)
eq_(rs.data.hw_src, self.nd_hw_src)
eq_(rs.data.data, None)
def test_parser_without_data(self):
self._test_parser()
def test_parser_with_data(self):
self._test_parser(self.data)
def test_serialize_without_data(self):
rs = icmpv6.nd_router_solicit(self.res)
prev = ipv6(6, 0, 0, 8, 64, 255, self.src_ipv6, self.dst_ipv6)
rs_csum = icmpv6_csum(prev, self.buf)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, rs)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
res = struct.unpack_from(rs._PACK_STR, buf, icmp._MIN_LEN)
data = buf[(icmp._MIN_LEN + rs._MIN_LEN):]
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, rs_csum)
eq_(res[0], self.res)
eq_(data, '')
def test_serialize_with_data(self):
nd_opt = icmpv6.nd_option_la(self.nd_hw_src)
rs = icmpv6.nd_router_solicit(self.res, self.nd_type, self.nd_length,
nd_opt)
prev = ipv6(6, 0, 0, 16, 64, 255, self.src_ipv6, self.dst_ipv6)
rs_csum = icmpv6_csum(prev, self.buf + self.data)
icmp = icmpv6.icmpv6(self.type_, self.code, 0, rs)
buf = buffer(icmp.serialize(bytearray(), prev))
(type_, code, csum) = struct.unpack_from(icmp._PACK_STR, buf, 0)
res = struct.unpack_from(rs._PACK_STR, buf, icmp._MIN_LEN)
(nd_type, nd_length, nd_hw_src) = struct.unpack_from(
'!BB6s', buf, icmp._MIN_LEN + rs._MIN_LEN)
data = buf[(icmp._MIN_LEN + rs._MIN_LEN + 8):]
eq_(type_, self.type_)
eq_(code, self.code)
eq_(csum, rs_csum)
eq_(res[0], self.res)
eq_(nd_type, self.nd_type)
eq_(nd_length, self.nd_length)
eq_(nd_hw_src, addrconv.mac.text_to_bin(self.nd_hw_src))
def test_to_string(self):
nd_opt = icmpv6.nd_option_la(self.nd_hw_src)
rs = icmpv6.nd_router_solicit(
self.res, self.nd_type, self.nd_length, nd_opt)
ic = icmpv6.icmpv6(self.type_, self.code, self.csum, rs)
nd_opt_values = {'hw_src': self.nd_hw_src,
'data': None}
_nd_opt_str = ','.join(['%s=%s' % (k, repr(nd_opt_values[k]))
for k, v in inspect.getmembers(nd_opt)
if k in nd_opt_values])
nd_opt_str = '%s(%s)' % (icmpv6.nd_option_la.__name__, _nd_opt_str)
rs_values = {'res': repr(rs.res),
'type_': repr(self.nd_type),
'length': repr(self.nd_length),
'data': nd_opt_str}
_rs_str = ','.join(['%s=%s' % (k, rs_values[k])
for k, v in inspect.getmembers(rs)
if k in rs_values])
rs_str = '%s(%s)' % (icmpv6.nd_router_solicit.__name__, _rs_str)
icmp_values = {'type_': repr(self.type_),
'code': repr(self.code),
'csum': repr(self.csum),
'data': rs_str}
_ic_str = ','.join(['%s=%s' % (k, icmp_values[k])
for k, v in inspect.getmembers(ic)
if k in icmp_values])
ic_str = '%s(%s)' % (icmpv6.icmpv6.__name__, _ic_str)
eq_(str(ic), ic_str)
eq_(repr(ic), ic_str)
|
|
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import re
from oslo.config import cfg
from oslo import messaging
import six
import stevedore
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import servers as schema_servers
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.image import glance
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
CONF = cfg.CONF
CONF.import_opt('enable_instance_password',
'nova.api.openstack.compute.servers')
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
CONF.import_opt('extensions_blacklist', 'nova.api.openstack', group='osapi_v3')
CONF.import_opt('extensions_whitelist', 'nova.api.openstack', group='osapi_v3')
LOG = logging.getLogger(__name__)
authorizer = extensions.core_authorizer('compute:v3', 'servers')
class ServersController(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
EXTENSION_CREATE_NAMESPACE = 'nova.api.v3.extensions.server.create'
EXTENSION_DESERIALIZE_EXTRACT_SERVER_NAMESPACE = (
'nova.api.v3.extensions.server.create.deserialize')
EXTENSION_REBUILD_NAMESPACE = 'nova.api.v3.extensions.server.rebuild'
EXTENSION_DESERIALIZE_EXTRACT_REBUILD_NAMESPACE = (
'nova.api.v3.extensions.server.rebuild.deserialize')
EXTENSION_UPDATE_NAMESPACE = 'nova.api.v3.extensions.server.update'
_view_builder_class = views_servers.ViewBuilderV3
schema_server_create = schema_servers.base_create
schema_server_update = schema_servers.base_update
schema_server_rebuild = schema_servers.base_rebuild
@staticmethod
def _add_location(robj):
        # Nothing to do if the response contains no server entity.
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = utils.utf8(link[0]['href'])
        # Return the response object for convenient chaining.
return robj
def __init__(self, **kwargs):
def _check_load_extension(required_function):
def check_whiteblack_lists(ext):
# Check whitelist is either empty or if not then the extension
# is in the whitelist
if (not CONF.osapi_v3.extensions_whitelist or
ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
# Check the extension is not in the blacklist
if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
return True
else:
LOG.warn(_LW("Not loading %s because it is "
"in the blacklist"), ext.obj.alias)
return False
else:
LOG.warn(
_LW("Not loading %s because it is not in the "
"whitelist"), ext.obj.alias)
return False
def check_load_extension(ext):
if isinstance(ext.obj, extensions.V3APIExtensionBase):
# Filter out for the existence of the required
# function here rather than on every request. We
# don't have a new abstract base class to reduce
# duplication in the extensions as they may want
# to implement multiple server (and other) entry
                    # points.
if hasattr(ext.obj, required_function):
LOG.debug('extension %(ext_alias)s detected by '
'servers extension for function %(func)s',
{'ext_alias': ext.obj.alias,
'func': required_function})
return check_whiteblack_lists(ext)
else:
LOG.debug(
'extension %(ext_alias)s is missing %(func)s',
{'ext_alias': ext.obj.alias,
'func': required_function})
return False
else:
return False
return check_load_extension
self.extension_info = kwargs.pop('extension_info')
super(ServersController, self).__init__(**kwargs)
self.compute_api = compute.API()
# Look for implementation of extension point of server creation
self.create_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_CREATE_NAMESPACE,
check_func=_check_load_extension('server_create'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.create_extension_manager):
LOG.debug("Did not find any server create extensions")
# Look for implementation of extension point of server rebuild
self.rebuild_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_REBUILD_NAMESPACE,
check_func=_check_load_extension('server_rebuild'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.rebuild_extension_manager):
LOG.debug("Did not find any server rebuild extensions")
# Look for implementation of extension point of server update
self.update_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_UPDATE_NAMESPACE,
check_func=_check_load_extension('server_update'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.update_extension_manager):
LOG.debug("Did not find any server update extensions")
# Look for API schema of server create extension
self.create_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_CREATE_NAMESPACE,
check_func=_check_load_extension('get_server_create_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.create_schema_manager):
self.create_schema_manager.map(self._create_extension_schema,
self.schema_server_create)
else:
LOG.debug("Did not find any server create schemas")
# Look for API schema of server update extension
self.update_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_UPDATE_NAMESPACE,
check_func=_check_load_extension('get_server_update_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.update_schema_manager):
self.update_schema_manager.map(self._update_extension_schema,
self.schema_server_update)
else:
LOG.debug("Did not find any server update schemas")
# Look for API schema of server rebuild extension
self.rebuild_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_REBUILD_NAMESPACE,
check_func=_check_load_extension('get_server_rebuild_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.rebuild_schema_manager):
self.rebuild_schema_manager.map(self._rebuild_extension_schema,
self.schema_server_rebuild)
else:
LOG.debug("Did not find any server rebuild schemas")
@extensions.expected_errors((400, 403))
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@extensions.expected_errors((400, 403))
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options())
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
search_opts.pop('status', None)
if 'status' in req.GET.keys():
statuses = req.GET.getall('status')
states = common.task_and_vm_state_from_status(statuses)
vm_state, task_state = states
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes-since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes-since'])
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes-since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
        # ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted instances according to the API spec.
if 'deleted' not in search_opts:
if 'changes-since' not in search_opts:
# No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
else:
# Convert deleted filter value to a valid boolean.
# Return non-deleted servers if an invalid value
# is passed with deleted filter.
search_opts['deleted'] = strutils.bool_from_string(
search_opts['deleted'], default=False)
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPForbidden(explanation=msg)
# If tenant_id is passed as a search parameter this should
# imply that all_tenants is also enabled unless explicitly
# disabled. Note that the tenant_id parameter is filtered out
# by remove_invalid_options above unless the requestor is an
# admin.
if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
# We do not need to add the all_tenants flag if the tenant
# id associated with the token is the tenant id
# specified. This is done so a request that does not need
# the all_tenants flag does not fail because of lack of
# policy permission for compute:get_all_tenants when it
# doesn't actually need it.
if context.project_id != search_opts.get('tenant_id'):
search_opts['all_tenants'] = 1
        # If all_tenants is passed with 0 or false as the value,
        # remove it from the search options. An empty value for
        # all_tenants is treated as enabling the feature.
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
if not strutils.bool_from_string(all_tenants, True):
del search_opts['all_tenants']
except ValueError as err:
raise exception.InvalidInput(six.text_type(err))
if 'all_tenants' in search_opts:
policy.enforce(context, 'compute:get_all_tenants',
{'project_id': context.project_id,
'user_id': context.user_id})
del search_opts['all_tenants']
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
try:
instance_list = self.compute_api.get_all(context,
search_opts=search_opts, limit=limit, marker=marker,
want_objects=True, expected_attrs=['pci_devices'])
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
            log_msg = _("Flavor '%s' could not be found")
LOG.debug(log_msg, search_opts['flavor'])
# TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
instance_list = objects.InstanceList(objects=[])
if is_detail:
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
instance = common.get_instance(self.compute_api, context,
instance_uuid, want_objects=True,
expected_attrs=['pci_devices'])
req.cache_db_instance(instance)
return instance
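    # Strip and length-check a string field (minimum length 1), turning
    # validation errors into HTTP 400 responses.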
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
network_uuids = []
for network in requested_networks:
request = objects.NetworkRequest()
try:
# fixed IP address is optional
# if the fixed IP address is not provided then
# it will use one of the available IP address from the network
request.address = network.get('fixed_ip', None)
request.port_id = network.get('port', None)
if request.port_id:
request.network_id = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument: port")
raise exc.HTTPBadRequest(explanation=msg)
if request.address is not None:
msg = _("Specified Fixed IP '%(addr)s' cannot be used "
"with port '%(port)s': port already has "
"a Fixed IP allocated.") % {
"addr": request.address,
"port": request.port_id}
raise exc.HTTPBadRequest(explanation=msg)
else:
request.network_id = network['uuid']
if (not request.port_id and
not uuidutils.is_uuid_like(request.network_id)):
br_uuid = request.network_id.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % request.network_id
raise exc.HTTPBadRequest(explanation=msg)
if (request.network_id and
request.network_id in network_uuids):
expl = (_("Duplicate networks"
" (%s) are not allowed") %
request.network_id)
raise exc.HTTPBadRequest(explanation=expl)
network_uuids.append(request.network_id)
networks.append(request)
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return objects.NetworkRequestList(objects=networks)
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
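    # Remove whitespace, reject input that is not strict base64, and return
    # None instead of raising on bad data.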
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
@extensions.expected_errors(404)
def show(self, req, id):
"""Returns server details by server id."""
context = req.environ['nova.context']
instance = common.get_instance(self.compute_api, context, id,
want_objects=True,
expected_attrs=['pci_devices'])
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
@extensions.expected_errors((400, 403, 409, 413))
@wsgi.response(202)
@validation.schema(schema_server_create)
def create(self, req, body):
"""Creates a new server for a given user."""
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
name = server_dict['name']
# Arguments to be passed to instance create function
create_kwargs = {}
# Query extensions which want to manipulate the keyword
# arguments.
# NOTE(cyeoh): This is the hook that extensions use
# to replace the extension specific code below.
# When the extensions are ported this will also result
# in some convenience function from this class being
# moved to the extension
if list(self.create_extension_manager):
self.create_extension_manager.map(self._create_extension_point,
server_dict, create_kwargs, body)
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
# NOTE(cyeoh): Although an extension can set
# return_reservation_id in order to request that a reservation
# id be returned to the client instead of the newly created
# instance information we do not want to pass this parameter
# to the compute create call which always returns both. We use
# this flag after the instance create call to determine what
# to return to the client
return_reservation_id = create_kwargs.pop('return_reservation_id',
False)
requested_networks = None
# TODO(cyeoh): bp v3-api-core-as-extensions
# Replace with an extension point when the os-networks
# extension is ported. Currently reworked
# to take into account is_neutron
# if (self.ext_mgr.is_loaded('os-networks')
# or utils.is_neutron()):
# requested_networks = server_dict.get('networks')
if utils.is_neutron():
requested_networks = server_dict.get('networks')
if requested_networks is not None:
requested_networks = self._get_requested_networks(
requested_networks)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
try:
inst_type = flavors.get_flavor_by_flavor_id(
flavor_id, ctxt=context, read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=name,
metadata=server_dict.get('metadata', {}),
admin_password=password,
requested_networks=requested_networks,
check_server_group_quota=True,
**create_kwargs)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound as error:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ExternalNetworkAttachForbidden as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % unicode(error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.InvalidRequest,
exception.InvalidVolume,
exception.MultiplePortsNotApplicable,
exception.InvalidFixedIpAndMaxCountRequest,
exception.InstanceUserDataMalformed,
exception.InstanceUserDataTooLarge,
exception.PortNotFound,
exception.FixedIpAlreadyInUse,
exception.SecurityGroupNotFound,
exception.PortRequiresFixedIP,
exception.NetworkRequiresSubnet,
exception.NetworkNotFound,
exception.InvalidBDMSnapshot,
exception.InvalidBDMVolume,
exception.InvalidBDMImage,
exception.InvalidBDMBootSequence,
exception.InvalidBDMLocalsLimit,
exception.InvalidBDMVolumeNotBootable) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.NetworkAmbiguous,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if return_reservation_id:
# NOTE(cyeoh): In v3 reservation_id was wrapped in
# servers_reservation but this is reverted for V2 API
# compatibility. In the long term with the tasks API we
# will probably just drop the concept of reservation_id
return wsgi.ResponseObject({'reservation_id': resv_id})
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
# NOTE(gmann): Parameter 'req_body' is placed to handle scheduler_hint
    # extension for V2.1. No other extension is supposed to use this, as
# it will be removed soon.
def _create_extension_point(self, ext, server_dict,
create_kwargs, req_body):
handler = ext.obj
LOG.debug("Running _create_extension_point for %s", ext.obj)
handler.server_create(server_dict, create_kwargs, req_body)
def _rebuild_extension_point(self, ext, rebuild_dict, rebuild_kwargs):
handler = ext.obj
LOG.debug("Running _rebuild_extension_point for %s", ext.obj)
handler.server_rebuild(rebuild_dict, rebuild_kwargs)
def _resize_extension_point(self, ext, resize_dict, resize_kwargs):
handler = ext.obj
LOG.debug("Running _resize_extension_point for %s", ext.obj)
handler.server_resize(resize_dict, resize_kwargs)
def _update_extension_point(self, ext, update_dict, update_kwargs):
handler = ext.obj
LOG.debug("Running _update_extension_point for %s", ext.obj)
handler.server_update(update_dict, update_kwargs)
def _create_extension_schema(self, ext, create_schema):
handler = ext.obj
LOG.debug("Running _create_extension_schema for %s", ext.obj)
schema = handler.get_server_create_schema()
create_schema['properties']['server']['properties'].update(schema)
def _update_extension_schema(self, ext, update_schema):
handler = ext.obj
LOG.debug("Running _update_extension_schema for %s", ext.obj)
schema = handler.get_server_update_schema()
update_schema['properties']['server']['properties'].update(schema)
def _rebuild_extension_schema(self, ext, rebuild_schema):
handler = ext.obj
LOG.debug("Running _rebuild_extension_schema for %s", ext.obj)
schema = handler.get_server_rebuild_schema()
rebuild_schema['properties']['rebuild']['properties'].update(schema)
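    # Soft-delete when reclaim_instance_interval is set (falling back to a
    # hard delete if the instance was never active); otherwise delete
    # immediately.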
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
@extensions.expected_errors((400, 404))
@validation.schema(schema_server_update)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
ctxt = req.environ['nova.context']
update_dict = {}
if 'name' in body['server']:
update_dict['display_name'] = body['server']['name']
# TODO(oomichi): The following host_id validation code can be removed
# when setting "'additionalProperties': False" in base_update schema.
if 'host_id' in body['server']:
msg = _("host_id cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
if list(self.update_extension_manager):
self.update_extension_manager.map(self._update_extension_point,
body['server'], update_dict)
instance = common.get_instance(self.compute_api, ctxt, id,
want_objects=True,
expected_attrs=['pci_devices'])
try:
# NOTE(mikal): this try block needs to stay because save() still
# might throw an exception.
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
# NOTE(gmann): Returns 204 for backwards compatibility but should be 202
    # for representing an async API, as this API just accepts the request and
    # asks the hypervisor driver to complete it asynchronously.
@extensions.expected_errors((400, 404, 409))
@wsgi.response(204)
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize')
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize')
return webob.Response(status_int=202)
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
if not valid_reboot_types.count(reboot_type):
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeDisk as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize')
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
def _image_from_req_data(self, server_dict, create_kwargs):
"""Get image data from the request or raise appropriate
exceptions.
The field imageRef is mandatory when no block devices have been
defined and must be a proper uuid when present.
"""
image_href = server_dict.get('imageRef')
if not image_href and create_kwargs.get('block_device_mapping'):
return ''
elif image_href:
return self._image_uuid_from_href(unicode(image_href))
else:
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
flavor_ref = data['server']['flavorRef']
return common.get_id_from_href(flavor_ref)
@extensions.expected_errors((400, 401, 403, 404, 409))
@wsgi.response(202)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
resize_dict = body['resize']
try:
flavor_ref = str(resize_dict["flavorRef"])
if not flavor_ref:
msg = _("Resize request has invalid 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
resize_kwargs = {}
return self._resize(req, id, flavor_ref, **resize_kwargs)
@extensions.expected_errors((400, 403, 404, 409, 413))
@wsgi.response(202)
@wsgi.action('rebuild')
@validation.schema(schema_server_rebuild)
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
image_href = rebuild_dict["imageRef"]
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(rebuild_dict)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {
'name': 'display_name',
'metadata': 'metadata',
}
rebuild_kwargs = {}
if 'preserve_ephemeral' in rebuild_dict:
rebuild_kwargs['preserve_ephemeral'] = strutils.bool_from_string(
rebuild_dict['preserve_ephemeral'], strict=True)
if list(self.rebuild_extension_manager):
self.rebuild_extension_manager.map(self._rebuild_extension_point,
rebuild_dict, rebuild_kwargs)
for request_attribute, instance_attribute in attr_map.items():
try:
rebuild_kwargs[instance_attribute] = rebuild_dict[
request_attribute]
except (KeyError, TypeError):
pass
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
**rebuild_kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild')
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
# Add on the admin_password attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.response(202)
@wsgi.action('createImage')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
entity = body.get("createImage", {})
image_name = entity.get("name")
if not image_name:
msg = _("createImage entity requires name attribute")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, req, id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
img = instance['image_ref']
if not img:
properties = bdms.root_metadata(
context, self.compute_api.image_api,
self.compute_api.volume_api)
image_meta = {'properties': properties}
else:
image_meta = self.compute_api.image_api.get(context, img)
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_meta,
image_name,
extra_properties=props)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage')
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
image_ref = glance.generate_image_url(image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['adminPass']
except KeyError:
password = utils.generate_password()
return password
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
def _get_instance(self, context, instance_uuid):
try:
attrs = ['system_metadata', 'metadata']
return objects.Instance.get_by_uuid(context, instance_uuid,
expected_attrs=attrs)
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
@extensions.expected_errors((404, 409))
@wsgi.action('os-start')
def _start_server(self, req, id, body):
"""Start an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'start')
LOG.debug('start instance', instance=instance)
try:
self.compute_api.start(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked,
exception.InstanceInvalidState) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-stop')
def _stop_server(self, req, id, body):
"""Stop an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'stop')
LOG.debug('stop instance', instance=instance)
try:
self.compute_api.stop(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked,
exception.InstanceInvalidState) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
LOG.debug("Removing options '%s' from query",
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
class Servers(extensions.V3APIExtensionBase):
"""Servers."""
name = "Servers"
alias = "servers"
version = 1
def get_resources(self):
member_actions = {'action': 'POST'}
collection_actions = {'detail': 'GET'}
resources = [
extensions.ResourceExtension(
'servers',
ServersController(extension_info=self.extension_info),
member_name='server', collection_actions=collection_actions,
member_actions=member_actions)]
return resources
def get_controller_extensions(self):
return []
|
|
import base64
import logging
import re
import time
from html import unescape as html_unescape
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
import streamlink
from streamlink.exceptions import FatalPluginError
from streamlink.plugin import Plugin, PluginArgument, PluginArguments
from streamlink.plugin.api import validate
from streamlink.plugin.api.utils import itertags, parse_json
from streamlink.plugin.api.validate import Schema
from streamlink.stream.dash import DASHStream
log = logging.getLogger(__name__)
class SteamLoginFailed(Exception):
pass
class SteamBroadcastPlugin(Plugin):
_url_re = re.compile(r"https?://steamcommunity.com/broadcast/watch/(\d+)")
_steamtv_url_re = re.compile(r"https?://steam.tv/(\w+)")
_watch_broadcast_url = "https://steamcommunity.com/broadcast/watch/"
_get_broadcast_url = "https://steamcommunity.com/broadcast/getbroadcastmpd/"
_user_agent = "streamlink/{}".format(streamlink.__version__)
_broadcast_schema = Schema({
"success": validate.any("ready", "unavailable", "waiting", "waiting_to_start", "waiting_for_start"),
"retry": int,
"broadcastid": validate.any(validate.text, int),
validate.optional("url"): validate.url(),
validate.optional("viewertoken"): validate.text
})
_get_rsa_key_url = "https://steamcommunity.com/login/getrsakey/"
_rsa_key_schema = validate.Schema({
"publickey_exp": validate.all(validate.text, validate.transform(lambda x: int(x, 16))),
"publickey_mod": validate.all(validate.text, validate.transform(lambda x: int(x, 16))),
"success": True,
"timestamp": validate.text,
"token_gid": validate.text
})
_dologin_url = "https://steamcommunity.com/login/dologin/"
_dologin_schema = validate.Schema({
"success": bool,
"requires_twofactor": bool,
validate.optional("message"): validate.text,
validate.optional("emailauth_needed"): bool,
validate.optional("emaildomain"): validate.text,
validate.optional("emailsteamid"): validate.text,
validate.optional("login_complete"): bool,
validate.optional("captcha_needed"): bool,
validate.optional("captcha_gid"): validate.any(validate.text, int)
})
_captcha_url = "https://steamcommunity.com/public/captcha.php?gid={}"
arguments = PluginArguments(
PluginArgument(
"email",
metavar="EMAIL",
requires=["password"],
help="""
A Steam account email address to access friends/private streams
"""
),
PluginArgument(
"password",
metavar="PASSWORD",
sensitive=True,
help="""
A Steam account password to use with --steam-email.
"""
))
def __init__(self, url):
super().__init__(url)
self.session.http.headers["User-Agent"] = self._user_agent
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None or cls._steamtv_url_re.match(url) is not None
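    # Millisecond timestamp string used as a cache-busting parameter on
    # Steam login requests.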
@property
def donotcache(self):
return str(int(time.time() * 1000))
def encrypt_password(self, email, password):
"""
Get the RSA key for the user and encrypt the users password
:param email: steam account
:param password: password for account
:return: encrypted password
"""
res = self.session.http.get(self._get_rsa_key_url, params=dict(username=email, donotcache=self.donotcache))
rsadata = self.session.http.json(res, schema=self._rsa_key_schema)
rsa = RSA.construct((rsadata["publickey_mod"], rsadata["publickey_exp"]))
cipher = PKCS1_v1_5.new(rsa)
return base64.b64encode(cipher.encrypt(password.encode("utf8"))), rsadata["timestamp"]
def dologin(self, email, password, emailauth="", emailsteamid="", captchagid="-1", captcha_text="", twofactorcode=""):
"""
Logs in to Steam
"""
epassword, rsatimestamp = self.encrypt_password(email, password)
login_data = {
'username': email,
"password": epassword,
"emailauth": emailauth,
"loginfriendlyname": "Streamlink",
"captchagid": captchagid,
"captcha_text": captcha_text,
"emailsteamid": emailsteamid,
"rsatimestamp": rsatimestamp,
"remember_login": True,
"donotcache": self.donotcache,
"twofactorcode": twofactorcode
}
res = self.session.http.post(self._dologin_url, data=login_data)
resp = self.session.http.json(res, schema=self._dologin_schema)
if not resp["success"]:
if resp.get("captcha_needed"):
# special case for captcha
captchagid = resp["captcha_gid"]
log.error("Captcha result required, open this URL to see the captcha: {}".format(
self._captcha_url.format(captchagid)))
try:
captcha_text = self.input_ask("Captcha text")
except FatalPluginError:
captcha_text = None
if not captcha_text:
return False
else:
# If the user must enter the code that was emailed to them
if resp.get("emailauth_needed"):
if not emailauth:
try:
emailauth = self.input_ask("Email auth code required")
except FatalPluginError:
emailauth = None
if not emailauth:
return False
else:
raise SteamLoginFailed("Email auth key error")
# If the user must enter a two factor auth code
if resp.get("requires_twofactor"):
try:
twofactorcode = self.input_ask("Two factor auth code required")
except FatalPluginError:
twofactorcode = None
if not twofactorcode:
return False
if resp.get("message"):
raise SteamLoginFailed(resp["message"])
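            # Retry the login with any additional credentials gathered above
            # (captcha text, email auth code, two-factor code).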
return self.dologin(email, password,
emailauth=emailauth,
emailsteamid=resp.get("emailsteamid", ""),
captcha_text=captcha_text,
captchagid=captchagid,
twofactorcode=twofactorcode)
elif resp.get("login_complete"):
return True
else:
            log.error("Something went wrong while logging in to Steam")
return False
def login(self, email, password):
log.info("Attempting to login to Steam as {}".format(email))
return self.dologin(email, password)
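    # Fetch the broadcast MPD info for the given steamid and validate the
    # JSON response against _broadcast_schema.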
def _get_broadcast_stream(self, steamid, viewertoken=0, sessionid=None):
log.debug("Getting broadcast stream: sessionid={0}".format(sessionid))
res = self.session.http.get(self._get_broadcast_url,
params=dict(broadcastid=0,
steamid=steamid,
viewertoken=viewertoken,
sessionid=sessionid))
return self.session.http.json(res, schema=self._broadcast_schema)
def _get_streams(self):
streamdata = None
if self.get_option("email"):
if self.login(self.get_option("email"), self.get_option("password")):
log.info("Logged in as {0}".format(self.get_option("email")))
self.save_cookies(lambda c: "steamMachineAuth" in c.name)
# Handle steam.tv URLs
if self._steamtv_url_re.match(self.url) is not None:
# extract the steam ID from the page
res = self.session.http.get(self.url)
for div in itertags(res.text, 'div'):
if div.attributes.get("id") == "webui_config":
broadcast_data = html_unescape(div.attributes.get("data-broadcast"))
steamid = parse_json(broadcast_data).get("steamid")
self.url = self._watch_broadcast_url + steamid
# extract the steam ID from the URL
steamid = self._url_re.match(self.url).group(1)
res = self.session.http.get(self.url) # get the page to set some cookies
sessionid = res.cookies.get('sessionid')
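        # Poll the broadcast endpoint until the stream is ready or reported
        # unavailable, sleeping between retries.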
while streamdata is None or streamdata["success"] in ("waiting", "waiting_for_start"):
streamdata = self._get_broadcast_stream(steamid,
sessionid=sessionid)
if streamdata["success"] == "ready":
return DASHStream.parse_manifest(self.session, streamdata["url"])
elif streamdata["success"] == "unavailable":
log.error("This stream is currently unavailable")
return
else:
r = streamdata["retry"] / 1000.0
log.info("Waiting for stream, will retry again in {} seconds...".format(r))
time.sleep(r)
__plugin__ = SteamBroadcastPlugin
|
|
# Copyright David Abrahams 2004.
# Copyright Daniel Wallin 2006.
# Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import os
import tempfile
import litre
import re
import sys
import traceback
# Thanks to Jean Brouwers for this snippet
def _caller(up=0):
'''Get file name, line number, function name and
source text of the caller's caller as 4-tuple:
(file, line, func, text).
The optional argument 'up' allows retrieval of
a caller further back up into the call stack.
Note, the source text may be None and function
name may be '?' in the returned result. In
Python 2.3+ the file name may be an absolute
path.
'''
    try:  # just get a few frames
f = traceback.extract_stack(limit=up+2)
if f:
return f[0]
except:
pass
# running with psyco?
return ('', 0, '', None)
class Example:
closed = False
in_emph = None
def __init__(self, node, section, line_offset, line_hash = '#'):
# A list of text fragments comprising the Example. Start with a #line
# directive
self.section = section
self.line_hash = line_hash
self.node = node
self.body = []
self.line_offset = line_offset
self._number_of_prefixes = 0
self.emphasized = [] # indices of text strings that have been
# emphasized. These are generally expected to be
# invalid C++ and will need special treatment
def begin_emphasis(self):
self.in_emph = len(self.body)
def end_emphasis(self):
self.emphasized.append( (self.in_emph, len(self.body)) )
def append(self, s):
self.append_raw(self._make_line(s))
def prepend(self, s):
self.prepend_raw(self._make_line(s))
def append_raw(self, s):
self.body.append(s)
def prepend_raw(self, s):
self.body.insert(0,s)
self.emphasized = [ (x[0]+1,x[1]+1) for x in self.emphasized ]
self._number_of_prefixes += 1
def replace(self, s1, s2):
self.body = [x.replace(s1,s2) for x in self.body]
def sub(self, pattern, repl, count = 1, flags = re.MULTILINE):
pat = re.compile(pattern, flags)
for i,txt in enumerate(self.body):
if count > 0:
x, subs = pat.subn(repl, txt, count)
self.body[i] = x
count -= subs
def wrap(self, s1, s2):
self.append_raw(self._make_line(s2))
self.prepend_raw(self._make_line(s1, offset = -s1.count('\n')))
def replace_emphasis(self, s, index = 0):
"""replace the index'th emphasized text with s"""
e = self.emphasized[index]
self.body[e[0]:e[1]] = [s]
del self.emphasized[index]
elipsis = re.compile('^([ \t]*)([.][.][.][ \t]*)$', re.MULTILINE)
def __str__(self):
# Comment out any remaining emphasized sections
b = [self.elipsis.sub(r'\1// \2', s) for s in self.body]
emph = self.emphasized
emph.reverse()
for e in emph:
b.insert(e[1], ' */')
b.insert(e[0], '/* ')
emph.reverse()
# Add initial #line
b.insert(
self._number_of_prefixes,
self._line_directive(self.node.line, self.node.source)
)
# Add trailing newline to avoid warnings
b.append('\n')
return ''.join(b)
def __repr__(self):
return "Example: " + repr(str(self))
def raw(self):
return ''.join(self.body)
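    # Prefix the fragment with a #line directive pointing at the Python
    # caller's file and line, so compiler diagnostics map back to the source.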
def _make_line(self, s, offset = 0):
c = _caller(2)[1::-1]
offset -= s.count('\n')
return '\n%s%s\n' % (self._line_directive(offset = offset, *c), s.strip('\n'))
def _line_directive(self, line, source, offset = None):
if self.line_hash is None:
return '\n'
if offset is None:
offset = self.line_offset
if line is None or line <= -offset:
line = 1
else:
line += offset
if source is None:
return '%sline %d\n' % (self.line_hash, line)
else:
return '%sline %d "%s"\n' % (self.line_hash, line, source)
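# Run a shell command, optionally feeding it stdin, and return
# (status, combined stdout/stderr); expect_error inverts the exit status.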
def syscmd(
cmd
, expect_error = False
, input = None
, max_output_lines = None
):
# On windows close() returns the exit code, on *nix it doesn't so
# we need to use popen2.Popen4 instead.
if sys.platform == 'win32':
stdin, stdout_stderr = os.popen4(cmd)
if input: stdin.write(input)
stdin.close()
out = stdout_stderr.read()
status = stdout_stderr.close()
else:
import popen2
process = popen2.Popen4(cmd)
if input: process.tochild.write(input)
out = process.fromchild.read()
status = process.wait()
if max_output_lines is not None:
out = '\n'.join(out.split('\n')[:max_output_lines])
if expect_error:
status = not status
if status:
print
print '========== offending command ==========='
print cmd
print '------------ stdout/stderr -------------'
print expect_error and 'Error expected, but none seen' or out
elif expect_error > 1:
print
print '------ Output of Expected Error --------'
print out
print '----------------------------------------'
sys.stdout.flush()
return (status,out)
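# Expand %VAR%-style environment references on Windows; elsewhere defer to
# os.path.expandvars.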
def expand_vars(path):
if os.name == 'nt':
re_env = re.compile(r'%\w+%')
return re_env.sub(
lambda m: os.environ.get( m.group(0)[1:-1] )
, path
)
else:
return os.path.expandvars(path)
def remove_directory_and_contents(path):
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(path)
class BuildResult:
def __init__(self, path):
self.path = path
def __repr__(self):
return self.path
def __del__(self):
remove_directory_and_contents(self.path)
class CPlusPlusTranslator(litre.LitreTranslator):
_exposed_attrs = ['compile', 'test', 'ignore', 'match_stdout', 'stack', 'config'
, 'example', 'prefix', 'preprocessors', 'litre_directory',
'litre_translator', 'includes', 'build', 'jam_prefix',
'run_python']
last_run_output = ''
"""Attributes that will be made available to litre code"""
def __init__(self, document, config):
litre.LitreTranslator.__init__(self, document, config)
self.in_literal = False
self.in_table = True
self.preprocessors = []
self.stack = []
self.example = None
self.prefix = []
self.includes = config.includes
self.litre_directory = os.path.split(__file__)[0]
self.config = config
self.litre_translator = self
self.line_offset = 0
self.last_source = None
self.jam_prefix = []
self.globals = { 'test_literals_in_tables' : False }
for m in self._exposed_attrs:
self.globals[m] = getattr(self, m)
self.examples = {}
self.current_section = None
#
# Stuff for use by docutils writer framework
#
def visit_emphasis(self, node):
if self.in_literal:
self.example.begin_emphasis()
def depart_emphasis(self, node):
if self.in_literal:
self.example.end_emphasis()
def visit_section(self, node):
self.current_section = node['ids'][0]
def visit_literal_block(self, node):
if node.source is None:
node.source = self.last_source
self.last_source = node.source
# create a new example
self.example = Example(node, self.current_section, line_offset = self.line_offset, line_hash = self.config.line_hash)
self.stack.append(self.example)
self.in_literal = True
def depart_literal_block(self, node):
self.in_literal = False
def visit_literal(self, node):
if self.in_table and self.globals['test_literals_in_tables']:
self.visit_literal_block(node)
else:
litre.LitreTranslator.visit_literal(self,node)
def depart_literal(self, node):
if self.in_table and self.globals['test_literals_in_tables']:
self.depart_literal_block(node)
else:
litre.LitreTranslator.depart_literal(self,node)
def visit_table(self,node):
self.in_table = True
litre.LitreTranslator.visit_table(self,node)
def depart_table(self,node):
self.in_table = False
litre.LitreTranslator.depart_table(self,node)
def visit_Text(self, node):
if self.in_literal:
self.example.append_raw(node.astext())
def depart_document(self, node):
self.write_examples()
#
# Private stuff
#
def handled(self, n = 1):
r = self.stack[-n:]
del self.stack[-n:]
return r
def _execute(self, code):
"""Override of litre._execute; sets up variable context before
evaluating code
"""
self.globals['example'] = self.example
eval(code, self.globals)
#
# Stuff for use by embedded python code
#
def match_stdout(self, expected = None):
if expected is None:
expected = self.example.raw()
self.handled()
if not re.search(expected, self.last_run_output, re.MULTILINE):
#if self.last_run_output.strip('\n') != expected.strip('\n'):
print 'output failed to match example'
print '-------- Actual Output -------------'
print repr(self.last_run_output)
print '-------- Expected Output -----------'
print repr(expected)
print '------------------------------------'
sys.stdout.flush()
def ignore(self, n = 1):
if n == 'all':
n = len(self.stack)
return self.handled(n)
def wrap(self, n, s1, s2):
self.stack[-1].append(s2)
self.stack[-n].prepend(s1)
def compile(
self
, howmany = 1
, pop = -1
, expect_error = False
, extension = '.o'
, options = ['-c']
, built_handler = lambda built_file: None
, source_file = None
, source_suffix = '.cpp'
# C-style comments by default; handles C++ and YACC
, make_comment = lambda text: '/*\n%s\n*/' % text
, built_file = None
, command = None
):
"""
Compile examples on the stack, whose topmost item is the last example
seen but not yet handled so far.
:howmany: How many of the topmost examples on the stack to compile.
You can pass a number, or 'all' to indicate that all examples should
be compiled.
:pop: How many of the topmost examples to discard. By default, all of
the examples that are compiled are discarded.
:expect_error: Whether a compilation error is to be expected. Any value
> 1 will cause the expected diagnostic's text to be dumped for
diagnostic purposes. It's common to expect an error but see a
completely unrelated one because of bugs in the example (you can get
this behavior for all examples by setting show_expected_error_output
in your config).
:extension: The extension of the file to build (set to .exe for
run)
:options: Compiler flags
:built_file: A path to use for the built file. By default, a temp
filename is conjured up
:built_handler: A function that's called with the name of the built file
upon success.
:source_file: The full name of the source file to write
:source_suffix: If source_file is None, the suffix to use for the source file
:make_comment: A function that transforms text into an appropriate comment.
:command: A function that is passed (includes, opts, target, source), where
opts is a string representing compiler options, target is the name of
the file to build, and source is the name of the file into which the
example code is written. By default, the function formats
litre.config.compiler with its argument tuple.
"""
# Grab one example by default
if howmany == 'all':
howmany = len(self.stack)
source = '\n'.join(
self.prefix
+ [str(x) for x in self.stack[-howmany:]]
)
source = reduce(lambda s, f: f(s), self.preprocessors, source)
if pop:
if pop < 0:
pop = howmany
del self.stack[-pop:]
if len(self.stack):
self.example = self.stack[-1]
cpp = self._source_file_path(source_file, source_suffix)
if built_file is None:
built_file = self._output_file_path(source_file, extension)
opts = ' '.join(options)
includes = ' '.join(['-I%s' % d for d in self.includes])
if not command:
command = self.config.compiler
if type(command) == str:
command = lambda i, o, t, s, c = command: c % (i, o, t, s)
cmd = command(includes, opts, expand_vars(built_file), expand_vars(cpp))
if expect_error and self.config.show_expected_error_output:
expect_error += 1
comment_cmd = command(includes, opts, built_file, os.path.basename(cpp))
comment = make_comment(config.comment_text(comment_cmd, expect_error))
self._write_source(cpp, '\n'.join([comment, source]))
#print 'wrote in', cpp
#print 'trying command', cmd
status, output = syscmd(cmd, expect_error)
if status or expect_error > 1:
print
if expect_error and expect_error < 2:
print 'Compilation failure expected, but none seen'
print '------------ begin offending source ------------'
print open(cpp).read()
print '------------ end offending source ------------'
if self.config.save_cpp:
print 'saved in', repr(cpp)
else:
self._remove_source(cpp)
sys.stdout.flush()
else:
print '.',
sys.stdout.flush()
built_handler(built_file)
self._remove_source(cpp)
try:
self._unlink(built_file)
except:
if not expect_error:
print 'failed to unlink', built_file
return status
def test(
self
, rule = 'run'
, howmany = 1
, pop = -1
, expect_error = False
, requirements = ''
, input = ''
):
# Grab one example by default
if howmany == 'all':
howmany = len(self.stack)
source = '\n'.join(
self.prefix
+ [str(x) for x in self.stack[-howmany:]]
)
source = reduce(lambda s, f: f(s), self.preprocessors, source)
id = self.example.section
if not id:
id = 'top-level'
        if not self.examples.has_key(id):
self.examples[id] = [(rule, source)]
else:
self.examples[id].append((rule, source))
if pop:
if pop < 0:
pop = howmany
del self.stack[-pop:]
if len(self.stack):
self.example = self.stack[-1]
def write_examples(self):
jam = open(os.path.join(self.config.dump_dir, 'Jamfile.v2'), 'w')
jam.write('''
import testing ;
''')
for id,examples in self.examples.items():
for i in range(len(examples)):
cpp = '%s%d.cpp' % (id, i)
jam.write('%s %s ;\n' % (examples[i][0], cpp))
outfile = os.path.join(self.config.dump_dir, cpp)
print cpp,
try:
if open(outfile, 'r').read() == examples[i][1]:
print ' .. skip'
continue
except:
pass
open(outfile, 'w').write(examples[i][1])
print ' .. written'
jam.close()
def build(
self
, howmany = 1
, pop = -1
, source_file = 'example.cpp'
, expect_error = False
, target_rule = 'obj'
, requirements = ''
, input = ''
, output = 'example_output'
):
# Grab one example by default
if howmany == 'all':
howmany = len(self.stack)
source = '\n'.join(
self.prefix
+ [str(x) for x in self.stack[-howmany:]]
)
source = reduce(lambda s, f: f(s), self.preprocessors, source)
if pop:
if pop < 0:
pop = howmany
del self.stack[-pop:]
if len(self.stack):
self.example = self.stack[-1]
dir = tempfile.mkdtemp()
cpp = os.path.join(dir, source_file)
self._write_source(cpp, source)
self._write_jamfile(
dir
, target_rule = target_rule
, requirements = requirements
, input = input
, output = output
)
cmd = 'bjam'
if self.config.bjam_options:
cmd += ' %s' % self.config.bjam_options
os.chdir(dir)
status, output = syscmd(cmd, expect_error)
if status or expect_error > 1:
print
if expect_error and expect_error < 2:
print 'Compilation failure expected, but none seen'
print '------------ begin offending source ------------'
print open(cpp).read()
print '------------ begin offending Jamfile -----------'
print open(os.path.join(dir, 'Jamroot')).read()
print '------------ end offending Jamfile -------------'
sys.stdout.flush()
else:
print '.',
sys.stdout.flush()
if status: return None
else: return BuildResult(dir)
def _write_jamfile(self, path, target_rule, requirements, input, output):
jamfile = open(os.path.join(path, 'Jamroot'), 'w')
contents = r"""
import modules ;
BOOST_ROOT = [ modules.peek : BOOST_ROOT ] ;
use-project /boost : $(BOOST_ROOT) ;
%s
%s %s
: example.cpp %s
: <include>.
%s
%s
;
""" % (
'\n'.join(self.jam_prefix)
, target_rule
, output
, input
, ' '.join(['<include>%s' % d for d in self.includes])
, requirements
)
jamfile.write(contents)
def run_python(
self
, howmany = 1
, pop = -1
, module_path = []
, expect_error = False
):
# Grab one example by default
if howmany == 'all':
howmany = len(self.stack)
if module_path == None: module_path = []
if isinstance(module_path, BuildResult) or type(module_path) == str:
module_path = [module_path]
module_path = map(lambda p: str(p), module_path)
source = '\n'.join(
self.prefix
+ [str(x) for x in self.stack[-howmany:]]
)
if pop:
if pop < 0:
pop = howmany
del self.stack[-pop:]
if len(self.stack):
self.example = self.stack[-1]
r = re.compile(r'^(>>>|\.\.\.) (.*)$', re.MULTILINE)
source = r.sub(r'\2', source)
py = self._source_file_path(source_file = None, source_suffix = 'py')
open(py, 'w').write(source)
old_path = os.getenv('PYTHONPATH')
if old_path == None:
pythonpath = ':'.join(module_path)
old_path = ''
else:
pythonpath = old_path + ':%s' % ':'.join(module_path)
os.putenv('PYTHONPATH', pythonpath)
status, output = syscmd('python %s' % py)
if status or expect_error > 1:
print
if expect_error and expect_error < 2:
print 'Failure expected, but none seen'
print '------------ begin offending source ------------'
print open(py).read()
print '------------ end offending source --------------'
sys.stdout.flush()
else:
print '.',
sys.stdout.flush()
self.last_run_output = output
os.putenv('PYTHONPATH', old_path)
self._unlink(py)
def _write_source(self, filename, contents):
open(filename,'w').write(contents)
def _remove_source(self, source_path):
os.unlink(source_path)
def _source_file_path(self, source_file, source_suffix):
if source_file is None:
cpp = tempfile.mktemp(suffix=source_suffix)
else:
cpp = os.path.join(tempfile.gettempdir(), source_file)
return cpp
def _output_file_path(self, source_file, extension):
return tempfile.mktemp(suffix=extension)
def _unlink(self, file):
file = expand_vars(file)
if os.path.exists(file):
os.unlink(file)
def _launch(self, exe, stdin = None):
status, output = syscmd(exe, input = stdin)
self.last_run_output = output
def run_(self, howmany = 1, stdin = None, **kw):
new_kw = { 'options':[], 'extension':'.exe' }
new_kw.update(kw)
self.compile(
howmany
, built_handler = lambda exe: self._launch(exe, stdin = stdin)
, **new_kw
)
def astext(self):
return ""
return '\n\n ---------------- Unhandled Fragment ------------ \n\n'.join(
[''] # generates a leading announcement
+ [ unicode(s) for s in self.stack]
)
class DumpTranslator(CPlusPlusTranslator):
example_index = 1
def _source_file_path(self, source_file, source_suffix):
if source_file is None:
source_file = 'example%s%s' % (self.example_index, source_suffix)
self.example_index += 1
cpp = os.path.join(config.dump_dir, source_file)
return cpp
def _output_file_path(self, source_file, extension):
chapter = os.path.basename(config.dump_dir)
return r'%%TEMP%%\metaprogram-%s-example%s%s' \
% ( chapter, self.example_index - 1, extension)
def _remove_source(self, source_path):
pass
class WorkaroundTranslator(DumpTranslator):
"""Translator used to test/dump workaround examples for vc6 and vc7. Just
like a DumpTranslator except that we leave existing files alone.
Warning: not sensitive to changes in .rst source!! If you change the actual
examples in source files you will have to move the example files out of the
way and regenerate them, then re-incorporate the workarounds.
"""
def _write_source(self, filename, contents):
if not os.path.exists(filename):
DumpTranslator._write_source(self, filename, contents)
class Config:
save_cpp = False
line_hash = '#'
show_expected_error_output = False
max_output_lines = None
class Writer(litre.Writer):
translator = CPlusPlusTranslator
def __init__(
self
, config
):
litre.Writer.__init__(self)
self._config = Config()
defaults = Config.__dict__
# update config elements
self._config.__dict__.update(config.__dict__)
# dict([i for i in config.__dict__.items()
# if i[0] in config.__all__]))
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value, tests will fail;
# a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
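# Worked example, assuming the default regtest relayfee of 0.00001 BTC/kB:
# feeTolerance = 2 * 0.00001 / 1000 = 0.00000002, i.e. a tolerance of 2 satoshis.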
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
################################################################
# test a fundrawtransaction which will not get a change output #
################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'foo': 'bar'})
raise AssertionError("Accepted invalid option foo")
except JSONRPCException as e:
assert("Unexpected key foo" in e.error['message'])
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
try:
self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': 'foobar'})
raise AssertionError("Accepted invalid bluecoin address")
except JSONRPCException as e:
assert("changeAddress must be a valid bluecoin address" in e.error['message'])
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 2})
except JSONRPCException as e:
assert('changePosition out of bounds' == e.error['message'])
else:
assert(False)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
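# Offset arithmetic behind the splice above (in hex characters): 4-byte version = 8,
# 1-byte vin count = 2, 32-byte txid = 64, 4-byte vout index = 8, so the 1-byte
# scriptSig length sits at characters 82-83. Writing "0100" there turns the empty
# scriptSig into the one-byte script 0x00, which the decode asserts below expect.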
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
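# encryptwallet stops node 1 (as it does in upstream Bitcoin Core), so drop it from
# the list; the remaining entries shift down, which is why stop_node below is called
# with the original datadir indices 0, 2 and 3.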
self.nodes.pop(1)
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 2)
stop_node(self.nodes[2], 3)
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.2)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node0
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
#############################
# Test address reuse option #
#############################
result3 = self.nodes[3].fundrawtransaction(rawtx, {"reserveChangeKey": False})
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# fundrawtransaction should not have removed the key from the keypool
assert(changeaddress == nextaddr)
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
keys = list(outputs.keys())
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
|
|
import json
from nose.tools import eq_
from lib.sellers.constants import (ACCESS_PURCHASE, ACCESS_SIMULATE,
EXTERNAL_PRODUCT_ID_IS_NOT_UNIQUE)
from lib.sellers.models import Seller, SellerProduct, SellerPaypal
from solitude.base import APITest
uuid = 'sample:uid'
class TestSeller(APITest):
def setUp(self):
self.api_name = 'generic'
self.list_url = self.get_list_url('seller')
def test_add(self):
res = self.client.post(self.list_url, data={'uuid': uuid})
eq_(res.status_code, 201)
eq_(Seller.objects.filter(uuid=uuid).count(), 1)
def test_add_multiple(self):
self.client.post(self.list_url, data={'uuid': uuid})
res = self.client.post(self.list_url, data={'uuid': uuid})
eq_(res.status_code, 400)
eq_(self.get_errors(res.content, 'uuid'),
['Seller with this Uuid already exists.'])
def test_add_empty(self):
res = self.client.post(self.list_url, data={'uuid': ''})
eq_(res.status_code, 400)
eq_(self.get_errors(res.content, 'uuid'), ['This field is required.'])
def test_add_missing(self):
res = self.client.post(self.list_url, data={})
eq_(res.status_code, 400)
eq_(self.get_errors(res.content, 'uuid'), ['This field is required.'])
def test_list_allowed(self):
self.allowed_verbs(self.list_url, ['post', 'get'])
def create(self):
return Seller.objects.create(uuid=uuid)
def test_get(self):
obj = self.create()
res = self.client.get(self.get_detail_url('seller', obj))
eq_(res.status_code, 200)
content = json.loads(res.content)
eq_(content['uuid'], uuid)
eq_(content['resource_pk'], obj.pk)
class TestSellerPaypal(APITest):
def setUp(self):
self.api_name = 'paypal'
self.seller = Seller.objects.create(uuid=uuid)
self.list_url = self.get_list_url('seller')
def data(self):
return {'seller': '/generic/seller/%s/' % self.seller.pk,
'paypal_id': 'foo@bar.com',
'address_one': '123 main st.',
'country': 'canada'}
def test_post(self):
res = self.client.post(self.list_url, data=self.data())
eq_(res.status_code, 201)
objs = SellerPaypal.objects.all()
eq_(objs.count(), 1)
eq_(objs[0].paypal_id, 'foo@bar.com')
eq_(objs[0].address_one, '123 main st.')
def test_get(self):
obj = self.create()
url = self.get_detail_url('seller', obj)
res = self.client.get(url)
eq_(res.status_code, 200)
eq_(json.loads(res.content)['token'], False)
eq_(json.loads(res.content)['secret'], False)
def test_get_generic(self):
self.create()
url = self.get_detail_url('seller', self.seller, api_name='generic')
res = self.client.get(url)
eq_(res.status_code, 200)
content = json.loads(res.content)
eq_(content['paypal']['token'], False)
eq_(content['paypal']['secret'], False)
def create(self):
return SellerPaypal.objects.create(seller=self.seller,
address_one='123 main st.')
def test_booleans(self):
obj = self.create()
url = self.get_detail_url('seller', obj)
res = self.client.get(url, data={'uuid': uuid})
content = json.loads(res.content)
eq_(content['secret'], False)
eq_(content['token'], False)
obj.token = obj.secret = 'abc'
obj.save()
res = self.client.get(url, data={'uuid': uuid})
content = json.loads(res.content)
eq_(content['secret'], True)
eq_(content['token'], True)
def test_set_paypal_id(self):
obj = self.create()
url = self.get_detail_url('seller', obj)
id_ = 'foo@bar.com'
res = self.client.put(url, data={'paypal_id': id_})
eq_(res.status_code, 202)
eq_(json.loads(res.content)['paypal_id'], id_)
def test_patch(self):
obj = self.create()
url = self.get_detail_url('seller', obj)
id_ = 'foo@bar.com'
secret = 'some-secret'
obj.secret = secret
obj.save()
res = self.client.patch(url, data={'paypal_id': id_})
eq_(res.status_code, 202, res.content)
res = SellerPaypal.objects.get(pk=obj.pk)
eq_(res.secret, secret)
eq_(res.paypal_id, id_)
eq_(res.address_one, '123 main st.')
def test_list_allowed(self):
obj = self.create()
url = self.get_detail_url('seller', obj)
self.allowed_verbs(self.list_url, ['post', 'get'])
self.allowed_verbs(url, ['get', 'delete', 'put', 'patch'])
class TestSellerProduct(APITest):
def setUp(self):
self.api_name = 'generic'
self.seller = Seller.objects.create(uuid=uuid)
self.seller_url = self.get_detail_url('seller', self.seller.pk)
self.list_url = self.get_list_url('product')
def create(self, **kw):
params = {'seller': self.seller, 'external_id': 'xyz',
'public_id': uuid}
params.update(kw)
return SellerProduct.objects.create(**params)
def create_url(self):
obj = self.create(public_id='%s-url' % uuid)
url = self.get_detail_url('product', obj)
return obj, url
def data(self, **kw):
params = {'seller': self.seller_uri(),
'external_id': 'pre-generated-product-id',
'secret': 'hush',
'access': ACCESS_PURCHASE,
'public_id': 'public-id'}
params.update(**kw)
return params
def seller_uri(self):
return self.get_detail_url('seller', self.seller.pk)
def test_get_miss(self):
# A test that filtering on the wrong uuid returns zero.
self.create()
res = self.client.get(self.list_url, data={'seller__uuid': 'foo'})
eq_(json.loads(res.content)['meta']['total_count'], 0)
def test_get_all(self):
# No filters at all still returns everything.
self.create()
res = self.client.get(self.list_url)
eq_(json.loads(res.content)['meta']['total_count'], 1)
def test_get_one(self):
# Getting just one object just works.
self.create()
res = self.client.get(self.list_url, data={'seller__uuid': uuid})
eq_(json.loads(res.content)['meta']['total_count'], 1)
def test_not_active(self):
obj = self.create()
obj.seller.active = False
obj.seller.save()
res = self.client.get(self.list_url, data={'seller__uuid': uuid,
'seller__active': True})
eq_(json.loads(res.content)['meta']['total_count'], 0)
def test_post(self):
res = self.client.post(self.list_url, data=self.data())
eq_(res.status_code, 201)
objs = SellerProduct.objects.all()
eq_(objs.count(), 1)
def test_get_by_external_id(self):
prod = self.create(external_id='my-id')
res = self.client.get(self.list_url, data={'seller': self.seller.pk,
'external_id': 'my-id'})
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data['meta']['total_count'], 1)
eq_(data['objects'][0]['resource_pk'], prod.pk)
def test_get_by_public_id(self):
self.create(public_id='one', external_id='one')
self.create(public_id='two', external_id='two')
res = self.client.get(self.list_url, data={'seller': self.seller.pk,
'public_id': 'one'})
eq_(res.status_code, 200, res)
data = json.loads(res.content)
eq_(data['meta']['total_count'], 1)
eq_(data['objects'][0]['public_id'], 'one')
def test_id_unique_for_seller_error(self):
res = self.client.post(self.list_url,
data=self.data(external_id='unique-id'))
eq_(res.status_code, 201, res.content)
# Submit the same ID for the same seller.
res = self.client.post(self.list_url,
data=self.data(external_id='unique-id'))
eq_(res.status_code, 400)
eq_(self.get_errors(res.content, '__all__'),
[EXTERNAL_PRODUCT_ID_IS_NOT_UNIQUE], res.content)
def test_id_unique_for_seller_ok(self):
res = self.client.post(self.list_url,
data=self.data(external_id='unique-id'))
eq_(res.status_code, 201)
new_seller = Seller.objects.create(uuid='some-other-seller')
data = self.data(seller=self.get_detail_url('seller', new_seller.pk),
external_id='unique-id', public_id='blah')
res = self.client.post(self.list_url, data=data)
eq_(res.status_code, 201)
def test_list_allowed(self):
obj, url = self.create_url()
self.allowed_verbs(self.list_url, ['post', 'get'])
self.allowed_verbs(url, ['get', 'put', 'patch'])
def test_patch_get_secret(self):
obj, url = self.create_url()
res = self.client.patch(url, json.dumps({'seller': self.seller_url,
'external_id': 'xyz',
'secret': 'hush'}))
eq_(res.status_code, 202, res.content)
res = self.client.get(url)
data = json.loads(res.content)
eq_(data['secret'], 'hush')
def test_patch_get_access(self):
obj, url = self.create_url()
res = self.client.patch(url, json.dumps({'access': ACCESS_SIMULATE}))
eq_(res.status_code, 202, res.content)
res = self.client.get(url)
data = json.loads(res.content)
eq_(data['access'], ACCESS_SIMULATE)
def test_patch_get_ext_id(self):
obj, url = self.create_url()
res = self.client.patch(url, json.dumps({'seller': self.seller_url,
'external_id': 'some-id'}))
eq_(res.status_code, 202)
data = obj.reget()
eq_(data.external_id, 'some-id')
def test_put_get(self):
obj, url = self.create_url()
res = self.client.put(url, json.dumps({'seller': self.seller_url,
'secret': 'hush',
'access': ACCESS_PURCHASE,
'external_id': 'abc',
'public_id': 'blah'}))
eq_(res.status_code, 202)
data = obj.reget()
eq_(data.secret, 'hush')
eq_(data.external_id, 'abc')
def test_patch_non_unique_ext_id(self):
self.create(external_id='some-id')
obj, url = self.create_url()
res = self.client.patch(url, json.dumps({'external_id': 'some-id'}))
eq_(res.status_code, 400)
eq_(self.get_errors(res.content, '__all__'),
[EXTERNAL_PRODUCT_ID_IS_NOT_UNIQUE], res.content)
def test_put_non_unique_ext_id(self):
self.create(external_id='some-id')
obj, url = self.create_url()
res = self.client.put(url, json.dumps({'seller': self.seller_url,
'secret': 'hush',
'external_id': 'some-id',
'public_id': 'blah'}))
eq_(res.status_code, 400)
eq_(self.get_errors(res.content, '__all__'),
[EXTERNAL_PRODUCT_ID_IS_NOT_UNIQUE], res.content)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Numba documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 30 11:55:40 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
try:
# Numba is installed
import numba
except ImportError:
# Numba is run from its source checkout
sys.path.insert(0, os.path.abspath('../..'))
import numba
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
# The following is needed to fix RTD issue with numpydoc
# https://github.com/readthedocs/sphinx_rtd_theme/issues/766
from conda.cli.python_api import run_command as conda_cmd
conda_cmd("install", "-c", "conda-forge", "sphinx_rtd_theme>=0.5.1", "-y")
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
#'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
#'sphinx.ext.graphviz',
'numpydoc',
]
# Adding the github files extension
sys.path.append(os.path.abspath(os.path.join(".", "_ext")))
extensions.append('ghfiles')
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Numba'
copyright = u'2012-2020, Anaconda, Inc. and others'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = '.'.join(numba.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = numba.__version__
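# For example, numba.__version__ == '0.53.1' would yield version = '0.53' and
# release = '0.53.1'.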
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# All sphinx_rtd_theme options. Default values commented out; uncomment to
# change.
html_theme_options = {
'canonical_url': 'https://numba.readthedocs.io/en/stable/',
# 'logo_only': False,
# 'display_version': True,
# 'prev_next_buttons_location': 'bottom',
'style_external_links': True,
# 'vcs_pageview_mode': '',
'style_nav_header_background': '#00A3E0',
# Toc options
'collapse_navigation': False,
# 'sticky_navigation': True,
# 'navigation_depth': 4,
# 'includehidden': True,
# 'titles_only': False
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = None
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../_static/numba-white-icon-rgb.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../_static/numba-blue-icon-rgb.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Numbadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'numba.tex', u'Numba Documentation',
u'Anaconda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'numba', 'Numba Documentation',
['Anaconda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Numba', 'Numba Documentation',
'Anaconda', 'Numba', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Configuration for intersphinx: refer to the Python standard library
# and the Numpy documentation.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'llvmlite': ('http://llvmlite.pydata.org/en/latest/', None),
}
# numpydoc options
# To silence "WARNING: toctree contains reference to nonexisting document"
numpydoc_show_class_members = False
# -- Custom autogeneration ------------------------------------------------
def _autogenerate():
from numba.scripts.generate_lower_listing import gen_lower_listing
from numba.misc.help.inspector import write_listings
basedir = os.path.dirname(__file__)
gen_lower_listing(os.path.join(basedir,
'developer/autogen_lower_listing.rst'))
# Run inspector on supported packages
for package in ['builtins', 'math', 'cmath', 'numpy']:
write_listings(
package_name=package,
filename=os.path.join(
basedir, 'developer', 'autogen_{}_listing'.format(package),
),
output_format='rst',
)
_autogenerate()
def setup(app):
app.add_stylesheet('rtd-overrides.css')
|
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# search functions for entity-type objects
# notes are in plain text, not html; should be fixed later
import datetime
import re
import time
import unittest
from vistrails.core.query import extract_text
################################################################################
class SearchParseError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class SearchStmt(object):
def __init__(self, content):
self.text = content
self.content = re.compile('.*'+content+'.*', re.MULTILINE | re.IGNORECASE)
def match(self, entity):
return True
def matchModule(self, v, m):
return True
def run(self, v, n):
pass
def __call__(self):
"""Make SearchStmt behave just like a QueryObject."""
return self
class TimeSearchStmt(SearchStmt):
oneSecond = 1.0
oneMinute = oneSecond * 60.0
oneHour = oneMinute * 60.0
oneDay = oneHour * 24.0
oneWeek = oneDay * 7.0
oneMonth = oneDay * 31.0 # wrong, I know
oneYear = oneDay * 365.0 # wrong, I know
amounts = {'seconds': oneSecond,
'minutes': oneMinute,
'hours': oneHour,
'days': oneDay,
'weeks': oneWeek,
'months': oneMonth,
'years': oneYear}
months = {'january': 1,
'february': 2,
'march': 3,
'april': 4,
'may': 5,
'june': 6,
'july': 7,
'august': 8,
'september': 9,
'october': 10,
'november': 11,
'december': 12}
dateEntry = r'([^\,\/\: ]+)'
timeEntry = r'(\d?\d?)'
dateSep = r' *[\,\/\- ] *'
timeSep = r' *: *'
sep = r' *'
start = r'^ *'
finish = r' *$'
twoEntryDate = (dateEntry+
dateSep+
dateEntry)
threeEntryDate = (dateEntry+
dateSep+
dateEntry+
dateSep+
dateEntry)
twoEntryTime = (timeEntry+
timeSep+
timeEntry)
threeEntryTime = (timeEntry+
timeSep+
timeEntry+
timeSep+
timeEntry)
dateRE = [re.compile((start+
twoEntryDate+
finish)), # "Mar 12", "Mar, 12"
re.compile((start+
threeEntryDate+
finish)), # "Mar, 12, 2006", "2006 Mar 12", etc.
re.compile((start+
twoEntryTime+
finish)),
re.compile((start+
threeEntryTime+
finish)),
re.compile((start+
twoEntryDate+
sep+
twoEntryTime+
finish)),
re.compile((start+
twoEntryDate+
sep+
threeEntryTime+
finish)),
re.compile((start+
threeEntryDate+
sep+
twoEntryTime+
finish)),
re.compile((start+
threeEntryDate+
sep+
threeEntryTime+
finish)),
re.compile((start+
twoEntryTime+
sep+
twoEntryDate+
finish)),
re.compile((start+
twoEntryTime+
sep+
threeEntryDate+
finish)),
re.compile((start+
threeEntryTime+
sep+
twoEntryDate+
finish)),
re.compile((start+
threeEntryTime+
sep+
threeEntryDate+
finish))]
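# Illustrative inputs these patterns accept (not exhaustive): "mar 12", "12/mar/2006",
# "15:30", "15:30:05", "mar 12 15:30", "2006-mar-12 15:30:05", and time-before-date
# variants such as "15:30 mar 12".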
def __init__(self, date):
self.date = self.parseDate(date)
def parseDate(self, dateStr):
def parseAgo(s):
[amount, unit] = s.split(' ')
try:
amount = float(amount)
except ValueError:
raise SearchParseError("Expected a number, got %s" % amount)
if amount <= 0:
raise SearchParseError("Expected a positive number, got %s" % amount)
unitRe = re.compile('^'+unit)
keys = [k
for k in TimeSearchStmt.amounts.keys()
if unitRe.match(k)]
if len(keys) == 0:
raise SearchParseError("Time unit unknown: %s" % unit)
elif len(keys) > 1:
raise SearchParseError("Time unit ambiguous: %s matches %s" % (unit, keys))
return round(time.time()) - TimeSearchStmt.amounts[keys[0]] * amount
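# e.g. "2 days ago" -> now minus 2 * oneDay; unit prefixes also work
# ("3 w ago" == "3 weeks ago"), while an ambiguous prefix such as
# "1 m ago" (minutes? months?) raises SearchParseError.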
def guessDate(unknownEntries, year=None):
def guessStrMonth(s):
monthRe = re.compile('^'+s)
keys = [k
for k in TimeSearchStmt.months.keys()
if monthRe.match(k)]
if len(keys) == 0:
raise SearchParseError("Unknown month: %s" % s)
elif len(keys) > 1:
raise SearchParseError("Ambiguous month: %s matches %s" % (s, keys))
return TimeSearchStmt.months[keys[0]]
if not year:
m = None
# First heuristic: if month comes first, then year comes last
try:
e0 = int(unknownEntries[0])
except ValueError:
m = guessStrMonth(unknownEntries[0])
try:
d = int(unknownEntries[1])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[1])
try:
y = int(unknownEntries[2])
except ValueError:
raise SearchParseError("Expected year, got %s" % unknownEntries[2])
return (y, m, d)
# Second heuristic: if month comes last, then year comes first
try:
e2 = int(unknownEntries[2])
except ValueError:
m = guessStrMonth(unknownEntries[2])
try:
d = int(unknownEntries[1])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[1])
try:
y = int(unknownEntries[0])
except ValueError:
raise SearchParseError("Expected year, got %s" % unknownEntries[0])
return (y, m, d)
# If month is the middle one, decide day and year by size
# (year is largest, hopefully year was entered using 4 digits)
try:
e1 = int(unknownEntries[1])
except ValueError:
m = guessStrMonth(unknownEntries[1])
try:
d = int(unknownEntries[2])
except ValueError:
raise SearchParseError("Expected day or year, got %s" % unknownEntries[2])
try:
y = int(unknownEntries[0])
except ValueError:
raise SearchParseError("Expected year or year, got %s" % unknownEntries[0])
return (max(y,d), m, min(y, d))
lst = [(e0,0),(e1,1),(e2,2)]
lst.sort()
return guessDate([str(lst[0][0]),
str(lst[1][0])],
year=lst[2][0])
# We know year, decide month using similar heuristics - try string month first,
# then decide which is possible
try:
e0 = int(unknownEntries[0])
except ValueError:
m = guessStrMonth(unknownEntries[0])
try:
d = int(unknownEntries[1])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[1])
return (year, m, d)
try:
e1 = int(unknownEntries[1])
except ValueError:
m = guessStrMonth(unknownEntries[1])
try:
d = int(unknownEntries[0])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[0])
return (year, m, d)
if e0 > 12:
return (year, e1, e0)
else:
return (year, e0, e1)
dateStr = dateStr.lower().lstrip().rstrip()
if dateStr.endswith(" ago"):
return parseAgo(dateStr[:-4])
if dateStr == "yesterday":
lst = list(time.localtime(round(time.time()) - TimeSearchStmt.oneDay))
# Reset hour, minute, second
lst[3] = 0
lst[4] = 0
lst[5] = 0
return time.mktime(lst)
if dateStr == "today":
lst = list(time.localtime())
# Reset hour, minute, second
lst[3] = 0
lst[4] = 0
lst[5] = 0
return time.mktime(lst)
if dateStr.startswith("this "):
rest = dateStr[5:]
lst = list(time.localtime(round(time.time())))
if rest == "minute":
lst[5] = 0
elif rest == "hour":
lst[5] = 0
lst[4] = 0
elif rest == "day":
lst[5] = 0
lst[4] = 0
lst[3] = 0
elif rest == "week": # weeks start on monday
lst[5] = 0
lst[4] = 0
lst[3] = 0
# This hack saves me the hassle of computing negative days, months, etc
lst = list(time.localtime(time.mktime(lst) - TimeSearchStmt.oneDay * lst[6]))
elif rest == "month":
lst[5] = 0
lst[4] = 0
lst[3] = 0
lst[2] = 1
elif rest == "year":
lst[5] = 0
lst[4] = 0
lst[3] = 0
lst[2] = 1
lst[1] = 1
return time.mktime(lst)
result = [x.match(dateStr) for x in TimeSearchStmt.dateRE]
this = list(time.localtime())
def setTwoDate(g):
d = guessDate(g, year=this[0])
this[0] = d[0]
this[1] = d[1]
this[2] = d[2]
def setThreeDate(g):
d = guessDate(g)
this[0] = d[0]
this[1] = d[1]
this[2] = d[2]
def setTwoTime(g):
this[3] = int(g[0])
this[4] = int(g[1])
this[5] = 0
def setThreeTime(g):
this[3] = int(g[0])
this[4] = int(g[1])
this[5] = int(g[2])
if result[0]:
setTwoDate(result[0].groups())
setTwoTime([0,0])
elif result[1]:
setThreeDate(result[1].groups())
setTwoTime([0,0])
elif result[2]:
setTwoTime(result[2].groups())
elif result[3]:
setThreeTime(result[3].groups())
elif result[4]:
g = result[4].groups()
setTwoDate([g[0], g[1]])
setTwoTime([g[2], g[3]])
elif result[5]:
g = result[5].groups()
setTwoDate([g[0], g[1]])
setThreeTime([g[2], g[3], g[4]])
elif result[6]:
g = result[6].groups()
setThreeDate([g[0], g[1], g[2]])
setTwoTime([g[3], g[4]])
elif result[7]:
g = result[7].groups()
setThreeDate([g[0], g[1], g[2]])
setThreeTime([g[3], g[4], g[5]])
elif result[8]:
g = result[8].groups()
setTwoTime([g[0], g[1]])
setTwoDate([g[2], g[3]])
elif result[9]:
g = result[9].groups()
setTwoTime([g[0], g[1]])
setThreeDate([g[2], g[3], g[4]])
elif result[10]:
g = result[10].groups()
setThreeTime([g[0], g[1], g[2]])
setTwoDate([g[3], g[4]])
elif result[11]:
g = result[11].groups()
setThreeTime([g[0], g[1], g[2]])
setThreeDate([g[3], g[4],g[5]])
else:
raise SearchParseError("Expected a date, got '%s'" % dateStr)
return time.mktime(this)
class BeforeSearchStmt(TimeSearchStmt):
def match(self, entity):
if not entity.mod_time:
return False
t = time.mktime(entity.mod_time)
return t <= self.date
class AfterSearchStmt(TimeSearchStmt):
def match(self, entity):
if not entity.mod_time:
return False
t = time.mktime(entity.mod_time)
return t >= self.date
class UserSearchStmt(SearchStmt):
def match(self, entity):
if not entity.user:
return False
return self.content.match(entity.user)
class NotesSearchStmt(SearchStmt):
def match(self, entity):
if entity.description:
plainNotes = extract_text(entity.description)
return self.content.search(plainNotes)
return False
class NameSearchStmt(SearchStmt):
def match(self, entity):
return self.content.match(entity.name)
class AndSearchStmt(SearchStmt):
def __init__(self, lst):
self.matchList = lst
def match(self, entity):
for s in self.matchList:
if not s.match(entity):
return False
return True
class OrSearchStmt(SearchStmt):
def __init__(self, lst):
self.matchList = lst
def match(self, entity):
for s in self.matchList:
if s.match(entity):
return True
return False
class NotSearchStmt(SearchStmt):
def __init__(self, stmt):
self.stmt = stmt
def match(self, entity):
return not self.stmt.match(entity)
class TrueSearch(SearchStmt):
def __init__(self):
pass
def match(self, entity):
return True
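# Minimal composition sketch (not part of the original module; it assumes, as
# the parse* methods below suggest, that SearchStmt subclasses are constructed
# from a plain token string compiled into a regex stored in self.content):
#
#   stmt = AndSearchStmt([UserSearchStmt('alice'),
#                         NotSearchStmt(NameSearchStmt('untitled'))])
#   stmt.match(some_entity)   # True only if the user matches 'alice' and the
#                             # name does not match 'untitled'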
################################################################################
class SearchCompiler(object):
SEPARATOR = -1
def __init__(self, searchStr):
self.searchStmt = self.compile(searchStr)
def compile(self, searchStr):
lst = []
t1 = searchStr.split(' ')
while t1:
tok = t1[0]
cmd = tok.split(':')
if not SearchCompiler.dispatch.has_key(cmd[0]):
fun = SearchCompiler.parseAny
else:
fun = SearchCompiler.dispatch[cmd[0]]
if len(cmd) > 1:
[search, rest] = fun(self, cmd[1:] + t1[1:])
else:
[search, rest] = fun(self, t1)
lst.append(search)
t1 = rest
return AndSearchStmt(lst)
def parseUser(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
return (UserSearchStmt(tokStream[0]), tokStream[1:])
def parseAny(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
tok = tokStream[0]
return (OrSearchStmt([UserSearchStmt(tok),
NotesSearchStmt(tok),
NameSearchStmt(tok)]), tokStream[1:])
def parseNotes(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
if ':' in tok:
return (AndSearchStmt(lst), tokStream)
lst.append(NotesSearchStmt(tok))
tokStream = tokStream[1:]
return (AndSearchStmt(lst), [])
def parseName(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
if ':' in tok:
return (AndSearchStmt(lst), tokStream)
lst.append(NameSearchStmt(tok))
tokStream = tokStream[1:]
return (AndSearchStmt(lst), [])
def parseBefore(self, tokStream):
old_tokstream = tokStream
try:
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
# ugly, special case times
if (':' in tok and
not TimeSearchStmt.dateRE[2].match(tok) and
not TimeSearchStmt.dateRE[3].match(tok)):
return (BeforeSearchStmt(" ".join(lst)), tokStream)
lst.append(tok)
tokStream = tokStream[1:]
return (BeforeSearchStmt(" ".join(lst)), [])
except SearchParseError, e:
if 'Expected a date' in e.args[0]:
try:
return self.parseAny(old_tokstream)
except SearchParseError, e2:
print "Another exception...", e2.args[0]
raise e
else:
raise
def parseAfter(self, tokStream):
try:
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
# ugly, special case times
if (':' in tok and
not TimeSearchStmt.dateRE[2].match(tok) and
not TimeSearchStmt.dateRE[3].match(tok)):
return (AfterSearchStmt(" ".join(lst)), tokStream)
lst.append(tok)
tokStream = tokStream[1:]
return (AfterSearchStmt(" ".join(lst)), [])
except SearchParseError, e:
if 'Expected a date' in e.args[0]:
try:
return self.parseAny(['after'] + tokStream)
except SearchParseError, e2:
print "Another exception...", e2.args[0]
raise e
else:
raise
dispatch = {'user': parseUser,
'notes': parseNotes,
'before': parseBefore,
'after': parseAfter,
'name': parseName,
'any': parseAny}
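# Sketch of the query grammar implemented by compile() above: the search string
# is split on spaces, each token is split on ':' and the prefix is looked up in
# the dispatch table; unknown prefixes fall back to parseAny. Illustrative
# examples (not part of the original module):
#   SearchCompiler('user:alice notes:todo')  -> AndSearchStmt of a user match
#                                               and a notes match
#   SearchCompiler('before:12 mar 2006')     -> BeforeSearchStmt on that date
#   SearchCompiler('foo')                    -> OrSearchStmt over user, notes
#                                               and name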
################################################################################
class TestSearch(unittest.TestCase):
def test1(self):
self.assertEquals((TimeSearchStmt('1 day ago').date -
TimeSearchStmt('2 days ago').date), TimeSearchStmt.oneDay)
def test2(self):
self.assertEquals((TimeSearchStmt('12 mar 2006').date -
TimeSearchStmt('11 mar 2006').date), TimeSearchStmt.oneDay)
def test3(self):
# This will fail if year flips during execution. Oh well :)
yr = datetime.datetime.today().year
self.assertEquals((TimeSearchStmt('12 mar').date -
TimeSearchStmt('12 mar %d' % yr).date), 0.0)
def test4(self):
# This will fail if year flips during execution. Oh well :)
yr = datetime.datetime.today().year
self.assertEquals((TimeSearchStmt('mar 12').date -
TimeSearchStmt('12 mar %d' % yr).date), 0.0)
def test5(self):
yr = datetime.datetime.today().year
self.assertEquals((TimeSearchStmt('03 15').date -
TimeSearchStmt('15 mar %d' % yr).date), 0.0)
def test6(self):
self.assertEquals((TimeSearchStmt('03/15/2006').date -
TimeSearchStmt('15 mar 2006').date), 0.0)
def test7(self):
self.assertEquals((TimeSearchStmt('1 day ago').date -
TimeSearchStmt('24 hours ago').date), 0.0)
def test8(self):
self.assertEquals((TimeSearchStmt('1 hour ago').date -
TimeSearchStmt('60 minutes ago').date), 0.0)
def test9(self):
self.assertEquals((TimeSearchStmt('1 minute ago').date -
TimeSearchStmt('60 seconds ago').date), 0.0)
def test10(self):
self.assertEquals((TimeSearchStmt('1 week ago').date -
TimeSearchStmt('7 days ago').date), 0.0)
def test11(self):
self.assertEquals((TimeSearchStmt('1 month ago').date -
TimeSearchStmt('31 days ago').date), 0.0)
def test12(self):
self.assertEquals(TimeSearchStmt('12 mar 2007 21:00:00').date,
TimeSearchStmt('21:00:00 12 mar 2007').date)
def test13(self):
# This will fail if year flips during execution. Oh well :)
yr = datetime.datetime.today().year
self.assertEquals(TimeSearchStmt('12 mar %d 21:00' % yr).date,
TimeSearchStmt('21:00:00 12 mar').date)
def test14(self):
self.assertEquals(TimeSearchStmt('13 apr 2006 21:00').date,
TimeSearchStmt('04/13/2006 21:00:00').date)
def test15(self):
import vistrails.core.vistrail
from vistrails.core.db.locator import XMLFileLocator
import vistrails.core.system
v = XMLFileLocator(vistrails.core.system.vistrails_root_directory() +
'/tests/resources/dummy.xml').load()
# FIXME: Add notes to this.
# self.assertTrue(NotesSearchStmt('mapper').match(v.actionMap[36]))
# self.assertFalse(NotesSearchStmt('-qt-block-indent').match(v.actionMap[36]))
# test16 and 17 now pass.
# def test16(self):
# self.assertRaises(SearchParseError, lambda *args: SearchCompiler('before:'))
# def test17(self):
# self.assertRaises(SearchParseError, lambda *args: SearchCompiler('after:yesterday before:lalala'))
def test18(self):
self.assertEquals(TimeSearchStmt(' 13 apr 2006 ').date,
TimeSearchStmt(' 13 apr 2006 ').date)
def test19(self):
self.assertEquals(SearchCompiler('before:13 apr 2006 12:34:56').searchStmt.matchList[0].date,
BeforeSearchStmt('13 apr 2006 12:34:56').date)
def test20(self):
self.assertEquals(SearchCompiler('after:yesterday').searchStmt.matchList[0].date,
SearchCompiler('before:yesterday').searchStmt.matchList[0].date)
def test21(self):
self.assertEquals(SearchCompiler('after:today').searchStmt.matchList[0].date,
SearchCompiler('before:today').searchStmt.matchList[0].date)
def test22(self):
self.assertEquals(SearchCompiler('before:today').searchStmt.matchList[0].date,
SearchCompiler('before:this day').searchStmt.matchList[0].date)
def test23(self):
t = time.localtime()
import vistrails.core.utils
inv = vistrails.core.utils.invert(TimeSearchStmt.months)
m = inv[t[1]]
self.assertEquals(SearchCompiler('after:%s %s %s' % (t[0], m, t[2])).searchStmt.matchList[0].date,
SearchCompiler('after:today').searchStmt.matchList[0].date)
def test24(self):
# Test compiling these searches
SearchCompiler('before')
SearchCompiler('after')
if __name__ == '__main__':
unittest.main()
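# Usage sketch (not part of the original module): the compiled statement is
# applied to objects exposing the attributes the match() methods read, i.e.
# .name, .user, .description and .mod_time. "actions" below is a hypothetical
# iterable of such objects (e.g. vistrails actions or stand-ins for testing):
#
#   compiled = SearchCompiler('user:alice after:1 week ago')
#   recent = [a for a in actions if compiled.searchStmt.match(a)]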
|
|
"""celery.log"""
import os
import sys
import time
import logging
import traceback
from celery import conf
from celery.utils import noop
from celery.utils.patch import ensure_process_aware_logger
from celery.utils.compat import LoggerAdapter
_hijacked = False
_monkeypatched = False
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
"WARNING": YELLOW,
"DEBUG": BLUE,
"CRITICAL": MAGENTA,
"ERROR": RED,
}
class ColorFormatter(logging.Formatter):
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
record.msg = COLOR_SEQ % (
30 + COLORS[levelname]) + record.msg + RESET_SEQ
return logging.Formatter.format(self, record)
def get_task_logger(loglevel=None, name=None):
ensure_process_aware_logger()
logger = logging.getLogger(name or "celery.task.default")
if loglevel is not None:
logger.setLevel(loglevel)
return logger
def _hijack_multiprocessing_logger():
from multiprocessing import util as mputil
global _hijacked
if _hijacked:
return mputil.get_logger()
ensure_process_aware_logger()
logging.Logger.manager.loggerDict.clear()
try:
if mputil._logger is not None:
mputil._logger = None
except AttributeError:
pass
_hijacked = True
return mputil.get_logger()
def _detect_handler(logfile=None):
"""Create log handler with either a filename, an open stream
or ``None`` (stderr)."""
if not logfile or hasattr(logfile, "write"):
return logging.StreamHandler(logfile)
return logging.FileHandler(logfile)
def get_default_logger(loglevel=None):
"""Get default logger instance.
:keyword loglevel: Initial log level.
"""
logger = _hijack_multiprocessing_logger()
if loglevel is not None:
logger.setLevel(loglevel)
return logger
def setup_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
format=conf.CELERYD_LOG_FORMAT, colorize=conf.CELERYD_LOG_COLOR,
**kwargs):
"""Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
then ``stderr`` is used.
Returns logger object.
"""
return _setup_logger(get_default_logger(loglevel),
logfile, format, colorize, **kwargs)
def setup_task_logger(loglevel=conf.CELERYD_LOG_LEVEL, logfile=None,
format=conf.CELERYD_TASK_LOG_FORMAT, colorize=conf.CELERYD_LOG_COLOR,
task_kwargs=None, **kwargs):
"""Setup the task logger. If ``logfile`` is not specified, then
``stderr`` is used.
Returns logger object.
"""
if task_kwargs is None:
task_kwargs = {}
task_kwargs.setdefault("task_id", "-?-")
task_name = task_kwargs.get("task_name")
task_kwargs.setdefault("task_name", "-?-")
logger = _setup_logger(get_task_logger(loglevel, task_name),
logfile, format, colorize, **kwargs)
return LoggerAdapter(logger, task_kwargs)
def _setup_logger(logger, logfile, format, colorize,
formatter=ColorFormatter, **kwargs):
if logger.handlers: # Logger already configured
return logger
handler = _detect_handler(logfile)
handler.setFormatter(formatter(format, use_color=colorize))
logger.addHandler(handler)
return logger
def emergency_error(logfile, message):
"""Emergency error logging, for when there's no standard file
descriptors open because the process has been daemonized or for
some other reason."""
closefh = noop
logfile = logfile or sys.__stderr__
if hasattr(logfile, "write"):
logfh = logfile
else:
logfh = open(logfile, "a")
closefh = logfh.close
try:
logfh.write("[%(asctime)s: CRITICAL/%(pid)d]: %(message)s\n" % {
"asctime": time.asctime(),
"pid": os.getpid(),
"message": message})
finally:
closefh()
def redirect_stdouts_to_logger(logger, loglevel=None):
"""Redirect :class:`sys.stdout` and :class:`sys.stderr` to a
logging instance.
:param logger: The :class:`logging.Logger` instance to redirect to.
:param loglevel: The loglevel redirected messages will be logged as.
"""
proxy = LoggingProxy(logger, loglevel)
sys.stdout = sys.stderr = proxy
return proxy
class LoggingProxy(object):
"""Forward file object to :class:`logging.Logger` instance.
:param logger: The :class:`logging.Logger` instance to forward to.
:param loglevel: Loglevel to use when writing messages.
"""
mode = "w"
name = None
closed = False
loglevel = logging.ERROR
def __init__(self, logger, loglevel=None):
self.logger = logger
self.loglevel = loglevel or self.logger.level or self.loglevel
self._safewrap_handlers()
def _safewrap_handlers(self):
"""Make the logger handlers dump internal errors to
``sys.__stderr__`` instead of ``sys.stderr`` to circumvent
infinite loops."""
def wrap_handler(handler): # pragma: no cover
class WithSafeHandleError(logging.Handler):
def handleError(self, record):
exc_info = sys.exc_info()
try:
try:
traceback.print_exception(exc_info[0],
exc_info[1],
exc_info[2],
None, sys.__stderr__)
except IOError:
pass # see python issue 5971
finally:
del(exc_info)
handler.handleError = WithSafeHandleError().handleError
return map(wrap_handler, self.logger.handlers)
def write(self, data):
"""Write message to logging object."""
data = data.strip()
if data and not self.closed:
self.logger.log(self.loglevel, data)
def writelines(self, sequence):
"""``writelines(sequence_of_strings) -> None``.
Write the strings to the file.
The sequence can be any iterable object producing strings.
This is equivalent to calling :meth:`write` for each string.
"""
map(self.write, sequence)
def flush(self):
"""This object is not buffered so any :meth:`flush` requests
are ignored."""
pass
def close(self):
"""When the object is closed, no write requests are forwarded to
the logging object anymore."""
self.closed = True
def isatty(self):
"""Always returns ``False``. Just here for file support."""
return False
def fileno(self):
return None
class SilenceRepeated(object):
"""Only log action every n iterations."""
def __init__(self, action, max_iterations=10):
self.action = action
self.max_iterations = max_iterations
self._iterations = 0
def __call__(self, *msgs):
if self._iterations >= self.max_iterations:
map(self.action, msgs)
self._iterations = 0
else:
self._iterations += 1
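# Usage sketch (not part of the original module; assumes a configured celery
# environment so that the celery.conf import above succeeds):
#
#   logger = setup_logger(loglevel=logging.INFO, logfile="celeryd.log")
#   logger.info("worker ready")
#
#   # Route stray writes to stdout/stderr through the logger as well:
#   redirect_stdouts_to_logger(logger, loglevel=logging.WARNING)
#
#   # Suppress all but roughly every 10th heartbeat message:
#   heartbeat = SilenceRepeated(logger.info, max_iterations=10)
#   heartbeat("still alive")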
|
|
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
# We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This effectively puts LegacyVersion,
# which uses the de facto standard originally implemented by setuptools,
# before all PEP 440 versions.
epoch = -1
# This scheme is taken from pkg_resources.parse_version of setuptools, prior
# to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
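# Worked example (illustrative, not part of the original module):
# _legacy_cmpkey("1.0a1") yields
#     (-1, ('00000001', '*a', '00000001', '*final'))
# Numeric parts are zero-padded for lexicographic comparison, the "a"
# pre-release tag sorts before "*final", and the hardcoded epoch of -1
# places every LegacyVersion before any valid PEP 440 Version.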
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we reverse the list, drop all of the now-leading
# zeros until we come to something non-zero, re-reverse it back into the
# correct order and make it a tuple to use as our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
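# Usage sketch (not part of the original module; assumes it is importable as
# packaging.version alongside its sibling _structures module):
#
#   from packaging.version import parse, Version, LegacyVersion
#
#   parse("1.0.post1") > parse("1.0") > parse("1.0rc1") > parse("1.0.dev0")
#   parse("1!0.5") > parse("2.0")             # epochs dominate everything else
#   isinstance(parse("french toast"), LegacyVersion)  # non-PEP 440 fallback
#   Version("1.0+ubuntu1").public == "1.0"    # local segment stripped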
|
|
#################################################################
# seSelection.py
# Originally from DirectSelection.py
# Altered by Yi-Hong Lin, yihhongl@andrew.cmu.edu, 2004
#
# We didn't change anything essential.
# Because we customized seSession from DirectSession,
# the related files need to follow that change.
# However, we don't want to modify anything inside the original directtools,
# so that they keep working with our scene editor.
# (If we changed the original directtools, users would be forced to install the latest version of OUR Panda.)
#
#################################################################
from pandac.PandaModules import GeomNode
from direct.directtools.DirectGlobals import *
from direct.directtools.DirectUtil import *
from seGeometry import *
from direct.showbase.DirectObject import *
from quad import *
COA_ORIGIN = 0
COA_CENTER = 1
# MRM: To do: handle broken node paths in selected and deselected dicts
class DirectNodePath(NodePath):
# A node path augmented with info, bounding box, and utility methods
def __init__(self, nodePath):
# Initialize the superclass
NodePath.__init__(self)
self.assign(nodePath)
# Create a bounding box
self.bbox = DirectBoundingBox(self)
center = self.bbox.getCenter()
# Create matrix to hold the offset between the nodepath
# and its center of action (COA)
self.mCoa2Dnp = Mat4(Mat4.identMat())
if SEditor.coaMode == COA_CENTER:
self.mCoa2Dnp.setRow(3, Vec4(center[0], center[1], center[2], 1))
# Transform from nodePath to widget
self.tDnp2Widget = TransformState.makeIdentity()
def highlight(self):
self.bbox.show()
def dehighlight(self):
self.bbox.hide()
def getCenter(self):
return self.bbox.getCenter()
def getRadius(self):
return self.bbox.getRadius()
def getMin(self):
return self.bbox.getMin()
def getMax(self):
return self.bbox.getMax()
class SelectedNodePaths(DirectObject):
def __init__(self):
self.reset()
def reset(self):
self.selectedDict = {}
self.deselectedDict = {}
__builtins__["last"] = self.last = None
def select(self, nodePath, fMultiSelect = 0):
""" Select the specified node path. Multiselect as required """
# Do nothing if nothing selected
if not nodePath:
print 'Nothing selected!!'
return None
# Reset selected objects and highlight if multiSelect is false
if not fMultiSelect:
self.deselectAll()
# Get this pointer
id = nodePath.id()
# First see if its already in the selected dictionary
dnp = self.getSelectedDict(id)
# If so, we're done
if not dnp:
# See if it is in the deselected dictionary
dnp = self.getDeselectedDict(id)
if dnp:
# Remove it from the deselected dictionary
del(self.deselectedDict[id])
# Show its bounding box
dnp.highlight()
else:
# Didn't find it, create a new selectedNodePath instance
dnp = DirectNodePath(nodePath)
# Show its bounding box
dnp.highlight()
# Add it to the selected dictionary
self.selectedDict[dnp.id()] = dnp
# And update last
__builtins__["last"] = self.last = dnp
return dnp
def deselect(self, nodePath):
""" Deselect the specified node path """
# Get this pointer
id = nodePath.id()
# See if it is in the selected dictionary
dnp = self.getSelectedDict(id)
if dnp:
# It was selected:
# Hide its bounding box
dnp.dehighlight()
# Remove it from the selected dictionary
del(self.selectedDict[id])
# And keep track of it in the deselected dictionary
self.deselectedDict[id] = dnp
# Send a message
messenger.send('DIRECT_deselectedNodePath', [dnp])
return dnp
def getSelectedAsList(self):
"""
Return a list of all selected node paths. No verification of
connectivity is performed on the members of the list
"""
return self.selectedDict.values()[:]
def __getitem__(self,index):
return self.getSelectedAsList()[index]
def getSelectedDict(self, id):
"""
Search selectedDict for node path, try to repair broken node paths.
"""
dnp = self.selectedDict.get(id, None)
if dnp:
return dnp
else:
# Not in selected dictionary
return None
def getDeselectedAsList(self):
return self.deselectedDict.values()[:]
def getDeselectedDict(self, id):
"""
Search deselectedDict for node path, try to repair broken node paths.
"""
dnp = self.deselectedDict.get(id, None)
if dnp:
# Yes
return dnp
else:
# Not in deselected dictionary
return None
def forEachSelectedNodePathDo(self, func):
"""
Perform given func on selected node paths. No node path
connectivity verification performed
"""
selectedNodePaths = self.getSelectedAsList()
for nodePath in selectedNodePaths:
func(nodePath)
def forEachDeselectedNodePathDo(self, func):
"""
Perform given func on deselected node paths. No node path
connectivity verification performed
"""
deselectedNodePaths = self.getDeselectedAsList()
for nodePath in deselectedNodePaths:
func(nodePath)
def getWrtAll(self):
self.forEachSelectedNodePathDo(self.getWrt)
def getWrt(self, nodePath):
nodePath.tDnp2Widget = nodePath.getTransform(SEditor.widget)
def moveWrtWidgetAll(self):
self.forEachSelectedNodePathDo(self.moveWrtWidget)
def moveWrtWidget(self, nodePath):
nodePath.setTransform(SEditor.widget, nodePath.tDnp2Widget)
def deselectAll(self):
self.forEachSelectedNodePathDo(self.deselect)
def highlightAll(self):
self.forEachSelectedNodePathDo(DirectNodePath.highlight)
def dehighlightAll(self):
self.forEachSelectedNodePathDo(DirectNodePath.dehighlight)
def removeSelected(self):
selected = self.last
if selected:
selected.remove()
__builtins__["last"] = self.last = None
def removeAll(self):
# Remove all selected nodePaths from the Scene Graph
self.forEachSelectedNodePathDo(NodePath.remove)
def toggleVisSelected(self):
selected = self.last
# Toggle visibility of selected node paths
if selected:
selected.toggleVis()
def toggleVisAll(self):
# Toggle viz for all selected node paths
self.forEachSelectedNodePathDo(NodePath.toggleVis)
def isolateSelected(self):
selected = self.last
if selected:
selected.isolate()
def getDirectNodePath(self, nodePath):
# Get this pointer
id = nodePath.id()
# First check selected dict
dnp = self.getSelectedDict(id)
if dnp:
return dnp
# Otherwise return result of deselected search
return self.getDeselectedDict(id)
def getNumSelected(self):
return len(self.selectedDict.keys())
class DirectBoundingBox:
def __init__(self, nodePath):
# Record the node path
self.nodePath = nodePath
# Compute bounds, min, max, etc.
self.computeTightBounds()
# Generate the bounding box
self.lines = self.createBBoxLines()
def computeTightBounds(self):
# Compute bounding box using tighter calcTightBounds function
# Need to clear out existing transform on node path
tMat = Mat4()
tMat.assign(self.nodePath.getMat())
self.nodePath.clearMat()
# Get bounds
self.min = Point3(0)
self.max = Point3(0)
self.nodePath.calcTightBounds(self.min,self.max)
# Calc center and radius
self.center = Point3((self.min + self.max)/2.0)
self.radius = Vec3(self.max - self.min).length()
# Restore transform
self.nodePath.setMat(tMat)
del tMat
def computeBounds(self):
self.bounds = self.getBounds()
if self.bounds.isEmpty() or self.bounds.isInfinite():
self.center = Point3(0)
self.radius = 1.0
else:
self.center = self.bounds.getCenter()
self.radius = self.bounds.getRadius()
self.min = Point3(self.center - Point3(self.radius))
self.max = Point3(self.center + Point3(self.radius))
def createBBoxLines(self):
# Create a line segments object for the bbox
lines = LineNodePath(hidden)
lines.node().setName('bboxLines')
lines.setColor( VBase4( 1., 0., 0., 1. ) )
lines.setThickness( 0.5 )
minX = self.min[0]
minY = self.min[1]
minZ = self.min[2]
maxX = self.max[0]
maxY = self.max[1]
maxZ = self.max[2]
# Bottom face
lines.moveTo( minX, minY, minZ )
lines.drawTo( maxX, minY, minZ )
lines.drawTo( maxX, maxY, minZ )
lines.drawTo( minX, maxY, minZ )
lines.drawTo( minX, minY, minZ )
# Front Edge/Top face
lines.drawTo( minX, minY, maxZ )
lines.drawTo( maxX, minY, maxZ )
lines.drawTo( maxX, maxY, maxZ )
lines.drawTo( minX, maxY, maxZ )
lines.drawTo( minX, minY, maxZ )
# Three remaining edges
lines.moveTo( maxX, minY, minZ )
lines.drawTo( maxX, minY, maxZ )
lines.moveTo( maxX, maxY, minZ )
lines.drawTo( maxX, maxY, maxZ )
lines.moveTo( minX, maxY, minZ )
lines.drawTo( minX, maxY, maxZ )
# Create and return bbox lines
lines.create()
# Make sure bbox is never lit or drawn in wireframe
useDirectRenderStyle(lines)
return lines
def updateBBoxLines(self):
ls = self.lines.lineSegs
minX = self.min[0]
minY = self.min[1]
minZ = self.min[2]
maxX = self.max[0]
maxY = self.max[1]
maxZ = self.max[2]
# Bottom face
ls.setVertex( 0, minX, minY, minZ )
ls.setVertex( 1, maxX, minY, minZ )
ls.setVertex( 2, maxX, maxY, minZ )
ls.setVertex( 3, minX, maxY, minZ )
ls.setVertex( 4, minX, minY, minZ )
# Front Edge/Top face
ls.setVertex( 5, minX, minY, maxZ )
ls.setVertex( 6, maxX, minY, maxZ )
ls.setVertex( 7, maxX, maxY, maxZ )
ls.setVertex( 8, minX, maxY, maxZ )
ls.setVertex( 9, minX, minY, maxZ )
# Three remaining edges
ls.setVertex( 10, maxX, minY, minZ )
ls.setVertex( 11, maxX, minY, maxZ )
ls.setVertex( 12, maxX, maxY, minZ )
ls.setVertex( 13, maxX, maxY, maxZ )
ls.setVertex( 14, minX, maxY, minZ )
ls.setVertex( 15, minX, maxY, maxZ )
def getBounds(self):
# Get a node path's bounds
nodeBounds = BoundingSphere()
nodeBounds.extendBy(self.nodePath.node().getInternalBound())
for child in self.nodePath.getChildren():
nodeBounds.extendBy(child.getBounds())
return nodeBounds.makeCopy()
def show(self):
self.lines.reparentTo(self.nodePath)
def hide(self):
self.lines.reparentTo(hidden)
def getCenter(self):
return self.center
def getRadius(self):
return self.radius
def getMin(self):
return self.min
def getMax(self):
return self.max
def vecAsString(self, vec):
return '%.2f %.2f %.2f' % (vec[0], vec[1], vec[2])
def __repr__(self):
return (`self.__class__` +
'\nNodePath:\t%s\n' % self.nodePath.getName() +
'Min:\t\t%s\n' % self.vecAsString(self.min) +
'Max:\t\t%s\n' % self.vecAsString(self.max) +
'Center:\t\t%s\n' % self.vecAsString(self.center) +
'Radius:\t\t%.2f' % self.radius
)
class SelectionQueue(CollisionHandlerQueue):
def __init__(self, parentNP = render):
# Initialize the superclass
CollisionHandlerQueue.__init__(self)
# Current index and entry in collision queue
self.index = -1
self.entry = None
self.skipFlags = SKIP_NONE
# Create a collision node path attached to the given NP
self.collisionNodePath = NodePath(CollisionNode("collisionNP"))
self.setParentNP(parentNP)
# Don't pay the penalty of drawing this collision ray
self.collisionNodePath.hide()
self.collisionNode = self.collisionNodePath.node()
# Intersect with geometry to begin with
self.collideWithGeom()
# And a traverser to do the actual collision tests
self.ct = CollisionTraverser()
# Let the traverser know about the collision node and the queue
#Manakel 2/12/2005: replace CollisionNode by its nodepath
self.ct.addCollider(self.collisionNodePath, self)
# List of objects that can't be selected
self.unpickable = UNPICKABLE
# Derived class must add Collider to complete initialization
def setParentNP(self, parentNP):
# Update collisionNodePath's parent
self.collisionNodePath.reparentTo(parentNP)
def addCollider(self, collider):
# Inherited class must call this function to specify collider object
# Record collision object
self.collider = collider
# Add the collider to the collision Node
self.collisionNode.addSolid( self.collider )
def collideWithBitMask(self, bitMask):
# The into collide mask is the bit pattern colliders look at
# when deciding whether or not to test for a collision "into"
# this collision solid. Set to all Off so this collision solid
# will not be considered in any collision tests
self.collisionNode.setIntoCollideMask(BitMask32().allOff())
# The from collide mask is the bit pattern *this* collision solid
# compares against the into collide mask of candidate collision solids
# Turn this mask all off since we're not testing for collisions against
# collision solids
self.collisionNode.setFromCollideMask(bitMask)
def collideWithGeom(self):
# The into collide mask is the bit pattern colliders look at
# when deciding whether or not to test for a collision "into"
# this collision solid. Set to all Off so this collision solid
# will not be considered in any collision tests
self.collisionNode.setIntoCollideMask(BitMask32().allOff())
# The from collide mask is the bit pattern *this* collision solid
# compares against the into collide mask of candidate collision solids
# Turn this mask all off since we're not testing for collisions against
# collision solids, but we do want to test against geometry
self.collisionNode.setFromCollideMask(GeomNode.getDefaultCollideMask())
def collideWithWidget(self):
# This collision node should not be tested against by any other
# collision solids
self.collisionNode.setIntoCollideMask(BitMask32().allOff())
# This collision node will test for collisions with any collision
# solids with a bit mask set to 0x80000000
mask = BitMask32()
mask.setBit(31)
self.collisionNode.setFromCollideMask(mask)
def addUnpickable(self, item):
if item not in self.unpickable:
self.unpickable.append(item)
def removeUnpickable(self, item):
if item in self.unpickable:
self.unpickable.remove(item)
def setCurrentIndex(self, index):
if (index < 0) or (index >= self.getNumEntries()):
self.index = -1
else:
self.index = index
def setCurrentEntry(self, entry):
self.entry = entry
def getCurrentEntry(self):
return self.entry
def isEntryBackfacing(self, entry):
# If dot product of collision point surface normal and
# ray from camera to collision point is positive, we are
# looking at the backface of the polygon
if not entry.hasSurfaceNormal():
# Well, no way to tell. Assume we're not backfacing.
return 0
fromNodePath = entry.getFromNodePath()
v = Vec3(entry.getSurfacePoint(fromNodePath))
n = entry.getSurfaceNormal(fromNodePath)
# Convert to camera space for backfacing test
if self.collisionNodePath.getParent() != base.cam:
# Problem: assumes base.cam is the camera in question
p2cam = self.collisionNodePath.getParent().getMat(base.cam)
v = Vec3(p2cam.xformPoint(v))
n = p2cam.xformVec(n)
# Normalize and check the angle between the two vectors
v.normalize()
return v.dot(n) >= 0
def findNextCollisionEntry(self, skipFlags = SKIP_NONE):
return self.findCollisionEntry(skipFlags, self.index + 1)
def findCollisionEntry(self, skipFlags = SKIP_NONE, startIndex = 0 ):
# Init self.index and self.entry
self.setCurrentIndex(-1)
self.setCurrentEntry(None)
# Pick out the closest object that isn't a widget
for i in range(startIndex,self.getNumEntries()):
entry = self.getEntry(i)
nodePath = entry.getIntoNodePath()
if (skipFlags & SKIP_HIDDEN) and nodePath.isHidden():
# Skip if hidden node
pass
elif (skipFlags & SKIP_BACKFACE) and self.isEntryBackfacing(entry):
# Skip, if backfacing poly
pass
elif ((skipFlags & SKIP_CAMERA) and
(camera in nodePath.getAncestors())):
# Skip if parented to a camera.
pass
# Can pick unpickable, use the first visible node
elif ((skipFlags & SKIP_UNPICKABLE) and
(nodePath.getName() in self.unpickable)):
# Skip if in unpickable list
pass
else:
self.setCurrentIndex(i)
self.setCurrentEntry(entry)
break
return self.getCurrentEntry()
class SelectionRay(SelectionQueue):
def __init__(self, parentNP = render):
# Initialize the superclass
SelectionQueue.__init__(self, parentNP)
self.addCollider(CollisionRay())
def pick(self, targetNodePath, xy = None):
# Determine ray direction based upon the mouse coordinates
if xy:
mx = xy[0]
my = xy[1]
elif direct:
mx = SEditor.dr.mouseX
my = SEditor.dr.mouseY
else:
if not base.mouseWatcherNode.hasMouse():
# No mouse in window.
self.clearEntries()
return
mx = base.mouseWatcherNode.getMouseX()
my = base.mouseWatcherNode.getMouseY()
#base.mouseWatcherNode.setDisplayRegion(base.win.getDisplayRegion(0))
#mx = base.mouseWatcherNode.getMouseX()+1
#my = base.mouseWatcherNode.getMouseY()+1
#print base.camNode.getName()
#print "Arrived X" + str(mx) + " Arrived Y " + str(my)
self.collider.setFromLens( base.camNode, mx, my )
self.ct.traverse( targetNodePath )
self.sortEntries()
def pickBitMask(self, bitMask = BitMask32.allOff(),
targetNodePath = render,
skipFlags = SKIP_ALL ):
self.collideWithBitMask(bitMask)
self.pick(targetNodePath)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
def pickGeom(self, targetNodePath = render, skipFlags = SKIP_ALL,
xy = None):
self.collideWithGeom()
self.pick(targetNodePath, xy = xy)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
def pickWidget(self, targetNodePath = render, skipFlags = SKIP_NONE ):
self.collideWithWidget()
self.pick(targetNodePath)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
def pick3D(self, targetNodePath, origin, dir):
# Determine ray direction based upon the mouse coordinates
self.collider.setOrigin( origin )
self.collider.setDirection( dir )
self.ct.traverse( targetNodePath )
self.sortEntries()
def pickGeom3D(self, targetNodePath = render,
origin = Point3(0), dir = Vec3(0,0,-1),
skipFlags = SKIP_HIDDEN | SKIP_CAMERA ):
self.collideWithGeom()
self.pick3D(targetNodePath, origin, dir)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
def pickBitMask3D(self, bitMask = BitMask32.allOff(),
targetNodePath = render,
origin = Point3(0), dir = Vec3(0,0,-1),
skipFlags = SKIP_ALL ):
self.collideWithBitMask(bitMask)
self.pick3D(targetNodePath, origin, dir)
# Determine collision entry
return self.findCollisionEntry(skipFlags)
class SelectionSegment(SelectionQueue):
# Like a selection ray but with two endpoints instead of an endpoint
# and a direction
def __init__(self, parentNP = render, numSegments = 1):
# Initialize the superclass
SelectionQueue.__init__(self, parentNP)
self.colliders = []
self.numColliders = 0
for i in range(numSegments):
self.addCollider(CollisionSegment())
def addCollider(self, collider):
# Record new collision object
self.colliders.append(collider)
# Add the collider to the collision Node
self.collisionNode.addSolid( collider )
self.numColliders += 1
def pickGeom(self, targetNodePath = render, endPointList = [],
skipFlags = SKIP_HIDDEN | SKIP_CAMERA ):
self.collideWithGeom()
for i in range(min(len(endPointList), self.numColliders)):
pointA, pointB = endPointList[i]
collider = self.colliders[i]
collider.setPointA( pointA )
collider.setPointB( pointB )
self.ct.traverse( targetNodePath )
# Determine collision entry
return self.findCollisionEntry(skipFlags)
def pickBitMask(self, bitMask = BitMask32.allOff(),
targetNodePath = render, endPointList = [],
skipFlags = SKIP_HIDDEN | SKIP_CAMERA ):
self.collideWithBitMask(bitMask)
for i in range(min(len(endPointList), self.numColliders)):
pointA, pointB = endPointList[i]
collider = self.colliders[i]
collider.setPointA( pointA )
collider.setPointB( pointB )
self.ct.traverse( targetNodePath )
# Determine collision entry
return self.findCollisionEntry(skipFlags)
class SelectionSphere(SelectionQueue):
# Wrapper around collision sphere
def __init__(self, parentNP = render, numSpheres = 1):
# Initialize the superclass
SelectionQueue.__init__(self, parentNP)
self.colliders = []
self.numColliders = 0
for i in range(numSpheres):
self.addCollider(CollisionSphere(Point3(0), 1))
def addCollider(self, collider):
# Record new collision object
self.colliders.append(collider)
# Add the collider to the collision Node
self.collisionNode.addSolid( collider )
self.numColliders += 1
def setCenter(self, i, center):
c = self.colliders[i]
c.setCenter(center)
def setRadius(self, i, radius):
c = self.colliders[i]
c.setRadius(radius)
def setCenterRadius(self, i, center, radius):
c = self.colliders[i]
c.setCenter(center)
c.setRadius(radius)
def isEntryBackfacing(self, entry):
# If dot product of collision point surface normal and
# ray from sphere origin to collision point is positive,
# center is on the backside of the polygon
fromNodePath = entry.getFromNodePath()
v = Vec3(entry.getSurfacePoint(fromNodePath) -
entry.getFrom().getCenter())
n = entry.getSurfaceNormal(fromNodePath)
# If points almost on top of each other, reject face
# (treat as backfacing)
if v.length() < 0.05:
return 1
# Normalize and check the angle between the two vectors
v.normalize()
return v.dot(n) >= 0
def pick(self, targetNodePath, skipFlags):
self.ct.traverse( targetNodePath )
self.sortEntries()
return self.findCollisionEntry(skipFlags)
def pickGeom(self, targetNodePath = render,
skipFlags = SKIP_HIDDEN | SKIP_CAMERA ):
self.collideWithGeom()
return self.pick(targetNodePath, skipFlags)
def pickBitMask(self, bitMask = BitMask32.allOff(),
targetNodePath = render,
skipFlags = SKIP_HIDDEN | SKIP_CAMERA ):
self.collideWithBitMask(bitMask)
return self.pick(targetNodePath, skipFlags)
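# Usage sketch (not part of the original module, and only meaningful inside a
# running Panda3D / scene-editor session where base, render and SEditor exist;
# selectedNodePaths below is a hypothetical SelectedNodePaths instance):
#
#   selectionRay = SelectionRay(base.cam)
#   entry = selectionRay.pickGeom(skipFlags=SKIP_HIDDEN | SKIP_BACKFACE)
#   if entry:
#       selectedNodePaths.select(entry.getIntoNodePath())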
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements raw HID device communication on Windows."""
import ctypes
from ctypes import wintypes
import platform
from pyu2f import errors
from pyu2f.hid import base
# Load relevant DLLs
hid = ctypes.windll.Hid
setupapi = ctypes.windll.SetupAPI
kernel32 = ctypes.windll.Kernel32
# Various structs that are used in the Windows APIs we call
class GUID(ctypes.Structure):
_fields_ = [("Data1", ctypes.c_ulong),
("Data2", ctypes.c_ushort),
("Data3", ctypes.c_ushort),
("Data4", ctypes.c_ubyte * 8)]
# On Windows, SetupAPI.h packs structures differently in 64bit and
# 32bit mode. In 64bit mode, the structures are packed on 8 byte
# boundaries, while in 32bit mode, they are packed on 1 byte boundaries.
# This is important to get right for some API calls that fill out these
# structures.
if platform.architecture()[0] == "64bit":
SETUPAPI_PACK = 8
elif platform.architecture()[0] == "32bit":
SETUPAPI_PACK = 1
else:
raise errors.HidError("Unknown architecture: %s" % platform.architecture()[0])
class DeviceInterfaceData(ctypes.Structure):
_fields_ = [("cbSize", wintypes.DWORD),
("InterfaceClassGuid", GUID),
("Flags", wintypes.DWORD),
("Reserved", ctypes.POINTER(ctypes.c_ulong))]
_pack_ = SETUPAPI_PACK
class DeviceInterfaceDetailData(ctypes.Structure):
_fields_ = [("cbSize", wintypes.DWORD),
("DevicePath", ctypes.c_byte * 1)]
_pack_ = SETUPAPI_PACK
class HidAttributes(ctypes.Structure):
_fields_ = [("Size", ctypes.c_ulong),
("VendorID", ctypes.c_ushort),
("ProductID", ctypes.c_ushort),
("VersionNumber", ctypes.c_ushort)]
class HidCapabilities(ctypes.Structure):
_fields_ = [("Usage", ctypes.c_ushort),
("UsagePage", ctypes.c_ushort),
("InputReportByteLength", ctypes.c_ushort),
("OutputReportByteLength", ctypes.c_ushort),
("FeatureReportByteLength", ctypes.c_ushort),
("Reserved", ctypes.c_ushort * 17),
("NotUsed", ctypes.c_ushort * 10)]
# Various void* aliases for readability.
HDEVINFO = ctypes.c_void_p
HANDLE = ctypes.c_void_p
PHIDP_PREPARSED_DATA = ctypes.c_void_p # pylint: disable=invalid-name
# This is a HANDLE.
INVALID_HANDLE_VALUE = 0xffffffff
# Status codes
NTSTATUS = ctypes.c_long
HIDP_STATUS_SUCCESS = 0x00110000
FILE_SHARE_READ = 0x00000001
FILE_SHARE_WRITE = 0x00000002
OPEN_EXISTING = 0x03
ERROR_ACCESS_DENIED = 0x05
# CreateFile Flags
GENERIC_WRITE = 0x40000000
GENERIC_READ = 0x80000000
# Function signatures
hid.HidD_GetHidGuid.restype = None
hid.HidD_GetHidGuid.argtypes = [ctypes.POINTER(GUID)]
hid.HidD_GetAttributes.restype = wintypes.BOOLEAN
hid.HidD_GetAttributes.argtypes = [HANDLE, ctypes.POINTER(HidAttributes)]
hid.HidD_GetPreparsedData.restype = wintypes.BOOLEAN
hid.HidD_GetPreparsedData.argtypes = [HANDLE,
ctypes.POINTER(PHIDP_PREPARSED_DATA)]
hid.HidD_FreePreparsedData.restype = wintypes.BOOLEAN
hid.HidD_FreePreparsedData.argtypes = [PHIDP_PREPARSED_DATA]
hid.HidD_GetProductString.restype = wintypes.BOOLEAN
hid.HidD_GetProductString.argtypes = [HANDLE, ctypes.c_void_p, ctypes.c_ulong]
hid.HidP_GetCaps.restype = NTSTATUS
hid.HidP_GetCaps.argtypes = [PHIDP_PREPARSED_DATA,
ctypes.POINTER(HidCapabilities)]
setupapi.SetupDiGetClassDevsA.argtypes = [ctypes.POINTER(GUID), ctypes.c_char_p,
wintypes.HWND, wintypes.DWORD]
setupapi.SetupDiGetClassDevsA.restype = HDEVINFO
setupapi.SetupDiEnumDeviceInterfaces.restype = wintypes.BOOL
setupapi.SetupDiEnumDeviceInterfaces.argtypes = [
HDEVINFO, ctypes.c_void_p, ctypes.POINTER(GUID), wintypes.DWORD,
ctypes.POINTER(DeviceInterfaceData)]
setupapi.SetupDiGetDeviceInterfaceDetailA.restype = wintypes.BOOL
setupapi.SetupDiGetDeviceInterfaceDetailA.argtypes = [
HDEVINFO, ctypes.POINTER(DeviceInterfaceData),
ctypes.POINTER(DeviceInterfaceDetailData), wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD), ctypes.c_void_p]
kernel32.CreateFileA.restype = HANDLE
kernel32.CreateFileA.argtypes = [
ctypes.c_char_p, wintypes.DWORD, wintypes.DWORD, ctypes.c_void_p,
wintypes.DWORD, wintypes.DWORD, HANDLE]
kernel32.CloseHandle.restype = wintypes.BOOL
kernel32.CloseHandle.argtypes = [HANDLE]
kernel32.ReadFile.restype = wintypes.BOOL
kernel32.ReadFile.argtypes = [
HANDLE, ctypes.c_void_p, wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD), ctypes.c_void_p]
kernel32.WriteFile.restype = wintypes.BOOL
kernel32.WriteFile.argtypes = [
HANDLE, ctypes.c_void_p, wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD), ctypes.c_void_p]
def FillDeviceAttributes(device, descriptor):
"""Fill out the attributes of the device.
Fills the device's HidAttributes and product string
into the descriptor.
Args:
device: A handle to the open device
descriptor: The DeviceDescriptor to populate with the
attributes.
Returns:
None
Raises:
WindowsError when unable to obtain attributes or product
string.
"""
attributes = HidAttributes()
result = hid.HidD_GetAttributes(device, ctypes.byref(attributes))
if not result:
raise ctypes.WinError()
buf = ctypes.create_string_buffer(1024)
result = hid.HidD_GetProductString(device, buf, 1024)
if not result:
raise ctypes.WinError()
descriptor.vendor_id = attributes.VendorID
descriptor.product_id = attributes.ProductID
descriptor.product_string = ctypes.wstring_at(buf)
def FillDeviceCapabilities(device, descriptor):
"""Fill out device capabilities.
Fills the HidCapabilities of the device into the descriptor.
Args:
device: A handle to the open device
descriptor: DeviceDescriptor to populate with the
capabilities
Returns:
None
Raises:
WindowsError when unable to obtain capabilities.
"""
preparsed_data = PHIDP_PREPARSED_DATA(0)
ret = hid.HidD_GetPreparsedData(device, ctypes.byref(preparsed_data))
if not ret:
raise ctypes.WinError()
try:
caps = HidCapabilities()
ret = hid.HidP_GetCaps(preparsed_data, ctypes.byref(caps))
if ret != HIDP_STATUS_SUCCESS:
raise ctypes.WinError()
descriptor.usage = caps.Usage
descriptor.usage_page = caps.UsagePage
descriptor.internal_max_in_report_len = caps.InputReportByteLength
descriptor.internal_max_out_report_len = caps.OutputReportByteLength
finally:
hid.HidD_FreePreparsedData(preparsed_data)
# The python os.open() implementation uses the windows libc
# open() function, which ultimately calls CreateFile, but does so in a way
# that doesn't let us open the device with the right set of permissions.
# Therefore, we have to directly use the Windows API calls.
# We could use PyWin32, which provides simple wrappers. However, to avoid
# requiring a PyWin32 dependency for clients, we simply also implement it
# using ctypes.
def OpenDevice(path, enum=False):
"""Open the device and return a handle to it."""
desired_access = GENERIC_WRITE | GENERIC_READ
share_mode = FILE_SHARE_READ | FILE_SHARE_WRITE
if enum:
desired_access = 0
h = kernel32.CreateFileA(path,
desired_access,
share_mode,
None, OPEN_EXISTING, 0, None)
if h == INVALID_HANDLE_VALUE:
raise ctypes.WinError()
return h
class WindowsHidDevice(base.HidDevice):
"""Implementation of raw HID interface on Windows."""
@staticmethod
def Enumerate():
"""See base class."""
hid_guid = GUID()
hid.HidD_GetHidGuid(ctypes.byref(hid_guid))
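# 0x12 == DIGCF_PRESENT | DIGCF_DEVICEINTERFACE: enumerate only devices that
# are currently present and expose a device interface.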
devices = setupapi.SetupDiGetClassDevsA(
ctypes.byref(hid_guid), None, None, 0x12)
index = 0
interface_info = DeviceInterfaceData()
interface_info.cbSize = ctypes.sizeof(DeviceInterfaceData) # pylint: disable=invalid-name
out = []
while True:
result = setupapi.SetupDiEnumDeviceInterfaces(
devices, 0, ctypes.byref(hid_guid), index,
ctypes.byref(interface_info))
index += 1
if not result:
break
detail_len = wintypes.DWORD()
result = setupapi.SetupDiGetDeviceInterfaceDetailA(
devices, ctypes.byref(interface_info), None, 0,
ctypes.byref(detail_len), None)
detail_len = detail_len.value
if detail_len == 0:
# skip this device, some kind of error
continue
buf = ctypes.create_string_buffer(detail_len)
interface_detail = DeviceInterfaceDetailData.from_buffer(buf)
interface_detail.cbSize = ctypes.sizeof(DeviceInterfaceDetailData)
result = setupapi.SetupDiGetDeviceInterfaceDetailA(
devices, ctypes.byref(interface_info),
ctypes.byref(interface_detail), detail_len, None, None)
if not result:
raise ctypes.WinError()
descriptor = base.DeviceDescriptor()
# This is a bit of a hack to work around a limitation of ctypes and
# "header" structures that are common in windows. DevicePath is a
# ctypes array of length 1, but it is backed with a buffer that is much
# longer and contains a null terminated string. So, we read the null
# terminated string off DevicePath here. Per the comment above, the
# alignment of this struct varies depending on architecture, but
# in all cases the path string starts 1 DWORD into the structure.
#
# The path length is:
# length of detail buffer - header length (1 DWORD)
path_len = detail_len - ctypes.sizeof(wintypes.DWORD)
descriptor.path = ctypes.string_at(
ctypes.addressof(interface_detail.DevicePath), path_len)
device = None
try:
device = OpenDevice(descriptor.path, True)
except WindowsError as e: # pylint: disable=undefined-variable
if e.winerror == ERROR_ACCESS_DENIED: # Access Denied, e.g. a keyboard
continue
else:
raise e
try:
FillDeviceAttributes(device, descriptor)
FillDeviceCapabilities(device, descriptor)
out.append(descriptor.ToPublicDict())
except WindowsError as e:
continue # skip this device
finally:
kernel32.CloseHandle(device)
return out
def __init__(self, path):
"""See base class."""
base.HidDevice.__init__(self, path)
self.dev = OpenDevice(path)
self.desc = base.DeviceDescriptor()
FillDeviceCapabilities(self.dev, self.desc)
def GetInReportDataLength(self):
"""See base class."""
return self.desc.internal_max_in_report_len - 1
def GetOutReportDataLength(self):
"""See base class."""
return self.desc.internal_max_out_report_len - 1
def Write(self, packet):
"""See base class."""
if len(packet) != self.GetOutReportDataLength():
raise errors.HidError("Packet length must match report data length.")
packet_data = [0] + packet # Prepend the zero-byte (report ID)
out = bytes(bytearray(packet_data))
num_written = wintypes.DWORD()
ret = (
kernel32.WriteFile(
self.dev, out, len(out),
ctypes.byref(num_written), None))
if num_written.value != len(out):
raise errors.HidError(
"Failed to write complete packet. " + "Expected %d, but got %d" %
(len(out), num_written.value))
if not ret:
raise ctypes.WinError()
def Read(self):
"""See base class."""
buf = ctypes.create_string_buffer(self.desc.internal_max_in_report_len)
num_read = wintypes.DWORD()
ret = kernel32.ReadFile(
self.dev, buf, len(buf), ctypes.byref(num_read), None)
if num_read.value != self.desc.internal_max_in_report_len:
raise errors.HidError("Failed to read full length report from device.")
if not ret:
raise ctypes.WinError()
# Convert the string buffer to a list of numbers. Throw away the first
# byte, which is the report id (which we don't care about).
return list(bytearray(buf[1:]))
def __del__(self):
"""Closes the file handle when object is GC-ed."""
if hasattr(self, 'dev'):
kernel32.CloseHandle(self.dev)
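# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how this class might be driven. It assumes the dict
# returned by base.DeviceDescriptor.ToPublicDict() exposes 'usage_page' and
# 'path' keys (an assumption -- check base.DeviceDescriptor for the real
# field names) and that a matching HID device is attached.
if __name__ == '__main__':
  for desc in WindowsHidDevice.Enumerate():
    if desc.get('usage_page') == 0xf1d0:  # FIDO/U2F usage page (assumed filter)
      dev = WindowsHidDevice(desc['path'])
      # Send one all-zero report and read a single report back.
      dev.Write([0] * dev.GetOutReportDataLength())
      print(dev.Read())
      break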
|
|
#!/usr/bin/env python
"""
Copyright (c) 2015-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import wb
module = 'wb_adapter'
testbench = 'test_%s_32_8' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/priority_encoder.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
ADDR_WIDTH = 32
WBM_DATA_WIDTH = 32
WBM_SELECT_WIDTH = 4
WBS_DATA_WIDTH = 8
WBS_SELECT_WIDTH = 1
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
wbm_adr_i = Signal(intbv(0)[ADDR_WIDTH:])
wbm_dat_i = Signal(intbv(0)[WBM_DATA_WIDTH:])
wbm_we_i = Signal(bool(0))
wbm_sel_i = Signal(intbv(0)[WBM_SELECT_WIDTH:])
wbm_stb_i = Signal(bool(0))
wbm_cyc_i = Signal(bool(0))
wbs_dat_i = Signal(intbv(0)[WBS_DATA_WIDTH:])
wbs_ack_i = Signal(bool(0))
wbs_err_i = Signal(bool(0))
wbs_rty_i = Signal(bool(0))
# Outputs
wbm_dat_o = Signal(intbv(0)[WBM_DATA_WIDTH:])
wbm_ack_o = Signal(bool(0))
wbm_err_o = Signal(bool(0))
wbm_rty_o = Signal(bool(0))
wbs_adr_o = Signal(intbv(0)[ADDR_WIDTH:])
wbs_dat_o = Signal(intbv(0)[WBS_DATA_WIDTH:])
wbs_we_o = Signal(bool(0))
wbs_sel_o = Signal(intbv(0)[WBS_SELECT_WIDTH:])
wbs_stb_o = Signal(bool(0))
wbs_cyc_o = Signal(bool(0))
# WB master
wbm_inst = wb.WBMaster()
wbm_logic = wbm_inst.create_logic(
clk,
adr_o=wbm_adr_i,
dat_i=wbm_dat_o,
dat_o=wbm_dat_i,
we_o=wbm_we_i,
sel_o=wbm_sel_i,
stb_o=wbm_stb_i,
ack_i=wbm_ack_o,
cyc_o=wbm_cyc_i,
name='master'
)
# WB RAM model
wb_ram_inst = wb.WBRam(2**16)
wb_ram_port0 = wb_ram_inst.create_port(
clk,
adr_i=wbs_adr_o,
dat_i=wbs_dat_o,
dat_o=wbs_dat_i,
we_i=wbs_we_o,
sel_i=wbs_sel_o,
stb_i=wbs_stb_o,
ack_o=wbs_ack_i,
cyc_i=wbs_cyc_o,
latency=1,
asynchronous=False,
name='slave'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
wbm_adr_i=wbm_adr_i,
wbm_dat_i=wbm_dat_i,
wbm_dat_o=wbm_dat_o,
wbm_we_i=wbm_we_i,
wbm_sel_i=wbm_sel_i,
wbm_stb_i=wbm_stb_i,
wbm_ack_o=wbm_ack_o,
wbm_err_o=wbm_err_o,
wbm_rty_o=wbm_rty_o,
wbm_cyc_i=wbm_cyc_i,
wbs_adr_o=wbs_adr_o,
wbs_dat_i=wbs_dat_i,
wbs_dat_o=wbs_dat_o,
wbs_we_o=wbs_we_o,
wbs_sel_o=wbs_sel_o,
wbs_stb_o=wbs_stb_o,
wbs_ack_i=wbs_ack_i,
wbs_err_i=wbs_err_i,
wbs_rty_i=wbs_rty_i,
wbs_cyc_o=wbs_cyc_o
)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
print("test 1: write")
current_test.next = 1
wbm_inst.init_write(4, b'\x11\x22\x33\x44')
yield wbm_inst.wait()
yield clk.posedge
data = wb_ram_inst.read_mem(0, 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert wb_ram_inst.read_mem(4,4) == b'\x11\x22\x33\x44'
yield delay(100)
yield clk.posedge
print("test 2: read")
current_test.next = 2
wbm_inst.init_read(4, 4)
yield wbm_inst.wait()
yield clk.posedge
data = wbm_inst.get_read_data()
assert data[0] == 4
assert data[1] == b'\x11\x22\x33\x44'
yield delay(100)
yield clk.posedge
print("test 3: various writes")
current_test.next = 3
for length in range(1,8):
for offset in range(4,8):
wb_ram_inst.write_mem(256*(16*offset+length), b'\xAA'*16)
wbm_inst.init_write(256*(16*offset+length)+offset, b'\x11\x22\x33\x44\x55\x66\x77\x88'[0:length])
yield wbm_inst.wait()
yield clk.posedge
data = wb_ram_inst.read_mem(256*(16*offset+length), 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert wb_ram_inst.read_mem(256*(16*offset+length)+offset, length) == b'\x11\x22\x33\x44\x55\x66\x77\x88'[0:length]
assert wb_ram_inst.read_mem(256*(16*offset+length)+offset-1, 1) == b'\xAA'
assert wb_ram_inst.read_mem(256*(16*offset+length)+offset+length, 1) == b'\xAA'
yield delay(100)
yield clk.posedge
print("test 4: various reads")
current_test.next = 4
for length in range(1,8):
for offset in range(4,8):
wbm_inst.init_read(256*(16*offset+length)+offset, length)
yield wbm_inst.wait()
yield clk.posedge
data = wbm_inst.get_read_data()
assert data[0] == 256*(16*offset+length)+offset
assert data[1] == b'\x11\x22\x33\x44\x55\x66\x77\x88'[0:length]
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
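# --- Hedged illustration (not part of the original testbench) ---
# A pure-Python sketch of the expansion a 32-to-8-bit Wishbone adapter has to
# perform: each 32-bit master cycle with a 4-bit byte select becomes up to four
# 8-bit slave cycles, one per asserted select bit. The lane-to-address mapping
# below (little-endian lanes) is an assumption made only for illustration; the
# RTL under test may order lanes differently.
def _expand_cycle_sketch(adr, dat_32, sel_4):
    """Yield (address, byte) pairs for each asserted byte lane."""
    for lane in range(4):
        if sel_4 & (1 << lane):
            yield adr + lane, (dat_32 >> (8 * lane)) & 0xff
# Example: under this assumed mapping, a full-word write of 0x44332211 at
# address 4 expands to list(_expand_cycle_sketch(4, 0x44332211, 0b1111)),
# i.e. [(4, 0x11), (5, 0x22), (6, 0x33), (7, 0x44)].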
|
|
#=========================================================================
# Mesh Unit Test
#=========================================================================
import random
from math import sqrt
from pclib.ifcs import NetMsg
#-------------------------------------------------------------------------
# mk_msg
#-------------------------------------------------------------------------
def mk_msg( dest, src, seqnum, payload ):
msg = NetMsg( nrouters, nmessages, payload_nbits )
msg.src = src
msg.dest = dest
msg.seqnum = seqnum
msg.payload = payload
return msg
#-------------------------------------------------------------------------
# terminal_msgs
#-------------------------------------------------------------------------
def terminal_msgs():
size = 8
src_msgs = [ [] for x in xrange( nrouters ) ]
sink_msgs = [ [] for x in xrange( nrouters ) ]
# Syntax helpers
def mk_net_msg( dest, src, seq_num, payload ):
msg = mk_msg( dest, src, seq_num, payload )
src_msgs[src].append( msg )
sink_msgs[dest].append( msg )
for i in xrange( nrouters ):
for j in xrange( size ):
dest = i
mk_net_msg( dest, i, j, j )
return [ src_msgs, sink_msgs ]
#-------------------------------------------------------------------------
# nearest_neighbor_east_msgs
#-------------------------------------------------------------------------
def nearest_neighbor_east_msgs( size ):
src_msgs = [ [] for x in xrange( nrouters ) ]
sink_msgs = [ [] for x in xrange( nrouters ) ]
# Syntax helpers
def mk_net_msg( dest, src, seq_num, payload ):
msg = mk_msg( dest, src, seq_num, payload )
src_msgs[src].append( msg )
sink_msgs[dest].append( msg )
for i in xrange( nrouters ):
for j in xrange( size ):
if ( i == nrouters-1 ):
dest = 0
else:
dest = i + 1
#data_roll = random.randint( 0, pow( 2, 32 ) - 1 )
mk_net_msg( dest, i, j, j )
return [ src_msgs, sink_msgs ]
#-------------------------------------------------------------------------
# nearest_neighbor_west_msgs
#-------------------------------------------------------------------------
def nearest_neighbor_west_msgs( size ):
src_msgs = [ [] for x in xrange( nrouters ) ]
sink_msgs = [ [] for x in xrange( nrouters ) ]
# Syntax helpers
def mk_net_msg( dest, src, seq_num, payload ):
msg = mk_msg( dest, src, seq_num, payload )
src_msgs[src].append( msg )
sink_msgs[dest].append( msg )
for i in xrange( nrouters ):
for j in xrange( size ):
if ( i == 0 ):
dest = nrouters-1
else:
dest = i - 1
data_roll = random.randint( 0, pow( 2, 32 ) - 1 )
mk_net_msg( dest, i, j, data_roll )
return [ src_msgs, sink_msgs ]
#-------------------------------------------------------------------------
# hotspot_msgs
#-------------------------------------------------------------------------
def hotspot_msgs( size ):
src_msgs = [ [] for x in xrange( nrouters ) ]
sink_msgs = [ [] for x in xrange( nrouters ) ]
# Syntax helpers
def mk_net_msg( dest, src, seq_num, payload ):
msg = mk_msg( dest, src, seq_num, payload )
src_msgs[src].append( msg )
sink_msgs[dest].append( msg )
for i in xrange( nrouters ):
for j in xrange( size ):
# all routers send to node 0
dest = 0
data_roll = random.randint( 0, pow( 2, 32 ) - 1 )
mk_net_msg( dest, i, j, data_roll )
return [ src_msgs, sink_msgs ]
#-------------------------------------------------------------------------
# partition_msgs
#-------------------------------------------------------------------------
def partition_msgs( size ):
src_msgs = [ [] for x in xrange( nrouters ) ]
sink_msgs = [ [] for x in xrange( nrouters ) ]
# Syntax helpers
def mk_net_msg( dest, src, seq_num, payload ):
msg = mk_msg( dest, src, seq_num, payload )
src_msgs[src].append( msg )
sink_msgs[dest].append( msg )
# Partition the network into halves
partition_edge = nrouters / 2
for i in xrange( nrouters ):
for j in xrange( size ):
if ( i < partition_edge ):
dest_roll = random.randint( 0, partition_edge - 1 )
else:
dest_roll = random.randint( partition_edge, nrouters - 1 )
data_roll = random.randint( 0, pow( 2, 32 ) - 1 )
mk_net_msg( dest_roll, i, j, data_roll )
return [ src_msgs, sink_msgs ]
#-------------------------------------------------------------------------
# uniform_random_msgs
#-------------------------------------------------------------------------
def uniform_random_msgs( size ):
src_msgs = [ [] for x in xrange( nrouters ) ]
sink_msgs = [ [] for x in xrange( nrouters ) ]
# Syntax helpers
def mk_net_msg( dest, src, seq_num, payload ):
msg = mk_msg( dest, src, seq_num, payload )
src_msgs[src].append( msg )
sink_msgs[dest].append( msg )
for i in xrange( nrouters ):
for j in xrange( size ):
dest_roll = random.randint( 0, nrouters - 1 )
data_roll = random.randint( 0, pow( 2, 32 ) - 1 )
mk_net_msg( dest_roll, i, j, data_roll )
return [ src_msgs, sink_msgs ]
#-------------------------------------------------------------------------
# tornado_msgs
#-------------------------------------------------------------------------
def tornado_msgs( size ):
src_msgs = [ [] for x in xrange( nrouters ) ]
sink_msgs = [ [] for x in xrange( nrouters ) ]
# Syntax helpers
def mk_net_msg( dest, src, seq_num, payload ):
msg = mk_msg( dest, src, seq_num, payload )
src_msgs[src].append( msg )
sink_msgs[dest].append( msg )
nrouters_1D = int( sqrt( nrouters ) )
for i in xrange( nrouters ):
for j in xrange( size ):
x = ( (i%nrouters_1D) + int( nrouters_1D/2 ) - 1 ) % nrouters_1D
y = ( (i/nrouters_1D) + int( nrouters_1D/2 ) - 1 ) % nrouters_1D
dest = x + nrouters_1D * y
data_roll = random.randint( 0, pow( 2, 32 ) - 1 )
mk_net_msg( dest, i, j, data_roll )
return [ src_msgs, sink_msgs ]
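#-------------------------------------------------------------------------
# tornado destination example (illustration only, not part of the tests)
#-------------------------------------------------------------------------
# Quick standalone check of the tornado destination formula above for a
# hypothetical 4x4 mesh (nrouters = 16, nrouters_1D = 4):
#
#   >>> k = 4
#   >>> [ ((i % k) + k//2 - 1) % k + k * (((i // k) + k//2 - 1) % k)
#   ...   for i in range(k*k) ]
#   [5, 6, 7, 4, 9, 10, 11, 8, 13, 14, 15, 12, 1, 2, 3, 0]
#
# i.e. each router targets the node one hop away (wrapped) in both the x and
# y dimensions, which is what the x/y offsets in tornado_msgs() compute.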
|
|
import colorama
from colorama import Style
import dbt.events.functions as this # don't worry I hate it too.
from dbt.events.base_types import NoStdOut, Event, NoFile, ShowException, Cache
from dbt.events.types import EventBufferFull, T_Event, MainReportVersion, EmptyLine
import dbt.flags as flags
# TODO this will need to move eventually
from dbt.logger import SECRET_ENV_PREFIX, make_log_dir_if_missing, GLOBAL_LOGGER
from datetime import datetime
import json
import io
from io import StringIO, TextIOWrapper
import logbook
import logging
from logging import Logger
import sys
from logging.handlers import RotatingFileHandler
import os
import uuid
import threading
from typing import Any, Dict, List, Optional, Union
from collections import deque
global LOG_VERSION
LOG_VERSION = 2
# create the global event history buffer with the default max size (10k)
# python 3.7 doesn't support type hints on globals, but mypy requires them. hence the ignore.
# TODO the flags module has not yet been resolved when this is created
global EVENT_HISTORY
EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE) # type: ignore
# create the global file logger with no configuration
global FILE_LOG
FILE_LOG = logging.getLogger('default_file')
null_handler = logging.NullHandler()
FILE_LOG.addHandler(null_handler)
# set up logger to go to stdout with defaults
# setup_event_logger will be called once args have been parsed
global STDOUT_LOG
STDOUT_LOG = logging.getLogger('default_stdout')
STDOUT_LOG.setLevel(logging.INFO)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
STDOUT_LOG.addHandler(stdout_handler)
format_color = True
format_json = False
invocation_id: Optional[str] = None
# Colorama needs some help on windows because we're using logger.info
# instead of print(). If the Windows env doesn't have a TERM var set,
# then we should override the logging stream to use the colorama
# converter. If the TERM var is set (as with Git Bash), then it's safe
# to send escape characters and no log handler injection is needed.
colorama_stdout = sys.stdout
colorama_wrap = True
colorama.init(wrap=colorama_wrap)
if sys.platform == 'win32' and not os.getenv('TERM'):
colorama_wrap = False
colorama_stdout = colorama.AnsiToWin32(sys.stdout).stream
elif sys.platform == 'win32':
colorama_wrap = False
colorama.init(wrap=colorama_wrap)
def setup_event_logger(log_path, level_override=None):
# flags have been resolved, and log_path is known
global EVENT_HISTORY
EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE) # type: ignore
make_log_dir_if_missing(log_path)
this.format_json = flags.LOG_FORMAT == 'json'
# USE_COLORS can be None if the app just started and the cli flags
# haven't been applied yet
this.format_color = True if flags.USE_COLORS else False
# TODO this default should live somewhere better
log_dest = os.path.join(log_path, 'dbt.log')
level = level_override or (logging.DEBUG if flags.DEBUG else logging.INFO)
# overwrite the STDOUT_LOG logger with the configured one
this.STDOUT_LOG = logging.getLogger('configured_std_out')
this.STDOUT_LOG.setLevel(level)
FORMAT = "%(message)s"
stdout_passthrough_formatter = logging.Formatter(fmt=FORMAT)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(stdout_passthrough_formatter)
stdout_handler.setLevel(level)
# clear existing stdout TextIOWrapper stream handlers
this.STDOUT_LOG.handlers = [
h for h in this.STDOUT_LOG.handlers
if not (hasattr(h, 'stream') and isinstance(h.stream, TextIOWrapper)) # type: ignore
]
this.STDOUT_LOG.addHandler(stdout_handler)
# overwrite the FILE_LOG logger with the configured one
this.FILE_LOG = logging.getLogger('configured_file')
this.FILE_LOG.setLevel(logging.DEBUG) # always debug regardless of user input
file_passthrough_formatter = logging.Formatter(fmt=FORMAT)
file_handler = RotatingFileHandler(
filename=log_dest,
encoding='utf8',
maxBytes=10 * 1024 * 1024, # 10 mb
backupCount=5
)
file_handler.setFormatter(file_passthrough_formatter)
file_handler.setLevel(logging.DEBUG) # always debug regardless of user input
this.FILE_LOG.handlers.clear()
this.FILE_LOG.addHandler(file_handler)
# used for integration tests
def capture_stdout_logs() -> StringIO:
capture_buf = io.StringIO()
stdout_capture_handler = logging.StreamHandler(capture_buf)
stdout_capture_handler.setLevel(logging.DEBUG)
this.STDOUT_LOG.addHandler(stdout_capture_handler)
return capture_buf
# used for integration tests
def stop_capture_stdout_logs() -> None:
this.STDOUT_LOG.handlers = [
h for h in this.STDOUT_LOG.handlers
if not (hasattr(h, 'stream') and isinstance(h.stream, StringIO)) # type: ignore
]
def env_secrets() -> List[str]:
return [
v for k, v in os.environ.items()
if k.startswith(SECRET_ENV_PREFIX)
]
def scrub_secrets(msg: str, secrets: List[str]) -> str:
scrubbed = msg
for secret in secrets:
scrubbed = scrubbed.replace(secret, "*****")
return scrubbed
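# Small illustration (not part of dbt) of how env_secrets/scrub_secrets work
# together: the value of any env var whose name starts with SECRET_ENV_PREFIX
# (the exact prefix string lives in dbt.logger) is masked before logging.
#
#   >>> os.environ[SECRET_ENV_PREFIX + 'API_TOKEN'] = 'hunter2'
#   >>> scrub_secrets('connecting with token=hunter2', env_secrets())
#   'connecting with token=*****'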
# returns a dictionary representation of the event fields.
# the message may contain secrets which must be scrubbed at the usage site.
def event_to_serializable_dict(
e: T_Event,
) -> Dict[str, Any]:
log_line = dict()
code: str
try:
log_line = e.to_dict()
except AttributeError as exc:
event_type = type(e).__name__
raise Exception( # TODO this may hang async threads
f"type {event_type} is not serializable. {str(exc)}"
)
# We get the code from the event object, so we don't need it in the data
if 'code' in log_line:
del log_line['code']
event_dict = {
'type': 'log_line',
'log_version': LOG_VERSION,
'ts': get_ts_rfc3339(),
'pid': e.get_pid(),
'msg': e.message(),
'level': e.level_tag(),
'data': log_line,
'invocation_id': e.get_invocation_id(),
'thread_name': e.get_thread_name(),
'code': e.code
}
return event_dict
# translates an Event to a completely formatted text-based log line
# type hinting everything as strings so we don't get any unintentional string conversions via str()
def create_info_text_log_line(e: T_Event) -> str:
color_tag: str = '' if this.format_color else Style.RESET_ALL
ts: str = get_ts().strftime("%H:%M:%S")
scrubbed_msg: str = scrub_secrets(e.message(), env_secrets())
log_line: str = f"{color_tag}{ts} {scrubbed_msg}"
return log_line
def create_debug_text_log_line(e: T_Event) -> str:
log_line: str = ''
# Create a separator if this is the beginning of an invocation
if type(e) == MainReportVersion:
separator = 30 * '='
log_line = f'\n\n{separator} {get_ts()} | {get_invocation_id()} {separator}\n'
color_tag: str = '' if this.format_color else Style.RESET_ALL
ts: str = get_ts().strftime("%H:%M:%S.%f")
scrubbed_msg: str = scrub_secrets(e.message(), env_secrets())
level: str = e.level_tag() if len(e.level_tag()) == 5 else f"{e.level_tag()} "
thread = ''
if threading.current_thread().getName():
thread_name = threading.current_thread().getName()
thread_name = thread_name[:10]
thread_name = thread_name.ljust(10, ' ')
thread = f' [{thread_name}]:'
log_line = log_line + f"{color_tag}{ts} [{level}]{thread} {scrubbed_msg}"
return log_line
# translates an Event to a completely formatted json log line
def create_json_log_line(e: T_Event) -> Optional[str]:
if type(e) == EmptyLine:
return None # will not be sent to logger
# using preformatted ts string instead of formatting it here to be extra careful about timezone
values = event_to_serializable_dict(e)
raw_log_line = json.dumps(values, sort_keys=True)
return scrub_secrets(raw_log_line, env_secrets())
# calls create_info_text_log_line(), create_debug_text_log_line(), or create_json_log_line()
# according to the logger config
def create_log_line(
e: T_Event,
file_output=False
) -> Optional[str]:
if this.format_json:
return create_json_log_line(e) # json output, both console and file
elif file_output is True or flags.DEBUG:
return create_debug_text_log_line(e) # default file output
else:
return create_info_text_log_line(e) # console output
# allows for reuse of this obnoxious if-else tree.
# do not use for exceptions, it doesn't pass along exc_info, stack_info, or extra
def send_to_logger(l: Union[Logger, logbook.Logger], level_tag: str, log_line: str):
if not log_line:
return
if level_tag == 'test':
# TODO after implementing #3977 send to new test level
l.debug(log_line)
elif level_tag == 'debug':
l.debug(log_line)
elif level_tag == 'info':
l.info(log_line)
elif level_tag == 'warn':
l.warning(log_line)
elif level_tag == 'error':
l.error(log_line)
else:
raise AssertionError(
f"While attempting to log {log_line}, encountered the unhandled level: {level_tag}"
)
def send_exc_to_logger(
l: Logger,
level_tag: str,
log_line: str,
exc_info=True,
stack_info=False,
extra=False
):
if level_tag == 'test':
# TODO after implementing #3977 send to new test level
l.debug(
log_line,
exc_info=exc_info,
stack_info=stack_info,
extra=extra
)
elif level_tag == 'debug':
l.debug(
log_line,
exc_info=exc_info,
stack_info=stack_info,
extra=extra
)
elif level_tag == 'info':
l.info(
log_line,
exc_info=exc_info,
stack_info=stack_info,
extra=extra
)
elif level_tag == 'warn':
l.warning(
log_line,
exc_info=exc_info,
stack_info=stack_info,
extra=extra
)
elif level_tag == 'error':
l.error(
log_line,
exc_info=exc_info,
stack_info=stack_info,
extra=extra
)
else:
raise AssertionError(
f"While attempting to log {log_line}, encountered the unhandled level: {level_tag}"
)
# top-level method for accessing the new eventing system
# this is where all the side effects happen branched by event type
# (i.e. - mutating the event history, printing to stdout, logging
# to files, etc.)
def fire_event(e: Event) -> None:
# skip logs when `--log-cache-events` is not passed
if isinstance(e, Cache) and not flags.LOG_CACHE_EVENTS:
return
# if the event history deque will be completely filled by this event,
# fire a warning that old events are now being dropped
global EVENT_HISTORY
if len(EVENT_HISTORY) == (flags.EVENT_BUFFER_SIZE - 1):
EVENT_HISTORY.append(e)
fire_event(EventBufferFull())
else:
EVENT_HISTORY.append(e)
# backwards compatibility for plugins that require old logger (dbt-rpc)
if flags.ENABLE_LEGACY_LOGGER:
# using Event::message because the legacy logger didn't differentiate messages by
# destination
log_line = create_log_line(e)
if log_line:
send_to_logger(GLOBAL_LOGGER, e.level_tag(), log_line)
return # exit the function to avoid using the current logger as well
# always logs debug level regardless of user input
if not isinstance(e, NoFile):
log_line = create_log_line(e, file_output=True)
# doesn't send exceptions to exception logger
if log_line:
send_to_logger(FILE_LOG, level_tag=e.level_tag(), log_line=log_line)
if not isinstance(e, NoStdOut):
# explicitly checking the debug flag here so that potentially expensive-to-construct
# log messages are not constructed if debug messages are never shown.
if e.level_tag() == 'debug' and not flags.DEBUG:
return # eat the message in case it was one of the expensive ones
log_line = create_log_line(e)
if log_line:
if not isinstance(e, ShowException):
send_to_logger(STDOUT_LOG, level_tag=e.level_tag(), log_line=log_line)
else:
send_exc_to_logger(
STDOUT_LOG,
level_tag=e.level_tag(),
log_line=log_line,
exc_info=e.exc_info,
stack_info=e.stack_info,
extra=e.extra
)
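# Illustration (not dbt code): collections.deque with maxlen silently drops the
# oldest element once it is full, which is why the EVENT_BUFFER_SIZE - 1 check
# above only fires EventBufferFull once, just before capacity is reached.
#
#   >>> from collections import deque
#   >>> d = deque(maxlen=3)
#   >>> for i in range(5):
#   ...     d.append(i)
#   >>> list(d)
#   [2, 3, 4]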
def get_invocation_id() -> str:
global invocation_id
if invocation_id is None:
invocation_id = str(uuid.uuid4())
return invocation_id
def set_invocation_id() -> None:
# This is primarily for setting the invocation_id for separate
# commands in the dbt servers. It shouldn't be necessary for the CLI.
global invocation_id
invocation_id = str(uuid.uuid4())
# exactly one time stamp per concrete event
def get_ts() -> datetime:
ts = datetime.utcnow()
return ts
# preformatted time stamp
def get_ts_rfc3339() -> str:
ts = get_ts()
ts_rfc3339 = ts.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
return ts_rfc3339
|
|
#!/Users/will/anaconda3/bin/python
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import scan
from elasticsearch_xpack import XPackClient
import requests
import pandas as pd
import numpy as np
import re
from ipaddress import IPv4Address as ipv4, AddressValueError
import time
from bokeh.plotting import figure, output_file, show, save
from bokeh.models import FuncTickFormatter, FixedTicker, NumeralTickFormatter, Div, Title, LinearAxis, Range1d
from bokeh.charts import Bar, Donut
from bokeh.layouts import gridplot, column, row
es = 'http://users:cadcusers@206.12.59.36:9200'
class Init():
def __init__(self, url = None, timeout = 120):
self.timeout = timeout
if not url:
self.url = es
else:
self.url = url
if not requests.get(self.url):
print("Connection failed!")
exit(1)
def connect(self):
return Elasticsearch(self.url, timeout = self.timeout)
# Number of Batch Job Restarts
def fig1(conn, idx):
query = {
"query" : {
"match_all" : {}
},
"aggs" : {
"numres_peryr" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "year",
"format" : "yyyy"
},
"aggs" : {
"res_ranges" : {
"range" : {
"field" : "NumJobStarts",
# ranges are [from, to)
"ranges" : [
{"to" : 1},
{"from" : 1, "to" : 2},
{"from" : 2, "to" : 6},
{"from" : 6}
]
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["numres_peryr"]["buckets"]:
yr = _["key_as_string"]
events = [__["doc_count"] for __ in _["res_ranges"]["buckets"]]
df = df.append(pd.DataFrame([events], columns = ["Never", "Once", "2-5", ">5"], index = [yr]))
p = figure(plot_width = 1200, toolbar_location = "above")
clr = ["blue", "purple", "orange", "green"]
x = np.array([_ for _ in range(len(df))])
for i, col in enumerate(df.columns):
p.vbar(x = x + i/5 - 0.3, top = np.sqrt(df[col]), bottom = 0, width = 0.15, legend = col, color = clr[i])
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
y = np.array([0.1, 0.5, 1, 2, 4])
p.yaxis[0].ticker = FixedTicker(ticks = np.sqrt(y * 1e6))
p.yaxis[0].formatter = FuncTickFormatter(code = """return (tick**2 / 1e6).toLocaleString("en-US", { minimumFractionDigits: 1 })""")
p.yaxis.axis_label = "Number of jobs (millions)"
return column(Div(text = "<h1>Batch Processing Job Restarts</h1>", width = 600), p)
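# Note (not part of the original script): fig1-fig3 plot sqrt-transformed bar
# heights and then undo the transform in the tick formatter, which compresses
# the large spread in job counts without switching to a log axis. A minimal
# standalone sketch of the same trick (names chosen only for this example):
#
#   import numpy as np
#   from bokeh.plotting import figure
#   from bokeh.models import FixedTicker, FuncTickFormatter
#
#   counts = np.array([1e5, 5e5, 1e6, 4e6])
#   p = figure()
#   p.vbar(x = list(range(len(counts))), top = np.sqrt(counts), width = 0.8)
#   ticks = np.sqrt(np.array([0.1, 0.5, 1, 2, 4]) * 1e6)
#   p.yaxis[0].ticker = FixedTicker(ticks = ticks)
#   p.yaxis[0].formatter = FuncTickFormatter(
#       code = """return (tick**2 / 1e6).toFixed(1)""")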
# histogram of job duration vs machine duration
def fig2(conn, idx):
query = {
"query" : {
"bool" : {
"must" : [
{"term" : {"JobStatus.keyword" : "Completed"}}
]
}
},
"aggs" : {
"jobdur_ranges" : {
"range" : {
"field" : "JobDuration",
# ranges are [from, to)
"ranges" : [
{"to" : 10},
{"from" : 10, "to" : 60},
{"from" : 60, "to" : 600},
{"from" : 600, "to" : 3600},
{"from" : 3600, "to" : 18000},
{"from" : 18000, "to" : 36000},
{"from" : 36000, "to" : 180000},
{"from" : 180000, "to" : 252000},
{"from" : 252000}
]
}
},
"machdur_ranges" : {
"range" : {
"script" : {
"lang" : "painless",
"inline" : "doc['CompletionDate'].value - doc['QDate'].value"
},
# ranges are [from, to)
"ranges" : [
{"to" : 10},
{"from" : 10, "to" : 60},
{"from" : 60, "to" : 600},
{"from" : 600, "to" : 3600},
{"from" : 3600, "to" : 18000},
{"from" : 18000, "to" : 36000},
{"from" : 36000, "to" : 180000},
{"from" : 180000, "to" : 252000},
{"from" : 252000}
]
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
cols = ["<10s", "10s~1m", "1m~10m", "10m~1h", "1h~5h", "5h~10h", "10h~50h", "50h~70h", ">70h"]
for i in ["jobdur_ranges", "machdur_ranges"]:
df = df.append(pd.DataFrame([[_["doc_count"] for _ in res["aggregations"][i]["buckets"]]], columns = cols, index = [i]))
df = df.T
p = figure(plot_width = 1200, toolbar_location = "above")
clr = ["blue", "purple", "orange", "green"]
x = np.array([_ for _ in range(len(df))])
for i, col in enumerate(df.columns):
p.vbar(x = x + i/5 - 0.10, top = np.sqrt(df[col]), bottom = 0, width = 0.2, color = clr[i], legend = "Machine" if col == "jobdur_ranges" else "User")
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
y = np.array([0.1, 0.5, 1, 2, 4])
p.yaxis[0].ticker = FixedTicker(ticks = np.sqrt(y * 1e6))
p.yaxis[0].formatter = FuncTickFormatter(code = """return (tick**2 / 1e6).toLocaleString("en-US", { minimumFractionDigits: 1 })""")
p.yaxis.axis_label = "Number of jobs (millions)"
return column(Div(text = "<h1>Batch Processing Jobs: Machine and User Duration</h1>", width = 1200), p)
# Median of Machine and User Batch Job Duration
def fig3(conn, idx):
query = {
"query" : {
"bool" : {
"must" : [
{"term" : {"JobStatus.keyword" : "Completed"}}
]
}
},
"aggs" : {
"dur_peryr" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "year",
"format" : "yyyy"
},
"aggs" : {
"jobdur_outlier" : {
"percentiles" : {
"field" : "JobDuration"
}
},
"machdur_outlier" : {
"percentiles" : {
"script" : {
"lang" : "painless",
"inline" : "doc['CompletionDate'].value - doc['QDate'].value"
}
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["dur_peryr"]["buckets"]:
yr = _["key_as_string"]
machdur_med = _["machdur_outlier"]["values"]["50.0"]
jobdur_med = _["jobdur_outlier"]["values"]["50.0"]
df = df.append(pd.DataFrame([[jobdur_med / 60, machdur_med / 60]], columns = ["jobdur_med", "machdur_med"], index = [yr]))
p = figure(plot_width = 1200, toolbar_location = "above")
clr = ["blue", "purple", "orange", "green"]
x = np.array([_ for _ in range(len(df))])
for i, col in enumerate(df.columns):
p.vbar(x = x + i/5 - 0.10, top = np.sqrt(df[col]), bottom = 0, width = 0.2, color = clr[i], legend = "Machine" if col == "jobdur_med" else "User")
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
y = np.array([5, 30, 100, 400, 1600])
p.yaxis[0].ticker = FixedTicker(ticks = np.sqrt(y))
p.yaxis[0].formatter = FuncTickFormatter(code = """return (tick**2).toLocaleString("en-US", { minimumFractionDigits: 0 })""")
p.yaxis.axis_label = "Median of Duration (Mins)"
return column(Div(text = "<h1>Median of Machine and User Batch Job Duration</h1>", width = 1200), p)
# Histogram of User Job Duration / Machine Job Duration Ratio
def fig4(conn, idx):
query = {
"query" : {
"bool" : {
"must" : [
{"term" : {"JobStatus.keyword" : "Completed"}}
]
}
},
"aggs": {
"ratio" : {
"histogram" : {
"field" : "JobDuration",
"interval" : 0.001,
"script" : "_value / (doc['CompletionDate'].value - doc['QDate'].value)"
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["ratio"]["buckets"]:
df = df.append(pd.DataFrame([[_["doc_count"]]], columns = ["ratio"], index = ['{:.3f}'.format(_["key"])]))
p = figure(plot_width = 1200, toolbar_location = "above")
p.vbar(x = list(map(float, df.index.values)), top = df["ratio"], bottom = 0, width = 0.001)
p.xaxis[0].formatter = NumeralTickFormatter(format = "0.00%")
p.yaxis.axis_label = "Number of Events"
return column(Div(text = "<h1>Histogram of Machine Job Duration / User Job Duration Ratio</h1>", width = 1200), p)
# Number of Batch Processing Users
def fig5(conn, idx):
query = {
"query" : {
"match_all" : {}
},
"aggs" : {
"usr_peryr" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "year",
"format" : "yyyy"
},
"aggs" : {
"unique_users" : {
"cardinality" : {
"field": "Owner.keyword"
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["usr_peryr"]["buckets"]:
yr = _["key_as_string"]
val = _["unique_users"]["value"]
df = df.append(pd.DataFrame([[val]], columns = ["uniq_usr"], index = [yr]))
p = figure(plot_width = 1200, toolbar_location = "above")
x = [_ for _ in range(len(df))]
p.vbar(x = x, top = df["uniq_usr"], bottom = 0, width = 0.8)
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.yaxis[0].axis_label = "Number of Users"
p.xaxis[0].axis_label = "Year"
return column(Div(text = "<h1>Number of Batch Processing Users</h1>", width = 1200), p)
# Request Ram/Dsk vs VM Ram/Dsk per VM Flavor
def fig6(conn, idx):
query = {
"query" : {
"bool" : {
"must_not" : [
{ "term": { "VMInstanceType.keyword" : "c4.med"} },
{ "term": { "VMInstanceType.keyword" : "12345678-6341-470e-92b7-5142014e7c5e"}},
{ "term": { "VMInstanceType.keyword" : "5c1ed3eb-6341-470e-92b7-5142014e7c5e"}}
]
}
},
"aggs" : {
"grpby_vm" : {
"terms" : {
"field" : "VMInstanceType.keyword",
"size" : 100
},
"aggs" : {
"avg_dskreq" : {
"avg": {
"field" : "RequestDisk"
}
},
"avg_ramreq" : {
"avg": {
"field" : "RequestMemory"
}
},
"dskspec" : {
"avg": {
"field" : "VMSpec.DISK"
}
},
"ramspec" : {
"avg": {
"field" : "VMSpec.RAM"
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["grpby_vm"]["buckets"]:
vm = _["key"]
avg_dskreq = _["avg_dskreq"]["value"]
avg_ramreq = _["avg_ramreq"]["value"]
dskspec = _["dskspec"]["value"]
ramspec = _["ramspec"]["value"]
df = df.append(pd.DataFrame([[vm, avg_dskreq / 1024, avg_ramreq, dskspec, ramspec]], columns = ["vm", "avg_dskreq", "avg_ramreq", "dskspec", "ramspec"]))
VMAlias = {
"c16.med":"13efd2a1-2fd8-48c4-822f-ce9bdc0e0004",
"c2.med":"23090fc1-bdf7-433e-9804-a7ec3d11de08",
"p8-12gb":"2cb70964-721d-47ff-badb-b702898b6fc2",
"c4.hi":"5112ed51-d263-4cc7-8b0f-7ef4782f783c",
"c2.low":"6c1ed3eb-6341-470e-92b7-5142014e7c5e",
"c8.med":"72009191-d893-4a07-871c-7f6e50b4e110",
"c4.low":"8061864c-722b-4f79-83af-91c3a835bd48",
"p8-6gb":"848b71a2-ae6b-4fcf-bba4-b7b0fccff5cf",
"c8.low":"8953676d-def7-4290-b239-4a14311fbb69",
"c8.hi":"a55036b9-f40c-4781-a293-789647c063d7",
"c16.hi":"d816ae8b-ab7d-403d-ae5f-f457b775903d",
"p1-0.75gb-tobedeleted":"f9f6fbd7-a0af-4604-8911-041ea6cbbbe4"
}
df = df.replace({"vm": VMAlias})
df = df.set_index("vm")
df = df.groupby(df.index).mean().sort_values(by = "ramspec")
y = np.array([_ for _ in range(len(df))])
clr = ["purple", "blue", "green" , "orange"]
w = 0.4
p = figure(plot_width = 1200, toolbar_location = "above")
for i, c in enumerate(["ramspec", "avg_ramreq"]):
p.hbar(y = y - w * (i - 1 / 2) , right = df[c] / 1024, left = 0, height = w, color = clr[i], legend = "Requested Memory" if i == 1 else "VM Memory")
p.xaxis[0].axis_label = "GB"
d = dict(zip(y, df.index))
p.yaxis[0].ticker = FixedTicker(ticks = y)
p.yaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.yaxis[0].axis_label = "VM UUID"
p.legend.location = "bottom_right"
df = df.sort_values(by = "dskspec")
p2 = figure(plot_width = 1200, toolbar_location = "above")
for i, c in enumerate(["dskspec", "avg_dskreq"]):
p2.hbar(y = y - w * (i - 1 / 2) , right = df[c], left = 0, height = w, color = clr[i], legend = "Requested Disk Size" if i == 1 else "VM Disk Size")
p2.xaxis[0].axis_label = "GB"
d = dict(zip(y, df.index))
p2.yaxis[0].ticker = FixedTicker(ticks = y)
p2.yaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p2.yaxis[0].axis_label = "VM UUID"
p2.legend.location = "bottom_right"
return column(Div(text = "<h1>Average Memory Requested For Batch VMS</h1>", width = 1200), p, Div(text = "<h1>Average Disk Requested For Batch VMS</h1>", width = 1200), p2)
# Number of Jobs Completed, Disk Usage, Memory Usage per VM Ins per Year
def fig7(conn, idx):
query = {
"query" : {
"bool" : {
"must" : [
{ "term": { "JobStatus.keyword" : "Completed"} }
]
}
},
"aggs" : {
"peryr" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "year",
"format" : "yyyy"
},
"aggs" : {
"tot_ram" : {
"sum" : {
"field" : "MemoryUsage"
}
},
"tot_dsk" : {
"sum" : {
"field" : "DiskUsage"
}
},
"vm_ins" : {
"cardinality" : {
"field" : "VMInstanceName.keyword"
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["peryr"]["buckets"]:
yr = _["key_as_string"]
num_jobs = _["doc_count"]
jobs_per_ins = _["doc_count"] / _["vm_ins"]["value"]
ram_per_ins = _["tot_ram"]["value"] / _["vm_ins"]["value"] / 1024
dsk_per_ins = _["tot_dsk"]["value"] / _["vm_ins"]["value"] / 1024
df = df.append(pd.DataFrame([[num_jobs, jobs_per_ins, ram_per_ins, dsk_per_ins]], columns = ["num_jobs", "jobs", "ram", "dsk"], index = [yr]))
plts = [Div(text = "<h1>Basic Stats</h1>", width = 1200)]
clr = ["blue", "purple", "orange", "green"]
ylabs = ["", "", "GB", "GB"]
ttl = ["Number of Jobs Completed", "Disk Usage", "Memory Usage"]
x = [_ for _ in range(len(df))]
for i in range(len(df.columns)):
p = figure(plot_width = 800, toolbar_location = "above")
p.vbar(x = x, top = df.ix[:,i], bottom = 0, width = 0.8, color = clr[i])
if i == 0:
p.title.text = "Number of Jobs per Year"
else:
p.title.text = "{} per VM Instance".format(ttl[i - 1])
p.yaxis[0].axis_label = ylabs[i]
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
plts.append(p)
return column(plts)
def fig8(conn, idx):
reses = []
for _ in [("lt", "RequestMemory"),("gte", "VMSpec.RAM")]:
query = {
"query" : {
"bool" : {
"must" : [
{ "term": { "JobStatus.keyword" : "Completed"} },
{ "range": { "@timestamp": { _[0]: "2015-01-01" }}}
]
}
},
"aggs" : {
"peryr" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "year",
"format" : "yyyy"
},
"aggs" : {
"med_reqmem" : {
"percentiles" : {
"field": "{}".format(_[1])
}
},
"med_ratio" : {
"percentiles" : {
"script" : {
"lang" : "painless",
"inline" : "doc['MemoryUsage'].value / doc['{}'].value".format(_[1])
}
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
reses.append(res)
df = pd.DataFrame()
for __ in reses:
for _ in __["aggregations"]["peryr"]["buckets"]:
yr = _["key_as_string"]
med_ratio = _["med_ratio"]["values"]["50.0"]
med_reqmem = _["med_reqmem"]["values"]["50.0"]
df = df.append(pd.DataFrame([[med_reqmem / 1024, med_ratio]], columns = ["med_mem", "med_ratio"], index = [yr]))
plts = []
clr = ["blue", "purple", "orange", "green"]
ylabs = ["GB", ""]
ttl = ["Requested Memory", "Memory Usage / Requested Memory Ratio"]
x = [_ for _ in range(len(df))]
for i in range(len(df.columns)):
p = figure(plot_width = 800, toolbar_location = "above", y_axis_type = "log")
if i == 1:
p.y_range = Range1d(0.001, 1.1)
p.vbar(x = x, top = df.ix[:,i], bottom = 0, width = 0.8, color = clr[i])
p.title.text = "Median of {} for Batch Jobs".format(ttl[i])
p.yaxis[0].axis_label = ylabs[i]
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
plts.append(p)
return column(plts)
def fig9(conn, idx):
reses = []
for _ in [("lt", "RequestMemory"),("gte", "VMSpec.RAM")]:
query = {
"query" : {
"bool" : {
"must" : [
{ "term": { "JobStatus.keyword" : "Completed"} },
{ "range": { "@timestamp": { _[0]: "2015-01-01" }}}
]
}
},
"aggs" : {
"per_proj" : {
"terms" : {
"field" : "Project.keyword",
"size" : 100
},
"aggs" : {
"memusg" : {
"avg" : {
"field": "MemoryUsage"
}
},
"reqmem" : {
"avg" : {
"field" : "{}".format(_[1])
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
reses.append(res)
df = pd.DataFrame()
for __ in reses:
for _ in __["aggregations"]["per_proj"]["buckets"]:
proj = _["key"]
reqmem = _["reqmem"]["value"]
memusg = _["memusg"]["value"]
df = df.append(pd.DataFrame([[proj, reqmem / 1024, memusg / 1024]], columns = ["proj", "reqmem", "memusg"]))
df = df.groupby("proj").sum().sort_values("reqmem")
y = np.array([_ for _ in range(len(df))])
clr = ["purple", "orange"]
w = 0.4
p = figure(plot_width = 800, toolbar_location = "above")
for i, c in enumerate(["reqmem", "memusg"]):
p.hbar(y = y - w * (i - 1 / 2) , right = df[c], left = 0, height = w, color = clr[i], legend = "Requested Memory" if i == 0 else "Memory Usage")
p.xaxis[0].axis_label = "GB"
d = dict(zip(y, df.index))
p.yaxis[0].ticker = FixedTicker(ticks = y)
p.yaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.yaxis[0].axis_label = "Projects"
p.legend.location = "bottom_right"
return column(Div(text = "<h1>Average Memory Usage & Requested Memory for Batch VMs</h1>", width = 1200), p)
def fig10(conn):
df = pd.DataFrame()
for idx in ["logs-tomcat", "logs-condor"]:
query = {
"size" : 0,
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : "proc_ws" } },
{ "term" : { "method" : "post" } }
]
}
},
"aggs" : {
"permo" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM"
},
"aggs" : {
"unique_user" : {
"cardinality" : {
"field": "user.keyword"
}
}
}
}
}
}
if idx == "logs-condor":
query["query"] = { "match_all" : {} }
query["aggs"]["permo"]["aggs"]["unique_user"]["cardinality"]["field"] = "Owner.keyword"
res = conn.search(index = idx, body = query)
for _ in res["aggregations"]["permo"]["buckets"]:
df = df.append(pd.DataFrame([[_["unique_user"]["value"]]], columns = [idx], index = [_["key_as_string"]]))
df = df.groupby(df.index).sum().dropna()
df.columns = ["HTCondor", "Web Service"]
x = np.array([_ for _ in range(len(df))])
p = figure(plot_width = 800, toolbar_location = "above")
p.vbar(x = x - 0.2, top = df["HTCondor"], bottom = 0, width = 0.4, legend = "HTCondor", color = "purple")
p.vbar(x = x + 0.2, top = df["Web Service"], bottom = 0, width = 0.4, legend = "Web Service", color = "blue")
p.legend.location = "top_right"
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) + """
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.xaxis.major_label_orientation = np.pi / 4
p.title.text = "Users Submitting Jobs by Web Service and Directly by HTCondor"
return p
def fig11(conn):
df = pd.DataFrame()
for m in ["post", "get"]:
query = {
"size" : 0,
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : "proc_ws" } },
{ "term" : { "method" : m } }
]
}
},
"aggs" : {
"permo" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM"
}
}
}
}
res = conn.search(index = "logs-tomcat", body = query)
for _ in res["aggregations"]["permo"]["buckets"]:
df = df.append(pd.DataFrame([[_["doc_count"]]], columns = [m], index = [_["key_as_string"]]))
df = df.groupby(df.index).sum().dropna()
df.columns = ["Job Submission", "Job Status"]
x = np.array([_ for _ in range(len(df))])
p = figure(plot_width = 800, toolbar_location = "above")
p.vbar(x = x - 0.2, top = df["Job Submission"], bottom = 0, width = 0.4, legend = "Job Submission", color = "purple")
p.vbar(x = x + 0.2, top = df["Job Status"], bottom = 0, width = 0.4, legend = "Job Status", color = "blue")
p.legend.location = "top_right"
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) + """
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.xaxis.major_label_orientation = np.pi / 4
p.title.text = "Requests for Job Queue Status and Job Submission"
return p
def fig12(conn):
df = pd.DataFrame()
for m in ["post", "get"]:
query = {
"size" : 0,
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : "proc_ws" } },
{ "term" : { "method" : m } }
]
}
},
"aggs" : {
"dur_hist" : {
"histogram" : {
"field" : "time",
"interval" : 100,
"extended_bounds" : {
"min" : 0,
"max" : 40000
}
}
}
}
}
res = conn.search(index = "logs-tomcat", body = query)
for _ in res["aggregations"]["dur_hist"]["buckets"]:
df = df.append(pd.DataFrame([[_["doc_count"]]], columns = [m], index = [_["key"]]))
df = df.groupby(df.index).sum().dropna()
df.columns = ["Job Submission", "Job Status"]
x = np.array([_ for _ in range(len(df))])
p = figure(plot_width = 1200, toolbar_location = "above", y_axis_type = "log")
p.vbar(x = x - 0.2, top = df["Job Submission"], bottom = 0, width = 0.4, legend = "Job Submission", color = "purple")
p.vbar(x = x + 0.2, top = df["Job Status"], bottom = 0, width = 0.4, legend = "Job Status", color = "blue")
p.legend.location = "top_right"
p.title.text = "Web Service Requests for Job Status and Job Submission"
return p
def fig13(conn):
df = pd.DataFrame()
query = {
"size" : 0,
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : "proc_ws" } },
{ "term" : { "method" : "post" } }
],
"must_not" : { "exists" : { "field" : "message" } }
}
},
"aggs" : {
"permo" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM"
}
}
}
}
res = conn.search(index = "logs-tomcat", body = query)
for _ in res["aggregations"]["permo"]["buckets"]:
df = df.append(pd.DataFrame([[_["doc_count"]]], columns = ["proc_ws"], index = [_["key_as_string"]]))
query = {
"size" : 0,
"query" : {
"bool" : {
"must" : [
{ "range" : { "ClusterId" : { "gte" : 0 } } },
{ "range" : { "ProcId" : { "gte" : 0 } } }
]
}
},
"aggs" : {
"permo" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM"
},
"aggs" : {
"uniq_clusterid" : {
"cardinality" : {
"field" : "ClusterId"
}
}
}
}
}
}
res = conn.search(index = "logs-condor", body = query)
for _ in res["aggregations"]["permo"]["buckets"]:
df = df.append(pd.DataFrame([[_["uniq_clusterid"]["value"]]], columns = ["condor"], index = [_["key_as_string"]]))
df = df.groupby(df.index).sum()
df = df[df.index < "2017-01"]
df["ratio"] = df["proc_ws"] / df["condor"]
df = df.dropna(how = "any")
p1 = figure(width = 800, title = "Average Ratio of Web Service submissions over HTCondor Submissions")
x = [_ for _ in range(len(df))]
p1.vbar(x = x, top = df["ratio"], bottom = 0, width = 0.8)
d = dict(zip(x, df.index))
p1.xaxis[0].ticker = FixedTicker(ticks = x)
p1.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) + """
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p1.yaxis[0].axis_label = "Ratio"
p1.xaxis.major_label_orientation = np.pi / 4
return p1
if __name__ == "__main__":
conn = Init(timeout = 300).connect()
#fig1("logs-condor", conn)
#fig2("logs-condor", conn)
#fig3("logs-condor", conn)
#fig4("logs-condor", conn)
#fig5("logs-condor", conn)
#fig6("logs-condor", conn)
#fig7("logs-condor", conn)
#fig8("logs-condor", conn)
#fig9("logs-condor", conn)
#fig10(conn)
#fig11(conn)
#fig12(conn)
fig13(conn)
#test()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mapreduce shuffler implementation."""
from __future__ import with_statement
import gc
import heapq
import logging
import time
from appengine_pipeline.src import pipeline
from appengine_pipeline.src.pipeline import common as pipeline_common
from google.appengine.api import files
from google.appengine.api.files import file_service_pb
from google.appengine.api.files import records
from google.appengine.ext import db
from google.appengine.ext.mapreduce import base_handler
from google.appengine.ext.mapreduce import context
from google.appengine.ext.mapreduce import errors
from google.appengine.ext.mapreduce import input_readers
from google.appengine.ext.mapreduce import mapper_pipeline
from google.appengine.ext.mapreduce import operation
from google.appengine.ext.mapreduce import output_writers
class _OutputFile(db.Model):
"""Entity to store output filenames of pipelines.
These entities are always children of key returned by get_root_key().
"""
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_GAE_MR_OutputFile"
@classmethod
def get_root_key(cls, job_id):
"""Get root key to store output files.
Args:
job_id: pipeline's job id.
Returns:
root key for a given job id to store output file entities.
"""
return db.Key.from_path(cls.kind(), job_id)
def _compare_keys(key_record1, key_record2):
"""Compare two (key, records) protos by key."""
return cmp(key_record1[0], key_record2[0])
class _BatchRecordsReader(input_readers.RecordsReader):
"""Records reader that reads in big batches."""
BATCH_SIZE = 1024*1024 * 3
def __iter__(self):
records = []
size = 0
for record in input_readers.RecordsReader.__iter__(self):
records.append(record)
size += len(record)
if size > self.BATCH_SIZE:
yield records
size = 0
records = []
gc.collect()
if records:
yield records
records = []
gc.collect()
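# Illustration (not library code): _BatchRecordsReader is just a batching
# generator -- accumulate records until a byte threshold is crossed, yield the
# batch, then start a new one. The same pattern in isolation:
#
#   def batched(items, max_bytes):
#     batch, size = [], 0
#     for item in items:
#       batch.append(item)
#       size += len(item)
#       if size > max_bytes:
#         yield batch
#         batch, size = [], 0
#     if batch:
#       yield batch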
def _sort_records_map(records):
"""Map function sorting records.
Converts records to KeyValue protos, sorts them by key and writes them
into a new blobstore file. Creates an _OutputFile entity to record the
resulting file name.
Args:
records: list of records which are serialized KeyValue protos.
"""
ctx = context.get()
l = len(records)
key_records = [None] * l
logging.debug("Parsing")
for i in range(l):
proto = file_service_pb.KeyValue()
proto.ParseFromString(records[i])
key_records[i] = (proto.key(), records[i])
logging.debug("Sorting")
key_records.sort(cmp=_compare_keys)
logging.debug("Writing")
blob_file_name = (ctx.mapreduce_spec.name + "-" +
ctx.mapreduce_id + "-output")
output_path = files.blobstore.create(
_blobinfo_uploaded_filename=blob_file_name)
with output_writers.RecordsPool(output_path, ctx=ctx) as pool:
for key_record in key_records:
pool.append(key_record[1])
logging.debug("Finalizing")
files.finalize(output_path)
output_path = files.blobstore.get_file_name(
files.blobstore.get_blob_key(output_path))
entity = _OutputFile(key_name=output_path,
parent=_OutputFile.get_root_key(ctx.mapreduce_id))
entity.put()
class _SortChunksPipeline(base_handler.PipelineBase):
"""A pipeline to sort multiple key-value files.
Args:
job_name: root job name.
filenames: list of filenames to sort.
Returns:
The list of lists of sorted filenames. Each list corresponds to one
input file. Each filename contains a chunk of sorted data.
"""
def run(self, job_name, filenames):
sort_mappers = []
for i in range(len(filenames)):
filename = filenames[i]
sort_mapper = yield mapper_pipeline.MapperPipeline(
"%s-shuffle-sort-%s" % (job_name, str(i)),
__name__ + "._sort_records_map",
__name__ + "._BatchRecordsReader",
None,
{
"files": [filename],
"processing_rate": 1000000,
},
shards=1)
sort_mappers.append(sort_mapper)
with pipeline.After(*sort_mappers):
job_ids = yield pipeline_common.Append(*[mapper.job_id for mapper in
sort_mappers])
result = yield _CollectOutputFiles(job_ids)
with pipeline.After(result):
yield _CleanupOutputFiles(job_ids)
yield pipeline_common.Return(result)
class _CollectOutputFiles(base_handler.PipelineBase):
"""Collect output file names from _OutputFile entities for given jobs.
Args:
job_ids: list of job ids to load filenames.
Returns:
list of lists of filenames produced by specified job ids.
"""
def run(self, job_ids):
result = []
for job_id in job_ids:
entities = _OutputFile.all().ancestor(_OutputFile.get_root_key(job_id))
result.append([entity.key().name() for entity in entities])
return result
class _CleanupOutputFiles(base_handler.PipelineBase):
"""Cleanup _OutputFile entities for given job ids.
Args:
job_ids: list of job ids.
"""
def run(self, job_ids):
result = []
for job_id in job_ids:
db.delete(_OutputFile.all().ancestor(_OutputFile.get_root_key(job_id)))
class _MergingReader(input_readers.InputReader):
"""Reader which merge-reads multiple sorted KeyValue files.
Reads list of lists of filenames. Each filename list constitutes one shard
and is merged together.
Yields (key, values) tuples.
"""
expand_parameters = True
FILES_PARAM = "files"
def __init__(self, offsets):
"""Constructor.
Args:
offsets: offsets for each input file to start from as list of ints.
"""
self._offsets = offsets
def __iter__(self):
"""Iterate over records in input files.
self._offsets is always correctly updated so that stopping iterations
doesn't skip records and doesn't read the same record twice.
"""
ctx = context.get()
mapper_spec = ctx.mapreduce_spec.mapper
shard_number = ctx.shard_state.shard_number
filenames = mapper_spec.params[self.FILES_PARAM][shard_number]
if len(filenames) != len(self._offsets):
raise Exception("Files list and offsets do not match.")
readers = []
for (i, filename) in enumerate(filenames):
offset = self._offsets[i]
reader = records.RecordsReader(files.BufferedFile(filename))
reader.seek(offset)
readers.append((None, None, i, reader))
current_result = None
while readers:
(key, value, index, reader) = readers[0]
if key is not None:
if current_result and key != current_result[0]:
yield current_result
if not current_result or key != current_result[0]:
current_result = (key, [])
current_result[1].append(value)
try:
self._offsets[index] = reader.tell()
start_time = time.time()
binary_record = reader.read()
if context.get():
operation.counters.Increment(
input_readers.COUNTER_IO_READ_BYTES,
len(binary_record))(context.get())
operation.counters.Increment(
input_readers.COUNTER_IO_READ_MSEC,
int((time.time() - start_time) * 1000))(context.get())
proto = file_service_pb.KeyValue()
proto.ParseFromString(binary_record)
heapq.heapreplace(readers,
(proto.key(), proto.value(), index, reader))
except EOFError:
heapq.heappop(readers)
if current_result:
yield current_result
@classmethod
def from_json(cls, json):
"""Restore reader from json state."""
return cls(json["offsets"])
def to_json(self):
"""Serialize reader state to json."""
return {"offsets": self._offsets}
@classmethod
def split_input(cls, mapper_spec):
"""Split input into multiple shards.
Only one shard is generated at the moment.
"""
filelists = mapper_spec.params[cls.FILES_PARAM]
return [cls([0] * len(files)) for files in filelists]
@classmethod
def validate(cls, mapper_spec):
"""Validate reader parameters in mapper_spec."""
if mapper_spec.input_reader_class() != cls:
raise errors.BadReaderParamsError("Input reader class mismatch")
params = mapper_spec.params
if not cls.FILES_PARAM in params:
raise errors.BadReaderParamsError("Missing files parameter.")
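# Illustration (not part of the mapreduce library): _MergingReader is a k-way
# merge over key-sorted record streams followed by grouping of equal keys.
# The same idea for plain in-memory sequences of (key, value) pairs:
#
#   import heapq, itertools
#
#   def merge_group(sorted_streams):
#     merged = heapq.merge(*sorted_streams)  # k-way merge, smallest key first
#     for key, group in itertools.groupby(merged, key=lambda kv: kv[0]):
#       yield key, [v for _, v in group]
#
#   >>> list(merge_group([[('a', 1), ('c', 3)], [('a', 2), ('b', 5)]]))
#   [('a', [1, 2]), ('b', [5]), ('c', [3])]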
class _HashingBlobstoreOutputWriter(output_writers.BlobstoreOutputWriterBase):
"""An OutputWriter which outputs data into blobstore in key-value format.
The output is tailored towards shuffler needs. It shards key/values using
key hash modulo number of output files.
"""
def __init__(self, filenames):
"""Constructor.
Args:
filenames: list of filenames that this writer outputs to.
"""
self._filenames = filenames
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec to validate.
"""
if mapper_spec.output_writer_class() != cls:
raise errors.BadWriterParamsError("Output writer class mismatch")
@classmethod
def init_job(cls, mapreduce_state):
"""Initialize job-level writer state.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job. State can be modified during initialization.
"""
shards = mapreduce_state.mapreduce_spec.mapper.shard_count
filenames = []
for i in range(shards):
blob_file_name = (mapreduce_state.mapreduce_spec.name +
"-" + mapreduce_state.mapreduce_spec.mapreduce_id +
"-output-" + str(i))
filenames.append(
files.blobstore.create(
_blobinfo_uploaded_filename=blob_file_name))
mapreduce_state.writer_state = {"filenames": filenames}
@classmethod
def finalize_job(cls, mapreduce_state):
"""Finalize job-level writer state.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job. State can be modified during finalization.
"""
finalized_filenames = []
for filename in mapreduce_state.writer_state["filenames"]:
files.finalize(filename)
finalized_filenames.append(
files.blobstore.get_file_name(
files.blobstore.get_blob_key(filename)))
mapreduce_state.writer_state = {"filenames": finalized_filenames}
@classmethod
def from_json(cls, json):
"""Creates an instance of the OutputWriter for the given json state.
Args:
json: The OutputWriter state as a dict-like object.
Returns:
An instance of the OutputWriter configured using the values of json.
"""
return cls(json["filenames"])
def to_json(self):
"""Returns writer state to serialize in json.
Returns:
A json-izable version of the OutputWriter state.
"""
return {"filenames": self._filenames}
@classmethod
def create(cls, mapreduce_state, shard_number):
"""Create new writer for a shard.
Args:
mapreduce_state: an instance of model.MapreduceState describing current
job. State can be modified.
shard_number: shard number as integer.
"""
return cls(mapreduce_state.writer_state["filenames"])
@classmethod
def get_filenames(cls, mapreduce_state):
"""Obtain output filenames from mapreduce state.
Args:
mapreduce_state: an instance of model.MapreduceState
Returns:
list of filenames this writer writes to or None if writer
doesn't write to a file.
"""
return mapreduce_state.writer_state["filenames"]
def write(self, data, ctx):
"""Write data.
Args:
data: actual data yielded from handler. Type is writer-specific.
ctx: an instance of context.Context.
"""
if len(data) != 2:
logging.error("Got bad tuple of length %d (2-tuple expected): %s",
len(data), data)
try:
key = str(data[0])
value = str(data[1])
except TypeError:
logging.error("Expecting a tuple, but got %s: %s",
data.__class__.__name__, data)
file_index = key.__hash__() % len(self._filenames)
pool_name = "kv_pool%d" % file_index
filename = self._filenames[file_index]
if ctx.get_pool(pool_name) is None:
ctx.register_pool(pool_name,
output_writers.RecordsPool(filename=filename, ctx=ctx))
proto = file_service_pb.KeyValue()
proto.set_key(key)
proto.set_value(value)
ctx.get_pool(pool_name).append(proto.Encode())
def _merge_map(k, values):
"""A map function used in merge phase.
Stores (k, values) into KeyValues proto and yields its serialization.
"""
proto = file_service_pb.KeyValues()
proto.set_key(k)
proto.value_list().extend(values)
yield proto.Encode()
class _MergePipeline(base_handler.PipelineBase):
"""Pipeline to merge sorted chunks.
This pipeline merges together individually sorted chunks of each shard.
Args:
filenames: list of lists of filenames. Each list will correspond to a single
shard. Each file in the list should have its keys sorted and should contain
records with serialized KeyValue entities.
Returns:
The list of filenames, where each file is fully merged and will contain
records with serialized KeyValues entities.
"""
def run(self, job_name, filenames):
yield mapper_pipeline.MapperPipeline(
job_name + "-shuffle-merge",
__name__ + "._merge_map",
__name__ + "._MergingReader",
output_writer_spec=
output_writers.__name__ + ".BlobstoreRecordsOutputWriter",
params={'files': filenames},
shards=len(filenames))
def _hashing_map(binary_record):
"""A map function used in hash phase.
Reads KeyValue from binary record and yields (key, value).
"""
proto = file_service_pb.KeyValue()
proto.ParseFromString(binary_record)
yield (proto.key(), proto.value())
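# Hedged sketch (not part of the original module): the hash phase routes each
# key through hash(key) modulo the number of output files, exactly as
# _HashingBlobstoreOutputWriter.write() does, so equal keys always land in the
# same output file. The bucket count below is a hypothetical example.
def _example_key_bucketing(keys, num_files=4):
  buckets = {}
  for key in keys:
    buckets.setdefault(key.__hash__() % num_files, []).append(key)
  return buckets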
class _HashPipeline(base_handler.PipelineBase):
"""A pipeline to read mapper output and hash by key.
Args:
job_name: root mapreduce job name.
filenames: filenames of mapper output. Should be of records format
with serialized KeyValue proto.
Returns:
The list of filenames. Each file is in records format with serialized
KeyValue protos. Each proto's output file is chosen by its key hash,
so all equal keys end up in the same file.
"""
def run(self, job_name, filenames):
yield mapper_pipeline.MapperPipeline(
job_name + "-shuffle-hash",
__name__ + "._hashing_map",
input_readers.__name__ + ".RecordsReader",
output_writer_spec=__name__ + "._HashingBlobstoreOutputWriter",
params={'files': filenames},
shards=len(filenames))
class ShufflePipeline(base_handler.PipelineBase):
"""A pipeline to shuffle multiple key-value files.
Args:
filenames: list of file names to sort. Files have to be of records format
defined by Files API and contain serialized file_service_pb.KeyValue
protocol messages.
Returns:
The list of filenames as string. Resulting files contain serialized
file_service_pb.KeyValues protocol messages with all values collated
to a single key.
"""
def run(self, job_name, filenames):
hashed_files = yield _HashPipeline(job_name, filenames)
sorted_files = yield _SortChunksPipeline(job_name, hashed_files)
merged_files = yield _MergePipeline(job_name, sorted_files)
with pipeline.After(merged_files):
all_temp_files = yield pipeline_common.Extend(
hashed_files, sorted_files)
yield mapper_pipeline._CleanupPipeline(all_temp_files)
yield pipeline_common.Return(merged_files)
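# Hedged usage sketch (not part of the original module): assuming the standard
# Pipeline API (start() / pipeline_id), a shuffle over already-written
# key-value record files could be kicked off like this. The job name and file
# names are hypothetical placeholders.
def _example_start_shuffle():
  filenames = ["/blobstore/mapper-output-0", "/blobstore/mapper-output-1"]
  shuffle = ShufflePipeline("example-job", filenames)
  shuffle.start()
  return shuffle.pipeline_id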
|
|
from __future__ import unicode_literals
import difflib
import json
import posixpath
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy
from functools import wraps
from unittest.util import safe_repr
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import WSGIRequestHandler, WSGIServer
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils import six
from django.utils.decorators import classproperty
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import (
unquote, urljoin, urlparse, urlsplit, urlunsplit,
)
from django.utils.six.moves.urllib.request import url2pathname
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super(_AssertNumQueriesContext, self).__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
query['sql'] for query in self.captured_queries
)
)
)
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += ' No template was rendered.'
else:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class _CursorFailure(object):
def __init__(self, cls_name, wrapped):
self.cls_name = cls_name
self.wrapped = wrapped
def __call__(self):
raise AssertionError(
"Database queries aren't allowed in SimpleTestCase. "
"Either use TestCase or TransactionTestCase to ensure proper test isolation or "
"set %s.allow_database_queries to True to silence this failure." % self.cls_name
)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
_overridden_settings = None
_modified_settings = None
# Tests shouldn't be allowed to query the database since
# this base class doesn't enforce any isolation.
allow_database_queries = False
@classmethod
def setUpClass(cls):
super(SimpleTestCase, cls).setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = _CursorFailure(cls.__name__, connection.cursor)
@classmethod
def tearDownClass(cls):
if not cls.allow_database_queries:
for alias in connections:
connection = connections[alias]
connection.cursor = connection.cursor.wrapped
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super(SimpleTestCase, cls).tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (
getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)
)
if not skipped:
try:
self._pre_setup()
except Exception:
result.addError(self, sys.exc_info())
return
super(SimpleTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Creating a test client.
* Clearing the mail test outbox.
"""
self.client = self.client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform any post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
A context manager that temporarily applies changes to a list setting and
reverts to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix='',
fetch_redirect_response=True):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request (use fetch_redirect_response=False to check
such links without fetching them).
"""
if host is not None:
warnings.warn(
"The host argument is deprecated and no longer used by assertRedirects",
RemovedInDjango20Warning, stacklevel=2
)
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(
len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
self.assertEqual(
response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)"
% (response.redirect_chain[0][1], status_code)
)
url, status_code = response.redirect_chain[-1]
scheme, netloc, path, query, fragment = urlsplit(url)
self.assertEqual(
response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)"
% (response.status_code, target_status_code)
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith('/'):
url = urljoin(response.request['PATH_INFO'], url)
path = urljoin(response.request['PATH_INFO'], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
redirect_response = response.client.get(path, QueryDict(query), secure=(scheme == 'https'))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(
redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)"
% (path, redirect_response.status_code, target_status_code)
)
if url != expected_url:
# For temporary backwards compatibility, try to compare with a relative url
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
relative_url = urlunsplit(('', '', e_path, e_query, e_fragment))
if url == relative_url:
warnings.warn(
"assertRedirects had to strip the scheme and domain from the "
"expected URL, as it was always added automatically to URLs "
"before Django 1.9. Please update your expected URLs by "
"removing the scheme and domain.",
RemovedInDjango20Warning, stacklevel=2)
expected_url = relative_url
self.assertEqual(
url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)
)
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code)
)
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response.charset)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
successfully (i.e., the HTTP status code was as expected) and that
``text`` does not occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors))
)
elif field in context[form].fields:
self.fail(
msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" %
(field, form, i)
)
else:
self.fail(
msg_prefix + "The form '%s' in context %d does not contain the field '%s'" %
(form, i, field)
)
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(
err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors)
)
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Asserts that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err, repr(field_errors))
)
elif field in context[formset].forms[form_index].fields:
self.fail(
msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors"
% (field, formset, form_index, i)
)
else:
self.fail(
msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'"
% (formset, form_index, i, field)
)
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(
len(non_field_errors) == 0,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain any non-field errors." % (formset, form_index, i)
)
self.assertTrue(
err in non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain the non-field error '%s' (actual errors: %s)"
% (formset, form_index, i, err, repr(non_field_errors))
)
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(
len(non_form_errors) == 0,
msg_prefix + "The formset '%s' in context %d does not "
"contain any non-form errors." % (formset, i)
)
self.assertTrue(
err in non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain the non-form error '%s' (actual errors: %s)"
% (formset, i, err, repr(non_form_errors))
)
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
# use this template with context manager
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ', '.join(template_names))
)
if count is not None:
self.assertEqual(
template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name))
)
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(
template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name
)
@contextmanager
def _assert_raises_message_cm(self, expected_exception, expected_message):
with self.assertRaises(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(cm.exception))
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
Asserts that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
# callable_obj was a documented kwarg in Django 1.8 and older.
callable_obj = kwargs.pop('callable_obj', None)
if callable_obj:
warnings.warn(
'The callable_obj kwarg is deprecated. Pass the callable '
'as a positional argument instead.', RemovedInDjango20Warning
)
elif len(args):
callable_obj = args[0]
args = args[1:]
cm = self._assert_raises_message_cm(expected_exception, expected_message)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_text(required.error_messages['required'])]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
six.text_type(dom1).splitlines(),
six.text_type(dom2).splitlines(),
)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are equal.
Usual JSON non-significant whitespace rules apply as the heavy lifting
is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Asserts that the JSON fragments raw and expected_data are not equal.
Usual JSON non-significant whitespace rules apply as the heavy lifting
is delegated to the json library.
"""
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
diff = ('\n' + '\n'.join(
difflib.ndiff(
six.text_type(xml1).splitlines(),
six.text_type(xml2).splitlines(),
)
))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
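# Hedged example (not part of Django itself): a minimal SimpleTestCase
# subclass exercising the assertions defined above. The URL, template name
# and expected content are hypothetical and assume matching views exist.
#
#   class ExampleSimpleAssertions(SimpleTestCase):
#       def test_response_assertions(self):
#           response = self.client.get('/example/')
#           self.assertContains(response, 'hello', count=1)
#           self.assertTemplateUsed(response, 'example.html')
#           self.assertJSONEqual('{"a": 1}', {'a': 1})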
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
# Since tests will be wrapped in a transaction, or serialized if they
# are not available, we allow queries to be run.
allow_database_queries = True
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* If the class has an 'available_apps' attribute, restricting the app
registry to these applications, then firing post_migrate -- it must
run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, installing these fixtures.
"""
super(TransactionTestCase, self)._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False,
)
raise
@classmethod
def _databases_names(cls, include_mirrors=True):
# If the test case has a multi_db=True flag, act on all databases
# (optionally including mirrors); otherwise, just on the default DB.
if getattr(cls, 'multi_db', False):
return [
alias for alias in connections
if include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']
]
else:
return [DEFAULT_DB_ALIAS]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# If we need to provide replica initial data from migrated apps,
# then do so.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Flushing the contents of the database, to leave a clean slate. If
the class has an 'available_apps' attribute, post_migrate isn't fired.
* Force-closing the connection, so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super(TransactionTestCase, self)._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None or
( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback and
hasattr(connections[db_name], '_test_serialized_contents')
)
)
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
items = six.moves.map(transform, qs)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
values = list(values)
# For example qs.iterator() could be passed as qs, but it does not
# have 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered values")
return self.assertEqual(list(items), values, msg=msg)
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
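# Hedged example (not part of Django itself): assertNumQueries works either
# as a context manager or with a callable, and assertQuerysetEqual compares a
# queryset against expected values. The `Article` model is a hypothetical
# placeholder.
#
#   class ArticleQueryTests(TransactionTestCase):
#       def test_query_counting(self):
#           with self.assertNumQueries(1):
#               list(Article.objects.all())
#           self.assertNumQueries(1, Article.objects.count)
#           self.assertQuerysetEqual(
#               Article.objects.order_by('pk'), [], transform=repr)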
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but uses `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Helper method to open atomic blocks for multiple databases"""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened through the previous method"""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def setUpClass(cls):
super(TestCase, cls).setUpClass()
if not connections_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{
'verbosity': 0,
'commit': False,
'database': db_name,
})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
@classmethod
def tearDownClass(cls):
if connections_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super(TestCase, cls).tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase"""
pass
def _should_reload_connections(self):
if connections_support_transactions():
return False
return super(TestCase, self)._should_reload_connections()
def _fixture_setup(self):
if not connections_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super(TestCase, self)._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks and
not connection.needs_rollback and connection.is_usable()
)
class CheckCondition(object):
"""Descriptor class for deferred condition checking"""
def __init__(self, cond_func):
self.cond_func = cond_func
def __get__(self, instance, cls=None):
return self.cond_func()
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
test_item.__unittest_skip__ = CheckCondition(condition)
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(*features):
"""
Skip a test if a database has at least one of the named features.
"""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features)
)
def skipUnlessDBFeature(*features):
"""
Skip a test unless a database has all the named features.
"""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features)
)
def skipUnlessAnyDBFeature(*features):
"""
Skip a test unless a database has any of the named features.
"""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features)
)
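# Hedged example (not part of Django itself): the decorators above can wrap a
# test method or a whole TestCase subclass; the skip condition is evaluated
# lazily (for classes via the CheckCondition descriptor on __unittest_skip__,
# for methods inside the wrapper). The feature names are real connection
# features; the tests themselves are hypothetical.
#
#   @skipUnlessDBFeature('supports_transactions')
#   class TransactionalBehaviorTests(TestCase):
#
#       @skipIfDBFeature('interprets_empty_strings_as_nulls')
#       def test_empty_string_round_trip(self):
#           ...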
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
Just a regular WSGIRequestHandler except it doesn't log to the standard
output any of the requests received, so as to not clutter the output for
the tests' results.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super(FSFilesHandler, self).__init__()
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super(FSFilesHandler, self).get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(FSFilesHandler, self).__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""
Thread for running a live http server while the tests are running.
"""
def __init__(self, host, static_handler, connections_override=None):
self.host = host
self.port = None
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super(LiveServerThread, self).__init__()
def run(self):
"""
Sets up the live server and databases, and then loops over handling
http requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server(0)
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self, port):
return WSGIServer((self.host, port), QuietWSGIRequestHandler, allow_reuse_address=False)
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase but also launches a live
http server in a separate thread so that the tests may use another testing
framework, such as Selenium for example, instead of the built-in dummy
client.
Note that it inherits from TransactionTestCase instead of TestCase because
the threads do not share the same transactions (unless using in-memory
sqlite) and each thread needs to commit all their transactions so that the
other thread can see the changes.
"""
host = 'localhost'
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (cls.host, cls.server_thread.port)
@classmethod
def setUpClass(cls):
super(LiveServerTestCase, cls).setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={'append': cls.host},
)
cls._live_server_modified_settings.enable()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
)
@classmethod
def _tearDownClassInternal(cls):
# There may not be a 'server_thread' attribute if setUpClass() for some
# reason has raised an exception.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
# Restore sqlite in-memory database connections' non-shareability
for conn in connections.all():
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
conn.allow_thread_sharing = False
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
cls._live_server_modified_settings.disable()
super(LiveServerTestCase, cls).tearDownClass()
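# Hedged example (not part of Django itself): tests against the live server
# talk to it over real HTTP via cls.live_server_url. The requested path is a
# hypothetical placeholder.
#
#   from django.utils.six.moves.urllib.request import urlopen
#
#   class HomePageLiveTests(LiveServerTestCase):
#       def test_home_page_is_served(self):
#           response = urlopen(self.live_server_url + '/')
#           self.assertEqual(response.getcode(), 200)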
class SerializeMixin(object):
"""
Mixin to enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass / tearDownClass.
"""
lockfile = None
@classmethod
def setUpClass(cls):
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__))
cls._lockfile = open(cls.lockfile)
locks.lock(cls._lockfile, locks.LOCK_EX)
super(SerializeMixin, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(SerializeMixin, cls).tearDownClass()
cls._lockfile.close()
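# Hedged example (not part of Django itself): TestCases that must not run
# concurrently can share a single lockfile; the module's own __file__ is a
# convenient, always-existing lock target. The class names are hypothetical.
#
#   class BaseSharedResourceTests(SerializeMixin, TestCase):
#       lockfile = __file__
#
#   class FirstResourceTests(BaseSharedResourceTests):
#       ...
#
#   class SecondResourceTests(BaseSharedResourceTests):
#       ...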
|
|
#!/usr/bin/env python
import hashlib
import json
import random
import string
import sys
import time
import zmq
from termcolor import colored
import fnode
# def check_files_node(node, my_id):
# files_my_id = {}
# delete = {}
# for i in node['file']:
# print i[0:7] + '-->>' + node['lower_bound']
# print 'i --> ' + i
# if my_id > node['lower_bound']:
# if (i <= my_id and i >= 0) or (i > node['lower_bound'] and i <= 0):
# # print i
# files_my_id[i] = node['file'][i]
# delete[i] = i
# else:
# if i <= my_id and i > node['lower_bound']:
# # print i
# files_my_id[i] = node['file'][i]
# delete[i] = i
#
# for i in delete:
# print ' DEL --> ' + i
# del node['file'][i]
#
# files_my_id = json.dumps(files_my_id)
#
# return files_my_id
def add(node, req, socket_send):
fnode.printJSON(req)
check = fnode.check_rank(node['id'], node['lower_bound'], req['msg']['id'])
print 'CHECK --> ' + str(check)
if check == 0:
# files_my_id = check_files_node(node, req['msg']['id'])
# # print files_my_id
#
# req_update_files = fnode.create_req('update_file',
# node['ip'] + ':' + node['port'],
# req['msg']['origin'],
# json.loads(files_my_id))
# req_update_files_json = json.loads(req_update_files)
# print 'Update to ' + 'tcp://' + req_update_files_json['to']
# time.sleep(2)
# socket_send.connect('tcp://' + req_update_files_json['to'])
# # fnode.printJSON(req_update_json)
# socket_send.send(req_update_files)
# message = socket_send.recv()
# print message
req_update = fnode.create_req(
'update', node['ip'] + ':' + node['port'], req['msg']['origin'], {
'lower_bound': node['lower_bound'],
'lower_bound_ip': node['lower_bound_ip']
})
req_update_json = json.loads(req_update)
print 'Update to ' + 'tcp://' + req_update_json['to']
time.sleep(2)
socket_send.connect('tcp://' + req_update_json['to'])
socket_send.send(req_update)
message = socket_send.recv()
print message
node['lower_bound'] = req['msg']['id']
node['lower_bound_ip'] = req['msg']['origin']
fnode.node_info(node)
elif check == -1:
req_add = fnode.create_req(
'add', node['ip'] + ':' + node['port'], node['lower_bound_ip'],
{'origin': req['msg']['origin'],
'id': req['msg']['id']})
req_add_json = json.loads(req_add)
socket_send.connect('tcp://' + req_add_json['to'])
# fnode.printJSON(req_add_json)
socket_send.send(req_add)
message = socket_send.recv()
print message
def update(node, req):
fnode.printJSON(req)
node['lower_bound'] = req['msg']['lower_bound']
node['lower_bound_ip'] = req['msg']['lower_bound_ip']
print '############ UPDATE OK'
fnode.node_info(node)
def save(node, req, socket_send):
fnode.printJSON(req)
check = fnode.check_rank(node['id'], node['lower_bound'], req['id'])
print 'CHECK --> ' + str(check)
if check == 0:
fnode.file_to_ring(node, req['name'], req['data'], req['id'])
fnode.node_info(node)
elif check == -1:
req_save = json.dumps({
'req': 'save',
'from': node['ip'] + ':' + node['port'],
'to': node['lower_bound_ip'],
'data': req['data'],
'name': req['name'],
'id': req['id']
})
req_save_json = json.loads(req_save)
socket_send.connect('tcp://' + req_save_json['to'])
# fnode.printJSON(req_add_json)
socket_send.send(req_save)
message = socket_send.recv()
print message
def remove_file(node, req, socket_send):
fnode.printJSON(req)
check = fnode.check_rank(node['id'], node['lower_bound'], req['id'])
print 'CHECK --> ' + str(check)
if check == 0:
fnode.remove_file_ring(node, req['id'])
fnode.node_info(node)
elif check == -1:
req_remove = json.dumps({
'req': 'remove',
'from': node['ip'] + ':' + node['port'],
'to': node['lower_bound_ip'],
'id': req['id']
})
req_remove_json = json.loads(req_remove)
socket_send.connect('tcp://' + req_remove_json['to'])
# fnode.printJSON(req_add_json)
socket_send.send(req_remove)
message = socket_send.recv()
print message
def check_file(node, file_id):
for i in node:
print i
if i == file_id:
return node[i]
return 'No file'
def get_file(node, req, socket_send):
fnode.printJSON(req)
check = check_file(node['file'], req['id'])
if check != 'No file':
print colored(check, 'cyan')
# fnode.node_info(node)
req_send = json.dumps({
'from': node['ip'] + ':' + node['port'],
'to': req['client_origin'],
'info': check
})
req_send_json = json.loads(req_send)
socket_send.connect('tcp://' + req_send_json['to'])
socket_send.send(req_send)
message = socket_send.recv()
print message
else:
print colored('File does not exist in this node :(', 'red')
if req['node_origin'] == node['lower_bound_ip']:
req_send = json.dumps({
'from': node['ip'] + ':' + node['port'],
'to': req['client_origin'],
'info': 'No'
})
req_send_json = json.loads(req_send)
socket_send.connect('tcp://' + req_send_json['to'])
socket_send.send(req_send)
message = socket_send.recv()
print message
else:
get_req = json.dumps({
'req': 'get',
'from': req['from'],
'to': node['lower_bound_ip'],
'id': req['id'],
'node_origin': req['node_origin'],
'client_origin': req['client_origin']
})
get_req_json = json.loads(get_req)
socket_send.connect('tcp://' + get_req_json['to'])
socket_send.send(get_req)
message = socket_send.recv()
print colored(message, 'green')
def pass_data(node, req_json):
for i in req_json['msg']:
node['file'][i] = req_json['msg'][i]
fnode.node_info(node)
def search_new_connection(node, info, socket_send):
if node['lower_bound'] == info['node_id']:
node['lower_bound'] = info['lower_bound']
node['lower_bound_ip'] = info['lower_bound_ip']
fnode.node_info(node)
else:
new_req = fnode.create_req('new_connection',
node['ip'] + ':' + node['port'],
node['lower_bound_ip'], info)
new_req_json = json.loads(new_req)
socket_send.connect('tcp://' + new_req_json['to'])
socket_send.send(new_req)
message = socket_send.recv()
print colored(message, 'green')
# def update_file_list(node, req):
# for i in req['msg']:
# # print i
# node['file'][i] = req['msg'][i]
#
# fnode.node_info(node)
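# Hedged illustration (not part of the original script): every node-to-node
# request above is a plain JSON dict sent over a ZeroMQ socket via
# connect()/send()/recv(). A 'save' request being forwarded towards the
# responsible node looks roughly like this (addresses, id and payload are
# hypothetical):
#
#   {
#       "req": "save",
#       "from": "127.0.0.1:5555",
#       "to": "127.0.0.1:5556",
#       "data": "<file contents>",
#       "name": "notes.txt",
#       "id": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"
#   }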
|
|
import copy
import pickle
import warnings
import sys
from sympy.utilities.pytest import XFAIL
from sympy.core.basic import Atom, Basic
from sympy.core.core import BasicMeta, BasicType, ClassRegistry
from sympy.core.singleton import SingletonRegistry
from sympy.core.symbol import Dummy, Symbol, Wild
from sympy.core.numbers import Catalan, ComplexInfinity, EulerGamma, Exp1,\
GoldenRatio, Half, ImaginaryUnit, Infinity, Integer, NaN,\
NegativeInfinity, NegativeOne, Number, NumberSymbol, One, Pi,\
Rational, Float, Zero
from sympy.core.relational import ( Equality, GreaterThan, LessThan, Relational,
StrictGreaterThan, StrictLessThan, Unequality )
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.power import Pow
from sympy.core.function import Derivative, Function, FunctionClass, Lambda,\
WildFunction
from sympy.core.sets import Interval
from sympy.core.multidimensional import vectorize
from sympy.functions import exp
#from sympy.core.ast_parser import SymPyParser, SymPyTransformer
from sympy.core.compatibility import callable
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy import symbols
def check(a, check_attr=True):
""" Check that pickling and copying round-trips.
"""
# The below hasattr() check will warn about is_Real in Python 2.5, so
# disable this to keep the tests clean
warnings.filterwarnings("ignore", ".*is_Real.*")
protocols = [0, 1, 2, copy.copy, copy.deepcopy]
# Python 2.x doesn't support the third pickling protocol
if sys.version_info[0] > 2:
protocols.extend([3])
for protocol in protocols:
if callable(protocol):
if isinstance(a, BasicType):
# Classes can't be copied, but that's okay.
return
b = protocol(a)
else:
b = pickle.loads(pickle.dumps(a, protocol))
d1 = dir(a)
d2 = dir(b)
assert d1==d2
if not check_attr:
continue
def c(a,b,d):
for i in d:
if not hasattr(a,i):
continue
attr = getattr(a,i)
if not hasattr(attr, "__call__"):
assert hasattr(b,i), i
assert getattr(b,i)==attr
c(a,b,d1)
c(b,a,d2)
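# Hedged illustration (not part of the original test module): check() is
# called with both classes and instances, as the tests below do.
def _example_check_usage():
    check(Symbol)                # the class itself pickles by reference
    check(Symbol("example_x"))   # an instance round-trips attribute-by-attribute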
#================== core =========================
def test_core_basic():
for c in (Atom, Atom(),
Basic, Basic(),
# XXX: dynamically created types are not picklable
# BasicMeta, BasicMeta("test", (), {}),
# BasicType, BasicType("test", (), {}),
ClassRegistry, ClassRegistry(),
SingletonRegistry, SingletonRegistry()):
check(c)
def test_core_symbol():
for c in (Dummy, Dummy("x", commutative=False), Symbol,
Symbol("x", commutative=False), Wild, Wild("x")):
check(c)
def test_core_numbers():
for c in (Catalan, Catalan(), ComplexInfinity, ComplexInfinity(),
EulerGamma, EulerGamma(), Exp1, Exp1(), GoldenRatio, GoldenRatio(),
Half, Half(), ImaginaryUnit, ImaginaryUnit(), Infinity, Infinity(),
Integer, Integer(2), NaN, NaN(), NegativeInfinity,
NegativeInfinity(), NegativeOne, NegativeOne(), Number, Number(15),
NumberSymbol, NumberSymbol(), One, One(), Pi, Pi(), Rational,
Rational(1,2), Float, Float("1.2"), Zero, Zero()):
check(c)
def test_core_relational():
x = Symbol("x")
y = Symbol("y")
for c in (Equality, Equality(x,y), GreaterThan, GreaterThan(x, y),
LessThan, LessThan(x,y), Relational, Relational(x,y),
StrictGreaterThan, StrictGreaterThan(x,y), StrictLessThan,
StrictLessThan(x,y), Unequality, Unequality(x,y)):
check(c)
def test_core_add():
x = Symbol("x")
for c in (Add, Add(x,4)):
check(c)
def test_core_mul():
x = Symbol("x")
for c in (Mul, Mul(x,4)):
check(c)
def test_core_power():
x = Symbol("x")
for c in (Pow, Pow(x,4)):
check(c)
def test_core_function():
x = Symbol("x")
for f in (Derivative, Derivative(x), Function, FunctionClass, Lambda,\
WildFunction):
check(f)
@XFAIL
def test_core_dynamicfunctions():
# This fails because f is assumed to be a class at sympy.basic.function.f
f = Function("f")
check(f)
def test_core_interval():
for c in (Interval, Interval(0,2)):
check(c)
def test_core_multidimensional():
for c in (vectorize, vectorize(0)):
check(c)
# This doesn't have to be picklable.
#@XFAIL
#def test_core_astparser():
# # This probably fails because of importing the global sympy scope.
# for c in (SymPyParser, SymPyParser(), SymPyTransformer,
# SymPyTransformer({},{})):
# check(c)
#================== functions ===================
from sympy.functions import (Piecewise, lowergamma, acosh,
chebyshevu, chebyshevt, ln, chebyshevt_root, binomial, legendre,
Heaviside, factorial, bernoulli, coth, tanh, assoc_legendre, sign,
arg, asin, DiracDelta, re, rf, Abs, uppergamma, binomial, sinh, Ylm,
cos, cot, acos, acot, gamma, bell, hermite, harmonic,
LambertW, zeta, log, factorial, asinh, acoth, Zlm,
cosh, dirichlet_eta, Eijk, loggamma, erf, ceiling, im, fibonacci,
conjugate, tan, chebyshevu_root, floor, atanh, sqrt,
RisingFactorial, sin, atan, ff, FallingFactorial, lucas, atan2,
polygamma, exp)
from sympy.core import pi, oo, nan, zoo, E, I
def test_functions():
zero_var = (pi, oo, nan, zoo, E, I)
one_var = (acosh, ln, Heaviside, factorial, bernoulli, coth, tanh,
sign, arg, asin, DiracDelta, re, Abs, sinh, cos, cot, acos, acot,
gamma, bell, harmonic, LambertW, zeta, log, factorial, asinh,
acoth, cosh, dirichlet_eta, loggamma, erf, ceiling, im, fibonacci,
conjugate, tan, floor, atanh, sin, atan, lucas, exp)
two_var = (rf, ff, lowergamma, chebyshevu, chebyshevt, binomial,
atan2, polygamma, hermite, legendre, uppergamma)
x, y, z = symbols("x,y,z")
others = (chebyshevt_root, chebyshevu_root, Eijk(x, y, z),
Piecewise( (0, x<-1), (x**2, x<=1), (x**3, True)),
assoc_legendre)
for a in zero_var:
check(a)
for cls in one_var:
check(cls)
c = cls(x)
check(c)
for cls in two_var:
check(cls)
c = cls(x, y)
check(c)
for cls in others:
check(cls)
#================== geometry ====================
from sympy.geometry.entity import GeometryEntity
from sympy.geometry.point import Point
from sympy.geometry.ellipse import Circle, Ellipse
from sympy.geometry.line import Line, LinearEntity, Ray, Segment
from sympy.geometry.polygon import Polygon, RegularPolygon, Triangle
def test_geometry():
p1 = Point(1,2)
p2 = Point(2,3)
p3 = Point(0,0)
p4 = Point(0,1)
for c in (GeometryEntity, GeometryEntity(), Point, p1, Circle, Circle(p1,2),
Ellipse, Ellipse(p1,3,4), Line, Line(p1,p2), LinearEntity,
LinearEntity(p1,p2), Ray, Ray(p1,p2), Segment, Segment(p1,p2),
Polygon, Polygon(p1,p2,p3,p4), RegularPolygon, RegularPolygon(p1,4,5),
Triangle, Triangle(p1,p2,p3)):
check(c, check_attr = False)
#================== integrals ====================
from sympy.integrals.integrals import Integral
def test_integrals():
x = Symbol("x")
for c in (Integral, Integral(x)):
check(c)
#================== matrices ====================
from sympy.matrices.matrices import Matrix, SparseMatrix
def test_matrices():
for c in (Matrix, Matrix([1,2,3]), SparseMatrix, SparseMatrix([[1,2],[3,4]])):
check(c)
#================== ntheory =====================
from sympy.ntheory.generate import Sieve
def test_ntheory():
for c in (Sieve, Sieve()):
check(c)
#================== physics =====================
from sympy.physics.paulialgebra import Pauli
from sympy.physics.units import Unit
def test_physics():
for c in (Unit, Unit("meter", "m"), Pauli, Pauli(1)):
check(c)
#================== plotting ====================
# XXX: These tests are not complete, so XFAIL them
@XFAIL
def test_plotting():
from sympy.plotting.color_scheme import ColorGradient, ColorScheme
from sympy.plotting.managed_window import ManagedWindow
from sympy.plotting.plot import Plot, ScreenShot
from sympy.plotting.plot_axes import PlotAxes, PlotAxesBase, PlotAxesFrame, PlotAxesOrdinate
from sympy.plotting.plot_camera import PlotCamera
from sympy.plotting.plot_controller import PlotController
from sympy.plotting.plot_curve import PlotCurve
from sympy.plotting.plot_interval import PlotInterval
from sympy.plotting.plot_mode import PlotMode
from sympy.plotting.plot_modes import Cartesian2D, Cartesian3D, Cylindrical,\
ParametricCurve2D, ParametricCurve3D, ParametricSurface, Polar, Spherical
from sympy.plotting.plot_object import PlotObject
from sympy.plotting.plot_surface import PlotSurface
from sympy.plotting.plot_window import PlotWindow
for c in (ColorGradient, ColorGradient(0.2,0.4), ColorScheme, ManagedWindow,
ManagedWindow, Plot, ScreenShot, PlotAxes, PlotAxesBase,
PlotAxesFrame, PlotAxesOrdinate, PlotCamera, PlotController,
PlotCurve, PlotInterval, PlotMode, Cartesian2D, Cartesian3D,
Cylindrical, ParametricCurve2D, ParametricCurve3D,
ParametricSurface, Polar, Spherical, PlotObject, PlotSurface,
PlotWindow):
check(c)
@XFAIL
def test_plotting2():
from sympy.plotting.color_scheme import ColorGradient, ColorScheme
from sympy.plotting.managed_window import ManagedWindow
from sympy.plotting.plot import Plot, ScreenShot
from sympy.plotting.plot_axes import PlotAxes, PlotAxesBase, PlotAxesFrame, PlotAxesOrdinate
from sympy.plotting.plot_camera import PlotCamera
from sympy.plotting.plot_controller import PlotController
from sympy.plotting.plot_curve import PlotCurve
from sympy.plotting.plot_interval import PlotInterval
from sympy.plotting.plot_mode import PlotMode
from sympy.plotting.plot_modes import Cartesian2D, Cartesian3D, Cylindrical,\
ParametricCurve2D, ParametricCurve3D, ParametricSurface, Polar, Spherical
from sympy.plotting.plot_object import PlotObject
from sympy.plotting.plot_surface import PlotSurface
from sympy.plotting.plot_window import PlotWindow
check(ColorScheme("rainbow"))
check(Plot(1,visible=False))
check(PlotAxes())
#================== polys =======================
from sympy.polys.polytools import Poly
from sympy.polys.polyclasses import DMP, DMF, ANP
from sympy.polys.rootoftools import RootOf, RootSum
from sympy.polys.domains import (
PythonIntegerRing,
SymPyIntegerRing,
SymPyRationalField,
PolynomialRing,
FractionField,
ExpressionDomain,
)
def test_polys():
x = Symbol("X")
ZZ = PythonIntegerRing()
QQ = SymPyRationalField()
for c in (Poly, Poly(x, x)):
check(c)
for c in (DMP, DMP([[ZZ(1)],[ZZ(2)],[ZZ(3)]], ZZ)):
check(c)
for c in (DMF, DMF(([ZZ(1),ZZ(2)], [ZZ(1),ZZ(3)]), ZZ)):
check(c)
for c in (ANP, ANP([QQ(1),QQ(2)], [QQ(1),QQ(2),QQ(3)], QQ)):
check(c)
for c in (PythonIntegerRing, PythonIntegerRing()):
check(c)
for c in (SymPyIntegerRing, SymPyIntegerRing()):
check(c)
for c in (SymPyRationalField, SymPyRationalField()):
check(c)
for c in (PolynomialRing, PolynomialRing(ZZ, 'x', 'y')):
check(c)
for c in (FractionField, FractionField(ZZ, 'x', 'y')):
check(c)
for c in (ExpressionDomain, ExpressionDomain()):
check(c)
from sympy.polys.domains import PythonRationalField
for c in (PythonRationalField, PythonRationalField()):
check(c)
from sympy.polys.domains import HAS_GMPY
if HAS_GMPY:
from sympy.polys.domains import GMPYIntegerRing, GMPYRationalField
for c in (GMPYIntegerRing, GMPYIntegerRing()):
check(c)
for c in (GMPYRationalField, GMPYRationalField()):
check(c)
f = x**3 + x + 3
g = exp
for c in (RootOf, RootOf(f, 0), RootSum, RootSum(f, g)):
check(c)
#================== printing ====================
from sympy.printing.latex import LatexPrinter
from sympy.printing.mathml import MathMLPrinter
from sympy.printing.pretty.pretty import PrettyPrinter
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympy.printing.printer import Printer
from sympy.printing.python import PythonPrinter
def test_printing():
for c in (LatexPrinter, LatexPrinter(), MathMLPrinter,
PrettyPrinter, prettyForm, stringPict, stringPict("a"),
Printer, Printer(), PythonPrinter, PythonPrinter()):
check(c)
@XFAIL
def test_printing1():
check(MathMLPrinter())
@XFAIL
def test_printing2():
check(PrettyPrinter())
#================== series ======================
from sympy.series.limits import Limit
from sympy.series.order import Order
def test_series():
e = Symbol("e")
x = Symbol("x")
for c in (Limit, Limit(e, x, 1), Order, Order(e)):
check(c)
#================== statistics ==================
from sympy.statistics.distributions import ContinuousProbability, Normal, Sample, Uniform
def test_statistics():
x = Symbol("x")
y = Symbol("y")
for c in (ContinuousProbability, ContinuousProbability(), Normal,
Normal(x,y), Sample, Sample([1,3,4]), Uniform, Uniform(x,y)):
check(c)
#================== concrete ==================
from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
def test_concrete():
x = Symbol("x")
for c in (Product, Product(x, (x, 2, 4)), Sum, Sum(x, (x, 2, 4))):
check(c)
|
|
"""Support for sensors through the SmartThings cloud API."""
from __future__ import annotations
from collections import namedtuple
from collections.abc import Sequence
from pysmartthings import Attribute, Capability
from pysmartthings.device import DeviceEntity
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
AREA_SQUARE_METERS,
CONCENTRATION_PARTS_PER_MILLION,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
LIGHT_LUX,
MASS_KILOGRAMS,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
VOLUME_CUBIC_METERS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import dt as dt_util
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
Map = namedtuple(
"map", "attribute name default_unit device_class state_class entity_category"
)
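# Each capability maps to a list of Map tuples; async_setup_entry below creates
# one SmartThingsSensor entity per Map for each assigned capability of a device.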
CAPABILITY_TO_SENSORS = {
Capability.activity_lighting_mode: [
Map(
Attribute.lighting_mode,
"Activity Lighting Mode",
None,
None,
None,
EntityCategory.CONFIG,
)
],
Capability.air_conditioner_mode: [
Map(
Attribute.air_conditioner_mode,
"Air Conditioner Mode",
None,
None,
None,
EntityCategory.CONFIG,
)
],
Capability.air_quality_sensor: [
Map(
Attribute.air_quality,
"Air Quality",
"CAQI",
None,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.alarm: [Map(Attribute.alarm, "Alarm", None, None, None, None)],
Capability.audio_volume: [
Map(Attribute.volume, "Volume", PERCENTAGE, None, None, None)
],
Capability.battery: [
Map(
Attribute.battery,
"Battery",
PERCENTAGE,
SensorDeviceClass.BATTERY,
None,
EntityCategory.DIAGNOSTIC,
)
],
Capability.body_mass_index_measurement: [
Map(
Attribute.bmi_measurement,
"Body Mass Index",
f"{MASS_KILOGRAMS}/{AREA_SQUARE_METERS}",
None,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.body_weight_measurement: [
Map(
Attribute.body_weight_measurement,
"Body Weight",
MASS_KILOGRAMS,
None,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.carbon_dioxide_measurement: [
Map(
Attribute.carbon_dioxide,
"Carbon Dioxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
SensorDeviceClass.CO2,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.carbon_monoxide_detector: [
Map(
Attribute.carbon_monoxide,
"Carbon Monoxide Detector",
None,
None,
None,
None,
)
],
Capability.carbon_monoxide_measurement: [
Map(
Attribute.carbon_monoxide_level,
"Carbon Monoxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
SensorDeviceClass.CO,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.dishwasher_operating_state: [
Map(
Attribute.machine_state, "Dishwasher Machine State", None, None, None, None
),
Map(
Attribute.dishwasher_job_state,
"Dishwasher Job State",
None,
None,
None,
None,
),
Map(
Attribute.completion_time,
"Dishwasher Completion Time",
None,
SensorDeviceClass.TIMESTAMP,
None,
None,
),
],
Capability.dryer_mode: [
Map(
Attribute.dryer_mode,
"Dryer Mode",
None,
None,
None,
EntityCategory.CONFIG,
)
],
Capability.dryer_operating_state: [
Map(Attribute.machine_state, "Dryer Machine State", None, None, None, None),
Map(Attribute.dryer_job_state, "Dryer Job State", None, None, None, None),
Map(
Attribute.completion_time,
"Dryer Completion Time",
None,
SensorDeviceClass.TIMESTAMP,
None,
None,
),
],
Capability.dust_sensor: [
Map(
Attribute.fine_dust_level,
"Fine Dust Level",
None,
None,
SensorStateClass.MEASUREMENT,
None,
),
Map(
Attribute.dust_level,
"Dust Level",
None,
None,
SensorStateClass.MEASUREMENT,
None,
),
],
Capability.energy_meter: [
Map(
Attribute.energy,
"Energy Meter",
ENERGY_KILO_WATT_HOUR,
SensorDeviceClass.ENERGY,
SensorStateClass.TOTAL_INCREASING,
None,
)
],
Capability.equivalent_carbon_dioxide_measurement: [
Map(
Attribute.equivalent_carbon_dioxide_measurement,
"Equivalent Carbon Dioxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.formaldehyde_measurement: [
Map(
Attribute.formaldehyde_level,
"Formaldehyde Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.gas_meter: [
Map(
Attribute.gas_meter,
"Gas Meter",
ENERGY_KILO_WATT_HOUR,
None,
SensorStateClass.MEASUREMENT,
None,
),
Map(
Attribute.gas_meter_calorific, "Gas Meter Calorific", None, None, None, None
),
Map(
Attribute.gas_meter_time,
"Gas Meter Time",
None,
SensorDeviceClass.TIMESTAMP,
None,
None,
),
Map(
Attribute.gas_meter_volume,
"Gas Meter Volume",
VOLUME_CUBIC_METERS,
None,
SensorStateClass.MEASUREMENT,
None,
),
],
Capability.illuminance_measurement: [
Map(
Attribute.illuminance,
"Illuminance",
LIGHT_LUX,
SensorDeviceClass.ILLUMINANCE,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.infrared_level: [
Map(
Attribute.infrared_level,
"Infrared Level",
PERCENTAGE,
None,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.media_input_source: [
Map(Attribute.input_source, "Media Input Source", None, None, None, None)
],
Capability.media_playback_repeat: [
Map(
Attribute.playback_repeat_mode,
"Media Playback Repeat",
None,
None,
None,
None,
)
],
Capability.media_playback_shuffle: [
Map(
Attribute.playback_shuffle, "Media Playback Shuffle", None, None, None, None
)
],
Capability.media_playback: [
Map(Attribute.playback_status, "Media Playback Status", None, None, None, None)
],
Capability.odor_sensor: [
Map(Attribute.odor_level, "Odor Sensor", None, None, None, None)
],
Capability.oven_mode: [
Map(
Attribute.oven_mode,
"Oven Mode",
None,
None,
None,
EntityCategory.CONFIG,
)
],
Capability.oven_operating_state: [
Map(Attribute.machine_state, "Oven Machine State", None, None, None, None),
Map(Attribute.oven_job_state, "Oven Job State", None, None, None, None),
Map(Attribute.completion_time, "Oven Completion Time", None, None, None, None),
],
Capability.oven_setpoint: [
Map(Attribute.oven_setpoint, "Oven Set Point", None, None, None, None)
],
Capability.power_consumption_report: [],
Capability.power_meter: [
Map(
Attribute.power,
"Power Meter",
POWER_WATT,
SensorDeviceClass.POWER,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.power_source: [
Map(
Attribute.power_source,
"Power Source",
None,
None,
None,
EntityCategory.DIAGNOSTIC,
)
],
Capability.refrigeration_setpoint: [
Map(
Attribute.refrigeration_setpoint,
"Refrigeration Setpoint",
None,
SensorDeviceClass.TEMPERATURE,
None,
None,
)
],
Capability.relative_humidity_measurement: [
Map(
Attribute.humidity,
"Relative Humidity Measurement",
PERCENTAGE,
SensorDeviceClass.HUMIDITY,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.robot_cleaner_cleaning_mode: [
Map(
Attribute.robot_cleaner_cleaning_mode,
"Robot Cleaner Cleaning Mode",
None,
None,
None,
EntityCategory.CONFIG,
)
],
Capability.robot_cleaner_movement: [
Map(
Attribute.robot_cleaner_movement,
"Robot Cleaner Movement",
None,
None,
None,
None,
)
],
Capability.robot_cleaner_turbo_mode: [
Map(
Attribute.robot_cleaner_turbo_mode,
"Robot Cleaner Turbo Mode",
None,
None,
None,
EntityCategory.CONFIG,
)
],
Capability.signal_strength: [
Map(
Attribute.lqi,
"LQI Signal Strength",
None,
None,
SensorStateClass.MEASUREMENT,
EntityCategory.DIAGNOSTIC,
),
Map(
Attribute.rssi,
"RSSI Signal Strength",
None,
SensorDeviceClass.SIGNAL_STRENGTH,
SensorStateClass.MEASUREMENT,
EntityCategory.DIAGNOSTIC,
),
],
Capability.smoke_detector: [
Map(Attribute.smoke, "Smoke Detector", None, None, None, None)
],
Capability.temperature_measurement: [
Map(
Attribute.temperature,
"Temperature Measurement",
None,
SensorDeviceClass.TEMPERATURE,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.thermostat_cooling_setpoint: [
Map(
Attribute.cooling_setpoint,
"Thermostat Cooling Setpoint",
None,
SensorDeviceClass.TEMPERATURE,
None,
None,
)
],
Capability.thermostat_fan_mode: [
Map(
Attribute.thermostat_fan_mode,
"Thermostat Fan Mode",
None,
None,
None,
EntityCategory.CONFIG,
)
],
Capability.thermostat_heating_setpoint: [
Map(
Attribute.heating_setpoint,
"Thermostat Heating Setpoint",
None,
SensorDeviceClass.TEMPERATURE,
None,
EntityCategory.CONFIG,
)
],
Capability.thermostat_mode: [
Map(
Attribute.thermostat_mode,
"Thermostat Mode",
None,
None,
None,
EntityCategory.CONFIG,
)
],
Capability.thermostat_operating_state: [
Map(
Attribute.thermostat_operating_state,
"Thermostat Operating State",
None,
None,
None,
None,
)
],
Capability.thermostat_setpoint: [
Map(
Attribute.thermostat_setpoint,
"Thermostat Setpoint",
None,
SensorDeviceClass.TEMPERATURE,
None,
EntityCategory.CONFIG,
)
],
Capability.three_axis: [],
Capability.tv_channel: [
Map(Attribute.tv_channel, "Tv Channel", None, None, None, None),
Map(Attribute.tv_channel_name, "Tv Channel Name", None, None, None, None),
],
Capability.tvoc_measurement: [
Map(
Attribute.tvoc_level,
"Tvoc Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.ultraviolet_index: [
Map(
Attribute.ultraviolet_index,
"Ultraviolet Index",
None,
None,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.voltage_measurement: [
Map(
Attribute.voltage,
"Voltage Measurement",
ELECTRIC_POTENTIAL_VOLT,
SensorDeviceClass.VOLTAGE,
SensorStateClass.MEASUREMENT,
None,
)
],
Capability.washer_mode: [
Map(
Attribute.washer_mode,
"Washer Mode",
None,
None,
None,
EntityCategory.CONFIG,
)
],
Capability.washer_operating_state: [
Map(Attribute.machine_state, "Washer Machine State", None, None, None, None),
Map(Attribute.washer_job_state, "Washer Job State", None, None, None, None),
Map(
Attribute.completion_time,
"Washer Completion Time",
None,
SensorDeviceClass.TIMESTAMP,
None,
None,
),
],
}
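# Unit strings of "C"/"F" coming from the device attributes are translated to
# the Home Assistant temperature constants in native_unit_of_measurement.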
UNITS = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT}
THREE_AXIS_NAMES = ["X Coordinate", "Y Coordinate", "Z Coordinate"]
POWER_CONSUMPTION_REPORT_NAMES = [
"energy",
"power",
"deltaEnergy",
"powerEnergy",
"energySaved",
]
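# Keys looked up in the powerConsumptionReport attribute value; each one becomes
# its own SmartThingsPowerConsumptionSensor entity in async_setup_entry.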
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add binary sensors for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
sensors = []
for device in broker.devices.values():
for capability in broker.get_assigned(device.device_id, "sensor"):
if capability == Capability.three_axis:
sensors.extend(
[
SmartThingsThreeAxisSensor(device, index)
for index in range(len(THREE_AXIS_NAMES))
]
)
elif capability == Capability.power_consumption_report:
sensors.extend(
[
SmartThingsPowerConsumptionSensor(device, report_name)
for report_name in POWER_CONSUMPTION_REPORT_NAMES
]
)
else:
maps = CAPABILITY_TO_SENSORS[capability]
sensors.extend(
[
SmartThingsSensor(
device,
m.attribute,
m.name,
m.default_unit,
m.device_class,
m.state_class,
m.entity_category,
)
for m in maps
]
)
if broker.any_assigned(device.device_id, "switch"):
for capability in (Capability.energy_meter, Capability.power_meter):
maps = CAPABILITY_TO_SENSORS[capability]
sensors.extend(
[
SmartThingsSensor(
device,
m.attribute,
m.name,
m.default_unit,
m.device_class,
m.state_class,
m.entity_category,
)
for m in maps
]
)
async_add_entities(sensors)
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
"""Return all capabilities supported if minimum required are present."""
return [
capability for capability in CAPABILITY_TO_SENSORS if capability in capabilities
]
class SmartThingsSensor(SmartThingsEntity, SensorEntity):
"""Define a SmartThings Sensor."""
def __init__(
self,
device: DeviceEntity,
attribute: str,
name: str,
default_unit: str,
device_class: str,
state_class: str | None,
entity_category: str | None,
) -> None:
"""Init the class."""
super().__init__(device)
self._attribute = attribute
self._name = name
self._device_class = device_class
self._default_unit = default_unit
self._attr_state_class = state_class
self._attr_entity_category = entity_category
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return f"{self._device.label} {self._name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.device_id}.{self._attribute}"
@property
def native_value(self):
"""Return the state of the sensor."""
value = self._device.status.attributes[self._attribute].value
if self._device_class != SensorDeviceClass.TIMESTAMP:
return value
return dt_util.parse_datetime(value)
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def native_unit_of_measurement(self):
"""Return the unit this state is expressed in."""
unit = self._device.status.attributes[self._attribute].unit
return UNITS.get(unit, unit) if unit else self._default_unit
class SmartThingsThreeAxisSensor(SmartThingsEntity, SensorEntity):
"""Define a SmartThings Three Axis Sensor."""
def __init__(self, device, index):
"""Init the class."""
super().__init__(device)
self._index = index
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return f"{self._device.label} {THREE_AXIS_NAMES[self._index]}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.device_id}.{THREE_AXIS_NAMES[self._index]}"
@property
def native_value(self):
"""Return the state of the sensor."""
three_axis = self._device.status.attributes[Attribute.three_axis].value
try:
return three_axis[self._index]
except (TypeError, IndexError):
return None
class SmartThingsPowerConsumptionSensor(SmartThingsEntity, SensorEntity):
"""Define a SmartThings Sensor."""
def __init__(
self,
device: DeviceEntity,
report_name: str,
) -> None:
"""Init the class."""
super().__init__(device)
self.report_name = report_name
self._attr_state_class = SensorStateClass.MEASUREMENT
if self.report_name != "power":
self._attr_state_class = SensorStateClass.TOTAL_INCREASING
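        # "power" is an instantaneous reading (MEASUREMENT); the remaining
        # reports are treated as accumulating totals, hence TOTAL_INCREASING.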
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return f"{self._device.label} {self.report_name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.device_id}.{self.report_name}_meter"
@property
def native_value(self):
"""Return the state of the sensor."""
value = self._device.status.attributes[Attribute.power_consumption].value
if value is None or value.get(self.report_name) is None:
return None
if self.report_name == "power":
return value[self.report_name]
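        # Non-power reports are assumed to be in Wh here; dividing by 1000
        # yields kWh, matching native_unit_of_measurement below.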
return value[self.report_name] / 1000
@property
def device_class(self):
"""Return the device class of the sensor."""
if self.report_name == "power":
return SensorDeviceClass.POWER
return SensorDeviceClass.ENERGY
@property
def native_unit_of_measurement(self):
"""Return the unit this state is expressed in."""
if self.report_name == "power":
return POWER_WATT
return ENERGY_KILO_WATT_HOUR
|
|
"""A cog that requires server users to feed the bot in return for benefits."""
import os
import random
import asyncio
import copy
import datetime
import discord
from discord.ext import commands
from __main__ import send_cmd_help
from .utils import checks
from .utils.dataIO import dataIO
SAVE_FILEPATH = "data/KeaneCogs/parrot/parrot.json"
SAVE_DEFAULT = {
"Servers": {},
"Global": {
"StarveTime": [5, 0], # the hour and minute of the day that starve_check runs
"PerchInterval": 20, # the number of minutes between perches
"Version": "2.3"
}
}
SERVER_DEFAULT = {
"Parrot": {
"Appetite": 0, # max number of pellets Parrot can be fed (reset by starve_check)
"ChecksAlive": 0, # number of starve_checks survived
"HoursAlive": 0, # number of hours Parrot has been alive in the server
"UserWith": "", # ID of user Parrot is perched on (reset by starve_check)
"Fullness": 0, # number of pellets Parrot has in his belly (reset by starve_check)
"Cost": 5, # cost of feeding Parrot 1 pellet
"StarvedLoops": 0, # phase of starvation Parrot is in
"WarnedYet": False, # whether the server has been warned for the current self.checktime
"StealAvailable": True # whether steal is available for the perched user (reset by perch_loop)
},
"Feeders": {} # contains user IDs as keys and dicts as values (reset by starve_check)
}
FEEDER_DEFAULT = {
"PelletsFed": 0,
"HeistBoostAvailable": True,
"AirhornUses": 0,
"StolenFrom": [],
"CreditsCollected": 0.0
}
class Parrot:
"""Commands related to feeding the bot."""
def __init__(self, bot):
self.save_file = dataIO.load_json(SAVE_FILEPATH)
self.bot = bot
self.update_version()
self.checktime = datetime.datetime.utcnow() # dummy value
self.perchtime = datetime.datetime.utcnow() # dummy value
self.update_looptimes(False) # change checktime to what it should be
# without causing a new warning
self.loop_task = bot.loop.create_task(self.loop()) # remember to change __unload()
@commands.command(pass_context=True, no_pm=True)
async def feed(self, ctx, amount: int):
"""Feed the parrot! Use \"{prefix}help parrot\" for more information."""
server = ctx.message.server
# make sure the server is in the data file
self.add_server(server)
parrot = self.save_file["Servers"][server.id]["Parrot"]
feeders = self.save_file["Servers"][server.id]["Feeders"]
bank = self.bot.get_cog('Economy').bank
# Checks
error_msg = ""
if not bank.account_exists(ctx.message.author):
error_msg = ("You need to have a bank account with credits to feed me. "
"Use `{}bank register` to open one.".format(ctx.prefix))
elif amount <= 0:
error_msg = "You can't feed me nothing!"
elif parrot["Fullness"] == parrot["Appetite"]:
error_msg = "I'm full! I don't want to get fat."
if error_msg:
return await self.bot.say(error_msg)
# make sure parrot doesn't get overfed
if parrot["Fullness"] + amount > parrot["Appetite"]:
amount = parrot["Appetite"] - parrot["Fullness"]
await self.bot.say("I don't want to be too full. I'll only eat {} pellets, "
"and you can keep the rest.".format(amount))
usercost = amount * parrot["Cost"]
# confirmation prompt
await self.bot.say("You are about to spend {} credits to feed me {} pellets. "
"Reply \"yes\" to confirm.".format(usercost, amount))
response = await self.bot.wait_for_message(timeout=15, author=ctx.message.author)
if response is None or response.content.lower().strip() != "yes":
return await self.bot.say("Okay then, but don't let me starve!")
# deduct usercost from their credits account
if bank.can_spend(ctx.message.author, usercost):
bank.withdraw_credits(ctx.message.author, usercost)
else:
return await self.bot.say("You don't have enough credits to feed me that much.")
# set up user's dict in the data file
if ctx.message.author.id not in feeders:
feeders[ctx.message.author.id] = copy.deepcopy(FEEDER_DEFAULT)
# record how much the user has fed for the day
feeders[ctx.message.author.id]["PelletsFed"] += amount
# change parrot's fullness level
parrot["Fullness"] += amount
dataIO.save_json(SAVE_FILEPATH, self.save_file)
return await self.bot.say("Om nom nom. Thanks!")
@commands.group(pass_context=True)
async def parrot(self, ctx):
"""Parrot needs to be fed! Every day, Parrot has a different appetite value,
which is how many food pellets he would like to be fed for the day.
Spend your credits to feed Parrot pellets using the "{prefix}feed" command,
and find out how full Parrot is or what his appetite is by using the "{prefix}parrot info" command.
Every so often, Parrot perches on the shoulder of a random user who has fed him.
The fraction of Parrot's appetite that you have fed is your chance of being perched on by Parrot.
In return for providing your shoulder to him, Parrot will help you and give you powers.
For example, he can assist you with Heists."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@parrot.command(name="starvetime", pass_context=True) # no_pm=False
@checks.is_owner()
async def parrot_starve_time(self, ctx, hour: int = None, minute: int = 0):
"""View or change the time at which Parrot checks whether he has starved
and resets his appetite. This command takes UTC time.
(0 <= hour <= 23) (0 <= minute <= 59)"""
if hour is None:
cur_hour = self.save_file["Global"]["StarveTime"][0]
cur_minute = self.save_file["Global"]["StarveTime"][1]
cur_time = datetime.time(cur_hour, cur_minute)
return await self.bot.say("Current setting: {} UTC".format(cur_time.strftime("%H:%M")))
if not (0 <= hour <= 23 and 0 <= minute <= 59):
return await self.bot.say("Hour must be greater than -1 and less than 24. "
"Minute must be greater than -1 and less than 60. "
"Both numbers must be integers.")
# confirmation prompt
await self.bot.say("This is a global setting that affects all servers the bot is connected to. "
"Parrot checks whether he has starved every day at a certain time. "
"Parrot will check every day (including today if possible) at {} UTC. "
"Reply \"yes\" to confirm."
.format(datetime.time(hour, minute).strftime("%H:%M")))
response = await self.bot.wait_for_message(timeout=15, author=ctx.message.author)
if response is None or response.content.lower().strip() != "yes":
return await self.bot.say("Setting change cancelled.")
self.save_file["Global"]["StarveTime"] = [hour, minute]
dataIO.save_json(SAVE_FILEPATH, self.save_file)
self.update_looptimes()
return await self.bot.say("Setting change successful.")
@parrot.command(name="perchinterval", pass_context=True) # no_pm=False
@checks.is_owner()
async def parrot_perch_interval(self, ctx, minutes: int = None):
"""View or change how many minutes pass between perches."""
if minutes is None:
interval = self.save_file["Global"]["PerchInterval"]
return await self.bot.say("Current setting: {} minutes".format(interval))
if not 0 < minutes <= 1440:
return await self.bot.say("The number of minutes must be greater than 0 "
"and less than or equal to 1440.")
if not (60 % minutes == 0 or minutes % 60 == 0):
return await self.bot.say("The number of minutes must be a factor or "
"multiple of 60.")
# confirmation prompt
await self.bot.say("This is a global setting that affects all servers the bot is connected to. "
"Every day, the first perch is at Parrot's starve time. For the rest of the day, "
"Parrot will wait {} minutes between perches. Reply \"yes\" "
"to confirm.".format(minutes))
response = await self.bot.wait_for_message(timeout=15, author=ctx.message.author)
if response is None or response.content.lower().strip() != "yes":
return await self.bot.say("Setting change cancelled.")
self.save_file["Global"]["PerchInterval"] = minutes
dataIO.save_json(SAVE_FILEPATH, self.save_file)
self.update_looptimes() # this updates self.perchtime with the new interval
return await self.bot.say("Setting change successful.")
@parrot.command(name="checknow", pass_context=True) # no_pm=False
@checks.is_owner()
async def parrot_check_now(self, ctx):
"""Execute a starve check immediately. This will move Parrot to the next
appetite loop if he survives."""
await self.starve_check()
return await self.bot.send_message(ctx.message.author,
"starve_check was executed.")
@parrot.command(name="setcost", pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def parrot_set_cost(self, ctx, cost: int):
"""Change how much it costs to feed the parrot 1 pellet."""
server = ctx.message.server
self.add_server(server) # make sure the server is in the data file
if cost >= 0:
self.save_file["Servers"][server.id]["Parrot"]["Cost"] = cost
dataIO.save_json(SAVE_FILEPATH, self.save_file)
return await self.bot.say("Set cost of feeding to {} credits per pellet.".format(cost))
else:
return await self.bot.say("Cost must be at least 0.")
@parrot.command(name="steal", pass_context=True, no_pm=True)
async def parrot_steal(self, ctx, target: discord.Member):
"""Get Parrot to steal up to 1000 of someone's credits for you.
One use per perch. Parrot will not steal from people who have
fed him. Parrot will not steal from someone twice in a day."""
self.add_server(ctx.message.server) # make sure the server is in the data file
feeders = self.save_file["Servers"][ctx.message.server.id]["Feeders"]
parrot = self.save_file["Servers"][ctx.message.server.id]["Parrot"]
bank = self.bot.get_cog('Economy').bank
# checks
error_msg = ""
if ctx.message.author.id != parrot["UserWith"]:
error_msg = ("Parrot needs to be perched on you to use this command. "
"Use `{}help parrot` for more information.".format(ctx.prefix))
elif not parrot["StealAvailable"]:
error_msg = ("You have already used steal. You must wait until "
"the next time you are perched on.")
elif not bank.account_exists(target):
error_msg = "Your target doesn't have a bank account to steal credits from."
elif target.id in feeders and feeders[target.id]["PelletsFed"] > 0:
error_msg = ("Parrot refuses to steal from someone "
"who has fed him in the current fullness cycle.")
elif target.id in feeders[ctx.message.author.id]["StolenFrom"]:
error_msg = ("You have already stolen from this person today. "
"It is too risky to try a second time.")
if error_msg:
return await self.bot.say(error_msg)
await self.bot.say("Parrot flies off...")
await asyncio.sleep(3)
stolen = round(random.uniform(1, random.uniform(1, 1000)))
target_balance = bank.get_balance(target)
if stolen >= target_balance:
bank.transfer_credits(target, ctx.message.author, target_balance)
msg = ("Parrot stole every last credit ({} credits) from "
"{}'s bank account and deposited it in your account!"
.format(target_balance, target.mention))
else:
bank.transfer_credits(target, ctx.message.author, stolen)
msg = ("Parrot stole {} credits from {}'s bank account "
"and deposited it in your account!"
.format(stolen, target.mention))
parrot["StealAvailable"] = False
feeders[ctx.message.author.id]["StolenFrom"].append(target.id)
dataIO.save_json(SAVE_FILEPATH, self.save_file)
return await self.bot.say(msg)
@parrot.command(name="airhorn", pass_context=True, no_pm=True)
async def parrot_airhorn(self, ctx, channel: discord.Channel):
"""Play an airhorn sound to the target voice channel."""
# This is copy-pasted from audio.py's play() function and has
# been modified to always play an airhorn.
# Audio.py is a part of Red Bot, which is licensed under GPL v3
# https://www.gnu.org/licenses/gpl-3.0.en.html
# CHANGES:
# The function has been renamed to parrot_airhorn, and takes a
# channel instead of a URL as an argument now.
# The URL is now hard-coded to be a YouTube link.
        # The try-except clause has been commented out. The calls to
        # functions within audio.py have been changed from
        # self.function() to audio.function().
# Newly added lines are labeled with a comment "NEW".
# No other changes were made.
server = ctx.message.server
self.add_server(server) # NEW
if ctx.message.author.id != self.save_file["Servers"][server.id]["Parrot"]["UserWith"]: # NEW
return await self.bot.say("Parrot needs to be perched on you to use this command. "
"Use `{}help parrot` for more information.".format(ctx.prefix)) # NEW
if self.save_file["Servers"][server.id]["Feeders"][ctx.message.author.id]["AirhornUses"] >= 3: # NEW
return await self.bot.say("You have already used airhorn 3 times. You must wait until "
"Parrot's fullness resets, and be perched on by him again.") # NEW
audio = self.bot.get_cog('Audio') # NEW
url = "https://www.youtube.com/watch?v=XDvuAYySJj0" # This line was changed to be a hard-coded
# YouTube link instead of being a URL argument.
# Checking if playing in current server
if audio.is_playing(server):
await self.bot.say("Parrot is already playing music in a channel on this server.")
return # Default to queue
# Checking already connected, will join if not
# try:
# audio.has_connect_perm(target, server)
# except AuthorNotConnected:
# await self.bot.say("You must join a voice channel before I can"
# " play anything.")
# return
# except UnauthorizedConnect:
# await self.bot.say("I don't have permissions to join your"
# " voice channel.")
# return
# except UnauthorizedSpeak:
# await self.bot.say("I don't have permissions to speak in your"
# " voice channel.")
# return
# except ChannelUserLimit:
# await self.bot.say("Your voice channel is full.")
# return
if not audio.voice_connected(server):
await audio._join_voice_channel(channel)
else: # We are connected but not to the right channel
if audio.voice_client(server).channel != channel:
await audio._stop_and_disconnect(server)
await audio._join_voice_channel(channel)
# If not playing, spawn a downloader if it doesn't exist and begin
# downloading the next song
if audio.currently_downloading(server):
await audio.bot.say("I'm already downloading a file!")
return
url = url.strip("<>")
if audio._match_any_url(url):
if not audio._valid_playable_url(url):
await self.bot.say("That's not a valid URL.")
return
else:
            url = url.replace("/", "&#47")  # escape "/" in search terms, as audio.py does
url = "[SEARCH:]" + url
if "[SEARCH:]" not in url and "youtube" in url:
url = url.split("&")[0] # Temp fix for the &list issue
audio._stop_player(server)
audio._clear_queue(server)
audio._add_to_queue(server, url)
self.save_file["Servers"][server.id]["Feeders"][ctx.message.author.id]["AirhornUses"] += 1 # NEW
dataIO.save_json(SAVE_FILEPATH, self.save_file) # NEW
@parrot.command(name="info", pass_context=True, no_pm=True, aliases=["stats"])
async def parrot_info(self, ctx):
"""Information about the parrot."""
server = ctx.message.server
self.add_server(server) # make sure the server is in the data file
parrot = self.save_file["Servers"][server.id]["Parrot"]
fullness_str = "{} out of {} pellets".format(parrot["Fullness"], parrot["Appetite"])
feed_cost_str = "{} credits per pellet".format(parrot["Cost"])
days_living_str = "{} days".format(round(parrot["HoursAlive"] / 24))
# status and time_until_starved
if parrot["StarvedLoops"] == 0:
status_str = "healthy"
time_until_starved_str = "until Parrot begins\nstarving: "
elif parrot["StarvedLoops"] == 1:
status_str = "starving"
time_until_starved_str = "until Parrot becomes\ndeathly hungry:\n"
else:
status_str = "deathbed\n(will die if not fed!)"
time_until_starved_str = "until Parrot dies of\nstarvation: "
if parrot["Fullness"] / parrot["Appetite"] >= 0.5:
description_str = ("Parrot has been fed enough food that he won't starve for now. "
"Use `{}help parrot` for more information.".format(ctx.prefix))
time_until_starved_str = "until fullness resets:\n"
if parrot["StarvedLoops"] > 0:
status_str = "recovering"
else:
description_str = ("If Parrot is not fed enough to be half full by the time "
"the timer reaches 0, he will enter the next phase of "
"starvation. Use `{}help parrot` for more information.".format(ctx.prefix))
if parrot["ChecksAlive"] == 0:
# add an extra day because the first check won't starve or change Parrot's appetite
until_starved = (self.checktime + datetime.timedelta(days=1)
- datetime.datetime.utcnow())
else:
until_starved = self.checktime - datetime.datetime.utcnow()
seconds = round(until_starved.total_seconds())
time_until_starved_str += str(datetime.timedelta(seconds=seconds))
if parrot["UserWith"]:
userwith_str = server.get_member(parrot["UserWith"]).mention
else:
userwith_str = "nobody"
embed = discord.Embed(color=discord.Color.teal(), description=description_str)
embed.title = "Parrot Information"
embed.timestamp = datetime.datetime.utcfromtimestamp(os.path.getmtime(os.path.abspath(__file__)))
embed.set_thumbnail(url="{}".format(self.bot.user.avatar_url if self.bot.user.avatar_url
else self.bot.user.default_avatar_url))
embed.set_footer(text="Made by Keane")
embed.add_field(name="Fullness", value=fullness_str)
embed.add_field(name="Cost to feed", value=feed_cost_str)
embed.add_field(name="Age", value=days_living_str)
embed.add_field(name="Status", value=status_str)
embed.add_field(name="Perched on", value=userwith_str)
embed.add_field(name="Countdown", value=time_until_starved_str)
return await self.bot.say(embed=embed)
@parrot.command(name="feeders", pass_context=True, no_pm=True)
async def parrot_feeders(self, ctx):
"""Display a list of people who have fed Parrot in the current appetite
loop, with the number of pellets they have fed and the percent chance
they have of being perched on."""
server = ctx.message.server
self.add_server(server) # make sure the server is in the data file
output = "```py\n"
feeders = self.save_file["Servers"][server.id]["Feeders"]
parrot = self.save_file["Servers"][server.id]["Parrot"]
# The first perched user of the day is in feeders
# but may not have fed any pellets. If so, ignore them.
fedparrot = [feederid for feederid in feeders
if feeders[feederid]["PelletsFed"] > 0]
if not fedparrot:
return await self.bot.say("```Nobody has fed Parrot yet.```")
idlist = sorted(fedparrot,
key=(lambda idnum: feeders[idnum]["PelletsFed"]),
reverse=True)
max_chance = (feeders[idlist[0]]["PelletsFed"] / parrot["Appetite"]) * 100
max_chance_len = len(str(round(max_chance)))
max_pellets = feeders[idlist[0]]["PelletsFed"]
max_pellets_len = len(str(max_pellets))
# example: " 155/100%"
max_end_len = 1 + max_pellets_len + 1 + max_chance_len + 1
for feederid in idlist:
feeder = server.get_member(feederid)
chance = (feeders[feederid]["PelletsFed"] / parrot["Appetite"]) * 100
chance_str = str(round(chance))
if len(feeder.display_name) > 26 - max_end_len:
# 26 - 3 to leave room for the ellipsis
name = feeder.display_name[:23 - max_end_len] + "..."
else:
name = feeder.display_name
output += name
pellets_str = str(feeders[feederid]["PelletsFed"])
# example: " 1/ 1%"
end_len = 1 + len(pellets_str) + 1 + max_chance_len + 1
output += " " * (26 - len(name) - end_len)
# append the end
output += " " + pellets_str + "|"
output += " " * (max_chance_len - len(chance_str))
output += chance_str + "%"
output += "\n"
output += "```"
return await self.bot.say(output)
async def loop(self):
"""Loop forever to do four tasks:
Update HoursAlive, warn servers when Parrot is starving soon,
perch on users at perchtime, and reset Parrot at checktime."""
await self.bot.wait_until_ready()
self.update_looptimes()
current_hour = datetime.datetime.utcnow().hour
while True:
now = datetime.datetime.utcnow()
# Update HoursAlive
if current_hour != now.hour:
current_hour = now.hour
for serverid in self.save_file["Servers"]:
self.save_file["Servers"][serverid]["Parrot"]["HoursAlive"] += 1
dataIO.save_json(SAVE_FILEPATH, self.save_file)
# Send starvation warnings to each server (if they haven't been sent yet)
stoptime = self.checktime + datetime.timedelta(hours=-4)
if stoptime <= now:
change = False
for serverid in self.save_file["Servers"]:
parrot = self.save_file["Servers"][serverid]["Parrot"]
if (parrot["ChecksAlive"] > 0
and (parrot["Fullness"] / parrot["Appetite"]) < 0.5
and not parrot["WarnedYet"]):
if parrot["StarvedLoops"] == 0:
await self.bot.send_message(
self.bot.get_server(serverid),
"*I'm quite hungry...*")
elif parrot["StarvedLoops"] == 1:
await self.bot.send_message(
self.bot.get_server(serverid),
"*I'm so hungry I feel weak...*")
else:
await self.bot.send_message(
self.bot.get_server(serverid),
"*I'm going to* ***DIE*** *of starvation very "
"soon if I don't get fed!*")
parrot["WarnedYet"] = True
change = True
if change:
dataIO.save_json(SAVE_FILEPATH, self.save_file)
# Perch
if self.perchtime <= now:
# Choose perched user
for serverid in self.save_file["Servers"]:
feeders = self.save_file["Servers"][serverid]["Feeders"]
parrot = self.save_file["Servers"][serverid]["Parrot"]
weights = [(feeders[feederid]["PelletsFed"] / parrot["Appetite"])
* 100 for feederid in feeders]
population = list(feeders)
weights.append(100 - sum(weights))
population.append("")
# Randomly choose who Parrot is with. This could be nobody, represented by ""
try:
parrot["UserWith"] = random.choices(population, weights)[0] #random.choices returns a list
except AttributeError:
# DIY random.choices alternative for scrubs who don't have Python 3.6
total = 0
cum_weights = []
for num in weights:
total += num
cum_weights.append(total)
rand = random.uniform(0, 100)
for index, weight in enumerate(cum_weights):
if weight >= rand:
parrot["UserWith"] = population[index]
break
# Reset at checktime (checktime is always on a perchtime)
if self.checktime <= now:
await self.display_collected()
await self.starve_check()
self.update_looptimes() # checktime must be updated daily
# Collect coins for perched user
for serverid in self.save_file["Servers"]:
self.collect_credits(serverid)
self.save_file["Servers"][serverid]["Parrot"]["StealAvailable"] = True
# Update perchtime
interval = self.save_file["Global"]["PerchInterval"]
self.perchtime = self.perchtime + datetime.timedelta(minutes=interval)
dataIO.save_json(SAVE_FILEPATH, self.save_file)
await asyncio.sleep(1)
async def starve_check(self):
"""Check if Parrot has starved or not.
If Parrot has starved, leave the server. If he has survived,
reset for the next loop."""
for serverid in list(self.save_file["Servers"]): # generate a list because servers might
# be removed from the dict while iterating
parrot = self.save_file["Servers"][serverid]["Parrot"]
feeders = self.save_file["Servers"][serverid]["Feeders"]
# don't check on the first loop to give new servers a chance
# in case they got added at an unlucky time (right before the check happens)
reset = False
if parrot["ChecksAlive"] == 0:
parrot["ChecksAlive"] += 1
elif parrot["Fullness"] / parrot["Appetite"] < 0.5:
if parrot["StarvedLoops"] == 2:
await self.bot.send_message(
self.bot.get_server(serverid),
"Oh no! I've starved to death!\n"
"Goodbye, cruel world!")
await self.bot.leave_server(self.bot.get_server(serverid))
del self.save_file["Servers"][serverid]
else:
# advance to the next stage of starvation
parrot["StarvedLoops"] += 1
reset = True
else:
# healthy; reset for the next loop
parrot["StarvedLoops"] = 0
reset = True
if reset:
parrot["ChecksAlive"] += 1
parrot["Appetite"] = round(random.normalvariate(50*(1.75**parrot["StarvedLoops"]), 6))
parrot["Fullness"] = 0
parrot["WarnedYet"] = False
self.save_file["Servers"][serverid]["Feeders"].clear()
# https://stackoverflow.com/questions/369898/difference-between-dict-clear-and-assigning-in-python
if parrot["UserWith"]:
feeders[parrot["UserWith"]] = copy.deepcopy(FEEDER_DEFAULT)
dataIO.save_json(SAVE_FILEPATH, self.save_file)
async def display_collected(self):
"""Display a leaderboard in each server with how many credits
Parrot collected for users. Award CreditsCollected to each feeder."""
bank = self.bot.get_cog('Economy').bank
for serverid in self.save_file["Servers"]:
server = self.bot.get_server(serverid)
leaderboard = ("Here's how many credits I collected for "
"everyone I perched on today:\n\n")
leaderboard += "```py\n"
feeders = self.save_file["Servers"][serverid]["Feeders"]
perched_users = [feederid for feederid in feeders
if round(feeders[feederid]["CreditsCollected"]) > 0]
if not perched_users:
continue # nobody gets credits, skip this server
ranked = sorted(perched_users,
key=lambda idnum: feeders[idnum]["CreditsCollected"],
reverse=True)
max_creds_len = len(str(round(feeders[ranked[0]]["CreditsCollected"])))
for user_id in ranked:
user = server.get_member(user_id)
if len(user.display_name) > 26 - max_creds_len - 1:
                    name = user.display_name[:22 - max_creds_len] + "..."
else:
name = user.display_name
leaderboard += name
collected = round(feeders[user_id]["CreditsCollected"])
bank.deposit_credits(user, collected)
leaderboard += " " * (26 - len(name) - len(str(collected)))
leaderboard += str(collected) + "\n"
leaderboard += "```"
await self.bot.send_message(server, leaderboard)
def add_server(self, server):
"""Add the server to the file if it isn't already in it."""
if server.id not in self.save_file["Servers"]:
self.save_file["Servers"][server.id] = copy.deepcopy(SERVER_DEFAULT)
self.save_file["Servers"][server.id]["Parrot"]["Appetite"] = round(random.normalvariate(50, 6))
dataIO.save_json(SAVE_FILEPATH, self.save_file)
print("{} New server \"{}\" found and added to Parrot data file!"
.format(datetime.datetime.now(), server.name))
def update_looptimes(self, warn=True):
"""Update self.checktime for the latest StarveTime. If StarveTime
has already passed today, self.checktime will be StarveTime tomorrow.
Update self.perchtime for the latest StarveTime or PerchInterval."""
# Update self.checktime
starvetime = self.save_file["Global"]["StarveTime"]
checktime = datetime.datetime.utcnow().replace(hour=starvetime[0],
minute=starvetime[1],
second=0,
microsecond=0)
if datetime.datetime.utcnow().time() >= checktime.time():
checktime = checktime + datetime.timedelta(days=1)
if self.checktime != checktime: # if StarveTime changed (this will always be true
# when Parrot is first loaded due to self.checktime's
# initial value)
self.checktime = checktime
if warn:
for serverid in self.save_file["Servers"]:
self.save_file["Servers"][serverid]["Parrot"]["WarnedYet"] = False
dataIO.save_json(SAVE_FILEPATH, self.save_file)
# Update self.perchtime
interval = self.save_file["Global"]["PerchInterval"]
self.perchtime = self.checktime + datetime.timedelta(days=-1)
while self.perchtime < datetime.datetime.utcnow():
self.perchtime = self.perchtime + datetime.timedelta(minutes=interval)
def collect_credits(self, serverid):
"""Calculate how many credits Parrot will collect during the perch."""
parrot = self.save_file["Servers"][serverid]["Parrot"]
feeders = self.save_file["Servers"][serverid]["Feeders"]
interval = self.save_file["Global"]["PerchInterval"]
# Generate multiplier
since_checktime = datetime.datetime.utcnow() - self.checktime
current_minute = round(since_checktime.total_seconds() / 60)
current_minute = current_minute % 1440
multiplier = 0
for i in range(current_minute, current_minute + interval):
multiplier += 1.003**i
multiplier = multiplier / 24568
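        # 24568 appears to be (approximately) the sum of 1.003**i over a full
        # day (i = 0..1439), so a feeder who fed right after checktime ends the
        # day having collected close to the full 1.5 * Cost * pellets.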
for feederid in feeders:
pellets = feeders[feederid]["PelletsFed"]
if pellets > 50: # Feeding more than 50 pellets (average healthy appetite) is ignored
pellets = 50
# 1.5 * parrot["Cost"] * pellets is exactly
# how much the feeder would earn at the end of
# the day if they fed right after checktime
feeders[feederid]["CreditsCollected"] += 1.5 * parrot["Cost"] * pellets * multiplier
dataIO.save_json(SAVE_FILEPATH, self.save_file)
def update_version(self):
"""Update the save file if necessary."""
if "Version" not in self.save_file["Global"]: # if version == 1
for serverid in self.save_file["Servers"]:
parrot = self.save_file["Servers"][serverid]["Parrot"]
starvetime = self.save_file["Global"]["StarveTime"]
parrot["HoursAlive"] = round((starvetime * parrot["LoopsAlive"]) / 3600)
parrot["ChecksAlive"] = parrot["LoopsAlive"]
del parrot["LoopsAlive"]
parrot["WarnedYet"] = False
self.save_file["Global"]["StarveTime"] = [5, 0]
self.save_file["Global"]["Version"] = "2"
if self.save_file["Global"]["Version"] == "2":
for serverid in self.save_file["Servers"]:
parrot = self.save_file["Servers"][serverid]["Parrot"]
feeders = self.save_file["Servers"][serverid]["Feeders"]
for feederid in feeders:
if "StealAvailable" in feeders[feederid]:
feeders[feederid]["StolenFrom"] = []
parrot["StealAvailable"] = True
self.save_file["Global"]["Version"] = "2.1"
if self.save_file["Global"]["Version"] == "2.1":
for serverid in self.save_file["Servers"]:
feeders = self.save_file["Servers"][serverid]["Feeders"]
for feederid in feeders:
feeders[feederid]["CreditsCollected"] = 0
feeders[feederid]["StolenFrom"] = []
feeders[feederid]["AirhornUses"] = 0
feeders[feederid]["HeistBoostAvailable"] = True
self.save_file["Global"]["Version"] = "2.2"
if self.save_file["Global"]["Version"] == "2.2":
self.save_file["Global"]["PerchInterval"] = 20
self.save_file["Global"]["Version"] = "2.3"
dataIO.save_json(SAVE_FILEPATH, self.save_file)
def parrot_perched_on(self, server):
"""Return the user ID of whoever Parrot is perched on.
This is for Heist.py to use for heist boost."""
self.add_server(server) # make sure the server is in the data file
return self.save_file["Servers"][server.id]["Parrot"]["UserWith"]
def heist_boost_available(self, server, user, availability=True):
"""Return whether the user has a Heist boost available.
Optionally set availability to False to set the user's HeistBoostAvailable to False.
This is for Heist.py to use for heist boost."""
self.add_server(server) # make sure the server is in the data file
if availability is False:
self.save_file["Servers"][server.id]["Feeders"][user.id]["HeistBoostAvailable"] = False
dataIO.save_json(SAVE_FILEPATH, self.save_file)
return self.save_file["Servers"][server.id]["Feeders"][user.id]["HeistBoostAvailable"]
def __unload(self):
self.loop_task.cancel()
def dir_check():
"""Create a folder and save file for the cog if they don't exist."""
if not os.path.exists("data/KeaneCogs/parrot"):
print("Creating data/KeaneCogs/parrot folder...")
os.makedirs("data/KeaneCogs/parrot")
if not dataIO.is_valid_json(SAVE_FILEPATH):
print("Creating default parrot.json...")
dataIO.save_json(SAVE_FILEPATH, SAVE_DEFAULT)
def setup(bot):
"""Create a Parrot object."""
dir_check()
bot.add_cog(Parrot(bot))
|
|
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.db.models.query import QuerySet
from django.test import TestCase
from guardian.shortcuts import get_perms_for_model
from guardian.core import ObjectPermissionChecker
from guardian.compat import get_user_model
from guardian.compat import get_user_permission_full_codename
from guardian.shortcuts import assign
from guardian.shortcuts import assign_perm
from guardian.shortcuts import remove_perm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_objects_for_user
from guardian.shortcuts import get_objects_for_group
from guardian.exceptions import MixedContentTypeError
from guardian.exceptions import NotUserNorGroup
from guardian.exceptions import WrongAppError
from guardian.testapp.tests.core_test import ObjectPermissionTestCase
from guardian.models import Group, Permission
import warnings
User = get_user_model()
user_app_label = User._meta.app_label
user_module_name = User._meta.module_name
class ShortcutsTests(ObjectPermissionTestCase):
def test_get_perms_for_model(self):
self.assertEqual(get_perms_for_model(self.user).count(), 3)
self.assertTrue(list(get_perms_for_model(self.user)) ==
list(get_perms_for_model(User)))
self.assertEqual(get_perms_for_model(Permission).count(), 3)
model_str = 'contenttypes.ContentType'
self.assertEqual(
sorted(get_perms_for_model(model_str).values_list()),
sorted(get_perms_for_model(ContentType).values_list()))
obj = ContentType()
self.assertEqual(
sorted(get_perms_for_model(model_str).values_list()),
sorted(get_perms_for_model(obj).values_list()))
class AssignPermTest(ObjectPermissionTestCase):
"""
Tests permission assigning for user/group and object.
"""
def test_not_model(self):
self.assertRaises(NotUserNorGroup, assign_perm,
perm="change_object",
user_or_group="Not a Model",
obj=self.ctype)
def test_global_wrong_perm(self):
self.assertRaises(ValueError, assign_perm,
perm="change_site", # for global permissions must provide app_label
user_or_group=self.user)
def test_user_assign_perm(self):
assign_perm("change_contenttype", self.user, self.ctype)
assign_perm("change_contenttype", self.group, self.ctype)
self.assertTrue(self.user.has_perm("change_contenttype", self.ctype))
def test_group_assign_perm(self):
assign_perm("change_contenttype", self.group, self.ctype)
assign_perm("delete_contenttype", self.group, self.ctype)
check = ObjectPermissionChecker(self.group)
self.assertTrue(check.has_perm("change_contenttype", self.ctype))
self.assertTrue(check.has_perm("delete_contenttype", self.ctype))
def test_user_assign_perm_global(self):
perm = assign_perm("contenttypes.change_contenttype", self.user)
self.assertTrue(self.user.has_perm("contenttypes.change_contenttype"))
self.assertTrue(isinstance(perm, Permission))
def test_group_assign_perm_global(self):
perm = assign_perm("contenttypes.change_contenttype", self.group)
self.assertTrue(self.user.has_perm("contenttypes.change_contenttype"))
self.assertTrue(isinstance(perm, Permission))
def test_deprecation_warning(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
assign("contenttypes.change_contenttype", self.group)
self.assertEqual(len(warns), 1)
self.assertTrue(isinstance(warns[0].message, DeprecationWarning))
class RemovePermTest(ObjectPermissionTestCase):
"""
Tests object permissions removal.
"""
def test_not_model(self):
self.assertRaises(NotUserNorGroup, remove_perm,
perm="change_object",
user_or_group="Not a Model",
obj=self.ctype)
def test_global_wrong_perm(self):
self.assertRaises(ValueError, remove_perm,
perm="change_site", # for global permissions must provide app_label
user_or_group=self.user)
def test_user_remove_perm(self):
# assign perm first
assign_perm("change_contenttype", self.user, self.ctype)
remove_perm("change_contenttype", self.user, self.ctype)
self.assertFalse(self.user.has_perm("change_contenttype", self.ctype))
def test_group_remove_perm(self):
# assign perm first
assign_perm("change_contenttype", self.group, self.ctype)
remove_perm("change_contenttype", self.group, self.ctype)
check = ObjectPermissionChecker(self.group)
self.assertFalse(check.has_perm("change_contenttype", self.ctype))
def test_user_remove_perm_global(self):
# assign perm first
perm = "contenttypes.change_contenttype"
assign_perm(perm, self.user)
remove_perm(perm, self.user)
self.assertFalse(self.user.has_perm(perm))
def test_group_remove_perm_global(self):
# assign perm first
perm = "contenttypes.change_contenttype"
assign_perm(perm, self.group)
remove_perm(perm, self.group)
app_label, codename = perm.split('.')
perm_obj = Permission.objects.get(codename=codename,
content_type__app_label=app_label)
self.assertFalse(perm_obj in self.group.permissions.all())
class GetPermsTest(ObjectPermissionTestCase):
"""
Tests get_perms function (already done at core tests but left here as a
placeholder).
"""
def test_not_model(self):
self.assertRaises(NotUserNorGroup, get_perms,
user_or_group=None,
obj=self.ctype)
def test_user(self):
perms_to_assign = ("change_contenttype",)
for perm in perms_to_assign:
assign_perm("change_contenttype", self.user, self.ctype)
perms = get_perms(self.user, self.ctype)
for perm in perms_to_assign:
self.assertTrue(perm in perms)
class GetUsersWithPermsTest(TestCase):
"""
Tests get_users_with_perms function.
"""
def setUp(self):
self.obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
self.obj2 = ContentType.objects.create(name='ct2', model='bar',
app_label='guardian-tests')
self.user1 = User.objects.create(username='user1')
self.user2 = User.objects.create(username='user2')
self.user3 = User.objects.create(username='user3')
self.group1 = Group.objects.create(name='group1')
self.group2 = Group.objects.create(name='group2')
self.group3 = Group.objects.create(name='group3')
def test_empty(self):
result = get_users_with_perms(self.obj1)
self.assertTrue(isinstance(result, QuerySet))
self.assertEqual(list(result), [])
result = get_users_with_perms(self.obj1, attach_perms=True)
self.assertTrue(isinstance(result, dict))
self.assertFalse(bool(result))
def test_simple(self):
assign_perm("change_contenttype", self.user1, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user3, self.obj2)
result = get_users_with_perms(self.obj1)
result_vals = result.values_list('username', flat=True)
self.assertEqual(
set(result_vals),
set([user.username for user in (self.user1, self.user2)]),
)
def test_users_groups_perms(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group3, self.obj2)
result = get_users_with_perms(self.obj1).values_list('pk',
flat=True)
self.assertEqual(
set(result),
set([u.pk for u in (self.user1, self.user2)])
)
def test_users_groups_after_removal(self):
self.test_users_groups_perms()
remove_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1).values_list('pk',
flat=True)
self.assertEqual(
set(result),
set([self.user2.pk]),
)
def test_attach_perms(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group3, self.obj2)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("change_contenttype", self.user3, self.obj2)
# Check contenttype1
result = get_users_with_perms(self.obj1, attach_perms=True)
expected = {
self.user1: ["change_contenttype"],
self.user2: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
# Check contenttype2
result = get_users_with_perms(self.obj2, attach_perms=True)
expected = {
self.user3: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
def test_attach_groups_only_has_perms(self):
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, attach_perms=True)
expected = {self.user1: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_mixed(self):
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj2)
assign_perm("change_contenttype", self.user3, self.obj2)
assign_perm("change_%s" % user_module_name, self.user3, self.user1)
result = get_users_with_perms(self.obj1)
self.assertEqual(
set(result),
set([self.user1, self.user2]),
)
def test_with_superusers(self):
admin = User.objects.create(username='admin', is_superuser=True)
assign_perm("change_contenttype", self.user1, self.obj1)
result = get_users_with_perms(self.obj1, with_superusers=True)
self.assertEqual(
set(result),
set([self.user1, admin]),
)
def test_without_group_users(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.user2, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False)
expected = set([self.user2])
self.assertEqual(set(result), expected)
def test_without_group_users_but_perms_attached(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.user2, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False,
attach_perms=True)
expected = {self.user2: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_without_group_users_no_result(self):
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, attach_perms=True,
with_group_users=False)
expected = {}
self.assertEqual(result, expected)
def test_without_group_users_no_result_but_with_superusers(self):
admin = User.objects.create(username='admin', is_superuser=True)
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False,
with_superusers=True)
expected = [admin]
self.assertEqual(set(result), set(expected))
class GetGroupsWithPerms(TestCase):
"""
Tests get_groups_with_perms function.
"""
def setUp(self):
self.obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
self.obj2 = ContentType.objects.create(name='ct2', model='bar',
app_label='guardian-tests')
self.user1 = User.objects.create(username='user1')
self.user2 = User.objects.create(username='user2')
self.user3 = User.objects.create(username='user3')
self.group1 = Group.objects.create(name='group1')
self.group2 = Group.objects.create(name='group2')
self.group3 = Group.objects.create(name='group3')
def test_empty(self):
result = get_groups_with_perms(self.obj1)
self.assertTrue(isinstance(result, QuerySet))
self.assertFalse(bool(result))
result = get_groups_with_perms(self.obj1, attach_perms=True)
self.assertTrue(isinstance(result, dict))
self.assertFalse(bool(result))
def test_simple(self):
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1)
self.assertEqual(len(result), 1)
self.assertEqual(result[0], self.group1)
def test_simple_after_removal(self):
self.test_simple()
remove_perm("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1)
self.assertEqual(len(result), 0)
def test_simple_attach_perms(self):
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1, attach_perms=True)
expected = {self.group1: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_simple_attach_perms_after_removal(self):
self.test_simple_attach_perms()
remove_perm("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1, attach_perms=True)
self.assertEqual(len(result), 0)
def test_mixed(self):
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group1, self.obj2)
assign_perm("change_%s" % user_module_name, self.group1, self.user3)
assign_perm("change_contenttype", self.group2, self.obj2)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group2, self.obj1)
assign_perm("change_%s" % user_module_name, self.group3, self.user1)
result = get_groups_with_perms(self.obj1)
self.assertEqual(set(result), set([self.group1, self.group2]))
def test_mixed_attach_perms(self):
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group1, self.obj2)
assign_perm("change_group", self.group1, self.group3)
assign_perm("change_contenttype", self.group2, self.obj2)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group2, self.obj1)
assign_perm("change_group", self.group3, self.group1)
result = get_groups_with_perms(self.obj1, attach_perms=True)
expected = {
self.group1: ["change_contenttype"],
self.group2: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
class GetObjectsForUser(TestCase):
def setUp(self):
self.user = User.objects.create(username='joe')
self.group = Group.objects.create(name='group')
self.ctype = ContentType.objects.create(name='foo', model='bar',
app_label='fake-for-guardian-tests')
def test_superuser(self):
self.user.is_superuser = True
ctypes = ContentType.objects.all()
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes)
self.assertEqual(set(ctypes), set(objects))
def test_with_superuser_true(self):
self.user.is_superuser = True
ctypes = ContentType.objects.all()
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes, with_superuser=True)
self.assertEqual(set(ctypes), set(objects))
def test_with_superuser_false(self):
self.user.is_superuser = True
ctypes = ContentType.objects.all()
obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
assign_perm('change_contenttype', self.user, obj1)
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes, with_superuser=False)
self.assertEqual(set([obj1]), set(objects))
def test_anonymous(self):
self.user = AnonymousUser()
ctypes = ContentType.objects.all()
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes)
obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
assign_perm('change_contenttype', self.user, obj1)
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes)
self.assertEqual(set([obj1]), set(objects))
def test_mixed_perms(self):
codenames = [
get_user_permission_full_codename('change'),
'auth.change_permission',
]
self.assertRaises(MixedContentTypeError, get_objects_for_user,
self.user, codenames)
def test_perms_with_mixed_apps(self):
codenames = [
get_user_permission_full_codename('change'),
'contenttypes.change_contenttype',
]
self.assertRaises(MixedContentTypeError, get_objects_for_user,
self.user, codenames)
def test_mixed_perms_and_klass(self):
self.assertRaises(MixedContentTypeError, get_objects_for_user,
self.user, ['auth.change_group'], User)
def test_no_app_label_nor_klass(self):
self.assertRaises(WrongAppError, get_objects_for_user, self.user,
['change_group'])
def test_empty_perms_sequence(self):
self.assertEqual(
set(get_objects_for_user(self.user, [], Group.objects.all())),
set()
)
def test_perms_single(self):
perm = 'auth.change_group'
assign_perm(perm, self.user, self.group)
self.assertEqual(
set(get_objects_for_user(self.user, perm)),
set(get_objects_for_user(self.user, [perm])))
def test_klass_as_model(self):
assign_perm('contenttypes.change_contenttype', self.user, self.ctype)
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ContentType)
self.assertEqual([obj.name for obj in objects], [self.ctype.name])
def test_klass_as_manager(self):
assign_perm('auth.change_group', self.user, self.group)
objects = get_objects_for_user(self.user, ['auth.change_group'],
Group.objects)
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_klass_as_queryset(self):
assign_perm('auth.change_group', self.user, self.group)
objects = get_objects_for_user(self.user, ['auth.change_group'],
Group.objects.all())
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_ensure_returns_queryset(self):
objects = get_objects_for_user(self.user, ['auth.change_group'])
self.assertTrue(isinstance(objects, QuerySet))
def test_simple(self):
group_names = ['group1', 'group2', 'group3']
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign_perm('change_group', self.user, group)
objects = get_objects_for_user(self.user, ['auth.change_group'])
self.assertEqual(len(objects), len(groups))
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects),
set(groups))
def test_multiple_perms_to_check(self):
group_names = ['group1', 'group2', 'group3']
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign_perm('auth.change_group', self.user, group)
assign_perm('auth.delete_group', self.user, groups[1])
objects = get_objects_for_user(self.user, ['auth.change_group',
'auth.delete_group'])
self.assertEqual(len(objects), 1)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list('name', flat=True)),
set([groups[1].name]))
def test_multiple_perms_to_check_no_groups(self):
group_names = ['group1', 'group2', 'group3']
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign_perm('auth.change_group', self.user, group)
assign_perm('auth.delete_group', self.user, groups[1])
objects = get_objects_for_user(self.user, ['auth.change_group',
'auth.delete_group'], use_groups=False)
self.assertEqual(len(objects), 1)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list('name', flat=True)),
set([groups[1].name]))
def test_any_of_multiple_perms_to_check(self):
group_names = ['group1', 'group2', 'group3']
groups = [Group.objects.create(name=name) for name in group_names]
assign_perm('auth.change_group', self.user, groups[0])
assign_perm('auth.delete_group', self.user, groups[2])
objects = get_objects_for_user(self.user, ['auth.change_group',
'auth.delete_group'], any_perm=True)
self.assertEqual(len(objects), 2)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list('name', flat=True)),
set([groups[0].name, groups[2].name]))
def test_groups_perms(self):
group1 = Group.objects.create(name='group1')
group2 = Group.objects.create(name='group2')
group3 = Group.objects.create(name='group3')
groups = [group1, group2, group3]
for group in groups:
self.user.groups.add(group)
# Objects to operate on
ctypes = list(ContentType.objects.all().order_by('id'))
assign_perm('change_contenttype', self.user, ctypes[0])
assign_perm('change_contenttype', self.user, ctypes[1])
assign_perm('delete_contenttype', self.user, ctypes[1])
assign_perm('delete_contenttype', self.user, ctypes[2])
assign_perm('change_contenttype', groups[0], ctypes[3])
assign_perm('change_contenttype', groups[1], ctypes[3])
assign_perm('change_contenttype', groups[2], ctypes[4])
assign_perm('delete_contenttype', groups[0], ctypes[0])
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'])
self.assertEqual(
set(objects.values_list('id', flat=True)),
set(ctypes[i].id for i in [0, 1, 3, 4]))
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype',
'contenttypes.delete_contenttype'])
self.assertEqual(
set(objects.values_list('id', flat=True)),
set(ctypes[i].id for i in [0, 1]))
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'])
self.assertEqual(
set(objects.values_list('id', flat=True)),
set(ctypes[i].id for i in [0, 1, 3, 4]))
class GetObjectsForGroup(TestCase):
"""
Tests get_objects_for_group function.
"""
def setUp(self):
self.obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
self.obj2 = ContentType.objects.create(name='ct2', model='bar',
app_label='guardian-tests')
self.obj3 = ContentType.objects.create(name='ct3', model='baz',
app_label='guardian-tests')
self.user1 = User.objects.create(username='user1')
self.user2 = User.objects.create(username='user2')
self.user3 = User.objects.create(username='user3')
self.group1 = Group.objects.create(name='group1')
self.group2 = Group.objects.create(name='group2')
self.group3 = Group.objects.create(name='group3')
def test_mixed_perms(self):
codenames = [
get_user_permission_full_codename('change'),
'auth.change_permission',
]
self.assertRaises(MixedContentTypeError, get_objects_for_group,
self.group1, codenames)
def test_perms_with_mixed_apps(self):
codenames = [
get_user_permission_full_codename('change'),
'contenttypes.change_contenttype',
]
self.assertRaises(MixedContentTypeError, get_objects_for_group,
self.group1, codenames)
def test_mixed_perms_and_klass(self):
self.assertRaises(MixedContentTypeError, get_objects_for_group,
self.group1, ['auth.change_group'], User)
def test_no_app_label_nor_klass(self):
self.assertRaises(WrongAppError, get_objects_for_group, self.group1,
['change_contenttype'])
def test_empty_perms_sequence(self):
self.assertEqual(
set(get_objects_for_group(self.group1, [], ContentType)),
set()
)
def test_perms_single(self):
perm = 'contenttypes.change_contenttype'
assign_perm(perm, self.group1, self.obj1)
self.assertEqual(
set(get_objects_for_group(self.group1, perm)),
set(get_objects_for_group(self.group1, [perm]))
)
def test_klass_as_model(self):
assign_perm('contenttypes.change_contenttype', self.group1, self.obj1)
objects = get_objects_for_group(self.group1,
['contenttypes.change_contenttype'], ContentType)
self.assertEqual([obj.name for obj in objects], [self.obj1.name])
def test_klass_as_manager(self):
assign_perm('contenttypes.change_contenttype', self.group1, self.obj1)
objects = get_objects_for_group(self.group1, ['change_contenttype'],
ContentType.objects)
self.assertEqual(list(objects), [self.obj1])
def test_klass_as_queryset(self):
assign_perm('contenttypes.change_contenttype', self.group1, self.obj1)
objects = get_objects_for_group(self.group1, ['change_contenttype'],
ContentType.objects.all())
self.assertEqual(list(objects), [self.obj1])
def test_ensure_returns_queryset(self):
objects = get_objects_for_group(self.group1, ['contenttypes.change_contenttype'])
self.assertTrue(isinstance(objects, QuerySet))
def test_simple(self):
assign_perm('change_contenttype', self.group1, self.obj1)
assign_perm('change_contenttype', self.group1, self.obj2)
objects = get_objects_for_group(self.group1, 'contenttypes.change_contenttype')
self.assertEqual(len(objects), 2)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects),
set([self.obj1, self.obj2]))
def test_simple_after_removal(self):
self.test_simple()
remove_perm('change_contenttype', self.group1, self.obj1)
objects = get_objects_for_group(self.group1, 'contenttypes.change_contenttype')
self.assertEqual(len(objects), 1)
self.assertEqual(objects[0], self.obj2)
def test_multiple_perms_to_check(self):
assign_perm('change_contenttype', self.group1, self.obj1)
assign_perm('delete_contenttype', self.group1, self.obj1)
assign_perm('change_contenttype', self.group1, self.obj2)
objects = get_objects_for_group(self.group1, [
'contenttypes.change_contenttype',
'contenttypes.delete_contenttype'])
self.assertEqual(len(objects), 1)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(objects[0], self.obj1)
def test_any_of_multiple_perms_to_check(self):
assign_perm('change_contenttype', self.group1, self.obj1)
assign_perm('delete_contenttype', self.group1, self.obj1)
assign_perm('add_contenttype', self.group1, self.obj2)
assign_perm('delete_contenttype', self.group1, self.obj3)
objects = get_objects_for_group(self.group1,
['contenttypes.change_contenttype',
'contenttypes.delete_contenttype'], any_perm=True)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual([obj for obj in objects.order_by('name')],
[self.obj1, self.obj3])
def test_results_for_different_groups_are_correct(self):
assign_perm('change_contenttype', self.group1, self.obj1)
assign_perm('delete_contenttype', self.group2, self.obj2)
self.assertEqual(set(get_objects_for_group(self.group1, 'contenttypes.change_contenttype')),
set([self.obj1]))
self.assertEqual(set(get_objects_for_group(self.group2, 'contenttypes.change_contenttype')),
set())
self.assertEqual(set(get_objects_for_group(self.group2, 'contenttypes.delete_contenttype')),
set([self.obj2]))
|
|
#!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script for plotting various data sets from the servo controllers."""
import collections
import ctypes
import importlib
import signal
import socket
import sys
import time
from makani.avionics.common import aio
from makani.avionics.common import pack_avionics_messages as avionics_messages
from makani.avionics.common import servo_types
from makani.avionics.firmware.monitors import servo_types as servo_monitor_types
from makani.avionics.network import aio_node
from makani.avionics.servo.firmware import r22_types
from makani.lib.python import c_helpers
from makani.lib.python import ctype_util
import numpy
from PySide import QtCore
from PySide import QtGui
aio_node_helper = c_helpers.EnumHelper('AioNode', aio_node)
servo_status_bits = c_helpers.EnumHelper('ServoStatusFlag', servo_types,
'kServoStatus')
servo_warning_bits = c_helpers.EnumHelper('ServoWarningFlag', servo_types,
'kServoWarning')
servo_error_bits = c_helpers.EnumHelper('ServoErrorFlag', servo_types,
'kServoError')
r22_status_bits = c_helpers.EnumHelper('R22StatusBit', r22_types)
# Must be included after PySide in order to force pyqtgraph to use it.
pyqtgraph = importlib.import_module('pyqtgraph')
dockarea = importlib.import_module('pyqtgraph.dockarea')
Alias = collections.namedtuple('Alias', ['base_name'])
Operation = collections.namedtuple('Operation', ['function', 'dtype'])
def MultiplyOp(param, factor):
return Operation(lambda c, s, d: factor * d[s][param][c], numpy.float64)
def CountsToDegreesOp(param, counts):
return Operation(
lambda c, s, d: numpy.mod(d[s][param][c] * 360.0 / counts, 360.0) - 180.0,
numpy.float64)
def RadiansToDegreesOp(param):
return MultiplyOp(param, 180.0 / numpy.pi)
def DeltaFirstSourceOp(param):
return Operation(lambda c, s, d: d[s][param][c] - d[0][param][c],
numpy.float64)
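# Illustrative sketch (not part of the original script): an Operation receives
# the column index 'c', the source row 's', and the list of per-source data
# buffers 'd', and returns a single derived sample. A hypothetical operation
# summing two existing parameters could be written as:
#
#   def SumOp(param_a, param_b):
#     return Operation(lambda c, s, d: d[s][param_a][c] + d[s][param_b][c],
#                      numpy.float64)
#
# An Alias, by contrast, simply exposes an existing buffer under another name.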
class PlotDockArea(dockarea.DockArea):
"""Create plot dock area for GUI.
This class handles all plotting functionality in the main dock area. Add
the instantiated object to a QtGui layout, and then call the appropriate
create plots function(s).
"""
def __init__(self):
super(PlotDockArea, self).__init__()
self._lines = {}
self._bits = {}
self._plots = []
def _AddLine(self, node, name, plot):
"""Add a new data signal to plot redraw list."""
if node not in self._lines:
self._lines[node] = []
self._lines[node].append((name, plot))
def _AddBit(self, node, name, bit, offset, plot):
"""Add a new bit data signal to plot redraw list from a bit field."""
if node not in self._bits:
self._bits[node] = []
self._bits[node].append((name, bit, offset, plot))
def _GetPlotPen(self, servos, signals):
"""Helper function to generate different pen colors."""
signals = [i for i, sig in enumerate(signals) if sig]
pen = {}
for i, (_, node) in enumerate(servos):
for j, sig in enumerate(signals):
pen[(node, sig)] = i * len(signals) + j
return (pen, len(pen))
def _NewPlot(self, title):
"""Helper function to generate new plots with default options."""
dock = dockarea.Dock(name=title)
glw = pyqtgraph.GraphicsLayoutWidget()
dock.addWidget(glw)
p = glw.addPlot(title=title)
p.showGrid(True, True)
p.setMouseEnabled(x=False)
self._plots.append(p)
return (p, dock)
def _PlotAngleBias(self, servos):
"""Plot servo angle bias estimates (for paired servos)."""
return self.CreatePlot(servos, 'Angle bias', 'Bias', 'deg', ['ang_bias'])
def _PlotVelocity(self, servos):
"""Plot servo velocity measurements."""
return self.CreatePlot(servos, 'Velocity', 'Velocity', 'deg/s', ['vel_m'])
def _PlotCurrent(self, servos):
"""Plot servo current measurements."""
return self.CreatePlot(servos, 'Current', 'Current', 'A',
['cur_m', 'cur_limit', 'cur_nlimit'])
def _PlotLineVoltage(self, servos):
"""Plot servo line voltage measurements."""
return self.CreatePlot(servos, 'Line voltage', 'Line voltage', 'V',
['v_lv_in_a', 'v_lv_in_b', 'v_servo'])
def _PlotAngleError(self, servos):
"""Plot servo angle error measurements (relative to first servo)."""
return self.CreatePlot(servos[1:],
'Angle error (rel {0})'.format(servos[0][0]),
'Angle', 'deg', ['ang_err'])
def _PlotVelocityError(self, servos):
"""Plot servo velocity error measurements (relative to first servo)."""
return self.CreatePlot(servos[1:],
'Velocity error (rel {0})'.format(servos[0][0]),
'Velocity', 'deg/s', ['vel_err'])
def _PlotCurrentError(self, servos):
"""Plot servo current error measurements (relative to first servo)."""
return self.CreatePlot(servos[1:],
'Current error (rel {0})'.format(servos[0][0]),
'Current', 'A', ['cur_err'])
def CreateLogicPlot(self, servos, title, bits):
"""Plot status bits."""
(p, dock) = self._NewPlot(title)
p.addLegend()
p.setLabel('bottom', 'Time')
p.setLabel('left', 'Status bits')
p.setYRange(0.0, len(bits))
p.setMouseEnabled(x=False, y=False)
(pen, pens) = self._GetPlotPen(servos, [True] * len(bits))
for name, node in servos:
for idx, bitinfo in enumerate(bits):
param, enum, shortname = bitinfo
value = enum.Value(shortname)
# Determine which bit is set.
bit = bin(value)[::-1].index('1')
bit_name = name + ' ' + shortname
self._AddBit(node, param, bit, len(bits) - idx - 1,
p.plot(name=bit_name, pen=(pen[(node, idx)], pens)))
return dock
def CreatePlot(self, actuators, title, ylabel, yunit, params):
"""Plot a list of params."""
(p, dock) = self._NewPlot(title)
p.addLegend()
p.setLabel('bottom', 'Time')
p.setLabel('left', ylabel, yunit)
(pen, pens) = self._GetPlotPen(actuators, [True] * len(params))
for name, node in actuators:
for i, param in enumerate(params):
plot_label = name + ' ' + param
self._AddLine(node, param,
p.plot(name=plot_label, pen=(pen[(node, i)], pens)))
return dock
def Redraw(self, data, history):
"""Redraw all plots with new data."""
data.lock()
a, b = data.GetIndices(history)
for node, line in self._lines.items():
for (name, plot) in line:
plot.setData(x=data.time[a:b], y=data.GetData(name, node, a, b))
for node, line in self._bits.items():
for (name, bit, offset, plot) in line:
plot.setData(x=data.time[a:b],
y=offset + 0.9 * data.GetDataBit(name, bit, node, a, b))
data.unlock()
for p in self._plots:
p.setXRange(-history, 0.0)
def ClearPlots(self):
"""Clear all plot windows."""
self._lines = {}
self._bits = {}
self._plots = []
# pylint: disable=invalid-name
if self.topContainer:
self.topContainer.close()
self.topContainer = None
def CreateCommandPlots(self, servos):
"""Create servo command plots."""
return [self.CreatePlot(servos, 'Angle command', 'Angle', 'deg',
['ang_m', 'ang_cmd'])]
def CreateEstimatorPlots(self, servos):
"""Create servo estimator plots."""
return [self.CreatePlot(servos, 'Angle estimate', 'Angle', 'deg',
['ang_m', 'ang_est']),
self._PlotVelocity(servos)]
def CreateStatusPlots(self, servos):
"""Create servo status bit plots."""
servo_status = [('flags.status', servo_status_bits, 'Paired'),
('flags.status', servo_status_bits, 'Commanded'),
('flags.status', servo_status_bits, 'Armed'),
('flags.status', servo_status_bits, 'Reset'),
('flags.warning', servo_warning_bits, 'PairTimeout'),
('flags.warning', servo_warning_bits, 'PairFailed')]
r22_supply = ['ShortCircuitDetected',
'OverVoltage',
'UnderVoltage',
'CurrentOutputLimited',
'VoltageOutputLimited']
r22_feedback = ['FeedbackError',
'MotorPhasingError',
'EnableInputNotActive',
'DriveFault']
return [
self.CreateLogicPlot(servos, 'Servo status', servo_status),
self.CreateLogicPlot(
servos, 'R22 supply',
[('r22.status_bits', r22_status_bits, x) for x in r22_supply]),
self.CreateLogicPlot(
servos, 'R22 feedback',
[('r22.status_bits', r22_status_bits, x) for x in r22_feedback])]
def CreateCurrentPlots(self, servos):
"""Create servo current plots."""
return [self._PlotCurrent(servos)]
def CreateVoltagePlots(self, servos):
"""Create servo voltage plots."""
return [self._PlotLineVoltage(servos)]
def CreatePairedPlots(self, servos):
"""Create paired servo plots."""
return [self._PlotAngleBias(servos),
self._PlotAngleError(servos),
self._PlotVelocityError(servos),
self._PlotCurrentError(servos)]
def BuildPlotStack(self, plots, position):
"""Add plots to stack and bring first plot to foreground."""
self.addDock(plots[0], position)
for prev, plot in enumerate(plots[1:]):
self.addDock(plot, 'below', plots[prev])
# Bring first plot to foreground.
if len(plots) > 1:
stack = plots[0].container().stack
current = stack.currentWidget()
current.label.setDim(True)
stack.setCurrentWidget(plots[0])
plots[0].label.setDim(False)
class MainWindow(QtGui.QMainWindow):
"""Create main window for GUI.
This class handles the main window, user interface, and plot display.
"""
def __init__(self, history=60):
super(MainWindow, self).__init__()
self._threads = []
self._history = history
self._redraw_timer = QtCore.QTimer(self)
self._InitUserInterface(history)
self.connect(self._redraw_timer, QtCore.SIGNAL('timeout()'), self._Redraw)
def _InitUserInterface(self, history):
"""Initialize widgets and layout of user interface."""
central_widget = QtGui.QWidget(self)
# Command line.
command_cbox = QtGui.QComboBox(self)
command_cbox.setEditable(True)
command_cbox.lineEdit().returnPressed.connect(self._HandleCommandRequest)
self._command_cbox = command_cbox
# Time history.
history_sbox = QtGui.QSpinBox(self)
history_sbox.setRange(1, history)
history_sbox.setSingleStep(1)
history_sbox.setSuffix(' s')
history_sbox.setValue(history)
history_sbox.valueChanged.connect(self._HandleHistoryLength)
self._history_sbox = history_sbox
# Refresh rate.
refresh_sbox = QtGui.QSpinBox(self)
refresh_sbox.setSuffix(' Hz')
refresh_sbox.setValue(20)
refresh_sbox.valueChanged.connect(self._HandleRedrawRate)
self._refresh_sbox = refresh_sbox
# Pause button.
self._pause_btn = QtGui.QPushButton('Pause', self)
self._pause_btn.clicked.connect(self._HandlePauseButton)
# Plot area.
self._plots = PlotDockArea()
# Status message.
self._status_message = QtGui.QLabel('', self)
# Layout.
hbox = QtGui.QHBoxLayout()
hbox.addWidget(QtGui.QLabel('Command:', self))
hbox.addWidget(self._command_cbox, stretch=1)
hbox.addWidget(QtGui.QLabel('History:', self))
hbox.addWidget(self._history_sbox)
hbox.addWidget(QtGui.QLabel('Refresh:', self))
hbox.addWidget(self._refresh_sbox)
hbox.addWidget(self._pause_btn)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(hbox)
vbox.addWidget(self._plots, stretch=1)
central_widget.setLayout(vbox)
# Main window.
self.setCentralWidget(central_widget)
self.setGeometry(300, 150, 1200, 1000)
self.setWindowTitle('Servo Plotter')
self.statusBar().addWidget(self._status_message)
self._SetRedrawRate(refresh_sbox.value())
self.show()
def _SelectServoSources(self, sources, history):
"""Close existing plots, then create new plots for specified servos."""
self._TryCloseThreads()
self._servo_status = ServoStatusBuffer(allowed_sources=sources,
period=0.01, history=history)
self._servo_status.start()
self._data_source = self._servo_status
self._threads.append(self._servo_status)
def _SelectMotorSources(self, sources, history):
"""Close existing plots, then create new plots for specified motors."""
self._TryCloseThreads()
self._motor_status = MotorStatusBuffer(allowed_sources=sources,
period=0.01, history=history)
self._motor_status.start()
self._data_source = self._motor_status
self._threads.append(self._motor_status)
def _HandleCommandRequest(self):
"""Handle a user command from text entry."""
text = self._command_cbox.currentText()
try:
command, param = text.split(' ', 1)
except ValueError:
command = text
param = ''
handlers = {'select': self._HandleSelectCommand}
command = command.lower()
if command in handlers:
handlers[command](param)
return
self._PrintError('Unknown command: %s' % command)
def _HandleSelectCommand(self, param):
"""Handle user select command."""
params = param.split()
# Possible nodes to select.
servo_nodes = [n for n, _ in aio_node_helper
if n.startswith('kAioNodeServo')]
motor_nodes = [n for n, _ in aio_node_helper
if n.startswith('kAioNodeMotor')]
# Parse node selection.
selected_servos = [s for s in params
if 'kAioNodeServo' + s.capitalize() in servo_nodes]
selected_motors = [m for m in params
if 'kAioNodeMotor' + m.capitalize() in motor_nodes]
for s in selected_servos + selected_motors:
params.remove(s)
plots = []
actuators = []
if selected_servos and selected_motors:
# Only one source type can be selected at a time.
self._PrintError('Only one source type (servos or motors) can be '
'selected at a time')
elif selected_servos:
# Servos were selected.
sources = ['kAioNodeServo' + s.capitalize() for s in selected_servos]
actuators = [(aio_node_helper.ShortName(s), aio_node_helper.Value(s))
for s in sources]
# Select data sources.
self._SelectServoSources(sources, self._history)
# Possible plots to select.
plot_types = {'cmd': (self._plots.CreateCommandPlots, 'top'),
'est': (self._plots.CreateEstimatorPlots, 'top'),
'status': (self._plots.CreateStatusPlots, 'top'),
'cur': (self._plots.CreateCurrentPlots, 'top'),
'volt': (self._plots.CreateVoltagePlots, 'top'),
'paired': (self._plots.CreatePairedPlots, 'bottom')}
# Parse plot selection.
plot_params = [p for p in params if p.lower() in plot_types]
for p in plot_params:
params.remove(p)
# Custom plots.
for p in [p for p in params if p in self._servo_status.GetParams()]:
params.remove(p)
def _GenerateCustomPlot(actuators, p=p):
return [self._plots.CreatePlot(actuators, p, p, '', [p])]
plots.append((_GenerateCustomPlot, 'top'))
# Add default plot selection and add standard plot types to plot list.
if not plot_params and not plots:
plot_params = ['cmd']
for p in plot_params:
plots.append(plot_types[p.lower()])
self._PrintMessage('Selected servos: %s' % ', '.join(selected_servos))
elif selected_motors:
# Motors were selected.
sources = ['kAioNodeMotor' + s.capitalize() for s in selected_motors]
actuators = [(aio_node_helper.ShortName(s), aio_node_helper.Value(s))
for s in sources]
# Select data sources.
self._SelectMotorSources(sources, self._history)
# Custom plots.
for p in [p for p in params if p in self._motor_status.GetParams()]:
params.remove(p)
def _GenerateCustomPlot(actuators, p=p):
return [self._plots.CreatePlot(actuators, p, p, '', [p])]
plots.append((_GenerateCustomPlot, 'top'))
self._PrintMessage('Selected motors: %s' % ', '.join(selected_motors))
else:
# No nodes were selected.
self._PrintError('No nodes were selected')
if params:
self._PrintError('Unknown parameters: %s' % ' '.join(params))
# Create plots.
stacks = {}
self._plots.ClearPlots()
for plot in plots:
func, stack = plot
if stack not in stacks:
stacks[stack] = []
stacks[stack].extend(func(actuators))
for key, value in stacks.iteritems():
self._plots.BuildPlotStack(value, key)
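# Example command strings this handler is expected to parse (node short names
# below are illustrative; the valid ones come from aio_node_helper):
#
#   select a1 a2 cmd status   -> command and status plots for servos A1 and A2
#   select a1 a2 paired       -> paired-servo comparison plots
#   select a1 ang_est         -> a custom plot of a raw parameter by name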
def _HandleRedrawRate(self, value):
"""Handle change to plot refresh rate."""
self._SetRedrawRate(value)
def _HandleHistoryLength(self, value):
"""Handle change to history length."""
pass
def _HandlePauseButton(self):
"""Handle toggling of pause button."""
if self._pause_btn.text() == 'Pause':
self._StopRedraw()
else:
self._StartRedraw()
def _SetRedrawRate(self, hz):
"""Set plot redraw rate."""
if hz > 0:
self._redraw_timer.start(int(1000.0 / hz))
self._StartRedraw()
else:
self._StopRedraw()
def _StartRedraw(self):
"""Start plot redraw timer."""
self._redraw_timer.start()
palette = self._pause_btn.palette()
palette.setColor(QtGui.QPalette.Button, QtCore.Qt.green)
self._pause_btn.setText('Pause')
self._pause_btn.setAutoFillBackground(True)
self._pause_btn.setPalette(palette)
self._pause_btn.update()
def _StopRedraw(self):
"""Stop plot redraw timer."""
self._redraw_timer.stop()
palette = self._pause_btn.palette()
palette.setColor(QtGui.QPalette.Button, QtCore.Qt.red)
self._pause_btn.setText('Paused')
self._pause_btn.setAutoFillBackground(True)
self._pause_btn.setPalette(palette)
self._pause_btn.update()
def _Redraw(self):
"""Redraw plots."""
if hasattr(self, '_data_source'):
self._plots.Redraw(self._data_source, self._history_sbox.value())
def _PrintMessage(self, msg):
"""Print status message."""
self._status_message.setText(msg)
def _PrintError(self, error):
"""Print error message."""
self._PrintMessage('ERROR: ' + error)
def _TryCloseThreads(self):
"""Try to close running threads."""
for thread in self._threads:
thread.should_exit = True
for thread in self._threads:
if thread.isRunning():
thread.wait(2000)
if thread.isRunning():
self._PrintError('Could not terminate {:s}'.format(thread))
self.close()
self._threads = []
def closeEvent(self, event):
"""Override close event in order to close threads."""
self._TryCloseThreads()
event.accept()
class AioDataStream(QtCore.QThread, QtCore.QMutex):
"""Handle incoming AIO data.
This class provides a general interface for handling a circular buffer of
network data.
"""
def __init__(self, allowed_sources, message_type, message_template, period,
history, parent=None):
QtCore.QThread.__init__(self, parent)
QtCore.QMutex.__init__(self)
self.should_exit = False
self._half_size = int(numpy.ceil(history / period))
self._buffer_size = 2 * self._half_size
self._period = period
self._head = 0
self._timestamp = time.time()
self._source_map = {aio_node_helper.Value(x): i
for i, x in enumerate(allowed_sources)}
self._aio_client = aio.AioClient(message_types=[message_type],
allowed_sources=allowed_sources,
timeout=0.2)
self.time = period * numpy.arange(-self._half_size + 1, 1)
self._data = [None] * len(allowed_sources)
self._derived_params = collections.OrderedDict()
message_dict = ctype_util.CTypeToPython(message_template)
for i in range(len(self._data)):
self._data[i] = self._InitBuffers(message_dict)
def run(self): # QThread Virtual function.
"""Poll for new messages."""
while not self.should_exit:
try:
(_, header, message) = self._aio_client.Recv()
self.lock()
message_dict = ctype_util.CTypeToPython(message)
self.HandleMessage(header, message_dict, time.time())
self.unlock()
except socket.timeout:
pass
self._aio_client.Close()
def HandleMessage(self, header, message, timestamp):
"""Handle new messages."""
row = self._source_map[header.source]
dt = timestamp - self._timestamp
count = min(int(dt / self._period), self._half_size)
if count:
# Advance position in circular buffer.
self.ZeroOrderHold(self._head, count)
shadow = numpy.mod(self._head + self._half_size, self._buffer_size)
self.ZeroOrderHold(shadow, count)
self._head = numpy.mod(self._head + count, self._buffer_size)
self._timestamp = timestamp
self.ExtractData(row, self._head, header.type, message)
shadow = numpy.mod(self._head + self._half_size, self._buffer_size)
self.ExtractData(row, shadow, header.type, message)
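# Note on the buffering scheme above (added commentary): the buffer is twice as
# long as the requested history, and every sample is written at two positions
# half a buffer apart (the "shadow" copy). This lets GetData()/GetDataBit()
# return a contiguous, chronologically ordered slice without special handling
# of the circular buffer's wrap-around point.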
def _GetNumpyType(self, value):
if isinstance(value, (float, ctypes.c_float, ctypes.c_double,
ctypes.c_longdouble)):
return numpy.float64
if isinstance(value, (int, ctypes.c_int, ctypes.c_char, ctypes.c_wchar,
ctypes.c_byte, ctypes.c_short)):
return numpy.int32
if isinstance(value, (ctypes.c_ubyte, ctypes.c_ushort, ctypes.c_uint,
ctypes.c_bool)):
return numpy.uint32
if isinstance(value, (long, ctypes.c_long, ctypes.c_longlong)):
return numpy.int64
if isinstance(value, (ctypes.c_ulong, ctypes.c_ulonglong)):
return numpy.uint64
raise TypeError('Unsupported type encountered in conversion to numpy array')
def _InitBuffers(self, message, databuf=None, prefix=''):
if databuf is None:
databuf = {}
if isinstance(message, dict):
for param, value in message.iteritems():
self._InitBuffers(value, databuf, prefix + '.' + param)
elif isinstance(message, list):
for i in range(len(message)):
self._InitBuffers(message[i], databuf, prefix + '[%d]' % i)
elif message is not None and not isinstance(message, str):
databuf[prefix[1:]] = numpy.zeros(self._buffer_size,
dtype=self._GetNumpyType(message))
return databuf
def _ExtractData(self, col, message, databuf, prefix=''):
if isinstance(message, dict):
for param, value in message.iteritems():
self._ExtractData(col, value, databuf,
prefix + '.' + param)
elif isinstance(message, list):
for i in range(len(message)):
self._ExtractData(col, message[i], databuf, prefix + '[%d]' % i)
elif message is not None and not isinstance(message, str):
databuf[prefix[1:]][col] = message
def ExtractData(self, source, col, message_type, message):
"""Extract data from message and insert into circular buffer."""
self._ExtractData(col, message, self._data[source])
# Derive parameters.
for param_name, operation in self._derived_params.iteritems():
for source in range(len(self._source_map)):
self._data[source][param_name][col] = operation(col, source, self._data)
def ZeroOrderHold(self, start, count):
"""Duplicate data at start index for the following count indices."""
for databuf in self._data:
for arr in databuf.values():
for index in range(start + 1, start + 1 + count):
arr[index % self._buffer_size] = arr[start]
def DeriveParam(self, name, operation):
"""Derive a new parameter based on other message parameters.
Derived parameters are referred to by name, and consist of an operation to
be performed on input data. The operation has a function which performs
the actual computation to produce a specific sample of that parameter, and
the data type which is used to allocate the array containing the resulting
data values.
Args:
name: The name used to plot the derived parameter.
operation: The calculation applied to the data buffer to compute the
derived parameter value.
"""
if isinstance(operation, Alias):
for i in range(len(self._data)):
self._data[i][name] = self._data[i][operation.base_name]
elif isinstance(operation, Operation):
if name in self._derived_params:
del self._derived_params[name]
self._derived_params[name] = operation.function
for i in range(len(self._data)):
self._data[i][name] = numpy.zeros(self._buffer_size,
dtype=operation.dtype)
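# Illustrative examples of DeriveParam (mirroring ServoStatusBuffer below; the
# field names are only meaningful for that message type):
#
#   self.DeriveParam('ang_m', RadiansToDegreesOp('angle_measured'))  # Operation
#   self.DeriveParam('ang_err', DeltaFirstSourceOp('ang_m'))         # chained
#   self.DeriveParam('v_servo', Alias('servo_mon.analog_data[%d]'
#       % servo_monitor_types.kServoAnalogVoltageVServo))            # Alias
#
# An Operation allocates a new buffer and is recomputed for every new sample,
# while an Alias just points a new plot name at an existing buffer.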
def GetIndices(self, history):
"""Get circular buffer indices for a given buffer length (history)."""
b = self._half_size
a = max(int(numpy.ceil(self._half_size - history / self._period)), 0)
return (a, b)
def GetData(self, name, node, a, b):
"""Extract data from circular buffer."""
offset = numpy.mod(self._head, self._half_size) + 1
row = self._source_map[node]
a += offset
b += offset
return self._data[row][name][a:b]
def GetDataBit(self, name, bit, node, a, b):
"""Extract data from a bitmask from circular buffer."""
offset = numpy.mod(self._head, self._half_size) + 1
row = self._source_map[node]
a += offset
b += offset
return (self._data[row][name][a:b] & (1 << bit)) >> bit
def GetParams(self):
return self._data[0].keys()
class MotorStatusBuffer(AioDataStream):
"""Handle incoming AIO data with message type of MotorStatusMessage."""
def __init__(self, allowed_sources, period, history, parent=None):
"""Initialize circular buffer data storage."""
super(MotorStatusBuffer, self).__init__(
message_type='kMessageTypeMotorStatus',
message_template=avionics_messages.MotorStatusMessage(),
allowed_sources=allowed_sources,
period=period, history=history, parent=parent)
self.DeriveParam('vref', Operation(
lambda c, s, d: d[s]['vd'][c]**2 + d[s]['vq'][c]**2,
numpy.float64))
class ServoStatusBuffer(AioDataStream):
"""Handle incoming AIO data with message type of ServoStatusMessage."""
def __init__(self, allowed_sources, period, history, parent=None):
"""Initialize circular buffer data storage."""
super(ServoStatusBuffer, self).__init__(
message_type='kMessageTypeServoStatus',
message_template=avionics_messages.ServoStatusMessage(),
allowed_sources=allowed_sources,
period=period, history=history, parent=parent)
self.DeriveParam('ang_m', RadiansToDegreesOp('angle_measured'))
self.DeriveParam('ang_err', DeltaFirstSourceOp('ang_m'))
self.DeriveParam('ang_bias', RadiansToDegreesOp('angle_bias'))
self.DeriveParam('ang_fb', RadiansToDegreesOp('angle_feedback'))
self.DeriveParam('ang_est', RadiansToDegreesOp('angle_estimate'))
self.DeriveParam('ang_var', RadiansToDegreesOp('angle_variance'))
self.DeriveParam('ang_cmd', RadiansToDegreesOp('angle_desired'))
self.DeriveParam('vel_m', RadiansToDegreesOp('angular_velocity'))
self.DeriveParam('vel_err', DeltaFirstSourceOp('vel_m'))
self.DeriveParam('cur_m', MultiplyOp('r22.current', 0.01))
self.DeriveParam('cur_err', DeltaFirstSourceOp('cur_m'))
self.DeriveParam('cur_limit', MultiplyOp('r22.current_limit', 0.01))
self.DeriveParam('cur_nlimit', MultiplyOp('cur_limit', -1))
self.DeriveParam('i_servo', Alias(
'servo_mon.analog_data[%d]'
% servo_monitor_types.kServoAnalogVoltageIServo))
self.DeriveParam('v_lv_in_a', Alias(
'servo_mon.analog_data[%d]'
% servo_monitor_types.kServoAnalogVoltageLvA))
self.DeriveParam('v_lv_in_b', Alias(
'servo_mon.analog_data[%d]'
% servo_monitor_types.kServoAnalogVoltageLvB))
self.DeriveParam('v_servo', Alias(
'servo_mon.analog_data[%d]'
% servo_monitor_types.kServoAnalogVoltageVServo))
def _HandleSigint(*unused_args): # pylint: disable=invalid-name
"""Quit the application when SIGINT is received."""
sys.stderr.write('\n')
QtGui.QApplication.quit()
def main(argv):
"""Entry point."""
signal.signal(signal.SIGINT, _HandleSigint)
app = QtGui.QApplication(argv)
unused_win = MainWindow()
sys.exit(app.exec_())
if __name__ == '__main__':
main(sys.argv)
|
|
"""Here is defined the AttributeSet class."""
import re
import sys
import warnings
import pickle
import numpy as np
from . import hdf5extension
from .utils import SizeType
from .registry import class_name_dict
from .exceptions import ClosedNodeError, PerformanceWarning
from .path import check_attribute_name
from .undoredo import attr_to_shadow
from .filters import Filters
# System attributes
SYS_ATTRS = ["CLASS", "VERSION", "TITLE", "NROWS", "EXTDIM",
"ENCODING", "PYTABLES_FORMAT_VERSION",
"FLAVOR", "FILTERS", "AUTO_INDEX",
"DIRTY", "NODE_TYPE", "NODE_TYPE_VERSION",
"PSEUDOATOM"]
# Prefixes of other system attributes
SYS_ATTRS_PREFIXES = ["FIELD_"]
# RO_ATTRS has been disabled so that users can modify these attributes if
# they want to. The user is still not allowed to remove or rename
# system attributes. Francesc Alted 2004-12-19
# Read-only attributes:
# RO_ATTRS = ["CLASS", "FLAVOR", "VERSION", "NROWS", "EXTDIM",
# "PYTABLES_FORMAT_VERSION", "FILTERS",
# "NODE_TYPE", "NODE_TYPE_VERSION"]
# RO_ATTRS = []
# The next attributes are not meant to be copied during a Node copy process
SYS_ATTRS_NOTTOBECOPIED = ["CLASS", "VERSION", "TITLE", "NROWS", "EXTDIM",
"PYTABLES_FORMAT_VERSION", "FILTERS", "ENCODING"]
# Attributes forced to be copied during node copies
FORCE_COPY_CLASS = ['CLASS', 'VERSION']
# Regular expression for column default values.
_field_fill_re = re.compile('^FIELD_[0-9]+_FILL$')
# Regular expression for fixing old pickled filters.
_old_filters_re = re.compile(br'\(([ic])tables\.Leaf\n')
# Fixed version of the previous string.
_new_filters_sub = br'(\1tables.filters\n'
def issysattrname(name):
"""Check if a name is a system attribute or not"""
return bool(name in SYS_ATTRS or np.prod(
[name.startswith(prefix) for prefix in SYS_ATTRS_PREFIXES]))
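# For example (illustrative):
#   issysattrname("TITLE")        -> True   (listed in SYS_ATTRS)
#   issysattrname("FIELD_0_FILL") -> True   (matches the "FIELD_" prefix)
#   issysattrname("my_attr")      -> False  (treated as a user attribute)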
class AttributeSet(hdf5extension.AttributeSet):
"""Container for the HDF5 attributes of a Node.
This class provides methods to create new HDF5 node attributes,
and to get, rename or delete existing ones.
Like in Group instances (see :ref:`GroupClassDescr`), AttributeSet
instances make use of the *natural naming* convention, i.e. you can
access the attributes on disk as if they were normal Python
attributes of the AttributeSet instance.
This offers the user a very convenient way to access HDF5 node
attributes. However, for this reason and in order not to pollute the
object namespace, one cannot assign *normal* attributes to
AttributeSet instances, and their members use names which start with
special prefixes, as happens with Group objects.
.. rubric:: Notes on native and pickled attributes
The values of most basic types are saved as HDF5 native data in the
HDF5 file. This includes Python bool, int, float, complex and str
(but not long nor unicode) values, as well as their NumPy scalar
versions and homogeneous or *structured* NumPy arrays of them. When
read, these values are always loaded as NumPy scalar or array
objects, as needed.
For that reason, attributes in native HDF5 files will always be
mapped into NumPy objects. Specifically, a multidimensional
attribute will be mapped into a multidimensional ndarray and a
scalar will be mapped into a NumPy scalar object (for example, a
scalar H5T_NATIVE_LLONG will be read and returned as a numpy.int64
scalar).
However, other kinds of values are serialized using pickle, so you
will only be able to correctly retrieve them using a Python-aware
HDF5 library. Thus, if you want to save Python scalar values and
make sure you are able to read them with generic HDF5 tools, you
should make use of *scalar or homogeneous/structured array NumPy
objects* (for example, numpy.int64(1) or numpy.array([1, 2, 3],
dtype='int16')).
One more piece of advice: because of the various potential difficulties in
restoring a Python object stored in an attribute, you may end up
getting a pickle string where a Python object is expected. If this
is the case, you may wish to run pickle.loads() on that string to
get an idea of where things went wrong, as shown in this example::
>>> import os, tempfile
>>> import tables as tb
>>>
>>> class MyClass:
... foo = 'bar'
...
>>> myObject = MyClass() # save object of custom class in HDF5 attr
>>> h5fname = tempfile.mktemp(suffix='.h5')
>>> h5f = tb.open_file(h5fname, 'w')
>>> h5f.root._v_attrs.obj = myObject # store the object
>>> print(h5f.root._v_attrs.obj.foo) # retrieve it
bar
>>> h5f.close()
>>>
>>> del MyClass, myObject # delete class of object and reopen file
>>> h5f = tb.open_file(h5fname, 'r')
>>> print(repr(h5f.root._v_attrs.obj))
b'ccopy_reg\\n_reconstructor...
>>> import pickle # let's unpickle that to see what went wrong
>>> pickle.loads(h5f.root._v_attrs.obj)
Traceback (most recent call last):
...
AttributeError: Can't get attribute 'MyClass' ...
>>> # So the problem was not in the stored object,
... # but in the *environment* where it was restored.
... h5f.close()
>>> os.remove(h5fname)
.. rubric:: Notes on AttributeSet methods
Note that this class overrides the __getattr__(), __setattr__(),
__delattr__() and __dir__() special methods. This allows you to
read, assign or delete attributes on disk by just using the next
constructs::
leaf.attrs.myattr = 'str attr' # set a string (native support)
leaf.attrs.myattr2 = 3 # set an integer (native support)
leaf.attrs.myattr3 = [3, (1, 2)] # a generic object (Pickled)
attrib = leaf.attrs.myattr # get the attribute ``myattr``
del leaf.attrs.myattr # delete the attribute ``myattr``
In addition, the dictionary-like __getitem__(), __setitem__() and
__delitem__() methods are available, so you may write things like
this::
for name in node._v_attrs._f_list():
print("name: %s, value: %s" % (name, node._v_attrs[name]))
Use whatever idiom you prefer to access the attributes.
Finally, in interactive Python sessions you may get autocompletion of
attributes named as *valid Python identifiers* by pressing the `[Tab]`
key, or by using the dir() global function.
If an attribute is set on a target node that already has a large
number of attributes, a PerformanceWarning will be issued.
.. rubric:: AttributeSet attributes
.. attribute:: _v_attrnames
A list with all attribute names.
.. attribute:: _v_attrnamessys
A list with system attribute names.
.. attribute:: _v_attrnamesuser
A list with user attribute names.
.. attribute:: _v_unimplemented
A list of attribute names with unimplemented native HDF5 types.
"""
def _g_getnode(self):
return self._v__nodefile._get_node(self._v__nodepath)
@property
def _v_node(self):
"""The :class:`Node` instance this attribute set is associated with."""
return self._g_getnode()
def __init__(self, node):
"""Create the basic structures to keep the attribute information.
Reads all the HDF5 attributes (if any) on disk for the node "node".
Parameters
----------
node
The parent node
"""
# Refuse to create an instance of an already closed node
if not node._v_isopen:
raise ClosedNodeError("the node for attribute set is closed")
dict_ = self.__dict__
self._g_new(node)
dict_["_v__nodefile"] = node._v_file
dict_["_v__nodepath"] = node._v_pathname
dict_["_v_attrnames"] = self._g_list_attr(node)
# The list of unimplemented attribute names
dict_["_v_unimplemented"] = []
# Get the file version format. This is an optimization
# in order to avoid accessing it too much.
try:
format_version = node._v_file.format_version
except AttributeError:
parsed_version = None
else:
if format_version == 'unknown':
parsed_version = None
else:
parsed_version = tuple(map(int, format_version.split('.')))
dict_["_v__format_version"] = parsed_version
# Split the attribute list in system and user lists
dict_["_v_attrnamessys"] = []
dict_["_v_attrnamesuser"] = []
for attr in self._v_attrnames:
# put the attributes on the local dictionary to allow
# tab-completion
self.__getattr__(attr)
if issysattrname(attr):
self._v_attrnamessys.append(attr)
else:
self._v_attrnamesuser.append(attr)
# Sort the attributes
self._v_attrnames.sort()
self._v_attrnamessys.sort()
self._v_attrnamesuser.sort()
def _g_update_node_location(self, node):
"""Updates the location information about the associated `node`."""
dict_ = self.__dict__
dict_['_v__nodefile'] = node._v_file
dict_['_v__nodepath'] = node._v_pathname
# hdf5extension operations:
self._g_new(node)
def _f_list(self, attrset='user'):
"""Get a list of attribute names.
The attrset string selects the attribute set to be used. A
'user' value returns only user attributes (this is the default).
A 'sys' value returns only system attributes. Finally, 'all'
returns both system and user attributes.
"""
if attrset == "user":
return self._v_attrnamesuser[:]
elif attrset == "sys":
return self._v_attrnamessys[:]
elif attrset == "all":
return self._v_attrnames[:]
def __dir__(self):
"""Autocomplete only children named as valid python identifiers.
Only PY3 supports this special method.
"""
return list({c for c in
super().__dir__() + self._v_attrnames
if c.isidentifier()})
def __getattr__(self, name):
"""Get the attribute named "name"."""
# If attribute does not exist, raise AttributeError
if name not in self._v_attrnames:
raise AttributeError(f"Attribute {name!r} does not exist "
f"in node: {self._v__nodepath!r}")
# Read the attribute from disk. This is an optimization to read
# quickly system attributes that are _string_ values, but it
# takes care of other types as well, for example NROWS for
# Tables and EXTDIM for EArrays
format_version = self._v__format_version
value = self._g_getattr(self._v_node, name)
# Check whether the value is pickled
# Pickled values always seem to end with a "."
maybe_pickled = (
isinstance(value, np.generic) and # NumPy scalar?
value.dtype.type == np.bytes_ and # string type?
value.itemsize > 0 and value.endswith(b'.'))
if (maybe_pickled and value in [b"0", b"0."]):
# Workaround for a bug in many versions of Python (starting
# somewhere after Python 2.6.1). See ticket #253.
retval = value
elif (maybe_pickled and _field_fill_re.match(name)
and format_version == (1, 5)):
# This format was used during the first 1.2 releases, just
# for string defaults.
try:
retval = pickle.loads(value)
retval = np.array(retval)
except ImportError:
retval = None # signal error avoiding exception
elif (maybe_pickled and
name == 'FILTERS' and
format_version is not None and
format_version < (2, 0)):
# This is a big hack, but we don't have any other way to recognize
# pickled filters of PyTables 1.x files.
value = _old_filters_re.sub(_new_filters_sub, value, 1)
retval = pickle.loads(value) # pass unpickling errors through
elif maybe_pickled:
try:
retval = pickle.loads(value)
# except cPickle.UnpicklingError:
# It seems that pickle may raise other errors than UnpicklingError
# Perhaps it would be better just an "except:" clause?
# except (cPickle.UnpicklingError, ImportError):
# Definitely (see SF bug #1254636)
except UnicodeDecodeError:
# Object maybe pickled on python 2 and unpickled on python 3.
# encoding='bytes' was added in python 3.4 to resolve this.
# However 'bytes' mangles class attributes as they are
# unpickled as bytestrings. Hence try 'latin1' first.
# Ref: http://bugs.python.org/issue6784
try:
retval = pickle.loads(value, encoding='latin1')
except TypeError:
try:
retval = pickle.loads(value, encoding='bytes')
except Exception:
retval = value
except Exception:
retval = value
except Exception:
# catch other unpickling errors:
# ivb (2005-09-07): It is too hard to tell
# whether the unpickling failed
# because of the string not being a pickle one at all,
# because of a malformed pickle string,
# or because of some other problem in object reconstruction,
# thus making inconvenient even the issuing of a warning here.
# The documentation contains a note on this issue,
# explaining how the user can tell where the problem was.
retval = value
# Additional check for allowing a workaround for #307
if isinstance(retval, str) and retval == '':
retval = np.array(retval)[()]
elif (name == 'FILTERS' and
format_version is not None and
format_version >= (2, 0)):
try:
retval = Filters._unpack(value)
except ValueError:
sys.stderr.write('Failed parsing FILTERS key\n')
sys.stderr.flush()
retval = None
elif name == 'TITLE' and not isinstance(value, str):
retval = value.decode('utf-8')
elif (issysattrname(name) and isinstance(value, (bytes, str)) and
not isinstance(value, str) and not _field_fill_re.match(name)):
# system attributes should always be str
# python 3, bytes and not "FIELD_[0-9]+_FILL"
retval = value.decode('utf-8')
else:
retval = value
# Put this value in the local dictionary
self.__dict__[name] = retval
return retval
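# Illustrative note (not part of the public API): the ``maybe_pickled``
# heuristic above works because every pickle stream ends with the STOP
# opcode, which is the byte b'.'.  A hedged sketch of what is stored on
# disk for a generic (non-native) attribute:
#
#     >>> import pickle, numpy as np
#     >>> raw = np.bytes_(pickle.dumps([3, (1, 2)], protocol=0))
#     >>> raw.endswith(b'.')
#     True
#     >>> pickle.loads(raw)
#     [3, (1, 2)]
#
# Native scalars and strings are stored directly and therefore skip the
# unpickling branches entirely.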
def _g__setattr(self, name, value):
"""Set a PyTables attribute.
Sets a (maybe new) PyTables attribute with the specified `name`
and `value`. If the attribute already exists, it is simply
replaced.
It does not log the change.
"""
# Save this attribute to disk
# (overwriting an existing one if needed)
stvalue = value
if issysattrname(name):
if name in ["EXTDIM", "AUTO_INDEX", "DIRTY", "NODE_TYPE_VERSION"]:
stvalue = np.array(value, dtype=np.int32)
value = stvalue[()]
elif name == "NROWS":
stvalue = np.array(value, dtype=SizeType)
value = stvalue[()]
elif (name == "FILTERS" and
self._v__format_version is not None and
self._v__format_version >= (2, 0)):
stvalue = value._pack()
# value will remain as a Filters instance here
# Convert value from a Python scalar into a NumPy scalar
# (only in case it has not been converted yet)
# Fixes ticket #59
if (stvalue is value and
type(value) in (bool, bytes, int, float, complex, str,
np.unicode_)):
# Additional check for allowing a workaround for #307
if isinstance(value, str) and len(value) == 0:
stvalue = np.array('')
else:
stvalue = np.array(value)
value = stvalue[()]
self._g_setattr(self._v_node, name, stvalue)
# New attribute or value. Introduce it into the local
# dictionary
self.__dict__[name] = value
# Finally, add this attribute to the list if not present
attrnames = self._v_attrnames
if name not in attrnames:
attrnames.append(name)
attrnames.sort()
if issysattrname(name):
attrnamessys = self._v_attrnamessys
attrnamessys.append(name)
attrnamessys.sort()
else:
attrnamesuser = self._v_attrnamesuser
attrnamesuser.append(name)
attrnamesuser.sort()
def __setattr__(self, name, value):
"""Set a PyTables attribute.
Sets a (maybe new) PyTables attribute with the specified `name`
and `value`. If the attribute already exists, it is simply
replaced.
A ``ValueError`` is raised when the name starts with a reserved
prefix or contains a ``/``. A `NaturalNameWarning` is issued if
the name is not a valid Python identifier. A
`PerformanceWarning` is issued when the recommended maximum
number of attributes in a node is going to be exceeded.
"""
nodefile = self._v__nodefile
attrnames = self._v_attrnames
# Check for name validity
check_attribute_name(name)
nodefile._check_writable()
# Check if there are too many attributes.
max_node_attrs = nodefile.params['MAX_NODE_ATTRS']
if len(attrnames) >= max_node_attrs:
warnings.warn("""\
node ``%s`` is exceeding the recommended maximum number of attributes (%d);\
be ready to see PyTables asking for *lots* of memory and possibly slow I/O"""
% (self._v__nodepath, max_node_attrs),
PerformanceWarning)
undo_enabled = nodefile.is_undo_enabled()
# Log old attribute removal (if any).
if undo_enabled and (name in attrnames):
self._g_del_and_log(name)
# Set the attribute.
self._g__setattr(name, value)
# Log new attribute addition.
if undo_enabled:
self._g_log_add(name)
def _g_log_add(self, name):
self._v__nodefile._log('ADDATTR', self._v__nodepath, name)
def _g_del_and_log(self, name):
nodefile = self._v__nodefile
node_pathname = self._v__nodepath
# Log *before* moving to use the right shadow name.
nodefile._log('DELATTR', node_pathname, name)
attr_to_shadow(nodefile, node_pathname, name)
def _g__delattr(self, name):
"""Delete a PyTables attribute.
Deletes the specified existing PyTables attribute.
It does not log the change.
"""
# Delete the attribute from disk
self._g_remove(self._v_node, name)
# Delete the attribute from local lists
self._v_attrnames.remove(name)
if name in self._v_attrnamessys:
self._v_attrnamessys.remove(name)
else:
self._v_attrnamesuser.remove(name)
# Delete the attribute from the local dictionary
# (closes #1049285)
del self.__dict__[name]
def __delattr__(self, name):
"""Delete a PyTables attribute.
Deletes the specified existing PyTables attribute from the
attribute set. If a nonexistent or system attribute is
specified, an ``AttributeError`` is raised.
"""
nodefile = self._v__nodefile
# Check if attribute exists
if name not in self._v_attrnames:
raise AttributeError(
"Attribute ('%s') does not exist in node '%s'"
% (name, self._v__nodepath))
nodefile._check_writable()
# Remove the PyTables attribute or move it to shadow.
if nodefile.is_undo_enabled():
self._g_del_and_log(name)
else:
self._g__delattr(name)
def __getitem__(self, name):
"""The dictionary like interface for __getattr__()."""
try:
return self.__getattr__(name)
except AttributeError:
# Capture the AttributeError and re-raise it as a KeyError
raise KeyError(
"Attribute ('%s') does not exist in node '%s'"
% (name, self._v__nodepath))
def __setitem__(self, name, value):
"""The dictionary like interface for __setattr__()."""
self.__setattr__(name, value)
def __delitem__(self, name):
"""The dictionary like interface for __delattr__()."""
try:
self.__delattr__(name)
except AttributeError:
# Capture the AttributeError and re-raise it as a KeyError
raise KeyError(
"Attribute ('%s') does not exist in node '%s'"
% (name, self._v__nodepath))
def __contains__(self, name):
"""Is there an attribute with that name?
A true value is returned if the attribute set has an attribute
with the given name, false otherwise.
"""
return name in self._v_attrnames
def _f_rename(self, oldattrname, newattrname):
"""Rename an attribute from oldattrname to newattrname."""
if oldattrname == newattrname:
# Do nothing
return
# First, fetch the value of the oldattrname
attrvalue = getattr(self, oldattrname)
# Now, create the new attribute
setattr(self, newattrname, attrvalue)
# Finally, remove the old attribute
delattr(self, oldattrname)
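# Note (hedged): because this goes through getattr/setattr/delattr, a rename
# is recorded as a set plus a delete when undo is enabled, e.g.::
#
#     leaf.attrs._f_rename('myattr', 'myattr_new')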
def _g_copy(self, newset, set_attr=None, copyclass=False):
"""Copy set attributes.
Copies all user and allowed system PyTables attributes to the
given attribute set, replacing the existing ones.
You can specify a *bound* method of the destination set that
will be used to set its attributes. Else, its `_g__setattr`
method will be used.
Changes are logged depending on the chosen setting method. The
default setting method does not log anything.
.. versionchanged:: 3.0
The *newSet* parameter has been renamed into *newset*.
.. versionchanged:: 3.0
The *copyClass* parameter has been renamed into *copyclass*.
"""
copysysattrs = newset._v__nodefile.params['PYTABLES_SYS_ATTRS']
if set_attr is None:
set_attr = newset._g__setattr
for attrname in self._v_attrnamesuser:
# Do not copy the unimplemented attributes.
if attrname not in self._v_unimplemented:
set_attr(attrname, getattr(self, attrname))
# Copy the system attributes that we are allowed to.
if copysysattrs:
for attrname in self._v_attrnamessys:
if ((attrname not in SYS_ATTRS_NOTTOBECOPIED) and
# Do not copy the FIELD_ attributes in tables as this can
# be really *slow* (don't know exactly the reason).
# See #304.
not attrname.startswith("FIELD_")):
set_attr(attrname, getattr(self, attrname))
# Copy CLASS and VERSION attributes if requested
if copyclass:
for attrname in FORCE_COPY_CLASS:
if attrname in self._v_attrnamessys:
set_attr(attrname, getattr(self, attrname))
def _f_copy(self, where):
"""Copy attributes to the where node.
Copies all user and certain system attributes to the given where
node (a Node instance - see :ref:`NodeClassDescr`), replacing
the existing ones.
"""
# AttributeSet must be defined in order to define a Node.
# However, we need to know Node here.
# Using class_name_dict avoids a circular import.
if not isinstance(where, class_name_dict['Node']):
raise TypeError(f"destination object is not a node: {where!r}")
self._g_copy(where._v_attrs, where._v_attrs.__setattr__)
def _g_close(self):
# Nothing will be done here, as the existing instance is completely
# operative now.
pass
def __str__(self):
"""The string representation for this object."""
# The pathname
pathname = self._v__nodepath
# Get this class name
classname = self.__class__.__name__
# The attribute names
attrnumber = sum(1 for _ in self._v_attrnames)
return f"{pathname}._v_attrs ({classname}), {attrnumber} attributes"
def __repr__(self):
"""A detailed string representation for this object."""
# print additional info only if there are attributes to show
attrnames = list(self._v_attrnames)
if attrnames:
rep = [f'{attr} := {getattr(self, attr)!r}' for attr in attrnames]
return f"{self!s}:\n [" + ',\n '.join(rep) + "]"
else:
return str(self)
class NotLoggedAttributeSet(AttributeSet):
def _g_log_add(self, name):
pass
def _g_del_and_log(self, name):
self._g__delattr(name)
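# Hedged usage sketch (not part of the library): demonstrates the attribute
# access idioms documented in the AttributeSet docstring.  Assumes PyTables
# is installed and importable as ``tables``.
if __name__ == "__main__":
    import os
    import tempfile

    import tables

    fname = tempfile.mktemp(suffix=".h5")
    with tables.open_file(fname, mode="w") as h5f:
        arr = h5f.create_array(h5f.root, "arr", [1, 2, 3])
        arr.attrs.myattr = "str attr"     # native string attribute
        arr.attrs.myattr2 = 3             # native integer attribute
        arr.attrs.myattr3 = [3, (1, 2)]   # generic object (pickled)
        print(arr.attrs._f_list("user"))  # -> ['myattr', 'myattr2', 'myattr3']
        del arr.attrs.myattr2             # delete via attribute access
    os.remove(fname)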
|
|
from __future__ import absolute_import
import pytest
import os.path
from subprocess import check_call, check_output
from changes.testutils import TestCase
from changes.vcs.base import (
ContentReadError, MissingFileError, UnknownChildRevision, UnknownParentRevision, UnknownRevision,
)
from changes.vcs.git import GitVcs
from tests.changes.vcs.asserts import VcsAsserts
class GitVcsTest(TestCase, VcsAsserts):
root = '/tmp/changes-git-test'
path = '%s/clone' % (root,)
remote_path = '%s/remote' % (root,)
url = 'file://%s' % (remote_path,)
def _get_last_two_revisions(self, marker, revisions):
if marker in revisions[0].branches:
return revisions[0], revisions[1]
else:
return revisions[1], revisions[0]
def _set_author(self, name, email, path=None):
if not path:
path = self.remote_path
path = "%s/.git" % (path,)
check_call(['git', '--git-dir', path, 'config', '--replace-all',
'user.name', name])
check_call(['git', '--git-dir', path, 'config', '--replace-all',
'user.email', email])
def setUp(self):
self.reset()
self.addCleanup(check_call, ['rm', '-rf', self.root],)
def reset(self):
check_call(['rm', '-rf', self.root])
check_call(['mkdir', '-p', self.path, self.remote_path])
check_call(['git', 'init', self.remote_path])
self._set_author('Foo Bar', 'foo@example.com')
self._add_file('FOO', self.remote_path, commit_msg="test\nlol\n")
self._add_file('BAR', self.remote_path, commit_msg="biz\nbaz\n")
def _add_file(self, filename, repo_path, commit_msg=None, content='', target=None):
if target:
check_output(['ln', '-s', target, filename], cwd=repo_path)
else:
with open(os.path.join(repo_path, filename), 'w') as f:
f.write(content)
check_call(['git', 'add', filename], cwd=repo_path)
check_call(['git', 'commit', '-m', commit_msg], cwd=repo_path)
def get_vcs(self):
return GitVcs(
url=self.url,
path=self.path
)
def test_get_default_revision(self):
vcs = self.get_vcs()
assert vcs.get_default_revision() == 'master'
def test_log_with_authors(self):
vcs = self.get_vcs()
# Create a commit with a new author
self._set_author('Another Committer', 'ac@d.not.zm.exist')
self._add_file('BAZ', self.remote_path, commit_msg="bazzy")
vcs.clone()
vcs.update()
revisions = list(vcs.log())
assert len(revisions) == 3
revisions = list(vcs.log(author='Another Committer'))
assert len(revisions) == 1
self.assertRevision(revisions[0],
author='Another Committer <ac@d.not.zm.exist>',
message='bazzy')
revisions = list(vcs.log(author='ac@d.not.zm.exist'))
assert len(revisions) == 1
self.assertRevision(revisions[0],
author='Another Committer <ac@d.not.zm.exist>',
message='bazzy')
revisions = list(vcs.log(branch=vcs.get_default_revision(),
author='Foo'))
assert len(revisions) == 2
def test_log_with_paths(self):
vcs = self.get_vcs()
# Create a third commit
self._set_author('Another Committer', 'ac@d.not.zm.exist')
self._add_file('BAZ', self.remote_path, commit_msg="bazzy")
vcs.clone()
vcs.update()
revisions = list(vcs.log())
assert len(revisions) == 3
# one revision
revisions = list(vcs.log(paths=["BAZ"]))
assert len(revisions) == 1, "one path, len %d" % len(revisions)
self.assertRevision(revisions[0],
message='bazzy')
# multiple revisions
revisions = list(vcs.log(paths=["FOO", "BAZ"]))
assert len(revisions) == 2, "two paths without wildcard, len %d" % len(revisions)
revisions = list(vcs.log(paths=["FO*", "BAZ"]))
assert len(revisions) == 2, "two paths with wildcards, len %d" % len(revisions)
self.assertRevision(revisions[0],
message='bazzy')
self.assertRevision(revisions[1],
message="test\nlol\n")
# TODO: and a different branch!
def test_log_with_paths_and_branches(self):
# branch is also a bare parameter in git, so let's make sure branches
# and paths play nicely together. Not as important to test this in hg
vcs = self.get_vcs()
# Create another branch and move it ahead of the master branch
check_call('git checkout -b B2'.split(' '), cwd=self.remote_path)
self._add_file('BAZ', self.remote_path, commit_msg='second branch commit')
# Create a third branch off master with a commit not in B2
check_call(['git', 'checkout', vcs.get_default_revision()], cwd=self.remote_path)
check_call('git checkout -b B3'.split(' '), cwd=self.remote_path)
self._add_file('IPSUM', self.remote_path, commit_msg='3rd branch')
vcs.clone()
vcs.update()
# Ensure git log normally includes commits from all branches
revisions = list(vcs.log())
assert len(revisions) == 4
# While in B3, do a git log on B2. FOO and BAZ should show up, but not
# IPSUM
revisions = list(vcs.log(branch='B2', paths=["FOO", "BAZ", "IPSUM"]))
assert len(revisions) == 2
# Sanity check master
check_call(['git', 'checkout', vcs.get_default_revision()], cwd=self.remote_path)
revisions = list(vcs.log(branch=vcs.get_default_revision()))
assert len(revisions) == 2
def test_log_with_branches(self):
vcs = self.get_vcs()
# Create another branch and move it ahead of the master branch
check_call('git checkout -b B2'.split(' '), cwd=self.remote_path)
self._add_file('BAZ', self.remote_path, commit_msg='second branch commit')
# Create a third branch off master with a commit not in B2
check_call(['git', 'checkout', vcs.get_default_revision()], cwd=self.remote_path)
check_call('git checkout -b B3'.split(' '), cwd=self.remote_path)
self._add_file('IPSUM', self.remote_path, commit_msg='3rd branch')
vcs.clone()
vcs.update()
# Ensure git log normally includes commits from all branches
revisions = list(vcs.log())
assert len(revisions) == 4
# Git timestamps are only accurate to the second. But since this test
# creates these commits so close to each other, there's a race
# condition here. Ultimately, we only care that both commits appear
# last in the log, so allow them to be out of order.
last_rev, previous_rev = self._get_last_two_revisions('B3', revisions)
self.assertRevision(last_rev,
message='3rd branch',
branches=['B3'])
self.assertRevision(previous_rev,
message='second branch commit',
branches=['B2'])
# Note that the list of branches here differs from the hg version
# because hg only returns the branch name from the changeset, which
# does not include any ancestors.
self.assertRevision(revisions[3],
message='test',
branches=[vcs.get_default_revision(), 'B2', 'B3'])
# Ensure git log with B3 only
revisions = list(vcs.log(branch='B3'))
assert len(revisions) == 3
self.assertRevision(revisions[0],
message='3rd branch',
branches=['B3'])
self.assertRevision(revisions[2],
message='test',
branches=[vcs.get_default_revision(), 'B2', 'B3'])
# Sanity check master
check_call(['git', 'checkout', vcs.get_default_revision()], cwd=self.remote_path)
revisions = list(vcs.log(branch=vcs.get_default_revision()))
assert len(revisions) == 2
def test_first_parent(self):
vcs = self.get_vcs()
self._add_file('BAZ', self.remote_path, commit_msg='baz')
self._add_file('BAZ2', self.remote_path, commit_msg='baz2')
# Create the commit that will be the second parent.
check_call(['git', 'checkout', 'HEAD^'], cwd=self.remote_path)
self._add_file('SECOND_PARENT', self.remote_path, commit_msg='second parent')
to_merge = check_output(['git', 'rev-parse', 'HEAD'], cwd=self.remote_path)
# Merge commit into master.
check_call('git checkout master'.split(' '), cwd=self.remote_path)
check_call(['git', 'merge', to_merge.strip('\n')], cwd=self.remote_path)
vcs.clone()
vcs.update()
revisions = list(vcs.log())
assert len(revisions) == 5
revisions = list(vcs.log(first_parent=False))
assert len(revisions) == 6
def test_log_throws_errors_when_needed(self):
vcs = self.get_vcs()
try:
vcs.log(parent='HEAD', branch='master').next()
self.fail('log passed with both parent and branch specified')
except ValueError:
pass
def test_simple(self):
vcs = self.get_vcs()
vcs.clone()
vcs.update()
revision = vcs.log(parent='HEAD', limit=1).next()
assert len(revision.id) == 40
self.assertRevision(revision,
author='Foo Bar <foo@example.com>',
message='biz\nbaz\n',
subject='biz')
revisions = list(vcs.log())
assert len(revisions) == 2
assert revisions[0].subject == 'biz'
assert revisions[0].message == 'biz\nbaz\n'
assert revisions[0].author == 'Foo Bar <foo@example.com>'
assert revisions[0].committer == 'Foo Bar <foo@example.com>'
assert revisions[0].parents == [revisions[1].id]
assert revisions[0].author_date == revisions[0].committer_date is not None
assert revisions[0].branches == ['master']
assert revisions[1].subject == 'test'
assert revisions[1].message == 'test\nlol\n'
assert revisions[1].author == 'Foo Bar <foo@example.com>'
assert revisions[1].committer == 'Foo Bar <foo@example.com>'
assert revisions[1].parents == []
assert revisions[1].author_date == revisions[1].committer_date is not None
assert revisions[1].branches == ['master']
diff = vcs.export(revisions[0].id)
assert diff == """diff --git a/BAR b/BAR
new file mode 100644
index 0000000..e69de29
"""
assert vcs.get_changed_files(revisions[0].id) == set(["BAR"])
revisions = list(vcs.log(offset=0, limit=1))
assert len(revisions) == 1
assert revisions[0].subject == 'biz'
revisions = list(vcs.log(offset=1, limit=1))
assert len(revisions) == 1
assert revisions[0].subject == 'test'
def test_is_child_parent(self):
vcs = self.get_vcs()
vcs.clone()
vcs.update()
revisions = list(vcs.log())
assert vcs.is_child_parent(child_in_question=revisions[0].id,
parent_in_question=revisions[1].id)
assert not vcs.is_child_parent(child_in_question=revisions[1].id,
parent_in_question=revisions[0].id)
unknown_sha = 'ffffffffffffffffffffffffffffffffffffffff'
with pytest.raises(UnknownChildRevision):
vcs.is_child_parent(child_in_question=unknown_sha,
parent_in_question=revisions[1].id)
with pytest.raises(UnknownParentRevision):
vcs.is_child_parent(child_in_question=revisions[1].id,
parent_in_question=unknown_sha)
def test_get_known_branches(self):
vcs = self.get_vcs()
vcs.clone()
vcs.update()
branches = vcs.get_known_branches()
self.assertEquals(1, len(branches))
self.assertIn('master', branches)
check_call('git checkout -B test_branch'.split(), cwd=self.remote_path)
vcs.update()
branches = vcs.get_known_branches()
self.assertEquals(2, len(branches))
self.assertIn('test_branch', branches)
def test_update_repo_url(self):
# Create a second remote
remote_path2 = '%s/remote2/' % (self.root,)
check_call(['mkdir', '-p', remote_path2])
check_call(['git', 'clone', self.remote_path, remote_path2], cwd=self.root)
self._set_author('Remote2 Committer', 'Remote2Committer@example.com', path=remote_path2)
self._add_file('BAZ', remote_path2, commit_msg='bazzy')
# Clone original remote
vcs = self.get_vcs()
vcs.clone()
vcs.update()
revisions = list(vcs.log())
assert len(revisions) == 2
# Update to new remote
vcs.url = remote_path2
vcs.update()
revisions = list(vcs.log())
assert len(revisions) == 3
# Revert to original
vcs.url = self.remote_path
vcs.update()
revisions = list(vcs.log())
assert len(revisions) == 2
def test_read_file(self):
vcs = self.get_vcs()
vcs.clone()
vcs.update()
# simple case
assert vcs.read_file('HEAD', 'FOO') == ''
# unknown file
with pytest.raises(ContentReadError):
vcs.read_file('HEAD', 'doesnotexist')
# unknown sha
with pytest.raises(UnknownRevision):
vcs.read_file('a' * 40, 'FOO')
def test_read_file_symlink(self):
content = 'Line 1\nLine 2\n'
self._add_file('REAL', self.remote_path, content=content, commit_msg='Target file.')
self._add_file('INDIRECT', self.remote_path, target='REAL', commit_msg="Here we go.")
vcs = self.get_vcs()
vcs.clone()
vcs.update()
assert vcs.read_file('HEAD', 'INDIRECT') == content
with pytest.raises(ContentReadError):
vcs.read_file('HEAD', 'does_not_exist.txt')
def test_read_file_symlink_out_of_tree(self):
oot = os.path.join(self.root, 'out_of_tree.txt')
with open(oot, 'w') as f:
f.write("Out of tree!\n")
self._add_file('INDIRECT', self.remote_path, target=oot, commit_msg="Here we go.")
vcs = self.get_vcs()
vcs.clone()
vcs.update()
with pytest.raises(ContentReadError):
vcs.read_file('HEAD', 'INDIRECT')
def test_read_file_with_diff(self):
PATCH = """diff --git a/FOO b/FOO
index e69de29..d0c77a5 100644
--- a/FOO
+++ b/FOO
@@ -0,0 +1 @@
+blah
diff --git a/FOO1 b/FOO1
index e69de29..d0c77a5 100644
--- a/FOO1
+++ b/FOO1
@@ -1,1 +1 @@
-blah
+blah
"""
vcs = self.get_vcs()
vcs.clone()
vcs.update()
assert vcs.read_file('HEAD', 'FOO', diff=PATCH) == 'blah\n'
def test_read_file_with_new_file_in_diff(self):
PATCH = """diff --git a/newly_added b/newly_added
new file mode 100644
index 0000000..d0c77a5
--- /dev/null
+++ b/newly_added
@@ -0,0 +1 @@
+hello
"""
vcs = self.get_vcs()
vcs.clone()
vcs.update()
assert vcs.read_file('HEAD', 'newly_added', diff=PATCH) == 'hello\n'
with pytest.raises(MissingFileError):
vcs.read_file('HEAD', 'still_does_not_exist', diff=PATCH)
def test_get_patch_hash(self):
vcs = self.get_vcs()
vcs.clone()
vcs.update()
patch_hash = vcs.get_patch_hash('HEAD')
assert isinstance(patch_hash, str) and len(patch_hash) == 40
class GetRepositoryTestCase(TestCase):
def test_correct(self):
for (url, expected_name) in [
('example.com:test.git', 'test.git'),
('example.com:test', 'test.git'),
('ssh@example.com:test.git', 'test.git'),
('ssh@example.com:test', 'test.git'),
('example.com:prefix/test.git', 'test.git'),
('example.com:prefix/test', 'test.git'),
('example.com:test-with-hyphen', 'test-with-hyphen.git'),
('example.com:some-prefix/test-with-hyphen', 'test-with-hyphen.git'),
]:
assert GitVcs.get_repository_name(url) == expected_name
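# Hedged sketch (not the actual implementation): a helper consistent with the
# parametrized cases above could look like the following; names here are
# illustrative only.
#
#     import re
#
#     def get_repository_name(url):
#         # take the last path component after ':' or '/'
#         name = re.split(r'[:/]', url.rstrip('/'))[-1]
#         if not name.endswith('.git'):
#             name += '.git'
#         return name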
|
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is used to validate that the library is correct.
This checks:
* All files in lib/ appear when compiling +@complete
* Runs a compiler pass over the test code to check for type errors
* Runs the linter to check for style violations.
"""
import logging
import os
import re
import sys
import build
import shakaBuildHelpers
def get_lint_files():
"""Returns the absolute paths to all the files to run the linter over."""
match = re.compile(r'.*\.js$')
base = shakaBuildHelpers.get_source_base()
def get(arg):
return shakaBuildHelpers.get_all_files(os.path.join(base, arg), match)
return get('test') + get('lib') + get('externs') + get('demo')
def check_closure_compiler_linter():
"""Runs the Closure Compiler linter."""
logging.info('Running Closure Compiler linter...')
base = shakaBuildHelpers.get_source_base()
closure_linter_path = os.path.join(base, 'third_party', 'closure', 'linter.jar')
cmd_line = ['java', '-jar', closure_linter_path] + get_lint_files()
# The compiler's linter tool doesn't return a status code (as of v20171203)
# and has no options.  Instead of checking the status code, we treat empty
# output as success.
output = shakaBuildHelpers.execute_get_output(cmd_line)
if output != '':
print output
return False
return True
def check_js_lint():
"""Runs the JavaScript linter."""
# TODO: things not enforced: property doc requirements
logging.info('Running eslint...')
eslint = shakaBuildHelpers.get_node_binary('eslint')
cmd_line = eslint + get_lint_files()
return shakaBuildHelpers.execute_get_code(cmd_line) == 0
def check_html_lint():
"""Runs the HTML linter over the HTML files.
Returns:
True on success, False on failure.
"""
logging.info('Running htmlhint...')
htmlhint = shakaBuildHelpers.get_node_binary('htmlhint')
base = shakaBuildHelpers.get_source_base()
files = ['index.html', 'demo/index.html', 'support.html']
file_paths = [os.path.join(base, x) for x in files]
config_path = os.path.join(base, '.htmlhintrc')
cmd_line = htmlhint + ['--config=' + config_path] + file_paths
return shakaBuildHelpers.execute_get_code(cmd_line) == 0
def check_complete():
"""Checks whether the 'complete' build references every file.
This is used by the build script to ensure that every file is included in at
least one build type.
Returns:
True on success, False on failure.
"""
logging.info('Checking that the build files are complete...')
complete = build.Build()
# Normally we don't need to include @core, but because we look at the build
# object directly, we need to include it here. When using main(), it will
# call addCore which will ensure core is included.
if not complete.parse_build(['+@complete', '+@core'], os.getcwd()):
logging.error('Error parsing complete build')
return False
match = re.compile(r'.*\.js$')
base = shakaBuildHelpers.get_source_base()
all_files = shakaBuildHelpers.get_all_files(os.path.join(base, 'lib'), match)
missing_files = set(all_files) - complete.include
if missing_files:
logging.error('There are files missing from the complete build:')
for missing in missing_files:
# Convert to a path relative to source base.
logging.error(' ' + os.path.relpath(missing, base))
return False
return True
def check_tests():
"""Runs an extra compile pass over the test code to check for type errors.
Returns:
True on success, False on failure.
"""
logging.info('Checking the tests for type errors...')
match = re.compile(r'.*\.js$')
base = shakaBuildHelpers.get_source_base()
def get(*args):
return shakaBuildHelpers.get_all_files(os.path.join(base, *args), match)
files = set(get('lib') + get('externs') + get('test') +
get('third_party', 'closure'))
files.add(os.path.join(base, 'demo', 'common', 'assets.js'))
test_build = build.Build(files)
closure_opts = build.common_closure_opts + build.common_closure_defines
closure_opts += build.debug_closure_opts + build.debug_closure_defines
# Ignore missing goog.require since we assume the whole library is
# already included.
closure_opts += [
'--jscomp_off=missingRequire', '--jscomp_off=strictMissingRequire',
'--checks-only', '-O', 'SIMPLE'
]
return test_build.build_raw(closure_opts)
def check_externs():
"""Runs an extra compile pass over the generated externs to ensure that they
are usable.
Returns:
True on success, False on failure.
"""
logging.info('Checking the usability of generated externs...')
# Create a complete "build" object.
externs_build = build.Build()
if not externs_build.parse_build(['+@complete'], os.getcwd()):
return False
externs_build.add_core()
# Use it to generate externs for the next check.
if not externs_build.generate_externs('check'):
return False
# Create a custom "build" object, add all manually-written externs, then add
# the generated externs we just generated.
source_base = shakaBuildHelpers.get_source_base()
manual_externs = shakaBuildHelpers.get_all_files(
os.path.join(source_base, 'externs'), re.compile(r'.*\.js$'))
generated_externs = os.path.join(
source_base, 'dist', 'shaka-player.check.externs.js')
check_build = build.Build()
check_build.include = set(manual_externs)
check_build.include.add(generated_externs)
# Build with the complete set of externs, but without any application code.
# This will help find issues in the generated externs, independent of the app.
# Since we have no app, don't use the defines. Unused defines cause a
# compilation error.
closure_opts = build.common_closure_opts + build.debug_closure_opts + [
'--checks-only', '-O', 'SIMPLE'
]
ok = check_build.build_raw(closure_opts)
# Clean up the temporary externs we just generated.
os.unlink(generated_externs)
# Return the success/failure of the build above.
return ok
def usage():
print 'Usage:', sys.argv[0]
print
print __doc__
def main(args):
for arg in args:
if arg == '--help':
usage()
return 0
else:
logging.error('Unknown option: %s', arg)
usage()
return 1
# Update node modules if needed.
if not shakaBuildHelpers.update_node_modules():
return 1
steps = [
check_closure_compiler_linter,
check_js_lint,
check_html_lint,
check_complete,
check_tests,
check_externs,
]
for step in steps:
if not step():
return 1
return 0
if __name__ == '__main__':
shakaBuildHelpers.run_main(main)
|
|
import copy
import pandas as pd
from threeML.plugins.SpectrumLike import SpectrumLike
from threeML.utils.OGIP.response import InstrumentResponse
from threeML.utils.spectrum.binned_spectrum import (
BinnedSpectrumWithDispersion,
ChannelSet,
)
__instrument_name = "General binned spectral data with energy dispersion"
class DispersionSpectrumLike(SpectrumLike):
def __init__(
self,
name,
observation,
background=None,
background_exposure=None,
verbose=True,
tstart=None,
tstop=None,
):
"""
A plugin for generic spectral data with energy dispersion, accepts an observed binned spectrum,
and a background binned spectrum or plugin with the background data.
In the case of a binned background spectrum, the background model is profiled
out and the appropriate profile-likelihood is used to fit the total spectrum. In this
case, caution must be used when there are zero background counts in bins as the
profiled background parameters (one per channel) will then have zero information from which to
constrain the background. It is recommended to bin the spectrum such that there is at least one
background count per channel.
If either a SpectrumLike or XYLike instance is provided as background, it is assumed that this is the
background data and the likelihood model from this plugin is used to simultaneously fit the background
and source.
:param name: the plugin name
:param observation: the observed spectrum
:param background: the background spectrum or a plugin from which the background will be modeled
:param background_exposure: (optional) adjust the background exposure if the modeled background data comes from an
XYLike plugin
:param verbose: turn on/off verbose logging
"""
assert isinstance(
observation, BinnedSpectrumWithDispersion
), "observed spectrum is not an instance of BinnedSpectrumWithDispersion"
assert (
observation.response is not None
), "the observed spectrum does not have a response"
# assign the response to the plugins
self._rsp = observation.response # type: InstrumentResponse
super(DispersionSpectrumLike, self).__init__(
name=name,
observation=observation,
background=background,
background_exposure=background_exposure,
verbose=verbose,
tstart=tstart,
tstop=tstop,
)
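# Hedged construction sketch (names are illustrative): given an observed
# ``BinnedSpectrumWithDispersion`` called ``obs`` (which carries its own
# InstrumentResponse) and an optional background spectrum ``bkg``::
#
#     plugin = DispersionSpectrumLike("det", observation=obs, background=bkg)
#
# The response is taken from ``obs.response``; there is no separate
# ``response`` argument to this constructor.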
def set_model(self, likelihoodModel):
"""
Set the model to be used in the joint minimization.
"""
# Store likelihood model
self._like_model = likelihoodModel
# We assume there are no extended sources, since we cannot handle them here
assert self._like_model.get_number_of_extended_sources() == 0, (
"OGIP-like plugins do not support " "extended sources"
)
# Get the differential flux function, and the integral function, with no dispersion,
# we simply integrate the model over the bins
differential_flux, integral = self._get_diff_flux_and_integral(self._like_model)
self._rsp.set_function(integral)
def _evaluate_model(self):
"""
evaluates the full model over all channels
:return:
"""
return self._rsp.convolve()
def get_simulated_dataset(self, new_name=None, **kwargs):
"""
Returns another DispersionSpectrumLike instance where data have been obtained by randomizing the current expectation from the
model, as well as from the background (depending on the respective noise models)
:return: a DispersionSpectrumLike simulated instance
"""
# pass the response through to the constructor
return super(DispersionSpectrumLike, self).get_simulated_dataset(
new_name=new_name, **kwargs
)
def get_pha_files(self):
info = {}
# we want to pass copies so that
# the user doesn't grab the instance
# and try to modify things. protection
info["pha"] = copy.copy(self._observed_spectrum)
if self._background_spectrum is not None:
info["bak"] = copy.copy(self._background_spectrum)
info["rsp"] = copy.copy(self._rsp)
return info
def display_rsp(self):
"""
Display the currently loaded full response matrix, i.e., RMF and ARF convolved
:return:
"""
self._rsp.plot_matrix()
@property
def response(self):
return self._rsp
def _output(self):
# type: () -> pd.Series
super_out = super(DispersionSpectrumLike, self)._output() # type: pd.Series
the_df = pd.Series({"response": self._rsp.rsp_filename})
return super_out.append(the_df)
def write_pha(self, filename, overwrite=False, force_rsp_write=False):
"""
Writes the observation, background and (optional) rsp to PHAII fits files
:param filename: base file name to write out
:param overwrite: if you would like to force overwriting of the files
:param force_rsp_write: force the writing of an rsp even if not required
"""
# we need to pass up the variables to an OGIPLike
# so that we have the proper variable name
# a local import here because OGIPLike is dependent on this
from threeML.plugins.OGIPLike import OGIPLike
ogiplike = OGIPLike.from_general_dispersion_spectrum(self)
ogiplike.write_pha(
file_name=filename, overwrite=overwrite, force_rsp_write=force_rsp_write
)
@staticmethod
def _build_fake_observation(
fake_data, channel_set, source_errors, source_sys_errors, is_poisson, **kwargs
):
"""
This is the fake observation builder for DispersionSpectrumLike, which builds
data for a binned spectrum *with* dispersion. It overrides the builder of the
parent SpectrumLike class.
:param fake_data: series of values... they are ignored later
:param channel_set: a channel set
:param source_errors:
:param source_sys_errors:
:param is_poisson:
:return:
"""
assert (
"response" in kwargs
), "A response was not provided. Cannot build synthetic observation"
response = kwargs.pop("response")
observation = BinnedSpectrumWithDispersion(
fake_data,
exposure=1.0,
response=response,
count_errors=source_errors,
sys_errors=source_sys_errors,
quality=None,
scale_factor=1.0,
is_poisson=is_poisson,
mission="fake_mission",
instrument="fake_instrument",
tstart=0.0,
tstop=1.0,
)
return observation
@classmethod
def from_function(
cls,
name,
source_function,
response,
source_errors=None,
source_sys_errors=None,
background_function=None,
background_errors=None,
background_sys_errors=None,
):
"""
Construct a simulated spectrum from a given source function and (optional) background function. If source and/or background errors are not supplied, the likelihood is assumed to be Poisson.
:param name: simulated data set name
:param source_function: astromodels function
:param response: 3ML Instrument response
:param source_errors: (optional) gaussian source errors
:param source_sys_errors: (optional) systematic source errors
:param background_function: (optional) astromodels background function
:param background_errors: (optional) gaussian background errors
:param background_sys_errors: (optional) background systematic errors
:return: simulated DispersionSpectrumLike plugin
"""
channel_set = ChannelSet.from_instrument_response(response)
energy_min, energy_max = channel_set.bin_stack.T
# pass the variables to the super class
return super(DispersionSpectrumLike, cls).from_function(
name,
source_function,
energy_min,
energy_max,
source_errors,
source_sys_errors,
background_function,
background_errors,
background_sys_errors,
response=response,
)
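# Hedged usage sketch for ``from_function`` (assumes astromodels provides a
# ``Powerlaw`` spectral shape and that ``rsp`` is a 3ML InstrumentResponse):
#
#     from astromodels import Powerlaw
#
#     sim = DispersionSpectrumLike.from_function(
#         "sim_det",
#         source_function=Powerlaw(),
#         response=rsp,
#         background_function=Powerlaw(),
#     )
#
# With no errors supplied, both source and background are treated as Poisson
# distributed, matching the docstring above.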
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations(object):
"""RoutesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2015_06_15.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Route"
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Route, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2015_06_15.models.Route
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
route_parameters, # type: "_models.Route"
**kwargs # type: Any
):
# type: (...) -> "_models.Route"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_parameters, 'Route')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Route', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_table_name, # type: str
route_name, # type: str
route_parameters, # type: "_models.Route"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Route"]
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update route operation.
:type route_parameters: ~azure.mgmt.network.v2015_06_15.models.Route
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Route or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2015_06_15.models.Route]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteListResult"]
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2015_06_15.models.RouteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'} # type: ignore
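# Hedged usage sketch (not part of the generated SDK): these operations are
# normally reached through NetworkManagementClient rather than by instantiating
# RoutesOperations directly.  Assumes azure-identity is installed and that
# "<subscription-id>" is replaced with a real value.
def _example_routes_usage(resource_group_name, route_table_name):  # pragma: no cover
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # begin_create_or_update returns an LROPoller; result() blocks until the
    # long-running operation completes and yields the deserialized Route.
    poller = client.routes.begin_create_or_update(
        resource_group_name,
        route_table_name,
        "example-route",
        {"address_prefix": "10.0.0.0/16", "next_hop_type": "VnetLocal"},
    )
    created = poller.result()
    # list() returns an ItemPaged iterator; iterating it follows next_link pages.
    return created.name, [r.name for r in client.routes.list(resource_group_name, route_table_name)]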
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.api_core import operations_v1
from google.cloud.vision_v1p3beta1.proto import product_search_service_pb2_grpc
class ProductSearchGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.vision.v1p3beta1 ProductSearch API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-vision",
)
def __init__(
self, channel=None, credentials=None, address="vision.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
options={
"grpc.max_send_message_length": -1,
"grpc.max_receive_message_length": -1,
}.items(),
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"product_search_stub": product_search_service_pb2_grpc.ProductSearchStub(
channel
)
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
self._operations_client = google.api_core.operations_v1.OperationsClient(
channel
)
@classmethod
def create_channel(
cls, address="vision.googleapis.com:443", credentials=None, **kwargs
):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def create_product_set(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.create_product_set`.
Creates and returns a new ProductSet resource.
Possible errors:
- Returns INVALID\_ARGUMENT if display\_name is missing, or is longer
than 4096 characters.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].CreateProductSet
@property
def list_product_sets(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.list_product_sets`.
Lists ProductSets in an unspecified order.
Possible errors:
- Returns INVALID\_ARGUMENT if page\_size is greater than 100, or less
than 1.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].ListProductSets
@property
def get_product_set(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.get_product_set`.
Gets information associated with a ProductSet.
Possible errors:
- Returns NOT\_FOUND if the ProductSet does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].GetProductSet
@property
def update_product_set(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.update_product_set`.
Makes changes to a ProductSet resource. Only display\_name can be
updated currently.
Possible errors:
- Returns NOT\_FOUND if the ProductSet does not exist.
- Returns INVALID\_ARGUMENT if display\_name is present in update\_mask
but missing from the request or longer than 4096 characters.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].UpdateProductSet
@property
def delete_product_set(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.delete_product_set`.
Permanently deletes a ProductSet. All Products and ReferenceImages in
the ProductSet will be deleted.
The actual image files are not deleted from Google Cloud Storage.
Possible errors:
- Returns NOT\_FOUND if the ProductSet does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].DeleteProductSet
@property
def create_product(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.create_product`.
Creates and returns a new product resource.
Possible errors:
- Returns INVALID\_ARGUMENT if display\_name is missing or longer than
4096 characters.
- Returns INVALID\_ARGUMENT if description is longer than 4096
characters.
- Returns INVALID\_ARGUMENT if product\_category is missing or invalid.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].CreateProduct
@property
def list_products(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.list_products`.
Lists products in an unspecified order.
Possible errors:
- Returns INVALID\_ARGUMENT if page\_size is greater than 100 or less
than 1.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].ListProducts
@property
def get_product(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.get_product`.
Gets information associated with a Product.
Possible errors:
- Returns NOT\_FOUND if the Product does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].GetProduct
@property
def update_product(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.update_product`.
Makes changes to a Product resource. Only display\_name, description and
labels can be updated right now.
If labels are updated, the change will not be reflected in queries until
the next index time.
Possible errors:
- Returns NOT\_FOUND if the Product does not exist.
- Returns INVALID\_ARGUMENT if display\_name is present in update\_mask
but is missing from the request or longer than 4096 characters.
- Returns INVALID\_ARGUMENT if description is present in update\_mask
but is longer than 4096 characters.
- Returns INVALID\_ARGUMENT if product\_category is present in
update\_mask.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].UpdateProduct
@property
def delete_product(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.delete_product`.
Permanently deletes a product and its reference images.
Metadata of the product and all its images will be deleted right away,
but search queries against ProductSets containing the product may still
work until all related caches are refreshed.
Possible errors:
- Returns NOT\_FOUND if the product does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].DeleteProduct
@property
def create_reference_image(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.create_reference_image`.
Creates and returns a new ReferenceImage resource.
The ``bounding_poly`` field is optional. If ``bounding_poly`` is not
specified, the system will try to detect regions of interest in the
image that are compatible with the product\_category on the parent
product. If it is specified, detection is ALWAYS skipped. The system
converts polygons into non-rotated rectangles.
Note that the pipeline will resize the image if the image resolution is
too large to process (above 50MP).
Possible errors:
- Returns INVALID\_ARGUMENT if the image\_uri is missing or longer than
4096 characters.
- Returns INVALID\_ARGUMENT if the product does not exist.
- Returns INVALID\_ARGUMENT if bounding\_poly is not provided, and
nothing compatible with the parent product's product\_category is
detected.
- Returns INVALID\_ARGUMENT if bounding\_poly contains more than 10
polygons.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].CreateReferenceImage
@property
def delete_reference_image(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.delete_reference_image`.
Permanently deletes a reference image.
The image metadata will be deleted right away, but search queries
against ProductSets containing the image may still work until all
related caches are refreshed.
The actual image files are not deleted from Google Cloud Storage.
Possible errors:
- Returns NOT\_FOUND if the reference image does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].DeleteReferenceImage
@property
def list_reference_images(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.list_reference_images`.
Lists reference images.
Possible errors:
- Returns NOT\_FOUND if the parent product does not exist.
- Returns INVALID\_ARGUMENT if the page\_size is greater than 100, or
less than 1.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].ListReferenceImages
@property
def get_reference_image(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.get_reference_image`.
Gets information associated with a ReferenceImage.
Possible errors:
- Returns NOT\_FOUND if the specified image does not exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].GetReferenceImage
@property
def add_product_to_product_set(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.add_product_to_product_set`.
Adds a Product to the specified ProductSet. If the Product is already
present, no change is made.
One Product can be added to at most 100 ProductSets.
Possible errors:
- Returns NOT\_FOUND if the Product or the ProductSet doesn't exist.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].AddProductToProductSet
@property
def remove_product_from_product_set(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.remove_product_from_product_set`.
Removes a Product from the specified ProductSet.
Possible errors:
- Returns NOT\_FOUND If the Product is not found under the ProductSet.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].RemoveProductFromProductSet
@property
def list_products_in_product_set(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.list_products_in_product_set`.
Lists the Products in a ProductSet, in an unspecified order. If the
ProductSet does not exist, the products field of the response will be
empty.
Possible errors:
- Returns INVALID\_ARGUMENT if page\_size is greater than 100 or less
than 1.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].ListProductsInProductSet
@property
def import_product_sets(self):
"""Return the gRPC stub for :meth:`ProductSearchClient.import_product_sets`.
Asynchronous API that imports a list of reference images to specified
product sets based on a list of image information.
The ``google.longrunning.Operation`` API can be used to keep track of
the progress and results of the request. ``Operation.metadata`` contains
``BatchOperationMetadata``. (progress) ``Operation.response`` contains
``ImportProductSetsResponse``. (results)
The input source of this method is a csv file on Google Cloud Storage.
For the format of the csv file please see
``ImportProductSetsGcsSource.csv_file_uri``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["product_search_stub"].ImportProductSets
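# Hedged usage sketch (not part of the generated transport): ProductSearchClient
# normally constructs this transport for you; building it directly assumes
# application default credentials are available in the environment.
def _example_transport_usage():  # pragma: no cover
    transport = ProductSearchGrpcTransport(address="vision.googleapis.com:443")
    # Each property above returns the bound gRPC stub method for one RPC.
    list_product_sets = transport.list_product_sets
    return transport.channel, list_product_sets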
|
|
"""SCons.Job
This module defines the Serial and Parallel classes that execute tasks to
complete a build. The Jobs class provides a higher level interface to start,
stop, and wait on jobs.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Job.py 2014/01/04 01:12:18 root"
import SCons.compat
import os
import signal
import SCons.Errors
import SCons.Warnings
# The default stack size (in kilobytes) of the threads used to execute
# jobs in parallel.
#
# We use a stack size of 256 kilobytes. The default on some platforms
# is too large and prevents us from creating enough threads to fully
# parallelize the build. For example, the default stack size on Linux
# is 8 MBytes.
explicit_stack_size = None
default_stack_size = 256
interrupt_msg = 'Build interrupted.'
class InterruptState(object):
def __init__(self):
self.interrupted = False
def set(self):
self.interrupted = True
def __call__(self):
return self.interrupted
class Jobs(object):
"""An instance of this class initializes N jobs, and provides
methods for starting, stopping, and waiting on all N jobs.
"""
def __init__(self, num, taskmaster):
"""
create 'num' jobs using the given taskmaster.
If 'num' is 1 or less, then a serial job will be used,
otherwise a parallel job with 'num' worker threads will
be used.
The 'num_jobs' attribute will be set to the actual number of jobs
allocated. If more than one job is requested but the Parallel
class can't do it, it gets reset to 1. Wrapping interfaces that
care should check the value of 'num_jobs' after initialization.
"""
self.job = None
if num > 1:
stack_size = explicit_stack_size
if stack_size is None:
stack_size = default_stack_size
try:
self.job = Parallel(taskmaster, num, stack_size)
self.num_jobs = num
except NameError:
pass
if self.job is None:
self.job = Serial(taskmaster)
self.num_jobs = 1
def run(self, postfunc=lambda: None):
"""Run the jobs.
        postfunc() will be invoked after the jobs have run. It will be
invoked even if the jobs are interrupted by a keyboard
interrupt (well, in fact by a signal such as either SIGINT,
SIGTERM or SIGHUP). The execution of postfunc() is protected
against keyboard interrupts and is guaranteed to run to
completion."""
self._setup_sig_handler()
try:
self.job.start()
finally:
postfunc()
self._reset_sig_handler()
def were_interrupted(self):
"""Returns whether the jobs were interrupted by a signal."""
return self.job.interrupted()
def _setup_sig_handler(self):
"""Setup an interrupt handler so that SCons can shutdown cleanly in
various conditions:
a) SIGINT: Keyboard interrupt
b) SIGTERM: kill or system shutdown
c) SIGHUP: Controlling shell exiting
        We handle all of these cases by stopping the taskmaster. It
        turns out that it is very difficult to stop the build process
        by asynchronously throwing an exception such as
        KeyboardInterrupt. For example, the Python Condition
        variables (threading.Condition) and queues do not seem to be
        asynchronous-exception-safe. It would require adding a whole
        bunch of try/finally blocks and except KeyboardInterrupt
        handlers all over the place.
Note also that we have to be careful to handle the case when
SCons forks before executing another process. In that case, we
want the child to exit immediately.
"""
def handler(signum, stack, self=self, parentpid=os.getpid()):
if os.getpid() == parentpid:
self.job.taskmaster.stop()
self.job.interrupted.set()
else:
os._exit(2)
self.old_sigint = signal.signal(signal.SIGINT, handler)
self.old_sigterm = signal.signal(signal.SIGTERM, handler)
try:
self.old_sighup = signal.signal(signal.SIGHUP, handler)
except AttributeError:
pass
def _reset_sig_handler(self):
"""Restore the signal handlers to their previous state (before the
        call to _setup_sig_handler())."""
signal.signal(signal.SIGINT, self.old_sigint)
signal.signal(signal.SIGTERM, self.old_sigterm)
try:
signal.signal(signal.SIGHUP, self.old_sighup)
except AttributeError:
pass
class Serial(object):
"""This class is used to execute tasks in series, and is more efficient
than Parallel, but is only appropriate for non-parallel builds. Only
one instance of this class should be in existence at a time.
This class is not thread safe.
"""
def __init__(self, taskmaster):
"""Create a new serial job given a taskmaster.
The taskmaster's next_task() method should return the next task
that needs to be executed, or None if there are no more tasks. The
taskmaster's executed() method will be called for each task when it
is successfully executed or failed() will be called if it failed to
execute (e.g. execute() raised an exception)."""
self.taskmaster = taskmaster
self.interrupted = InterruptState()
def start(self):
"""Start the job. This will begin pulling tasks from the taskmaster
and executing them, and return when there are no more tasks. If a task
fails to execute (i.e. execute() raises an exception), then the job will
stop."""
while True:
task = self.taskmaster.next_task()
if task is None:
break
try:
task.prepare()
if task.needs_execute():
task.execute()
except:
if self.interrupted():
try:
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
except:
task.exception_set()
else:
task.exception_set()
# Let the failed() callback function arrange for the
# build to stop if that's appropriate.
task.failed()
else:
task.executed()
task.postprocess()
self.taskmaster.cleanup()
# Trap import failure so that everything in the Job module but the
# Parallel class (and its dependent classes) will work if the interpreter
# doesn't support threads.
try:
import queue
import threading
except ImportError:
pass
else:
class Worker(threading.Thread):
"""A worker thread waits on a task to be posted to its request queue,
dequeues the task, executes it, and posts a tuple including the task
and a boolean indicating whether the task executed successfully. """
def __init__(self, requestQueue, resultsQueue, interrupted):
threading.Thread.__init__(self)
self.setDaemon(1)
self.requestQueue = requestQueue
self.resultsQueue = resultsQueue
self.interrupted = interrupted
self.start()
def run(self):
while True:
task = self.requestQueue.get()
if task is None:
# The "None" value is used as a sentinel by
# ThreadPool.cleanup(). This indicates that there
# are no more tasks, so we should quit.
break
try:
if self.interrupted():
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
task.execute()
except:
task.exception_set()
ok = False
else:
ok = True
self.resultsQueue.put((task, ok))
class ThreadPool(object):
"""This class is responsible for spawning and managing worker threads."""
def __init__(self, num, stack_size, interrupted):
"""Create the request and reply queues, and 'num' worker threads.
One must specify the stack size of the worker threads. The
stack size is specified in kilobytes.
"""
self.requestQueue = queue.Queue(0)
self.resultsQueue = queue.Queue(0)
try:
prev_size = threading.stack_size(stack_size*1024)
            except AttributeError as e:
# Only print a warning if the stack size has been
# explicitly set.
                if explicit_stack_size is not None:
msg = "Setting stack size is unsupported by this version of Python:\n " + \
e.args[0]
SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
            except ValueError as e:
msg = "Setting stack size failed:\n " + str(e)
SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
# Create worker threads
self.workers = []
for _ in range(num):
worker = Worker(self.requestQueue, self.resultsQueue, interrupted)
self.workers.append(worker)
if 'prev_size' in locals():
threading.stack_size(prev_size)
def put(self, task):
"""Put task into request queue."""
self.requestQueue.put(task)
def get(self):
"""Remove and return a result tuple from the results queue."""
return self.resultsQueue.get()
def preparation_failed(self, task):
self.resultsQueue.put((task, False))
def cleanup(self):
"""
Shuts down the thread pool, giving each worker thread a
chance to shut down gracefully.
"""
# For each worker thread, put a sentinel "None" value
# on the requestQueue (indicating that there's no work
# to be done) so that each worker thread will get one and
# terminate gracefully.
for _ in self.workers:
self.requestQueue.put(None)
# Wait for all of the workers to terminate.
#
# If we don't do this, later Python versions (2.4, 2.5) often
# seem to raise exceptions during shutdown. This happens
# in requestQueue.get(), as an assertion failure that
# requestQueue.not_full is notified while not acquired,
# seemingly because the main thread has shut down (or is
# in the process of doing so) while the workers are still
# trying to pull sentinels off the requestQueue.
#
# Normally these terminations should happen fairly quickly,
# but we'll stick a one-second timeout on here just in case
# someone gets hung.
for worker in self.workers:
worker.join(1.0)
self.workers = []
class Parallel(object):
"""This class is used to execute tasks in parallel, and is somewhat
less efficient than Serial, but is appropriate for parallel builds.
This class is thread safe.
"""
def __init__(self, taskmaster, num, stack_size):
"""Create a new parallel job given a taskmaster.
The taskmaster's next_task() method should return the next
task that needs to be executed, or None if there are no more
tasks. The taskmaster's executed() method will be called
for each task when it is successfully executed or failed()
will be called if the task failed to execute (i.e. execute()
raised an exception).
Note: calls to taskmaster are serialized, but calls to
execute() on distinct tasks are not serialized, because
that is the whole point of parallel jobs: they can execute
multiple tasks simultaneously. """
self.taskmaster = taskmaster
self.interrupted = InterruptState()
self.tp = ThreadPool(num, stack_size, self.interrupted)
self.maxjobs = num
def start(self):
"""Start the job. This will begin pulling tasks from the
taskmaster and executing them, and return when there are no
more tasks. If a task fails to execute (i.e. execute() raises
an exception), then the job will stop."""
jobs = 0
while True:
# Start up as many available tasks as we're
# allowed to.
while jobs < self.maxjobs:
task = self.taskmaster.next_task()
if task is None:
break
try:
# prepare task for execution
task.prepare()
except:
task.exception_set()
task.failed()
task.postprocess()
else:
if task.needs_execute():
# dispatch task
self.tp.put(task)
jobs = jobs + 1
else:
task.executed()
task.postprocess()
if not task and not jobs: break
# Let any/all completed tasks finish up before we go
# back and put the next batch of tasks on the queue.
while True:
task, ok = self.tp.get()
jobs = jobs - 1
if ok:
task.executed()
else:
if self.interrupted():
try:
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
except:
task.exception_set()
# Let the failed() callback function arrange
# for the build to stop if that's appropriate.
task.failed()
task.postprocess()
if self.tp.resultsQueue.empty():
break
self.tp.cleanup()
self.taskmaster.cleanup()
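# Hedged usage sketch (not part of SCons): how the Jobs front-end drives a
# taskmaster.  _ExampleTask and _ExampleTaskmaster are hypothetical stand-ins
# that implement just the methods Serial and Parallel call above.
class _ExampleTask(object):
    def __init__(self, name):
        self.targets = [name]
    def prepare(self): pass
    def needs_execute(self): return True
    def execute(self): pass
    def executed(self): pass
    def failed(self): pass
    def exception_set(self): pass
    def postprocess(self): pass

class _ExampleTaskmaster(object):
    def __init__(self, names):
        self._pending = [_ExampleTask(n) for n in names]
    def next_task(self):
        if self._pending:
            return self._pending.pop(0)
        return None
    def stop(self):
        self._pending = []
    def cleanup(self): pass

# Jobs(1, tm) runs the tasks serially; Jobs(4, tm) dispatches them through the
# thread pool when the interpreter supports threading:
#
#     jobs = Jobs(4, _ExampleTaskmaster(['a.o', 'b.o']))
#     jobs.run()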
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci
def nearest_neighbor_interp_mkldnn_np(X,
out_h,
out_w,
out_size=None,
actual_shape=None,
data_layout='NCHW'):
"""nearest neighbor interpolation implement in shape [N, C, H, W]"""
if data_layout == "NHWC":
X = np.transpose(X, (0, 3, 1, 2)) # NHWC => NCHW
if out_size is not None:
out_h = out_size[0]
out_w = out_size[1]
if actual_shape is not None:
out_h = actual_shape[0]
out_w = actual_shape[1]
n, c, in_h, in_w = X.shape
fh = fw = 0.0
if (out_h > 1):
fh = out_h * 1.0 / in_h
if (out_w > 1):
fw = out_w * 1.0 / in_w
out = np.zeros((n, c, out_h, out_w))
for oh in range(out_h):
ih = int(round((oh + 0.5) / fh - 0.5))
for ow in range(out_w):
iw = int(round((ow + 0.5) / fw - 0.5))
out[:, :, oh, ow] = X[:, :, ih, iw]
if data_layout == "NHWC":
out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC
return out.astype(X.dtype)
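# Illustrative sketch (not part of the test suite): with the reference
# implementation above, upsampling a 2x2 map to 4x4 repeats each source pixel
# in a 2x2 block, e.g.
#
#     x = np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2)
#     y = nearest_neighbor_interp_mkldnn_np(x, 4, 4)
#     # y[0, 0] == [[0., 0., 1., 1.],
#     #             [0., 0., 1., 1.],
#     #             [2., 2., 3., 3.],
#     #             [2., 2., 3., 3.]]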
@skip_check_grad_ci(reason="Have not implemented interpolate grad kernel.")
class TestNearestInterpMKLDNNOp(OpTest):
def init_test_case(self):
pass
def init_data_type(self):
pass
def setUp(self):
self.op_type = "nearest_interp"
self.interp_method = 'nearest'
self._cpu_only = True
self.use_mkldnn = True
self.input_shape = [1, 1, 2, 2]
self.data_layout = 'NCHW'
self.dtype = np.float32
# priority: actual_shape > out_size > scale > out_h & out_w
self.out_h = 1
self.out_w = 1
self.scale = 2.0
self.out_size = None
self.actual_shape = None
self.init_test_case()
self.init_data_type()
if self.dtype == np.float32:
input_np = np.random.random(self.input_shape).astype(self.dtype)
else:
init_low, init_high = (-5, 5) if self.dtype == np.int8 else (0, 10)
input_np = np.random.randint(init_low, init_high,
self.input_shape).astype(self.dtype)
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
if self.scale > 0:
out_h = int(in_h * self.scale)
out_w = int(in_w * self.scale)
else:
out_h = self.out_h
out_w = self.out_w
output_np = nearest_neighbor_interp_mkldnn_np(
input_np, out_h, out_w, self.out_size, self.actual_shape,
self.data_layout)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.attrs = {
'interp_method': self.interp_method,
'out_h': self.out_h,
'out_w': self.out_w,
'scale': self.scale,
'data_layout': self.data_layout,
'use_mkldnn': self.use_mkldnn
}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_dygraph=False)
class TestNearestInterpOpMKLDNNNHWC(TestNearestInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [3, 2, 32, 16]
self.out_h = 27
self.out_w = 49
self.scale = 2.0
self.data_layout = 'NHWC'
class TestNearestNeighborInterpMKLDNNCase2(TestNearestInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 1.
class TestNearestNeighborInterpCase3(TestNearestInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 128
self.scale = 0.
class TestNearestNeighborInterpCase4(TestNearestInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.out_size = np.array([65, 129]).astype("int32")
class TestNearestNeighborInterpSame(TestNearestInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [2, 3, 32, 64]
self.out_h = 32
self.out_w = 64
self.scale = 0.
def create_test_class(parent):
    class TestFp32Case(parent):
        def init_data_type(self):
            self.dtype = np.float32
    class TestInt8Case(parent):
        def init_data_type(self):
            self.dtype = np.int8
    class TestUint8Case(parent):
        def init_data_type(self):
            self.dtype = np.uint8
    # Register each generated case under its own name so unittest collects all
    # three data types; reusing parent.__name__ would keep only the last class.
    TestFp32Case.__name__ = parent.__name__ + "Fp32"
    TestInt8Case.__name__ = parent.__name__ + "Int8"
    TestUint8Case.__name__ = parent.__name__ + "Uint8"
    globals()[TestFp32Case.__name__] = TestFp32Case
    globals()[TestInt8Case.__name__] = TestInt8Case
    globals()[TestUint8Case.__name__] = TestUint8Case
create_test_class(TestNearestInterpMKLDNNOp)
create_test_class(TestNearestInterpOpMKLDNNNHWC)
create_test_class(TestNearestNeighborInterpMKLDNNCase2)
create_test_class(TestNearestNeighborInterpCase3)
create_test_class(TestNearestNeighborInterpCase4)
create_test_class(TestNearestNeighborInterpSame)
if __name__ == "__main__":
from paddle import enable_static
enable_static()
unittest.main()
|
|
"""The Hyperion component."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from contextlib import suppress
import logging
from typing import Any, cast
from awesomeversion import AwesomeVersion
from hyperion import client, const as hyperion_const
from homeassistant.components.camera.const import DOMAIN as CAMERA_DOMAIN
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_TOKEN
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.typing import ConfigType
from .const import (
CONF_INSTANCE_CLIENTS,
CONF_ON_UNLOAD,
CONF_ROOT_CLIENT,
DEFAULT_NAME,
DOMAIN,
HYPERION_RELEASES_URL,
HYPERION_VERSION_WARN_CUTOFF,
SIGNAL_INSTANCE_ADD,
SIGNAL_INSTANCE_REMOVE,
)
PLATFORMS = [LIGHT_DOMAIN, SWITCH_DOMAIN, CAMERA_DOMAIN]
_LOGGER = logging.getLogger(__name__)
# Unique ID
# =========
# A config entry represents a connection to a single Hyperion server. The config entry
# unique_id is the server id returned from the Hyperion instance (a unique ID per
# server).
#
# Each server connection may create multiple entities. The unique_id for each entity is
# <server id>_<instance #>_<name>, where <server_id> will be the unique_id on the
# relevant config entry (as above), <instance #> will be the server instance # and
# <name> will be a unique identifying type name for each entity associated with this
# server/instance (e.g. "hyperion_light").
#
# The get_hyperion_unique_id method will create a per-entity unique id when given the
# server id, an instance number and a name.
# hass.data format
# ================
#
# hass.data[DOMAIN] = {
# <config_entry.entry_id>: {
# "ROOT_CLIENT": <Hyperion Client>,
# "ON_UNLOAD": [<callable>, ...],
# }
# }
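#
# Illustrative sketch (hypothetical platform code, not part of this module): a
# platform would typically read the shared clients back out as
#
#     entry_data = hass.data[DOMAIN][config_entry.entry_id]
#     instance_client = entry_data[CONF_INSTANCE_CLIENTS].get(instance_num)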
def get_hyperion_unique_id(server_id: str, instance: int, name: str) -> str:
"""Get a unique_id for a Hyperion instance."""
return f"{server_id}_{instance}_{name}"
def get_hyperion_device_id(server_id: str, instance: int) -> str:
"""Get an id for a Hyperion device/instance."""
return f"{server_id}_{instance}"
def split_hyperion_unique_id(unique_id: str) -> tuple[str, int, str] | None:
"""Split a unique_id into a (server_id, instance, type) tuple."""
data = tuple(unique_id.split("_", 2))
if len(data) != 3:
return None
try:
return (data[0], int(data[1]), data[2])
except ValueError:
return None
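# Illustrative sketch (not part of the integration): the helpers above round-trip,
# e.g.
#
#     get_hyperion_unique_id("abc123", 1, "hyperion_light")
#         -> "abc123_1_hyperion_light"
#     split_hyperion_unique_id("abc123_1_hyperion_light")
#         -> ("abc123", 1, "hyperion_light")
#
# Malformed unique_ids (too few parts or a non-integer instance) yield None.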
def create_hyperion_client(
*args: Any,
**kwargs: Any,
) -> client.HyperionClient:
"""Create a Hyperion Client."""
return client.HyperionClient(*args, **kwargs)
async def async_create_connect_hyperion_client(
*args: Any,
**kwargs: Any,
) -> client.HyperionClient | None:
"""Create and connect a Hyperion Client."""
hyperion_client = create_hyperion_client(*args, **kwargs)
if not await hyperion_client.async_client_connect():
return None
return hyperion_client
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Hyperion component."""
hass.data[DOMAIN] = {}
return True
@callback
def listen_for_instance_updates(
hass: HomeAssistant,
config_entry: ConfigEntry,
add_func: Callable,
remove_func: Callable,
) -> None:
"""Listen for instance additions/removals."""
hass.data[DOMAIN][config_entry.entry_id][CONF_ON_UNLOAD].extend(
[
async_dispatcher_connect(
hass,
SIGNAL_INSTANCE_ADD.format(config_entry.entry_id),
add_func,
),
async_dispatcher_connect(
hass,
SIGNAL_INSTANCE_REMOVE.format(config_entry.entry_id),
remove_func,
),
]
)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Hyperion from a config entry."""
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
token = entry.data.get(CONF_TOKEN)
hyperion_client = await async_create_connect_hyperion_client(
host, port, token=token, raw_connection=True
)
# Client won't connect? => Not ready.
if not hyperion_client:
raise ConfigEntryNotReady
version = await hyperion_client.async_sysinfo_version()
if version is not None:
with suppress(ValueError):
if AwesomeVersion(version) < AwesomeVersion(HYPERION_VERSION_WARN_CUTOFF):
_LOGGER.warning(
"Using a Hyperion server version < %s is not recommended -- "
"some features may be unavailable or may not function correctly. "
"Please consider upgrading: %s",
HYPERION_VERSION_WARN_CUTOFF,
HYPERION_RELEASES_URL,
)
# Client needs authentication, but no token provided? => Reauth.
auth_resp = await hyperion_client.async_is_auth_required()
if (
auth_resp is not None
and client.ResponseOK(auth_resp)
and auth_resp.get(hyperion_const.KEY_INFO, {}).get(
hyperion_const.KEY_REQUIRED, False
)
and token is None
):
await hyperion_client.async_client_disconnect()
raise ConfigEntryAuthFailed
# Client login doesn't work? => Reauth.
if not await hyperion_client.async_client_login():
await hyperion_client.async_client_disconnect()
raise ConfigEntryAuthFailed
# Cannot switch instance or cannot load state? => Not ready.
if (
not await hyperion_client.async_client_switch_instance()
or not client.ServerInfoResponseOK(await hyperion_client.async_get_serverinfo())
):
await hyperion_client.async_client_disconnect()
raise ConfigEntryNotReady
# We need 1 root client (to manage instances being removed/added) and then 1 client
# per Hyperion server instance which is shared for all entities associated with
# that instance.
hass.data[DOMAIN][entry.entry_id] = {
CONF_ROOT_CLIENT: hyperion_client,
CONF_INSTANCE_CLIENTS: {},
CONF_ON_UNLOAD: [],
}
async def async_instances_to_clients(response: dict[str, Any]) -> None:
"""Convert instances to Hyperion clients."""
if not response or hyperion_const.KEY_DATA not in response:
return
await async_instances_to_clients_raw(response[hyperion_const.KEY_DATA])
async def async_instances_to_clients_raw(instances: list[dict[str, Any]]) -> None:
"""Convert instances to Hyperion clients."""
device_registry = dr.async_get(hass)
running_instances: set[int] = set()
stopped_instances: set[int] = set()
existing_instances = hass.data[DOMAIN][entry.entry_id][CONF_INSTANCE_CLIENTS]
server_id = cast(str, entry.unique_id)
# In practice, an instance can be in 3 states as seen by this function:
#
# * Exists, and is running: Should be present in HASS/registry.
        # * Exists, but is not running: Cannot add it yet, but an entity may
        #   have been registered from a previous time it was running.
# * No longer exists at all: Should not be present in HASS/registry.
# Add instances that are missing.
for instance in instances:
instance_num = instance.get(hyperion_const.KEY_INSTANCE)
if instance_num is None:
continue
if not instance.get(hyperion_const.KEY_RUNNING, False):
stopped_instances.add(instance_num)
continue
running_instances.add(instance_num)
if instance_num in existing_instances:
continue
hyperion_client = await async_create_connect_hyperion_client(
host, port, instance=instance_num, token=token
)
if not hyperion_client:
continue
existing_instances[instance_num] = hyperion_client
instance_name = instance.get(hyperion_const.KEY_FRIENDLY_NAME, DEFAULT_NAME)
async_dispatcher_send(
hass,
SIGNAL_INSTANCE_ADD.format(entry.entry_id),
instance_num,
instance_name,
)
        # Remove entities that are not running instances on Hyperion.
for instance_num in set(existing_instances) - running_instances:
del existing_instances[instance_num]
async_dispatcher_send(
hass, SIGNAL_INSTANCE_REMOVE.format(entry.entry_id), instance_num
)
        # Ensure every device associated with this config entry is still a known
        # Hyperion instance, otherwise remove the device (and thus entities).
known_devices = {
get_hyperion_device_id(server_id, instance_num)
for instance_num in running_instances | stopped_instances
}
for device_entry in dr.async_entries_for_config_entry(
device_registry, entry.entry_id
):
for (kind, key) in device_entry.identifiers:
if kind == DOMAIN and key in known_devices:
break
else:
device_registry.async_remove_device(device_entry.id)
hyperion_client.set_callbacks(
{
f"{hyperion_const.KEY_INSTANCE}-{hyperion_const.KEY_UPDATE}": async_instances_to_clients,
}
)
async def setup_then_listen() -> None:
await asyncio.gather(
*(
hass.config_entries.async_forward_entry_setup(entry, platform)
for platform in PLATFORMS
)
)
assert hyperion_client
if hyperion_client.instances is not None:
await async_instances_to_clients_raw(hyperion_client.instances)
hass.data[DOMAIN][entry.entry_id][CONF_ON_UNLOAD].append(
entry.add_update_listener(_async_entry_updated)
)
hass.async_create_task(setup_then_listen())
return True
async def _async_entry_updated(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Handle entry updates."""
await hass.config_entries.async_reload(config_entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
if unload_ok and config_entry.entry_id in hass.data[DOMAIN]:
config_data = hass.data[DOMAIN].pop(config_entry.entry_id)
for func in config_data[CONF_ON_UNLOAD]:
func()
# Disconnect the shared instance clients.
await asyncio.gather(
*(
config_data[CONF_INSTANCE_CLIENTS][
instance_num
].async_client_disconnect()
for instance_num in config_data[CONF_INSTANCE_CLIENTS]
)
)
# Disconnect the root client.
root_client = config_data[CONF_ROOT_CLIENT]
await root_client.async_client_disconnect()
return unload_ok
|
|
from __future__ import print_function, division
from sympy.core.basic import C
from sympy.core.expr import Expr
from sympy.core.relational import Eq
from sympy.core.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Wild, Symbol)
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, xrange
from sympy.core.containers import Tuple
from sympy.functions.elementary.piecewise import piecewise_fold, Piecewise
from sympy.utilities import flatten
def _process_limits(*symbols):
"""Process the list of symbols and convert them to canonical limits,
storing them as Tuple(symbol, lower, upper). The orientation of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
"""
limits = []
orientation = 1
for V in symbols:
if isinstance(V, Symbol):
limits.append(Tuple(V))
continue
elif is_sequence(V, Tuple):
V = sympify(flatten(V))
if V[0].is_Symbol:
newsymbol = V[0]
if len(V) == 2 and isinstance(V[1], Interval):
V[1:] = [V[1].start, V[1].end]
if len(V) == 3:
if V[1] is None and V[2] is not None:
nlim = [V[2]]
elif V[1] is not None and V[2] is None:
orientation *= -1
nlim = [V[1]]
elif V[1] is None and V[2] is None:
nlim = []
else:
nlim = V[1:]
limits.append(Tuple(newsymbol, *nlim ))
continue
elif len(V) == 1 or (len(V) == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif len(V) == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, orientation
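# Illustrative sketch (not part of sympy): the canonical Tuple limits produced
# above, written as plain tuples, assuming x = Symbol('x'):
#
#     _process_limits(x)          -> ([(x,)], 1)
#     _process_limits((x, 1, 3))  -> ([(x, 1, 3)], 1)
#
# When a 3-tuple is given with a missing upper bound, the remaining bound is
# kept and the returned orientation is flipped to -1 (see the branch above).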
class ExprWithLimits(Expr):
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
function = sympify(function)
if hasattr(function, 'func') and function.func is C.Equality:
lhs = function.lhs
rhs = function.rhs
return C.Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
function = piecewise_fold(function)
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
"specify dummy variables for %s" % function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
# Only limits with lower and upper bounds are supported; the indefinite form
# is not supported
if any(len(l) != 3 or None in l for l in limits):
raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def function(self):
"""Return the function applied across limits.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x
>>> Integral(x**2, (x,)).function
x**2
See Also
========
limits, variables, free_symbols
"""
return self._args[0]
@property
def limits(self):
"""Return the limits of expression.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).limits
((i, 1, 3),)
See Also
========
function, variables, free_symbols
"""
return self._args[1:]
@property
def variables(self):
"""Return a list of the dummy variables
>>> from sympy import Sum
>>> from sympy.abc import x, i
>>> Sum(x**i, (i, 1, 3)).variables
[i]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
if self.function.is_zero:
return set()
return self._free_symbols()
def as_dummy(self):
"""
see _as_dummy() for documentation
"""
return self._as_dummy()
def _free_symbols(self):
"""
This method returns the symbols that will exist when the object is
evaluated. This is useful if one is trying to determine whether the
        object contains a certain symbol or not.
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (x, y, 1)).free_symbols
set([y])
"""
function, limits = self.function, self.limits
if function.is_zero:
return set()
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
def _as_dummy(self):
"""
Replace instances of the given dummy variables with explicit dummy
counterparts to make clear what are dummy variables and what
are real-world symbols in an object.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
Integral(_x, (_x, x, _y), (_y, x, y))
        If the object supports the "integral at" limit ``(x,)`` it
is not treated as a dummy, but the explicit form, ``(x, x)``
of length 2 does treat the variable as a dummy.
>>> Integral(x, x).as_dummy()
Integral(x, x)
>>> Integral(x, (x, x)).as_dummy()
Integral(_x, (_x, x))
        If there were no dummies in the original expression, then the
        symbols which cannot be changed by subs() are clearly seen as
those with an underscore prefix.
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the integration variable
"""
reps = {}
f = self.function
limits = list(self.limits)
for i in xrange(-1, -len(limits) - 1, -1):
xab = list(limits[i])
if len(xab) == 1:
continue
x = xab[0]
xab[0] = x.as_dummy()
for j in range(1, len(xab)):
xab[j] = xab[j].subs(reps)
reps[x] = xab[0]
limits[i] = xab
f = f.subs(reps)
return self.func(f, *limits)
def _eval_subs(self, old, new):
"""
Perform substitutions over non-dummy variables
of an expression with limits. Also, can be used
to specify point-evaluation of an abstract antiderivative.
Examples
========
>>> from sympy import Sum, oo
>>> from sympy.abc import s,n
>>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
Sum(n**(-2), (n, 1, oo))
>>> from sympy import Integral
>>> from sympy.abc import x,a
>>> Integral(a*x**2,x).subs(x,4)
Integral(a*x**2, (x, 4))
See Also
========
variables : Lists the integration variables
        transform : Perform mapping on the dummy variable for integrals
change_index : Perform mapping on the sum and product dummy variables
"""
func, limits = self.function, list(self.limits)
# If one of the expressions we are replacing is used as a func index
# one of two things happens.
# - the old variable first appears as a free variable
# so we perform all free substitutions before it becomes
# a func index.
# - the old variable first appears as a func index, in
        #   which case we ignore it. See change_index.
# Reorder limits to match standard mathematical practice for scoping
limits.reverse()
if not isinstance(old, C.Symbol) or \
old.free_symbols.intersection(self.free_symbols):
sub_into_func = True
for i, xab in enumerate(limits):
if 1 == len(xab) and old == xab[0]:
xab = (old, old)
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
sub_into_func = False
break
if isinstance(old,C.AppliedUndef) or isinstance(old,C.UndefinedFunction):
sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
sy1 = set(self.variables).intersection(set(old.args))
if not sy2.issubset(sy1):
raise ValueError(
"substitution can not create dummy dependencies")
sub_into_func = True
if sub_into_func:
func = func.subs(old, new)
else:
# old is a Symbol and a dummy variable of some limit
for i, xab in enumerate(limits):
if len(xab) == 3:
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if old == xab[0]:
break
# simplify redundant limits (x, x) to (x, )
for i, xab in enumerate(limits):
if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
limits[i] = Tuple(xab[0], )
# Reorder limits back to representation-form
limits.reverse()
return self.func(func, *limits)
class AddWithLimits(ExprWithLimits):
r"""Represents unevaluated oriented additions.
Parent class for Integral and Sum.
"""
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
#
# This constructor only differs from ExprWithLimits
# in the application of the orientation variable. Perhaps merge?
function = sympify(function)
if hasattr(function, 'func') and function.func is C.Equality:
lhs = function.lhs
rhs = function.rhs
return C.Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
function = piecewise_fold(function)
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
"specify dummy variables for %s" % function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
obj = Expr.__new__(cls, **assumptions)
arglist = [orientation*function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
def _eval_adjoint(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.conjugate(), *self.limits)
return None
def _eval_transpose(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_factor(self, **hints):
summand = self.function.factor(**hints)
keep_inside = []
pull_outside = []
if summand.is_Mul and summand.is_commutative:
for i in summand.args:
if not i.atoms(C.Symbol).intersection(self.variables):
pull_outside.append(i)
else:
keep_inside.append(i)
return C.Mul(*pull_outside) * self.func(C.Mul(*keep_inside), *self.limits)
return self
def _eval_expand_basic(self, **hints):
summand = self.function.expand(**hints)
if summand.is_Add and summand.is_commutative:
return C.Add(*[ self.func(i, *self.limits) for i in summand.args ])
elif summand != self.function:
return self.func(summand, *self.limits)
return self
|
|
#! /usr/bin/env python
# $Id: test_inline_markup.py 5642 2008-09-05 18:18:28Z goodger $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for inline markup in docutils/parsers/rst/states.py.
Interpreted text tests are in a separate module, test_interpreted.py.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
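# Each entry below is an [rst_input, expected_pseudo_xml] pair; the generated
# tests parse the input and compare the document tree against the expected text.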
totest['emphasis'] = [
["""\
*emphasis*
""",
"""\
<document source="test data">
<paragraph>
<emphasis>
emphasis
"""],
[u"""\
l'*emphasis* and l\u2019*emphasis* with apostrophe
""",
u"""\
<document source="test data">
<paragraph>
l'
<emphasis>
emphasis
and l\u2019
<emphasis>
emphasis
with apostrophe
"""],
["""\
*emphasized sentence
across lines*
""",
"""\
<document source="test data">
<paragraph>
<emphasis>
emphasized sentence
across lines
"""],
["""\
*emphasis without closing asterisk
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
*
emphasis without closing asterisk
<system_message backrefs="id2" ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline emphasis start-string without end-string.
"""],
["""\
'*emphasis*' and 1/*emphasis*/2 and 3-*emphasis*-4 and 5:*emphasis*:6
but not '*' or '"*"' or x*2* or 2*x* or \\*args or *
or *the\\* *stars\\\\\\* *inside*
(however, '*args' will trigger a warning and may be problematic)
what about *this**?
""",
"""\
<document source="test data">
<paragraph>
'
<emphasis>
emphasis
' and 1/
<emphasis>
emphasis
/2 and 3-
<emphasis>
emphasis
-4 and 5:
<emphasis>
emphasis
:6
but not '*' or '"*"' or x*2* or 2*x* or *args or *
or \n\
<emphasis>
the* *stars\* *inside
<paragraph>
(however, '
<problematic ids="id2" refid="id1">
*
args' will trigger a warning and may be problematic)
<system_message backrefs="id2" ids="id1" level="2" line="5" source="test data" type="WARNING">
<paragraph>
Inline emphasis start-string without end-string.
<paragraph>
what about \n\
<emphasis>
this*
?
"""],
["""\
Emphasized asterisk: *\\**
Emphasized double asterisk: *\\***
""",
"""\
<document source="test data">
<paragraph>
Emphasized asterisk: \n\
<emphasis>
*
<paragraph>
Emphasized double asterisk: \n\
<emphasis>
**
"""],
]
totest['strong'] = [
["""\
**strong**
""",
"""\
<document source="test data">
<paragraph>
<strong>
strong
"""],
[u"""\
l'**strong** and l\u2019**strong** with apostrophe
""",
u"""\
<document source="test data">
<paragraph>
l'
<strong>
strong
and l\u2019
<strong>
strong
with apostrophe
"""],
[u"""\
quoted '**strong**', quoted "**strong**",
quoted \u2018**strong**\u2019, quoted \u201c**strong**\u201d,
quoted \xab**strong**\xbb
""",
u"""\
<document source="test data">
<paragraph>
quoted '
<strong>
strong
', quoted "
<strong>
strong
",
quoted \u2018
<strong>
strong
\u2019, quoted \u201c
<strong>
strong
\u201d,
quoted \xab
<strong>
strong
\xbb
"""],
["""\
(**strong**) but not (**) or '(** ' or x**2 or \\**kwargs or **
(however, '**kwargs' will trigger a warning and may be problematic)
""",
"""\
<document source="test data">
<paragraph>
(
<strong>
strong
) but not (**) or '(** ' or x**2 or **kwargs or **
<paragraph>
(however, '
<problematic ids="id2" refid="id1">
**
kwargs' will trigger a warning and may be problematic)
<system_message backrefs="id2" ids="id1" level="2" line="3" source="test data" type="WARNING">
<paragraph>
Inline strong start-string without end-string.
"""],
["""\
Strong asterisk: *****
Strong double asterisk: ******
""",
"""\
<document source="test data">
<paragraph>
Strong asterisk: \n\
<strong>
*
<paragraph>
Strong double asterisk: \n\
<strong>
**
"""],
["""\
**strong without closing asterisks
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
**
strong without closing asterisks
<system_message backrefs="id2" ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline strong start-string without end-string.
"""],
]
totest['literal'] = [
["""\
``literal``
""",
"""\
<document source="test data">
<paragraph>
<literal>
literal
"""],
["""\
``\\literal``
""",
"""\
<document source="test data">
<paragraph>
<literal>
\\literal
"""],
["""\
``lite\\ral``
""",
"""\
<document source="test data">
<paragraph>
<literal>
lite\\ral
"""],
["""\
``literal\\``
""",
"""\
<document source="test data">
<paragraph>
<literal>
literal\\
"""],
[u"""\
l'``literal`` and l\u2019``literal`` with apostrophe
""",
u"""\
<document source="test data">
<paragraph>
l'
<literal>
literal
and l\u2019
<literal>
literal
with apostrophe
"""],
[u"""\
quoted '``literal``', quoted "``literal``",
quoted \u2018``literal``\u2019, quoted \u201c``literal``\u201d,
quoted \xab``literal``\xbb
""",
u"""\
<document source="test data">
<paragraph>
quoted '
<literal>
literal
', quoted "
<literal>
literal
",
quoted \u2018
<literal>
literal
\u2019, quoted \u201c
<literal>
literal
\u201d,
quoted \xab
<literal>
literal
\xbb
"""],
[u"""\
``'literal'`` with quotes, ``"literal"`` with quotes,
``\u2018literal\u2019`` with quotes, ``\u201cliteral\u201d`` with quotes,
``\xabliteral\xbb`` with quotes
""",
u"""\
<document source="test data">
<paragraph>
<literal>
'literal'
with quotes,
<literal>
"literal"
with quotes,
<literal>
\u2018literal\u2019
with quotes,
<literal>
\u201cliteral\u201d
with quotes,
<literal>
\xabliteral\xbb
with quotes
"""],
["""\
``literal ``TeX quotes'' & \\backslash`` but not "``" or ``
(however, ``standalone TeX quotes'' will trigger a warning
and may be problematic)
""",
"""\
<document source="test data">
<paragraph>
<literal>
literal ``TeX quotes'' & \\backslash
but not "``" or ``
<paragraph>
(however, \n\
<problematic ids="id2" refid="id1">
``
standalone TeX quotes'' will trigger a warning
and may be problematic)
<system_message backrefs="id2" ids="id1" level="2" line="3" source="test data" type="WARNING">
<paragraph>
Inline literal start-string without end-string.
"""],
["""\
Find the ```interpreted text``` in this paragraph!
""",
"""\
<document source="test data">
<paragraph>
Find the \n\
<literal>
`interpreted text`
in this paragraph!
"""],
["""\
``literal without closing backquotes
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
``
literal without closing backquotes
<system_message backrefs="id2" ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline literal start-string without end-string.
"""],
["""\
Python ``list``\\s use square bracket syntax.
""",
"""\
<document source="test data">
<paragraph>
Python \n\
<literal>
list
s use square bracket syntax.
"""],
]
totest['references'] = [
["""\
ref_
""",
"""\
<document source="test data">
<paragraph>
<reference name="ref" refname="ref">
ref
"""],
[u"""\
l'ref_ and l\u2019ref_ with apostrophe
""",
u"""\
<document source="test data">
<paragraph>
l'
<reference name="ref" refname="ref">
ref
and l\u2019
<reference name="ref" refname="ref">
ref
with apostrophe
"""],
[u"""\
quoted 'ref_', quoted "ref_",
quoted \u2018ref_\u2019, quoted \u201cref_\u201d,
quoted \xabref_\xbb,
but not 'ref ref'_, "ref ref"_, \u2018ref ref\u2019_,
\u201cref ref\u201d_, or \xabref ref\xbb_
""",
u"""\
<document source="test data">
<paragraph>
quoted '
<reference name="ref" refname="ref">
ref
', quoted "
<reference name="ref" refname="ref">
ref
",
quoted \u2018
<reference name="ref" refname="ref">
ref
\u2019, quoted \u201c
<reference name="ref" refname="ref">
ref
\u201d,
quoted \xab
<reference name="ref" refname="ref">
ref
\xbb,
but not 'ref ref'_, "ref ref"_, \u2018ref ref\u2019_,
\u201cref ref\u201d_, or \xabref ref\xbb_
"""],
["""\
ref__
""",
"""\
<document source="test data">
<paragraph>
<reference anonymous="1" name="ref">
ref
"""],
[u"""\
l'ref__ and l\u2019ref__ with apostrophe
""",
u"""\
<document source="test data">
<paragraph>
l'
<reference anonymous="1" name="ref">
ref
and l\u2019
<reference anonymous="1" name="ref">
ref
with apostrophe
"""],
[u"""\
quoted 'ref__', quoted "ref__",
quoted \u2018ref__\u2019, quoted \u201cref__\u201d,
quoted \xabref__\xbb,
but not 'ref ref'__, "ref ref"__, \u2018ref ref\u2019__,
\u201cref ref\u201d__, or \xabref ref\xbb__
""",
u"""\
<document source="test data">
<paragraph>
quoted '
<reference anonymous="1" name="ref">
ref
', quoted "
<reference anonymous="1" name="ref">
ref
",
quoted \u2018
<reference anonymous="1" name="ref">
ref
\u2019, quoted \u201c
<reference anonymous="1" name="ref">
ref
\u201d,
quoted \xab
<reference anonymous="1" name="ref">
ref
\xbb,
but not 'ref ref'__, "ref ref"__, \u2018ref ref\u2019__,
\u201cref ref\u201d__, or \xabref ref\xbb__
"""],
["""\
ref_, r_, r_e-f_, -ref_, and anonymousref__,
but not _ref_ or __attr__ or object.__attr__
""",
"""\
<document source="test data">
<paragraph>
<reference name="ref" refname="ref">
ref
, \n\
<reference name="r" refname="r">
r
, \n\
<reference name="r_e-f" refname="r_e-f">
r_e-f
, -
<reference name="ref" refname="ref">
ref
, and \n\
<reference anonymous="1" name="anonymousref">
anonymousref
,
but not _ref_ or __attr__ or object.__attr__
"""],
]
totest['phrase_references'] = [
["""\
`phrase reference`_
""",
"""\
<document source="test data">
<paragraph>
<reference name="phrase reference" refname="phrase reference">
phrase reference
"""],
[u"""\
l'`phrase reference`_ and l\u2019`phrase reference`_ with apostrophe
""",
u"""\
<document source="test data">
<paragraph>
l'
<reference name="phrase reference" refname="phrase reference">
phrase reference
and l\u2019
<reference name="phrase reference" refname="phrase reference">
phrase reference
with apostrophe
"""],
[u"""\
quoted '`phrase reference`_', quoted "`phrase reference`_",
quoted \u2018`phrase reference`_\u2019,
quoted \u201c`phrase reference`_\u201d,
quoted \xab`phrase reference`_\xbb
""",
u"""\
<document source="test data">
<paragraph>
quoted '
<reference name="phrase reference" refname="phrase reference">
phrase reference
', quoted "
<reference name="phrase reference" refname="phrase reference">
phrase reference
",
quoted \u2018
<reference name="phrase reference" refname="phrase reference">
phrase reference
\u2019,
quoted \u201c
<reference name="phrase reference" refname="phrase reference">
phrase reference
\u201d,
quoted \xab
<reference name="phrase reference" refname="phrase reference">
phrase reference
\xbb
"""],
[u"""\
`'phrase reference'`_ with quotes, `"phrase reference"`_ with quotes,
`\u2018phrase reference\u2019`_ with quotes,
`\u201cphrase reference\u201d`_ with quotes,
`\xabphrase reference\xbb`_ with quotes
""",
u"""\
<document source="test data">
<paragraph>
<reference name="'phrase reference'" refname="'phrase reference'">
'phrase reference'
with quotes,
<reference name=""phrase reference"" refname=""phrase reference"">
"phrase reference"
with quotes,
<reference name="\u2018phrase reference\u2019" refname="\u2018phrase reference\u2019">
\u2018phrase reference\u2019
with quotes,
<reference name="\u201cphrase reference\u201d" refname="\u201cphrase reference\u201d">
\u201cphrase reference\u201d
with quotes,
<reference name="\xabphrase reference\xbb" refname="\xabphrase reference\xbb">
\xabphrase reference\xbb
with quotes
"""],
["""\
`anonymous reference`__
""",
"""\
<document source="test data">
<paragraph>
<reference anonymous="1" name="anonymous reference">
anonymous reference
"""],
[u"""\
l'`anonymous reference`__ and l\u2019`anonymous reference`__ with apostrophe
""",
u"""\
<document source="test data">
<paragraph>
l'
<reference anonymous="1" name="anonymous reference">
anonymous reference
and l\u2019
<reference anonymous="1" name="anonymous reference">
anonymous reference
with apostrophe
"""],
[u"""\
quoted '`anonymous reference`__', quoted "`anonymous reference`__",
quoted \u2018`anonymous reference`__\u2019,
quoted \u201c`anonymous reference`__\u201d,
quoted \xab`anonymous reference`__\xbb
""",
u"""\
<document source="test data">
<paragraph>
quoted '
<reference anonymous="1" name="anonymous reference">
anonymous reference
', quoted "
<reference anonymous="1" name="anonymous reference">
anonymous reference
",
quoted \u2018
<reference anonymous="1" name="anonymous reference">
anonymous reference
\u2019,
quoted \u201c
<reference anonymous="1" name="anonymous reference">
anonymous reference
\u201d,
quoted \xab
<reference anonymous="1" name="anonymous reference">
anonymous reference
\xbb
"""],
[u"""\
`'anonymous reference'`__ with quotes, `"anonymous reference"`__ with quotes,
`\u2018anonymous reference\u2019`__ with quotes,
`\u201canonymous reference\u201d`__ with quotes,
`\xabanonymous reference\xbb`__ with quotes
""",
u"""\
<document source="test data">
<paragraph>
<reference anonymous="1" name="'anonymous reference'">
'anonymous reference'
with quotes,
<reference anonymous="1" name=""anonymous reference"">
"anonymous reference"
with quotes,
<reference anonymous="1" name="\u2018anonymous reference\u2019">
\u2018anonymous reference\u2019
with quotes,
<reference anonymous="1" name="\u201canonymous reference\u201d">
\u201canonymous reference\u201d
with quotes,
<reference anonymous="1" name="\xabanonymous reference\xbb">
\xabanonymous reference\xbb
with quotes
"""],
["""\
`phrase reference
across lines`_
""",
"""\
<document source="test data">
<paragraph>
<reference name="phrase reference across lines" refname="phrase reference across lines">
phrase reference
across lines
"""],
["""\
`phrase\`_ reference`_
""",
"""\
<document source="test data">
<paragraph>
<reference name="phrase`_ reference" refname="phrase`_ reference">
phrase`_ reference
"""],
["""\
Invalid phrase reference:
:role:`phrase reference`_
""",
"""\
<document source="test data">
<paragraph>
Invalid phrase reference:
<paragraph>
<problematic ids="id2" refid="id1">
:role:`phrase reference`_
<system_message backrefs="id2" ids="id1" level="2" line="3" source="test data" type="WARNING">
<paragraph>
Mismatch: both interpreted text role prefix and reference suffix.
"""],
["""\
Invalid phrase reference:
`phrase reference`:role:_
""",
"""\
<document source="test data">
<paragraph>
Invalid phrase reference:
<paragraph>
<problematic ids="id2" refid="id1">
`phrase reference`:role:_
<system_message backrefs="id2" ids="id1" level="2" line="3" source="test data" type="WARNING">
<paragraph>
Mismatch: both interpreted text role suffix and reference suffix.
"""],
["""\
`phrase reference_ without closing backquote
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
`
phrase \n\
<reference name="reference" refname="reference">
reference
without closing backquote
<system_message backrefs="id2" ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline interpreted text or phrase reference start-string without end-string.
"""],
["""\
`anonymous phrase reference__ without closing backquote
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
`
anonymous phrase \n\
<reference anonymous="1" name="reference">
reference
without closing backquote
<system_message backrefs="id2" ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline interpreted text or phrase reference start-string without end-string.
"""],
]
totest['embedded_URIs'] = [
["""\
`phrase reference <http://example.com>`_
""",
"""\
<document source="test data">
<paragraph>
<reference name="phrase reference" refuri="http://example.com">
phrase reference
<target ids="phrase-reference" names="phrase\ reference" refuri="http://example.com">
"""],
["""\
`anonymous reference <http://example.com>`__
""",
"""\
<document source="test data">
<paragraph>
<reference name="anonymous reference" refuri="http://example.com">
anonymous reference
"""],
["""\
`embedded URI on next line
<http://example.com>`__
""",
"""\
<document source="test data">
<paragraph>
<reference name="embedded URI on next line" refuri="http://example.com">
embedded URI on next line
"""],
["""\
`embedded URI across lines <http://example.com/
long/path>`__
""",
"""\
<document source="test data">
<paragraph>
<reference name="embedded URI across lines" refuri="http://example.com/long/path">
embedded URI across lines
"""],
["""\
`embedded URI with whitespace <http://example.com/
long/path /and /whitespace>`__
""",
"""\
<document source="test data">
<paragraph>
<reference name="embedded URI with whitespace" refuri="http://example.com/long/path/and/whitespace">
embedded URI with whitespace
"""],
["""\
`embedded email address <jdoe@example.com>`__
`embedded email address broken across lines <jdoe
@example.com>`__
""",
"""\
<document source="test data">
<paragraph>
<reference name="embedded email address" refuri="mailto:jdoe@example.com">
embedded email address
<paragraph>
<reference name="embedded email address broken across lines" refuri="mailto:jdoe@example.com">
embedded email address broken across lines
"""],
["""\
`embedded URI with too much whitespace < http://example.com/
long/path /and /whitespace >`__
`embedded URI with too much whitespace at end <http://example.com/
long/path /and /whitespace >`__
`embedded URI with no preceding whitespace<http://example.com>`__
`escaped URI \\<http://example.com>`__
See `HTML Anchors: \\<a>`_.
""",
"""\
<document source="test data">
<paragraph>
<reference anonymous="1" name="embedded URI with too much whitespace < http://example.com/ long/path /and /whitespace >">
embedded URI with too much whitespace < http://example.com/
long/path /and /whitespace >
<paragraph>
<reference anonymous="1" name="embedded URI with too much whitespace at end <http://example.com/ long/path /and /whitespace >">
embedded URI with too much whitespace at end <http://example.com/
long/path /and /whitespace >
<paragraph>
<reference anonymous="1" name="embedded URI with no preceding whitespace<http://example.com>">
embedded URI with no preceding whitespace<http://example.com>
<paragraph>
<reference anonymous="1" name="escaped URI <http://example.com>">
escaped URI <http://example.com>
<paragraph>
See \n\
<reference name="HTML Anchors: <a>" refname="html anchors: <a>">
HTML Anchors: <a>
.
"""],
["""\
Relative URIs' reference text can be omitted:
`<reference>`_
`<anonymous>`__
""",
"""\
<document source="test data">
<paragraph>
Relative URIs' reference text can be omitted:
<paragraph>
<reference name="reference" refuri="reference">
reference
<target ids="reference" names="reference" refuri="reference">
<paragraph>
<reference name="anonymous" refuri="anonymous">
anonymous
"""],
]
totest['inline_targets'] = [
["""\
_`target`
Here is _`another target` in some text. And _`yet
another target`, spanning lines.
_`Here is a TaRgeT` with case and spacial difficulties.
""",
"""\
<document source="test data">
<paragraph>
<target ids="target" names="target">
target
<paragraph>
Here is \n\
<target ids="another-target" names="another\ target">
another target
in some text. And \n\
<target ids="yet-another-target" names="yet\ another\ target">
yet
another target
, spanning lines.
<paragraph>
<target ids="here-is-a-target" names="here\ is\ a\ target">
Here is a TaRgeT
with case and spacial difficulties.
"""],
[u"""\
l'_`target1` and l\u2019_`target2` with apostrophe
""",
u"""\
<document source="test data">
<paragraph>
l'
<target ids="target1" names="target1">
target1
and l\u2019
<target ids="target2" names="target2">
target2
with apostrophe
"""],
[u"""\
quoted '_`target1`', quoted "_`target2`",
quoted \u2018_`target3`\u2019, quoted \u201c_`target4`\u201d,
quoted \xab_`target5`\xbb
""",
u"""\
<document source="test data">
<paragraph>
quoted '
<target ids="target1" names="target1">
target1
', quoted "
<target ids="target2" names="target2">
target2
",
quoted \u2018
<target ids="target3" names="target3">
target3
\u2019, quoted \u201c
<target ids="target4" names="target4">
target4
\u201d,
quoted \xab
<target ids="target5" names="target5">
target5
\xbb
"""],
[u"""\
_`'target1'` with quotes, _`"target2"` with quotes,
_`\u2018target3\u2019` with quotes, _`\u201ctarget4\u201d` with quotes,
_`\xabtarget5\xbb` with quotes
""",
u"""\
<document source="test data">
<paragraph>
<target ids="target1" names="'target1'">
'target1'
with quotes,
<target ids="target2" names=""target2"">
"target2"
with quotes,
<target ids="target3" names="\u2018target3\u2019">
\u2018target3\u2019
with quotes,
<target ids="target4" names="\u201ctarget4\u201d">
\u201ctarget4\u201d
with quotes,
<target ids="target5" names="\xabtarget5\xbb">
\xabtarget5\xbb
with quotes
"""],
["""\
But this isn't a _target; targets require backquotes.
And _`this`_ is just plain confusing.
""",
"""\
<document source="test data">
<paragraph>
But this isn't a _target; targets require backquotes.
<paragraph>
And \n\
<problematic ids="id2" refid="id1">
_`
this`_ is just plain confusing.
<system_message backrefs="id2" ids="id1" level="2" line="3" source="test data" type="WARNING">
<paragraph>
Inline target start-string without end-string.
"""],
["""\
_`inline target without closing backquote
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
_`
inline target without closing backquote
<system_message backrefs="id2" ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline target start-string without end-string.
"""],
]
totest['footnote_reference'] = [
["""\
[1]_
""",
"""\
<document source="test data">
<paragraph>
<footnote_reference ids="id1" refname="1">
1
"""],
["""\
[#]_
""",
"""\
<document source="test data">
<paragraph>
<footnote_reference auto="1" ids="id1">
"""],
["""\
[#label]_
""",
"""\
<document source="test data">
<paragraph>
<footnote_reference auto="1" ids="id1" refname="label">
"""],
["""\
[*]_
""",
"""\
<document source="test data">
<paragraph>
<footnote_reference auto="*" ids="id1">
"""],
["""\
Adjacent footnote refs are not possible: [*]_[#label]_ [#]_[2]_ [1]_[*]_
""",
"""\
<document source="test data">
<paragraph>
Adjacent footnote refs are not possible: [*]_[#label]_ [#]_[2]_ [1]_[*]_
"""],
]
totest['citation_reference'] = [
["""\
[citation]_
""",
"""\
<document source="test data">
<paragraph>
<citation_reference ids="id1" refname="citation">
citation
"""],
["""\
[citation]_ and [cit-ation]_ and [cit.ation]_ and [CIT1]_ but not [CIT 1]_
""",
"""\
<document source="test data">
<paragraph>
<citation_reference ids="id1" refname="citation">
citation
and \n\
<citation_reference ids="id2" refname="cit-ation">
cit-ation
and \n\
<citation_reference ids="id3" refname="cit.ation">
cit.ation
and \n\
<citation_reference ids="id4" refname="cit1">
CIT1
but not [CIT 1]_
"""],
["""\
Adjacent citation refs are not possible: [citation]_[CIT1]_
""",
"""\
<document source="test data">
<paragraph>
Adjacent citation refs are not possible: [citation]_[CIT1]_
"""],
]
totest['substitution_references'] = [
["""\
|subref|
""",
"""\
<document source="test data">
<paragraph>
<substitution_reference refname="subref">
subref
"""],
["""\
|subref|_ and |subref|__
""",
"""\
<document source="test data">
<paragraph>
<reference refname="subref">
<substitution_reference refname="subref">
subref
and \n\
<reference anonymous="1">
<substitution_reference refname="subref">
subref
"""],
["""\
|substitution reference|
""",
"""\
<document source="test data">
<paragraph>
<substitution_reference refname="substitution reference">
substitution reference
"""],
["""\
|substitution
reference|
""",
"""\
<document source="test data">
<paragraph>
<substitution_reference refname="substitution reference">
substitution
reference
"""],
["""\
|substitution reference without closing verbar
""",
"""\
<document source="test data">
<paragraph>
<problematic ids="id2" refid="id1">
|
substitution reference without closing verbar
<system_message backrefs="id2" ids="id1" level="2" line="1" source="test data" type="WARNING">
<paragraph>
Inline substitution_reference start-string without end-string.
"""],
["""\
first | then || and finally |||
""",
"""\
<document source="test data">
<paragraph>
first | then || and finally |||
"""],
]
totest['standalone_hyperlink'] = [
["""\
http://www.standalone.hyperlink.com
http:/one-slash-only.absolute.path
[http://example.com]
(http://example.com)
<http://example.com>
http://[1080:0:0:0:8:800:200C:417A]/IPv6address.html
http://[3ffe:2a00:100:7031::1] (the final "]" is ambiguous in text)
http://[3ffe:2a00:100:7031::1]/
mailto:someone@somewhere.com
news:comp.lang.python
An email address in a sentence: someone@somewhere.com.
ftp://ends.with.a.period.
(a.question.mark@end?)
""",
"""\
<document source="test data">
<paragraph>
<reference refuri="http://www.standalone.hyperlink.com">
http://www.standalone.hyperlink.com
<paragraph>
<reference refuri="http:/one-slash-only.absolute.path">
http:/one-slash-only.absolute.path
<paragraph>
[
<reference refuri="http://example.com">
http://example.com
]
<paragraph>
(
<reference refuri="http://example.com">
http://example.com
)
<paragraph>
<
<reference refuri="http://example.com">
http://example.com
>
<paragraph>
<reference refuri="http://[1080:0:0:0:8:800:200C:417A]/IPv6address.html">
http://[1080:0:0:0:8:800:200C:417A]/IPv6address.html
<paragraph>
<reference refuri="http://[3ffe:2a00:100:7031::1">
http://[3ffe:2a00:100:7031::1
] (the final "]" is ambiguous in text)
<paragraph>
<reference refuri="http://[3ffe:2a00:100:7031::1]/">
http://[3ffe:2a00:100:7031::1]/
<paragraph>
<reference refuri="mailto:someone@somewhere.com">
mailto:someone@somewhere.com
<paragraph>
<reference refuri="news:comp.lang.python">
news:comp.lang.python
<paragraph>
An email address in a sentence: \n\
<reference refuri="mailto:someone@somewhere.com">
someone@somewhere.com
.
<paragraph>
<reference refuri="ftp://ends.with.a.period">
ftp://ends.with.a.period
.
<paragraph>
(
<reference refuri="mailto:a.question.mark@end">
a.question.mark@end
?)
"""],
["""\
Valid URLs with escaped markup characters:
http://example.com/\\*content\\*/whatever
http://example.com/\\*content*/whatever
""",
"""\
<document source="test data">
<paragraph>
Valid URLs with escaped markup characters:
<paragraph>
<reference refuri="http://example.com/*content*/whatever">
http://example.com/*content*/whatever
<paragraph>
<reference refuri="http://example.com/*content*/whatever">
http://example.com/*content*/whatever
"""],
["""\
Valid URLs may end with punctuation inside "<>":
<http://example.org/ends-with-dot.>
""",
"""\
<document source="test data">
<paragraph>
Valid URLs may end with punctuation inside "<>":
<paragraph>
<
<reference refuri="http://example.org/ends-with-dot.">
http://example.org/ends-with-dot.
>
"""],
["""\
Valid URLs with interesting endings:
http://example.org/ends-with-pluses++
""",
"""\
<document source="test data">
<paragraph>
Valid URLs with interesting endings:
<paragraph>
<reference refuri="http://example.org/ends-with-pluses++">
http://example.org/ends-with-pluses++
"""],
["""\
None of these are standalone hyperlinks (their "schemes"
are not recognized): signal:noise, a:b.
""",
"""\
<document source="test data">
<paragraph>
None of these are standalone hyperlinks (their "schemes"
are not recognized): signal:noise, a:b.
"""],
["""\
Escaped email addresses are not recognized: test\@example.org
""",
"""\
<document source="test data">
<paragraph>
Escaped email addresses are not recognized: test@example.org
"""],
]
totest['miscellaneous'] = [
["""\
__This__ should be left alone.
""",
"""\
<document source="test data">
<paragraph>
__This__ should be left alone.
"""],
[r"""
Character-level m\ *a*\ **r**\ ``k``\ `u`:title:\p
with backslash-escaped whitespace, including new\
lines.
""",
"""\
<document source="test data">
<paragraph>
Character-level m
<emphasis>
a
<strong>
r
<literal>
k
<title_reference>
u
p
with backslash-escaped whitespace, including newlines.
"""],
[u"""\
quoted '*emphasis*', quoted "*emphasis*",
quoted \u2018*emphasis*\u2019, quoted \u201c*emphasis*\u201d,
quoted \xab*emphasis*\xbb
""",
u"""\
<document source="test data">
<paragraph>
quoted '
<emphasis>
emphasis
', quoted "
<emphasis>
emphasis
",
quoted \u2018
<emphasis>
emphasis
\u2019, quoted \u201c
<emphasis>
emphasis
\u201d,
quoted \xab
<emphasis>
emphasis
\xbb
"""],
[u"""\
text-*separated*\u2010*by*\u2011*various*\u2012*dashes*\u2013*and*\u2014*hyphens*.
\u00bf*punctuation*? \u00a1*examples*!\u00a0*too*.
""",
u"""\
<document source="test data">
<paragraph>
text-
<emphasis>
separated
\u2010
<emphasis>
by
\u2011
<emphasis>
various
\u2012
<emphasis>
dashes
\u2013
<emphasis>
and
\u2014
<emphasis>
hyphens
.
\xbf
<emphasis>
punctuation
? \xa1
<emphasis>
examples
!\xa0
<emphasis>
too
.
"""],
[u"""\
None of these should be markup (matched openers & closers):
\u2018*\u2019 \u201c*\u201d \xab*\xbb \u00bf*? \u00a1*!
But this should:
l\u2019*exception*.
""",
u"""\
<document source="test data">
<paragraph>
None of these should be markup (matched openers & closers):
<paragraph>
\u2018*\u2019 \u201c*\u201d \xab*\xbb \xbf*? \xa1*!
<paragraph>
But this should:
<paragraph>
l\u2019
<emphasis>
exception
.
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
|
|
"""
Pure Python GeoIP API. The API is based off of U{MaxMind's C-based Python API<http://www.maxmind.com/app/python>},
but the code itself is based on the U{pure PHP5 API<http://pear.php.net/package/Net_GeoIP/>}
by Jim Winstead and Hans Lellelid.
It is mostly a drop-in replacement, except the
C{new} and C{open} methods are gone. You should instantiate the L{GeoIP} class yourself:
C{gi = GeoIP('/path/to/GeoIP.dat', pygeoip.MEMORY_CACHE)}
@author: Jennifer Ennis <zaylea at gmail dot com>
@license:
Copyright(C) 2004 MaxMind LLC
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.txt>.
"""
from __future__ import with_statement, absolute_import, division
import os
import math
import socket
#import mmap
import gzip
import codecs
from . import const
from .util import ip2long
from .timezone import time_zone_by_country_and_region
#import six
#MMAP_CACHE = const.MMAP_CACHE
MEMORY_CACHE = const.MEMORY_CACHE
STANDARD = const.STANDARD
class GeoIPError(Exception):
pass
class GeoIPMetaclass(type):
def __new__(cls, *args, **kwargs):
"""
        Singleton method to get an instance without reparsing the db. Unique
instances are instantiated based on the filename of the db. Flags are
ignored for this, i.e. if you initialize one with STANDARD flag (default)
and then try later to initialize with MEMORY_CACHE, it will still
return the STANDARD one.
"""
if not hasattr(cls, '_instances'):
cls._instances = {}
if len(args) > 0:
filename = args[0]
elif 'filename' in kwargs:
filename = kwargs['filename']
if not filename in cls._instances:
cls._instances[filename] = type.__new__(cls, *args, **kwargs)
return cls._instances[filename]
GeoIPBase = GeoIPMetaclass('GeoIPBase', (object,), {})
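# Illustrative consequence of the metaclass above (a sketch, not part of the
# original module): as the docstring says, instances are keyed on the database
# filename, and flags passed on later constructions are ignored.
#
#     a = GeoIP('/path/to/GeoIP.dat')                  # parses the database
#     b = GeoIP('/path/to/GeoIP.dat', MEMORY_CACHE)    # returns the same object
#     assert a is b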
class GeoIP(GeoIPBase):
def __init__(self, filename, flags=0):
"""
Initialize the class.
@param filename: path to a geoip database. If MEMORY_CACHE is used,
the file can be gzipped.
@type filename: str
@param flags: flags that affect how the database is processed.
Currently the only supported flags are STANDARD (the default),
MEMORY_CACHE (preload the whole file into memory), and
MMAP_CACHE (access the file via mmap).
@type flags: int
"""
self._filename = filename
self._flags = flags
#if self._flags & const.MMAP_CACHE:
        if False:  # mmap support is disabled; see the commented-out flag check above
with open(filename, 'rb') as f:
self._filehandle = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
elif self._flags & const.MEMORY_CACHE:
try:
self._filehandle = open(filename, 'rb')
#self._memoryBuffer = self._filehandle.read()
except IOError:
self._filehandle = codecs.open(filename, 'rb', 'latin_1')
self._memoryBuffer = self._filehandle.read()
else:
self._filehandle = codecs.open(filename, 'rb','latin_1')
self._setup_segments()
def _setup_segments(self):
"""
        Parses the database file to determine what kind of database is being used and sets up
segment sizes and start points that will be used by the seek*() methods later.
"""
self._databaseType = const.COUNTRY_EDITION
self._recordLength = const.STANDARD_RECORD_LENGTH
filepos = self._filehandle.tell()
self._filehandle.seek(-3, os.SEEK_END)
for i in range(const.STRUCTURE_INFO_MAX_SIZE):
delim = self._filehandle.read(3)
if delim == (chr(255) * 3):
self._databaseType = ord(self._filehandle.read(1))
if (self._databaseType >= 106):
# backwards compatibility with databases from April 2003 and earlier
self._databaseType -= 105
if self._databaseType == const.REGION_EDITION_REV0:
self._databaseSegments = const.STATE_BEGIN_REV0
elif self._databaseType == const.REGION_EDITION_REV1:
self._databaseSegments = const.STATE_BEGIN_REV1
elif self._databaseType in (const.CITY_EDITION_REV0,
const.CITY_EDITION_REV1,
const.ORG_EDITION,
const.ISP_EDITION,
const.ASNUM_EDITION):
self._databaseSegments = 0
buf = self._filehandle.read(const.SEGMENT_RECORD_LENGTH)
for j in range(const.SEGMENT_RECORD_LENGTH):
self._databaseSegments += (ord(buf[j]) << (j * 8))
if self._databaseType in (const.ORG_EDITION, const.ISP_EDITION):
self._recordLength = const.ORG_RECORD_LENGTH
break
else:
self._filehandle.seek(-4, os.SEEK_CUR)
if self._databaseType == const.COUNTRY_EDITION:
self._databaseSegments = const.COUNTRY_BEGIN
self._filehandle.seek(filepos, os.SEEK_SET)
def _lookup_country_id(self, addr):
"""
Get the country index.
        This method is called by the country_code_by_addr and country_name_by_addr
methods. It looks up the index ('id') for the country which is the key
for the code and name.
@param addr: The IP address
@type addr: str
@return: network byte order 32-bit integer
@rtype: int
"""
ipnum = ip2long(addr)
if not ipnum:
raise ValueError("Invalid IP address: %s" % addr)
if self._databaseType != const.COUNTRY_EDITION:
raise GeoIPError('Invalid database type; country_* methods expect '\
'Country database')
return self._seek_country(ipnum) - const.COUNTRY_BEGIN
def _seek_country(self, ipnum):
"""
Using the record length and appropriate start points, seek to the
country that corresponds to the converted IP address integer.
@param ipnum: result of ip2long conversion
@type ipnum: int
@return: offset of start of record
@rtype: int
"""
offset = 0
for depth in range(31, -1, -1):
#if self._flags & const.MEMORY_CACHE:
            if False:  # in-memory buffer lookup disabled; see the commented-out flag check above
startIndex = 2 * self._recordLength * offset
length = 2 * self._recordLength
endIndex = startIndex + length
buf = self._memoryBuffer[startIndex:endIndex]
else:
self._filehandle.seek(2 * self._recordLength * offset, os.SEEK_SET)
buf = self._filehandle.read(2 * self._recordLength)
x = [0,0]
for i in range(2):
for j in range(self._recordLength):
x[i] += ord(buf[self._recordLength * i + j]) << (j * 8)
if ipnum & (1 << depth):
if x[1] >= self._databaseSegments:
return x[1]
offset = x[1]
else:
if x[0] >= self._databaseSegments:
return x[0]
offset = x[0]
raise Exception('Error traversing database - perhaps it is corrupt?')
def _get_org(self, ipnum):
"""
Seek and return organization (or ISP) name for converted IP addr.
@param ipnum: Converted IP address
@type ipnum: int
@return: org/isp name
@rtype: str
"""
seek_org = self._seek_country(ipnum)
if seek_org == self._databaseSegments:
return None
record_pointer = seek_org + (2 * self._recordLength - 1) * self._databaseSegments
self._filehandle.seek(record_pointer, os.SEEK_SET)
org_buf = self._filehandle.read(const.MAX_ORG_RECORD_LENGTH)
return org_buf[:org_buf.index(chr(0))]
def _get_region(self, ipnum):
"""
Seek and return the region info (dict containing country_code and region_name).
@param ipnum: converted IP address
@type ipnum: int
@return: dict containing country_code and region_name
@rtype: dict
"""
country_code = ''
region = ''
if self._databaseType == const.REGION_EDITION_REV0:
seek_country = self._seek_country(ipnum)
seek_region = seek_country - const.STATE_BEGIN_REV0
if seek_region >= 1000:
country_code = 'US'
region = ''.join([chr((seek_region // 1000) // 26 + 65), chr((seek_region // 1000) % 26 + 65)])
else:
country_code = const.COUNTRY_CODES[seek_region]
region = ''
elif self._databaseType == const.REGION_EDITION_REV1:
seek_country = self._seek_country(ipnum)
seek_region = seek_country - const.STATE_BEGIN_REV1
if seek_region < const.US_OFFSET:
                country_code = ''
region = ''
elif seek_region < const.CANADA_OFFSET:
country_code = 'US'
region = ''.join([chr((seek_region - const.US_OFFSET) // 26 + 65), chr((seek_region - const.US_OFFSET) % 26 + 65)])
elif seek_region < const.WORLD_OFFSET:
country_code = 'CA'
region = ''.join([chr((seek_region - const.CANADA_OFFSET) // 26 + 65), chr((seek_region - const.CANADA_OFFSET) % 26 + 65)])
else:
i = (seek_region - const.WORLD_OFFSET) // const.FIPS_RANGE
if i < len(const.COUNTRY_CODES):
#country_code = const.COUNTRY_CODES[(seek_region - const.WORLD_OFFSET) // const.FIPS_RANGE]
country_code = const.COUNTRY_CODES[i]
else:
country_code = ''
region = ''
elif self._databaseType in (const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
rec = self._get_record(ipnum)
country_code = rec['country_code'] if rec and 'country_code' in rec else ''
region = rec['region_name'] if rec and 'region_name' in rec else ''
return {'country_code' : country_code, 'region_name' : region }
def _get_record(self, ipnum):
"""
Populate location dict for converted IP.
@param ipnum: converted IP address
@type ipnum: int
@return: dict with country_code, country_code3, country_name,
region, city, postal_code, latitude, longitude,
dma_code, metro_code, area_code, region_name, time_zone
@rtype: dict
"""
seek_country = self._seek_country(ipnum)
if seek_country == self._databaseSegments:
return None
record_pointer = seek_country + (2 * self._recordLength - 1) * self._databaseSegments
self._filehandle.seek(record_pointer, os.SEEK_SET)
record_buf = self._filehandle.read(const.FULL_RECORD_LENGTH)
record = {}
record_buf_pos = 0
char = ord(record_buf[record_buf_pos])
#char = record_buf[record_buf_pos] if six.PY3 else ord(record_buf[record_buf_pos])
record['country_code'] = const.COUNTRY_CODES[char]
record['country_code3'] = const.COUNTRY_CODES3[char]
record['country_name'] = const.COUNTRY_NAMES[char]
record_buf_pos += 1
str_length = 0
# get region
char = ord(record_buf[record_buf_pos+str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos+str_length])
if str_length > 0:
record['region_name'] = record_buf[record_buf_pos:record_buf_pos+str_length]
record_buf_pos += str_length + 1
str_length = 0
# get city
char = ord(record_buf[record_buf_pos+str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos+str_length])
if str_length > 0:
record['city'] = record_buf[record_buf_pos:record_buf_pos+str_length]
else:
record['city'] = ''
record_buf_pos += str_length + 1
str_length = 0
# get the postal code
char = ord(record_buf[record_buf_pos+str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos+str_length])
if str_length > 0:
record['postal_code'] = record_buf[record_buf_pos:record_buf_pos+str_length]
else:
record['postal_code'] = None
record_buf_pos += str_length + 1
str_length = 0
latitude = 0
longitude = 0
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
latitude += (char << (j * 8))
record['latitude'] = (latitude/10000.0) - 180.0
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
longitude += (char << (j * 8))
record['longitude'] = (longitude/10000.0) - 180.0
if self._databaseType == const.CITY_EDITION_REV1:
dmaarea_combo = 0
if record['country_code'] == 'US':
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
dmaarea_combo += (char << (j*8))
record['dma_code'] = int(math.floor(dmaarea_combo/1000))
record['area_code'] = dmaarea_combo%1000
else:
record['dma_code'] = 0
record['area_code'] = 0
if 'dma_code' in record and record['dma_code'] in const.DMA_MAP:
record['metro_code'] = const.DMA_MAP[record['dma_code']]
else:
record['metro_code'] = ''
if 'country_code' in record:
record['time_zone'] = time_zone_by_country_and_region(
record['country_code'], record.get('region_name')) or ''
else:
record['time_zone'] = ''
return record
def country_code_by_addr(self, addr):
"""
Returns 2-letter country code (e.g. 'US') for specified IP address.
Use this method if you have a Country, Region, or City database.
@param addr: IP address
@type addr: str
@return: 2-letter country code
@rtype: str
"""
try:
if self._databaseType == const.COUNTRY_EDITION:
country_id = self._lookup_country_id(addr)
return const.COUNTRY_CODES[country_id]
elif self._databaseType in (const.REGION_EDITION_REV0, const.REGION_EDITION_REV1,
const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
return self.region_by_addr(addr)['country_code']
else:
raise GeoIPError('Invalid database type; country_* methods expect '\
'Country, City, or Region database')
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def country_code_by_name(self, hostname):
"""
Returns 2-letter country code (e.g. 'US') for specified hostname.
Use this method if you have a Country, Region, or City database.
@param hostname: host name
@type hostname: str
@return: 2-letter country code
@rtype: str
"""
addr = socket.gethostbyname(hostname)
return self.country_code_by_addr(addr)
def country_name_by_addr(self, addr):
"""
Returns full country name for specified IP address.
Use this method if you have a Country or City database.
@param addr: IP address
@type addr: str
@return: country name
@rtype: str
"""
try:
if self._databaseType == const.COUNTRY_EDITION:
country_id = self._lookup_country_id(addr)
return const.COUNTRY_NAMES[country_id]
elif self._databaseType in (const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
return self.record_by_addr(addr)['country_name']
else:
raise GeoIPError('Invalid database type; country_* methods expect '\
'Country or City database')
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def country_name_by_name(self, hostname):
"""
Returns full country name for specified hostname.
Use this method if you have a Country database.
@param hostname: host name
@type hostname: str
@return: country name
@rtype: str
"""
addr = socket.gethostbyname(hostname)
return self.country_name_by_addr(addr)
def org_by_addr(self, addr):
"""
Lookup the organization (or ISP) for given IP address.
Use this method if you have an Organization/ISP database.
@param addr: IP address
@type addr: str
@return: organization or ISP name
@rtype: str
"""
try:
ipnum = ip2long(addr)
if not ipnum:
raise ValueError("Invalid IP address: %s" % addr)
if self._databaseType not in (const.ORG_EDITION, const.ISP_EDITION, const.ASNUM_EDITION):
raise GeoIPError('Invalid database type; org_* methods expect '\
'Org/ISP database')
return self._get_org(ipnum)
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def org_by_name(self, hostname):
"""
Lookup the organization (or ISP) for hostname.
Use this method if you have an Organization/ISP database.
@param hostname: host name
@type hostname: str
@return: organization or ISP name
@rtype: str
"""
addr = socket.gethostbyname(hostname)
return self.org_by_addr(addr)
def record_by_addr(self, addr):
"""
Look up the record for a given IP address.
Use this method if you have a City database.
@param addr: IP address
@type addr: str
@return: dict with country_code, country_code3, country_name,
region, city, postal_code, latitude, longitude,
dma_code, metro_code, area_code, region_name, time_zone
@rtype: dict
"""
try:
ipnum = ip2long(addr)
if not ipnum:
raise ValueError("Invalid IP address: %s" % addr)
if not self._databaseType in (const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
raise GeoIPError('Invalid database type; record_* methods expect City database')
return self._get_record(ipnum)
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def record_by_name(self, hostname):
"""
Look up the record for a given hostname.
Use this method if you have a City database.
@param hostname: host name
@type hostname: str
@return: dict with country_code, country_code3, country_name,
region, city, postal_code, latitude, longitude,
dma_code, metro_code, area_code, region_name, time_zone
@rtype: dict
"""
addr = socket.gethostbyname(hostname)
return self.record_by_addr(addr)
def region_by_addr(self, addr):
"""
Lookup the region for given IP address.
Use this method if you have a Region database.
@param addr: IP address
@type addr: str
@return: dict containing country_code, region,
and region_name
@rtype: dict
"""
try:
ipnum = ip2long(addr)
if not ipnum:
raise ValueError("Invalid IP address: %s" % addr)
if not self._databaseType in (const.REGION_EDITION_REV0, const.REGION_EDITION_REV1,
const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
raise GeoIPError('Invalid database type; region_* methods expect '\
'Region or City database')
return self._get_region(ipnum)
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def region_by_name(self, hostname):
"""
Lookup the region for given hostname.
Use this method if you have a Region database.
@param hostname: host name
@type hostname: str
@return: dict containing country_code, region,
and region_name
@rtype: dict
"""
addr = socket.gethostbyname(hostname)
return self.region_by_addr(addr)
def time_zone_by_addr(self, addr):
"""
Look up the time zone for a given IP address.
Use this method if you have a Region or City database.
        @param addr: IP address
        @type addr: str
@return: Time zone
@rtype: str
"""
try:
ipnum = ip2long(addr)
if not ipnum:
raise ValueError("Invalid IP address: %s" % addr)
if not self._databaseType in (const.REGION_EDITION_REV0, const.REGION_EDITION_REV1,
const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
raise GeoIPError('Invalid database type; region_* methods expect '\
'Region or City database')
return self._get_record(ipnum)['time_zone']
except ValueError:
raise GeoIPError('*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % addr)
def time_zone_by_name(self, hostname):
"""
Look up the time zone for a given hostname.
Use this method if you have a Region or City database.
@param hostname: host name
@type hostname: str
@return: Time zone
@rtype: str
"""
addr = socket.gethostbyname(hostname)
return self.time_zone_by_addr(addr)
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools
import os
import six
import webob.dec
import webob.exc
import nova.api.openstack
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
import nova.policy
LOG = logging.getLogger(__name__)
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions.
Note that you don't have to derive from this class to have a valid
extension; it is purely a convenience.
"""
# The name of the extension, e.g., 'Fox In Socks'
name = None
# The alias for the extension, e.g., 'FOXNSOX'
alias = None
# Description comes from the docstring for the class
# The XML namespace for the extension, e.g.,
# 'http://www.fox.in.socks/api/ext/pie/v1.0'
namespace = None
# The timestamp when the extension was last updated, e.g.,
# '2011-01-22T19:25:27Z'
updated = None
def __init__(self, ext_mgr):
"""Register extension with the extension manager."""
ext_mgr.register(self)
self.ext_mgr = ext_mgr
def get_resources(self):
"""List of extensions.ResourceExtension extension objects.
Resources define new nouns, and are accessible through URLs.
"""
resources = []
return resources
def get_controller_extensions(self):
"""List of extensions.ControllerExtension extension objects.
Controller extensions are used to extend existing controllers.
"""
controller_exts = []
return controller_exts
@classmethod
def nsmap(cls):
"""Synthesize a namespace map from extension."""
# Start with a base nsmap
nsmap = ext_nsmap.copy()
# Add the namespace for the extension
nsmap[cls.alias] = cls.namespace
return nsmap
@classmethod
def xmlname(cls, name):
"""Synthesize element and attribute names."""
return '{%s}%s' % (cls.namespace, name)
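# A minimal concrete descriptor, sketched from the contract above (the names
# mirror the examples in the attribute comments and are illustrative only):
#
#     class Foxinsocks(ExtensionDescriptor):
#         """The Fox In Socks extension."""
#         name = 'Fox In Socks'
#         alias = 'FOXNSOX'
#         namespace = 'http://www.fox.in.socks/api/ext/pie/v1.0'
#         updated = '2011-01-22T19:25:27Z'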
def make_ext(elem):
elem.set('name')
elem.set('namespace')
elem.set('alias')
elem.set('updated')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
xmlutil.make_links(elem, 'links')
ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class ExtensionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('extension', selector='extension')
make_ext(root)
return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('extensions')
elem = xmlutil.SubTemplateElement(root, 'extension',
selector='extensions')
make_ext(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsController(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
super(ExtensionsController, self).__init__(None)
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.name
ext_data['alias'] = ext.alias
ext_data['description'] = ext.__doc__
ext_data['namespace'] = ext.namespace
ext_data['updated'] = ext.updated
ext_data['links'] = [] # TODO(dprince): implement extension links
return ext_data
@wsgi.serializers(xml=ExtensionsTemplate)
def index(self, req):
extensions = []
for ext in self.extension_manager.sorted_extensions():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
@wsgi.serializers(xml=ExtensionTemplate)
def show(self, req, id):
try:
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self.extension_manager.extensions[id]
except KeyError:
raise webob.exc.HTTPNotFound()
return dict(extension=self._translate(ext))
def delete(self, req, id):
raise webob.exc.HTTPNotFound()
def create(self, req, body):
raise webob.exc.HTTPNotFound()
class ExtensionManager(object):
"""Load extensions from the configured extension path.
    See nova/tests/api/openstack/compute/extensions/foxinsocks.py for an
example extension implementation.
"""
def sorted_extensions(self):
if self.sorted_ext_list is None:
self.sorted_ext_list = sorted(self.extensions.iteritems())
for _alias, ext in self.sorted_ext_list:
yield ext
def is_loaded(self, alias):
return alias in self.extensions
def register(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.alias
LOG.audit(_('Loaded extension: %s'), alias)
if alias in self.extensions:
raise exception.NovaException("Found duplicate extension: %s"
% alias)
self.extensions[alias] = ext
self.sorted_ext_list = None
def get_resources(self):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
ExtensionsController(self)))
for ext in self.sorted_extensions():
try:
resources.extend(ext.get_resources())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have resource
# extensions
pass
return resources
def get_controller_extensions(self):
"""Returns a list of ControllerExtension objects."""
controller_exts = []
for ext in self.sorted_extensions():
try:
get_ext_method = ext.get_controller_extensions
except AttributeError:
# NOTE(Vek): Extensions aren't required to have
# controller extensions
continue
controller_exts.extend(get_ext_method())
return controller_exts
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug('Ext name: %s', extension.name)
LOG.debug('Ext alias: %s', extension.alias)
LOG.debug('Ext description: %s',
' '.join(extension.__doc__.strip().split()))
LOG.debug('Ext namespace: %s', extension.namespace)
LOG.debug('Ext updated: %s', extension.updated)
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
return False
return True
def load_extension(self, ext_factory):
"""Execute an extension factory.
Loads an extension. The 'ext_factory' is the name of a
callable that will be imported and called with one
argument--the extension manager. The factory callable is
expected to call the register() method at least once.
"""
LOG.debug("Loading extension %s", ext_factory)
if isinstance(ext_factory, six.string_types):
# Load the factory
factory = importutils.import_class(ext_factory)
else:
factory = ext_factory
# Call it
LOG.debug("Calling extension factory %s", ext_factory)
factory(self)
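    # For example (hypothetical factory path):
    #     ext_mgr.load_extension('nova.api.openstack.compute.contrib.foxinsocks.Foxinsocks')
    # imports the named class and calls it with the extension manager, which is
    # expected to call ext_mgr.register(...) at least once.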
def _load_extensions(self):
"""Load extensions specified on the command line."""
extensions = list(self.cls_list)
for ext_factory in extensions:
try:
self.load_extension(ext_factory)
except Exception as exc:
LOG.warn(_LW('Failed to load extension %(ext_factory)s: '
'%(exc)s'),
{'ext_factory': ext_factory, 'exc': exc})
class ControllerExtension(object):
"""Extend core controllers of nova OpenStack API.
Provide a way to extend existing nova OpenStack API core
controllers.
"""
def __init__(self, extension, collection, controller):
self.extension = extension
self.collection = collection
self.controller = controller
class ResourceExtension(object):
"""Add top level resources to the OpenStack API in nova."""
def __init__(self, collection, controller=None, parent=None,
collection_actions=None, member_actions=None,
custom_routes_fn=None, inherits=None, member_name=None):
if not collection_actions:
collection_actions = {}
if not member_actions:
member_actions = {}
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
self.custom_routes_fn = custom_routes_fn
self.inherits = inherits
self.member_name = member_name
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
"""Registers all standard API extensions."""
# Walk through all the modules in our directory...
our_dir = path[0]
for dirpath, dirnames, filenames in os.walk(our_dir):
# Compute the relative package name from the dirpath
relpath = os.path.relpath(dirpath, our_dir)
if relpath == '.':
relpkg = ''
else:
relpkg = '.%s' % '.'.join(relpath.split(os.sep))
# Now, consider each file in turn, only considering .py files
for fname in filenames:
root, ext = os.path.splitext(fname)
# Skip __init__ and anything that's not .py
if ext != '.py' or root == '__init__':
continue
# Try loading it
classname = "%s%s" % (root[0].upper(), root[1:])
classpath = ("%s%s.%s.%s" %
(package, relpkg, root, classname))
if ext_list is not None and classname not in ext_list:
logger.debug("Skipping extension: %s" % classpath)
continue
try:
ext_mgr.load_extension(classpath)
except Exception as exc:
logger.warn(_('Failed to load extension %(classpath)s: '
'%(exc)s'),
{'classpath': classpath, 'exc': exc})
# Now, let's consider any subdirectories we may have...
subdirs = []
for dname in dirnames:
# Skip it if it does not have __init__.py
if not os.path.exists(os.path.join(dirpath, dname, '__init__.py')):
continue
# If it has extension(), delegate...
ext_name = "%s%s.%s.extension" % (package, relpkg, dname)
try:
ext = importutils.import_class(ext_name)
except ImportError:
# extension() doesn't exist on it, so we'll explore
# the directory for ourselves
subdirs.append(dname)
else:
try:
ext(ext_mgr)
except Exception as exc:
logger.warn(_('Failed to load extension %(ext_name)s:'
'%(exc)s'),
{'ext_name': ext_name, 'exc': exc})
# Update the list of directories we'll explore...
dirnames[:] = subdirs
def core_authorizer(api_name, extension_name):
def authorize(context, target=None, action=None):
if target is None:
target = {'project_id': context.project_id,
'user_id': context.user_id}
if action is None:
act = '%s:%s' % (api_name, extension_name)
else:
act = '%s:%s:%s' % (api_name, extension_name, action)
nova.policy.enforce(context, act, target)
return authorize
def extension_authorizer(api_name, extension_name):
return core_authorizer('%s_extension' % api_name, extension_name)
def soft_extension_authorizer(api_name, extension_name):
hard_authorize = extension_authorizer(api_name, extension_name)
def authorize(context, action=None):
try:
hard_authorize(context, action=action)
return True
except exception.Forbidden:
return False
return authorize
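# Illustrative use of the authorizers above (the extension and action names are
# hypothetical):
#
#     authorize = extension_authorizer('compute', 'flavor_access')
#     authorize(context, action='add_tenant_access')
#     # enforces the policy rule "compute_extension:flavor_access:add_tenant_access"
#
#     soft = soft_extension_authorizer('compute', 'flavor_access')
#     if soft(context):          # returns False instead of raising Forbidden
#         ...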
def check_compute_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
@six.add_metaclass(abc.ABCMeta)
class V3APIExtensionBase(object):
"""Abstract base class for all V3 API extensions.
All V3 API extensions must derive from this class and implement
the abstract methods get_resources and get_controller_extensions
even if they just return an empty list. The extensions must also
define the abstract properties.
"""
def __init__(self, extension_info):
self.extension_info = extension_info
@abc.abstractmethod
def get_resources(self):
"""Return a list of resources extensions.
The extensions should return a list of ResourceExtension
objects. This list may be empty.
"""
pass
@abc.abstractmethod
def get_controller_extensions(self):
"""Return a list of controller extensions.
The extensions should return a list of ControllerExtension
objects. This list may be empty.
"""
pass
@abc.abstractproperty
def name(self):
"""Name of the extension."""
pass
@abc.abstractproperty
def alias(self):
"""Alias for the extension."""
pass
@abc.abstractproperty
def version(self):
"""Version of the extension."""
pass
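# A minimal conforming v3 extension, sketched from the abstract contract above
# (class and alias names are hypothetical):
#
#     class ServerUsage(V3APIExtensionBase):
#         """Adds server usage information."""
#         name = 'ServerUsage'
#         alias = 'os-server-usage'
#         version = 1
#
#         def get_resources(self):
#             return []
#
#         def get_controller_extensions(self):
#             return []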
def expected_errors(errors):
"""Decorator for v3 API methods which specifies expected exceptions.
Specify which exceptions may occur when an API method is called. If an
unexpected exception occurs then return a 500 instead and ask the user
of the API to file a bug report.
"""
def decorator(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as exc:
if isinstance(exc, webob.exc.WSGIHTTPException):
if isinstance(errors, int):
t_errors = (errors,)
else:
t_errors = errors
if exc.code in t_errors:
raise
elif isinstance(exc, exception.PolicyNotAuthorized):
# Note(cyeoh): Special case to handle
# PolicyNotAuthorized exceptions so every
# extension method does not need to wrap authorize
# calls. ResourceExceptionHandler silently
# converts NotAuthorized to HTTPForbidden
raise
elif isinstance(exc, exception.ValidationError):
# Note(oomichi): Handle a validation error, which
# happens due to invalid API parameters, as an
# expected error.
raise
LOG.exception(_("Unexpected exception in API method"))
msg = _('Unexpected API Error. Please report this at '
'http://bugs.launchpad.net/nova/ and attach the Nova '
'API log if possible.\n%s') % type(exc)
raise webob.exc.HTTPInternalServerError(explanation=msg)
return wrapped
return decorator
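# Typical usage (illustrative; the status codes are an example, not a fixed
# contract):
#
#     @expected_errors((404, 409))
#     def show(self, req, id):
#         ...
#
# Any webob HTTP exception whose code is not listed, and any other unexpected
# exception, is converted into a 500 asking the caller to file a bug.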
|
|
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""
import copy
import datetime
import uuid
import mock
import jacket.storage.image.glance
from jacket.storage import exception
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {
'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
self.temp_images = mock.MagicMock()
super(_FakeImageService, self).__init__()
# TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def download(self, context, image_id, data):
self.show(context, image_id)
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
        :raises: Duplicate if the image already exists.
"""
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except Exception:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
def add_location(self, context, image_id, url, metadata):
self.update(context, image_id, {'locations': [{'url': url,
'metadata': metadata}]})
return True
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
def stub_out_image_service(stubs):
stubs.Set(jacket.storage.image.glance, 'get_remote_image_service',
lambda x, y: (FakeImageService(), y))
stubs.Set(jacket.storage.image.glance, 'get_default_image_service',
lambda: FakeImageService())
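# NOTE: Illustrative usage sketch (not part of the original module). The
# helper name and metadata values are arbitrary; it simply walks one
# create/show/delete round-trip through the fake service above.
def _example_fake_image_service_roundtrip():
    FakeImageService_reset()
    service = FakeImageService()
    # create() stores a deep copy of the metadata and generates an id when
    # the caller does not supply one.
    image = service.create(None, {'name': 'example-image', 'is_public': False})
    # show() returns a copy keyed by the (string) image id.
    assert service.show(None, image['id'])['name'] == 'example-image'
    # delete() removes the image; a later show() would raise ImageNotFound.
    service.delete(None, image['id'])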
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import dataproc_v1beta2
from google.cloud.dataproc_v1beta2.proto import clusters_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
    def __init__(self, responses=None):
        # Avoid sharing a mutable default list across ChannelStub instances.
        self.responses = responses if responses is not None else []
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
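# NOTE: Illustrative sketch (not part of the original test module). It shows,
# without involving the generated client, how ChannelStub records every
# request and pops canned responses from the end of the list passed in; the
# method path used here is arbitrary.
def _example_channel_stub_roundtrip():
    channel = ChannelStub(responses=['second', 'first'])
    call = channel.unary_unary('/example.Service/Method')
    assert call('request-1') == 'first'
    assert call('request-2') == 'second'
    assert channel.requests == [('/example.Service/Method', 'request-1'),
                                ('/example.Service/Method', 'request-2')]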
class TestClusterControllerClient(object):
def test_create_cluster(self):
# Setup Expected Response
project_id_2 = 'projectId2939242356'
cluster_name = 'clusterName-1018081872'
cluster_uuid = 'clusterUuid-1017854240'
expected_response = {
'project_id': project_id_2,
'cluster_name': cluster_name,
'cluster_uuid': cluster_uuid
}
expected_response = clusters_pb2.Cluster(**expected_response)
operation = operations_pb2.Operation(
name='operations/test_create_cluster', done=True)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup Request
project_id = 'projectId-1969970175'
region = 'region-934795532'
cluster = {}
response = client.create_cluster(project_id, region, cluster)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = clusters_pb2.CreateClusterRequest(
project_id=project_id, region=region, cluster=cluster)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_cluster_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name='operations/test_create_cluster_exception', done=True)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup Request
project_id = 'projectId-1969970175'
region = 'region-934795532'
cluster = {}
response = client.create_cluster(project_id, region, cluster)
exception = response.exception()
assert exception.errors[0] == error
def test_update_cluster(self):
# Setup Expected Response
project_id_2 = 'projectId2939242356'
cluster_name_2 = 'clusterName2875867491'
cluster_uuid = 'clusterUuid-1017854240'
expected_response = {
'project_id': project_id_2,
'cluster_name': cluster_name_2,
'cluster_uuid': cluster_uuid
}
expected_response = clusters_pb2.Cluster(**expected_response)
operation = operations_pb2.Operation(
name='operations/test_update_cluster', done=True)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup Request
project_id = 'projectId-1969970175'
region = 'region-934795532'
cluster_name = 'clusterName-1018081872'
cluster = {}
update_mask = {}
response = client.update_cluster(project_id, region, cluster_name,
cluster, update_mask)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = clusters_pb2.UpdateClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
cluster=cluster,
update_mask=update_mask)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_cluster_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name='operations/test_update_cluster_exception', done=True)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup Request
project_id = 'projectId-1969970175'
region = 'region-934795532'
cluster_name = 'clusterName-1018081872'
cluster = {}
update_mask = {}
response = client.update_cluster(project_id, region, cluster_name,
cluster, update_mask)
exception = response.exception()
assert exception.errors[0] == error
def test_delete_cluster(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name='operations/test_delete_cluster', done=True)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup Request
project_id = 'projectId-1969970175'
region = 'region-934795532'
cluster_name = 'clusterName-1018081872'
response = client.delete_cluster(project_id, region, cluster_name)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = clusters_pb2.DeleteClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_cluster_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name='operations/test_delete_cluster_exception', done=True)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup Request
project_id = 'projectId-1969970175'
region = 'region-934795532'
cluster_name = 'clusterName-1018081872'
response = client.delete_cluster(project_id, region, cluster_name)
exception = response.exception()
assert exception.errors[0] == error
def test_get_cluster(self):
# Setup Expected Response
project_id_2 = 'projectId2939242356'
cluster_name_2 = 'clusterName2875867491'
cluster_uuid = 'clusterUuid-1017854240'
expected_response = {
'project_id': project_id_2,
'cluster_name': cluster_name_2,
'cluster_uuid': cluster_uuid
}
expected_response = clusters_pb2.Cluster(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup Request
project_id = 'projectId-1969970175'
region = 'region-934795532'
cluster_name = 'clusterName-1018081872'
response = client.get_cluster(project_id, region, cluster_name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = clusters_pb2.GetClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_cluster_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup request
project_id = 'projectId-1969970175'
region = 'region-934795532'
cluster_name = 'clusterName-1018081872'
with pytest.raises(CustomException):
client.get_cluster(project_id, region, cluster_name)
def test_list_clusters(self):
# Setup Expected Response
next_page_token = ''
clusters_element = {}
clusters = [clusters_element]
expected_response = {
'next_page_token': next_page_token,
'clusters': clusters
}
expected_response = clusters_pb2.ListClustersResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup Request
project_id = 'projectId-1969970175'
region = 'region-934795532'
paged_list_response = client.list_clusters(project_id, region)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.clusters[0] == resources[0]
assert len(channel.requests) == 1
expected_request = clusters_pb2.ListClustersRequest(
project_id=project_id, region=region)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_clusters_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup request
project_id = 'projectId-1969970175'
region = 'region-934795532'
paged_list_response = client.list_clusters(project_id, region)
with pytest.raises(CustomException):
list(paged_list_response)
def test_diagnose_cluster(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name='operations/test_diagnose_cluster', done=True)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup Request
project_id = 'projectId-1969970175'
region = 'region-934795532'
cluster_name = 'clusterName-1018081872'
response = client.diagnose_cluster(project_id, region, cluster_name)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = clusters_pb2.DiagnoseClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_diagnose_cluster_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name='operations/test_diagnose_cluster_exception', done=True)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch('google.api_core.grpc_helpers.create_channel')
with patch as create_channel:
create_channel.return_value = channel
client = dataproc_v1beta2.ClusterControllerClient()
# Setup Request
project_id = 'projectId-1969970175'
region = 'region-934795532'
cluster_name = 'clusterName-1018081872'
response = client.diagnose_cluster(project_id, region, cluster_name)
exception = response.exception()
assert exception.errors[0] == error
|