#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for checks."""
import os
import yaml
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.checks import checks
from grr.lib.checks import checks_test_lib
from grr.lib.checks import filters
from grr.lib.rdfvalues import anomaly as rdf_anomaly
from grr.lib.rdfvalues import client as rdf_client
from grr.parsers import config_file as config_file_parsers
from grr.parsers import linux_cmd_parser
from grr.parsers import wmi_parser
CHECKS_DIR = os.path.join(config_lib.CONFIG["Test.data_dir"], "checks")
TRIGGER_1 = ("DebianPackagesStatus", "Linux", None, None)
TRIGGER_2 = ("WMIInstalledSoftware", "Windows", None, None)
TRIGGER_3 = ("DebianPackagesStatus", None, None, "foo")
DPKG_SW = []
WMI_SW = []
SSHD_CFG = []
def GetDPKGData():
if DPKG_SW:
return DPKG_SW
# Load some dpkg data
parser = linux_cmd_parser.DpkgCmdParser()
test_data = os.path.join(CHECKS_DIR, "data/dpkg.out")
with open(test_data, "rb") as f:
DPKG_SW.extend(
parser.Parse("/usr/bin/dpkg", ["-l"], f.read(), "", 0, 5, None))
return DPKG_SW
def GetWMIData():
if WMI_SW:
return WMI_SW
# Load some wmi data
parser = wmi_parser.WMIInstalledSoftwareParser()
test_data = os.path.join(CHECKS_DIR, "data/wmi_sw.yaml")
with open(test_data, "rb") as f:
wmi = yaml.safe_load(f)
for sw in wmi:
      WMI_SW.extend(parser.Parse(None, sw, None))
  return WMI_SW
def GetSSHDConfig():
if SSHD_CFG:
return SSHD_CFG
# Load an sshd config
parser = config_file_parsers.SshdConfigParser()
test_data = os.path.join(config_lib.CONFIG["Test.data_dir"],
"VFSFixture/etc/ssh/sshd_config")
with open(test_data, "rb") as f:
SSHD_CFG.extend(parser.Parse(None, f, None))
return SSHD_CFG
class MatchMethodTests(test_lib.GRRBaseTest):
"""Test match method selection and comparisons."""
def setUp(self):
super(MatchMethodTests, self).setUp()
self.none = []
self.one = [1]
self.some = [1, 2, 3]
self.baselines = [self.none, self.one, self.some]
self.hint = checks.Hint()
def testCheckNone(self):
"""NONE returns an anomaly if there are no results."""
matcher = checks.Matcher(["NONE"], self.hint)
for baseline in self.baselines:
self.assertIsInstance(
matcher.Detect(baseline, self.none), checks.CheckResult)
for result in [self.one, self.some]:
self.assertFalse(matcher.Detect(baseline, result))
def testCheckOne(self):
"""ONE operations should return anomalies if there is not one result."""
matcher = checks.Matcher(["ONE"], self.hint)
for baseline in self.baselines:
self.assertIsInstance(
matcher.Detect(baseline, self.one), checks.CheckResult)
for result in [self.none, self.some]:
self.assertFalse(matcher.Detect(baseline, result))
def testCheckSome(self):
"""SOME operations should return anomalies if there is >1 result."""
matcher = checks.Matcher(["SOME"], self.hint)
for baseline in self.baselines:
self.assertIsInstance(
matcher.Detect(baseline, self.some), checks.CheckResult)
for result in [self.none, self.one]:
self.assertFalse(matcher.Detect(baseline, result))
def testCheckAny(self):
"""ANY operations should not return anomalies if there are results."""
matcher = checks.Matcher(["ANY"], self.hint)
for baseline in self.baselines:
for result in [self.one, self.some]:
self.assertIsInstance(
matcher.Detect(baseline, result), checks.CheckResult)
self.assertFalse(matcher.Detect(baseline, self.none))
def testCheckAll(self):
"""ALL operations return anomalies if input and result counts differ."""
matcher = checks.Matcher(["ALL"], self.hint)
will_detect = [(self.one, self.one), (self.some, self.some)]
not_detect = [(self.none, self.none), (self.some, self.one),
(self.some, self.none)]
will_raise = [(self.none, self.one), (self.one, self.some),
(self.none, self.some)]
for base, result in will_detect:
self.assertIsInstance(matcher.Detect(base, result), checks.CheckResult)
for base, result in not_detect:
self.assertFalse(matcher.Detect(base, result))
for base, result in will_raise:
self.assertRaises(checks.ProcessingError, matcher.Detect, base, result)
def testMultipleMatch(self):
"""Checks with multiple match methods emit results if any methods fire."""
matcher = checks.Matcher(["NONE", "ONE"], self.hint)
for baseline in self.baselines:
for result in [self.none, self.one]:
self.assertIsInstance(
matcher.Detect(baseline, result), checks.CheckResult)
self.assertFalse(matcher.Detect(baseline, self.some))
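# A minimal illustrative sketch (not part of the test suite) restating how the
# match methods above compose: a Matcher wraps one or more match methods and a
# Hint, and Detect() compares a baseline against the filtered results.
def _ExampleMatcherUsage():
  matcher = checks.Matcher(["NONE"], checks.Hint())
  # With no results, NONE fires and Detect() returns a CheckResult.
  assert isinstance(matcher.Detect([], []), checks.CheckResult)
  # With results present, NONE stays quiet and Detect() returns a falsy value.
  assert not matcher.Detect([], [1, 2])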
class CheckLoaderTests(test_lib.GRRBaseTest):
"""Check definitions can be loaded."""
def testLoadToDict(self):
result = checks.LoadConfigsFromFile(os.path.join(CHECKS_DIR, "sshd.yaml"))
self.assertItemsEqual(["SSHD-CHECK", "SSHD-PERMS"], result)
# Start with basic check attributes.
result_check = result["SSHD-CHECK"]
self.assertEqual("SSHD-CHECK", result_check["check_id"])
self.assertEqual("NONE", result_check["match"])
# Now dive into the method.
result_method = result_check["method"][0]
self.assertEqual({"os": ["Linux", "Darwin"]}, result_method["target"])
self.assertEqual(["ANY"], result_method["match"])
expect_hint = {
"problem": "Sshd allows protocol 1.",
"format": "Configured protocols: {config.protocol}"
}
self.assertDictEqual(expect_hint, result_method["hint"])
# Now dive into the probe.
result_probe = result_method["probe"][0]
self.assertEqual("SshdConfigFile", result_probe["artifact"])
self.assertEqual(["ANY"], result_probe["match"])
# Now dive into the filters.
expect_filters = {
"type": "ObjectFilter",
"expression": "config.protocol contains 1"
}
result_filters = result_probe["filters"][0]
self.assertDictEqual(expect_filters, result_filters)
# Make sure any specified probe context is set.
result_check = result["SSHD-PERMS"]
probe = result_check["method"][0]["probe"][0]
result_context = str(probe["result_context"])
self.assertItemsEqual("RAW", result_context)
def testLoadFromFiles(self):
check_defs = [os.path.join(CHECKS_DIR, "sshd.yaml")]
checks.LoadChecksFromFiles(check_defs)
self.assertTrue(checks.CheckRegistry.checks.get("SSHD-CHECK"))
class CheckRegistryTests(test_lib.GRRBaseTest):
sw_chk = None
sshd_chk = None
sshd_perms = None
def _LoadCheck(self, cfg_file, check_id):
configs = checks.LoadConfigsFromFile(os.path.join(CHECKS_DIR, cfg_file))
cfg = configs.get(check_id)
return checks.Check(**cfg)
def setUp(self):
super(CheckRegistryTests, self).setUp()
if self.sw_chk is None:
self.sw_chk = self._LoadCheck("sw.yaml", "SW-CHECK")
checks.CheckRegistry.RegisterCheck(
check=self.sw_chk, source="dpkg.out", overwrite_if_exists=True)
if self.sshd_chk is None:
self.sshd_chk = self._LoadCheck("sshd.yaml", "SSHD-CHECK")
checks.CheckRegistry.RegisterCheck(
check=self.sshd_chk, source="sshd_config", overwrite_if_exists=True)
if self.sshd_perms is None:
self.sshd_perms = self._LoadCheck("sshd.yaml", "SSHD-PERMS")
checks.CheckRegistry.RegisterCheck(
check=self.sshd_perms, source="sshd_config", overwrite_if_exists=True)
self.kb = rdf_client.KnowledgeBase()
self.kb.hostname = "test.example.com"
self.host_data = {
"KnowledgeBase": self.kb,
"WMIInstalledSoftware": GetWMIData(),
"DebianPackagesStatus": GetDPKGData(),
"SshdConfigFile": GetSSHDConfig()
}
def testRegisterChecks(self):
"""Defined checks are present in the check registry."""
self.assertEqual(self.sw_chk, checks.CheckRegistry.checks["SW-CHECK"])
self.assertEqual(self.sshd_chk, checks.CheckRegistry.checks["SSHD-CHECK"])
self.assertEqual(self.sshd_perms, checks.CheckRegistry.checks["SSHD-PERMS"])
def testMapChecksToTriggers(self):
"""Checks are identified and run when their prerequisites are met."""
expect = ["SW-CHECK"]
result = checks.CheckRegistry.FindChecks(
artifact="WMIInstalledSoftware", os_name="Windows")
self.assertItemsEqual(expect, result)
result = checks.CheckRegistry.FindChecks(
artifact="DebianPackagesStatus", os_name="Linux")
self.assertItemsEqual(expect, result)
result = checks.CheckRegistry.FindChecks(
artifact="DebianPackagesStatus", labels="foo")
self.assertItemsEqual(expect, result)
expect = set(["SSHD-CHECK"])
result = set(
checks.CheckRegistry.FindChecks(
artifact="SshdConfigFile", os_name="Darwin"))
residual = expect - result
self.assertFalse(residual)
result = set(
checks.CheckRegistry.FindChecks(
artifact="SshdConfigFile", os_name="Linux"))
residual = expect - result
self.assertFalse(residual)
# All sshd config checks specify an OS, so should get no results.
expect = set([])
result = set(checks.CheckRegistry.FindChecks(artifact="SshdConfigFile"))
residual = expect - result
self.assertFalse(residual)
result = set(
checks.CheckRegistry.FindChecks(
artifact="SshdConfigFile", os_name="Windows"))
residual = expect - result
self.assertFalse(residual)
def testRestrictChecksFiltersCheckOptions(self):
result = set(
checks.CheckRegistry.FindChecks(
artifact="SshdConfigFile",
os_name="Linux",
restrict_checks=["SSHD-CHECK"]))
self.assertItemsEqual(["SSHD-CHECK"], result)
result = set(
checks.CheckRegistry.FindChecks(
artifact="SshdConfigFile",
os_name="Linux",
restrict_checks=["SW_CHECK"]))
self.assertFalse(result)
def testMapArtifactsToTriggers(self):
"""Identify the artifacts that should be collected based on criteria."""
# Test whether all expected checks were mapped.
expect = set(["DebianPackagesStatus", "SshdConfigFile"])
result = set(checks.CheckRegistry.SelectArtifacts(os_name="Linux"))
residual = expect - result
self.assertFalse(residual)
expect = set(["WMIInstalledSoftware"])
result = set(checks.CheckRegistry.SelectArtifacts(os_name="Windows"))
residual = expect - result
self.assertFalse(residual)
expect = set(["DebianPackagesStatus"])
result = set(
checks.CheckRegistry.SelectArtifacts(
os_name=None, cpe=None, labels="foo"))
residual = expect - result
self.assertFalse(residual)
expect = set(["DebianPackagesStatus"])
result = set(
checks.CheckRegistry.SelectArtifacts(
os_name="Linux", restrict_checks=["SW-CHECK"]))
self.assertItemsEqual(expect, result)
class ProcessHostDataTests(checks_test_lib.HostCheckTest):
def setUp(self):
super(ProcessHostDataTests, self).setUp()
registered = checks.CheckRegistry.checks.keys()
if "SW-CHECK" not in registered:
checks.LoadChecksFromFiles([os.path.join(CHECKS_DIR, "sw.yaml")])
if "SSHD-CHECK" not in registered:
checks.LoadChecksFromFiles([os.path.join(CHECKS_DIR, "sshd.yaml")])
self.netcat = checks.CheckResult(
check_id="SW-CHECK",
anomaly=[
rdf_anomaly.Anomaly(
finding=["netcat-traditional 1.10-40 is installed"],
symptom="Found: l337 software installed",
type="ANALYSIS_ANOMALY")
])
self.sshd = checks.CheckResult(
check_id="SSHD-CHECK",
anomaly=[
rdf_anomaly.Anomaly(
finding=["Configured protocols: 2,1"],
symptom="Found: Sshd allows protocol 1.",
type="ANALYSIS_ANOMALY")
])
self.windows = checks.CheckResult(
check_id="SW-CHECK",
anomaly=[
rdf_anomaly.Anomaly(
finding=["Java 6.0.240 is installed"],
symptom="Found: Old Java installation.",
type="ANALYSIS_ANOMALY"), rdf_anomaly.Anomaly(
finding=["Adware 2.1.1 is installed"],
symptom="Found: Malicious software.",
type="ANALYSIS_ANOMALY")
])
self.data = {
"WMIInstalledSoftware": self.SetArtifactData(parsed=GetWMIData()),
"DebianPackagesStatus": self.SetArtifactData(parsed=GetDPKGData()),
"SshdConfigFile": self.SetArtifactData(parsed=GetSSHDConfig())
}
def testProcessLinuxHost(self):
"""Checks detect issues and return anomalies as check results."""
host_data = self.SetKnowledgeBase("host.example.org", "Linux", self.data)
results = self.RunChecks(host_data)
self.assertRanChecks(["SW-CHECK", "SSHD-CHECK"], results)
self.assertResultEqual(self.netcat, results["SW-CHECK"])
self.assertResultEqual(self.sshd, results["SSHD-CHECK"])
def testProcessLinuxRestrictChecks(self):
"""Checks detect issues and return anomalies as check results."""
host_data = self.SetKnowledgeBase("host.example.org", "Linux", self.data)
results = self.RunChecks(host_data, restrict_checks=["SW-CHECK"])
self.assertRanChecks(["SW-CHECK"], results)
self.assertResultEqual(self.netcat, results["SW-CHECK"])
def testProcessWindowsHost(self):
host_data = self.SetKnowledgeBase("host.example.org", "Windows", self.data)
results = self.RunChecks(host_data)
self.assertRanChecks(["SW-CHECK"], results)
self.assertResultEqual(self.windows, results["SW-CHECK"])
def testProcessDarwinHost(self):
host_data = self.SetKnowledgeBase("host.example.org", "Darwin", self.data)
results = self.RunChecks(host_data)
self.assertRanChecks(["SSHD-CHECK"], results)
self.assertResultEqual(self.sshd, results["SSHD-CHECK"])
class ChecksTestBase(test_lib.GRRBaseTest):
pass
class FilterTests(ChecksTestBase):
"""Test 'Filter' setup and operations."""
def setUp(self, *args, **kwargs):
super(FilterTests, self).setUp(*args, **kwargs)
filters.Filter.filters = {}
def tearDown(self, *args, **kwargs):
filters.Filter.filters = {}
super(FilterTests, self).tearDown(*args, **kwargs)
def testNonexistentFilterIsError(self):
self.assertRaises(filters.DefinitionError, checks.Filter, type="NoFilter")
def testAddFilters(self):
base_filt = checks.Filter(type="Filter", expression="do nothing")
self.assertIsInstance(base_filt._filter, filters.Filter)
obj_filt = checks.Filter(type="ObjectFilter", expression="test is 'ok'")
self.assertIsInstance(obj_filt._filter, filters.ObjectFilter)
rdf_filt = checks.Filter(
type="RDFFilter", expression="AttributedDict,SSHConfig")
self.assertIsInstance(rdf_filt._filter, filters.RDFFilter)
class ProbeTest(ChecksTestBase):
"""Test 'Probe' operations."""
configs = {}
def setUp(self, **kwargs):
super(ProbeTest, self).setUp(**kwargs)
if not self.configs:
config_file = os.path.join(CHECKS_DIR, "probes.yaml")
with open(config_file, "rb") as data:
for cfg in yaml.safe_load_all(data):
name = cfg.get("name")
probe_cfg = cfg.get("probe", [{}])
self.configs[name] = probe_cfg[0]
def Init(self, name, artifact, handler_class, result_context):
"""Helper method to verify that the Probe sets up the right handler."""
cfg = self.configs.get(name)
probe = checks.Probe(**cfg)
self.assertEqual(artifact, probe.artifact)
self.assertIsInstance(probe.handler, handler_class)
self.assertIsInstance(probe.matcher, checks.Matcher)
self.assertItemsEqual(result_context, str(probe.result_context))
def testInitialize(self):
"""Tests the input/output sequence validation."""
self.Init("NO-FILTER", "DpkgDb", filters.NoOpHandler, "PARSER")
self.Init("ANOM-CONTEXT", "DpkgDb", filters.NoOpHandler, "ANOMALY")
self.Init("SERIAL", "DpkgDb", filters.SerialHandler, "PARSER")
self.Init("PARALLEL", "DpkgDb", filters.ParallelHandler, "PARSER")
self.Init("BASELINE", "DpkgDb", filters.SerialHandler, "PARSER")
def testParse(self):
"""Host data should be passed to filters, results should be returned."""
pass
def testParseWithBaseline(self):
pass
def testValidate(self):
cfg = self.configs.get("NO-ARTIFACT")
self.assertRaises(filters.DefinitionError, checks.Probe, cfg)
class MethodTest(ChecksTestBase):
"""Test 'Method' operations."""
configs = {}
def setUp(self, **kwargs):
super(MethodTest, self).setUp(**kwargs)
if not self.configs:
config_file = os.path.join(CHECKS_DIR, "sw.yaml")
with open(config_file, "rb") as data:
check_def = yaml.safe_load(data)
self.configs = check_def["method"]
def testMethodRegistersTriggers(self):
m_1, m_2, m_3 = [checks.Method(**cfg) for cfg in self.configs]
expect_1 = [TRIGGER_1]
result_1 = [c.attr for c in m_1.triggers.conditions]
self.assertEqual(expect_1, result_1)
expect_2 = [TRIGGER_2]
result_2 = [c.attr for c in m_2.triggers.conditions]
self.assertEqual(expect_2, result_2)
expect_3 = [TRIGGER_3]
result_3 = [c.attr for c in m_3.triggers.conditions]
self.assertEqual(expect_3, result_3)
def testMethodRoutesDataToProbes(self):
pass
def testValidate(self):
pass
class CheckTest(ChecksTestBase):
"""Test 'Check' operations."""
cfg = {}
def setUp(self, **kwargs):
super(CheckTest, self).setUp(**kwargs)
if not self.cfg:
config_file = os.path.join(CHECKS_DIR, "sw.yaml")
with open(config_file, "rb") as data:
self.cfg = yaml.safe_load(data)
self.host_data = {
"DebianPackagesStatus": {
"ANOMALY": [],
"PARSER": GetDPKGData(),
"RAW": []
},
"WMIInstalledSoftware": {
"ANOMALY": [],
"PARSER": GetWMIData(),
"RAW": []
}
}
def testInitializeCheck(self):
chk = checks.Check(**self.cfg)
self.assertEqual("SW-CHECK", chk.check_id)
self.assertItemsEqual(["ANY"], [str(c) for c in chk.match])
def testGenerateTriggerMap(self):
chk = checks.Check(**self.cfg)
expect = [TRIGGER_1, TRIGGER_3]
result = [c.attr for c in chk.triggers.Search("DebianPackagesStatus")]
self.assertItemsEqual(expect, result)
expect = [TRIGGER_2]
result = [c.attr for c in chk.triggers.Search("WMIInstalledSoftware")]
self.assertItemsEqual(expect, result)
def testParseCheckFromConfig(self):
chk = checks.Check(**self.cfg)
# Triggers 1 (linux packages) & 2 (windows software) should return results.
# Trigger 3 should not return results as no host data has the label 'foo'.
result_1 = chk.Parse([TRIGGER_1], self.host_data)
result_2 = chk.Parse([TRIGGER_2], self.host_data)
result_3 = chk.Parse([TRIGGER_3], self.host_data)
self.assertTrue(result_1)
self.assertTrue(result_2)
self.assertFalse(result_3)
def testValidate(self):
pass
class CheckResultsTest(ChecksTestBase):
"""Test 'CheckResult' operations."""
def testExtendAnomalies(self):
anomaly1 = {
"finding": ["Adware 2.1.1 is installed"],
"symptom": "Found: Malicious software.",
"explanation": "Remove software.",
"type": "ANALYSIS_ANOMALY"
}
anomaly2 = {
"finding": ["Java 6.0.240 is installed"],
"symptom": "Found: Old Java installation.",
"explanation": "Update Java.",
"type": "ANALYSIS_ANOMALY"
}
result = checks.CheckResult(
check_id="SW-CHECK", anomaly=rdf_anomaly.Anomaly(**anomaly1))
other = checks.CheckResult(
check_id="SW-CHECK", anomaly=rdf_anomaly.Anomaly(**anomaly2))
result.ExtendAnomalies(other)
expect = {"check_id": "SW-CHECK", "anomaly": [anomaly1, anomaly2]}
self.assertDictEqual(expect, result.ToPrimitiveDict())
class HintDefinitionTests(ChecksTestBase):
"""Test 'Hint' operations."""
configs = {}
def setUp(self, **kwargs):
super(HintDefinitionTests, self).setUp(**kwargs)
if not self.configs:
config_file = os.path.join(CHECKS_DIR, "sw.yaml")
with open(config_file, "rb") as data:
cfg = yaml.safe_load(data)
chk = checks.Check(**cfg)
self.lin_method, self.win_method, self.foo_method = list(chk.method)
def testInheritHintConfig(self):
# Adding newlines to ensure they get stripped (can happen when reading from
# YAML).
lin_problem = "l337 software installed\n"
lin_format = "{name} {version} is installed\n"
# Methods should not have a hint template.
self.assertEqual(lin_problem.strip(), self.lin_method.hint.problem)
self.assertFalse(self.lin_method.hint.hinter.template)
# Formatting should be present in probes, if defined.
for probe in self.lin_method.probe:
self.assertEqual(lin_problem.strip(), probe.hint.problem)
self.assertEqual(lin_format.strip(), probe.hint.format)
foo_problem = "Sudo not installed\n"
# Methods should not have a hint template.
self.assertEqual(foo_problem.strip(), self.foo_method.hint.problem)
self.assertFalse(self.foo_method.hint.hinter.template)
# Formatting should be missing in probes, if undefined.
for probe in self.foo_method.probe:
self.assertEqual(foo_problem.strip(), probe.hint.problem)
self.assertFalse(probe.hint.format)
def testOverlayHintConfig(self):
# Adding newlines to ensure they get stripped (can happen when reading from
# YAML).
generic_problem = "Malicious software.\n"
java_problem = "Old Java installation.\n"
generic_format = "{name} {version} is installed\n"
# Methods should not have a hint template.
self.assertEqual(generic_problem.strip(), self.win_method.hint.problem)
self.assertFalse(self.win_method.hint.hinter.template)
# Formatting should be present in probes.
probe_1, probe_2 = list(self.win_method.probe)
self.assertEqual(java_problem.strip(), probe_1.hint.problem)
self.assertEqual(generic_format.strip(), probe_1.hint.format)
self.assertEqual(generic_problem.strip(), probe_2.hint.problem)
self.assertEqual(generic_format.strip(), probe_2.hint.format)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
"""Decorator functions
"""
class Var(object):
"""A variable, consisting of a name and potentially an assignment"""
def __init__( self, name ):
"""Create a variable with the given name
>>> v = Var('name')
>>> v
Var('name')
>>> v.name
'name'
"""
self.name = name
def __repr__( self ):
return "%s(%r)" % (self.__class__.__name__, self.name)
def substitute( self, valuedict ):
"""Return the result of applying substitution given the dict of values
>>> v = Var('hi')
>>> v.substitute({'x': 'stuff'})
Var('hi')
>>> v.substitute({'hi': 'hi there'})
'hi there'
"""
return valuedict.get(self.name, self)
class Predicate(object):
"""A predicate.
- Can be matched with other predicates.
- Can contain variables.
"""
def __init__( self ):
self.vars = []
class FilePattern(Predicate):
def __init__( self, pattern ):
"""Parses the pattern into an internal representation.
The representation can be used for matching, and contains a list of the
variables in the pattern. The input is designed to follow the format of
        Python 3.0 substitution strings, e.g.
"{base}.cc"
"myfile-{number}.{ext}"
>>> fp = FilePattern("myfile-{number}.{ext}")
>>> fp.pattern
'myfile-{number}.{ext}'
>>> fp.vars
[Var('number'), Var('ext')]
>>> fp.consts
['myfile-', '.']
"""
super(FilePattern, self).__init__()
self.pattern = pattern
self.components = list(self._components(self._tokens(pattern)))
self.vars = list(c for c in self.components if isinstance(c, Var))
self.consts = list(c for c in self.components if not isinstance(c, Var))
def realized( self, **values ):
"""Return a realized version of the pattern, with variables filled in. It
is okay to leave some of them unset.
>>> fp = FilePattern("{{}}my}}file-{number}.{ext}")
>>> fp.realized(number = '1')
FilePattern('{{}}my}}file-1.{ext}')
>>> fp.realized(number = '2', ext = 'hi')
FilePattern('{{}}my}}file-2.hi')
"""
sub = self._substitute
pattern_list = []
for component in self.components:
s = sub(component, values)
if isinstance(s, Var):
pattern_list.append("{%s}" % s.name)
else:
pattern_list.append(s.replace('{', '{{').replace('}', '}}'))
return FilePattern("".join(pattern_list))
def unify( self, other ):
pass
def __repr__( self ):
return "%s(%r)" % (self.__class__.__name__, self.pattern)
@staticmethod
def _substitute( item, valuedict ):
"""Perform a substitution on this item given the values in the dict
>>> FP = FilePattern
>>> FP._substitute(Var('hi'), {'x': '1'})
Var('hi')
>>> FP._substitute(Var('hi'), {'hi': 'hi there'})
'hi there'
>>> FP._substitute(Var('hi'), {'hi': Var('hi there')})
Var('hi there')
>>> FP._substitute('a string', {'a string': 'something else'})
'a string'
"""
if isinstance(item, Var) and item:
return item.substitute(valuedict)
else:
return item
@staticmethod
def _components( tokens ):
""" Returns the main components, split into consts and vars.
        Variable names may not contain braces.
>>> FP = FilePattern
>>> list(FP._components(FP._tokens("ab{cde}fg}}{{{hi}")))
['ab', Var('cde'), 'fg}{', Var('hi')]
>>> try:
... list(FP._components(FP._tokens("{abc}{}")))
... except ValueError, e:
... print e
Empty variable name
>>> list(FP._components(FP._tokens("{abc}d")))
[Var('abc'), 'd']
>>> list(FP._components(FP._tokens("a{bcd}")))
['a', Var('bcd')]
"""
unmatched_open = False
unmatched_close = False
unfilled_var = False
full_token = ""
for token in tokens:
if unmatched_close:
if token == '}':
full_token += token
unmatched_close = False
else:
raise ValueError("Unmatched, unescaped '}'")
elif unmatched_open:
if token == '{':
if unfilled_var:
# This token has just been doubled
full_token += token
unmatched_open = False
unfilled_var = False
else:
raise ValueError("'{' within a variable name")
elif token == '}':
# Just closed a variable name. Emit.
if unfilled_var:
raise ValueError("Empty variable name")
else:
unmatched_open = False
yield Var(full_token)
full_token = ""
else:
# We have a non-brace token, and are inside of a variable name. Emit
# the current token, and make this variable name the new token
if full_token:
yield full_token
full_token = token
unfilled_var = False
else:
# Nothing unmatched. This is just a token.
if token == '{':
unmatched_open = True
unfilled_var = True
elif token == '}':
unmatched_close = True
else:
full_token += token
if full_token:
yield full_token
@staticmethod
def _tokens( s ):
"""Returns an interator over tokens in s. Mostly just separates braces out.
>>> list(FilePattern._tokens("ab{cde}fg}}{{{hi}"))
['ab', '{', 'cde', '}', 'fg', '}', '}', '{', '{', '{', 'hi', '}']
"""
open_iter = iter(s.split("{"))
open_token = open_iter.next()
close_iter = iter(open_token.split("}"))
token = close_iter.next()
if token:
yield token
for token in close_iter:
yield "}"
if token:
yield token
for open_token in open_iter:
yield "{"
close_iter = iter(open_token.split("}"))
token = close_iter.next()
if token:
yield token
for token in close_iter:
yield "}"
if token:
yield token
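# A small doctest-only sketch (illustrative; it uses only the API defined
# above) showing the parse -> inspect -> realize round trip.
def _example_roundtrip():
    """Parse a pattern, inspect its variables, then fill them incrementally.
    >>> fp = FilePattern("{base}-{n}.{ext}")
    >>> [v.name for v in fp.vars]
    ['base', 'n', 'ext']
    >>> fp.realized(base='log')
    FilePattern('log-{n}.{ext}')
    >>> fp.realized(base='log').realized(n='1', ext='txt')
    FilePattern('log-1.txt')
    """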
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes([axis] + list(
labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(
array_ops.gather(transposed.tensor, indexer), temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types,
tc.Union(slice, collections.Hashable, list)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
# Needs to be after checking for slices, since slice objects claim to be
# instances of collections.Hashable but hash() on them fails.
elif isinstance(value, collections.Hashable):
slices[axis_name] = axis.index(value)
elif isinstance(value, list):
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value]
indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
else:
# If type checking is working properly, this shouldn't be possible.
raise TypeError('cannot handle arbitrary types')
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
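# A minimal usage sketch (illustrative, not part of the library) of the three
# selection modes documented above; the axis names and tick labels here are
# assumptions.
def _select_example():
  lt = core.LabeledTensor(
      array_ops.constant([[1, 2, 3], [4, 5, 6]]),
      [('row', ['a', 'b']), ('col', ['x', 'y', 'z'])])
  by_scalar = select(lt, {'row': 'a'})             # drops the 'row' axis
  by_slice = select(lt, {'col': slice('x', 'y')})  # inclusive of both bounds
  by_list = select(lt, {'col': ['x', 'z']})        # one list selection per call
  return by_scalar, by_slice, by_list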
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
    ValueError: If fewer than one tensor input is provided, if the tensors
      have incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
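# A brief sketch (illustrative, not part of the library): concatenating along
# a labeled axis also concatenates the tick labels; names are assumptions.
def _concat_example():
  first = core.LabeledTensor(
      array_ops.constant([[1, 2]]), [('batch', 1), ('x', ['p', 'q'])])
  second = core.LabeledTensor(
      array_ops.constant([[3]]), [('batch', 1), ('x', ['r'])])
  return concat([first, second], 'x')  # 'x' axis gets labels ['p', 'q', 'r']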
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
    ValueError: If fewer than one input tensor is provided, or if the tensors
      don't have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensor, but received %s' %
                       labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
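# A short sketch (illustrative only) pairing pack with unpack; the 'pair' axis
# name is an assumption.
def _pack_unpack_example():
  first = core.LabeledTensor(array_ops.constant([1, 2]), ['x'])
  second = core.LabeledTensor(array_ops.constant([3, 4]), ['x'])
  pair = pack([first, second], 'pair')       # axes: pair=2, x=2
  restored = unpack(pair, axis_name='pair')  # a list of two LabeledTensors
  return pair, restored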
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
      # produced with the wrong order. (The latter will occur with some
      # frequency because of how broadcasting automatically chooses axis
      # order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
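# A compact sketch (illustrative, not part of the library): collapsing two
# adjacent axes into one new sized axis; names and sizes are assumptions.
def _reshape_example():
  lt = core.LabeledTensor(
      array_ops.constant([[[1, 2], [3, 4]]]), ['batch', 'row', 'col'])
  return reshape(lt, ['row', 'col'], [('pixel', 4)])  # axes: batch, pixel=4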
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r is not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
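# A one-liner sketch (illustrative) of rename_axis; axis names are
# assumptions.
def _rename_axis_example():
  lt = core.LabeledTensor(array_ops.constant([[1, 2], [3, 4]]), ['row', 'col'])
  return rename_axis(lt, 'col', 'feature')  # axes: row, feature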
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(
labeled_tensor.tensor, shape, seed=seed, name=scope)
return core.LabeledTensor(crop_op, axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
# TODO(ericmc): Fix this upstream.
if labeled_tensor.dtype == dtypes.string:
# We must construct the full graph here, because map_fn_lib.map_fn
# doesn't work for string-valued tensors.
# Constructing the full graph may be slow.
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
else:
# Figure out what the axis labels should be, but use tf.map_fn to
# construct the graph because it's efficient.
# It may be slow to construct the full graph, so we infer the labels from
# the first element.
# TODO(ericmc): This builds a subgraph which then gets thrown away.
# Find a more elegant solution.
first_map_lt = fn(unpack_lts[0])
final_axes = list(labeled_tensor.axes.values())[:1] + list(
first_map_lt.axes.values())
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor)
def tf_fn(tensor):
original_axes = list(labeled_tensor.axes.values())[1:]
tensor_lt = core.LabeledTensor(tensor, original_axes)
return fn(tensor_lt).tensor
map_op = map_fn_lib.map_fn(
tf_fn, labeled_tensor.tensor, dtype=first_map_lt.dtype)
map_lt = core.LabeledTensor(map_op, final_axes)
return core.identity(map_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
"""Left fold on the list of tensors unpacked from labeled_tensor.
See tf.foldl.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
Its arguments are (accumulated_value, next_value).
labeled_tensor: The input tensor.
initial_value: The initial value of the accumulator.
name: Optional op name.
Returns:
The accumulated value.
"""
with ops.name_scope(name, 'lt_foldl',
[labeled_tensor, initial_value]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
initial_value = core.convert_to_labeled_tensor(initial_value)
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor, ops.Tensor)
def tf_fn(accumulator, next_element):
accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
next_element_lt = core.LabeledTensor(
next_element, list(labeled_tensor.axes.values())[1:])
return fn(accumulator_lt, next_element_lt).tensor
foldl_op = functional_ops.foldl(
tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
return core.identity(foldl_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(
labeled_tensor.tensor, squeeze_dimensions, name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
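# A minimal sketch (illustrative): dropping one named size-1 axis while the
# other size-1 axis is kept; axis names are assumptions.
def _squeeze_example():
  lt = core.LabeledTensor(
      array_ops.constant([[[1], [2]]]), ['batch', 'x', 'channel'])
  return squeeze(lt, ['channel'])  # axes: batch=1, x=2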
# pylint: disable=invalid-name
ReduceAxis = tc.Union(string_types,
tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
  We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number of shared dimensions (via an
  `axis` argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
    LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in order of appearance on the
    inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
      raise ValueError('there must be exactly one axis in common between '
                       'inputs to matmul: %r, %r' %
                       (a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [
name for name in axis_scope_order if name in result_axis_names
]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(
a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
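# A minimal sketch (illustrative, not part of the library): a matrix-vector
# product contracts the single shared axis; names are assumptions.
def _matmul_example():
  mat = core.LabeledTensor(
      array_ops.constant([[1.0, 2.0], [3.0, 4.0]]), ['row', 'col'])
  vec = core.LabeledTensor(array_ops.constant([5.0, 6.0]), ['col'])
  return matmul(mat, vec)  # rank-1 result with axis 'row'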
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
Axes must all be strings, in which case those dimensions will be
removed, or pairs of (name, None) or (name, label), in which case those
dimensions will be kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(
labeled_tensor.tensor, reduction_dimensions, keepdims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
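# A short sketch (illustrative) of one generated reduction: summing over the
# named 'col' axis squeezes it out of the result; names are assumptions.
def _reduce_sum_example():
  lt = core.LabeledTensor(
      array_ops.constant([[1, 2, 3], [4, 5, 6]]), ['row', 'col'])
  return reduce_sum(lt, 'col')  # rank-1 result with axis 'row'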
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
  See tf.tile.
Args:
labeled_tensor: The input tensor.
multiples: A mapping where the keys are axis names and the values are the
integer number of times to tile along that axis. Only axes with a multiple
different than 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [
name for name in multiples
if labeled_tensor.axes[name].labels is not None
]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [
axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()
]
return core.LabeledTensor(tile_op, new_axes)
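# A minimal usage sketch (hypothetical names): only axes without tick labels
# may be tiled, so with `x` having axes [('batch', 7), ('channel', ['r', 'g', 'b'])]
# only 'batch' can appear in `multiples`.
#
#   tiled = tile(x, {'batch': 3})  # 'batch' grows to 21, 'channel' keeps its labels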
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(labeled_tensor.tensor,
padding_pairs,
mode,
name=scope)
return core.LabeledTensor(pad_op, new_axes)
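# A minimal usage sketch (hypothetical names, assuming the 'x' axis carries tick
# labels): the per-axis tuple gives the labels inserted before and after, so the
# padded axis grows by len(before) + len(after).
#
#   padded = pad(lt_in, {'x': (['pre'], ['post_1', 'post_2'])}, mode='CONSTANT')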
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(
tc.Union(core.Axes, tc.Collection(
tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
    The constant LabeledTensor.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
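# A minimal usage sketch: with string axes the shape is inferred from `value`,
# while fully specified axes pass an explicit shape through to tf.constant.
#
#   c1 = constant([1, 2, 3], axes=['x'])           # shape inferred as [3]
#   c2 = constant(0.0, axes=[('x', 3), ('y', 2)])  # scalar broadcast to [3, 2]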
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(
labeled_tensor.tensor, msg=message, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
axis are removed, because True indices in `mask` may not be known dynamically.
Args:
labeled_tensor: The input tensor.
    mask: A 1-D boolean LabeledTensor to apply along the first axis of
      `labeled_tensor`.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
    ValueError: if the first axis of the mask does not match the first axis
      of `labeled_tensor`.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.get_static_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
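# A minimal usage sketch (hypothetical names): `mask` must be 1-D and share its
# single axis with the first axis of `lt_in`; that axis keeps its name but loses
# its labels, since the number of True entries is only known at run time.
#
#   kept = boolean_mask(lt_in, mask)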
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
    ValueError: if `condition`, `x`, and `y` do not have identical axes.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
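# A minimal usage sketch (hypothetical names): all three inputs must share
# identical axes, mirroring the three-argument form of tf.where.
#
#   result = where(condition_lt, x_lt, y_lt)  # x where True, y where False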
|
|
from twisted.trial import unittest
from twisted.internet import defer
from coherence.dispatcher import Dispatcher, UnknownSignal, Receiver, \
SignalingProperty, ChangedSignalingProperty, CustomSignalingProperty
class TestDispatcher(Dispatcher):
__signals__ = {'test': 'Test signal'}
class SimpleTarget(object):
def __init__(self):
self.called = 0
self.called_a = 0
self.called_b = 0
self.called_c = 0
self.called_d = 0
def callback(self):
self.called += 1
def updater(self, arg1, arg2, value, arg4, key_a='p', variable=None):
setattr(self, variable, value)
setattr(self, "%s_%s" % (variable, arg2), key_a)
def plus(self, plus, variable=False):
setattr(self, variable, getattr(self, variable) + plus)
    def fail_before(self, plus, variable=False):
        # Simulates a receiver that fails before doing any work.
        raise TypeError
        self.update(plus, variable=variable)  # unreachable by design
class TestDispatching(unittest.TestCase):
def setUp(self):
self.called_counter = 0
self.dispatcher = TestDispatcher()
self.target = SimpleTarget()
def test_simple_emit(self):
receiver = self.dispatcher.connect('test', self.target.callback)
self.dispatcher.emit('test')
self.assertEquals(self.target.called, 1)
self.dispatcher.emit('test')
self.assertEquals(self.target.called, 2)
self.dispatcher.disconnect(receiver)
self.dispatcher.emit('test')
self.assertEquals(self.target.called, 2)
def test_simple_deferred_emit(self):
receiver = self.dispatcher.connect('test', self.target.callback)
self.dispatcher.deferred_emit('test')
self.assertEquals(self.target.called, 1)
self.dispatcher.deferred_emit('test')
self.assertEquals(self.target.called, 2)
self.dispatcher.disconnect(receiver)
self.dispatcher.deferred_emit('test')
self.assertEquals(self.target.called, 2)
def test_simple_save_emit(self):
def call(res):
return self.dispatcher.save_emit('test')
def test(res, val):
self.assertEquals(self.target.called, val)
receiver = self.dispatcher.connect('test', self.target.callback)
dfr = defer.succeed(None)
dfr.addCallback(call)
dfr.addCallback(test, 1)
dfr.addCallback(call)
dfr.addCallback(test, 2)
dfr.addCallback(lambda x: self.dispatcher.disconnect(receiver))
dfr.addCallback(call)
dfr.addCallback(test, 2)
return dfr
def test_connect_typo(self):
self.assertRaises(UnknownSignal, self.dispatcher.connect, 'Test', None)
def test_disconnect_none_receiver(self):
"""
trying to disconnect with None shouldn't fail, it is a valid use case
"""
self.dispatcher.disconnect(None)
def test_disconnect_false_receiver(self):
"""
this receiver isn't coming from this dispatcher
"""
        # This is deliberately contrived; you normally should not instantiate a Receiver yourself.
rec = Receiver('test', None, None, None)
self.dispatcher.disconnect(rec)
def test_disconnect_wrong_signal_receiver(self):
rec = Receiver('Test', None, None, None)
self.assertRaises(UnknownSignal, self.dispatcher.disconnect, rec)
def test_disconnect_not_receiver(self):
self.assertRaises(TypeError, self.dispatcher.disconnect, 'test')
def test_emit_false_signal(self):
self.assertRaises(UnknownSignal, self.dispatcher.emit, False)
def test_emit_without_receivers(self):
self.dispatcher.emit('test')
self.assertEquals(self.target.called, 0)
def test_emit_with_multiple_receiver(self):
rc1 = self.dispatcher.connect('test', self.target.updater,
1, 2, variable='va1')
rc2 = self.dispatcher.connect('test', self.target.updater,
'value', 2, variable='variable')
rc3 = self.dispatcher.connect('test', self.target.updater,
'other', 2, variable='one')
self.dispatcher.emit('test', self, 'other', key_a='q')
# check rc1
self.assertEquals(self.target.va1, 1)
self.assertEquals(self.target.va1_other, 'q')
#check rc2
self.assertEquals(self.target.variable, 'value')
self.assertEquals(self.target.variable_other, 'q')
# check rc3
self.assertEquals(self.target.one, 'other')
self.assertEquals(self.target.one_other, 'q')
        # now removing the one in the middle
self.dispatcher.disconnect(rc2)
# and try again with other data
self.dispatcher.emit('test', self, 'other', key_a='thistime')
# check rc1
self.assertEquals(self.target.va1, 1)
self.assertEquals(self.target.va1_other, 'thistime')
#check rc2
self.assertEquals(self.target.variable, 'value')
self.assertEquals(self.target.variable_other, 'q')
# check rc3
self.assertEquals(self.target.one, 'other')
self.assertEquals(self.target.one_other, 'thistime')
# no keyword
self.dispatcher.emit('test', self, 'a')
# worked for rc1 and rc3 with the default value
self.assertEquals(self.target.va1_a, 'p')
self.assertEquals(self.target.one_a, 'p')
# but not on rc2
self.assertFalse(hasattr(self.target, 'variable_a'))
self.dispatcher.disconnect(rc1)
self.dispatcher.disconnect(rc3)
def test_emit_multiple_with_failing_in_between(self):
rc1 = self.dispatcher.connect('test', self.target.plus,
1, variable='called_a')
rc2 = self.dispatcher.connect('test', self.target.plus,
2, variable='called_b')
rc3 = self.dispatcher.connect('test', self.target.fail_before,
3, variable='called_c')
rc4 = self.dispatcher.connect('test', self.target.plus,
4, variable='called_d')
self.dispatcher.emit('test')
self.assertEquals(self.target.called_a, 1)
self.assertEquals(self.target.called_b, 2)
self.assertEquals(self.target.called_c, 0)
self.assertEquals(self.target.called_d, 4)
self.dispatcher.emit('test')
self.assertEquals(self.target.called_a, 2)
self.assertEquals(self.target.called_b, 4)
self.assertEquals(self.target.called_c, 0)
self.assertEquals(self.target.called_d, 8)
self.dispatcher.disconnect(rc1)
self.dispatcher.disconnect(rc2)
self.dispatcher.disconnect(rc3)
self.dispatcher.disconnect(rc4)
# Receiver tests
class TestReceiver(unittest.TestCase):
def setUp(self):
self.called = 0
def _callback(self, *args, **kw):
self.called += 1
self.args = args
self.kw = kw
def test_simple_calling(self):
rec = Receiver('test', self._callback, (), {})
self.assertEquals(rec.signal, 'test')
rec()
self.assertEquals(self.called, 1)
self.assertEquals(self.args, ())
self.assertEquals(self.kw, {})
rec()
self.assertEquals(self.called, 2)
self.assertEquals(self.args, ())
self.assertEquals(self.kw, {})
rec()
self.assertEquals(self.called, 3)
self.assertEquals(self.args, ())
self.assertEquals(self.kw, {})
def test_calling_with_args(self):
rec = Receiver('test', self._callback, (1, 2, 3), {'test': 'a'})
self.assertEquals(rec.signal, 'test')
rec(0)
self.assertEquals(self.called, 1)
self.assertEquals(self.args, (0, 1, 2, 3))
self.assertEquals(self.kw, {'test': 'a'})
rec(-1)
self.assertEquals(self.called, 2)
self.assertEquals(self.args, (-1, 1, 2, 3))
self.assertEquals(self.kw, {'test': 'a'})
rec(-2)
self.assertEquals(self.called, 3)
self.assertEquals(self.args, (-2, 1, 2, 3))
self.assertEquals(self.kw, {'test': 'a'})
def test_calling_with_kw(self):
rec = Receiver('test', self._callback, (1, 2, 3), {'test': 'a'})
self.assertEquals(rec.signal, 'test')
rec(p='q')
self.assertEquals(self.called, 1)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'p': 'q'})
rec(other='wise')
self.assertEquals(self.called, 2)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'other': 'wise'})
rec(and_one='more')
self.assertEquals(self.called, 3)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'and_one': 'more'})
def test_calling_with_clashing_kw(self):
rec = Receiver('test', self._callback, (1, 2, 3), {'test': 'a', 'p': 'a'})
self.assertEquals(rec.signal, 'test')
rec(p='q')
self.assertEquals(self.called, 1)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'p': 'q'})
rec(other='wise')
self.assertEquals(self.called, 2)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'other': 'wise', 'p': 'a'})
def test_calling_with_clashing_kw_and_args(self):
rec = Receiver('test', self._callback, (1, 2, 3), {'test': 'a', 'p': 'a'})
self.assertEquals(rec.signal, 'test')
# without
rec()
self.assertEquals(self.called, 1)
self.assertEquals(self.args, (1, 2, 3))
self.assertEquals(self.kw, {'test': 'a', 'p': 'a'})
rec(1, 2, 7, test='True', o='p')
self.assertEquals(self.called, 2)
self.assertEquals(self.args, (1, 2, 7, 1, 2, 3))
self.assertEquals(self.kw, {'test': 'True', 'o': 'p', 'p': 'a'})
def test_repr(self):
rec = Receiver('test', 'callback', (0, 1, 2), {})
self.assertIn('%s' % id(rec), '%r' % rec)
self.assertIn('test', '%r' % rec)
self.assertIn('callback', '%r' % rec)
self.assertIn('0, 1, 2', '%r' % rec)
# Signal Descriptor test
class SimpleSignaler(object):
simple = SignalingProperty('simple')
def __init__(self):
self.emitted = []
def emit(self, signal, *values, **kw):
self.emitted.append((signal, values, kw))
class DummySignaler(SimpleSignaler):
simple_with_default = SignalingProperty('simple2', default=0)
double_a = SignalingProperty('same-signal')
double_b = SignalingProperty('same-signal')
double_c = SignalingProperty('dif-var', var_name='_a')
double_d = SignalingProperty('dif-var', var_name='_b')
changer = ChangedSignalingProperty('state')
changer_with_default = ChangedSignalingProperty('state2', default='off')
def __init__(self):
SimpleSignaler.__init__(self)
self._x = 0
self.x_get = 0
self.x_set = 0
def xget(self):
self.x_get += 1
return self._x
def xset(self, value):
self.x_set += 1
self._x = value
def xsq(self, value):
self.x_set += 1
self._x = value * value
x = CustomSignalingProperty('x-changed', xget, xset)
x_square = CustomSignalingProperty('x-square', xget, xsq)
class TestSignalingDescriptors(unittest.TestCase):
def setUp(self):
self.signaler = DummySignaler()
def test_simple(self):
self.signaler.simple = 'A'
self._check(values=[('simple', ('A', ), {})])
# empty
self.signaler.emitted = []
self.signaler.simple = 'A'
# stays empty
self._check()
def test_simple_with_default(self):
self.signaler.simple_with_default = 'B'
self._check(values=[('simple2', ('B', ), {})])
# empty
self.signaler.emitted = []
self.signaler.simple_with_default = 'B'
# stays empty
self._check()
def test_changer(self):
self.signaler.changer = 'Yes'
self._check(values=[('state', ('Yes', None), {})])
# empty
self.signaler.emitted = []
self.signaler.changer = 'Yes'
# stays empty
self._check()
def test_changer_with_default(self):
self.signaler.changer_with_default = 'another'
self._check(values=[('state2', ('another', 'off'), {})])
# empty
self.signaler.emitted = []
self.signaler.changer_with_default = 'another'
# stays empty
self._check()
def test_double_same_var(self):
self.signaler.double_a = 'A1'
self.signaler.double_b = 'B2'
self._check(values=[('same-signal', ('A1', ), {}),
('same-signal', ('B2', ), {})])
# empty
self.signaler.emitted = []
        # sending B2 over double_a even though it was last changed via double_b
self.signaler.double_a = 'B2'
self.signaler.double_b = 'B2'
# stays empty
self._check()
        # but changing them to different values works
self.signaler.double_a = 'B1'
self.signaler.double_b = 'A2'
self._check(values=[('same-signal', ('B1', ), {}),
('same-signal', ('A2', ), {})])
    def test_double_different_var(self):
self.signaler.double_c = 'A1'
self.signaler.double_d = 'B2'
self._check(values=[('dif-var', ('A1', ), {}),
('dif-var', ('B2', ), {})])
# empty
self.signaler.emitted = []
self.signaler.double_c = 'A1'
self.signaler.double_d = 'B2'
# stays empty
self._check()
# but they still allow changes
self.signaler.double_c = 'B1'
self.signaler.double_d = 'A2'
self._check(values=[('dif-var', ('B1', ), {}),
('dif-var', ('A2', ), {})])
def test_custom(self):
self.signaler.x = 'Pocahontas'
self._check(values=[('x-changed', ('Pocahontas', ), {})],
x='Pocahontas', x_get=2, x_set=1)
self.assertEquals(self.signaler.x, 'Pocahontas')
        # setting it to the same value again should not emit
self.signaler.emitted = []
self.signaler.x_get = 0
self.signaler.x_set = 0
self.signaler.x = 'Pocahontas'
self.assertEquals(self.signaler.emitted, [])
self.assertEquals(self.signaler.x, 'Pocahontas')
def test_custom_square(self):
self.signaler.x_square = 10
self._check(values=[('x-square', (100, ), {})],
x=100, x_get=2, x_set=1)
self.assertEquals(self.signaler.x, 100)
def test_custom_square_nearly_the_same(self):
self.signaler._x = 10
self.signaler.x_square = 10
self._check(values=[('x-square', (100, ), {})],
x=100, x_get=2, x_set=1)
self.assertEquals(self.signaler.x, 100)
def _check(self, values=[], x=0, x_set=0, x_get=0):
self.assertEquals(self.signaler._x, x)
self.assertEquals(self.signaler.x_set, x_set)
self.assertEquals(self.signaler.x_get, x_get)
self.assertEquals(self.signaler.emitted, values)
class TestStayInObjectSignaling(unittest.TestCase):
def setUp(self):
self.foo = SimpleSignaler()
self.bar = SimpleSignaler()
def test_double_different_values(self):
self.foo.simple = 'A'
self.bar.simple = 'B'
self.assertEquals(self.foo.simple, 'A')
self.assertEquals(self.bar.simple, 'B')
self.assertEquals(len(self.foo.emitted), 1)
self.assertEquals(len(self.bar.emitted), 1)
self.assertEquals(self.foo.emitted[0][1][0], 'A')
self.assertEquals(self.bar.emitted[0][1][0], 'B')
|
|
# Webhooks for external integrations.
from __future__ import absolute_import
from typing import Any, Dict, List, Optional, Text, Tuple
from django.utils.translation import ugettext as _
from django.db.models import Q
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from zerver.models import UserProfile, get_user, Realm
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import api_key_only_webhook_view, has_request_variables, REQ
import logging
import re
import ujson
IGNORED_EVENTS = [
'comment_created', # we handle issue_update event instead
'comment_updated', # we handle issue_update event instead
'comment_deleted', # we handle issue_update event instead
]
def guess_zulip_user_from_jira(jira_username, realm):
# type: (Text, Realm) -> Optional[UserProfile]
try:
# Try to find a matching user in Zulip
# We search a user's full name, short name,
# and beginning of email address
user = UserProfile.objects.filter(
Q(full_name__iexact=jira_username) |
Q(short_name__iexact=jira_username) |
Q(email__istartswith=jira_username),
is_active=True,
realm=realm).order_by("id")[0]
return user
except IndexError:
return None
def convert_jira_markup(content, realm):
# type: (Text, Realm) -> Text
# Attempt to do some simplistic conversion of JIRA
# formatting to Markdown, for consumption in Zulip
# Jira uses *word* for bold, we use **word**
content = re.sub(r'\*([^\*]+)\*', r'**\1**', content)
# Jira uses {{word}} for monospacing, we use `word`
content = re.sub(r'{{([^\*]+?)}}', r'`\1`', content)
# Starting a line with bq. block quotes that line
content = re.sub(r'bq\. (.*)', r'> \1', content)
# Wrapping a block of code in {quote}stuff{quote} also block-quotes it
quote_re = re.compile(r'{quote}(.*?){quote}', re.DOTALL)
content = re.sub(quote_re, r'~~~ quote\n\1\n~~~', content)
# {noformat}stuff{noformat} blocks are just code blocks with no
# syntax highlighting
noformat_re = re.compile(r'{noformat}(.*?){noformat}', re.DOTALL)
content = re.sub(noformat_re, r'~~~\n\1\n~~~', content)
# Code blocks are delineated by {code[: lang]} {code}
code_re = re.compile(r'{code[^\n]*}(.*?){code}', re.DOTALL)
content = re.sub(code_re, r'~~~\n\1\n~~~', content)
# Links are of form: [https://www.google.com] or [Link Title|https://www.google.com]
# In order to support both forms, we don't match a | in bare links
content = re.sub(r'\[([^\|~]+?)\]', r'[\1](\1)', content)
# Full links which have a | are converted into a better markdown link
full_link_re = re.compile(r'\[(?:(?P<title>[^|~]+)\|)(?P<url>.*)\]')
content = re.sub(full_link_re, r'[\g<title>](\g<url>)', content)
# Try to convert a JIRA user mention of format [~username] into a
# Zulip user mention. We don't know the email, just the JIRA username,
# so we naively guess at their Zulip account using this
if realm:
mention_re = re.compile(u'\[~(.*?)\]')
for username in mention_re.findall(content):
# Try to look up username
user_profile = guess_zulip_user_from_jira(username, realm)
if user_profile:
replacement = u"**{}**".format(user_profile.full_name)
else:
replacement = u"**{}**".format(username)
content = content.replace("[~{}]".format(username,), replacement)
return content
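# A rough illustration of the conversion above (hypothetical input, not an
# exhaustive treatment of JIRA markup):
#
#   convert_jira_markup("*bold* and {{mono}}, see [Docs|https://example.com]", realm)
#   # -> "**bold** and `mono`, see [Docs](https://example.com)"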
def get_in(payload, keys, default=''):
# type: (Dict[str, Any], List[str], Text) -> Any
try:
for key in keys:
payload = payload[key]
except (AttributeError, KeyError, TypeError):
return default
return payload
def get_issue_string(payload, issue_id=None):
# type: (Dict[str, Any], Text) -> Text
# Guess the URL as it is not specified in the payload
# We assume that there is a /browse/BUG-### page
# from the REST url of the issue itself
if issue_id is None:
issue_id = get_issue_id(payload)
base_url = re.match("(.*)\/rest\/api/.*", get_in(payload, ['issue', 'self']))
if base_url and len(base_url.groups()):
return u"[{}]({}/browse/{})".format(issue_id, base_url.group(1), issue_id)
else:
return issue_id
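# For example (hypothetical payload): if the issue's 'self' URL is
# 'https://jira.example.com/rest/api/2/issue/10000' and its key is 'BUG-123',
# this yields '[BUG-123](https://jira.example.com/browse/BUG-123)'.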
def get_assignee_mention(assignee_email, realm):
# type: (Text, Realm) -> Text
if assignee_email != '':
try:
assignee_name = get_user(assignee_email, realm).full_name
except UserProfile.DoesNotExist:
assignee_name = assignee_email
return u"**{}**".format(assignee_name)
return ''
def get_issue_author(payload):
# type: (Dict[str, Any]) -> Text
return get_in(payload, ['user', 'displayName'])
def get_issue_id(payload):
# type: (Dict[str, Any]) -> Text
return get_in(payload, ['issue', 'key'])
def get_issue_title(payload):
# type: (Dict[str, Any]) -> Text
return get_in(payload, ['issue', 'fields', 'summary'])
def get_issue_subject(payload):
# type: (Dict[str, Any]) -> Text
return u"{}: {}".format(get_issue_id(payload), get_issue_title(payload))
def get_sub_event_for_update_issue(payload):
# type: (Dict[str, Any]) -> Text
sub_event = payload.get('issue_event_type_name', '')
if sub_event == '':
if payload.get('comment'):
return 'issue_commented'
elif payload.get('transition'):
return 'issue_transited'
return sub_event
def get_event_type(payload):
# type: (Dict[str, Any]) -> Optional[Text]
event = payload.get('webhookEvent')
if event is None and payload.get('transition'):
event = 'jira:issue_updated'
return event
def add_change_info(content, field, from_field, to_field):
# type: (Text, Text, Text, Text) -> Text
content += u"* Changed {}".format(field)
if from_field:
content += u" from **{}**".format(from_field)
if to_field:
content += u" to {}\n".format(to_field)
return content
def handle_updated_issue_event(payload, user_profile):
# type: (Dict[str, Any], UserProfile) -> Text
# Reassigned, commented, reopened, and resolved events are all bundled
# into this one 'updated' event type, so we try to extract the meaningful
# event that happened
issue_id = get_in(payload, ['issue', 'key'])
issue = get_issue_string(payload, issue_id)
assignee_email = get_in(payload, ['issue', 'fields', 'assignee', 'emailAddress'], '')
assignee_mention = get_assignee_mention(assignee_email, user_profile.realm)
if assignee_mention != '':
assignee_blurb = u" (assigned to {})".format(assignee_mention)
else:
assignee_blurb = ''
sub_event = get_sub_event_for_update_issue(payload)
if 'comment' in sub_event:
if sub_event == 'issue_commented':
verb = 'added comment to'
elif sub_event == 'issue_comment_edited':
verb = 'edited comment on'
else:
verb = 'deleted comment from'
content = u"{} **{}** {}{}".format(get_issue_author(payload), verb, issue, assignee_blurb)
comment = get_in(payload, ['comment', 'body'])
if comment:
comment = convert_jira_markup(comment, user_profile.realm)
content = u"{}:\n\n\n{}\n".format(content, comment)
else:
content = u"{} **updated** {}{}:\n\n".format(get_issue_author(payload), issue, assignee_blurb)
changelog = get_in(payload, ['changelog'])
if changelog != '':
# Use the changelog to display the changes, whitelist types we accept
items = changelog.get('items')
for item in items:
field = item.get('field')
                # Convert the user's target to a @-mention if possible
                if field == 'assignee' and assignee_mention != '':
                    target_field_string = assignee_mention
                else:
                    target_field_string = u"**{}**".format(item.get('toString'))
from_field_string = item.get('fromString')
if target_field_string or from_field_string:
content = add_change_info(content, field, from_field_string, target_field_string)
elif sub_event == 'issue_transited':
from_field_string = get_in(payload, ['transition', 'from_status'])
target_field_string = u'**{}**'.format(get_in(payload, ['transition', 'to_status']))
if target_field_string or from_field_string:
content = add_change_info(content, 'status', from_field_string, target_field_string)
return content
def handle_created_issue_event(payload):
# type: (Dict[str, Any]) -> Text
return u"{} **created** {} priority {}, assigned to **{}**:\n\n> {}".format(
get_issue_author(payload),
get_issue_string(payload),
get_in(payload, ['issue', 'fields', 'priority', 'name']),
get_in(payload, ['issue', 'fields', 'assignee', 'displayName'], 'no one'),
get_issue_title(payload)
)
def handle_deleted_issue_event(payload):
# type: (Dict[str, Any]) -> Text
return u"{} **deleted** {}!".format(get_issue_author(payload), get_issue_string(payload))
@api_key_only_webhook_view("JIRA")
@has_request_variables
def api_jira_webhook(request, user_profile,
payload=REQ(argument_type='body'),
stream=REQ(default='jira')):
# type: (HttpRequest, UserProfile, Dict[str, Any], Text) -> HttpResponse
event = get_event_type(payload)
if event == 'jira:issue_created':
subject = get_issue_subject(payload)
content = handle_created_issue_event(payload)
elif event == 'jira:issue_deleted':
subject = get_issue_subject(payload)
content = handle_deleted_issue_event(payload)
elif event == 'jira:issue_updated':
subject = get_issue_subject(payload)
content = handle_updated_issue_event(payload, user_profile)
elif event in IGNORED_EVENTS:
return json_success()
else:
if event is None:
if not settings.TEST_SUITE:
message = u"Got JIRA event with None event type: {}".format(payload)
logging.warning(message)
return json_error(_("Event is not given by JIRA"))
else:
if not settings.TEST_SUITE:
logging.warning("Got JIRA event type we don't support: {}".format(event))
return json_success()
check_send_message(user_profile, request.client, "stream", [stream], subject, content)
return json_success()
|
|
from __future__ import unicode_literals
import os
import urlparse
import re
from lxml import etree
from docutil.etree_util import get_text
from docutil.commands_util import download_html_tree
from channel.syncer.generic_syncer import MessageSyncer, ThreadSyncer
class ApacheMailSyncer(MessageSyncer):
reverse_entries = False
xsection_urls = etree.XPath('//td[@class="links"]/span/a[1]')
xentries = etree.XPath('//td[@class="subject"]/a[1]')
xnext_pages = etree.XPath('//th/a')
def _get_section_urls(self, channel_url):
tree = download_html_tree(channel_url)
links = self.xsection_urls(tree)
section_urls = []
for link in reversed(links):
url = link.attrib['href']
# change "browser" for "date"
url = os.path.join(os.path.split(url)[0], 'date')
url = urlparse.urljoin(channel_url, url)
section_urls.append(url)
return section_urls
def _parse_toc_entries(self, page_url, tree):
links = self.xentries(tree)
entry_urls = []
for link in links:
url = link.attrib['href']
url = urlparse.urljoin(page_url, url)
entry_urls.append(url)
return entry_urls
def _get_next_toc_page(self, page_url, tree):
next_page_url = None
for page_link in self.xnext_pages(tree):
if page_link.text.find('Next') > -1:
next_page_url = page_link.attrib['href']
next_page_url = urlparse.urljoin(page_url, next_page_url)
break
return next_page_url
class PHPBBForumSyncer(ThreadSyncer):
ENTRY_PER_PAGE = 25
section_url = \
'{0}&sd=a&start={1}'
xnumber_pages = etree.XPath('//td[@class="nav"]/strong[2]')
xentries = etree.XPath('//tr/td[@class="row1"][2]/a')
xnext_links = etree.XPath('//td[@class="gensmall"]/b/a')
def _get_section_url(self, base_url, index):
url = self.section_url.format(base_url, self.ENTRY_PER_PAGE * index)
return url
def _clean_url(self, url):
index = url.find('&sid=')
if index > -1:
index2 = url.find('&', index+1)
if index2 > -1:
url = url[:index] + url[index2:]
else:
url = url[:index]
return url
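    # A small illustration (hypothetical URL): the session id parameter is
    # dropped while the rest of the query string is preserved.
    #
    #   _clean_url('viewtopic.php?t=42&sid=abc123&start=25')
    #   # -> 'viewtopic.php?t=42&start=25'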
def _get_number_of_pages(self, url):
tree = download_html_tree(url)
number_of_pages = self.xnumber_pages(tree)
if len(number_of_pages) == 0:
return 1
else:
number = number_of_pages[0].text.strip()
return int(number)
def _parse_toc_entries(self, page_url, tree):
entry_urls = []
links = self.xentries(tree)
size = len(links)
for link in links[size - self.ENTRY_PER_PAGE:]:
url = urlparse.urljoin(page_url, link.attrib['href'])
url = self._clean_url(url)
entry_urls.append(url)
return entry_urls
def _get_next_entry_url(self, url, next_page_id, tree):
next_links = self.xnext_links(tree)
next_url = None
for link in next_links:
if link.text.strip().lower() == 'next':
next_url = urlparse.urljoin(url, link.attrib['href'])
next_url = self._clean_url(next_url)
break
return next_url
class FUDEclipseForumSyncer(ThreadSyncer):
ENTRY_PER_PAGE = 40
section_url = '{0}1/{1}/'
xnumber_pages =\
etree.XPath('//table[@class="wa"]//td[@class="vt"]/span[1]')
xtext = etree.XPath('string()')
xentries = etree.XPath('//table[@class="pad"]//a[@class="big"]')
xnext_links = None
page_regex = re.compile(r'Pages \((\d+)\)')
def _get_number_of_pages(self, url):
tree = download_html_tree(url)
number_of_pages = self.xnumber_pages(tree)
if len(number_of_pages) == 0:
return 1
else:
number_element = number_of_pages[0]
number = get_text(number_element)
match = self.page_regex.search(number)
if match:
return int(match.group(1))
else:
return 1
def _get_section_url(self, base_url, index):
url = self.section_url.format(base_url, index * self.ENTRY_PER_PAGE)
return url
def _parse_toc_entries(self, page_url, tree):
entry_urls = []
links = self.xentries(tree)
for link in links:
url = urlparse.urljoin('http://www.eclipse.org/forums/',
link.attrib['href'])
entry_urls.append(url)
return entry_urls
def _get_next_entry_url(self, url, next_page_id, tree):
# It seems that all messages are presented on the same page.
return None
class SourceForgeSyncer(MessageSyncer):
reverse_entries = False
xsection_urls = etree.XPath('//table//table//a')
xentries = etree.XPath('//div[@class="forum"]/div/b/a[1]')
xnext_pages = etree.XPath('//tr[@bgcolor="#eeeeee"]/td[3]/a[1]')
def _get_section_urls(self, channel_url):
tree = download_html_tree(channel_url)
links = self.xsection_urls(tree)
section_urls = []
for link in reversed(links):
url = link.attrib['href']
url = url.replace('style=ultimate', 'style=flat')
url = url.replace('max_rows=25', 'max_rows=100')
url = urlparse.urljoin(channel_url, url)
section_urls.append(url)
return section_urls
def _parse_toc_entries(self, page_url, tree):
links = self.xentries(tree)
entry_urls = []
for link in links:
url = link.attrib['href']
url = urlparse.urljoin(page_url, url)
entry_urls.append(url)
return entry_urls
def _get_next_toc_page(self, page_url, tree):
next_page_url = None
for page_link in self.xnext_pages(tree):
next_page_url = page_link.attrib['href']
next_page_url = urlparse.urljoin(page_url, next_page_url)
return next_page_url
class GmaneSyncer(MessageSyncer):
reverse_entries = False
xsection_urls = etree.XPath('//select[@name="page"]/option')
xentries = etree.XPath('//td/a')
def _get_section_urls(self, channel_url):
tree = download_html_tree(channel_url)
section_numbers = len(self.xsection_urls(tree))
section_urls = []
for section_number in reversed(xrange(section_numbers)):
url = channel_url.replace('page=0',
'page={0}'.format(section_number))
section_urls.append(url)
return section_urls
def _parse_toc_entries(self, page_url, tree):
links = self.xentries(tree)
entry_urls = []
for link in links:
url = link.attrib['href']
entry_urls.append(url)
return entry_urls
def _get_next_toc_page(self, page_url, tree):
return None
|
|
#
# Cython - Compilation-wide options and pragma declarations
#
from __future__ import absolute_import
class ShouldBeFromDirective(object):
known_directives = []
def __init__(self, options_name, directive_name=None, disallow=False):
self.options_name = options_name
self.directive_name = directive_name or options_name
self.disallow = disallow
self.known_directives.append(self)
def __nonzero__(self):
self._bad_access()
def __int__(self):
self._bad_access()
def _bad_access(self):
raise RuntimeError(repr(self))
def __repr__(self):
return (
"Illegal access of '%s' from Options module rather than directive '%s'"
% (self.options_name, self.directive_name))
# Include docstrings.
docstrings = True
# Embed the source code position in the docstrings of functions and classes.
embed_pos_in_docstring = False
# Copy the original source code line by line into C code comments
# in the generated code file to help with understanding the output.
emit_code_comments = True
pre_import = None # undocumented
# Decref global variables in this module on exit for garbage collection.
# 0: None, 1+: interned objects, 2+: cdef globals, 3+: types objects
# Mostly for reducing noise in Valgrind, only executes at process exit
# (when all memory will be reclaimed anyways).
generate_cleanup_code = False
# Should tp_clear() set object fields to None instead of clearing them to NULL?
clear_to_none = True
# Generate an annotated HTML version of the input source files.
annotate = False
# When annotating source files in HTML, include coverage information from
# this file.
annotate_coverage_xml = None
# Abort the compilation on the first error encountered, rather than trying
# to keep going and printing further error messages.
fast_fail = False
# Make all warnings into errors.
warning_errors = False
# Make unknown names an error. Python raises a NameError when
# encountering unknown names at runtime, whereas this option makes
# them a compile time error. If you want full Python compatibility,
# you should disable this option and also 'cache_builtins'.
error_on_unknown_names = True
# Make uninitialized local variable references a compile time error.
# Python raises UnboundLocalError at runtime, whereas this option makes
# them a compile time error. Note that this option affects only variables
# of "python object" type.
error_on_uninitialized = True
# This will convert statements of the form "for i in range(...)"
# to "for i from ..." when i is a cdef'd integer type, and the direction
# (i.e. sign of step) can be determined.
# WARNING: This may change the semantics if the range causes assignment to
# i to overflow. Specifically, if this option is set, an error will be
# raised before the loop is entered, whereas without this option the loop
# will execute until an overflowing value is encountered.
convert_range = True
# Perform lookups on builtin names only once, at module initialisation
# time. This will prevent the module from getting imported if a
# builtin name that it uses cannot be found during initialisation.
cache_builtins = True
# Generate branch prediction hints to speed up error handling etc.
gcc_branch_hints = True
# Enable this to allow one to write your_module.foo = ... to overwrite the
# definition of the cpdef function foo, at the cost of an extra dictionary
# lookup on every call.
# If this is false it generates only the Python wrapper and no override check.
lookup_module_cpdef = False
# Whether or not to embed the Python interpreter, for use in making a
# standalone executable or calling from external libraries.
# This will provide a method which initialises the interpreter and
# executes the body of this module.
embed = None
# In previous iterations of Cython, globals() gave the first non-Cython module
# globals in the call stack. Sage relies on this behavior for variable injection.
old_style_globals = ShouldBeFromDirective('old_style_globals')
# Allows cimporting from a pyx file without a pxd file.
cimport_from_pyx = False
# max # of dims for buffers -- set lower than number of dimensions in numpy, as
# slices are passed by value and involve a lot of copying
buffer_max_dims = 8
# Number of function closure instances to keep in a freelist (0: no freelists)
closure_freelist_size = 8
def get_directive_defaults():
# To add an item to this list, all accesses should be changed to use the new
# directive, and the global option itself should be set to an instance of
# ShouldBeFromDirective.
for old_option in ShouldBeFromDirective.known_directives:
value = globals().get(old_option.options_name)
assert old_option.directive_name in _directive_defaults
if not isinstance(value, ShouldBeFromDirective):
if old_option.disallow:
raise RuntimeError(
"Option '%s' must be set from directive '%s'" % (
                    old_option.options_name, old_option.directive_name))
else:
# Warn?
_directive_defaults[old_option.directive_name] = value
return _directive_defaults
# Declare compiler directives
_directive_defaults = {
'boundscheck' : True,
'nonecheck' : False,
'initializedcheck' : True,
'embedsignature' : False,
'locals' : {},
'auto_cpdef': False,
'cdivision': False, # was True before 0.12
'cdivision_warnings': False,
'overflowcheck': False,
'overflowcheck.fold': True,
'always_allow_keywords': False,
'allow_none_for_extension_args': True,
'wraparound' : True,
'ccomplex' : False, # use C99/C++ for complex types and arith
'callspec' : "",
'final' : False,
'internal' : False,
'profile': False,
'no_gc_clear': False,
'no_gc': False,
'linetrace': False,
'emit_code_comments': True, # copy original source code into C code comments
'annotation_typing': False, # read type declarations from Python function annotations
'infer_types': None,
'infer_types.verbose': False,
'autotestdict': True,
'autotestdict.cdef': False,
'autotestdict.all': False,
'language_level': 2,
'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere.
'py2_import': False, # For backward compatibility of Cython's source code in Py3 source mode
'c_string_type': 'bytes',
'c_string_encoding': '',
'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types
'unraisable_tracebacks': False,
'old_style_globals': False,
# set __file__ and/or __path__ to known source/target path at import time (instead of not having them available)
'set_initial_path' : None, # SOURCEFILE or "/full/path/to/module"
'warn': None,
'warn.undeclared': False,
'warn.unreachable': True,
'warn.maybe_uninitialized': False,
'warn.unused': False,
'warn.unused_arg': False,
'warn.unused_result': False,
'warn.multiple_declarators': True,
# optimizations
'optimize.inline_defnode_calls': True,
'optimize.unpack_method_calls': True, # increases code size when True
'optimize.use_switch': True,
# remove unreachable code
'remove_unreachable': True,
# control flow debug directives
'control_flow.dot_output': "", # Graphviz output filename
'control_flow.dot_annotate_defs': False, # Annotate definitions
# test support
'test_assert_path_exists' : [],
'test_fail_if_path_exists' : [],
# experimental, subject to change
'binding': None,
'freelist': 0,
'formal_grammar': False,
}
# Extra warning directives
extra_warnings = {
'warn.maybe_uninitialized': True,
'warn.unreachable': True,
'warn.unused': True,
}
def one_of(*args):
def validate(name, value):
if value not in args:
raise ValueError("%s directive must be one of %s, got '%s'" % (
name, args, value))
else:
return value
return validate
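# A small illustration (values chosen arbitrarily): the validators built by
# one_of are keyed by directive name in `directive_types` below.
#
#   validate = one_of('bytes', 'str')
#   validate('c_string_type', 'str')     # -> 'str'
#   validate('c_string_type', 'latin1')  # raises ValueError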
def normalise_encoding_name(option_name, encoding):
"""
>>> normalise_encoding_name('c_string_encoding', 'ascii')
'ascii'
>>> normalise_encoding_name('c_string_encoding', 'AsCIi')
'ascii'
>>> normalise_encoding_name('c_string_encoding', 'us-ascii')
'ascii'
>>> normalise_encoding_name('c_string_encoding', 'utF8')
'utf8'
>>> normalise_encoding_name('c_string_encoding', 'utF-8')
'utf8'
>>> normalise_encoding_name('c_string_encoding', 'deFAuLT')
'default'
>>> normalise_encoding_name('c_string_encoding', 'default')
'default'
>>> normalise_encoding_name('c_string_encoding', 'SeriousLyNoSuch--Encoding')
'SeriousLyNoSuch--Encoding'
"""
if not encoding:
return ''
if encoding.lower() in ('default', 'ascii', 'utf8'):
return encoding.lower()
import codecs
try:
decoder = codecs.getdecoder(encoding)
except LookupError:
        return encoding  # may exist at runtime ...
for name in ('ascii', 'utf8'):
if codecs.getdecoder(name) == decoder:
return name
return encoding
# Override types possibilities above, if needed
directive_types = {
'final' : bool, # final cdef classes and methods
'internal' : bool, # cdef class visibility in the module dict
'infer_types' : bool, # values can be True/None/False
'binding' : bool,
'cfunc' : None, # decorators do not take directive value
'ccall' : None,
'inline' : None,
'staticmethod' : None,
'cclass' : None,
'returns' : type,
'set_initial_path': str,
'freelist': int,
'c_string_type': one_of('bytes', 'bytearray', 'str', 'unicode'),
'c_string_encoding': normalise_encoding_name,
}
for key, val in _directive_defaults.items():
if key not in directive_types:
directive_types[key] = type(val)
directive_scopes = { # defaults to available everywhere
# 'module', 'function', 'class', 'with statement'
'final' : ('cclass', 'function'),
'inline' : ('function',),
'staticmethod' : ('function',), # FIXME: analysis currently lacks more specific function scope
'no_gc_clear' : ('cclass',),
'no_gc' : ('cclass',),
'internal' : ('cclass',),
'autotestdict' : ('module',),
'autotestdict.all' : ('module',),
'autotestdict.cdef' : ('module',),
'set_initial_path' : ('module',),
'test_assert_path_exists' : ('function', 'class', 'cclass'),
'test_fail_if_path_exists' : ('function', 'class', 'cclass'),
'freelist': ('cclass',),
'emit_code_comments': ('module',),
'annotation_typing': ('module',), # FIXME: analysis currently lacks more specific function scope
# Avoid scope-specific to/from_py_functions for c_string.
'c_string_type': ('module',),
'c_string_encoding': ('module',),
'type_version_tag': ('module', 'cclass'),
'language_level': ('module',),
# globals() could conceivably be controlled at a finer granularity,
# but that would complicate the implementation
'old_style_globals': ('module',),
}
def parse_directive_value(name, value, relaxed_bool=False):
"""
Parses value as an option value for the given name and returns
the interpreted value. None is returned if the option does not exist.
>>> print(parse_directive_value('nonexisting', 'asdf asdfd'))
None
>>> parse_directive_value('boundscheck', 'True')
True
>>> parse_directive_value('boundscheck', 'true')
Traceback (most recent call last):
...
ValueError: boundscheck directive must be set to True or False, got 'true'
>>> parse_directive_value('c_string_encoding', 'us-ascii')
'ascii'
>>> parse_directive_value('c_string_type', 'str')
'str'
>>> parse_directive_value('c_string_type', 'bytes')
'bytes'
>>> parse_directive_value('c_string_type', 'bytearray')
'bytearray'
>>> parse_directive_value('c_string_type', 'unicode')
'unicode'
>>> parse_directive_value('c_string_type', 'unnicode')
Traceback (most recent call last):
ValueError: c_string_type directive must be one of ('bytes', 'bytearray', 'str', 'unicode'), got 'unnicode'
"""
type = directive_types.get(name)
if not type:
return None
orig_value = value
if type is bool:
value = str(value)
if value == 'True':
return True
if value == 'False':
return False
if relaxed_bool:
value = value.lower()
if value in ("true", "yes"):
return True
elif value in ("false", "no"):
return False
raise ValueError("%s directive must be set to True or False, got '%s'" % (
name, orig_value))
elif type is int:
try:
return int(value)
except ValueError:
raise ValueError("%s directive must be set to an integer, got '%s'" % (
name, orig_value))
elif type is str:
return str(value)
elif callable(type):
return type(name, value)
else:
assert False
def parse_directive_list(s, relaxed_bool=False, ignore_unknown=False,
current_settings=None):
"""
Parses a comma-separated list of pragma options. Whitespace
is not considered.
>>> parse_directive_list(' ')
{}
>>> (parse_directive_list('boundscheck=True') ==
... {'boundscheck': True})
True
>>> parse_directive_list(' asdf')
Traceback (most recent call last):
...
ValueError: Expected "=" in option "asdf"
>>> parse_directive_list('boundscheck=hey')
Traceback (most recent call last):
...
ValueError: boundscheck directive must be set to True or False, got 'hey'
>>> parse_directive_list('unknown=True')
Traceback (most recent call last):
...
ValueError: Unknown option: "unknown"
>>> warnings = parse_directive_list('warn.all=True')
>>> len(warnings) > 1
True
>>> sum(warnings.values()) == len(warnings) # all true.
True
"""
if current_settings is None:
result = {}
else:
result = current_settings
for item in s.split(','):
item = item.strip()
if not item:
continue
        if '=' not in item:
raise ValueError('Expected "=" in option "%s"' % item)
name, value = [s.strip() for s in item.strip().split('=', 1)]
if name not in _directive_defaults:
found = False
if name.endswith('.all'):
prefix = name[:-3]
for directive in _directive_defaults:
if directive.startswith(prefix):
found = True
parsed_value = parse_directive_value(directive, value, relaxed_bool=relaxed_bool)
result[directive] = parsed_value
if not found and not ignore_unknown:
raise ValueError('Unknown option: "%s"' % name)
else:
parsed_value = parse_directive_value(name, value, relaxed_bool=relaxed_bool)
result[name] = parsed_value
return result
|
|
#!/usr/bin/env python
from __future__ import with_statement
import unittest
from datetime import datetime
import flask
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import sessionmaker
import flask_sqlalchemy as fsa
def make_todo_model(db):
class Todo(db.Model):
__tablename__ = 'todos'
id = db.Column('todo_id', db.Integer, primary_key=True)
title = db.Column(db.String(60))
text = db.Column(db.String)
done = db.Column(db.Boolean)
pub_date = db.Column(db.DateTime)
def __init__(self, title, text):
self.title = title
self.text = text
self.done = False
self.pub_date = datetime.utcnow()
return Todo
class BasicAppTestCase(unittest.TestCase):
def setUp(self):
app = flask.Flask(__name__)
app.config['TESTING'] = True
db = fsa.SQLAlchemy(app)
self.Todo = make_todo_model(db)
@app.route('/')
def index():
return '\n'.join(x.title for x in self.Todo.query.all())
@app.route('/add', methods=['POST'])
def add():
form = flask.request.form
todo = self.Todo(form['title'], form['text'])
db.session.add(todo)
db.session.commit()
return 'added'
db.create_all()
self.app = app
self.db = db
def tearDown(self):
self.db.drop_all()
def test_basic_insert(self):
c = self.app.test_client()
c.post('/add', data=dict(title='First Item', text='The text'))
c.post('/add', data=dict(title='2nd Item', text='The text'))
rv = c.get('/')
self.assertEqual(rv.data, b'First Item\n2nd Item')
def test_query_recording(self):
with self.app.test_request_context():
todo = self.Todo('Test 1', 'test')
self.db.session.add(todo)
self.db.session.commit()
queries = fsa.get_debug_queries()
self.assertEqual(len(queries), 1)
query = queries[0]
self.assertTrue('insert into' in query.statement.lower())
self.assertEqual(query.parameters[0], 'Test 1')
self.assertEqual(query.parameters[1], 'test')
self.assertTrue('test_sqlalchemy.py' in query.context)
self.assertTrue('test_query_recording' in query.context)
def test_helper_api(self):
self.assertEqual(self.db.metadata, self.db.Model.metadata)
class MetaDataTestCase(unittest.TestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['TESTING'] = True
def test_default_metadata(self):
db = fsa.SQLAlchemy(self.app, metadata=None)
self.db = db
class One(db.Model):
id = db.Column(db.Integer, primary_key=True)
myindex = db.Column(db.Integer, index=True)
class Two(db.Model):
id = db.Column(db.Integer, primary_key=True)
one_id = db.Column(db.Integer, db.ForeignKey(One.id))
myunique = db.Column(db.Integer, unique=True)
self.assertTrue(One.metadata.__class__ is sa.MetaData)
self.assertTrue(Two.metadata.__class__ is sa.MetaData)
self.assertEqual(One.__table__.schema, None)
self.assertEqual(Two.__table__.schema, None)
def test_custom_metadata(self):
class CustomMetaData(sa.MetaData):
pass
custom_metadata = CustomMetaData(schema="test_schema")
db = fsa.SQLAlchemy(self.app, metadata=custom_metadata)
self.db = db
class One(db.Model):
id = db.Column(db.Integer, primary_key=True)
myindex = db.Column(db.Integer, index=True)
class Two(db.Model):
id = db.Column(db.Integer, primary_key=True)
one_id = db.Column(db.Integer, db.ForeignKey(One.id))
myunique = db.Column(db.Integer, unique=True)
self.assertTrue(One.metadata is custom_metadata)
self.assertTrue(Two.metadata is custom_metadata)
self.assertFalse(One.metadata.__class__ is sa.MetaData)
self.assertTrue(One.metadata.__class__ is CustomMetaData)
self.assertFalse(Two.metadata.__class__ is sa.MetaData)
self.assertTrue(Two.metadata.__class__ is CustomMetaData)
self.assertEqual(One.__table__.schema, "test_schema")
self.assertEqual(Two.__table__.schema, "test_schema")
class TestQueryProperty(unittest.TestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['TESTING'] = True
def test_no_app_bound(self):
db = fsa.SQLAlchemy()
db.init_app(self.app)
Todo = make_todo_model(db)
# If no app is bound to the SQLAlchemy instance, a
# request context is required to access Model.query.
self.assertRaises(RuntimeError, getattr, Todo, 'query')
with self.app.test_request_context():
db.create_all()
todo = Todo('Test', 'test')
db.session.add(todo)
db.session.commit()
self.assertEqual(len(Todo.query.all()), 1)
def test_app_bound(self):
db = fsa.SQLAlchemy(self.app)
Todo = make_todo_model(db)
db.create_all()
# If an app was passed to the SQLAlchemy constructor,
# the query property is always available.
todo = Todo('Test', 'test')
db.session.add(todo)
db.session.commit()
self.assertEqual(len(Todo.query.all()), 1)
class SignallingTestCase(unittest.TestCase):
def setUp(self):
self.app = app = flask.Flask(__name__)
app.config['TESTING'] = True
self.db = fsa.SQLAlchemy(app)
self.Todo = make_todo_model(self.db)
self.db.create_all()
def tearDown(self):
self.db.drop_all()
def test_before_committed(self):
class Namespace(object):
is_received = False
def before_committed(sender, changes):
Namespace.is_received = True
fsa.before_models_committed.connect(before_committed)
todo = self.Todo('Awesome', 'the text')
self.db.session.add(todo)
self.db.session.commit()
self.assertTrue(Namespace.is_received)
fsa.before_models_committed.disconnect(before_committed)
def test_model_signals(self):
recorded = []
def committed(sender, changes):
self.assertTrue(isinstance(changes, list))
recorded.extend(changes)
fsa.models_committed.connect(committed)
todo = self.Todo('Awesome', 'the text')
self.db.session.add(todo)
self.assertEqual(len(recorded), 0)
self.db.session.commit()
self.assertEqual(len(recorded), 1)
self.assertEqual(recorded[0][0], todo)
self.assertEqual(recorded[0][1], 'insert')
del recorded[:]
todo.text = 'aha'
self.db.session.commit()
self.assertEqual(len(recorded), 1)
self.assertEqual(recorded[0][0], todo)
self.assertEqual(recorded[0][1], 'update')
del recorded[:]
self.db.session.delete(todo)
self.db.session.commit()
self.assertEqual(len(recorded), 1)
self.assertEqual(recorded[0][0], todo)
self.assertEqual(recorded[0][1], 'delete')
fsa.models_committed.disconnect(committed)
class TablenameTestCase(unittest.TestCase):
def test_name(self):
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
class FOOBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
class BazBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
class Ham(db.Model):
__tablename__ = 'spam'
id = db.Column(db.Integer, primary_key=True)
self.assertEqual(FOOBar.__tablename__, 'foo_bar')
self.assertEqual(BazBar.__tablename__, 'baz_bar')
self.assertEqual(Ham.__tablename__, 'spam')
def test_single_name(self):
"""Single table inheritance should not set a new name."""
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class Mallard(Duck):
pass
self.assertEqual(Mallard.__tablename__, 'duck')
def test_joined_name(self):
"""Model has a separate primary key; it should set a new name."""
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class Donald(Duck):
id = db.Column(db.Integer, db.ForeignKey(Duck.id), primary_key=True)
self.assertEqual(Donald.__tablename__, 'donald')
def test_mixin_name(self):
"""Primary key provided by mixin should still allow model to set tablename."""
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
class Base(object):
id = db.Column(db.Integer, primary_key=True)
class Duck(Base, db.Model):
pass
self.assertFalse(hasattr(Base, '__tablename__'))
self.assertEqual(Duck.__tablename__, 'duck')
def test_abstract_name(self):
"""Abstract model should not set a name. Subclass should set a name."""
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
class Base(db.Model):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True)
class Duck(Base):
pass
self.assertFalse(hasattr(Base, '__tablename__'))
self.assertEqual(Duck.__tablename__, 'duck')
def test_complex_inheritance(self):
"""Joined table inheritance, but the new primary key is provided by a mixin, not directly on the class."""
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class IdMixin(object):
@declared_attr
def id(cls):
return db.Column(db.Integer, db.ForeignKey(Duck.id), primary_key=True)
class RubberDuck(IdMixin, Duck):
pass
self.assertEqual(RubberDuck.__tablename__, 'rubber_duck')
class PaginationTestCase(unittest.TestCase):
def test_basic_pagination(self):
p = fsa.Pagination(None, 1, 20, 500, [])
self.assertEqual(p.page, 1)
self.assertFalse(p.has_prev)
self.assertTrue(p.has_next)
self.assertEqual(p.total, 500)
self.assertEqual(p.pages, 25)
self.assertEqual(p.next_num, 2)
self.assertEqual(list(p.iter_pages()),
[1, 2, 3, 4, 5, None, 24, 25])
p.page = 10
self.assertEqual(list(p.iter_pages()),
[1, 2, None, 8, 9, 10, 11, 12, 13, 14, None, 24, 25])
def test_pagination_pages_when_0_items_per_page(self):
p = fsa.Pagination(None, 1, 0, 500, [])
self.assertEqual(p.pages, 0)
def test_query_paginate(self):
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
Todo = make_todo_model(db)
db.create_all()
with app.app_context():
db.session.add_all([Todo('', '') for _ in range(100)])
db.session.commit()
@app.route('/')
def index():
p = Todo.query.paginate()
return '{0} items retrieved'.format(len(p.items))
c = app.test_client()
# request default
r = c.get('/')
self.assertEqual(r.status_code, 200)
# request args
r = c.get('/?per_page=10')
self.assertEqual(r.data.decode('utf8'), '10 items retrieved')
with app.app_context():
# query default
p = Todo.query.paginate()
self.assertEqual(p.total, 100)
class BindsTestCase(unittest.TestCase):
def test_basic_binds(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_BINDS'] = {
'foo': 'sqlite://',
'bar': 'sqlite://'
}
db = fsa.SQLAlchemy(app)
class Foo(db.Model):
__bind_key__ = 'foo'
__table_args__ = {"info": {"bind_key": "foo"}}
id = db.Column(db.Integer, primary_key=True)
class Bar(db.Model):
__bind_key__ = 'bar'
id = db.Column(db.Integer, primary_key=True)
class Baz(db.Model):
id = db.Column(db.Integer, primary_key=True)
db.create_all()
# simple way to check if the engines are looked up properly
self.assertEqual(db.get_engine(app, None), db.engine)
for key in 'foo', 'bar':
engine = db.get_engine(app, key)
connector = app.extensions['sqlalchemy'].connectors[key]
self.assertEqual(engine, connector.get_engine())
self.assertEqual(str(engine.url),
app.config['SQLALCHEMY_BINDS'][key])
# do the models have the correct engines?
self.assertEqual(db.metadata.tables['foo'].info['bind_key'], 'foo')
self.assertEqual(db.metadata.tables['bar'].info['bind_key'], 'bar')
self.assertEqual(db.metadata.tables['baz'].info.get('bind_key'), None)
# see the tables created in an engine
metadata = db.MetaData()
metadata.reflect(bind=db.get_engine(app, 'foo'))
self.assertEqual(len(metadata.tables), 1)
self.assertTrue('foo' in metadata.tables)
metadata = db.MetaData()
metadata.reflect(bind=db.get_engine(app, 'bar'))
self.assertEqual(len(metadata.tables), 1)
self.assertTrue('bar' in metadata.tables)
metadata = db.MetaData()
metadata.reflect(bind=db.get_engine(app))
self.assertEqual(len(metadata.tables), 1)
self.assertTrue('baz' in metadata.tables)
        # does the session have the right binds set?
self.assertEqual(db.get_binds(app), {
Foo.__table__: db.get_engine(app, 'foo'),
Bar.__table__: db.get_engine(app, 'bar'),
Baz.__table__: db.get_engine(app, None)
})
def test_abstract_binds(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_BINDS'] = {
'foo': 'sqlite://'
}
db = fsa.SQLAlchemy(app)
class AbstractFooBoundModel(db.Model):
__abstract__ = True
__bind_key__ = 'foo'
class FooBoundModel(AbstractFooBoundModel):
id = db.Column(db.Integer, primary_key=True)
db.create_all()
# does the model have the correct engines?
self.assertEqual(db.metadata.tables['foo_bound_model'].info['bind_key'], 'foo')
# see the tables created in an engine
metadata = db.MetaData()
metadata.reflect(bind=db.get_engine(app, 'foo'))
self.assertEqual(len(metadata.tables), 1)
self.assertTrue('foo_bound_model' in metadata.tables)
class DefaultQueryClassTestCase(unittest.TestCase):
def test_default_query_class(self):
app = flask.Flask(__name__)
app.config['TESTING'] = True
db = fsa.SQLAlchemy(app)
class Parent(db.Model):
id = db.Column(db.Integer, primary_key=True)
children = db.relationship("Child", backref="parent", lazy='dynamic')
class Child(db.Model):
id = db.Column(db.Integer, primary_key=True)
parent_id = db.Column(db.Integer, db.ForeignKey('parent.id'))
p = Parent()
c = Child()
c.parent = p
self.assertEqual(type(Parent.query), fsa.BaseQuery)
self.assertEqual(type(Child.query), fsa.BaseQuery)
self.assertTrue(isinstance(p.children, fsa.BaseQuery))
self.assertTrue(isinstance(db.session.query(Parent), fsa.BaseQuery))
class CustomQueryClassTestCase(unittest.TestCase):
def test_custom_query_class(self):
class CustomQueryClass(fsa.BaseQuery):
pass
class MyModelClass(object):
pass
app = flask.Flask(__name__)
app.config['TESTING'] = True
db = fsa.SQLAlchemy(app, query_class=CustomQueryClass,
model_class=MyModelClass)
class Parent(db.Model):
id = db.Column(db.Integer, primary_key=True)
children = db.relationship("Child", backref="parent", lazy='dynamic')
class Child(db.Model):
id = db.Column(db.Integer, primary_key=True)
parent_id = db.Column(db.Integer, db.ForeignKey('parent.id'))
p = Parent()
c = Child()
c.parent = p
self.assertEqual(type(Parent.query), CustomQueryClass)
self.assertEqual(type(Child.query), CustomQueryClass)
self.assertTrue(isinstance(p.children, CustomQueryClass))
self.assertEqual(db.Query, CustomQueryClass)
self.assertEqual(db.Model.query_class, CustomQueryClass)
self.assertTrue(isinstance(db.session.query(Parent), CustomQueryClass))
def test_dont_override_model_default(self):
class CustomQueryClass(fsa.BaseQuery):
pass
app = flask.Flask(__name__)
app.config['TESTING'] = True
db = fsa.SQLAlchemy(app, query_class=CustomQueryClass)
class SomeModel(db.Model):
id = db.Column(db.Integer, primary_key=True)
query_class = fsa.BaseQuery
self.assertEqual(type(SomeModel.query), fsa.BaseQuery)
class CustomModelClassTestCase(unittest.TestCase):
def test_custom_query_class(self):
class CustomModelClass(fsa.Model):
pass
app = flask.Flask(__name__)
app.config['TESTING'] = True
db = fsa.SQLAlchemy(app, model_class=CustomModelClass)
class SomeModel(db.Model):
id = db.Column(db.Integer, primary_key=True)
self.assertTrue(isinstance(SomeModel(), CustomModelClass))
class SQLAlchemyIncludesTestCase(unittest.TestCase):
def test(self):
"""Various SQLAlchemy objects are exposed as attributes.
"""
db = fsa.SQLAlchemy()
self.assertTrue(db.Column == sa.Column)
# The Query object we expose is actually our own subclass.
from flask_sqlalchemy import BaseQuery
self.assertTrue(db.Query == BaseQuery)
class RegressionTestCase(unittest.TestCase):
def test_joined_inheritance(self):
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
class Base(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Unicode(20))
__mapper_args__ = {'polymorphic_on': type}
class SubBase(Base):
id = db.Column(db.Integer, db.ForeignKey('base.id'),
primary_key=True)
__mapper_args__ = {'polymorphic_identity': 'sub'}
self.assertEqual(Base.__tablename__, 'base')
self.assertEqual(SubBase.__tablename__, 'sub_base')
db.create_all()
def test_single_table_inheritance(self):
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
class Base(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Unicode(20))
__mapper_args__ = {'polymorphic_on': type}
class SubBase(Base):
__mapper_args__ = {'polymorphic_identity': 'sub'}
self.assertEqual(Base.__tablename__, 'base')
self.assertEqual(SubBase.__tablename__, 'base')
db.create_all()
def test_joined_inheritance_relation(self):
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
class Relation(db.Model):
id = db.Column(db.Integer, primary_key=True)
base_id = db.Column(db.Integer, db.ForeignKey('base.id'))
name = db.Column(db.Unicode(20))
def __init__(self, name):
self.name = name
class Base(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.Unicode(20))
__mapper_args__ = {'polymorphic_on': type}
class SubBase(Base):
id = db.Column(db.Integer, db.ForeignKey('base.id'),
primary_key=True)
__mapper_args__ = {'polymorphic_identity': u'sub'}
relations = db.relationship(Relation)
db.create_all()
base = SubBase()
base.relations = [Relation(name=u'foo')]
db.session.add(base)
db.session.commit()
base = base.query.one()
def test_connection_binds(self):
app = flask.Flask(__name__)
db = fsa.SQLAlchemy(app)
assert db.session.connection()
class SessionScopingTestCase(unittest.TestCase):
def test_default_session_scoping(self):
app = flask.Flask(__name__)
app.config['TESTING'] = True
db = fsa.SQLAlchemy(app)
class FOOBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
db.create_all()
with app.test_request_context():
fb = FOOBar()
db.session.add(fb)
assert fb in db.session
def test_session_scoping_changing(self):
app = flask.Flask(__name__)
app.config['TESTING'] = True
def scopefunc():
return id(dict())
db = fsa.SQLAlchemy(app, session_options=dict(scopefunc=scopefunc))
class FOOBar(db.Model):
id = db.Column(db.Integer, primary_key=True)
db.create_all()
with app.test_request_context():
fb = FOOBar()
db.session.add(fb)
assert fb not in db.session # because a new scope is generated on each call
class CommitOnTeardownTestCase(unittest.TestCase):
def setUp(self):
app = flask.Flask(__name__)
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = fsa.SQLAlchemy(app)
Todo = make_todo_model(db)
db.create_all()
@app.route('/')
def index():
return '\n'.join(x.title for x in Todo.query.all())
@app.route('/create', methods=['POST'])
def create():
db.session.add(Todo('Test one', 'test'))
if flask.request.form.get('fail'):
raise RuntimeError("Failing as requested")
return 'ok'
self.client = app.test_client()
def test_commit_on_success(self):
resp = self.client.post('/create')
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.client.get('/').data, b'Test one')
def test_roll_back_on_failure(self):
resp = self.client.post('/create', data={'fail': 'on'})
self.assertEqual(resp.status_code, 500)
self.assertEqual(self.client.get('/').data, b'')
class StandardSessionTestCase(unittest.TestCase):
def test_insert_update_delete(self):
# Ensure _SignalTrackingMapperExtension doesn't croak when
# faced with a vanilla SQLAlchemy session.
#
# Verifies that "AttributeError: 'SessionMaker' object has no attribute '_model_changes'"
# is not thrown.
app = flask.Flask(__name__)
app.config['TESTING'] = True
db = fsa.SQLAlchemy(app)
Session = sessionmaker(bind=db.engine)
class QazWsx(db.Model):
id = db.Column(db.Integer, primary_key=True)
x = db.Column(db.String, default='')
db.create_all()
session = Session()
session.add(QazWsx())
session.flush() # issues an INSERT.
session.expunge_all()
qaz_wsx = session.query(QazWsx).first()
assert qaz_wsx.x == ''
qaz_wsx.x = 'test'
session.flush() # issues an UPDATE.
session.expunge_all()
qaz_wsx = session.query(QazWsx).first()
assert qaz_wsx.x == 'test'
session.delete(qaz_wsx) # issues a DELETE.
assert session.query(QazWsx).first() is None
def test_listen_to_session_event(self):
app = flask.Flask(__name__)
app.config['TESTING'] = True
db = fsa.SQLAlchemy(app)
sa.event.listen(db.session, 'after_commit', lambda session: None)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(RegressionTestCase))
suite.addTest(unittest.makeSuite(BasicAppTestCase))
suite.addTest(unittest.makeSuite(SQLAlchemyIncludesTestCase))
suite.addTest(unittest.makeSuite(MetaDataTestCase))
suite.addTest(unittest.makeSuite(TestQueryProperty))
suite.addTest(unittest.makeSuite(DefaultQueryClassTestCase))
suite.addTest(unittest.makeSuite(CustomQueryClassTestCase))
suite.addTest(unittest.makeSuite(TablenameTestCase))
suite.addTest(unittest.makeSuite(PaginationTestCase))
suite.addTest(unittest.makeSuite(BindsTestCase))
suite.addTest(unittest.makeSuite(StandardSessionTestCase))
suite.addTest(unittest.makeSuite(SessionScopingTestCase))
suite.addTest(unittest.makeSuite(CommitOnTeardownTestCase))
suite.addTest(unittest.makeSuite(CustomModelClassTestCase))
if flask.signals_available:
suite.addTest(unittest.makeSuite(SignallingTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Autocompletion config for YouCompleteMe in Chromium.
#
# USAGE:
#
# 1. Install YCM [https://github.com/Valloric/YouCompleteMe]
# (Googlers should check out [go/ycm])
#
# 2. Point to this config file in your .vimrc:
# let g:ycm_global_ycm_extra_conf =
# '<chrome_depot>/src/tools/vim/chromium.ycm_extra_conf.py'
#
# 3. Profit
#
#
# Usage notes:
#
# * You must use ninja & clang to build Chromium.
#
# * You must have run gyp_chromium and built Chromium recently.
#
#
# Hacking notes:
#
# * The purpose of this script is to construct an accurate enough command line
# for YCM to pass to clang so it can build and extract the symbols.
#
# * Right now, we only pull the -I and -D flags. That seems to be sufficient
# for everything I've used it for.
#
# * That whole ninja & clang thing? We could support other configs if someone
# were willing to write the correct commands and a parser.
#
# * This has only been tested on gPrecise.
import os
import os.path
import re
import subprocess
import sys
def SystemIncludeDirectoryFlags():
"""Determines compile flags to include the system include directories.
Use as a workaround for https://github.com/Valloric/YouCompleteMe/issues/303
Returns:
(List of Strings) Compile flags to append.
"""
try:
with open(os.devnull, 'rb') as DEVNULL:
output = subprocess.check_output(['clang', '-v', '-E', '-x', 'c++', '-'],
stdin=DEVNULL, stderr=subprocess.STDOUT)
except:
return []
includes_regex = r'#include <\.\.\.> search starts here:\s*' \
r'(.*?)End of search list\.'
includes = re.search(includes_regex, output.decode(), re.DOTALL).group(1)
flags = []
for path in includes.splitlines():
path = path.strip()
if os.path.isdir(path):
flags.append('-isystem')
flags.append(path)
return flags
_system_include_flags = SystemIncludeDirectoryFlags()
# Flags from YCM's default config.
flags = [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x',
'c++',
]
def PathExists(*args):
return os.path.exists(os.path.join(*args))
def FindChromeSrcFromFilename(filename):
"""Searches for the root of the Chromium checkout.
Simply checks parent directories until it finds .gclient and src/.
Args:
filename: (String) Path to source file being edited.
Returns:
(String) Path of 'src/', or None if unable to find.
"""
curdir = os.path.normpath(os.path.dirname(filename))
while not (os.path.basename(os.path.realpath(curdir)) == 'src'
and PathExists(curdir, 'DEPS')
and (PathExists(curdir, '..', '.gclient')
or PathExists(curdir, '.git'))):
nextdir = os.path.normpath(os.path.join(curdir, '..'))
if nextdir == curdir:
return None
curdir = nextdir
return curdir
def GetClangCommandFromNinjaForFilename(chrome_root, filename):
"""Returns the command line to build |filename|.
Asks ninja how it would build the source file. If the specified file is a
header, tries to find its companion source file first.
Args:
chrome_root: (String) Path to src/.
filename: (String) Path to source file being edited.
Returns:
(List of Strings) Command line arguments for clang.
"""
if not chrome_root:
return []
# Generally, everyone benefits from including Chromium's src/, because all of
# Chromium's includes are relative to that.
chrome_flags = ['-I' + os.path.join(chrome_root)]
  # The version of Clang used to compile Chromium can be newer than the version
  # of libclang that YCM uses for completion. So it's possible that YCM's
  # libclang doesn't know about some of the warning options used, which causes
  # compilation warnings (and errors, because of '-Werror').
chrome_flags.append('-Wno-unknown-warning-option')
# Default file to get a reasonable approximation of the flags for a Blink
# file.
blink_root = os.path.join(chrome_root, 'third_party', 'WebKit')
default_blink_file = os.path.join(blink_root, 'Source', 'core', 'Init.cpp')
# Header files can't be built. Instead, try to match a header file to its
# corresponding source file.
if filename.endswith('.h'):
# Add config.h to Blink headers, which won't have it by default.
if filename.startswith(blink_root):
chrome_flags.append('-include')
chrome_flags.append(os.path.join(blink_root, 'Source', 'config.h'))
alternates = ['.cc', '.cpp']
for alt_extension in alternates:
alt_name = filename[:-2] + alt_extension
if os.path.exists(alt_name):
filename = alt_name
break
else:
if filename.startswith(blink_root):
# If this is a Blink file, we can at least try to get a reasonable
# approximation.
filename = default_blink_file
else:
# If this is a standalone .h file with no source, the best we can do is
# try to use the default flags.
return chrome_flags
sys.path.append(os.path.join(chrome_root, 'tools', 'vim'))
from ninja_output import GetNinjaOutputDirectory
out_dir = os.path.realpath(GetNinjaOutputDirectory(chrome_root))
# Ninja needs the path to the source file relative to the output build
# directory.
rel_filename = os.path.relpath(os.path.realpath(filename), out_dir)
# Ask ninja how it would build our source file.
p = subprocess.Popen(['ninja', '-v', '-C', out_dir, '-t',
'commands', rel_filename + '^'],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode:
return chrome_flags
# Ninja might execute several commands to build something. We want the last
# clang command.
clang_line = None
for line in reversed(stdout.split('\n')):
if 'clang' in line:
clang_line = line
break
else:
return chrome_flags
# Parse flags that are important for YCM's purposes.
for flag in clang_line.split(' '):
if flag.startswith('-I'):
# Relative paths need to be resolved, because they're relative to the
# output dir, not the source.
if flag[2] == '/':
chrome_flags.append(flag)
else:
abs_path = os.path.normpath(os.path.join(out_dir, flag[2:]))
chrome_flags.append('-I' + abs_path)
elif flag.startswith('-std'):
chrome_flags.append(flag)
elif flag.startswith('-') and flag[1] in 'DWFfmO':
if flag == '-Wno-deprecated-register' or flag == '-Wno-header-guard':
        # These flags cause libclang (3.3) to crash. Remove them until things
        # are fixed.
continue
chrome_flags.append(flag)
return chrome_flags
def FlagsForFile(filename):
"""This is the main entry point for YCM. Its interface is fixed.
Args:
filename: (String) Path to source file being edited.
Returns:
(Dictionary)
'flags': (List of Strings) Command line flags.
'do_cache': (Boolean) True if the result should be cached.
"""
chrome_root = FindChromeSrcFromFilename(filename)
chrome_flags = GetClangCommandFromNinjaForFilename(chrome_root,
filename)
final_flags = flags + chrome_flags + _system_include_flags
return {
'flags': final_flags,
'do_cache': True
}
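# A minimal, illustrative debug harness (an assumption, not part of the original
# config): YCM normally imports this file and calls FlagsForFile() itself, but
# when hacking on the script it can help to run it directly. The path below is
# a placeholder.
if __name__ == '__main__':
  import pprint
  # Print the flags YCM would receive for a (hypothetical) Chromium source file.
  pprint.pprint(FlagsForFile('/path/to/chromium/src/base/logging.cc'))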
|
|
"""
Implements a special view to visualize and stage pieces of a project's
current diff.
"""
import os
import re
import bisect
import sublime
from sublime_plugin import WindowCommand, TextCommand, EventListener
from .navigate import GsNavigate
from ..git_command import GitCommand
from ..exceptions import GitSavvyError
from ...common import util
DIFF_TITLE = "DIFF: {}"
DIFF_CACHED_TITLE = "DIFF (cached): {}"
diff_views = {}
class GsDiffCommand(WindowCommand, GitCommand):
"""
Create a new view to display the difference of `target_commit`
against `base_commit`. If `target_commit` is None, compare
working directory with `base_commit`. If `in_cached_mode` is set,
display a diff of the Git index. Set `disable_stage` to True to
disable Ctrl-Enter in the diff view.
"""
def run(self, **kwargs):
sublime.set_timeout_async(lambda: self.run_async(**kwargs), 0)
def run_async(self, in_cached_mode=False, file_path=None, current_file=False, base_commit=None,
target_commit=None, disable_stage=False, title=None):
repo_path = self.repo_path
if current_file:
file_path = self.file_path or file_path
view_key = "{0}{1}+{2}".format(
in_cached_mode,
"-" if base_commit is None else "--" + base_commit,
file_path or repo_path
)
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
if view_key in diff_views and diff_views[view_key] in sublime.active_window().views():
diff_view = diff_views[view_key]
else:
diff_view = util.view.get_scratch_view(self, "diff", read_only=True)
if not title:
title = (DIFF_CACHED_TITLE if in_cached_mode else DIFF_TITLE).format(os.path.basename(repo_path))
diff_view.set_name(title)
diff_view.set_syntax_file("Packages/GitSavvy/syntax/diff.sublime-syntax")
diff_view.settings().set("git_savvy.repo_path", repo_path)
diff_view.settings().set("git_savvy.file_path", file_path)
diff_view.settings().set("git_savvy.diff_view.in_cached_mode", in_cached_mode)
diff_view.settings().set("git_savvy.diff_view.ignore_whitespace", False)
diff_view.settings().set("git_savvy.diff_view.show_word_diff", False)
diff_view.settings().set("git_savvy.diff_view.base_commit", base_commit)
diff_view.settings().set("git_savvy.diff_view.target_commit", target_commit)
diff_view.settings().set("git_savvy.diff_view.show_diffstat", savvy_settings.get("show_diffstat", True))
diff_view.settings().set("git_savvy.diff_view.disable_stage", disable_stage)
diff_views[view_key] = diff_view
self.window.focus_view(diff_view)
diff_view.sel().clear()
diff_view.run_command("gs_diff_refresh")
diff_view.run_command("gs_diff_navigate")
diff_view.run_command("gs_handle_vintageous")
class GsDiffRefreshCommand(TextCommand, GitCommand):
"""
Refresh the diff view with the latest repo state.
"""
def run(self, edit, cursors=None):
if self.view.settings().get("git_savvy.disable_diff"):
return
in_cached_mode = self.view.settings().get("git_savvy.diff_view.in_cached_mode")
ignore_whitespace = self.view.settings().get("git_savvy.diff_view.ignore_whitespace")
show_word_diff = self.view.settings().get("git_savvy.diff_view.show_word_diff")
base_commit = self.view.settings().get("git_savvy.diff_view.base_commit")
target_commit = self.view.settings().get("git_savvy.diff_view.target_commit")
show_diffstat = self.view.settings().get("git_savvy.diff_view.show_diffstat")
try:
stdout = self.git(
"diff",
"--ignore-all-space" if ignore_whitespace else None,
"--word-diff" if show_word_diff else None,
"--stat" if show_diffstat else None,
"--patch",
"--no-color",
"--cached" if in_cached_mode else None,
base_commit,
target_commit,
"--", self.file_path)
except GitSavvyError as err:
            # When the output of the above Git command fails to parse correctly,
            # the expected notification will be displayed to the user. However,
            # once the user presses OK, a new refresh event will be triggered on
            # the view.
            #
            # This causes an infinite loop of increasingly frustrating error
            # messages, ultimately resulting in psychosis and serious medical
            # bills. This is a better, though somewhat kludgy, alternative.
#
if err.args and type(err.args[0]) == UnicodeDecodeError:
self.view.settings().set("git_savvy.disable_diff", True)
return
raise err
self.view.run_command("gs_replace_view_text", {"text": stdout})
class GsDiffToggleSetting(TextCommand):
"""
Toggle view settings: `ignore_whitespace` or `show_word_diff`.
"""
def run(self, edit, setting):
setting_str = "git_savvy.diff_view.{}".format(setting)
settings = self.view.settings()
settings.set(setting_str, not settings.get(setting_str))
print("{} is now {}".format(setting, settings.get(setting_str)))
self.view.run_command("gs_diff_refresh")
class GsDiffFocusEventListener(EventListener):
"""
    If the current view is a diff view, refresh the view with the latest tree
    status when the view regains focus.
"""
def on_activated(self, view):
if view.settings().get("git_savvy.diff_view") == True:
sublime.set_timeout_async(lambda: view.run_command("gs_diff_refresh"))
class GsDiffStageOrResetHunkCommand(TextCommand, GitCommand):
"""
    Depending on whether the user is in cached mode and what action
the user took, either 1) stage, 2) unstage, or 3) reset the
hunk under the user's cursor(s).
"""
def run(self, edit, reset=False):
ignore_whitespace = self.view.settings().get("git_savvy.diff_view.ignore_whitespace")
show_word_diff = self.view.settings().get("git_savvy.diff_view.show_word_diff")
if ignore_whitespace or show_word_diff:
sublime.error_message("You have to be in a clean diff to stage.")
return None
# Filter out any cursors that are larger than a single point.
cursor_pts = tuple(cursor.a for cursor in self.view.sel() if cursor.a == cursor.b)
self.diff_starts = tuple(region.a for region in self.view.find_all("^diff"))
self.diff_header_ends = tuple(region.b for region in self.view.find_all("^\+\+\+.+\n(?=@@)"))
self.hunk_starts = tuple(region.a for region in self.view.find_all("^@@"))
hunk_starts_following_headers = {region.b for region in self.view.find_all("^\+\+\+.+\n(?=@@)")}
self.hunk_ends = sorted(list(
# Hunks end when the next diff starts.
set(self.diff_starts[1:]) |
# Hunks end when the next hunk starts, except for hunks
# immediately following diff headers.
(set(self.hunk_starts) - hunk_starts_following_headers) |
# The last hunk ends at the end of the file.
set((self.view.size(), ))
))
sublime.set_timeout_async(lambda: self.apply_diffs_for_pts(cursor_pts, reset), 0)
def apply_diffs_for_pts(self, cursor_pts, reset):
in_cached_mode = self.view.settings().get("git_savvy.diff_view.in_cached_mode")
        # Apply the diffs in reverse order - otherwise, line numbers will be off.
for pt in reversed(cursor_pts):
hunk_diff = self.get_hunk_diff(pt)
# The three argument combinations below result from the following
# three scenarios:
#
# 1) The user is in non-cached mode and wants to stage a hunk, so
# do NOT apply the patch in reverse, but do apply it only against
# the cached/indexed file (not the working tree).
# 2) The user is in non-cached mode and wants to undo a line/hunk, so
# DO apply the patch in reverse, and do apply it both against the
# index and the working tree.
            # 3) The user is in cached mode and wants to undo a line/hunk, so DO
# apply the patch in reverse, but only apply it against the cached/
# indexed file.
#
# NOTE: When in cached mode, no action will be taken when the user
# presses SUPER-BACKSPACE.
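            # Concretely, the self.git() call below therefore reduces to one of:
            #   1) git apply --cached -       (stage the hunk into the index)
            #   2) git apply -R -             (reset the hunk in the working tree)
            #   3) git apply -R --cached -    (unstage the hunk from the index)
            # with the hunk's patch text passed on stdin.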
self.git(
"apply",
"-R" if (reset or in_cached_mode) else None,
"--cached" if (in_cached_mode or not reset) else None,
"-",
stdin=hunk_diff
)
sublime.set_timeout_async(lambda: self.view.run_command("gs_diff_refresh"))
def get_hunk_diff(self, pt):
"""
Given a cursor position, find and return the diff header and the
diff for the selected hunk/file.
"""
header_start = self.diff_starts[bisect.bisect(self.diff_starts, pt) - 1]
header_end = self.diff_header_ends[bisect.bisect(self.diff_header_ends, pt) - 1]
if not header_end or header_end < header_start:
# The cursor is not within a hunk.
return
diff_start = self.hunk_starts[bisect.bisect(self.hunk_starts, pt) - 1]
diff_end = self.hunk_ends[bisect.bisect(self.hunk_ends, pt)]
header = self.view.substr(sublime.Region(header_start, header_end))
diff = self.view.substr(sublime.Region(diff_start, diff_end))
return header + diff
class GsDiffOpenFileAtHunkCommand(TextCommand, GitCommand):
"""
For each cursor in the view, identify the hunk in which the cursor lies,
and open the file at that hunk in a separate view.
"""
def run(self, edit):
# Filter out any cursors that are larger than a single point.
cursor_pts = tuple(cursor.a for cursor in self.view.sel() if cursor.a == cursor.b)
diff_starts = tuple(region.a for region in self.view.find_all("^diff"))
hunk_starts = tuple(region.a for region in self.view.find_all("^@@"))
for cursor_pt in cursor_pts:
diff_start = diff_starts[bisect.bisect(diff_starts, cursor_pt) - 1]
diff_start_line = self.view.substr(self.view.line(diff_start))
hunk_start = hunk_starts[bisect.bisect(hunk_starts, cursor_pt) - 1]
hunk_line_str = self.view.substr(self.view.line(hunk_start))
hunk_line, _ = self.view.rowcol(hunk_start)
cursor_line, _ = self.view.rowcol(cursor_pt)
additional_lines = cursor_line - hunk_line - 1
# Example: "diff --git a/src/js/main.spec.js b/src/js/main.spec.js" --> "src/js/main.spec.js"
            use_prefix = re.search(r" b/(.+?)$", diff_start_line)
            if use_prefix is None:
                filename = diff_start_line.split(" ")[-1]
            else:
                filename = use_prefix.groups()[0]
# Example: "@@ -9,6 +9,7 @@" --> 9
lineno = int(re.search(r"^@@ \-\d+(,-?\d+)? \+(\d+)", hunk_line_str).groups()[1])
lineno = lineno + additional_lines
self.load_file_at_line(filename, lineno)
def load_file_at_line(self, filename, lineno):
"""
Show file at target commit if `git_savvy.diff_view.target_commit` is non-empty.
Otherwise, open the file directly.
"""
target_commit = self.view.settings().get("git_savvy.diff_view.target_commit")
full_path = os.path.join(self.repo_path, filename)
if target_commit:
self.view.window().run_command("gs_show_file_at_commit", {
"commit_hash": target_commit,
"filepath": full_path,
"lineno": lineno
})
else:
self.view.window().open_file(
"{file}:{row}:{col}".format(file=full_path, row=lineno, col=0),
sublime.ENCODED_POSITION
)
class GsDiffNavigateCommand(GsNavigate):
"""
Travel between hunks. It is also used by show_commit_view.
"""
offset = 0
def run(self, edit, **kwargs):
super().run(edit, **kwargs)
self.view.run_command("show_at_center")
def get_available_regions(self):
return [self.view.line(region) for region in
self.view.find_by_selector("meta.diff.range.unified")]
|
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos "new" plugin
#
# Copyright 2013 (C) cocos2d-x.org
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"new" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
# python
import os
import sys
import getopt
import ConfigParser
import json
import shutil
import cocos
from MultiLanguage import MultiLanguage
import cocos_project
import re
from collections import OrderedDict
#
# Plugins should be a subclass of cocos.CCPlugin
#
class CCPluginNew(cocos.CCPlugin):
DEFAULT_PROJ_NAME = {
cocos_project.Project.CPP: 'MyCppGame',
cocos_project.Project.LUA: 'MyLuaGame',
cocos_project.Project.JS: 'MyJSGame'
}
PROJ_CFG_KEY_ENGINE_VERSION = "engine_version"
@staticmethod
def plugin_name():
return "new"
@staticmethod
def brief_description():
return MultiLanguage.get_string('NEW_BRIEF')
def init(self, args):
self._projname = args.name
self._projdir = unicode(
os.path.abspath(os.path.join(args.directory, self._projname)), "utf-8")
self._lang = args.language
self._package = args.package
self._tpname = args.template
# new official ways to get the template and cocos paths
self._templates_paths = self.get_templates_paths()
self._cocosroot = self.get_cocos2d_path()
# search for custom paths
if args.engine_path is not None:
self._cocosroot = os.path.abspath(args.engine_path)
self._cocosroot = unicode(self._cocosroot, "utf-8")
tp_path = os.path.join(self._cocosroot, "templates")
if os.path.isdir(tp_path):
self._templates_paths.append(tp_path)
# remove duplicates keeping order
o = OrderedDict.fromkeys(self._templates_paths)
self._templates_paths = o.keys()
self._other_opts = args
self._mac_bundleid = args.mac_bundleid
self._ios_bundleid = args.ios_bundleid
self._templates = Templates(args.language, self._templates_paths, args.template)
if self._templates.none_active():
self._templates.select_one()
# parse arguments
def parse_args(self, argv):
"""Custom and check param list.
"""
from argparse import ArgumentParser
        # set up the parser to parse input params
        # the corresponding attribute name for "-x, --xxx" is args.xxx
name = CCPluginNew.plugin_name()
category = CCPluginNew.plugin_category()
parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
description=self.__class__.brief_description())
parser.add_argument(
"name", metavar="PROJECT_NAME", nargs='?', help=MultiLanguage.get_string('NEW_ARG_NAME'))
parser.add_argument(
"-p", "--package", metavar="PACKAGE_NAME", help=MultiLanguage.get_string('NEW_ARG_PACKAGE'))
parser.add_argument("-l", "--language",
required=True,
choices=["cpp", "lua", "js"],
help=MultiLanguage.get_string('NEW_ARG_LANG'))
parser.add_argument("-d", "--directory", metavar="DIRECTORY",
help=MultiLanguage.get_string('NEW_ARG_DIR'))
parser.add_argument("-t", "--template", metavar="TEMPLATE_NAME",
help=MultiLanguage.get_string('NEW_ARG_TEMPLATE'))
parser.add_argument(
"--ios-bundleid", dest="ios_bundleid", help=MultiLanguage.get_string('NEW_ARG_IOS_BUNDLEID'))
parser.add_argument(
"--mac-bundleid", dest="mac_bundleid", help=MultiLanguage.get_string('NEW_ARG_MAC_BUNDLEID'))
parser.add_argument("-e", "--engine-path", dest="engine_path",
help=MultiLanguage.get_string('NEW_ARG_ENGINE_PATH'))
parser.add_argument("--portrait", action="store_true", dest="portrait",
help=MultiLanguage.get_string('NEW_ARG_PORTRAIT'))
group = parser.add_argument_group(MultiLanguage.get_string('NEW_ARG_GROUP_SCRIPT'))
group.add_argument(
"--no-native", action="store_true", dest="no_native",
help=MultiLanguage.get_string('NEW_ARG_NO_NATIVE'))
# parse the params
args = parser.parse_args(argv)
if args.name is None:
args.name = CCPluginNew.DEFAULT_PROJ_NAME[args.language]
if not args.package:
args.package = "org.cocos2dx.%s" % args.name
if not args.ios_bundleid:
args.ios_bundleid = args.package
if not args.mac_bundleid:
args.mac_bundleid = args.package
if not args.directory:
args.directory = os.getcwd()
if not args.template:
args.template = 'default'
self.init(args)
return args
def _stat_engine_version(self):
try:
ver_str = None
engine_type = None
framework_ver_file = os.path.join(self._cocosroot, 'version')
x_ver_file = os.path.join(self._cocosroot, 'cocos/cocos2d.cpp')
js_ver_file = os.path.join(self._cocosroot, 'frameworks/js-bindings/bindings/manual/ScriptingCore.h')
if os.path.isfile(framework_ver_file):
# the engine is Cocos Framework
f = open(framework_ver_file)
ver_str = f.read()
f.close()
engine_type = 'cocosframework'
else:
ver_file = None
pattern = None
if os.path.isfile(x_ver_file):
# the engine is cocos2d-x
pattern = r".*return[ \t]+\"(.*)\";"
ver_file = x_ver_file
engine_type = 'cocos2d-x'
elif os.path.isfile(js_ver_file):
# the engine is cocos2d-js
pattern = r".*#define[ \t]+ENGINE_VERSION[ \t]+\"(.*)\""
ver_file = js_ver_file
engine_type = 'cocos2d-js'
if ver_file is not None:
f = open(ver_file)
import re
for line in f.readlines():
match = re.match(pattern, line)
if match:
ver_str = match.group(1)
break
f.close()
if ver_str is not None:
# stat the engine version info
cocos.DataStatistic.stat_event('new_engine_ver', ver_str, engine_type)
except:
pass
def _create_from_cmd(self):
# check the dst project dir exists
if os.path.exists(self._projdir):
message = MultiLanguage.get_string('NEW_ERROR_FOLDER_EXISTED_FMT', self._projdir)
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
tp_dir = self._templates.template_path()
creator = TPCreator(self._lang, self._cocosroot, self._projname, self._projdir,
self._tpname, tp_dir, self._package, self._mac_bundleid, self._ios_bundleid)
# do the default creating step
creator.do_default_step()
data = None
cfg_path = os.path.join(self._projdir, cocos_project.Project.CONFIG)
if os.path.isfile(cfg_path):
f = open(cfg_path)
data = json.load(f)
f.close()
if data is None:
data = {}
if cocos_project.Project.KEY_PROJ_TYPE not in data:
data[cocos_project.Project.KEY_PROJ_TYPE] = self._lang
# script project may add native support
if self._lang in (cocos_project.Project.LUA, cocos_project.Project.JS):
if not self._other_opts.no_native:
creator.do_other_step('do_add_native_support')
data[cocos_project.Project.KEY_HAS_NATIVE] = True
else:
data[cocos_project.Project.KEY_HAS_NATIVE] = False
# record the engine version if not predefined
if not data.has_key(CCPluginNew.PROJ_CFG_KEY_ENGINE_VERSION):
engine_version = get_engine_version(self._cocosroot)
if engine_version is not None:
data[CCPluginNew.PROJ_CFG_KEY_ENGINE_VERSION] = engine_version
# if --portrait is specified, change the orientation
if self._other_opts.portrait:
creator.do_other_step("change_orientation", not_existed_error=False)
# write config files
with open(cfg_path, 'w') as outfile:
json.dump(data, outfile, sort_keys=True, indent=4)
# main entry point
def run(self, argv, dependencies):
self.parse_args(argv)
action_str = 'new_%s' % (self._lang)
cocos.DataStatistic.stat_event('new', action_str, self._tpname)
self._create_from_cmd()
self._stat_engine_version()
VERSION_FILE_PATH = 'cocos/cocos2d.cpp'
VERSION_PATTERN = r".*return[ \t]+\"(.*)\";"
def get_engine_version(engine_path):
ret = None
try:
version_file = os.path.join(engine_path, VERSION_FILE_PATH)
if os.path.isfile(version_file):
f = open(version_file)
for line in f.readlines():
match = re.match(VERSION_PATTERN, line)
if match:
ret = match.group(1)
break
f.close()
except:
pass
return ret
def replace_string(filepath, src_string, dst_string):
""" From file's content replace specified string
Arg:
filepath: Specify a file contains the path
src_string: old string
dst_string: new string
"""
if src_string is None or dst_string is None:
raise TypeError
content = ""
f1 = open(filepath, "rb")
for line in f1:
strline = line.decode('utf8')
if src_string in strline:
content += strline.replace(src_string, dst_string)
else:
content += strline
f1.close()
f2 = open(filepath, "wb")
f2.write(content.encode('utf8'))
f2.close()
# end of replace_string
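# Illustrative usage of replace_string (hypothetical file and names, not part
# of the original plugin):
#   replace_string('proj.ios_mac/HelloCpp.xcodeproj/project.pbxproj',
#                  'HelloCpp', 'MyGame')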
class Templates(object):
def __init__(self, lang, templates_paths, current):
self._lang = lang
self._templates_paths = templates_paths
self._scan()
self._current = None
if current is not None:
if current in self._template_folders:
self._current = current
else:
cocos.Logging.warning(MultiLanguage.get_string('NEW_TEMPLATE_NOT_FOUND_FMT', current))
def _scan(self):
template_pattern = {
"cpp": 'cpp-template-(.+)',
"lua": 'lua-template-(.+)',
"js": 'js-template-(.+)',
}
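        # For example, with self._lang == "cpp" a folder named
        # "cpp-template-default" is registered under the template name "default".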
self._template_folders = {}
for templates_dir in self._templates_paths:
try:
dirs = [name for name in os.listdir(templates_dir) if os.path.isdir(
os.path.join(templates_dir, name))]
except Exception:
continue
pattern = template_pattern[self._lang]
for name in dirs:
match = re.search(pattern, name)
if match is None:
continue
template_name = match.group(1)
if template_name in self._template_folders.keys():
continue
self._template_folders[template_name] = os.path.join(templates_dir, name)
if len(self._template_folders) == 0:
cur_engine = "cocos2d-x" if self._lang == "js" else "cocos2d-js"
need_engine = "cocos2d-js" if self._lang == "js" else "cocos2d-x"
engine_tip = MultiLanguage.get_string('NEW_ERROR_ENGINE_TIP_FMT', need_engine)
message = MultiLanguage.get_string('NEW_ERROR_TEMPLATE_NOT_FOUND_FMT', (self._lang, engine_tip))
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
def none_active(self):
return self._current is None
def template_path(self):
if self._current is None:
return None
return self._template_folders[self._current]
def select_one(self):
cocos.Logging.warning(MultiLanguage.get_string('NEW_SELECT_TEMPLATE_TIP1'))
p = self._template_folders.keys()
for i in range(len(p)):
cocos.Logging.warning('%d %s' % (i + 1, p[i]))
cocos.Logging.warning(MultiLanguage.get_string('NEW_SELECT_TEMPLATE_TIP2'))
while True:
option = raw_input()
if option.isdigit():
option = int(option) - 1
if option in range(len(p)):
break
self._current = p[option]
class TPCreator(object):
def __init__(self, lang, cocos_root, project_name, project_dir, tp_name, tp_dir, project_package, mac_id, ios_id):
self.lang = lang
self.cocos_root = cocos_root
self.project_dir = project_dir
self.project_name = project_name
self.package_name = project_package
self.mac_bundleid = mac_id
self.ios_bundleid = ios_id
self.tp_name = tp_name
self.tp_dir = tp_dir
self.tp_json = 'cocos-project-template.json'
tp_json_path = os.path.join(tp_dir, self.tp_json)
if not os.path.exists(tp_json_path):
message = MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT', tp_json_path)
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
f = open(tp_json_path)
# keep the key order
tpinfo = json.load(f, encoding='utf8', object_pairs_hook=OrderedDict)
# read the default creating step
if 'do_default' not in tpinfo:
message = (MultiLanguage.get_string('NEW_ERROR_DEFAILT_CFG_NOT_FOUND_FMT', tp_json_path))
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_WRONG_CONFIG)
self.tp_default_step = tpinfo.pop('do_default')
# keep the other steps
self.tp_other_step = tpinfo
def cp_self(self, project_dir, exclude_files):
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_COPY_TEMPLATE_FMT', project_dir))
if not os.path.exists(self.project_dir):
os.makedirs(self.project_dir)
copy_cfg = {
"from": self.tp_dir,
"to": self.project_dir,
"exclude": exclude_files
}
cocos.copy_files_with_config(copy_cfg, self.tp_dir, self.project_dir)
def do_default_step(self):
default_cmds = self.tp_default_step
exclude_files = []
if "exclude_from_template" in default_cmds:
exclude_files = exclude_files + \
default_cmds['exclude_from_template']
default_cmds.pop('exclude_from_template')
        # should ignore the xx-template-xx.json
exclude_files.append(self.tp_json)
self.cp_self(self.project_dir, exclude_files)
self.do_cmds(default_cmds)
def do_other_step(self, step, not_existed_error=True):
if step not in self.tp_other_step:
if not_existed_error:
# handle as error
message = MultiLanguage.get_string('NEW_ERROR_STEP_NOT_FOUND_FMT', step)
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_WRONG_CONFIG)
else:
# handle as warning
cocos.Logging.warning(MultiLanguage.get_string('NEW_WARNING_STEP_NOT_FOUND_FMT', step))
return
cmds = self.tp_other_step[step]
self.do_cmds(cmds)
def do_cmds(self, cmds):
for k, v in cmds.iteritems():
# call cmd method by method/cmd name
# get from
# http://stackoverflow.com/questions/3951840/python-how-to-invoke-an-function-on-an-object-dynamically-by-name
try:
cmd = getattr(self, k)
except AttributeError:
raise cocos.CCPluginError(MultiLanguage.get_string('NEW_ERROR_CMD_NOT_FOUND_FMT', k),
cocos.CCPluginError.ERROR_WRONG_CONFIG)
try:
cmd(v)
except Exception as e:
raise cocos.CCPluginError(str(e), cocos.CCPluginError.ERROR_RUNNING_CMD)
# cmd methods below
def append_h5_engine(self, v):
src = os.path.join(self.cocos_root, v['from'])
dst = os.path.join(self.project_dir, v['to'])
# check cocos engine exist
moduleConfig = 'moduleConfig.json'
        module_cfg = os.path.join(src, moduleConfig)
        if not os.path.exists(module_cfg):
            message = MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT', module_cfg)
            raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
        f = open(module_cfg)
data = json.load(f, 'utf8')
f.close()
modules = data['module']
# must copy moduleConfig.json & CCBoot.js
file_list = [moduleConfig, data['bootFile']]
        for module in modules.itervalues():
            for module_file in module:
                if module_file[-2:] == 'js':
                    file_list.append(module_file)
# begin copy engine
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_COPY_H5'))
for index in range(len(file_list)):
srcfile = os.path.join(src, file_list[index])
dstfile = os.path.join(dst, file_list[index])
srcfile = cocos.add_path_prefix(srcfile)
dstfile = cocos.add_path_prefix(dstfile)
if not os.path.exists(os.path.dirname(dstfile)):
os.makedirs(cocos.add_path_prefix(os.path.dirname(dstfile)))
# copy file or folder
if os.path.exists(srcfile):
if os.path.isdir(srcfile):
if os.path.exists(dstfile):
shutil.rmtree(dstfile)
shutil.copytree(srcfile, dstfile)
else:
if os.path.exists(dstfile):
os.remove(dstfile)
shutil.copy2(srcfile, dstfile)
def append_x_engine(self, v):
# FIXME this is a hack, but in order to fix it correctly the cocos-project-template.json
# file probably will need to be re-designed.
# As a quick (horrible) fix, we check if we are in distro mode.
# If so, we don't do the "append_x_engine" step
if cocos.CCPlugin.get_cocos2d_mode() == 'distro':
return
src = os.path.join(self.cocos_root, v['from'])
dst = os.path.join(self.project_dir, v['to'])
# check cocos engine exist
cocosx_files_json = os.path.join(
src, 'templates', 'cocos2dx_files.json')
if not os.path.exists(cocosx_files_json):
message = MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT', cocosx_files_json)
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
f = open(cocosx_files_json)
data = json.load(f)
f.close()
fileList = data['common']
if self.lang == 'lua':
fileList = fileList + data['lua']
if self.lang == 'js' and 'js' in data.keys():
fileList = fileList + data['js']
# begin copy engine
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_COPY_X'))
for index in range(len(fileList)):
srcfile = os.path.join(src, fileList[index])
dstfile = os.path.join(dst, fileList[index])
srcfile = cocos.add_path_prefix(srcfile)
dstfile = cocos.add_path_prefix(dstfile)
if not os.path.exists(os.path.dirname(dstfile)):
os.makedirs(cocos.add_path_prefix(os.path.dirname(dstfile)))
# copy file or folder
if os.path.exists(srcfile):
if os.path.isdir(srcfile):
if os.path.exists(dstfile):
shutil.rmtree(dstfile)
shutil.copytree(srcfile, dstfile)
else:
if os.path.exists(dstfile):
os.remove(dstfile)
shutil.copy2(srcfile, dstfile)
def append_from_template(self, v):
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_APPEND_TEMPLATE'))
cocos.copy_files_with_config(v, self.tp_dir, self.project_dir)
def append_dir(self, v):
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_APPEND_DIR'))
for item in v:
cocos.copy_files_with_config(
item, self.cocos_root, self.project_dir)
def append_file(self, v):
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_APPEND_FILE'))
for item in v:
src = os.path.join(self.cocos_root, item['from'])
dst = os.path.join(self.project_dir, item['to'])
src = cocos.add_path_prefix(src)
dst = cocos.add_path_prefix(dst)
shutil.copy2(src, dst)
# project cmd
def project_rename(self, v):
""" will modify the file name of the file
"""
dst_project_dir = self.project_dir
dst_project_name = self.project_name
src_project_name = v['src_project_name']
if dst_project_name == src_project_name:
return
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_RENAME_PROJ_FMT',
(src_project_name, dst_project_name)))
files = v['files']
for f in files:
src = f.replace("PROJECT_NAME", src_project_name)
dst = f.replace("PROJECT_NAME", dst_project_name)
src_file_path = os.path.join(dst_project_dir, src)
dst_file_path = os.path.join(dst_project_dir, dst)
if os.path.exists(src_file_path):
if dst_project_name.lower() == src_project_name.lower():
temp_file_path = "%s-temp" % src_file_path
os.rename(src_file_path, temp_file_path)
os.rename(temp_file_path, dst_file_path)
else:
if os.path.exists(dst_file_path):
os.remove(dst_file_path)
os.rename(src_file_path, dst_file_path)
else:
cocos.Logging.warning(MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT',
os.path.join(dst_project_dir, src)))
def project_replace_project_name(self, v):
""" will modify the content of the file
"""
dst_project_dir = self.project_dir
dst_project_name = self.project_name
src_project_name = v['src_project_name']
if dst_project_name == src_project_name:
return
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_REPLACE_PROJ_FMT',
(src_project_name, dst_project_name)))
files = v['files']
for f in files:
dst = f.replace("PROJECT_NAME", dst_project_name)
if os.path.exists(os.path.join(dst_project_dir, dst)):
replace_string(
os.path.join(dst_project_dir, dst), src_project_name, dst_project_name)
else:
cocos.Logging.warning(MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT',
os.path.join(dst_project_dir, dst)))
def project_replace_package_name(self, v):
""" will modify the content of the file
"""
dst_project_dir = self.project_dir
dst_project_name = self.project_name
src_package_name = v['src_package_name']
dst_package_name = self.package_name
if dst_package_name == src_package_name:
return
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_REPLACE_PKG_FMT',
(src_package_name, dst_package_name)))
files = v['files']
if not dst_package_name:
raise cocos.CCPluginError(MultiLanguage.get_string('NEW_ERROR_PKG_NAME_NOT_SPECIFIED'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
for f in files:
dst = f.replace("PROJECT_NAME", dst_project_name)
if os.path.exists(os.path.join(dst_project_dir, dst)):
replace_string(
os.path.join(dst_project_dir, dst), src_package_name, dst_package_name)
else:
cocos.Logging.warning(MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT',
os.path.join(dst_project_dir, dst)))
def project_replace_mac_bundleid(self, v):
""" will modify the content of the file
"""
if self.mac_bundleid is None:
return
dst_project_dir = self.project_dir
dst_project_name = self.project_name
src_bundleid = v['src_bundle_id']
dst_bundleid = self.mac_bundleid
if src_bundleid == dst_bundleid:
return
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_MAC_BUNDLEID_FMT',
(src_bundleid, dst_bundleid)))
files = v['files']
for f in files:
dst = f.replace("PROJECT_NAME", dst_project_name)
if os.path.exists(os.path.join(dst_project_dir, dst)):
replace_string(
os.path.join(dst_project_dir, dst), src_bundleid, dst_bundleid)
else:
cocos.Logging.warning(MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT',
os.path.join(dst_project_dir, dst)))
def project_replace_ios_bundleid(self, v):
""" will modify the content of the file
"""
if self.ios_bundleid is None:
return
dst_project_dir = self.project_dir
dst_project_name = self.project_name
src_bundleid = v['src_bundle_id']
dst_bundleid = self.ios_bundleid
if src_bundleid == dst_bundleid:
return
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_IOS_BUNDLEID_FMT',
(src_bundleid, dst_bundleid)))
files = v['files']
for f in files:
dst = f.replace("PROJECT_NAME", dst_project_name)
if os.path.exists(os.path.join(dst_project_dir, dst)):
replace_string(
os.path.join(dst_project_dir, dst), src_bundleid, dst_bundleid)
else:
cocos.Logging.warning(MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT',
os.path.join(dst_project_dir, dst)))
def modify_files(self, v):
""" will modify the content of the file
format of v is :
[
{
"file_path": The path related with project directory,
"pattern": Find pattern,
"replace_string": Replaced string
},
...
]
"""
cocos.Logging.info(MultiLanguage.get_string('NEW_INFO_STEP_MODIFY_FILE'))
for modify_info in v:
modify_file = modify_info["file_path"]
if not os.path.isabs(modify_file):
modify_file = os.path.abspath(os.path.join(self.project_dir, modify_file))
if not os.path.isfile(modify_file):
cocos.Logging.warning(MultiLanguage.get_string('NEW_WARNING_NOT_A_FILE_FMT', modify_file))
continue
pattern = modify_info["pattern"]
replace_str = modify_info["replace_string"]
f = open(modify_file)
lines = f.readlines()
f.close()
new_lines = []
for line in lines:
new_line = re.sub(pattern, replace_str, line)
new_lines.append(new_line)
f = open(modify_file, "w")
f.writelines(new_lines)
f.close()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import socket
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import requests
import six
from six.moves.urllib import parse
from celebrerclient.common import exceptions as exc
LOG = logging.getLogger(__name__)
USER_AGENT = 'python-celebrerclient'
CHUNKSIZE = 1024 * 64 # 64kB
def get_system_ca_file():
"""Return path to system default CA file."""
# Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
# Suse, FreeBSD/OpenBSD, MacOSX, and the bundled ca
ca_path = ['/etc/ssl/certs/ca-certificates.crt',
'/etc/pki/tls/certs/ca-bundle.crt',
'/etc/ssl/ca-bundle.pem',
'/etc/ssl/cert.pem',
'/System/Library/OpenSSL/certs/cacert.pem',
requests.certs.where()]
for ca in ca_path:
LOG.debug("Looking for ca file %s", ca)
if os.path.exists(ca):
LOG.debug("Using ca file %s", ca)
return ca
LOG.warn("System ca file could not be found.")
class HTTPClient(object):
def __init__(self, endpoint, **kwargs):
self.endpoint = endpoint
self.auth_url = kwargs.get('auth_url')
self.auth_token = kwargs.get('token')
self.username = kwargs.get('username')
self.password = kwargs.get('password')
self.region_name = kwargs.get('region_name')
self.include_pass = kwargs.get('include_pass')
self.endpoint_url = endpoint
self.cert_file = kwargs.get('cert_file')
self.key_file = kwargs.get('key_file')
self.timeout = kwargs.get('timeout')
self.ssl_connection_params = {
'cacert': kwargs.get('cacert'),
'cert_file': kwargs.get('cert_file'),
'key_file': kwargs.get('key_file'),
'insecure': kwargs.get('insecure'),
}
self.verify_cert = None
if parse.urlparse(endpoint).scheme == "https":
if kwargs.get('insecure'):
self.verify_cert = False
else:
self.verify_cert = kwargs.get('cacert', get_system_ca_file())
def log_curl_request(self, method, url, kwargs):
curl = ['curl -i -X %s' % method]
for (key, value) in kwargs['headers'].items():
header = '-H \'%s: %s\'' % (encodeutils.safe_decode(key),
encodeutils.safe_decode(value))
curl.append(header)
conn_params_fmt = [
('key_file', '--key %s'),
('cert_file', '--cert %s'),
('cacert', '--cacert %s'),
]
for (key, fmt) in conn_params_fmt:
value = self.ssl_connection_params.get(key)
if value:
curl.append(fmt % value)
if self.ssl_connection_params.get('insecure'):
curl.append('-k')
if 'data' in kwargs:
curl.append('-d \'%s\'' % kwargs['data'])
curl.append('%s%s' % (self.endpoint, url))
LOG.debug(' '.join(curl))
@staticmethod
def log_http_response(resp):
status = (resp.raw.version / 10.0, resp.status_code, resp.reason)
dump = ['\nHTTP/%.1f %s %s' % status]
dump.extend(['%s: %s' % (k, v) for k, v in resp.headers.items()])
dump.append('')
if resp.content:
content = resp.content
if isinstance(content, six.binary_type):
try:
content = encodeutils.safe_decode(resp.content)
except UnicodeDecodeError:
pass
else:
dump.extend([content, ''])
LOG.debug('\n'.join(dump))
def _http_request(self, url, method, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around requests.request to handle tasks such
as setting headers and error handling.
"""
# Copy the kwargs so we can reuse the original in case of redirects
kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
kwargs['headers'].setdefault('User-Agent', USER_AGENT)
if self.auth_token:
kwargs['headers'].setdefault('X-Auth-Token', self.auth_token)
else:
kwargs['headers'].update(self.credentials_headers())
if self.auth_url:
kwargs['headers'].setdefault('X-Auth-Url', self.auth_url)
if self.region_name:
kwargs['headers'].setdefault('X-Region-Name', self.region_name)
self.log_curl_request(method, url, kwargs)
if self.cert_file and self.key_file:
kwargs['cert'] = (self.cert_file, self.key_file)
if self.verify_cert is not None:
kwargs['verify'] = self.verify_cert
if self.timeout is not None:
kwargs['timeout'] = float(self.timeout)
# Allow the option not to follow redirects
follow_redirects = kwargs.pop('follow_redirects', True)
        # requests does not follow the RFC when redirecting: on a redirect it
        # should resend the same method, so we simply bypass its redirect
        # handling. For example, if we do a DELETE/POST/PUT on a URL and get a
        # 302, the RFC says we should follow that URL with the same method as
        # before, but requests sends a GET instead. Hopefully this will be
        # fixed, as they say in a comment, in a future version, i.e. 3.x.
# See issue: https://github.com/kennethreitz/requests/issues/1704
allow_redirects = False
try:
resp = requests.request(
method,
self.endpoint_url + url,
allow_redirects=allow_redirects,
**kwargs)
except socket.gaierror as e:
message = ("Error finding address for %(url)s: %(e)s" %
{'url': self.endpoint_url + url, 'e': e})
raise exc.InvalidEndpoint(message=message)
except (socket.error,
socket.timeout,
requests.exceptions.ConnectionError) as e:
endpoint = self.endpoint
message = ("Error communicating with %(endpoint)s %(e)s" %
{'endpoint': endpoint, 'e': e})
raise exc.CommunicationError(message=message)
self.log_http_response(resp)
if 'X-Auth-Key' not in kwargs['headers'] and \
(resp.status_code == 401 or
(resp.status_code == 500 and "(HTTP 401)" in resp.content)):
raise exc.HTTPUnauthorized("Authentication failed. Please try"
" again.\n%s"
% resp.content)
elif 400 <= resp.status_code < 600:
raise exc.from_response(resp)
elif resp.status_code in (301, 302, 305):
# Redirected. Reissue the request to the new location,
# unless caller specified follow_redirects=False
if follow_redirects:
location = resp.headers.get('location')
path = self.strip_endpoint(location)
resp = self._http_request(path, method, **kwargs)
elif resp.status_code == 300:
raise exc.from_response(resp)
return resp
def strip_endpoint(self, location):
if location is None:
message = "Location not returned with 302"
raise exc.InvalidEndpoint(message=message)
elif location.startswith(self.endpoint):
return location[len(self.endpoint):]
else:
message = "Prohibited endpoint redirect %s" % location
raise exc.InvalidEndpoint(message=message)
def credentials_headers(self):
creds = {}
if self.username:
creds['X-Auth-User'] = self.username
if self.password:
creds['X-Auth-Key'] = self.password
return creds
def json_request(self, method, url, content_type='application/json',
**kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type', content_type)
# Don't set Accept because we aren't always dealing in JSON
if 'body' in kwargs:
if 'data' in kwargs:
raise ValueError("Can't provide both 'data' and "
"'body' to a request")
LOG.warning("Use of 'body' is deprecated; use 'data' instead")
kwargs['data'] = kwargs.pop('body')
if 'data' in kwargs:
kwargs['data'] = jsonutils.dumps(kwargs['data'])
resp = self._http_request(url, method, **kwargs)
body = resp.content
if body and 'application/json' in resp.headers['Content-type']:
try:
                body = jsonutils.loads(body)
except ValueError:
LOG.error('Could not decode response body as JSON')
else:
body = None
return resp, body
def json_patch_request(self, url, method='PATCH', **kwargs):
content_type = 'application/celebrer-json-patch'
return self.json_request(
method, url, content_type=content_type, **kwargs)
def raw_request(self, method, url, **kwargs):
if 'body' in kwargs:
if 'data' in kwargs:
raise ValueError("Can't provide both 'data' and "
"'body' to a request")
LOG.warning("Use of 'body' is deprecated; use 'data' instead")
kwargs['data'] = kwargs.pop('body')
        # Chunking happens automatically if 'data' is a
        # file-like object
return self._http_request(url, method, **kwargs)
def client_request(self, method, url, **kwargs):
resp, body = self.json_request(method, url, **kwargs)
return resp
def head(self, url, **kwargs):
return self.client_request("HEAD", url, **kwargs)
def get(self, url, **kwargs):
return self.client_request("GET", url, **kwargs)
def post(self, url, **kwargs):
return self.client_request("POST", url, **kwargs)
def put(self, url, **kwargs):
return self.client_request("PUT", url, **kwargs)
def delete(self, url, **kwargs):
return self.raw_request("DELETE", url, **kwargs)
def patch(self, url, **kwargs):
return self.client_request("PATCH", url, **kwargs)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""EvaluationContext tracks global state, triggers and watermarks."""
from __future__ import absolute_import
import collections
import threading
from apache_beam.transforms import sideinputs
from apache_beam.runners.direct.clock import Clock
from apache_beam.runners.direct.watermark_manager import WatermarkManager
from apache_beam.runners.direct.executor import TransformExecutor
from apache_beam.runners.direct.direct_metrics import DirectMetrics
from apache_beam.transforms.trigger import InMemoryUnmergedState
from apache_beam.utils import counters
class _ExecutionContext(object):
def __init__(self, watermarks, keyed_states):
self.watermarks = watermarks
self.keyed_states = keyed_states
self._step_context = None
def get_step_context(self):
if not self._step_context:
self._step_context = DirectStepContext(self.keyed_states)
return self._step_context
class _SideInputView(object):
def __init__(self, view):
self._view = view
self.callable_queue = collections.deque()
self.elements = []
self.value = None
self.has_result = False
class _SideInputsContainer(object):
"""An in-process container for side inputs.
It provides methods for blocking until a side-input is available and writing
to a side input.
"""
def __init__(self, views):
self._lock = threading.Lock()
self._views = {}
for view in views:
self._views[view] = _SideInputView(view)
def get_value_or_schedule_after_output(self, side_input, task):
with self._lock:
view = self._views[side_input]
if not view.has_result:
view.callable_queue.append(task)
task.blocked = True
return (view.has_result, view.value)
def add_values(self, side_input, values):
with self._lock:
view = self._views[side_input]
assert not view.has_result
view.elements.extend(values)
def finalize_value_and_get_tasks(self, side_input):
with self._lock:
view = self._views[side_input]
assert not view.has_result
assert view.value is None
assert view.callable_queue is not None
view.value = self._pvalue_to_value(side_input, view.elements)
view.elements = None
result = tuple(view.callable_queue)
for task in result:
task.blocked = False
view.callable_queue = None
view.has_result = True
return result
def _pvalue_to_value(self, view, values):
"""Given a side input view, returns the associated value in requested form.
Args:
view: SideInput for the requested side input.
values: Iterable values associated with the side input.
Returns:
The side input in its requested form.
Raises:
ValueError: If values cannot be converted into the requested form.
"""
return sideinputs.SideInputMap(type(view), view._view_options(), values)
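# Illustrative hand-off protocol for _SideInputsContainer (comments only;
# real 'view' objects come from apache_beam.pvalue and real 'task' objects
# are TransformExecutor instances):
#
#   container = _SideInputsContainer([view])
#   has_result, value = container.get_value_or_schedule_after_output(view, task)
#   # -> (False, None): 'task' is queued and marked blocked.
#   container.add_values(view, elements)  # may be called multiple times
#   for unblocked in container.finalize_value_and_get_tasks(view):
#       executor_service.submit(unblocked)
#   has_result, value = container.get_value_or_schedule_after_output(view, task)
#   # -> (True, <side input in its requested form>)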
class EvaluationContext(object):
"""Evaluation context with the global state information of the pipeline.
The evaluation context for a specific pipeline being executed by the
DirectRunner. Contains state shared within the execution across all
transforms.
EvaluationContext contains shared state for an execution of the
DirectRunner that can be used while evaluating a PTransform. This
consists of views into underlying state and watermark implementations, access
to read and write side inputs, and constructing counter sets and
execution contexts. This includes executing callbacks asynchronously when
state changes to the appropriate point (e.g. when a side input is
requested and known to be empty).
  EvaluationContext also handles results by committing and finalizing
bundles based on the current global state and updating the global state
appropriately. This includes updating the per-(step,key) state, updating
global watermarks, and executing any callbacks that can be executed.
"""
def __init__(self, pipeline_options, bundle_factory, root_transforms,
value_to_consumers, step_names, views):
self.pipeline_options = pipeline_options
self._bundle_factory = bundle_factory
self._root_transforms = root_transforms
self._value_to_consumers = value_to_consumers
self._step_names = step_names
self.views = views
self._pcollection_to_views = collections.defaultdict(list)
for view in views:
self._pcollection_to_views[view.pvalue].append(view)
self._transform_keyed_states = self._initialize_keyed_states(
root_transforms, value_to_consumers)
self._watermark_manager = WatermarkManager(
Clock(), root_transforms, value_to_consumers,
self._transform_keyed_states)
self._side_inputs_container = _SideInputsContainer(views)
self._pending_unblocked_tasks = []
self._counter_factory = counters.CounterFactory()
self._cache = None
self._metrics = DirectMetrics()
self._lock = threading.Lock()
def _initialize_keyed_states(self, root_transforms, value_to_consumers):
transform_keyed_states = {}
for transform in root_transforms:
transform_keyed_states[transform] = {}
for consumers in value_to_consumers.values():
for consumer in consumers:
transform_keyed_states[consumer] = {}
return transform_keyed_states
def use_pvalue_cache(self, cache):
assert not self._cache
self._cache = cache
def metrics(self):
# TODO. Should this be made a @property?
return self._metrics
@property
def has_cache(self):
return self._cache is not None
def append_to_cache(self, applied_ptransform, tag, elements):
with self._lock:
assert self._cache
self._cache.append(applied_ptransform, tag, elements)
def is_root_transform(self, applied_ptransform):
return applied_ptransform in self._root_transforms
def handle_result(
self, completed_bundle, completed_timers, result):
"""Handle the provided result produced after evaluating the input bundle.
Handle the provided TransformResult, produced after evaluating
the provided committed bundle (potentially None, if the result of a root
PTransform).
The result is the output of running the transform contained in the
TransformResult on the contents of the provided bundle.
Args:
completed_bundle: the bundle that was processed to produce the result.
completed_timers: the timers that were delivered to produce the
completed_bundle.
result: the TransformResult of evaluating the input bundle
Returns:
the committed bundles contained within the handled result.
"""
with self._lock:
committed_bundles, unprocessed_bundles = self._commit_bundles(
result.uncommitted_output_bundles,
result.unprocessed_bundles)
self._watermark_manager.update_watermarks(
completed_bundle, result.transform, completed_timers,
committed_bundles, unprocessed_bundles, result.watermark_hold)
self._metrics.commit_logical(completed_bundle,
result.logical_metric_updates)
# If the result is for a view, update side inputs container.
if (result.uncommitted_output_bundles
and result.uncommitted_output_bundles[0].pcollection
in self._pcollection_to_views):
for view in self._pcollection_to_views[
result.uncommitted_output_bundles[0].pcollection]:
for committed_bundle in committed_bundles:
# side_input must be materialized.
self._side_inputs_container.add_values(
view,
committed_bundle.get_elements_iterable(make_copy=True))
if (self.get_execution_context(result.transform)
.watermarks.input_watermark
== WatermarkManager.WATERMARK_POS_INF):
self._pending_unblocked_tasks.extend(
self._side_inputs_container.finalize_value_and_get_tasks(view))
if result.counters:
for counter in result.counters:
merged_counter = self._counter_factory.get_counter(
counter.name, counter.combine_fn)
merged_counter.accumulator.merge([counter.accumulator])
return committed_bundles
def get_aggregator_values(self, aggregator_or_name):
return self._counter_factory.get_aggregator_values(aggregator_or_name)
def schedule_pending_unblocked_tasks(self, executor_service):
if self._pending_unblocked_tasks:
with self._lock:
for task in self._pending_unblocked_tasks:
executor_service.submit(task)
self._pending_unblocked_tasks = []
def _commit_bundles(self, uncommitted_bundles, unprocessed_bundles):
"""Commits bundles and returns a immutable set of committed bundles."""
for in_progress_bundle in uncommitted_bundles:
producing_applied_ptransform = in_progress_bundle.pcollection.producer
watermarks = self._watermark_manager.get_watermarks(
producing_applied_ptransform)
in_progress_bundle.commit(watermarks.synchronized_processing_output_time)
for unprocessed_bundle in unprocessed_bundles:
unprocessed_bundle.commit(None)
return tuple(uncommitted_bundles), tuple(unprocessed_bundles)
def get_execution_context(self, applied_ptransform):
return _ExecutionContext(
self._watermark_manager.get_watermarks(applied_ptransform),
self._transform_keyed_states[applied_ptransform])
def create_bundle(self, output_pcollection):
"""Create an uncommitted bundle for the specified PCollection."""
return self._bundle_factory.create_bundle(output_pcollection)
def create_empty_committed_bundle(self, output_pcollection):
"""Create empty bundle useful for triggering evaluation."""
return self._bundle_factory.create_empty_committed_bundle(
output_pcollection)
def extract_fired_timers(self):
return self._watermark_manager.extract_fired_timers()
def is_done(self, transform=None):
"""Checks completion of a step or the pipeline.
Args:
transform: AppliedPTransform to check for completion.
Returns:
True if the step will not produce additional output. If transform is None
returns true if all steps are done.
"""
if transform:
return self._is_transform_done(transform)
for applied_ptransform in self._step_names:
if not self._is_transform_done(applied_ptransform):
return False
return True
def _is_transform_done(self, transform):
tw = self._watermark_manager.get_watermarks(transform)
return tw.output_watermark == WatermarkManager.WATERMARK_POS_INF
def get_value_or_schedule_after_output(self, side_input, task):
assert isinstance(task, TransformExecutor)
return self._side_inputs_container.get_value_or_schedule_after_output(
side_input, task)
class DirectUnmergedState(InMemoryUnmergedState):
"""UnmergedState implementation for the DirectRunner."""
def __init__(self):
super(DirectUnmergedState, self).__init__(defensive_copy=False)
class DirectStepContext(object):
"""Context for the currently-executing step."""
def __init__(self, keyed_existing_state):
self.keyed_existing_state = keyed_existing_state
def get_keyed_state(self, key):
# TODO(ccy): consider implementing transactional copy on write semantics
# for state so that work items can be safely retried.
if not self.keyed_existing_state.get(key):
self.keyed_existing_state[key] = DirectUnmergedState()
return self.keyed_existing_state[key]
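# Illustrative sketch (not called anywhere in the runner): per-key state in
# DirectStepContext is created lazily on first access and cached afterwards.
def _direct_step_context_demo():
  step_context = DirectStepContext({})
  state = step_context.get_keyed_state('some-key')  # creates the state
  return isinstance(state, DirectUnmergedState)     # -> True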
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_get_request(
subscription_id, # type: str
resource_group_name, # type: str
system_topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"systemTopicName": _SERIALIZER.url("system_topic_name", system_topic_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
subscription_id, # type: str
resource_group_name, # type: str
system_topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"systemTopicName": _SERIALIZER.url("system_topic_name", system_topic_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id, # type: str
resource_group_name, # type: str
system_topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-12-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"systemTopicName": _SERIALIZER.url("system_topic_name", system_topic_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_update_request_initial(
subscription_id, # type: str
resource_group_name, # type: str
system_topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"systemTopicName": _SERIALIZER.url("system_topic_name", system_topic_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_subscription_request(
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
filter = kwargs.pop('filter', None) # type: Optional[str]
top = kwargs.pop('top', None) # type: Optional[int]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.EventGrid/systemTopics')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
filter = kwargs.pop('filter', None) # type: Optional[str]
top = kwargs.pop('top', None) # type: Optional[int]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class SystemTopicsOperations(object):
"""SystemTopicsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.eventgrid.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name, # type: str
system_topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SystemTopic"
"""Get a system topic.
Get properties of a system topic.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param system_topic_name: Name of the system topic.
:type system_topic_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SystemTopic, or the result of cls(response)
:rtype: ~azure.mgmt.eventgrid.models.SystemTopic
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SystemTopic"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
system_topic_name=system_topic_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SystemTopic', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
system_topic_name, # type: str
system_topic_info, # type: "_models.SystemTopic"
**kwargs # type: Any
):
# type: (...) -> "_models.SystemTopic"
cls = kwargs.pop('cls', None) # type: ClsType["_models.SystemTopic"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(system_topic_info, 'SystemTopic')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
system_topic_name=system_topic_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SystemTopic', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SystemTopic', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name, # type: str
system_topic_name, # type: str
system_topic_info, # type: "_models.SystemTopic"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SystemTopic"]
"""Create a system topic.
Asynchronously creates a new system topic with the specified parameters.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param system_topic_name: Name of the system topic.
:type system_topic_name: str
:param system_topic_info: System Topic information.
:type system_topic_info: ~azure.mgmt.eventgrid.models.SystemTopic
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either SystemTopic or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.eventgrid.models.SystemTopic]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SystemTopic"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
system_topic_name=system_topic_name,
system_topic_info=system_topic_info,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SystemTopic', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}'} # type: ignore
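    # Illustrative caller-side usage (comments only; names are placeholders):
    #   poller = client.system_topics.begin_create_or_update(
    #       'example-rg', 'example-topic', system_topic_info)
    #   topic = poller.result()  # blocks until the LRO reaches a terminal state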
def _delete_initial(
self,
resource_group_name, # type: str
system_topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
system_topic_name=system_topic_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name, # type: str
system_topic_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Delete a system topic.
Delete existing system topic.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param system_topic_name: Name of the system topic.
:type system_topic_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
system_topic_name=system_topic_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
system_topic_name, # type: str
system_topic_update_parameters, # type: "_models.SystemTopicUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.SystemTopic"
cls = kwargs.pop('cls', None) # type: ClsType["_models.SystemTopic"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(system_topic_update_parameters, 'SystemTopicUpdateParameters')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
system_topic_name=system_topic_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SystemTopic', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SystemTopic', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name, # type: str
system_topic_name, # type: str
system_topic_update_parameters, # type: "_models.SystemTopicUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SystemTopic"]
"""Update a system topic.
Asynchronously updates a system topic with the specified parameters.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param system_topic_name: Name of the system topic.
:type system_topic_name: str
:param system_topic_update_parameters: SystemTopic update information.
:type system_topic_update_parameters: ~azure.mgmt.eventgrid.models.SystemTopicUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either SystemTopic or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.eventgrid.models.SystemTopic]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SystemTopic"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
system_topic_name=system_topic_name,
system_topic_update_parameters=system_topic_update_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SystemTopic', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics/{systemTopicName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SystemTopicsListResult"]
"""List system topics under an Azure subscription.
List all the system topics under an Azure subscription.
:param filter: The query used to filter the search results using OData syntax. Filtering is
permitted on the 'name' property only and with limited number of OData operations. These
operations are: the 'contains' function as well as the following logical operations: not, and,
or, eq (for equal), and ne (for not equal). No arithmetic operations are supported. The
following is a valid filter example: $filter=contains(namE, 'PATTERN') and name ne 'PATTERN-1'.
The following is not a valid filter example: $filter=location eq 'westus'.
:type filter: str
:param top: The number of results to return per page for the list operation. Valid range for
top parameter is 1 to 100. If not specified, the default number of results to be returned is 20
items per page.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SystemTopicsListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.eventgrid.models.SystemTopicsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SystemTopicsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SystemTopicsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.EventGrid/systemTopics'} # type: ignore
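    # Illustrative caller-side usage (comments only): ItemPaged follows the
    # next_link continuation requests transparently during iteration.
    #   for topic in client.system_topics.list_by_subscription(
    #           filter="contains(name, 'PATTERN')", top=20):
    #       print(topic.name)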
@distributed_trace
def list_by_resource_group(
self,
resource_group_name, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SystemTopicsListResult"]
"""List system topics under a resource group.
List all the system topics under a resource group.
:param resource_group_name: The name of the resource group within the user's subscription.
:type resource_group_name: str
:param filter: The query used to filter the search results using OData syntax. Filtering is
permitted on the 'name' property only and with limited number of OData operations. These
operations are: the 'contains' function as well as the following logical operations: not, and,
or, eq (for equal), and ne (for not equal). No arithmetic operations are supported. The
following is a valid filter example: $filter=contains(namE, 'PATTERN') and name ne 'PATTERN-1'.
The following is not a valid filter example: $filter=location eq 'westus'.
:type filter: str
:param top: The number of results to return per page for the list operation. Valid range for
top parameter is 1 to 100. If not specified, the default number of results to be returned is 20
items per page.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SystemTopicsListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.eventgrid.models.SystemTopicsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SystemTopicsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
filter=filter,
top=top,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SystemTopicsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/systemTopics'} # type: ignore
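# Minimal end-to-end sketch (comments only; assumes the standard
# azure-mgmt-eventgrid client wiring and azure-identity for credentials):
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.eventgrid import EventGridManagementClient
#
#   client = EventGridManagementClient(DefaultAzureCredential(),
#                                      '<subscription-id>')
#   topic = client.system_topics.get('example-rg', 'example-topic')
#   client.system_topics.begin_delete('example-rg', 'example-topic').wait()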
|
|
"""Pinball domain for reinforcement learning
"""
from .Domain import Domain
import numpy as np
from itertools import tee, izip
import itertools
from Tkinter import Tk, Canvas
import os
from rlpy.Tools import __rlpy_location__
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann",
"William Dabney", "Jonathan P. How"]
__license__ = "BSD 3-Clause"
__author__ = ["Pierre-Luc Bacon", # author of the original version
"Austin Hays"] # adapted for RLPy and TKinter
class Pinball(Domain):
"""
The goal of this domain is to maneuver a small ball on a plate into a hole.
The plate may contain obstacles which should be avoided.
**STATE:**
The state is given by a 4-dimensional vector, consisting of position and
velocity of the ball.
**ACTIONS:**
    There are 5 actions, standing for slanting the plate in the x or y
    direction or keeping the plate horizontal.
**REWARD:**
Slanting the plate costs -4 reward in addition to -1 reward for each timestep.
When the ball reaches the hole, the agent receives 10000 units of reward.
**REFERENCE:**
.. seealso::
G.D. Konidaris and A.G. Barto:
*Skill Discovery in Continuous Reinforcement Learning Domains using Skill Chaining.*
Advances in Neural Information Processing Systems 22, pages 1015-1023, December 2009.
"""
#: default location of config files shipped with rlpy
default_config_dir = os.path.join(
__rlpy_location__,
"Domains",
"PinballConfigs")
def __init__(self, noise=.1, episodeCap=1000,
configuration=os.path.join(default_config_dir, "pinball_simple_single.cfg")):
"""
configuration:
location of the configuration file
episodeCap:
maximum length of an episode
noise:
with probability noise, a uniformly random action is executed
"""
self.NOISE = noise
self.configuration = configuration
self.screen = None
self.episodeCap = episodeCap
self.actions_num = 5
self.actions = [
PinballModel.ACC_X,
PinballModel.DEC_Y,
PinballModel.DEC_X,
PinballModel.ACC_Y,
PinballModel.ACC_NONE]
self.statespace_limits = np.array(
[[0.0, 1.0], [0.0, 1.0], [-2.0, 2.0], [-2.0, 2.0]])
        self.continuous_dims = [0, 1, 2, 3]  # all four state dimensions are continuous
super(Pinball, self).__init__()
self.environment = PinballModel(
self.configuration,
random_state=self.random_state)
def showDomain(self, a):
if self.screen is None:
master = Tk()
master.title('RLPY Pinball')
self.screen = Canvas(master, width=500.0, height=500.0)
self.screen.configure(background='LightGray')
self.screen.pack()
self.environment_view = PinballView(
self.screen,
500.0,
500.0,
self.environment)
self.environment_view.blit()
self.screen.pack()
self.screen.update()
def step(self, a):
s = self.state
[self.environment.ball.position[0],
self.environment.ball.position[1],
self.environment.ball.xdot,
self.environment.ball.ydot] = s
if self.random_state.random_sample() < self.NOISE:
# Random Move
a = self.random_state.choice(self.possibleActions())
reward = self.environment.take_action(a)
self.environment._check_bounds()
state = np.array(self.environment.get_state())
self.state = state.copy()
return reward, state, self.isTerminal(), self.possibleActions()
def s0(self):
        (self.environment.ball.position[0],
         self.environment.ball.position[1]) = self.environment.start_pos
self.environment.ball.xdot, self.environment.ball.ydot = 0.0, 0.0
self.state = np.array(
[self.environment.ball.position[0], self.environment.ball.position[1],
self.environment.ball.xdot, self.environment.ball.ydot])
return self.state, self.isTerminal(), self.possibleActions()
def possibleActions(self, s=0):
return np.array(self.actions)
def isTerminal(self):
return self.environment.episode_ended()
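# Illustrative sketch (not called anywhere): a single transition through the
# RLPy Domain interface above. Uses the default configuration file shipped
# with rlpy and does not trigger any rendering.
def _pinball_domain_demo():
    domain = Pinball(noise=.1)
    s, terminal, actions = domain.s0()
    r, ns, terminal, actions = domain.step(PinballModel.ACC_X)
    return r, ns, terminal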
class BallModel:
""" This class maintains the state of the ball
in the pinball domain. It takes care of moving
it according to the current velocity and drag coefficient.
"""
DRAG = 0.995
def __init__(self, start_position, radius):
"""
:param start_position: The initial position
:type start_position: float
:param radius: The ball radius
:type radius: float
"""
self.position = start_position
self.radius = radius
self.xdot = 0.0
self.ydot = 0.0
def add_impulse(self, delta_xdot, delta_ydot):
""" Change the momentum of the ball
:param delta_xdot: The change in velocity in the x direction
:type delta_xdot: float
:param delta_ydot: The change in velocity in the y direction
:type delta_ydot: float
"""
self.xdot += delta_xdot / 5.0
self.ydot += delta_ydot / 5.0
self.xdot = self._clip(self.xdot)
self.ydot = self._clip(self.ydot)
def add_drag(self):
""" Add a fixed amount of drag to the current velocity """
self.xdot *= self.DRAG
self.ydot *= self.DRAG
def step(self):
""" Move the ball by one increment """
self.position[0] += self.xdot * self.radius / 20.0
self.position[1] += self.ydot * self.radius / 20.0
def _clip(self, val, low=-2, high=2):
""" Clip a value in a given range """
if val > high:
val = high
if val < low:
val = low
return val
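# Illustrative sketch (not called anywhere): basic BallModel dynamics with
# arbitrary values.
def _ball_model_demo():
    ball = BallModel(start_position=[0.2, 0.9], radius=0.02)
    ball.add_impulse(1, 0)   # the delta is scaled by 1/5 and clipped to [-2, 2]
    for _ in xrange(20):
        ball.step()          # each step moves by velocity * radius / 20
    ball.add_drag()          # velocity decays by DRAG (0.995)
    return ball.position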
class PinballObstacle:
""" This class represents a single polygon obstacle in the
pinball domain and detects when a :class:`BallModel` hits it.
When a collision is detected, it also provides a way to
compute the appropriate effect to apply on the ball.
"""
def __init__(self, points):
"""
:param points: A list of points defining the polygon
:type points: list of lists
"""
self.points = points
self.min_x = min(self.points, key=lambda pt: pt[0])[0]
self.max_x = max(self.points, key=lambda pt: pt[0])[0]
self.min_y = min(self.points, key=lambda pt: pt[1])[1]
self.max_y = max(self.points, key=lambda pt: pt[1])[1]
self._double_collision = False
self._intercept = None
def collision(self, ball):
""" Determines if the ball hits this obstacle
:param ball: An instance of :class:`BallModel`
:type ball: :class:`BallModel`
"""
self._double_collision = False
if ball.position[0] - ball.radius > self.max_x:
return False
if ball.position[0] + ball.radius < self.min_x:
return False
if ball.position[1] - ball.radius > self.max_y:
return False
if ball.position[1] + ball.radius < self.min_y:
return False
a, b = tee(np.vstack([np.array(self.points), self.points[0]]))
next(b, None)
intercept_found = False
for pt_pair in izip(a, b):
if self._intercept_edge(pt_pair, ball):
if intercept_found:
# Ball has hit a corner
self._intercept = self._select_edge(
pt_pair,
self._intercept,
ball)
self._double_collision = True
else:
self._intercept = pt_pair
intercept_found = True
return intercept_found
def collision_effect(self, ball):
""" Based of the collision detection result triggered
in :func:`PinballObstacle.collision`, compute the
change in velocity.
:param ball: An instance of :class:`BallModel`
:type ball: :class:`BallModel`
"""
if self._double_collision:
return [-ball.xdot, -ball.ydot]
# Normalize direction
obstacle_vector = self._intercept[1] - self._intercept[0]
if obstacle_vector[0] < 0:
obstacle_vector = self._intercept[0] - self._intercept[1]
velocity_vector = np.array([ball.xdot, ball.ydot])
theta = self._angle(velocity_vector, obstacle_vector) - np.pi
if theta < 0:
theta += 2 * np.pi
intercept_theta = self._angle([-1, 0], obstacle_vector)
theta += intercept_theta
if theta > 2 * np.pi:
theta -= 2 * np.pi
velocity = np.linalg.norm([ball.xdot, ball.ydot])
return [velocity * np.cos(theta), velocity * np.sin(theta)]
def _select_edge(self, intersect1, intersect2, ball):
""" If the ball hits a corner, select one of two edges.
:param intersect1: A pair of points defining an edge of the polygon
:type intersect1: list of lists
:param intersect2: A pair of points defining an edge of the polygon
:type intersect2: list of lists
:returns: The edge with the smallest angle with the velocity vector
:rtype: list of lists
"""
velocity = np.array([ball.xdot, ball.ydot])
obstacle_vector1 = intersect1[1] - intersect1[0]
obstacle_vector2 = intersect2[1] - intersect2[0]
angle1 = self._angle(velocity, obstacle_vector1)
if angle1 > np.pi:
angle1 -= np.pi
angle2 = self._angle(velocity, obstacle_vector2)
        if angle2 > np.pi:
angle2 -= np.pi
if np.abs(angle1 - (np.pi / 2.0)) < np.abs(angle2 - (np.pi / 2.0)):
return intersect1
return intersect2
def _angle(self, v1, v2):
""" Compute the angle difference between two vectors
:param v1: The x,y coordinates of the vector
:type: v1: list
:param v2: The x,y coordinates of the vector
:type: v2: list
:rtype: float
"""
angle_diff = np.arctan2(v1[0], v1[1]) - np.arctan2(v2[0], v2[1])
if angle_diff < 0:
angle_diff += 2 * np.pi
return angle_diff
def _intercept_edge(self, pt_pair, ball):
""" Compute the projection on and edge and find out
if it intercept with the ball.
:param pt_pair: The pair of points defining an edge
:type pt_pair: list of lists
:param ball: An instance of :class:`BallModel`
:type ball: :class:`BallModel`
:returns: True if the ball has hit an edge of the polygon
:rtype: bool
"""
# Find the projection on an edge
obstacle_edge = pt_pair[1] - pt_pair[0]
difference = np.array(ball.position) - pt_pair[0]
scalar_proj = difference.dot(
obstacle_edge) / obstacle_edge.dot(obstacle_edge)
if scalar_proj > 1.0:
scalar_proj = 1.0
elif scalar_proj < 0.0:
scalar_proj = 0.0
# Compute the distance to the closest point
closest_pt = pt_pair[0] + obstacle_edge * scalar_proj
obstacle_to_ball = ball.position - closest_pt
distance = obstacle_to_ball.dot(obstacle_to_ball)
if distance <= ball.radius * ball.radius:
# A collision only if the ball is not already moving away
velocity = np.array([ball.xdot, ball.ydot])
ball_to_obstacle = closest_pt - ball.position
angle = self._angle(ball_to_obstacle, velocity)
if angle > np.pi:
angle = 2 * np.pi - angle
if angle > np.pi / 1.99:
return False
return True
else:
return False
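# Illustrative sketch (not called anywhere): bounce a ball off a triangular
# obstacle. Coordinates are arbitrary values in the unit square.
def _obstacle_demo():
    obstacle = PinballObstacle([[0.2, 0.2], [0.4, 0.2], [0.3, 0.4]])
    ball = BallModel([0.3, 0.19], 0.02)
    ball.add_impulse(0, 5)   # ydot becomes 5 / 5 = 1.0, heading into the edge
    if obstacle.collision(ball):
        ball.xdot, ball.ydot = obstacle.collision_effect(ball)
    return ball.xdot, ball.ydot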
class PinballModel:
""" This class is a self-contained model of the pinball
domain for reinforcement learning.
It can be used either over RL-Glue through the :class:`PinballRLGlue`
adapter or interactively with :class:`PinballView`.
"""
ACC_X = 0
ACC_Y = 1
DEC_X = 2
DEC_Y = 3
ACC_NONE = 4
STEP_PENALTY = -1
THRUST_PENALTY = -5
END_EPISODE = 10000
def __init__(self, configuration, random_state=np.random.RandomState()):
""" Read a configuration file for Pinball and draw the domain to screen
:param configuration: a configuration file containing the polygons,
source(s) and target location.
:type configuration: str
"""
self.random_state = random_state
        self.action_effects = {
            self.ACC_X: (1, 0),
            self.ACC_Y: (0, 1),
            self.DEC_X: (-1, 0),
            self.DEC_Y: (0, -1),
            self.ACC_NONE: (0, 0),
        }
# Set up the environment according to the configuration
self.obstacles = []
self.target_pos = []
self.target_rad = 0.01
ball_rad = 0.01
start_pos = []
with open(configuration) as fp:
for line in fp.readlines():
tokens = line.strip().split()
if not len(tokens):
continue
elif tokens[0] == 'polygon':
self.obstacles.append(
PinballObstacle(zip(*[iter(map(float, tokens[1:]))] * 2)))
elif tokens[0] == 'target':
self.target_pos = [float(tokens[1]), float(tokens[2])]
self.target_rad = float(tokens[3])
elif tokens[0] == 'start':
start_pos = zip(*[iter(map(float, tokens[1:]))] * 2)
elif tokens[0] == 'ball':
ball_rad = float(tokens[1])
self.start_pos = start_pos[0]
a = self.random_state.randint(len(start_pos))
self.ball = BallModel(list(start_pos[a]), ball_rad)
def get_state(self):
""" Access the current 4-dimensional state vector
:returns: a list containing the x position, y position, xdot, ydot
:rtype: list
"""
return (
[self.ball.position[0],
self.ball.position[1],
self.ball.xdot,
self.ball.ydot]
)
def take_action(self, action):
""" Take a step in the environment
:param action: The action to apply over the ball
:type action: int
"""
for i in xrange(20):
if i == 0:
self.ball.add_impulse(*self.action_effects[action])
self.ball.step()
# Detect collisions
ncollision = 0
dxdy = np.array([0, 0])
for obs in self.obstacles:
if obs.collision(self.ball):
dxdy = dxdy + obs.collision_effect(self.ball)
ncollision += 1
if ncollision == 1:
self.ball.xdot = dxdy[0]
self.ball.ydot = dxdy[1]
if i == 19:
self.ball.step()
elif ncollision > 1:
self.ball.xdot = -self.ball.xdot
self.ball.ydot = -self.ball.ydot
if self.episode_ended():
return self.END_EPISODE
self.ball.add_drag()
self._check_bounds()
if action == self.ACC_NONE:
return self.STEP_PENALTY
return self.THRUST_PENALTY
def episode_ended(self):
""" Find out if the ball reached the target
:returns: True if the ball reached the target position
:rtype: bool
"""
return (
np.linalg.norm(np.array(self.ball.position)
- np.array(self.target_pos)) < self.target_rad
)
def _check_bounds(self):
""" Make sure that the ball stays within the environment """
if self.ball.position[0] > 1.0:
self.ball.position[0] = 0.95
if self.ball.position[0] < 0.0:
self.ball.position[0] = 0.05
if self.ball.position[1] > 1.0:
self.ball.position[1] = 0.95
if self.ball.position[1] < 0.0:
self.ball.position[1] = 0.05
class PinballView:
""" This class displays a :class:`PinballModel`
This class is used in conjunction with the :func:`run_pinballview`
function, acting as a *controller*.
"""
def __init__(self, screen, width, height, model):
"""
Changed from original PyGame implementation to work
with Tkinter visualization.
"""
self.screen = screen
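        # Note (hedged): the width and height arguments are ignored in this
        # Tkinter port; the canvas size is fixed at 500x500 pixels below.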
self.width = 500.0
self.height = 500.0
self.model = model
self.x, self.y = self._to_pixels(self.model.ball.position)
self.rad = int(self.model.ball.radius * self.width)
self.DARK_GRAY = [64, 64, 64]
self.DARK_BLUE = [0, 0, 128]
self.LIGHT_GRAY = [232, 232, 232]
self.BALL_COLOR = [0, 0, 255]
self.TARGET_COLOR = [255, 0, 0]
for obs in model.obstacles:
coords_list = map(self._to_pixels, obs.points)
chain = itertools.chain(*coords_list)
coords = list(chain)
self.screen.create_polygon(coords, fill='blue')
self.screen.pack()
self.target_x, self.target_y = self._to_pixels(self.model.target_pos)
self.target_rad = int(self.model.target_rad * self.width)
target_id = self.drawcircle(
self.screen,
self.target_x,
self.target_y,
self.target_rad,
'red')
self.ball_id = self.drawcircle(
self.screen,
self.x,
self.y,
self.rad,
'black')
self.screen.pack()
def drawcircle(self, canv, x, y, rad, color):
return (
canv.create_oval(
x - rad,
y - rad,
x + rad,
y + rad,
width=0,
fill=color)
)
def _to_pixels(self, pt):
""" Converts from real units in the 0-1 range to pixel units
:param pt: a point in real units
:type pt: list
:returns: the input point in pixel units
:rtype: list
"""
return [int(pt[0] * self.width), int(pt[1] * self.height)]
def blit(self):
""" Blit the ball onto the background surface """
        self.x, self.y = self._to_pixels(self.model.ball.position)
        self.screen.coords(
            self.ball_id,
            self.x - self.rad,
            self.y - self.rad,
            self.x + self.rad,
            self.y + self.rad)
self.screen.pack()
def run_pinballview(width, height, configuration):
"""
Changed from original Pierre-Luc Bacon implementation to reflect
the visualization changes in the PinballView Class.
"""
width, height = float(width), float(height)
master = Tk()
master.title('RLPY Pinball')
screen = Canvas(master, width=500.0, height=500.0)
screen.configure(background='LightGray')
screen.pack()
environment = PinballModel(configuration)
environment_view = PinballView(screen, width, height, environment)
actions = [
PinballModel.ACC_X,
PinballModel.DEC_Y,
PinballModel.DEC_X,
PinballModel.ACC_Y,
PinballModel.ACC_NONE]
done = False
while not done:
user_action = np.random.choice(actions)
environment_view.blit()
if environment.episode_ended():
done = True
if environment.take_action(user_action) == environment.END_EPISODE:
done = True
environment_view.blit()
screen.update()
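

# Hedged usage sketch (not part of the original module): launch the viewer
# with the random policy above. 'pinball_simple_single.cfg' is a hypothetical
# file name; any configuration in the polygon/target/start/ball format parsed
# by PinballModel.__init__ will do.
if __name__ == '__main__':
    run_pinballview(500, 500, 'pinball_simple_single.cfg')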
|
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgets/markers.py
__version__=''' $Id: markers.py 3660 2010-02-08 18:17:33Z damian $ '''
__doc__="""This modules defines a collection of markers used in charts.
"""
from types import FunctionType, ClassType
from reportlab.graphics.shapes import Rect, Line, Circle, Polygon, Drawing, Group
from reportlab.graphics.widgets.signsandsymbols import SmileyFace
from reportlab.graphics.widgetbase import Widget
from reportlab.lib.validators import isNumber, isColorOrNone, OneOf, Validator
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.lib.colors import black
from reportlab.graphics.widgets.flags import Flag
from math import sin, cos, pi
import copy, new
_toradians = pi/180.0
class Marker(Widget):
'''A polymorphic class of markers'''
_attrMap = AttrMap(BASE=Widget,
kind = AttrMapValue(
OneOf(None, 'Square', 'Diamond', 'Circle', 'Cross', 'Triangle', 'StarSix',
'Pentagon', 'Hexagon', 'Heptagon', 'Octagon', 'StarFive',
'FilledSquare', 'FilledCircle', 'FilledDiamond', 'FilledCross',
'FilledTriangle','FilledStarSix', 'FilledPentagon', 'FilledHexagon',
'FilledHeptagon', 'FilledOctagon', 'FilledStarFive',
'Smiley','ArrowHead', 'FilledArrowHead'),
desc='marker type name'),
size = AttrMapValue(isNumber,desc='marker size'),
x = AttrMapValue(isNumber,desc='marker x coordinate'),
y = AttrMapValue(isNumber,desc='marker y coordinate'),
dx = AttrMapValue(isNumber,desc='marker x coordinate adjustment'),
dy = AttrMapValue(isNumber,desc='marker y coordinate adjustment'),
angle = AttrMapValue(isNumber,desc='marker rotation'),
fillColor = AttrMapValue(isColorOrNone, desc='marker fill colour'),
strokeColor = AttrMapValue(isColorOrNone, desc='marker stroke colour'),
strokeWidth = AttrMapValue(isNumber, desc='marker stroke width'),
arrowBarbDx = AttrMapValue(isNumber, desc='arrow only the delta x for the barbs'),
arrowHeight = AttrMapValue(isNumber, desc='arrow only height'),
)
def __init__(self,*args,**kw):
self.setProperties(kw)
self._setKeywords(
kind = None,
strokeColor = black,
strokeWidth = 0.1,
fillColor = None,
size = 5,
x = 0,
y = 0,
dx = 0,
dy = 0,
angle = 0,
arrowBarbDx = -1.25,
arrowHeight = 1.875,
)
def clone(self):
return new.instance(self.__class__,self.__dict__.copy())
def _Smiley(self):
x, y = self.x+self.dx, self.y+self.dy
d = self.size/2.0
s = SmileyFace()
s.fillColor = self.fillColor
s.strokeWidth = self.strokeWidth
s.strokeColor = self.strokeColor
s.x = x-d
s.y = y-d
s.size = d*2
return s
def _Square(self):
x, y = self.x+self.dx, self.y+self.dy
d = self.size/2.0
s = Rect(x-d,y-d,2*d,2*d,fillColor=self.fillColor,strokeColor=self.strokeColor,strokeWidth=self.strokeWidth)
return s
def _Diamond(self):
d = self.size/2.0
return self._doPolygon((-d,0,0,d,d,0,0,-d))
def _Circle(self):
x, y = self.x+self.dx, self.y+self.dy
s = Circle(x,y,self.size/2.0,fillColor=self.fillColor,strokeColor=self.strokeColor,strokeWidth=self.strokeWidth)
return s
def _Cross(self):
x, y = self.x+self.dx, self.y+self.dy
s = float(self.size)
h, s = s/2, s/6
return self._doPolygon((-s,-h,-s,-s,-h,-s,-h,s,-s,s,-s,h,s,h,s,s,h,s,h,-s,s,-s,s,-h))
def _Triangle(self):
x, y = self.x+self.dx, self.y+self.dy
r = float(self.size)/2
c = 30*_toradians
s = sin(30*_toradians)*r
c = cos(c)*r
return self._doPolygon((0,r,-c,-s,c,-s))
def _StarSix(self):
r = float(self.size)/2
c = 30*_toradians
s = sin(c)*r
c = cos(c)*r
z = s/2
g = c/2
return self._doPolygon((0,r,-z,s,-c,s,-s,0,-c,-s,-z,-s,0,-r,z,-s,c,-s,s,0,c,s,z,s))
def _StarFive(self):
R = float(self.size)/2
r = R*sin(18*_toradians)/cos(36*_toradians)
P = []
angle = 90
for i in xrange(5):
for radius in R, r:
theta = angle*_toradians
P.append(radius*cos(theta))
P.append(radius*sin(theta))
angle = angle + 36
return self._doPolygon(P)
def _Pentagon(self):
return self._doNgon(5)
def _Hexagon(self):
return self._doNgon(6)
def _Heptagon(self):
return self._doNgon(7)
def _Octagon(self):
return self._doNgon(8)
def _ArrowHead(self):
s = self.size
h = self.arrowHeight
b = self.arrowBarbDx
return self._doPolygon((0,0,b,-h,s,0,b,h))
def _doPolygon(self,P):
x, y = self.x+self.dx, self.y+self.dy
if x or y: P = map(lambda i,P=P,A=[x,y]: P[i] + A[i&1], range(len(P)))
return Polygon(P, strokeWidth =self.strokeWidth, strokeColor=self.strokeColor, fillColor=self.fillColor)
def _doFill(self):
old = self.fillColor
if old is None:
self.fillColor = self.strokeColor
r = (self.kind and getattr(self,'_'+self.kind[6:]) or Group)()
self.fillColor = old
return r
def _doNgon(self,n):
P = []
size = float(self.size)/2
for i in xrange(n):
r = (2.*i/n+0.5)*pi
P.append(size*cos(r))
P.append(size*sin(r))
return self._doPolygon(P)
_FilledCircle = _doFill
_FilledSquare = _doFill
_FilledDiamond = _doFill
_FilledCross = _doFill
_FilledTriangle = _doFill
_FilledStarSix = _doFill
_FilledPentagon = _doFill
_FilledHexagon = _doFill
_FilledHeptagon = _doFill
_FilledOctagon = _doFill
_FilledStarFive = _doFill
_FilledArrowHead = _doFill
def draw(self):
if self.kind:
m = getattr(self,'_'+self.kind)
if self.angle:
_x, _dx, _y, _dy = self.x, self.dx, self.y, self.dy
self.x, self.dx, self.y, self.dy = 0,0,0,0
try:
m = m()
finally:
self.x, self.dx, self.y, self.dy = _x, _dx, _y, _dy
if not isinstance(m,Group):
_m, m = m, Group()
m.add(_m)
if self.angle: m.rotate(self.angle)
x, y = _x+_dx, _y+_dy
if x or y: m.shift(x,y)
else:
m = m()
else:
m = Group()
return m
def uSymbol2Symbol(uSymbol,x,y,color):
if type(uSymbol) == FunctionType:
symbol = uSymbol(x, y, 5, color)
elif type(uSymbol) == ClassType and issubclass(uSymbol,Widget):
size = 10.
symbol = uSymbol()
symbol.x = x - (size/2)
symbol.y = y - (size/2)
try:
symbol.size = size
symbol.color = color
except:
pass
elif isinstance(uSymbol,Marker) or isinstance(uSymbol,Flag):
symbol = uSymbol.clone()
if isinstance(uSymbol,Marker): symbol.fillColor = symbol.fillColor or color
symbol.x, symbol.y = x, y
else:
symbol = None
return symbol
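# Hedged usage note (not in the original): chart code typically funnels a
# user-supplied symbol attribute through uSymbol2Symbol, e.g.
#   sym = uSymbol2Symbol(makeMarker('Circle'), x=10, y=10, color=black)
# which returns a positioned clone (or None for unsupported inputs).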
class _isSymbol(Validator):
def test(self,x):
return hasattr(x,'__call__') or isinstance(x,Marker) or isinstance(x,Flag) \
or (type(x)==ClassType and issubclass(x,Widget))
isSymbol = _isSymbol()
def makeMarker(name,**kw):
if Marker._attrMap['kind'].validate(name):
m = Marker(**kw)
m.kind = name
elif name[-5:]=='_Flag' and Flag._attrMap['kind'].validate(name[:-5]):
m = Flag(**kw)
m.kind = name[:-5]
m.size = 10
else:
raise ValueError, "Invalid marker name %s" % name
return m
if __name__=='__main__':
D = Drawing()
D.add(Marker())
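    # Hedged extra example (not in the original): a named marker built with
    # makeMarker, offset so it does not overlap the default Marker above.
    m = makeMarker('FilledStarFive', size=10)
    m.x, m.y = 20, 0
    D.add(m)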
D.save(fnRoot='Marker',formats=['pdf'], outDir='/tmp')
|
|
# Licensed as BSD by Yuriy Chushkin of the ESRF on 2014-08-06
################################################################################
# Copyright (c) 2014, the European Synchrotron Radiation Facility #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# * Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# * Neither the name of the European Synchrotron Radiation Facility nor the #
# names of its contributors may be used to endorse or promote products #
# derived from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
################################################################################
import sys, os, random, matplotlib
from PyQt4 import QtGui, QtCore
from numpy import *
import numpy.ma as ma
matplotlib.rc('image',origin = 'lower')
matplotlib.rc('image',interpolation = 'nearest')
matplotlib.rc('legend',numpoints = 1)
matplotlib.rc('legend',fontsize = 11)
from matplotlib import pylab
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.widgets import RectangleSelector,Cursor
class MplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
global symbols
styles = ['o', '^', 'v', '<', '>', 's', '+']
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
symbols=[]
for style in styles:
for color in colors:
symbols.append(color+style+'-')
def __init__(self, parent=None, name=None, width=5, height=4, dpi=100, bgcolor=None):
self.parent = parent
if self.parent:
bgc = parent.backgroundBrush().color()
bgcolor = float(bgc.red())/255.0, float(bgc.green())/255.0, float(bgc.blue())/255.0
#bgcolor = "#%02X%02X%02X" % (bgc.red(), bgc.green(), bgc.blue())
self.fig = Figure(figsize=(width, height), dpi=dpi, facecolor=bgcolor, edgecolor=bgcolor)
self.axes = self.fig.add_subplot(111)
# We want the axes cleared every time plot() is called
self.axes.hold(False)
FigureCanvas.__init__(self, self.fig)
# self.reparent(parent, QPoint(0, 0))
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
###################################################################
class MplWidget(QtGui.QWidget):
"""Widget defined in Qt Designer"""
def __init__(self, parent = None):
# initialization of Qt MainWindow widget
QtGui.QWidget.__init__(self, parent)
# set the canvas to the Matplotlib widget
self.canvas = MplCanvas()
# create a vertical box layout
self.vbl = QtGui.QVBoxLayout()
# add mpl widget to the vertical box
self.vbl.addWidget(self.canvas)
# set the layout to the vertical box
self.setLayout(self.vbl)
####################################################################
def line_select_callback(self,event1, event2):
'event1 and event2 are the press and release events'
x1, y1 = event1.xdata, event1.ydata
x2, y2 = event2.xdata, event2.ydata
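        # crude guard that press and release points differ; note it compares
        # the products x*y, so it can misfire on distinct points whose
        # products happen to be equal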
if x1*y1!=x2*y2:
xmin=min(x1,x2)
xmax=max(x1,x2)
ymin=min(y1,y2)
ymax=max(y1,y2)
zoom=array([xmin,xmax,ymin,ymax])
self.axes.axis(zoom)
matplotlib.rc('image',origin = 'lower')
def on_move(self,event):
global text1,image, title,fname
#get the x and y pixel coords
self.canvas.axes.set_title(fname)
if event.inaxes is not None:
x=int(event.xdata)
y=int(event.ydata)
if type(image) is ma.masked_array:
if image.mask[y,x]:
lab= 'data coords: %d,%d \nMasked pixel' % (x, y)
else:
lab= 'data coords: %d,%d \nInt: %3.1f' % (x, y,image[y,x])
else:
lab= 'data coords: %d,%d \nInt: %3.1f' % (x, y,image[y,x])
text1.set_text(lab)
else:
self.canvas.axes.set_title(fname)
text1.set_text(' ')
self.canvas.draw()
################################
def on_movel(self,event):
global text1,image, title,fname
#get the x and y pixel coords
self.canvas.axes.set_title(fname)
if not event.inaxes:
text1.set_text(' ')
return
x,y=float(event.xdata),float(event.ydata)
lab= 'data coords: x=%.4f, y=%.4f' % (x, y)
text1.set_text(lab)
self.canvas.draw()
text1.set_text(' ')
##################################
def sizeHint(self):
w = self.canvas.fig.get_figwidth()
h = self.canvas.fig.get_figheight()
return QtCore.QSize(w, h)
def minimumSizeHint(self):
return QtCore.QSize(10, 10)
def update_figure(self,n,data,filename,mymask='none',logscale='log',Zoom='auto',zmax='auto',zmin='auto'):
from numpy.ma import masked_array
global text1,image,cid,title,fname
try:
self.canvas.fig.canvas.mpl_disconnect(cid)
except: pass
fname=filename
if Zoom=='fixed':
axis_zoom=self.canvas.axes.axis()
text1=self.canvas.fig.text(0,0,'')
self.canvas.axes.set_xscale('linear')
self.canvas.axes.set_yscale('linear')
if logscale=='log':
data=log(data)
if n==0:
image=masked_array(data,mask=mymask)
if n==1:
image=data
cid = self.canvas.fig.canvas.mpl_connect('motion_notify_event', self.on_move)
        if zmax != 'auto' and zmin != 'auto':
            print 'rescaling all'
            a = self.canvas.axes.imshow(image, vmax=zmax, vmin=zmin)
        elif zmin != 'auto':
            a = self.canvas.axes.imshow(image, vmin=zmin)
        elif zmax != 'auto':
            a = self.canvas.axes.imshow(image, vmax=zmax)
else:
a=self.canvas.axes.imshow(image)
if Zoom=='fixed':
a=self.canvas.axes.axis(axis_zoom)
LS=RectangleSelector(self.canvas.axes, self.line_select_callback,drawtype='box',rectprops=dict(edgecolor = 'black',
alpha=0.5, fill=False),lineprops = dict(color='black', linestyle='-',linewidth = 10, alpha=0.5))
self.canvas.axes.set_title(filename)
self.canvas.draw()
fname=filename
def update_plot(self,cf_data,hold,firstq,lastq,qs,filename,ylabel):
#styles = ['o', '^', 'v', '<', '>', 's', '+']
#colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
#symbols=[]
#for style in styles:
# for color in colors:
# symbols.append(color+style+'-')
self.canvas.axes.hold(hold)
for i in range(firstq,lastq):
label=r'$%s:\hspace{1} q=%5.4f \hspace{1} \AA^{-1}$' % (i,float(qs[i-1]))
self.canvas.axes.semilogx(cf_data[:,0],cf_data[:,i],symbols[i], label=label)
labels = [line.get_label() for line in self.canvas.axes.lines]
self.canvas.axes.set_xlabel("t (s)")
self.canvas.axes.set_ylabel(ylabel)
self.canvas.axes.legend(self.canvas.axes.lines, labels, 'best')
leg= self.canvas.fig.gca().get_legend()
leg.draw_frame(False)
self.canvas.axes.set_title(filename)
self.canvas.draw()
def update_plotlog(self,x1,y1,x2,y2):
global text1,image,title,fname,cid
try:
self.fig.canvas.mpl_disconnect(cid)
except: pass
text1=self.canvas.fig.text(0,0,'')
self.canvas.axes.set_title('I(q)')
bgcolor='None'
axis_zoom=self.canvas.axes.axis()
self.canvas.axes.axis('tight')
if len(shape(x1))==0:
self.canvas.axes.loglog((x1,),(y1,),'ro',x2,y2,'b-')
else:
self.canvas.axes.loglog(x1,y1,'ro',x2,y2,'b-')
self.canvas.draw()
self.canvas.axes.set_xlabel("Q (1/A)")
self.canvas.axes.set_ylabel("I(q) (arb.u.)")
fname='I(q)'
cid = self.canvas.fig.canvas.mpl_connect('motion_notify_event', self.on_movel)
def remove_plot(self,y):
ry=range(len(y[0,:]))
for i in ry:
for line in self.canvas.axes.lines:
if average(y[:,i]-line.get_ydata())==0:
self.canvas.axes.lines.remove(line)
if self.canvas.axes.lines!=[]:
labels = [line.get_label() for line in self.canvas.axes.lines]
self.canvas.axes.legend(self.canvas.axes.lines, labels, 'best')
leg= self.canvas.fig.gca().get_legend()
leg.draw_frame(False)
else:
labels = []
self.canvas.axes.legend_= None
self.canvas.draw()
def cla(self):
self.canvas.axes.lines=[]
labels = []
self.canvas.axes.legend_= None
self.canvas.draw()
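

# Hedged usage sketch (not part of the original module): embed MplWidget in a
# minimal QApplication and plot some made-up data. All widget and module names
# are the ones defined or imported above; only the sample data is invented.
if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    w = MplWidget()
    w.canvas.axes.plot(range(10), [v * v for v in range(10)], 'bo-')
    w.show()
    sys.exit(app.exec_())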
|
|
#!/usr/bin/env python
#===-- x86_64_linux_target_definition.py -----------------------------*- C++ -*-===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===//
#----------------------------------------------------------------------
# DESCRIPTION
#
# This file can be used with the following setting:
# plugin.process.gdb-remote.target-definition-file
# This setting should be used when you are trying to connect to a
# remote GDB server that doesn't support any of the register discovery
# packets that LLDB normally uses.
#
# Why is this necessary? LLDB doesn't require a new build of LLDB that
# targets each new architecture you will debug with. Instead, all
# architectures are supported and LLDB relies on extra GDB server
# packets to discover the target we are connecting to so that it can
# show the right registers for each target. This allows the GDB server
# to change and add new registers without requiring a new LLDB build
# just so we can see new registers.
#
# This file implements the x86_64 registers for the linux version of
# GDB and allows you to connect to servers that use this register set.
#
# USAGE
#
# (lldb) settings set plugin.process.gdb-remote.target-definition-file /path/to/x86_64_linux_target_definition.py
# (lldb) gdb-remote other.baz.com:1234
#
# The target definition file will get used if and only if the
# qRegisterInfo packets are not supported when connecting to a remote
# GDB server.
#----------------------------------------------------------------------
from lldb import *
# Compiler and DWARF register numbers
name_to_gcc_dwarf_regnum = {
'rax': 0,
'rdx': 1,
'rcx': 2,
'rbx': 3,
'rsi': 4,
'rdi': 5,
'rbp': 6,
'rsp': 7,
'r8': 8,
'r9': 9,
'r10': 10,
'r11': 11,
'r12': 12,
'r13': 13,
'r14': 14,
'r15': 15,
'rip': 16,
'xmm0': 17,
'xmm1': 18,
'xmm2': 19,
'xmm3': 20,
'xmm4': 21,
'xmm5': 22,
'xmm6': 23,
'xmm7': 24,
'xmm8': 25,
'xmm9': 26,
'xmm10': 27,
'xmm11': 28,
'xmm12': 29,
'xmm13': 30,
'xmm14': 31,
'xmm15': 32,
'stmm0': 33,
'stmm1': 34,
'stmm2': 35,
'stmm3': 36,
'stmm4': 37,
'stmm5': 38,
'stmm6': 39,
    'stmm7': 40,
'ymm0': 41,
'ymm1': 42,
'ymm2': 43,
'ymm3': 44,
'ymm4': 45,
'ymm5': 46,
'ymm6': 47,
'ymm7': 48,
'ymm8': 49,
    'ymm9': 50,
    'ymm10': 51,
    'ymm11': 52,
    'ymm12': 53,
    'ymm13': 54,
    'ymm14': 55,
    'ymm15': 56
}
name_to_gdb_regnum = {
'rax': 0,
'rbx': 1,
'rcx': 2,
'rdx': 3,
'rsi': 4,
'rdi': 5,
'rbp': 6,
'rsp': 7,
'r8': 8,
'r9': 9,
'r10': 10,
'r11': 11,
'r12': 12,
'r13': 13,
'r14': 14,
'r15': 15,
'rip': 16,
'rflags': 17,
'cs': 18,
'ss': 19,
'ds': 20,
'es': 21,
'fs': 22,
'gs': 23,
'stmm0': 24,
'stmm1': 25,
'stmm2': 26,
'stmm3': 27,
'stmm4': 28,
'stmm5': 29,
'stmm6': 30,
'stmm7': 31,
'fctrl': 32,
'fstat': 33,
'ftag': 34,
'fiseg': 35,
'fioff': 36,
'foseg': 37,
'fooff': 38,
'fop': 39,
'xmm0': 40,
'xmm1': 41,
'xmm2': 42,
'xmm3': 43,
'xmm4': 44,
'xmm5': 45,
'xmm6': 46,
'xmm7': 47,
'xmm8': 48,
'xmm9': 49,
'xmm10': 50,
'xmm11': 51,
'xmm12': 52,
'xmm13': 53,
'xmm14': 54,
'xmm15': 55,
'mxcsr': 56,
'ymm0': 57,
'ymm1': 58,
'ymm2': 59,
'ymm3': 60,
'ymm4': 61,
'ymm5': 62,
'ymm6': 63,
'ymm7': 64,
'ymm8': 65,
'ymm9': 66,
'ymm10': 67,
'ymm11': 68,
'ymm12': 69,
'ymm13': 70,
'ymm14': 71,
'ymm15': 72
}
name_to_generic_regnum = {
'rip': LLDB_REGNUM_GENERIC_PC,
'rsp': LLDB_REGNUM_GENERIC_SP,
'rbp': LLDB_REGNUM_GENERIC_FP,
'rdi': LLDB_REGNUM_GENERIC_ARG1,
'rsi': LLDB_REGNUM_GENERIC_ARG2,
'rdx': LLDB_REGNUM_GENERIC_ARG3,
'rcx': LLDB_REGNUM_GENERIC_ARG4,
'r8': LLDB_REGNUM_GENERIC_ARG5,
'r9': LLDB_REGNUM_GENERIC_ARG6
}
def get_reg_num(reg_num_dict, reg_name):
if reg_name in reg_num_dict:
return reg_num_dict[reg_name]
return LLDB_INVALID_REGNUM
x86_64_register_infos = [
{'name': 'rax',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'rbx',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'rcx', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg4'},
{'name': 'rdx', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg3'},
{'name': 'rsi', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg2'},
{'name': 'rdi', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg1'},
{'name': 'rbp', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'fp'},
{'name': 'rsp', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'sp'},
{'name': 'r8', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg5'},
{'name': 'r9', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg6'},
{'name': 'r10',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'r11',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'r12',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'r13',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'r14',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'r15',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'rip', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'pc'},
{'name': 'rflags', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'cs', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'ss', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'ds', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'es', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fs', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'gs', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'stmm0',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm1',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm2',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm3',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm4',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm5',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm6',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm7',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'fctrl', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fstat', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'ftag', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fiseg', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fioff', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'foseg', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fooff', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fop', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'xmm0',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm1',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm2',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm3',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm4',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm5',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm6',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm7',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm8',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm9',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm10',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm11',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm12',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm13',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm14',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm15',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'mxcsr', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'orig_rax', 'set': 1, 'bitsize': 64,
'encoding': eEncodingUint, 'format': eFormatHex},
    # Registers that are contained in or composed of one or more other
    # registers
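    # Hedged note: each entry below uses the 'slice' key (e.g. 'rax[31:0]')
    # to define itself as a bit-range of a parent register, so
    # get_target_definition() assigns it no byte offset of its own.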
{'name': 'eax',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rax[31:0]'},
{'name': 'ebx',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbx[31:0]'},
{'name': 'ecx',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rcx[31:0]'},
{'name': 'edx',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdx[31:0]'},
{'name': 'edi',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdi[31:0]'},
{'name': 'esi',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsi[31:0]'},
{'name': 'ebp',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbp[31:0]'},
{'name': 'esp',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsp[31:0]'},
{'name': 'r8d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r8[31:0]'},
{'name': 'r9d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r9[31:0]'},
{'name': 'r10d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r10[31:0]'},
{'name': 'r11d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r11[31:0]'},
{'name': 'r12d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r12[31:0]'},
{'name': 'r13d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r13[31:0]'},
{'name': 'r14d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r14[31:0]'},
{'name': 'r15d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r15[31:0]'},
{'name': 'ax',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rax[15:0]'},
{'name': 'bx',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbx[15:0]'},
{'name': 'cx',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rcx[15:0]'},
{'name': 'dx',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdx[15:0]'},
{'name': 'di',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdi[15:0]'},
{'name': 'si',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsi[15:0]'},
{'name': 'bp',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbp[15:0]'},
{'name': 'sp',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsp[15:0]'},
{'name': 'r8w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r8[15:0]'},
{'name': 'r9w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r9[15:0]'},
{'name': 'r10w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r10[15:0]'},
{'name': 'r11w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r11[15:0]'},
{'name': 'r12w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r12[15:0]'},
{'name': 'r13w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r13[15:0]'},
{'name': 'r14w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r14[15:0]'},
{'name': 'r15w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r15[15:0]'},
{'name': 'ah',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rax[15:8]'},
{'name': 'bh',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbx[15:8]'},
{'name': 'ch',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rcx[15:8]'},
{'name': 'dh',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdx[15:8]'},
{'name': 'al',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rax[7:0]'},
{'name': 'bl',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbx[7:0]'},
{'name': 'cl',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rcx[7:0]'},
{'name': 'dl',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdx[7:0]'},
{'name': 'dil',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdi[7:0]'},
{'name': 'sil',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsi[7:0]'},
{'name': 'bpl',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbp[7:0]'},
{'name': 'spl',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsp[7:0]'},
{'name': 'r8l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r8[7:0]'},
{'name': 'r9l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r9[7:0]'},
{'name': 'r10l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r10[7:0]'},
{'name': 'r11l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r11[7:0]'},
{'name': 'r12l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r12[7:0]'},
{'name': 'r13l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r13[7:0]'},
{'name': 'r14l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r14[7:0]'},
{'name': 'r15l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r15[7:0]'},
]
g_target_definition = None
def get_target_definition():
global g_target_definition
if g_target_definition is None:
g_target_definition = {}
offset = 0
for reg_info in x86_64_register_infos:
reg_name = reg_info['name']
# Only fill in the offset if there is no 'slice' in the register
# info
if 'slice' not in reg_info and 'composite' not in reg_info:
reg_info['offset'] = offset
offset += reg_info['bitsize'] // 8
# Set the GCC/DWARF register number for this register if it has one
reg_num = get_reg_num(name_to_gcc_dwarf_regnum, reg_name)
if reg_num != LLDB_INVALID_REGNUM:
reg_info['gcc'] = reg_num
reg_info['dwarf'] = reg_num
# Set the generic register number for this register if it has one
reg_num = get_reg_num(name_to_generic_regnum, reg_name)
if reg_num != LLDB_INVALID_REGNUM:
reg_info['generic'] = reg_num
# Set the GDB register number for this register if it has one
reg_num = get_reg_num(name_to_gdb_regnum, reg_name)
if reg_num != LLDB_INVALID_REGNUM:
reg_info['gdb'] = reg_num
g_target_definition['sets'] = [
'General Purpose Registers',
'Floating Point Registers']
g_target_definition['registers'] = x86_64_register_infos
g_target_definition[
'host-info'] = {'triple': 'x86_64-*-linux', 'endian': eByteOrderLittle}
g_target_definition['g-packet-size'] = offset
g_target_definition['breakpoint-pc-offset'] = -1
return g_target_definition
def get_dynamic_setting(target, setting_name):
if setting_name == 'gdb-server-target-definition':
return get_target_definition()
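

# Hedged usage sketch (not part of the original file): this module is normally
# consumed by LLDB via the plugin.process.gdb-remote.target-definition-file
# setting shown at the top. When loaded in an LLDB Python session (the only
# place 'from lldb import *' succeeds), this prints the computed 'g' packet
# payload size.
if __name__ == '__main__':
    td = get_target_definition()
    print('g-packet-size = %d bytes' % td['g-packet-size'])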
|
|
r"""A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
json exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print(json.dumps("\"foo\bar"))
"\"foo\bar"
>>> print(json.dumps('\u1234'))
"\u1234"
>>> print(json.dumps('\\'))
"\\"
>>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
{"a": 0, "b": 0, "c": 0}
>>> from io import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing (using repr() because of extraneous whitespace in the output)::
>>> import json
>>> print(repr(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)))
'{\n "4": 5, \n "6": 7\n}'
Decoding JSON::
>>> import json
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
['foo', {'bar': ['baz', None, 1.0, 2]}]
>>> json.loads('"\\"foo\\bar"')
'"foo\x08ar'
>>> from io import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)
['streaming API']
Specializing JSON object decoding::
>>> import json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal)
Decimal('1.1')
Extending JSONEncoder::
>>> import json
>>> class ComplexEncoder(json.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return json.JSONEncoder.default(self, obj)
...
    >>> json.dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Using json.tool from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -mjson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -mjson.tool
Expecting property name: line 1 column 2 (char 2)
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
__version__ = '1.9'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from .decoder import JSONDecoder
from .encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
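# Hedged note (not in the original): dump() and dumps() below reuse this
# module-level encoder whenever they are called with all-default arguments,
# so the common case avoids constructing a fresh JSONEncoder on every call.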
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object
containing a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
|
|
from __future__ import division
import numpy as np
from astropy import units as u
from astropy.coordinates import Longitude, Latitude, Angle
from sunpy.time import parse_time, julian_day
from sunpy.wcs import convert_hpc_hg, convert_hg_hpc
from sunpy.sun import constants, sun
__author__ = ["Jose Ivan Campos Rozo", "Stuart Mumford", "Jack Ireland"]
__all__ = ['diff_rot', 'rot_hpc']
@u.quantity_input(duration=u.s, latitude=u.degree)
def diff_rot(duration, latitude, rot_type='howard', frame_time='sidereal'):
"""
This function computes the change in longitude over days in degrees.
Parameters
-----------
duration : `~astropy.units.Quantity`
Number of seconds to rotate over.
latitude : `~astropy.units.Quantity`
        Heliographic coordinate latitude in degrees.
rot_type : {'howard' | 'snodgrass' | 'allen'}
howard : Use values for small magnetic features from Howard et al.
        snodgrass : Use values from Snodgrass et al.
allen : Use values from Allen, Astrophysical Quantities, and simpler equation.
frame_time : {'sidereal' | 'synodic'}
Choose 'type of day' time reference frame.
Returns
-------
longitude_delta : `~astropy.units.Quantity`
The change in longitude over days (units=degrees)
Notes
-----
* IDL code equivalent: http://hesperia.gsfc.nasa.gov/ssw/gen/idl/solar/diff_rot.pro
* Howard rotation: http://adsabs.harvard.edu/abs/1990SoPh..130..295H
* A review of rotation parameters (including Snodgrass values): http://link.springer.com/article/10.1023%2FA%3A1005226402796
Examples
--------
Default rotation calculation over two days at 30 degrees latitude:
>>> import numpy as np
>>> import astropy.units as u
>>> from sunpy.physics.differential_rotation import diff_rot
>>> rotation = diff_rot(2 * u.day, 30 * u.deg)
Default rotation over two days for a number of latitudes:
>>> rotation = diff_rot(2 * u.day, np.linspace(-70, 70, 20) * u.deg)
With rotation type 'allen':
>>> rotation = diff_rot(2 * u.day, np.linspace(-70, 70, 20) * u.deg, 'allen')
"""
latitude = latitude.to(u.deg)
delta_seconds = duration.to(u.s).value
delta_days = delta_seconds / 24.0 / 3600.0
sin2l = (np.sin(latitude))**2
sin4l = sin2l**2
rot_params = {'howard': [2.894, -0.428, -0.370],
'snodgrass': [2.851, -0.343, -0.474]
}
if rot_type not in ['howard', 'allen', 'snodgrass']:
raise ValueError("""rot_type must equal one of
{ 'howard' | 'allen' | 'snodgrass' }""")
elif rot_type == 'allen':
rotation_deg = delta_days * (14.44 - (3.0 * sin2l))
else:
A, B, C = rot_params[rot_type]
# This is in micro-radians / sec
rotation_rate = A + B * sin2l + C * sin4l
rotation_deg = rotation_rate * 1e-6 * delta_seconds / np.deg2rad(1)
if frame_time == 'synodic':
rotation_deg -= 0.9856 * delta_days
#return Longitude((np.round(rotation_deg, 4)), u.deg)
return np.round(rotation_deg, 4) * u.deg
@u.quantity_input(x=u.arcsec, y=u.arcsec)
def rot_hpc(x, y, tstart, tend, frame_time='synodic', rot_type='howard', **kwargs):
"""Given a location on the Sun referred to using the Helioprojective
Cartesian co-ordinate system (typically quoted in the units of arcseconds)
use the solar rotation profile to find that location at some later or
earlier time. Note that this function assumes that the data was observed
from the Earth or near Earth vicinity. Specifically, data from SOHO and
STEREO observatories are not supported. Note also that the function does
NOT use solar B0 and L0 values provided in source FITS files - these
quantities are calculated.
Parameters
----------
x : `~astropy.units.Quantity`
Helio-projective x-co-ordinate in arcseconds (can be an array).
y : `~astropy.units.Quantity`
Helio-projective y-co-ordinate in arcseconds (can be an array).
tstart : `sunpy.time.time`
date/time to which x and y are referred.
tend : `sunpy.time.time`
date/time at which x and y will be rotated to.
rot_type : {'howard' | 'snodgrass' | 'allen'}
| howard: Use values for small magnetic features from Howard et al.
        | snodgrass: Use values from Snodgrass et al.
| allen: Use values from Allen, Astrophysical Quantities, and simpler
equation.
frame_time : {'sidereal' | 'synodic'}
Choose type of day time reference frame.
Returns
-------
x : `~astropy.units.Quantity`
Rotated helio-projective x-co-ordinate in arcseconds (can be an array).
y : `~astropy.units.Quantity`
Rotated helio-projective y-co-ordinate in arcseconds (can be an array).
Examples
--------
>>> import astropy.units as u
>>> from sunpy.physics.differential_rotation import rot_hpc
>>> rot_hpc( -570 * u.arcsec, 120 * u.arcsec, '2010-09-10 12:34:56', '2010-09-10 13:34:56')
(<Angle -562.9105822671319 arcsec>, <Angle 119.31920621992195 arcsec>)
Notes
-----
SSWIDL code equivalent: http://hesperia.gsfc.nasa.gov/ssw/gen/idl/solar/rot_xy.pro .
    The function rot_xy uses arcmin2hel.pro and hel2arcmin.pro to implement
    the same functionality as this function; their equivalents here are
    convert_hpc_hg and convert_hg_hpc respectively. Each pair performs
    inverse operations of each other to a high accuracy. However, the values
    returned by arcmin2hel.pro are slightly different from those provided
    by convert_hpc_hg, which leads to very slightly different results from
    rot_hpc compared to rot_xy.
"""
# must have pairs of co-ordinates
if np.array(x).shape != np.array(y).shape:
raise ValueError('Input co-ordinates must have the same shape.')
# Make sure we have enough time information to perform a solar differential
# rotation
# Start time
dstart = parse_time(tstart)
dend = parse_time(tend)
interval = (dend - dstart).total_seconds() * u.s
# Get the Sun's position from the vantage point at the start time
vstart = kwargs.get("vstart", _calc_P_B0_SD(dstart))
# Compute heliographic co-ordinates - returns (longitude, latitude). Points
# off the limb are returned as nan
longitude, latitude = convert_hpc_hg(x.to(u.arcsec).value,
y.to(u.arcsec).value,
b0_deg=vstart["b0"].to(u.deg).value,
l0_deg=vstart["l0"].to(u.deg).value,
dsun_meters=(constants.au * sun.sunearth_distance(t=dstart)).value,
angle_units='arcsec')
longitude = Longitude(longitude, u.deg)
latitude = Angle(latitude, u.deg)
# Compute the differential rotation
drot = diff_rot(interval, latitude, frame_time=frame_time,
rot_type=rot_type)
# Convert back to heliocentric cartesian in units of arcseconds
vend = kwargs.get("vend", _calc_P_B0_SD(dend))
# It appears that there is a difference in how the SSWIDL function
# hel2arcmin and the sunpy function below performs this co-ordinate
# transform.
newx, newy = convert_hg_hpc(longitude.to(u.deg).value + drot.to(u.deg).value,
latitude.to(u.deg).value,
b0_deg=vend["b0"].to(u.deg).value,
l0_deg=vend["l0"].to(u.deg).value,
dsun_meters=(constants.au * sun.sunearth_distance(t=dend)).value,
occultation=False)
newx = Angle(newx, u.arcsec)
newy = Angle(newy, u.arcsec)
return newx.to(u.arcsec), newy.to(u.arcsec)
def _calc_P_B0_SD(date):
"""
    Calculate the solar P and B0 angles and the semi-diameter as seen from
    Earth. This function is kept internal because these quantities should
    eventually be calculated by a part of SunPy dedicated to accurate
    ephemeris computations.
Parameters
-----------
date : `sunpy.time.time`
the time at which to calculate the solar P, B0 angles and the
semi-diameter.
Returns
-------
A dictionary with the following keys with the following meanings:
p - Solar P (position angle of pole) (degrees)
b0 - latitude of point at disk centre (degrees)
sd - semi-diameter of the solar disk in arcminutes
Notes
-----
SSWIDL code equivalent:
http://hesperia.gsfc.nasa.gov/ssw/gen/idl/solar/pb0r.pro
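    Examples
    --------
    A hedged sketch (this is an internal helper; the returned dict has the
    keys documented above):
    >>> angles = _calc_P_B0_SD('2013-03-27')
    >>> sorted(angles.keys())
    ['b0', 'l0', 'p', 'sd']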
"""
# number of Julian days since 2415020.0
de = julian_day(parse_time(date)) - 2415020.0
# get the longitude of the sun etc.
sun_position = _sun_pos(date)
longmed = sun_position["longitude"].to(u.deg).value
#ra = sun_position["ra"]
#dec = sun_position["dec"]
appl = sun_position["app_long"].to(u.deg).value
oblt = sun_position["obliq"].to(u.deg).value
# form the aberrated longitude
Lambda = longmed - (20.50 / 3600.0)
# form longitude of ascending node of sun's equator on ecliptic
node = 73.6666660 + (50.250 / 3600.0) * ((de / 365.250) + 50.0)
arg = Lambda - node
# calculate P, the position angle of the pole
p = np.rad2deg(
np.arctan(-np.tan(np.deg2rad(oblt)) * np.cos(np.deg2rad(appl))) +
np.arctan(-0.127220 * np.cos(np.deg2rad(arg))))
# B0 the tilt of the axis...
b = np.rad2deg(np.arcsin(0.12620 * np.sin(np.deg2rad(arg))))
# ... and the semi-diameter
# Form the mean anomalies of Venus(MV),Earth(ME),Mars(MM),Jupiter(MJ)
# and the mean elongation of the Moon from the Sun(D).
t = de / 36525.0
mv = 212.60 + np.mod(58517.80 * t, 360.0)
me = 358.4760 + np.mod(35999.04980 * t, 360.0)
mm = 319.50 + np.mod(19139.860 * t, 360.0)
mj = 225.30 + np.mod(3034.690 * t, 360.0)
d = 350.70 + np.mod(445267.110 * t, 360.0)
# Form the geocentric distance(r) and semi-diameter(sd)
r = 1.0001410 - (0.0167480 - 0.00004180 * t) * np.cos(np.deg2rad(me)) \
- 0.000140 * np.cos(np.deg2rad(2.0 * me)) \
+ 0.0000160 * np.cos(np.deg2rad(58.30 + 2.0 * mv - 2.0 * me)) \
+ 0.0000050 * np.cos(np.deg2rad(209.10 + mv - me)) \
+ 0.0000050 * np.cos(np.deg2rad(253.80 - 2.0 * mm + 2.0 * me)) \
+ 0.0000160 * np.cos(np.deg2rad(89.50 - mj + me)) \
+ 0.0000090 * np.cos(np.deg2rad(357.10 - 2.0 * mj + 2.0 * me)) \
+ 0.0000310 * np.cos(np.deg2rad(d))
sd_const = constants.radius / constants.au
sd = np.arcsin(sd_const / r) * 10800.0 / np.pi
return {"p": Angle(p, u.deg),
"b0": Angle(b, u.deg),
"sd": Angle(sd.value, u.arcmin),
"l0": Angle(0.0, u.deg)}
def _sun_pos(date):
"""
Calculate solar ephemeris parameters. Allows for planetary and lunar
perturbations in the calculation of solar longitude at date and various
other solar positional parameters. This routine is a truncated version of
Newcomb's Sun and is designed to give apparent angular coordinates (T.E.D)
to a precision of one second of time. This function replicates the SSW/
IDL function "sun_pos.pro". It is kept internal for now because it should
eventually be replaced by accurate ephemeris calculations in the part of
SunPy that handles ephemerides.
Parameters
-----------
date : `sunpy.time.time`
Time at which the solar ephemeris parameters are calculated. The
input time can be in any acceptable time format.
Returns
-------
A dictionary with the following keys:
longitude - Longitude of sun for mean equinox of date (degs)
ra - Apparent RA for true equinox of date (degs)
dec - Apparent declination for true equinox of date (degs)
app_long - Apparent longitude (degs)
obliq - True obliquity (degs)
Notes
-----
SSWIDL code equivalent:
http://hesperia.gsfc.nasa.gov/ssw/gen/idl/solar/sun_pos.pro
Examples
--------
>>> from sunpy.physics.differential_rotation import _sun_pos
>>> sp = _sun_pos('2013-03-27')
"""
# Fractional Julian day with correct offset
dd = julian_day(date) - 2415020.0
# form time in Julian centuries from 1900.0
t = dd / 36525.0
# form sun's mean longitude
l = (279.6966780 + np.mod(36000.7689250 * t, 360.00)) * 3600.0
# allow for ellipticity of the orbit (equation of centre) using the Earth's
# mean anomaly ME
me = 358.4758440 + np.mod(35999.049750 * t, 360.0)
ellcor = (6910.10 - 17.20 * t) * np.sin(np.deg2rad(me)) + \
72.30 * np.sin(np.deg2rad(2.0 * me))
l = l + ellcor
# allow for the Venus perturbations using the mean anomaly of Venus MV
mv = 212.603219 + np.mod(58517.8038750 * t, 360.0)
vencorr = 4.80 * np.cos(np.deg2rad(299.10170 + mv - me)) + \
5.50 * np.cos(np.deg2rad(148.31330 + 2.0 * mv - 2.0 * me)) + \
2.50 * np.cos(np.deg2rad(315.94330 + 2.0 * mv - 3.0 * me)) + \
1.60 * np.cos(np.deg2rad(345.25330 + 3.0 * mv - 4.0 * me)) + \
1.00 * np.cos(np.deg2rad(318.150 + 3.0 * mv - 5.0 * me))
l = l + vencorr
# Allow for the Mars perturbations using the mean anomaly of Mars MM
mm = 319.5294250 + np.mod(19139.858500 * t, 360.0)
marscorr = 2.0 * np.cos(np.deg2rad(343.88830 - 2.0 * mm + 2.0 * me)) + \
1.80 * np.cos(np.deg2rad(200.40170 - 2.0 * mm + me))
l = l + marscorr
# Allow for the Jupiter perturbations using the mean anomaly of Jupiter MJ
mj = 225.3283280 + np.mod(3034.69202390 * t, 360.00)
jupcorr = 7.20 * np.cos(np.deg2rad(179.53170 - mj + me)) + \
2.60 * np.cos(np.deg2rad(263.21670 - mj)) + \
2.70 * np.cos(np.deg2rad(87.14500 - 2.0 * mj + 2.0 * me)) + \
1.60 * np.cos(np.deg2rad(109.49330 - 2.0 * mj + me))
l = l + jupcorr
# Allow for the Moons perturbations using the mean elongation of the Moon
# from the Sun D
d = 350.73768140 + np.mod(445267.114220 * t, 360.0)
mooncorr = 6.50 * np.sin(np.deg2rad(d))
l = l + mooncorr
# Note the original code is
# longterm = + 6.4d0 * sin(( 231.19d0 + 20.20d0 * t )*!dtor)
longterm = 6.40 * np.sin(np.deg2rad(231.190 + 20.20 * t))
l = l + longterm
l = np.mod(l + 2592000.0, 1296000.0)
longmed = l / 3600.0
# Allow for Aberration
l = l - 20.5
# Allow for Nutation using the longitude of the Moons mean node OMEGA
omega = 259.1832750 - np.mod(1934.1420080 * t, 360.0)
l = l - 17.20 * np.sin(np.deg2rad(omega))
# Form the True Obliquity
oblt = 23.4522940 - 0.01301250 * t + \
(9.20 * np.cos(np.deg2rad(omega))) / 3600.0
# Form Right Ascension and Declination
l = l / 3600.0
ra = np.rad2deg(np.arctan2(np.sin(np.deg2rad(l)) * \
np.cos(np.deg2rad(oblt)), np.cos(np.deg2rad(l))))
if isinstance(ra, np.ndarray):
ra[ra < 0.0] += 360.0
elif ra < 0.0:
ra = ra + 360.0
dec = np.rad2deg(np.arcsin(np.sin(np.deg2rad(l)) *
np.sin(np.deg2rad(oblt))))
# convert the internal variables to those listed in the top of the
# comment section in this code and in the original IDL code. Quantities
# are assigned following the advice in Astropy "Working with Angles"
return {"longitude": Longitude(longmed, u.deg),
"ra": Longitude(ra, u.deg),
"dec": Latitude(dec, u.deg),
"app_long": Longitude(l, u.deg),
"obliq": Angle(oblt, u.deg)}
|
|
import os
from ..java import (
Class as JavaClass,
Field as JavaField,
Method as JavaMethod,
Code as JavaCode,
opcodes as JavaOpcodes,
SourceFile,
Signature,
# LineNumberTable
)
from .blocks import Block, IgnoreBlock
from .methods import MainMethod, Method, extract_parameters
from .opcodes import ASTORE_name, ALOAD_name, IF, END_IF
class StaticBlock(Block):
def tweak(self):
self.code = [
# Set up the globals dictionary for the module
JavaOpcodes.NEW('java/util/Hashtable'),
JavaOpcodes.DUP(),
JavaOpcodes.INVOKESPECIAL('java/util/Hashtable', '<init>', '()V'),
JavaOpcodes.PUTSTATIC(self.module.descriptor, 'globals', 'Ljava/util/Hashtable;'),
# Load the Python builtins into the globals.
JavaOpcodes.GETSTATIC(self.module.descriptor, 'globals', 'Ljava/util/Hashtable;'),
JavaOpcodes.LDC_W('__builtins__'),
JavaOpcodes.NEW('org/python/types/Dict'),
JavaOpcodes.DUP(),
JavaOpcodes.GETSTATIC('org/Python', 'builtins', 'Ljava/util/Hashtable;'),
JavaOpcodes.INVOKESPECIAL('org/python/types/Dict', '<init>', '(Ljava/util/Map;)V'),
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'put', '(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.POP()
] + self.code
self.void_return()
def store_name(self, name, allow_locals=True):
self.add_opcodes(
ASTORE_name(self, '#TEMP#'),
JavaOpcodes.GETSTATIC(self.module.descriptor, 'globals', 'Ljava/util/Hashtable;'),
JavaOpcodes.LDC_W(name),
ALOAD_name(self, '#TEMP#'),
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'put', '(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.POP(),
)
def load_name(self, name, allow_locals=True):
self.add_opcodes(
# look for a global var.
JavaOpcodes.GETSTATIC(self.module.descriptor, 'globals', 'Ljava/util/Hashtable;'),
JavaOpcodes.LDC_W(name),
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'get', '(Ljava/lang/Object;)Ljava/lang/Object;'),
# If there's nothing in the globals, then look for a builtin.
IF(
[JavaOpcodes.DUP()],
JavaOpcodes.IFNONNULL
),
JavaOpcodes.POP(),
JavaOpcodes.GETSTATIC('org/Python', 'builtins', 'Ljava/util/Hashtable;'),
JavaOpcodes.LDC_W(name),
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'get', '(Ljava/lang/Object;)Ljava/lang/Object;'),
# If we still don't have something, throw a NameError.
IF(
[JavaOpcodes.DUP()],
JavaOpcodes.IFNONNULL
),
JavaOpcodes.POP(),
JavaOpcodes.NEW('org/python/exceptions/NameError'),
JavaOpcodes.DUP(),
JavaOpcodes.LDC_W(name),
JavaOpcodes.INVOKESPECIAL('org/python/exceptions/NameError', '<init>', '(Ljava/lang/String;)V'),
JavaOpcodes.ATHROW(),
END_IF(),
END_IF(),
# Make sure we actually have a Python object
JavaOpcodes.CHECKCAST('org/python/Object')
)
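# Rough Python-level equivalent of the opcode sequence emitted by load_name
# above (a readability sketch only; the real lookup runs as JVM bytecode):
#
#     value = module_globals.get(name)
#     if value is None:
#         value = builtins.get(name)
#     if value is None:
#         raise NameError(name)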
def delete_name(self, name, allow_locals=True):
self.add_opcodes(
# look for a global var.
JavaOpcodes.GETSTATIC(self.module.descriptor, 'globals', 'Ljava/util/Hashtable;'),
JavaOpcodes.LDC_W(name),
JavaOpcodes.INVOKEVIRTUAL('java/util/Hashtable', 'remove', '(Ljava/lang/Object;)Ljava/lang/Object;'),
)
@property
def descriptor(self):
return self.parent.descriptor
@property
def module(self):
return self.parent
def add_method(self, method_name, code):
method = Method(self.module, method_name, extract_parameters(code), static=True)
method.extract(code)
self.module.methods.append(method.transpile())
return method
class Module(Block):
def __init__(self, namespace, sourcefile):
super().__init__()
self.namespace = namespace
self.sourcefile = sourcefile
self.name = os.path.splitext(os.path.basename(sourcefile))[0]
self.methods = []
self.classes = []
self.anonymous_inner_class_count = 0
@property
def descriptor(self):
return '/'.join(self.namespace.split('.') + [self.name])
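# Illustrative example: namespace 'org.example' and a source file
# 'mymodule.py' yield the descriptor 'org/example/mymodule'.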
def transpile(self):
"""Convert a Python code block into a list of Java Classfile definitions.
Returns a list of triples:
(namespace, class_name, javaclassfile)
The list contains the classfile for the module, plus any classes
defined in the module.
"""
main_commands = []
body_commands = []
main_end = None
main = None
for cmd in self.commands:
if main_end is not None:
# Marker for the end of the main block:
if cmd.is_main_end(main_end):
main_end = None
try:
# The last command in the main block is a jump.
# Not sure why it is required, but we can ignore
# it for transpilation purposes.
main = MainMethod(self, main_commands[:-1]).transpile()
except IgnoreBlock:
pass
else:
main_commands.append(cmd)
else:
# Look for a very specific pattern, flagging the "main" method:
# if __name__ == '__main__':
# ...
# which is represented as:
# LOAD_NAME: __name__
# LOAD_CONST: __main__
# COMPARE_OP: ==
# POP_JUMP_IF_FALSE: <end of block target>
# ... <main code>
# <end of block target>
if cmd.is_main_start():
if main is not None:
print("Found duplicate main block... replacing previous main")
main_end = cmd.operation.target
# All other module-level commands go into the static block
else:
body_commands.append(cmd)
body = StaticBlock(self, body_commands).transpile()
# Generate a classfile for this module; the module-level (static)
# content goes into its <clinit> block.
classfile = JavaClass(self.descriptor, supername='org/python/types/Module')
classfile.attributes.append(SourceFile(os.path.basename(self.sourcefile)))
# Add a globals dictionary to the module.
classfile.fields.append(
JavaField(
'globals',
'Ljava/util/Hashtable;',
public=True,
static=True,
attributes=[
Signature('Ljava/util/Hashtable<Ljava/lang/String;Lorg/python/types/Object;>;')
]
)
)
# Add a static method to the module.
static_init = JavaMethod('<clinit>', '()V', public=False, static=True)
static_init.attributes.append(body)
classfile.methods.append(static_init)
if main is None:
print("Adding default main method...")
main = JavaMethod(
'main',
'([Ljava/lang/String;)V',
public=True,
static=True,
attributes=[
JavaCode(
max_stack=0,
max_locals=1,
code=[JavaOpcodes.RETURN()]
)
]
)
classfile.methods.append(main)
# Add any static methods defined in the module
for method in self.methods:
classfile.methods.append(method)
# The list of classfiles that will be returned will contain
# at least one entry - the class for the module itself.
classfiles = [(self.namespace, self.name, classfile)]
# Also output any classes defined in this module.
for namespace, class_name, classfile in self.classes:
classfiles.append((namespace, class_name, classfile))
return classfiles
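# Illustrative sketch (not part of the original source) of consuming the
# output of Module.transpile(); the namespace and filename are placeholders.
#
#     module = Module('org.example', 'example.py')
#     # ... module.commands populated by the preceding bytecode extraction ...
#     for namespace, class_name, classfile in module.transpile():
#         # write classfile out as <namespace>/<class_name>.class
#         pass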
|
|
# Copyright (c) 2016 IBM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from designateclient import exceptions as d_exc
from designateclient.v2 import client as d_client
from keystoneclient.auth.identity.generic import password
from keystoneclient.auth import token_endpoint
from keystoneclient import session
from oslo_config import cfg
from oslo_log import log
from neutron._i18n import _
from neutron.extensions import dns
from neutron.services.externaldns import driver
IPV4_PTR_ZONE_PREFIX_MIN_SIZE = 8
IPV4_PTR_ZONE_PREFIX_MAX_SIZE = 24
IPV6_PTR_ZONE_PREFIX_MIN_SIZE = 4
IPV6_PTR_ZONE_PREFIX_MAX_SIZE = 124
LOG = log.getLogger(__name__)
_SESSION = None
designate_opts = [
cfg.StrOpt('url',
help=_('URL for connecting to designate')),
cfg.StrOpt('admin_username',
help=_('Username for connecting to designate in admin '
'context')),
cfg.StrOpt('admin_password',
help=_('Password for connecting to designate in admin '
'context'),
secret=True),
cfg.StrOpt('admin_tenant_id',
help=_('Tenant id for connecting to designate in admin '
'context')),
cfg.StrOpt('admin_tenant_name',
help=_('Tenant name for connecting to designate in admin '
'context')),
cfg.StrOpt('admin_auth_url',
help=_('Authorization URL for connecting to designate in admin '
'context')),
cfg.BoolOpt('allow_reverse_dns_lookup', default=True,
help=_('Allow the creation of PTR records')),
cfg.IntOpt('ipv4_ptr_zone_prefix_size', default=24,
help=_('Number of bits in an ipv4 PTR zone that will be considered '
'network prefix. It has to align to a byte boundary. Minimum '
'value is 8. Maximum value is 24. As a consequence, range '
'of values is 8, 16 and 24')),
cfg.IntOpt('ipv6_ptr_zone_prefix_size', default=120,
help=_('Number of bits in an ipv6 PTR zone that will be considered '
'network prefix. It has to align to a nybble boundary. Minimum '
'value is 4. Maximum value is 124. As a consequence, range '
'of values is 4, 8, 12, 16,..., 124')),
cfg.StrOpt('ptr_zone_email', default='',
help=_('The email address to be used when creating PTR zones. '
'If not specified, the email address will be '
'admin@<dns_domain>')),
]
DESIGNATE_GROUP = 'designate'
CONF = cfg.CONF
CONF.register_opts(designate_opts, DESIGNATE_GROUP)
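# Illustrative neutron.conf snippet for the options registered above; the
# values are placeholders, not a recommended configuration:
#
#     [designate]
#     url = http://127.0.0.1:9001/v2
#     admin_username = neutron
#     admin_password = secret
#     admin_tenant_name = service
#     admin_auth_url = http://127.0.0.1:35357/v2.0
#     allow_reverse_dns_lookup = True
#     ipv4_ptr_zone_prefix_size = 24
#     ipv6_ptr_zone_prefix_size = 120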
def get_clients(context):
global _SESSION
if not _SESSION:
_SESSION = session.Session()
auth = token_endpoint.Token(CONF.designate.url, context.auth_token)
client = d_client.Client(session=_SESSION, auth=auth)
admin_auth = password.Password(
auth_url=CONF.designate.admin_auth_url,
username=CONF.designate.admin_username,
password=CONF.designate.admin_password,
tenant_name=CONF.designate.admin_tenant_name,
tenant_id=CONF.designate.admin_tenant_id)
admin_client = d_client.Client(session=_SESSION, auth=admin_auth)
return client, admin_client
class Designate(driver.ExternalDNSService):
"""Driver for Designate."""
def __init__(self):
ipv4_ptr_zone_size = CONF.designate.ipv4_ptr_zone_prefix_size
ipv6_ptr_zone_size = CONF.designate.ipv6_ptr_zone_prefix_size
if (ipv4_ptr_zone_size < IPV4_PTR_ZONE_PREFIX_MIN_SIZE or
ipv4_ptr_zone_size > IPV4_PTR_ZONE_PREFIX_MAX_SIZE or
(ipv4_ptr_zone_size % 8) != 0):
raise dns.InvalidPTRZoneConfiguration(
parameter='ipv4_ptr_zone_size', number='8',
maximum=str(IPV4_PTR_ZONE_PREFIX_MAX_SIZE),
minimum=str(IPV4_PTR_ZONE_PREFIX_MIN_SIZE))
if (ipv6_ptr_zone_size < IPV6_PTR_ZONE_PREFIX_MIN_SIZE or
ipv6_ptr_zone_size > IPV6_PTR_ZONE_PREFIX_MAX_SIZE or
(ipv6_ptr_zone_size % 4) != 0):
raise dns.InvalidPTRZoneConfiguration(
parameter='ipv6_ptr_zone_size', number='4',
maximum=str(IPV6_PTR_ZONE_PREFIX_MAX_SIZE),
minimum=str(IPV6_PTR_ZONE_PREFIX_MIN_SIZE))
def create_record_set(self, context, dns_domain, dns_name, records):
designate, designate_admin = get_clients(context)
v4, v6 = self._classify_records(records)
try:
if v4:
designate.recordsets.create(dns_domain, dns_name, 'A', v4)
if v6:
designate.recordsets.create(dns_domain, dns_name, 'AAAA', v6)
except d_exc.NotFound:
raise dns.DNSDomainNotFound(dns_domain=dns_domain)
except d_exc.Conflict:
raise dns.DuplicateRecordSet(dns_name=dns_name)
if not CONF.designate.allow_reverse_dns_lookup:
return
# Set up the PTR records
recordset_name = '%s.%s' % (dns_name, dns_domain)
ptr_zone_email = 'admin@%s' % dns_domain[:-1]
if CONF.designate.ptr_zone_email:
ptr_zone_email = CONF.designate.ptr_zone_email
for record in records:
in_addr_name = netaddr.IPAddress(record).reverse_dns
in_addr_zone_name = self._get_in_addr_zone_name(in_addr_name)
in_addr_zone_description = (
'An %s zone for reverse lookups set up by Neutron.' %
'.'.join(in_addr_name.split('.')[-3:]))
try:
# Since we don't delete in-addr zones, assume it already
# exists. If it doesn't, create it
designate_admin.recordsets.create(in_addr_zone_name,
in_addr_name, 'PTR',
[recordset_name])
except d_exc.NotFound:
designate_admin.zones.create(
in_addr_zone_name, email=ptr_zone_email,
description=in_addr_zone_description)
designate_admin.recordsets.create(in_addr_zone_name,
in_addr_name, 'PTR',
[recordset_name])
def _classify_records(self, records):
v4 = []
v6 = []
for record in records:
if netaddr.IPAddress(record).version == 4:
v4.append(record)
else:
v6.append(record)
return v4, v6
def _get_in_addr_zone_name(self, in_addr_name):
units = self._get_bytes_or_nybles_to_skip(in_addr_name)
return '.'.join(in_addr_name.split('.')[units:])
def _get_bytes_or_nybles_to_skip(self, in_addr_name):
if 'in-addr.arpa' in in_addr_name:
return int((32 - CONF.designate.ipv4_ptr_zone_prefix_size) / 8)
return int((128 - CONF.designate.ipv6_ptr_zone_prefix_size) / 4)
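# Worked example (illustrative, assuming the default /24 IPv4 prefix size):
# netaddr.IPAddress('192.0.2.10').reverse_dns is '10.2.0.192.in-addr.arpa.',
# (32 - 24) / 8 = 1 leading label is skipped, so the PTR record lands in the
# zone '2.0.192.in-addr.arpa.'. The IPv6 branch skips nybble labels instead.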
def delete_record_set(self, context, dns_domain, dns_name, records):
designate, designate_admin = get_clients(context)
ids_to_delete = self._get_ids_ips_to_delete(
dns_domain, '%s.%s' % (dns_name, dns_domain), records, designate)
for _id in ids_to_delete:
designate.recordsets.delete(dns_domain, _id)
if not CONF.designate.allow_reverse_dns_lookup:
return
for record in records:
in_addr_name = netaddr.IPAddress(record).reverse_dns
in_addr_zone_name = self._get_in_addr_zone_name(in_addr_name)
designate_admin.recordsets.delete(in_addr_zone_name, in_addr_name)
def _get_ids_ips_to_delete(self, dns_domain, name, records,
designate_client):
try:
recordsets = designate_client.recordsets.list(
dns_domain, criterion={"name": "%s" % name})
except d_exc.NotFound:
raise dns.DNSDomainNotFound(dns_domain=dns_domain)
ids = [rec['id'] for rec in recordsets]
ips = [ip for rec in recordsets for ip in rec['records']]
if set(ips) != set(records):
raise dns.DuplicateRecordSet(dns_name=name)
return ids
|
|
from flask import Blueprint, render_template, session, request, redirect, url_for, flash, jsonify
from plaid import Client
from application.services import stripe_client
from forms import *
from application.db.model import *
import traceback
import random
from datetime import datetime
from flask.ext.login import current_user, login_required, login_user, logout_user
from application.util import constants
from application import services, common
from pprint import pprint
import requests
import json
import logging
onboarding_bp = Blueprint('onboarding_bp', __name__, url_prefix='/account')
def generate_and_store_new_verification_code(account):
verification_code = random.randint(1000, 9999)
account.phone_verification_code = verification_code
current_app.db_session.add(account)
current_app.db_session.commit()
return verification_code
@onboarding_bp.route('/verify', methods=['GET', 'POST'])
def verify_phone_number():
form = PhoneVerificationForm(request.form)
if form.validate_on_submit():
account = get_account_by_id(session['account_id'])
if account.status == int(Account.UNVERIFIED) and \
form.verification_code.data == account.phone_verification_code:
account.status = Account.VERIFIED_PHONE
stripe_customer = current_app.stripe_client.create_customer(
account.phone_number)
account.stripe_customer_id = stripe_customer['id']
account.time_updated = datetime.now()
current_app.db_session.add(account)
current_app.db_session.commit()
return redirect(url_for('onboarding_bp.account_verified'))
else:
flash('Invalid verification code')
return render_template('onboarding/verify.html', form=form)
@onboarding_bp.route('/account_verified', methods=['GET'])
def account_verified():
return render_template('onboarding/account_verified.html')
# ajax
@onboarding_bp.route('/resend_verification', methods=['POST'])
def resend_verification():
print 'account id =',session['account_id']
if session['account_id'] is None:
return jsonify({
'error': True,
'description': constants.MISSING_ACCOUNT
})
try:
account = get_account_by_id(session['account_id'])
verification_code = generate_and_store_new_verification_code(account)
target_phone = '+1' + account.phone_number
services.phone.send_message(target_phone, constants.PHONE_VERIFICATION_MSG.format(verification_code))
return jsonify({'success': True})
except Exception as e:
return jsonify({
'error': True,
'description': traceback.format_exc()
})
@onboarding_bp.route('/', methods=['GET', 'POST'])
def signup():
print 'current user =',current_user
if current_user.is_authenticated:
print 'redirecting to account'
return redirect(url_for('.account'))
form = SignupForm(request.form)
if form.validate_on_submit():
account = Account(
first_name = form.first_name.data,
last_name = form.last_name.data,
phone_number = form.phone_number.data,
password = form.password.data,
time_created = datetime.now(),
time_updated = datetime.now())
current_app.db_session.add(account)
current_app.db_session.commit()
# verify phone
session['account_id'] = account.id
verification_code = generate_and_store_new_verification_code(account)
target_phone = '+1' + form.phone_number.data.replace('-','')
services.phone.send_message(target_phone, constants.PHONE_VERIFICATION_MSG.format(verification_code))
return redirect(url_for('onboarding_bp.verify_phone_number'))
return render_template('onboarding/signup.html', form=form)
@onboarding_bp.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if form.validate_on_submit():
try:
account = get_account_by_phone_number(form.phone_number.data)
if account == None:
flash(constants.INVALID_CREDENTIALS)
return render_template('onboarding/login.html', form=form)
# print 'Account = ',account,' Status = ',account.status
if account.status == Account.UNVERIFIED:
session['account_id'] = account.id
flash(constants.ACCOUNT_NOT_VERIFIED)
return redirect(url_for('onboarding_bp.verify_phone_number'))
elif account.password_match(form.password.data) and account.status == Account.VERIFIED_PHONE:
# session['logged_in'] = True
# session['account_id'] = account.id
login_user(account)
next = request.args.get('next')
# next_is_valid should check if the user has valid
# permission to access the `next` url
# print 'Next page =',next,' is_valid_next =',next_is_valid(next)
# if not next_is_valid(next):
# return flask.abort(404)
return redirect(next or url_for('onboarding_bp.account'))
except Exception as e:
print 'Exception::',e
print traceback.format_exc()
return render_template('404.html')
return render_template('onboarding/login.html', form=form)
def fetch_financial_information():
for fi in current_user.fis:
response = get_bank_info(fi.bank_account_id)
fi.available_balance = response['available_balance']
fi.current_balance = response['current_balance']
fi.subtype = response['subtype']
fi.subtype_name = response['subtype_name']
fi.account_number_last_4 = response['account_number_last_4']
fi.institution_type = response['institution_type']
@onboarding_bp.route('/logout', methods=['GET', 'POST'])
def logout():
logout_user()
return redirect(url_for('.login'))
@onboarding_bp.route('/home', methods=['GET'])
@login_required
def account():
data = {}
eligible_for_membership_reapplication = True
for membership in current_user.memberships:
if membership.is_active() or membership.is_pending():
eligible_for_membership_reapplication = False
data['eligible_for_membership_reapplication'] = eligible_for_membership_reapplication
return render_template('onboarding/account.html', data=data)
@onboarding_bp.route('/apply-for-membership', methods=['GET'])
@login_required
def apply_for_membership():
if len(current_user.memberships) == 0:
return redirect(url_for('.enter_personal_information'))
memberships = sorted(current_user.memberships, key=lambda m: m.time_created, reverse = True)
if memberships[0].status == Membership.PENDING:
flash('You have already applied for membership.')
return redirect(url_for('.account'))
return redirect(url_for('.enter_personal_information'))
@onboarding_bp.route('/enter-personal-information', methods=['GET', 'POST'])
@login_required
def enter_personal_information():
if has_entered_personal_information(current_user):
return redirect(url_for('.enter_employer_information'))
form = PersonalInformationForm(request.form)
print 'errors =',form.dob.errors,' date = ',form.dob.data
if form.validate_on_submit():
address = Address(
street1 = form.street1.data,
street2 = form.street2.data,
city = form.city.data,
state = form.state.data,
address_type = Address.INDIVIDUAL,
postal_code = form.postal_code.data)
address.account_id = current_user.id
# current_app.db_session.add(address)
current_user.email = form.email.data
current_user.ssn = form.ssn.data.replace('-','')
current_user.dob = form.dob.data
current_user.time_updated = datetime.now()
current_user.driver_license_number = form.driver_license_number.data
current_user.addresses.append(address)
current_app.db_session.add(current_user)
current_app.db_session.commit()
return redirect(url_for('.enter_employer_information'))
breadcrumItems = get_breadcrum()
breadcrumItems[0]['active'] = True
return render_template('onboarding/enter_personal_information.html',
form=form, breadcrumItems = breadcrumItems)
def get_breadcrum():
breadcrumItems = [
{
'name': 'Enter personal information',
'active': False
},
{
'name': 'Enter employer information',
'active': False
},
{
'name': 'Select plan',
'active': False
},
{
'name': 'Add bank account',
'active': False
},
]
return breadcrumItems
@onboarding_bp.route('/enter_employer_information', methods=['GET', 'POST'])
@login_required
def enter_employer_information():
if has_entered_employer_information(current_user):
return redirect(url_for('.select_plan'))
form = EmployerInformationForm(request.form)
print 'error = ',form.errors
if form.validate_on_submit():
try:
print 'About to save employer information...'
employer_address = Address(
street1 = form.employer_street1.data,
street2 = form.employer_street2.data,
city = form.employer_city.data,
state = form.employer_state.data,
address_type = Address.EMPLOYER,
postal_code = form.employer_postal_code.data)
employer_address.account_id = current_user.id
current_app.db_session.add(employer_address)
current_user.employer_name = form.employer_name.data
current_user.employer_phone_number = form.employer_phone_number.data
current_user.time_updated = datetime.now()
current_app.db_session.add(current_user)
current_app.db_session.commit()
except Exception as e:
logging.info('failed to save employer information %s' % e)
flash(constants.PLEASE_TRY_AGAIN)
breadcrumItems = get_breadcrum()
breadcrumItems[1]['active'] = True
return render_template('onboarding/enter_employer_information.html',
form=form, breadcrumItems = breadcrumItems)
return redirect(url_for('.select_plan'))
else:
breadcrumItems = get_breadcrum()
breadcrumItems[1]['active'] = True
return render_template('onboarding/enter_employer_information.html',
form=form, breadcrumItems = breadcrumItems)
@onboarding_bp.route('/select_plan', methods=['GET', 'POST'])
@login_required
def select_plan():
form = SelectPlanForm(request.form)
plans = get_all_plans()
if form.validate_on_submit():
plan_id = form.plan_id.data
try:
membership = Membership(
account_id = current_user.id,
status = Membership.PENDING,
plan = get_plan_by_id(plan_id),
time_updated = datetime.now(),
time_created = datetime.now())
current_user.memberships.append(membership)
current_app.db_session.commit()
return redirect(url_for('.add_bank'))
except Exception as e:
logging.error('Failed to save membership info %s for user %s, exception %s'
% (membership, current_user, e))
flash(constants.PLEASE_TRY_AGAIN)
breadcrumItems = get_breadcrum()
breadcrumItems[2]['active'] = True
return render_template('onboarding/select_plan.html', form=form, plans=plans, breadcrumItems = breadcrumItems)
@onboarding_bp.route('/apply_next', methods=['POST'])
def apply_next():
return redirect(url_for('onboarding_bp.add_bank'))
# AJAX CALL
@onboarding_bp.route('/add_bank', methods=['GET', 'POST'])
@login_required
def add_bank():
if request.method == 'GET':
if len(current_user.memberships) == 0:
return redirect(url_for('.apply_for_membership'))
institutions = get_all_iav_supported_institutions()
breadcrumItems = get_breadcrum()
breadcrumItems[3]['active'] = True
return render_template('onboarding/add_bank.html', institutions = institutions, breadcrumItems = breadcrumItems)
else:
try:
public_token = request.form['public_token']
account_id = request.form['account_id']
account_name = request.form['account_name']
institution = request.form['institution']
institution_type = request.form['institution_type']
# print('ADD BANK REQUEST account_id = {0}, account_name = {1}, institution = {2}, institution_type = {3}')\
# .format(account_id, account_name, institution, institution_type)
response = json.loads(exchange_token(public_token, account_id))
result = {}
if get_fi_by_access_token(response['account_id']) is not None:
result['error'] = True
result['message'] = constants.BANK_ALREADY_ADDED
return jsonify(result)
fi = Fi(
account_name = account_name,
bank_account_id = account_id,
institution = institution,
institution_type = institution_type,
account_type = '',
access_token = response['access_token'],
stripe_bank_account_token = response['stripe_bank_account_token'],
time_updated = datetime.now(),
time_created = datetime.now())
current_user.fis.append(fi)
fetch_financial_information()
current_app.db_session.add(current_user)
current_app.db_session.commit()
response = {}
response['success'] = True
return jsonify(response)
# return redirect(url_for('.account'))
except Exception as e:
print e
response = {}
response['error'] = 'true'
# response['description'] = e
return jsonify(response)
@onboarding_bp.route('/add_random_deposit', methods=['GET', 'POST'])
@login_required
def add_random_deposit():
form = RandomDepositForm(request.form)
if form.validate_on_submit():
response = current_app.stripe_client.add_customer_bank(
current_user.stripe_customer_id,
account_number = form.account_number.data,
routing_number = form.routing_number.data,
currency = form.currency.data,
country = form.country.data,
account_holder_name = form.name.data)
logging.info('Added bank account to stripe, response = {}'.format(response))
current_user.fis.append(Fi(
institution = response['bank_name'],
account_number_last_4 = response['last4'],
bank_account_id = response['id'],
verification_type = Fi.RANDOM_DEPOSIT,
status = Fi.UNVERFIED,
time_updated = datetime.now(),
time_created = datetime.now(),
access_token = common.generate_fake_token(5)))
# current_user.fi.verification_type = Fi.RANDOM_DEPOSIT
current_app.db_session.add(current_user)
current_app.db_session.commit()
return redirect(url_for('.application_complete'))
else:
return render_template('onboarding/random_deposit.html', form = form)
@onboarding_bp.route('/start_account_verify_random_deposit', methods=['POST'])
@login_required
def start_account_verify_random_deposit():
fi_id = request.form['id']
print 'called start_account_verify_random_deposit...'
logging.info('Starting bank verification of fi id %s ' % fi_id)
session[constants.FI_ID_KEY] = fi_id
return redirect(url_for('.verify_account_random_deposit'))
@onboarding_bp.route('/verify_account_random_deposit', methods=['GET', 'POST'])
@login_required
def verify_account_random_deposit():
logging.info('verify_account_random_deposit called with id = %s' % session[constants.FI_ID_KEY])
form = RandomDepositVerifyAccountForm(request.form)
if form.validate_on_submit():
try:
if session[constants.FI_ID_KEY] is None:
raise Exception('Missing information for bank account verification')
bank_account_id = None
for fi in current_user.fis:
print 'fi id = ',fi.id
if fi.id == int(session[constants.FI_ID_KEY]):
print 'found match...'
bank_account_id = fi.bank_account_id
break
logging.info('About to verify bank account %s, customer id %s deposit1 %s deposit2 %s '
% (bank_account_id, current_user.stripe_customer_id, form.deposit1.data, form.deposit2.data))
response = current_app.stripe_client.verify_customer_bank(
current_user.stripe_customer_id,
bank_account_id,
form.deposit1.data,
form.deposit2.data)
logging.info('Verified bank account, response = %s', response)
fi.status = Fi.VERIFIED
fi.time_updated = datetime.now()
current_app.db_session.add(fi)
current_app.db_session.commit()
flash('Bank account has been verified')
return redirect(url_for('.account'))
except Exception as e:
logging.error('failed to verify_account_random_deposit, exception %s' % e)
flash('Amounts do not match. Please try again')
return render_template('onboarding/verify_account_random_deposit.html', form=form)
else:
return render_template('onboarding/verify_account_random_deposit.html', form=form)
@onboarding_bp.route('/application_complete', methods=['GET'])
@login_required
def application_complete():
return render_template('onboarding/success.html')
def exchange_public_token(public_token, account_id):
Client.config({
'url': 'https://tartan.plaid.com'
})
client = Client(
client_id=current_app.config['CLIENT_ID'],
secret=current_app.config['CLIENT_SECRET'])
#exchange token
response = client.exchange_token(public_token)
print 'token exchange response = %s' % client.access_token
pprint(client)
return {
'access_token': client.access_token,
'stripe_bank_account_token': client.stripe_bank_account_token
}
def exchange_token(public_token, account_id):
payload = {
'client_id':current_app.config['CLIENT_ID'],
'secret':current_app.config['CLIENT_SECRET'],
'public_token':public_token,
'account_id':account_id
}
print 'payload ',json.dumps(payload)
response = requests.post('https://tartan.plaid.com/exchange_token', data=payload)
# print 'response = ',response.text, 'code = ',response.status_code
if response.status_code == requests.codes.ok:
return response.text
else:
raise Exception('Failed to exchange token')
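# Illustrative sketch (an assumption, not taken from Plaid documentation) of
# the JSON body that add_bank() above expects exchange_token() to return:
#
#     {
#         "access_token": "...",
#         "stripe_bank_account_token": "btok_...",
#         "account_id": "..."
#     }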
def get_bank_info(bank_account_id):
if len(current_user.fis) == 0:
return
Client.config({'url': 'https://tartan.plaid.com'})
client = Client(
client_id=current_app.config['CLIENT_ID'],
secret=current_app.config['CLIENT_SECRET'],
access_token=current_user.fis[0].access_token)
response = client.auth_get().json()
print response
ai = {}
for account in response['accounts']:
if account['_id'] == bank_account_id:
ai['available_balance'] = account['balance']['available']
ai['current_balance'] = account['balance']['current']
ai['subtype'] = account['subtype']
ai['subtype_name'] = account['meta']['name']
ai['account_number_last_4'] = account['meta']['number']
ai['institution_type'] = account['institution_type']
return ai
def has_entered_personal_information(user):
if user.ssn == None or user.dob == None or user.driver_license_number == None:
return False
for address in user.addresses:
if address.type == Address.INDIVIDUAL:
return True
return False
def has_entered_employer_information(user):
if user.employer_name == None or user.employer_phone_number == None:
return False
for address in user.addresses:
if address.type == Address.EMPLOYER:
return True
return False
# curl https://tartan.plaid.com/exchange_token \
# -d client_id="57bbc58566710877408d093e" \
# -d secret="0f3e8ecc989e5e6ed776b732d76161" \
# -d public_token="304cb58348ae917b3afe2b430a45b87744ffd1884a9fae31ba87869fe1222983cd626d9c27a92ef92b64393fdccfadb41eec4abce649d0d974e70314964e04cf" \
# -d account_id="nban4wnPKEtnmEpaKzbYFYQvA7D7pnCaeDBMy"
|
|
#!/usr/bin/env python
from PyQt4 import QtGui, QtCore
from pandas import read_csv
import subprocess
import psutil
import time
import sys, os
from Views import ui_mainrefactor as mw
from poplerGUI import ui_logic_session as sesslogic
from poplerGUI import ui_logic_site as sitelogic
from poplerGUI import ui_logic_main as mainlogic
from poplerGUI import ui_logic_taxa as taxalogic
from poplerGUI import ui_logic_time as timelogic
from poplerGUI import ui_logic_obs as rawlogic
from poplerGUI import ui_logic_covar as covarlogic
from poplerGUI import ui_logic_climatesite as climsitelogic
from poplerGUI import ui_logic_widetolong as widetolonglogic
from poplerGUI import ui_logic_splitcolumn as splitcolumnlogic
from poplerGUI import ui_logic_replace as replacelogic
from poplerGUI import ui_logic_cbind as cbindlogic
from poplerGUI.logiclayer import class_userfacade as face
from poplerGUI import class_modelviewpandas as view
from poplerGUI import class_inputhandler as ini
from poplerGUI.logiclayer import class_helpers as hlp
from poplerGUI.logiclayer.datalayer.class_filehandles import Memento
from poplerGUI.logiclayer.datalayer import config as orm
rootpath = os.path.dirname(os.path.dirname( __file__ ))
metapath = os.path.join(rootpath, 'Cataloged_Data_Current_sorted.csv')
class UiMainWindow(QtGui.QMainWindow, mw.Ui_MainWindow):
'''
The main window class will serve to manage the display
of various dialog boxes, the facade class, model-viewer
tables, and menu actions.
'''
def __init__(self, parent=None):
super().__init__(parent)
# attributes
self.setupUi(self)
self.facade = face.Facade()
self._log = None
self.dsite = sitelogic.SiteDialog()
self.dsession = sesslogic.SessionDialog()
self.dmain = mainlogic.MainDialog()
self.dtaxa = taxalogic.TaxaDialog()
self.dtime = timelogic.TimeDialog()
self.draw = rawlogic.ObsDialog()
self.dcovar = covarlogic.CovarDialog()
self.dclimatesite = climsitelogic.ClimateSite()
self.dclimatesession = sesslogic.SessionDialog()
self.dwidetolong = widetolonglogic.WidetoLongDialog()
self.dsplitcolumn = splitcolumnlogic.SplitColumnDialog()
self.dreplacevalue = replacelogic.ReplaceValueDialog()
self.dcbind = cbindlogic.CbindDialog()
self.data_model = view.PandasTableModelEdit(None)
self.data_model.log_change.connect(self.write_to_log)
self.change_count = 0
# Actions
self.actionUndo.triggered.connect(self.undo_data_mod)
self.actionCombine_Columns.triggered.connect(
self.cbind_display)
self.actionReplace.triggered.connect(
self.replace_value_display)
self.actionConvert_Wide_to_Long.triggered.connect(
self.wide_to_long_display)
self.actionSplit_Column_By.triggered.connect(
self.split_column_display)
self.actionSiteTable.triggered.connect(self.site_display)
self.actionStart_Session.triggered.connect(
self.session_display)
self.actionEnd_Session.triggered.connect(
self.end_session)
self.actionMainTable.triggered.connect(self.main_display)
self.actionTaxaTable.triggered.connect(self.taxa_display)
self.actionTimeFormat.triggered.connect(self.time_display)
self.actionRawTable.triggered.connect(self.obs_display)
self.actionCovariates.triggered.connect(self.covar_display)
self.actionCommit.triggered.connect(self.commit_data)
self.actionClimateSiteTable.triggered.connect(
self.climate_site_display)
self.actionNew_Climate.triggered.connect(
self.climate_session_display)
self.mdiArea.addSubWindow(self.subwindow_2)
self.mdiArea.addSubWindow(self.subwindow_1)
# Custom Signals
self.dsite.site_unlocks.connect(self.site_complete_enable)
self.dwidetolong.update_data.connect(self.update_data_model)
self.dsplitcolumn.update_data.connect(self.update_data_model)
self.dreplacevalue.update_data.connect(self.update_data_model)
self.dcbind.update_data.connect(self.update_data_model)
self.dclimatesite.climatesite_unlocks.connect(
self.climate_site_complete_enabled)
self.dsession.raw_data_model.connect(
self.update_data_model)
self.dclimatesession.raw_data_model.connect(
self.update_data_model)
# Dialog boxes for user feedback
self.error = QtGui.QErrorMessage()
self.message = QtGui.QMessageBox
metadf = read_csv(metapath, encoding='iso-8859-11')
metamodel = view.PandasTableModel(
metadf[
[
'global_id', 'lter', 'title', 'site_metadata',
'temp_int'
]
]
)
self.tblViewMeta.setModel(metamodel)
self.tblViewMeta.resizeColumnsToContents()
self.tblViewRaw.horizontalHeader().sectionDoubleClicked.connect(
self.changeHorizontalHeader)
self.tblViewRaw.resizeColumnsToContents()
def update_data_view(self):
self.data_model = view.PandasTableModelEdit(None)
self.data_model.set_data(self.facade._data)
self.tblViewRaw.setModel(self.data_model)
def undo_data_mod(self):
if self.facade.data_caretaker._statelist:
self.facade.data_originator.restore_from_memento(
self.facade.data_caretaker.restore()
)
self.facade._data = self.facade.data_originator._data.copy()
self.update_data_view()
else:
self.error.showMessage(
'No further undo'
)
@QtCore.pyqtSlot(object)
def update_data_model(self, dataframe_state):
''' Update the data model and the facade instance in response to
changes made by other dialog boxes '''
self.change_count += 1
self.facade._data.fillna('NA', inplace=True)
new_dataframe_state = Memento(
self.facade._data.copy(),
'{}_{}'.format(dataframe_state, self.change_count)
)
self.facade.data_caretaker.save(new_dataframe_state)
self.facade.data_originator.restore_from_memento(
new_dataframe_state
)
self.update_data_view()
# Updating facade instances with dialog boxes
self.dsite.facade = self.facade
self.dclimatesite.facade = self.facade
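# Sketch of the memento flow implemented above (descriptive only): each call
# to update_data_model() snapshots facade._data into a Memento that the
# caretaker stacks, so undo_data_mod() can pop the most recent snapshot and
# restore facade._data to its previous state.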
@QtCore.pyqtSlot(object)
def write_to_log(self, dict_obj):
self.facade.create_log_record('changecell')
self._log = self.facade._tablelog['changecell']
hlp.write_column_to_log(
dict_obj, self._log, 'changecell')
@QtCore.pyqtSlot(object)
def update_webview(self, url):
print(url)
@QtCore.pyqtSlot(object)
def site_complete_enable(self):
'''
Method to enable actions that display dialog
boxes corresponding to different database tables
'''
self.actionMainTable.setEnabled(True)
self.actionTaxaTable.setEnabled(True)
self.actionTimeFormat.setEnabled(True)
self.actionRawTable.setEnabled(True)
self.actionCovariates.setEnabled(True)
self.update_data_model('Updating data')
def changeHorizontalHeader(self, index):
''' method to update data model when column headers
are changed '''
oldHeader = self.facade._data.iloc[:,index].name
newHeader, ok = QtGui.QInputDialog.getText(
self, 'Input', 'New Column Label:')
if ok:
self.facade._data.rename(
columns={oldHeader:newHeader}, inplace=True)
self.facade.create_log_record('changecolumn')
self._log = self.facade._tablelog['changecolumn']
hlp.write_column_to_log(
{
'column_changes':
{
oldHeader: newHeader}
},
self._log, 'changecolumn'
)
self.update_data_model('header_changes')
def cbind_display(self):
''' Displays dialog box to combine columns '''
self.dcbind.show()
self.dcbind.facade = self.facade
def replace_value_display(self):
''' Displays dialog box to replace a value '''
self.dreplacevalue.show()
self.dreplacevalue.facade = self.facade
def split_column_display(self):
''' Displays dialog box to split a column '''
self.dsplitcolumn.show()
self.dsplitcolumn.facade = self.facade
def wide_to_long_display(self):
''' Displays dialog box to melt data '''
self.dwidetolong.show()
self.dwidetolong.facade = self.facade
def site_display(self):
''' Displays the Site Dialog box'''
self.dsite.show()
self.dsite.facade = self.facade
def addsite_display(self):
''' Display dialog box for adding site column'''
self.daddsite.show()
self.daddsite.facade = self.facade
def session_display(self):
''' Displays the Session Dialog box'''
self.dsession.show()
self.dsession.facade = self.facade
def main_display(self):
''' Displays main dialog box'''
self.dmain.facade = self.facade
self.dmain.show()
def taxa_display(self):
''' Display the Taxa Dialog box'''
self.dtaxa.facade = self.facade
self.dtaxa.show()
def time_display(self):
''' Display the Time Dialog box'''
self.dtime.facade = self.facade
self.dtime.show()
def obs_display(self):
''' Display the Raw Obs Dialog box'''
self.draw.facade = self.facade
self.draw.show()
def covar_display(self):
'''Display the Covariates Dialog box'''
self.dcovar.facade = self.facade
self.dcovar.show()
def commit_data(self):
''' Method to call the upload to database command '''
commithandle = ini.InputHandler(
name='updateinfo', tablename='updatetable')
self.facade.input_register(commithandle)
try:
self.facade.push_merged_data()
self.actionCommit.setEnabled(False)
self.message.about(
self, 'Status', 'Database transaction complete')
except Exception as e:
print(str(e))
self.facade._tablelog['project_table'].debug(str(e))
self.error.showMessage(
'Database transaction error: ' + str(e) +
'. May need to alter site abbreviations.')
raise ValueError(str(e))
# Below are dialog boxes and logic that relate to Climate data
def climate_site_display(self):
''' Displays the Climate Site Dialog box'''
self.dclimatesite.show()
self.dclimatesite.facade = self.facade
@QtCore.pyqtSlot(object)
def climate_site_complete_enabled(self, datamod2):
self.actionClimateRawTable.setEnabled(True)
self.update_data_model('climate_site_update')
def climate_session_display(self):
''' Displays the Climate session dialog box'''
self.dclimatesession.show()
self.dclimatesession.facade = self.facade
self.actionSiteTable.setEnabled(False)
self.actionClimateSiteTable.setEnabled(True)
metapath = (
str(os.getcwd()) +
'/Datasets_manual_test/meta_climate_test.csv')
metadf = read_csv(metapath, encoding='iso-8859-11')
metamodel = view.PandasTableModel(metadf)
self.tblViewMeta.setModel(metamodel)
def end_session(self):
orm.conn.close()
subprocess.call(
"python" + " poplerGUI_run_main.py", shell=True)
self.close()
try:
PROCNAME = "python.exe"
for proc in psutil.process_iter():
if proc.name() == PROCNAME:
proc.kill()
except:
pass
|
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: runtime.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='runtime.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n\rruntime.proto\"\xce\x03\n\x0bRuntimeData\x12\'\n\x0brobot_state\x18\x01 \x01(\x0e\x32\x12.RuntimeData.State\x12,\n\x0bsensor_data\x18\x02 \x03(\x0b\x32\x17.RuntimeData.SensorData\x1a\x65\n\nParamValue\x12\r\n\x05param\x18\x01 \x01(\t\x12\x15\n\x0b\x66loat_value\x18\x02 \x01(\x02H\x00\x12\x13\n\tint_value\x18\x03 \x01(\x05H\x00\x12\x14\n\nbool_value\x18\x04 \x01(\x08H\x00\x42\x06\n\x04kind\x1a\x97\x01\n\nSensorData\x12\x13\n\x0b\x64\x65vice_type\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65vice_name\x18\x02 \x01(\t\x12\x0b\n\x03uid\x18\x04 \x01(\t\x12\x17\n\x0fint_device_type\x18\x05 \x01(\r\x12,\n\x0bparam_value\x18\x06 \x03(\x0b\x32\x17.RuntimeData.ParamValueJ\x04\x08\x03\x10\x04R\x05value\"g\n\x05State\x12\x13\n\x0fSTUDENT_CRASHED\x10\x00\x12\x13\n\x0fSTUDENT_RUNNING\x10\x01\x12\x13\n\x0fSTUDENT_STOPPED\x10\x02\x12\n\n\x06TELEOP\x10\x03\x12\x08\n\x04\x41UTO\x10\x04\x12\t\n\x05\x45STOP\x10\x05\x62\x06proto3'
)
_RUNTIMEDATA_STATE = _descriptor.EnumDescriptor(
name='State',
full_name='RuntimeData.State',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STUDENT_CRASHED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STUDENT_RUNNING', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STUDENT_STOPPED', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TELEOP', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUTO', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ESTOP', index=5, number=5,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=377,
serialized_end=480,
)
_sym_db.RegisterEnumDescriptor(_RUNTIMEDATA_STATE)
_RUNTIMEDATA_PARAMVALUE = _descriptor.Descriptor(
name='ParamValue',
full_name='RuntimeData.ParamValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='param', full_name='RuntimeData.ParamValue.param', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='float_value', full_name='RuntimeData.ParamValue.float_value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int_value', full_name='RuntimeData.ParamValue.int_value', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bool_value', full_name='RuntimeData.ParamValue.bool_value', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='kind', full_name='RuntimeData.ParamValue.kind',
index=0, containing_type=None, fields=[]),
],
serialized_start=120,
serialized_end=221,
)
_RUNTIMEDATA_SENSORDATA = _descriptor.Descriptor(
name='SensorData',
full_name='RuntimeData.SensorData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='device_type', full_name='RuntimeData.SensorData.device_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_name', full_name='RuntimeData.SensorData.device_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uid', full_name='RuntimeData.SensorData.uid', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int_device_type', full_name='RuntimeData.SensorData.int_device_type', index=3,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='param_value', full_name='RuntimeData.SensorData.param_value', index=4,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=224,
serialized_end=375,
)
_RUNTIMEDATA = _descriptor.Descriptor(
name='RuntimeData',
full_name='RuntimeData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='robot_state', full_name='RuntimeData.robot_state', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sensor_data', full_name='RuntimeData.sensor_data', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_RUNTIMEDATA_PARAMVALUE, _RUNTIMEDATA_SENSORDATA, ],
enum_types=[
_RUNTIMEDATA_STATE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=18,
serialized_end=480,
)
_RUNTIMEDATA_PARAMVALUE.containing_type = _RUNTIMEDATA
_RUNTIMEDATA_PARAMVALUE.oneofs_by_name['kind'].fields.append(
_RUNTIMEDATA_PARAMVALUE.fields_by_name['float_value'])
_RUNTIMEDATA_PARAMVALUE.fields_by_name['float_value'].containing_oneof = _RUNTIMEDATA_PARAMVALUE.oneofs_by_name['kind']
_RUNTIMEDATA_PARAMVALUE.oneofs_by_name['kind'].fields.append(
_RUNTIMEDATA_PARAMVALUE.fields_by_name['int_value'])
_RUNTIMEDATA_PARAMVALUE.fields_by_name['int_value'].containing_oneof = _RUNTIMEDATA_PARAMVALUE.oneofs_by_name['kind']
_RUNTIMEDATA_PARAMVALUE.oneofs_by_name['kind'].fields.append(
_RUNTIMEDATA_PARAMVALUE.fields_by_name['bool_value'])
_RUNTIMEDATA_PARAMVALUE.fields_by_name['bool_value'].containing_oneof = _RUNTIMEDATA_PARAMVALUE.oneofs_by_name['kind']
_RUNTIMEDATA_SENSORDATA.fields_by_name['param_value'].message_type = _RUNTIMEDATA_PARAMVALUE
_RUNTIMEDATA_SENSORDATA.containing_type = _RUNTIMEDATA
_RUNTIMEDATA.fields_by_name['robot_state'].enum_type = _RUNTIMEDATA_STATE
_RUNTIMEDATA.fields_by_name['sensor_data'].message_type = _RUNTIMEDATA_SENSORDATA
_RUNTIMEDATA_STATE.containing_type = _RUNTIMEDATA
DESCRIPTOR.message_types_by_name['RuntimeData'] = _RUNTIMEDATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RuntimeData = _reflection.GeneratedProtocolMessageType('RuntimeData', (_message.Message,), {
'ParamValue' : _reflection.GeneratedProtocolMessageType('ParamValue', (_message.Message,), {
'DESCRIPTOR' : _RUNTIMEDATA_PARAMVALUE,
'__module__' : 'runtime_pb2'
# @@protoc_insertion_point(class_scope:RuntimeData.ParamValue)
})
,
'SensorData' : _reflection.GeneratedProtocolMessageType('SensorData', (_message.Message,), {
'DESCRIPTOR' : _RUNTIMEDATA_SENSORDATA,
'__module__' : 'runtime_pb2'
# @@protoc_insertion_point(class_scope:RuntimeData.SensorData)
})
,
'DESCRIPTOR' : _RUNTIMEDATA,
'__module__' : 'runtime_pb2'
# @@protoc_insertion_point(class_scope:RuntimeData)
})
_sym_db.RegisterMessage(RuntimeData)
_sym_db.RegisterMessage(RuntimeData.ParamValue)
_sym_db.RegisterMessage(RuntimeData.SensorData)
# @@protoc_insertion_point(module_scope)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Test from the Marvin - Testing in Python wiki
# All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase
# Import Integration Libraries
# base - contains all resources as entities and defines create, delete,
# list operations on them
from marvin.lib.base import (Account,
VirtualMachine,
ServiceOffering,
Template,
DiskOffering,
Volume,
Host,
GuestOs)
# utils - utility classes for common cleanup, external library wrappers etc
from marvin.lib.utils import cleanup_resources, get_hypervisor_type, validateList
# common - commonly used methods for all tests are listed here
from marvin.lib.common import get_zone, get_domain, get_pod
from marvin.sshClient import SshClient
from marvin.codes import FAILED
from nose.plugins.attrib import attr
import xml.etree.ElementTree as ET
import logging
class Templates:
"""Test data for templates
"""
def __init__(self):
self.templates = {
"kvmvirtioscsi": {
"kvm": {
"name": "tiny-kvm-scsi",
"displaytext": "virtio-scsi kvm",
"format": "qcow2",
"hypervisor": "kvm",
"ostype": "Other PV Virtio-SCSI (64-bit)",
"url": "http://dl.openvm.eu/cloudstack/ubuntu/x86_64/ubuntu-16.04-kvm.qcow2.bz2",
"requireshvm": True,
"ispublic": True,
"passwordenabled": True
}
}
}
class TestDeployVirtioSCSIVM(cloudstackTestCase):
"""
Test deploy a kvm virtio scsi template
"""
@classmethod
def setUpClass(cls):
cls.logger = logging.getLogger('TestDeployVirtioSCSIVM')
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
testClient = super(TestDeployVirtioSCSIVM, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
cls.hypervisorNotSupported = False
cls.hypervisor = testClient.getHypervisorInfo()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.pod = get_pod(cls.apiclient, cls.zone.id)
cls.services['mode'] = cls.zone.networktype
if cls.hypervisor.lower() not in ['kvm']:
cls.hypervisorNotSupported = True
return
kvmvirtioscsi = Templates().templates["kvmvirtioscsi"]
cls.template = Template.register(
cls.apiclient,
kvmvirtioscsi[cls.hypervisor.lower()],
cls.zone.id,
hypervisor=cls.hypervisor.lower(),
domainid=cls.domain.id)
        if cls.template == FAILED:
            assert False, "get_template() failed to return template"
        cls.template.download(cls.apiclient)
cls.services["domainid"] = cls.domain.id
cls.services["small"]["zoneid"] = cls.zone.id
cls.services["zoneid"] = cls.zone.id
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["small"]
)
cls.sparse_disk_offering = DiskOffering.create(
cls.apiclient,
cls.services["sparse_disk_offering"]
)
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services["small"],
templateid=cls.template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
diskofferingid=cls.sparse_disk_offering.id,
mode=cls.zone.networktype
)
hosts = Host.list(cls.apiclient, id=cls.virtual_machine.hostid)
if len(hosts) != 1:
assert False, "Could not find host with id " + cls.virtual_machine.hostid
cls.vmhost = hosts[0]
# Stop VM to reset password
cls.virtual_machine.stop(cls.apiclient)
password = cls.virtual_machine.resetPassword(cls.apiclient)
cls.virtual_machine.username = "ubuntu"
cls.virtual_machine.password = password
# Start VM after password reset
cls.virtual_machine.start(cls.apiclient)
cls.cleanup = [
cls.template,
cls.service_offering,
cls.sparse_disk_offering,
cls.account
]
@classmethod
def tearDownClass(cls):
try:
cls.apiclient = super(
TestDeployVirtioSCSIVM,
cls
).getClsTestClient().getApiClient()
# Cleanup resources used
cleanup_resources(cls.apiclient, cls.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
def verifyVirshState(self, diskcount):
host = self.vmhost.ipaddress
instancename = self.virtual_machine.instancename
virshxml = self.getVirshXML(host, instancename)
root = ET.fromstring(virshxml)
scsis = root.findall("./devices/controller[@type='scsi']/alias[@name='scsi0']/..")
self.assertEqual(len(scsis), 1, "SCSI controller not found")
scsiindex = scsis[0].get('index')
self.assertNotEqual(scsiindex, None, "Could not find index of SCSI controller")
# find all scsi disks
disks = root.findall("./devices/disk[@device='disk']/target[@bus='scsi']/..")
self.assertEqual(len(disks), diskcount, "Could not find expected number of disks")
for disk in disks:
for child in disk:
if child.tag.lower() == "target":
dev = child.get("dev")
                    self.assertTrue(dev is not None and dev.startswith("sd"), "disk dev is invalid")
elif child.tag.lower() == "address":
con = child.get("controller")
self.assertEqual(con, scsiindex, "disk controller not equal to SCSI " \
"controller index")
elif child.tag.lower() == "driver":
discard = child.get("discard")
if discard: # may not be defined by older qemu/libvirt
self.assertEqual(discard, "unmap", "discard settings not unmap")
def verifyGuestState(self, diskcount):
ssh = self.virtual_machine.get_ssh_client(reconnect=True)
output = ssh.execute("lspci | grep \"Virtio SCSI\"")
self.assertTrue(len(output) > 0, "Could not find virtio scsi controller")
output = ssh.execute("lsblk -rS | grep sd")
for disk in output:
self.logger.debug("disk " + disk + " found")
self.assertEqual(len(output), diskcount,
"Could not find appropriate number of scsi disks in guest")
def getVirshXML(self, host, instancename):
if host is None:
self.logger.debug("getVirshXML: host is none")
return ""
else:
self.logger.debug("host is: " + host)
if instancename is None:
self.logger.debug("getVirshXML: instancename is none")
return ""
else:
self.logger.debug("instancename is: " + instancename)
sshc = SshClient(
host=host,
port=self.services['configurableData']['host']["publicport"],
user=self.hostConfig['username'],
passwd=self.hostConfig['password'])
ssh = sshc.ssh
chan = ssh.get_transport().open_session()
chan.exec_command("virsh dumpxml " + instancename)
stdout = ""
while True:
b = chan.recv(10000)
if len(b) == 0:
break
stdout += b
stderr = ""
while True:
b = chan.recv_stderr(10000)
if len(b) == 0:
break
stderr += b
xstatus = chan.recv_exit_status()
chan.close()
if xstatus != 0:
raise CommandNonzeroException(xstatus, stderr)
# rely on sshClient to close ssh
self.logger.debug("xml is: \n\n%s\n\n" % (stdout))
return stdout
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_01_verify_libvirt(self):
"""Test that libvirt properly created domain with scsi controller
"""
# Validate virsh dumpxml
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.verifyVirshState(2)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_02_verify_libvirt_after_restart(self):
""" Verify that libvirt settings are as expected after a VM stop / start
"""
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.virtual_machine.stop(self.apiclient)
self.virtual_machine.start(self.apiclient)
self.verifyVirshState(2)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_03_verify_libvirt_attach_disk(self):
""" Verify that libvirt settings are expected after a disk add
"""
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.volume = Volume.create(
self.apiclient,
self.services,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.sparse_disk_offering.id
)
self.virtual_machine.attach_volume(
self.apiclient,
self.volume
)
self.verifyVirshState(3)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_04_verify_guest_lspci(self):
""" Verify that guest sees scsi controller and disks
"""
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.verifyGuestState(3)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_05_change_vm_ostype_restart(self):
""" Update os type to Ubuntu, change vm details rootdiskController
explicitly to scsi.
"""
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.virtual_machine.stop(self.apiclient)
ostypes = GuestOs.listMapping(self.apiclient, hypervisor="kvm")
self.assertTrue(len(ostypes) > 0)
ostypeid = None
for ostype in ostypes:
if ostype.osdisplayname == "Ubuntu 16.04 (64-bit)":
ostypeid = ostype.ostypeid
break
self.assertIsNotNone(ostypeid,
"Could not find ostypeid for Ubuntu 16.0.4 (64-bit) mapped to kvm")
self.virtual_machine.update(self.apiclient, ostypeid=ostypeid,
details=[{"rootDiskController": "scsi"}])
self.virtual_machine.start(self.apiclient)
self.verifyVirshState(3)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
def test_06_verify_guest_lspci_again(self):
""" Verify that guest sees scsi controller and disks after switching ostype and rdc
"""
if self.hypervisorNotSupported:
self.skipTest("Hypervisor not supported")
self.verifyGuestState(3)
class CommandNonzeroException(Exception):
def __init__(self, code, stderr):
self.code = code
self.stderr = stderr
def __str__(self):
return "Status code %d: %s" % (self.code, self.stderr)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class WrapFunctionTest(test.TestCase):
def testDocString(self):
def f(x, do_add):
v = variables.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with ops.control_dependencies([op]):
return v.read_value()
f_add = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32), True])
self.assertAllEqual(f_add(1.0), 6.0)
self.assertAllEqual(f_add(1.0), 7.0)
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32), False])
self.assertAllEqual(f_sub(1.0), 4.0)
self.assertAllEqual(f_sub(1.0), 3.0)
def testPrune(self):
x_in = []
x_out = []
def f(x, y):
x_in.append(x)
xx = x * x
x_out.append(xx)
return xx, 2 * y*y
f_wrapped = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32)] * 2)
f_pruned = f_wrapped.prune(x_in[0], [x_out[0]])
self.assertAllEqual(f_pruned(ops.convert_to_tensor(2.0)), [4.0])
def _assert_single_captured_variable_argument(self, graph_def):
# The single FunctionDef should have one argument, a captured variable
function_def, = graph_def.library.function
self.assertLen(function_def.signature.input_arg, 1)
function_arg, = function_def.signature.input_arg
self.assertEqual(dtypes.resource, dtypes.as_dtype(function_arg.type))
def testVariableLifting(self):
save_prefix = os.path.join(self.get_temp_dir(), 'meta_graph_test')
export_graph = ops.Graph()
with export_graph.as_default():
v = variables.Variable(1.)
array_ops.identity(v + 1., name='output')
saver = saver_lib.Saver([v])
with self.test_session() as session:
session.run(v.initializer)
saver.save(session, save_prefix)
def importer():
saver_lib.import_meta_graph(save_prefix + '.meta')
return ops.get_default_graph().as_graph_element('output:0')
wrapped = wrap_function.wrap_function(importer, [])
lifted_variables = list(wrapped.graph.variables)
self.assertLen(lifted_variables, 1)
initializer = wrapped.prune(
[], wrapped.graph.as_graph_element(v.initializer.name))
self.assertEqual(lifted_variables, list(initializer.graph.variables))
self.assertEqual(initializer.graph.external_captures,
wrapped.graph.external_captures)
@def_function.function
def wraps_initializer():
initializer()
wraps_initializer()
self.assertEqual(1., lifted_variables[0].numpy())
wrapped_initializer_graphdef = (
wraps_initializer.get_concrete_function().graph.as_graph_def())
self._assert_single_captured_variable_argument(wrapped_initializer_graphdef)
@def_function.function
def wraps_wrapped():
return wrapped()
# Verify that the original graph also has the correct signature.
wrapped_wrapped_graphdef = (
wraps_wrapped.get_concrete_function().graph.as_graph_def())
self._assert_single_captured_variable_argument(wrapped_wrapped_graphdef)
# Now check that the graph runs wrapped, from eager, and when pruned.
self.assertAllEqual(wraps_wrapped().numpy(),
lifted_variables[0].numpy() + 1.)
self.assertAllEqual(wrapped().numpy(), lifted_variables[0].numpy() + 1.)
pruned = wrapped.prune([], wrapped.graph.as_graph_element('output:0'))
self.assertAllEqual(wrapped().numpy(), pruned().numpy())
def testNoArguments(self):
def f():
return constant_op.constant(1.)
f_wrapped = wrap_function.wrap_function(f, [])
self.assertAllEqual(1.0, f_wrapped())
def testPruneCaptures(self):
v1 = variables.Variable(2.)
def f():
v2 = variables.Variable(3.)
return array_ops.identity(v1 * v2 * constant_op.constant(1.), 'fetch')
f_wrapped = wrap_function.wrap_function(f, [])
self.assertAllEqual(6.0, f_wrapped())
# Test pruning directly on the inputs
pruned = f_wrapped.prune(
feeds=f_wrapped.inputs,
fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))
self.assertAllEqual(6.0, pruned())
# Test pruning with no inputs
pruned = f_wrapped.prune(
feeds=(),
fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))
self.assertAllEqual(6.0, pruned())
def testCollectionsIsolation(self):
v1 = variables.Variable(2.)
v2_holder = []
def f():
v2 = variables.Variable(3.)
v2_holder.append(v2)
ops.add_to_collection(ops.GraphKeys.LOSSES, v2 * constant_op.constant(3.))
return array_ops.identity(v1 * v2 * constant_op.constant(1.), 'fetch')
f_wrapped = wrap_function.wrap_function(f, [])
self.assertAllEqual(6.0, f_wrapped())
self.assertEqual(
len(f_wrapped.graph.get_collection(ops.GraphKeys.LOSSES)), 1)
f_var_collection = f_wrapped.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(f_var_collection), 1)
self.assertIs(f_var_collection[0], v2_holder[0])
v3_holder = []
def g():
v3 = variables.Variable(4.)
v3_holder.append(v3)
ops.add_to_collection(ops.GraphKeys.LOSSES, v3 * constant_op.constant(3.))
return array_ops.identity(v1 * v3 * constant_op.constant(1.), 'fetch')
g_wrapped = wrap_function.wrap_function(g, [])
self.assertAllEqual(8.0, g_wrapped())
self.assertEqual(
len(g_wrapped.graph.get_collection(ops.GraphKeys.LOSSES)), 1)
g_var_collection = g_wrapped.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(g_var_collection), 1)
self.assertIs(g_var_collection[0], v3_holder[0])
# Both have only one value, and their values aren't equal. So no sharing.
self.assertNotEqual(g_wrapped.graph.get_collection(ops.GraphKeys.LOSSES),
f_wrapped.graph.get_collection(ops.GraphKeys.LOSSES))
def testGradientsOfPrune(self):
v1 = variables.Variable(2.)
v2_holder = []
def f(z):
v2 = variables.Variable(3.)
v2_holder.append(v2)
return array_ops.identity(v1 * v2 * z, 'fetch')
f_wrapped = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtype=dtypes.float32)])
x = constant_op.constant(1.)
with backprop.GradientTape() as tape:
tape.watch(x)
out = f_wrapped(x)
grads = tape.gradient(out, [x, v1, v2_holder[0]])
self.assertAllEqual(6.0, out)
self.assertAllEqual([6.0, 3.0, 2.0], grads)
pruned = f_wrapped.prune(
feeds=f_wrapped.inputs,
fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))
x = constant_op.constant(1.)
with backprop.GradientTape() as tape:
tape.watch(x)
out = pruned(x)
grads = tape.gradient(out, [x, v1, v2_holder[0]])
self.assertAllEqual(6.0, out)
self.assertAllEqual([6.0, 3.0, 2.0], grads)
def testPruneOperations(self):
v = variables.Variable(0)
def f():
v.assign_add(1, name='increment', read_value=False)
f_wrapped = wrap_function.wrap_function(f, [])
pruned = f_wrapped.prune(
feeds=(),
fetches=(f_wrapped.graph.get_operation_by_name('increment'),))
self.assertEqual((None,), pruned())
self.assertEqual(1, self.evaluate(v))
del f, f_wrapped
def f1():
v.assign_add(
array_ops.placeholder(shape=[], dtype=dtypes.int32, name='step'),
name='increment', read_value=False)
return constant_op.constant(1, name='other')
f_wrapped = wrap_function.wrap_function(f1, [])
increments = f_wrapped.prune(
feeds=(f_wrapped.graph.get_tensor_by_name('step:0')),
fetches=(f_wrapped.graph.get_operation_by_name('increment'),
f_wrapped.graph.get_tensor_by_name('other:0')))
first_output, second_output = increments(constant_op.constant(2))
self.assertEqual(['step:0', 'increment/resource:0'],
[t.name for t in increments.inputs])
self.assertIs(None, first_output)
self.assertEqual(1, second_output.numpy())
self.assertEqual(3, v.numpy())
does_not_increment = f_wrapped.prune(
feeds=(f_wrapped.graph.get_tensor_by_name('step:0')),
fetches=f_wrapped.graph.get_tensor_by_name('other:0'))
self.assertEqual(1, does_not_increment(constant_op.constant(3)).numpy())
self.assertEqual(3, v.numpy())
def testPruneStatefulOpsFromWrappedFunc(self):
v0 = variables.Variable(0)
v1 = variables.Variable(0)
    # When we wrap a function, we expect it to be executed with `tf.Graph`
# rules: it's allowed to prune all ops that are not in transitive fanin of
# the fetches.
def f(x):
v0.assign_add(1, name='increment_v0')
v1.assign_add(1, name='increment_v1')
return x
f_wrapped = wrap_function.wrap_function(f, [1])
self.assertEqual(1, f_wrapped().numpy())
self.assertEqual(0, v0.numpy())
self.assertEqual(0, v1.numpy())
f_wrapped_with_name = wrap_function.wrap_function(f, [2], name='func')
self.assertEqual(2, f_wrapped_with_name().numpy())
self.assertEqual(0, v0.numpy())
self.assertEqual(0, v1.numpy())
def test_operation_returned(self):
v = variables.Variable(0)
def f():
v.assign(1, read_value=False, name='assign_to_v')
f_wrapped = wrap_function.wrap_function(f, [])
operation_to_fetch = f_wrapped.graph.get_operation_by_name('assign_to_v')
f_pruned = f_wrapped.prune(
[], operation_to_fetch)
self.assertEqual(
['assign_to_v'],
[operation.name for operation in f_pruned.graph.control_outputs])
self.assertEqual(0, v.numpy())
f_pruned()
self.assertEqual(1, v.numpy())
f_wrapped.prune([], 'assign_to_v')()
f_wrapped.prune([], meta_graph_pb2.TensorInfo(name='assign_to_v'))()
def test_function_from_graph_def(self):
@def_function.function
def make_graph_def(x):
return x + 1.
original_func_graph = make_graph_def.get_concrete_function(
tensor_spec.TensorSpec([None, 2], dtypes.float32)).graph
graph_def = original_func_graph.as_graph_def()
revived_function = wrap_function.function_from_graph_def(
graph_def, inputs=original_func_graph.inputs[0].name,
outputs=original_func_graph.outputs[0].name)
self.assertEqual(2., revived_function(constant_op.constant(1.)).numpy())
def test_create_variables_with_same_name(self):
def f():
v1 = variables.Variable(0, name='v')
v2 = variables.Variable(1, name='v')
return v1, v2
f_wrapped = wrap_function.wrap_function(f, [])
self.assertDictEqual(
{'v:0': 0, 'v_1:0': 1}, # assert that variable names are uniquified
{v.name: v.numpy()
for v in f_wrapped._variable_holder.variables.values()})
# Uniquification should reset in separate calls to wrap_function.
def f2():
v1 = variables.Variable(3, name='v')
v2 = variables.Variable(4, name='v')
return v1, v2
f_wrapped_2 = wrap_function.wrap_function(f2, [])
self.assertDictEqual(
{'v:0': 3, 'v_1:0': 4},
{v.name: v.numpy()
for v in f_wrapped_2._variable_holder.variables.values()})
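# A minimal sketch, not invoked by the tests above, of the wrap-then-prune
# workflow they exercise: wrap a 1.x-style graph function, then prune the
# wrapped graph down to a named fetch tensor.
def _wrap_and_prune_sketch():
  def f(x):
    return array_ops.identity(x * x, name='fetch')
  f_wrapped = wrap_function.wrap_function(
      f, [tensor_spec.TensorSpec((), dtypes.float32)])
  # Pruning to a named tensor yields a callable concrete function.
  return f_wrapped.prune(
      feeds=f_wrapped.inputs,
      fetches=f_wrapped.graph.get_tensor_by_name('fetch:0'))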
class WrappedGraphTest(test.TestCase):
def testAddFunction(self):
def fn(x):
v = variables.Variable(3, name='v')
v2 = variable_scope.get_variable(
'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32)
return v + v2 + x
with self.cached_session() as sess:
result = fn(constant_op.constant(5))
sess.run(variables.global_variables_initializer())
expected = sess.run(result)
g = wrap_function.WrappedGraph()
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
wrapped_fn = g.wrap_function(fn, signature)
self.assertEqual(expected, wrapped_fn(constant_op.constant(5)).numpy())
def testCollections(self):
def fn(x):
v = variables.VariableV1(3, name='v', trainable=False, collections=['a'])
v2 = variable_scope.get_variable(
'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32,
collections=['a', 'b'])
return v + v2 + x
def assert_collections(graph):
self.assertLen(graph.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES), 1)
self.assertLen(graph.get_collection('a'), 2)
self.assertLen(graph.get_collection('b'), 1)
g = wrap_function.WrappedGraph()
g.wrap_function(fn, [tensor_spec.TensorSpec([], dtypes.int32)])
assert_collections(g.graph)
def assert_fn():
assert_collections(ops.get_default_graph())
return 1 # Return is required
# Assert that collections are accessible within a wrapped function.
g.wrap_function(assert_fn, [])
def testShareVariablesSameGraph(self):
def add_v1(x):
with variable_scope.variable_scope(
'reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(3), shape=[], dtype=dtypes.int32)
return v + x
def subtract_v1(x):
with variable_scope.variable_scope(
'reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32)
return v - x
def different_variable_fn_v1(x):
with variable_scope.variable_scope(
'no_reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(5), shape=[], dtype=dtypes.int32)
return v * x
def increment_variable_v1(x):
with variable_scope.variable_scope(
'reuse', reuse=variable_scope.AUTO_REUSE):
v = variable_scope.get_variable(
'v', initializer=init_ops.Constant(6), shape=[], dtype=dtypes.int32)
return v.assign_add(x)
g = wrap_function.WrappedGraph()
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
add = g.wrap_function(add_v1, signature)
subtract = g.wrap_function(subtract_v1, signature)
different_variable_fn = g.wrap_function(different_variable_fn_v1, signature)
increment_variable = g.wrap_function(increment_variable_v1, signature)
self.assertEqual(10, add(constant_op.constant(7)).numpy())
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
# The shared variable has a starting value of 3 because add_v1 was wrapped
# first.
self.assertEqual(-4, subtract(constant_op.constant(7)).numpy())
self.assertEqual(10, increment_variable(constant_op.constant(7)).numpy())
    # Check that the variable update from increment_variable is visible.
self.assertEqual(17, add(constant_op.constant(7)).numpy())
self.assertEqual(3, subtract(constant_op.constant(7)).numpy())
# Sanity check - result from this function shouldn't change.
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
self.assertAllEqual({'reuse/v', 'no_reuse/v'}, set(g.variables.keys()))
def testShareVariablesDifferentGraphs(self):
def add_v1(x):
v = variables.Variable(3, name='v')
return v + x
def subtract_v1(x):
v = variables.Variable(4, name='v')
return v - x
def different_variable_fn_v1(x):
with ops.name_scope('different_scope'):
v = variables.Variable(5, name='v')
return v * x
def increment_variable_v1(x):
v = variables.Variable(6, name='v')
return v.assign_add(x)
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
vh = wrap_function.VariableHolder(share_variables=True)
new_graph = lambda: wrap_function.WrappedGraph(variable_holder=vh)
add = new_graph().wrap_function(add_v1, signature)
subtract = new_graph().wrap_function(subtract_v1, signature)
different_variable_fn = new_graph().wrap_function(
different_variable_fn_v1, signature)
increment_variable = new_graph().wrap_function(
increment_variable_v1, signature)
self.assertEqual(10, add(constant_op.constant(7)).numpy())
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
# Because the variable in add_v1 was created first, its starting value is 3
# instead of the values defined in subtract_v1 or increment_variable_v1.
self.assertEqual(-4, subtract(constant_op.constant(7)).numpy())
self.assertEqual(10, increment_variable(constant_op.constant(7)).numpy())
    # Check that the variable update from increment_variable is visible.
self.assertEqual(17, add(constant_op.constant(7)).numpy())
self.assertEqual(3, subtract(constant_op.constant(7)).numpy())
# Sanity check - result from this function shouldn't change.
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy())
self.assertAllEqual({'v', 'different_scope/v'}, set(vh.variables.keys()))
def testReturnOp(self):
def update_var_v1(x):
v = variables.Variable(3, name='v')
update_op = state_ops.assign(v, x).op
return update_op
g = wrap_function.WrappedGraph()
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
update_var = g.wrap_function(update_var_v1, signature)
self.assertEqual(g.variables['v'].numpy(), 3)
update_var(constant_op.constant(12))
self.assertEqual(g.variables['v'].numpy(), 12)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
|
"""
DGL-based GCN for graph property prediction.
"""
import torch.nn as nn
import torch.nn.functional as F
from deepchem.models.losses import Loss, L2Loss, SparseSoftmaxCrossEntropy
from deepchem.models.torch_models.torch_model import TorchModel
class GCN(nn.Module):
"""Model for Graph Property Prediction Based on Graph Convolution Networks (GCN).
This model proceeds as follows:
* Update node representations in graphs with a variant of GCN
* For each graph, compute its representation by 1) a weighted sum of the node
representations in the graph, where the weights are computed by applying a
gating function to the node representations 2) a max pooling of the node
representations 3) concatenating the output of 1) and 2)
* Perform the final prediction using an MLP
Examples
--------
>>> import deepchem as dc
>>> import dgl
>>> from deepchem.models import GCN
>>> smiles = ["C1CCC1", "C1=CC=CN=C1"]
>>> featurizer = dc.feat.MolGraphConvFeaturizer()
>>> graphs = featurizer.featurize(smiles)
>>> print(type(graphs[0]))
<class 'deepchem.feat.graph_data.GraphData'>
>>> dgl_graphs = [graphs[i].to_dgl_graph(self_loop=True) for i in range(len(graphs))]
>>> # Batch two graphs into a graph of two connected components
>>> batch_dgl_graph = dgl.batch(dgl_graphs)
>>> model = GCN(n_tasks=1, mode='regression')
>>> preds = model(batch_dgl_graph)
>>> print(type(preds))
<class 'torch.Tensor'>
>>> preds.shape == (2, 1)
True
References
----------
.. [1] Thomas N. Kipf and Max Welling. "Semi-Supervised Classification with Graph
Convolutional Networks." ICLR 2017.
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
This model is different from deepchem.models.GraphConvModel as follows:
* For each graph convolution, the learnable weight in this model is shared across all nodes.
``GraphConvModel`` employs separate learnable weights for nodes of different degrees. A
learnable weight is shared across all nodes of a particular degree.
* For ``GraphConvModel``, there is an additional GraphPool operation after each
graph convolution. The operation updates the representation of a node by applying an
element-wise maximum over the representations of its neighbors and itself.
* For computing graph-level representations, this model computes a weighted sum and an
element-wise maximum of the representations of all nodes in a graph and concatenates them.
    The node weights are obtained by using a linear/dense layer followed by a sigmoid function.
For ``GraphConvModel``, the sum over node representations is unweighted.
* There are various minor differences in using dropout, skip connection and batch
normalization.
"""
def __init__(self,
n_tasks: int,
graph_conv_layers: list = None,
activation=None,
residual: bool = True,
batchnorm: bool = False,
dropout: float = 0.,
predictor_hidden_feats: int = 128,
predictor_dropout: float = 0.,
mode: str = 'regression',
number_atom_features: int = 30,
n_classes: int = 2,
nfeat_name: str = 'x'):
"""
Parameters
----------
n_tasks: int
Number of tasks.
graph_conv_layers: list of int
Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel
for the i-th GCN layer. If not specified, the default value will be [64, 64].
activation: callable
The activation function to apply to the output of each GCN layer.
By default, no activation function will be applied.
residual: bool
Whether to add a residual connection within each GCN layer. Default to True.
batchnorm: bool
Whether to apply batch normalization to the output of each GCN layer.
Default to False.
dropout: float
The dropout probability for the output of each GCN layer. Default to 0.
predictor_hidden_feats: int
The size for hidden representations in the output MLP predictor. Default to 128.
predictor_dropout: float
The dropout probability in the output MLP predictor. Default to 0.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
number_atom_features: int
The length of the initial atom feature vectors. Default to 30.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
nfeat_name: str
For an input graph ``g``, the model assumes that it stores node features in
``g.ndata[nfeat_name]`` and will retrieve input node features from that.
Default to 'x'.
"""
    try:
      import dgl
    except ModuleNotFoundError:
      raise ImportError('This class requires dgl.')
    try:
      import dgllife
    except ModuleNotFoundError:
      raise ImportError('This class requires dgllife.')
if mode not in ['classification', 'regression']:
raise ValueError("mode must be either 'classification' or 'regression'")
super(GCN, self).__init__()
self.n_tasks = n_tasks
self.mode = mode
self.n_classes = n_classes
self.nfeat_name = nfeat_name
if mode == 'classification':
out_size = n_tasks * n_classes
else:
out_size = n_tasks
from dgllife.model import GCNPredictor as DGLGCNPredictor
if graph_conv_layers is None:
graph_conv_layers = [64, 64]
num_gnn_layers = len(graph_conv_layers)
if activation is not None:
activation = [activation] * num_gnn_layers
self.model = DGLGCNPredictor(
in_feats=number_atom_features,
hidden_feats=graph_conv_layers,
activation=activation,
residual=[residual] * num_gnn_layers,
batchnorm=[batchnorm] * num_gnn_layers,
dropout=[dropout] * num_gnn_layers,
n_tasks=out_size,
predictor_hidden_feats=predictor_hidden_feats,
predictor_dropout=predictor_dropout)
def forward(self, g):
"""Predict graph labels
Parameters
----------
g: DGLGraph
A DGLGraph for a batch of graphs. It stores the node features in
``dgl_graph.ndata[self.nfeat_name]``.
Returns
-------
torch.Tensor
The model output.
* When self.mode = 'regression',
its shape will be ``(dgl_graph.batch_size, self.n_tasks)``.
* When self.mode = 'classification', the output consists of probabilities
for classes. Its shape will be ``(dgl_graph.batch_size, self.n_tasks, self.n_classes)``
if self.n_tasks > 1; its shape will be ``(dgl_graph.batch_size, self.n_classes)`` if
self.n_tasks is 1.
torch.Tensor, optional
This is only returned when self.mode = 'classification', the output consists of the
logits for classes before softmax.
"""
node_feats = g.ndata[self.nfeat_name]
out = self.model(g, node_feats)
if self.mode == 'classification':
if self.n_tasks == 1:
logits = out.view(-1, self.n_classes)
softmax_dim = 1
else:
logits = out.view(-1, self.n_tasks, self.n_classes)
softmax_dim = 2
proba = F.softmax(logits, dim=softmax_dim)
return proba, logits
else:
return out
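# A minimal, hypothetical sketch (not part of the DeepChem API) of the
# graph-level readout described in the GCN docstring: a gated weighted sum
# of node representations concatenated with an element-wise max. Assumes a
# single graph whose node features form a (num_nodes, d) tensor and a
# gate_linear module such as nn.Linear(d, 1).
def _gated_readout_sketch(node_feats, gate_linear):
  import torch
  gates = torch.sigmoid(gate_linear(node_feats))  # (num_nodes, 1), in (0, 1)
  weighted_sum = (gates * node_feats).sum(dim=0)  # (d,)
  max_pool, _ = node_feats.max(dim=0)  # (d,)
  # Graph representation: concatenation of both readouts, shape (2 * d,)
  return torch.cat([weighted_sum, max_pool], dim=0)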
class GCNModel(TorchModel):
"""Model for Graph Property Prediction Based on Graph Convolution Networks (GCN).
This model proceeds as follows:
* Update node representations in graphs with a variant of GCN
* For each graph, compute its representation by 1) a weighted sum of the node
representations in the graph, where the weights are computed by applying a
gating function to the node representations 2) a max pooling of the node
representations 3) concatenating the output of 1) and 2)
* Perform the final prediction using an MLP
Examples
--------
>>> import deepchem as dc
>>> from deepchem.models import GCNModel
>>> # preparing dataset
>>> smiles = ["C1CCC1", "CCC"]
>>> labels = [0., 1.]
>>> featurizer = dc.feat.MolGraphConvFeaturizer()
>>> X = featurizer.featurize(smiles)
>>> dataset = dc.data.NumpyDataset(X=X, y=labels)
>>> # training model
>>> model = GCNModel(mode='classification', n_tasks=1,
... batch_size=16, learning_rate=0.001)
>>> loss = model.fit(dataset, nb_epoch=5)
References
----------
.. [1] Thomas N. Kipf and Max Welling. "Semi-Supervised Classification with Graph
Convolutional Networks." ICLR 2017.
Notes
-----
This class requires DGL (https://github.com/dmlc/dgl) and DGL-LifeSci
(https://github.com/awslabs/dgl-lifesci) to be installed.
This model is different from deepchem.models.GraphConvModel as follows:
* For each graph convolution, the learnable weight in this model is shared across all nodes.
``GraphConvModel`` employs separate learnable weights for nodes of different degrees. A
learnable weight is shared across all nodes of a particular degree.
* For ``GraphConvModel``, there is an additional GraphPool operation after each
graph convolution. The operation updates the representation of a node by applying an
element-wise maximum over the representations of its neighbors and itself.
* For computing graph-level representations, this model computes a weighted sum and an
element-wise maximum of the representations of all nodes in a graph and concatenates them.
    The node weights are obtained by using a linear/dense layer followed by a sigmoid function.
For ``GraphConvModel``, the sum over node representations is unweighted.
* There are various minor differences in using dropout, skip connection and batch
normalization.
"""
def __init__(self,
n_tasks: int,
graph_conv_layers: list = None,
activation=None,
residual: bool = True,
batchnorm: bool = False,
dropout: float = 0.,
predictor_hidden_feats: int = 128,
predictor_dropout: float = 0.,
mode: str = 'regression',
number_atom_features=30,
n_classes: int = 2,
self_loop: bool = True,
**kwargs):
"""
Parameters
----------
n_tasks: int
Number of tasks.
graph_conv_layers: list of int
Width of channels for GCN layers. graph_conv_layers[i] gives the width of channel
for the i-th GCN layer. If not specified, the default value will be [64, 64].
activation: callable
The activation function to apply to the output of each GCN layer.
By default, no activation function will be applied.
residual: bool
Whether to add a residual connection within each GCN layer. Default to True.
batchnorm: bool
Whether to apply batch normalization to the output of each GCN layer.
Default to False.
dropout: float
The dropout probability for the output of each GCN layer. Default to 0.
predictor_hidden_feats: int
The size for hidden representations in the output MLP predictor. Default to 128.
predictor_dropout: float
The dropout probability in the output MLP predictor. Default to 0.
mode: str
The model type, 'classification' or 'regression'. Default to 'regression'.
number_atom_features: int
The length of the initial atom feature vectors. Default to 30.
n_classes: int
The number of classes to predict per task
(only used when ``mode`` is 'classification'). Default to 2.
self_loop: bool
Whether to add self loops for the nodes, i.e. edges from nodes to themselves.
      When input graphs have isolated nodes, self loops allow their original
      features to be preserved during message passing. Default to True.
kwargs
This can include any keyword argument of TorchModel.
"""
model = GCN(
n_tasks=n_tasks,
graph_conv_layers=graph_conv_layers,
activation=activation,
residual=residual,
batchnorm=batchnorm,
dropout=dropout,
predictor_hidden_feats=predictor_hidden_feats,
predictor_dropout=predictor_dropout,
mode=mode,
number_atom_features=number_atom_features,
n_classes=n_classes)
if mode == 'regression':
loss: Loss = L2Loss()
output_types = ['prediction']
else:
loss = SparseSoftmaxCrossEntropy()
output_types = ['prediction', 'loss']
super(GCNModel, self).__init__(
model, loss=loss, output_types=output_types, **kwargs)
self._self_loop = self_loop
def _prepare_batch(self, batch):
"""Create batch data for GCN.
Parameters
----------
batch: tuple
The tuple is ``(inputs, labels, weights)``.
Returns
-------
inputs: DGLGraph
DGLGraph for a batch of graphs.
labels: list of torch.Tensor or None
The graph labels.
weights: list of torch.Tensor or None
The weights for each sample or sample/task pair converted to torch.Tensor.
"""
    try:
      import dgl
    except ModuleNotFoundError:
      raise ImportError('This class requires dgl.')
inputs, labels, weights = batch
dgl_graphs = [
graph.to_dgl_graph(self_loop=self._self_loop) for graph in inputs[0]
]
inputs = dgl.batch(dgl_graphs).to(self.device)
_, labels, weights = super(GCNModel, self)._prepare_batch(([], labels,
weights))
return inputs, labels, weights
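# A hedged usage sketch of the batching performed in _prepare_batch above
# (kept as comments; assumes deepchem, dgl and dgllife are installed):
#
#   import deepchem as dc
#   import dgl
#   graphs = dc.feat.MolGraphConvFeaturizer().featurize(["CCO", "c1ccccc1"])
#   batched = dgl.batch([g.to_dgl_graph(self_loop=True) for g in graphs])
#   # `batched` is a single DGLGraph with two connected components, ready
#   # to be passed to GCN.forward.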
|
|
# -*- coding: utf-8 -*-
from branca.element import Element, Figure
from folium.elements import JSCSSMixin
from folium.map import Layer
from folium.utilities import none_max, none_min
from jinja2 import Template
class HeatMapWithTime(JSCSSMixin, Layer):
"""
Create a HeatMapWithTime layer
Parameters
----------
data: list of list of points of the form [lat, lng] or [lat, lng, weight]
The points you want to plot. The outer list corresponds to the various time
steps in sequential order. (weight is in (0, 1] range and defaults to 1 if
not specified for a point)
    index: list, default None
        The label (or timestamp) for each element of data. Must have the
        same length as data; if not specified, a simple count is used.
name : string, default None
The name of the Layer, as it will appear in LayerControls.
radius: default 15.
The radius used around points for the heatmap.
min_opacity: default 0
The minimum opacity for the heatmap.
max_opacity: default 0.6
The maximum opacity for the heatmap.
scale_radius: default False
Scale the radius of the points based on the zoom level.
gradient: dict, default None
Match point density values to colors. Color can be a name ('red'),
RGB values ('rgb(255,0,0)') or a hex number ('#FF0000').
use_local_extrema: default False
Defines whether the heatmap uses a global extrema set found from the input data
OR a local extrema (the maximum and minimum of the currently displayed view).
auto_play: default False
Automatically play the animation across time.
display_index: default True
Display the index (usually time) in the time control.
index_steps: default 1
Steps to take in the index dimension between animation steps.
min_speed: default 0.1
Minimum fps speed for animation.
max_speed: default 10
Maximum fps speed for animation.
speed_step: default 0.1
Step between different fps speeds on the speed slider.
position: default 'bottomleft'
Position string for the time slider. Format: 'bottom/top'+'left/right'.
overlay : bool, default True
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening (only for overlays).
"""
_template = Template(u"""
{% macro script(this, kwargs) %}
var times = {{this.times}};
{{this._parent.get_name()}}.timeDimension = L.timeDimension(
{times : times, currentTime: new Date(1)}
);
var {{this._control_name}} = new L.Control.TimeDimensionCustom({{this.index}}, {
autoPlay: {{this.auto_play}},
backwardButton: {{this.backward_button}},
displayDate: {{this.display_index}},
forwardButton: {{this.forward_button}},
limitMinimumRange: {{this.limit_minimum_range}},
limitSliders: {{this.limit_sliders}},
loopButton: {{this.loop_button}},
maxSpeed: {{this.max_speed}},
minSpeed: {{this.min_speed}},
playButton: {{this.play_button}},
playReverseButton: {{this.play_reverse_button}},
position: "{{this.position}}",
speedSlider: {{this.speed_slider}},
speedStep: {{this.speed_step}},
styleNS: "{{this.style_NS}}",
timeSlider: {{this.time_slider}},
                timeSliderDragUpdate: {{this.time_slider_drag_update}},
timeSteps: {{this.index_steps}}
})
.addTo({{this._parent.get_name()}});
var {{this.get_name()}} = new TDHeatmap({{this.data}},
{heatmapOptions: {
radius: {{this.radius}},
minOpacity: {{this.min_opacity}},
maxOpacity: {{this.max_opacity}},
scaleRadius: {{this.scale_radius}},
useLocalExtrema: {{this.use_local_extrema}},
defaultWeight: 1,
{% if this.gradient %}gradient: {{ this.gradient }}{% endif %}
}
})
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
default_js = [
('iso8601',
'https://cdn.jsdelivr.net/npm/iso8601-js-period@0.2.1/iso8601.min.js'),
('leaflet.timedimension.min.js',
'https://cdn.jsdelivr.net/npm/leaflet-timedimension@1.1.1/dist/leaflet.timedimension.min.js'),
('heatmap.min.js',
'https://cdn.jsdelivr.net/gh/python-visualization/folium/folium/templates/pa7_hm.min.js'),
('leaflet-heatmap.js',
'https://cdn.jsdelivr.net/gh/python-visualization/folium/folium/templates/pa7_leaflet_hm.min.js'),
]
default_css = [
('leaflet.timedimension.control.min.css',
'https://cdn.jsdelivr.net/npm/leaflet-timedimension@1.1.1/dist/leaflet.timedimension.control.css')
]
def __init__(self, data, index=None, name=None, radius=15, min_opacity=0,
max_opacity=0.6, scale_radius=False, gradient=None,
use_local_extrema=False, auto_play=False,
display_index=True, index_steps=1, min_speed=0.1,
max_speed=10, speed_step=0.1, position='bottomleft',
overlay=True, control=True, show=True):
super(HeatMapWithTime, self).__init__(name=name, overlay=overlay,
control=control, show=show)
self._name = 'HeatMap'
self._control_name = self.get_name() + 'Control'
# Input data.
self.data = data
self.index = index if index is not None else [str(i) for i in
range(1, len(data)+1)]
if len(self.data) != len(self.index):
raise ValueError('Input data and index are not of compatible lengths.') # noqa
self.times = list(range(1, len(data)+1))
# Heatmap settings.
self.radius = radius
self.min_opacity = min_opacity
self.max_opacity = max_opacity
self.scale_radius = 'true' if scale_radius else 'false'
self.use_local_extrema = 'true' if use_local_extrema else 'false'
self.gradient = gradient
# Time dimension settings.
self.auto_play = 'true' if auto_play else 'false'
self.display_index = 'true' if display_index else 'false'
self.min_speed = min_speed
self.max_speed = max_speed
self.position = position
self.speed_step = speed_step
self.index_steps = index_steps
# Hard coded defaults for simplicity.
self.backward_button = 'true'
self.forward_button = 'true'
self.limit_sliders = 'true'
self.limit_minimum_range = 5
self.loop_button = 'true'
self.speed_slider = 'true'
self.time_slider = 'true'
self.play_button = 'true'
self.play_reverse_button = 'true'
        self.time_slider_drag_update = 'false'
self.style_NS = 'leaflet-control-timecontrol'
def render(self, **kwargs):
super(HeatMapWithTime, self).render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ('You cannot render this Element '
'if it is not in a Figure.')
figure.header.add_child(
Element(
"""
<script>
var TDHeatmap = L.TimeDimension.Layer.extend({
initialize: function(data, options) {
var heatmapCfg = {
radius: 15,
maxOpacity: 1.,
scaleRadius: false,
useLocalExtrema: false,
latField: 'lat',
lngField: 'lng',
valueField: 'count',
defaultWeight : 1,
};
heatmapCfg = $.extend({}, heatmapCfg, options.heatmapOptions || {});
var layer = new HeatmapOverlay(heatmapCfg);
L.TimeDimension.Layer.prototype.initialize.call(this, layer, options);
this._currentLoadedTime = 0;
this._currentTimeData = {
data: []
};
this.data= data;
this.defaultWeight = heatmapCfg.defaultWeight || 1;
},
onAdd: function(map) {
L.TimeDimension.Layer.prototype.onAdd.call(this, map);
map.addLayer(this._baseLayer);
if (this._timeDimension) {
this._getDataForTime(this._timeDimension.getCurrentTime());
}
},
_onNewTimeLoading: function(ev) {
this._getDataForTime(ev.time);
return;
},
isReady: function(time) {
return (this._currentLoadedTime == time);
},
_update: function() {
this._baseLayer.setData(this._currentTimeData);
return true;
},
_getDataForTime: function(time) {
delete this._currentTimeData.data;
this._currentTimeData.data = [];
var data = this.data[time-1];
for (var i = 0; i < data.length; i++) {
this._currentTimeData.data.push({
lat: data[i][0],
lng: data[i][1],
count: data[i].length>2 ? data[i][2] : this.defaultWeight
});
}
this._currentLoadedTime = time;
if (this._timeDimension && time == this._timeDimension.getCurrentTime() && !this._timeDimension.isLoading()) {
this._update();
}
this.fire('timeload', {
time: time
});
}
});
L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({
initialize: function(index, options) {
var playerOptions = {
buffer: 1,
minBufferReady: -1
};
options.playerOptions = $.extend({}, playerOptions, options.playerOptions || {});
L.Control.TimeDimension.prototype.initialize.call(this, options);
this.index = index;
},
_getDisplayDateFormat: function(date){
return this.index[date.getTime()-1];
}
});
</script>
""", # noqa
template_name='timeControlScript'
)
)
def _get_self_bounds(self):
"""
        Computes the bounds of the object itself (not including its children)
in the form [[lat_min, lon_min], [lat_max, lon_max]].
"""
bounds = [[None, None], [None, None]]
for point in self.data:
bounds = [
[
none_min(bounds[0][0], point[0]),
none_min(bounds[0][1], point[1]),
],
[
none_max(bounds[1][0], point[0]),
none_max(bounds[1][1], point[1]),
],
]
return bounds
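# A hedged usage sketch (kept as comments; data shapes follow the class
# docstring): three time steps, each a list of [lat, lng] points.
#
#   import folium
#   from folium.plugins import HeatMapWithTime
#   m = folium.Map([48.0, 5.0], zoom_start=6)
#   data = [[[48.0, 5.0], [48.1, 5.1]], [[48.2, 5.2]], [[48.3, 5.3]]]
#   HeatMapWithTime(data, index=['t0', 't1', 't2'], auto_play=True).add_to(m)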
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import ad_group_ad_label
from google.ads.googleads.v9.services.types import ad_group_ad_label_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import AdGroupAdLabelServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdGroupAdLabelServiceGrpcTransport
class AdGroupAdLabelServiceClientMeta(type):
"""Metaclass for the AdGroupAdLabelService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AdGroupAdLabelServiceTransport]]
_transport_registry["grpc"] = AdGroupAdLabelServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AdGroupAdLabelServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdGroupAdLabelServiceClient(metaclass=AdGroupAdLabelServiceClientMeta):
"""Service to manage labels on ad group ads."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupAdLabelServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupAdLabelServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AdGroupAdLabelServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdGroupAdLabelServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def ad_group_ad_path(
customer_id: str, ad_group_id: str, ad_id: str,
) -> str:
"""Return a fully-qualified ad_group_ad string."""
return "customers/{customer_id}/adGroupAds/{ad_group_id}~{ad_id}".format(
customer_id=customer_id, ad_group_id=ad_group_id, ad_id=ad_id,
)
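    # For illustration:
    #   ad_group_ad_path("123", "456", "789")
    #   -> "customers/123/adGroupAds/456~789"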
@staticmethod
def parse_ad_group_ad_path(path: str) -> Dict[str, str]:
"""Parse a ad_group_ad path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroupAds/(?P<ad_group_id>.+?)~(?P<ad_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def ad_group_ad_label_path(
customer_id: str, ad_group_id: str, ad_id: str, label_id: str,
) -> str:
"""Return a fully-qualified ad_group_ad_label string."""
return "customers/{customer_id}/adGroupAdLabels/{ad_group_id}~{ad_id}~{label_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
ad_id=ad_id,
label_id=label_id,
)
@staticmethod
def parse_ad_group_ad_label_path(path: str) -> Dict[str, str]:
"""Parse a ad_group_ad_label path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adGroupAdLabels/(?P<ad_group_id>.+?)~(?P<ad_id>.+?)~(?P<label_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def label_path(customer_id: str, label_id: str,) -> str:
"""Return a fully-qualified label string."""
return "customers/{customer_id}/labels/{label_id}".format(
customer_id=customer_id, label_id=label_id,
)
@staticmethod
def parse_label_path(path: str) -> Dict[str, str]:
"""Parse a label path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/labels/(?P<label_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AdGroupAdLabelServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad group ad label service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdGroupAdLabelServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdGroupAdLabelServiceTransport):
            # transport is an AdGroupAdLabelServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdGroupAdLabelServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad_group_ad_label(
self,
request: Union[
ad_group_ad_label_service.GetAdGroupAdLabelRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_ad_label.AdGroupAdLabel:
r"""Returns the requested ad group ad label in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetAdGroupAdLabelRequest, dict]):
The request object. Request message for
[AdGroupAdLabelService.GetAdGroupAdLabel][google.ads.googleads.v9.services.AdGroupAdLabelService.GetAdGroupAdLabel].
resource_name (:class:`str`):
Required. The resource name of the ad
group ad label to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.AdGroupAdLabel:
A relationship between an ad group ad
and a label.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
        # in an ad_group_ad_label_service.GetAdGroupAdLabelRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, ad_group_ad_label_service.GetAdGroupAdLabelRequest
):
request = ad_group_ad_label_service.GetAdGroupAdLabelRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_ad_group_ad_label
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def mutate_ad_group_ad_labels(
self,
request: Union[
ad_group_ad_label_service.MutateAdGroupAdLabelsRequest, dict
] = None,
*,
customer_id: str = None,
operations: Sequence[
ad_group_ad_label_service.AdGroupAdLabelOperation
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_ad_label_service.MutateAdGroupAdLabelsResponse:
r"""Creates and removes ad group ad labels. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__
`HeaderError <>`__ `InternalError <>`__ `LabelError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.MutateAdGroupAdLabelsRequest, dict]):
The request object. Request message for
[AdGroupAdLabelService.MutateAdGroupAdLabels][google.ads.googleads.v9.services.AdGroupAdLabelService.MutateAdGroupAdLabels].
customer_id (:class:`str`):
Required. ID of the customer whose ad
group ad labels are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v9.services.types.AdGroupAdLabelOperation]`):
Required. The list of operations to
perform on ad group ad labels.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.services.types.MutateAdGroupAdLabelsResponse:
Response message for an ad group ad
labels mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
        # in an ad_group_ad_label_service.MutateAdGroupAdLabelsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, ad_group_ad_label_service.MutateAdGroupAdLabelsRequest
):
request = ad_group_ad_label_service.MutateAdGroupAdLabelsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_ad_group_ad_labels
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("AdGroupAdLabelServiceClient",)
|
|
from __future__ import print_function, division
import os
import tempfile
import shutil
from copy import deepcopy
import numpy as np
import pytest
from .. import Model
from .test_helpers import random_id, get_test_dust, get_realistic_test_dust
from ...grid import CartesianGrid, CylindricalPolarGrid, SphericalPolarGrid, AMRGrid, OctreeGrid
from ...dust import IsotropicDust, SphericalDust
DATA = os.path.join(os.path.dirname(__file__), 'data')
def test_basic(tmpdir):
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.set_n_photons(initial=100, imaging=100)
m.write(tmpdir.join(random_id()).strpath)
def test_noname_nofilename():
m = Model()
with pytest.raises(ValueError) as e:
m.write()
assert e.value.args[0] == "filename= has not been specified and model has no name"
def test_nogrid():
m = Model()
with pytest.raises(Exception) as e:
m.write('test')
assert e.value.args[0] == 'No coordinate grid has been set up'
def test_nophotons():
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
with pytest.raises(Exception) as e:
m.write('test')
assert e.value.args[0] == 'Photon numbers not set'
def test_incomplete_photons_1():
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
with pytest.raises(Exception) as e:
m.set_n_photons(initial=1)
assert e.value.args[0] == '[n_photons] imaging should bet set'
def test_incomplete_photons_2():
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
with pytest.raises(Exception) as e:
m.set_n_photons(imaging=1)
assert e.value.args[0] == '[n_photons] initial should be set since the initial iterations are being computed'
class TestAllGridTypes(object):
def setup_class(self):
self.grid = {}
self.grid['car'] = CartesianGrid([-1., 1.], [-2., 2.], [-3., 3.])
self.grid['cyl'] = CylindricalPolarGrid([0., 1.], [-1., 1.], [0., 2. * np.pi])
self.grid['sph'] = SphericalPolarGrid([0., 1.], [0., np.pi], [0., 2. * np.pi])
self.grid['amr'] = AMRGrid()
level = self.grid['amr'].add_level()
grid = level.add_grid()
grid.xmin, grid.xmax = -1., 1.
grid.ymin, grid.ymax = -1., 1.
grid.zmin, grid.zmax = -1., 1.
grid.nx, grid.ny, grid.nz = 8, 8, 8
grid.quantities['density'] = [np.ones((8, 8, 8))]
refined = [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
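        # In Hyperion's octree representation each ``refined`` flag marks a cell
        # that is subdivided into 8 children, so the 3 refined cells above give
        # 1 + 3 * 8 = 25 entries in total.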
self.grid['oct'] = OctreeGrid(0., 0., 0., 10., 10., 10., np.array(refined).astype(bool))
# Set up initial densities
self.density = {}
self.density['car'] = np.array([[[1.]]])
self.density['cyl'] = np.array([[[1.]]])
self.density['sph'] = np.array([[[1.]]])
self.density['amr'] = self.grid['amr']['density'][0]
self.density['oct'] = np.ones(len(refined))
self.dust = get_test_dust()
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_mismatch_density_energy_1(self, grid_type):
m = Model()
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust)
with pytest.raises(Exception) as exc:
m.add_density_grid(self.density[grid_type], self.dust, specific_energy=self.density[grid_type])
assert exc.value.args[0] == "Cannot add specific energy as it was not added for previous density arrays"
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_mismatch_density_energy_2(self, tmpdir, grid_type):
m = Model()
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust, specific_energy=self.density[grid_type])
m.add_density_grid(self.density[grid_type], self.dust)
m.set_n_photons(initial=100, imaging=100)
with pytest.raises(Exception) as exc:
m.write(tmpdir.join(random_id()).strpath)
assert exc.value.args[0] == "Not all dust lists in the grid have the same size"
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_add_density(self, tmpdir, grid_type):
m = Model()
s = m.add_point_source()
s.luminosity = 1.
s.temperature = 5000.
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust)
m.set_n_photons(initial=100, imaging=100)
m.write(tmpdir.join(random_id()).strpath)
m.run(tmpdir.join(random_id()).strpath)
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_add_density_from_grid(self, tmpdir, grid_type):
m = Model()
s = m.add_point_source()
s.luminosity = 1.
s.temperature = 5000.
g = deepcopy(self.grid[grid_type])
if grid_type != 'amr':
g['density'] = []
g['density'].append(self.density[grid_type])
m.set_grid(g)
m.add_density_grid(g['density'][0], self.dust)
m.add_density_grid(g['density'][0], self.dust)
m.set_n_photons(initial=100, imaging=100)
m.write(tmpdir.join(random_id()).strpath)
m.run(tmpdir.join(random_id()).strpath)
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_merge_density(self, tmpdir, grid_type):
m = Model()
s = m.add_point_source()
s.luminosity = 1.
s.temperature = 5000.
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust)
m.add_density_grid(self.density[grid_type], self.dust, merge_if_possible=True)
m.set_n_photons(initial=100, imaging=100)
m.write(tmpdir.join(random_id()).strpath)
m.run(tmpdir.join(random_id()).strpath)
class TestMerge(object):
def setup_class(self):
self.grid = {}
self.grid['car'] = CartesianGrid([-1., 1.], [-2., 2.], [-3., 3.])
self.grid['cyl'] = CylindricalPolarGrid([0., 1.], [-1., 1.], [0., 2. * np.pi])
self.grid['sph'] = SphericalPolarGrid([0., 1.], [0., np.pi], [0., 2. * np.pi])
self.grid['amr'] = AMRGrid()
level = self.grid['amr'].add_level()
grid = level.add_grid()
grid.xmin, grid.xmax = -1., 1.
grid.ymin, grid.ymax = -1., 1.
grid.zmin, grid.zmax = -1., 1.
grid.nx, grid.ny, grid.nz = 8, 8, 8
grid.quantities['density'] = np.ones((8, 8, 8))
refined = [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
self.grid['oct'] = OctreeGrid(0., 0., 0., 10., 10., 10., np.array(refined).astype(bool))
# Set up initial densities
self.density = {}
self.density['car'] = np.array([[[1.]]])
self.density['cyl'] = np.array([[[1.]]])
self.density['sph'] = np.array([[[1.]]])
self.density['amr'] = self.grid['amr']['density']
self.density['oct'] = np.ones(len(refined))
self.tmpdir = tempfile.mkdtemp()
self.dust1_filename = os.path.join(self.tmpdir, random_id())
self.dust1 = get_test_dust()
self.dust1.write(self.dust1_filename)
self.dust2_filename = os.path.join(self.tmpdir, random_id())
self.dust2 = get_test_dust()
self.dust2.write(self.dust2_filename)
self.dust3_filename = os.path.join(self.tmpdir, random_id())
self.dust3 = IsotropicDust([3.e9, 3.e16], [0.5, 0.5], [1., 0.5])
self.dust3.set_lte_emissivities(n_temp=10, temp_min=0.1, temp_max=1600.)
self.dust3.write(self.dust3_filename)
# The following dust file does not have emissivities and mean
# opacities since it has never been written to a file
self.dust4 = get_test_dust(set_emissivities=False)
def teardown_class(self):
shutil.rmtree(self.tmpdir)
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_merge_no(self, grid_type):
m = Model()
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust1_filename)
m.add_density_grid(self.density[grid_type], self.dust2_filename, merge_if_possible=True)
assert m.grid.n_dust == 2
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_merge_filename_disabled(self, grid_type):
m = Model()
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust1_filename)
m.add_density_grid(self.density[grid_type], self.dust1_filename, merge_if_possible=False)
assert m.grid.n_dust == 2
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_merge_filename(self, grid_type):
m = Model()
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust1_filename)
m.add_density_grid(self.density[grid_type], self.dust1_filename, merge_if_possible=True)
assert m.grid.n_dust == 1
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_merge_object_identical(self, grid_type):
m = Model()
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust1)
m.add_density_grid(self.density[grid_type], self.dust1, merge_if_possible=True)
assert m.grid.n_dust == 1
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_merge_object_samehash(self, grid_type):
m = Model()
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust1)
m.add_density_grid(self.density[grid_type], self.dust2, merge_if_possible=True)
assert m.grid.n_dust == 1
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_merge_object_diffhash(self, grid_type):
m = Model()
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust1)
m.add_density_grid(self.density[grid_type], self.dust3, merge_if_possible=True)
assert m.grid.n_dust == 2
@pytest.mark.parametrize(('grid_type'), ['car', 'sph', 'cyl', 'amr', 'oct'])
def test_merge_object_incomplete(self, grid_type):
m = Model()
m.set_grid(self.grid[grid_type])
m.add_density_grid(self.density[grid_type], self.dust1)
m.add_density_grid(self.density[grid_type], self.dust4, merge_if_possible=True)
assert m.grid.n_dust == 2
def test_dust_mix(tmpdir):
# This is a regression test for a bug which caused the code to crash if
# isotropic dust and non-isotropic dust were used together.
iso_dust = get_realistic_test_dust()
kmh_dust = os.path.join(DATA, 'kmh_lite.hdf5')
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
s = m.add_point_source()
s.luminosity = 1.
s.temperature = 6000.
m.add_density_grid(np.array([[[1.]]]), kmh_dust)
m.add_density_grid(np.array([[[1.]]]), iso_dust)
m.set_n_photons(initial=100000, imaging=0)
m.write(tmpdir.join(random_id()).strpath)
m.run(tmpdir.join(random_id()).strpath)
def test_voronoi_basics(tmpdir):
# A test to check the interaction between C++, Fortran and Python,
# and to test the internal consistency of the Voronoi gridding.
from ...util.constants import au
np.random.seed(12343)
# Generate random points
N = 1000
x = np.random.uniform(-100 * au, 100 * au, N)
y = np.random.uniform(-100 * au, 100 * au, N)
z = np.random.uniform(-100 * au, 100 * au, N)
# Set up model
m = Model()
m.set_voronoi_grid(x, y, z)
kmh_dust = SphericalDust(os.path.join(DATA, 'kmh_lite.hdf5'))
m.add_density_grid(np.repeat(1.e-17, N), kmh_dust)
# Set up fly-around images
i = m.add_peeled_images()
i.set_wavelength_range(1, 900., 1000)
i.set_viewing_angles(np.repeat(85, 9), np.linspace(0., 60., 10)[:-1])
i.set_image_limits(-150 * au, 150 * au, -150 * au, 150 * au)
i.set_image_size(512, 512)
# We are just simulating a cube with a constant temperature of 20K
m.set_n_initial_iterations(0)
m.set_minimum_temperature(20.)
# Use raytracing for optimal signal-to-noise
m.set_raytracing(True)
m.set_n_photons(imaging=0, raytracing_sources=0, raytracing_dust=1e5)
m.write(tmpdir.join(random_id()).strpath)
m.run(tmpdir.join(random_id()).strpath)
def test_dust_changed_nosave(tmpdir):
kmh_dust = SphericalDust(os.path.join(DATA, 'kmh_lite.hdf5'))
kmh_dust.set_sublimation_temperature('fast', temperature=1600)
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.add_density_grid(np.array([[[1.]]]), kmh_dust)
m.set_n_photons(initial=1, imaging=1)
with pytest.raises(ValueError) as exc:
m.write(tmpdir.join(random_id()).strpath, copy=False)
assert exc.value.args[0].startswith('Dust properties have been modified since being read in')
def test_dust_changed_save(tmpdir):
kmh_dust = SphericalDust(os.path.join(DATA, 'kmh_lite.hdf5'))
kmh_dust.set_sublimation_temperature('fast', temperature=1600)
kmh_dust.write(tmpdir.join(random_id()).strpath)
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.add_density_grid(np.array([[[1.]]]), kmh_dust)
m.set_n_photons(initial=1, imaging=1)
m.write(tmpdir.join(random_id()).strpath, copy=False)
def test_model_minimal(tmpdir):
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
m.set_n_initial_iterations(0)
m.set_n_photons(imaging=10)
m.write(tmpdir.join(random_id()).strpath)
m.run()
def test_binned_forced_first_interaction(tmpdir):
m = Model()
m.set_cartesian_grid([-1., 1.], [-1., 1.], [-1., 1.])
i = m.add_binned_images(sed=True, image=False)
i.set_wavelength_range(5, 1, 10)
i.set_viewing_bins(2, 2)
m.set_n_photons(initial=100, imaging=100)
with pytest.raises(Exception) as exc:
m.write(tmpdir.join(random_id()).strpath)
assert exc.value.args[0] == "can't use binned images with forced first interaction - use set_forced_first_interaction(False) to disable"
|
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import fixtures
import netaddr
from neutronclient.common import exceptions as nc_exc
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.tests.common import net_helpers
from neutron.tests.fullstack.resources import config
from neutron.tests.fullstack.resources import process
LOG = logging.getLogger(__name__)
class EnvironmentDescription(object):
"""A set of characteristics of an environment setup.
Does the setup, as a whole, support tunneling? How about l2pop?
"""
def __init__(self, network_type='vxlan', l2_pop=True, qos=False):
self.network_type = network_type
self.l2_pop = l2_pop
self.qos = qos
@property
def tunneling_enabled(self):
return self.network_type in ('vxlan', 'gre')
class HostDescription(object):
"""A set of characteristics of an environment Host.
What agents should the host spawn? What mode should each agent operate
under?
"""
def __init__(self, l3_agent=False, of_interface='ovs-ofctl'):
self.l3_agent = l3_agent
self.of_interface = of_interface
class Host(fixtures.Fixture):
"""The Host class models a physical host running agents, all reporting with
the same hostname.
OpenStack installers or administrators connect compute nodes to the
physical tenant network by connecting the provider bridges to their
respective physical NICs. Or, if using tunneling, by configuring an
IP address on the appropriate physical NIC. The Host class does the same
with the connect_* methods.
TODO(amuller): Add start/stop/restart methods that will start/stop/restart
all of the agents on this host. Add a kill method that stops all agents
and disconnects the host from other hosts.
"""
def __init__(self, env_desc, host_desc,
test_name, neutron_config,
central_data_bridge, central_external_bridge):
self.env_desc = env_desc
self.host_desc = host_desc
self.test_name = test_name
self.neutron_config = neutron_config
# Use reserved class E addresses
self.local_ip = self.get_random_ip('240.0.0.1', '255.255.255.254')
self.central_data_bridge = central_data_bridge
self.central_external_bridge = central_external_bridge
self.agents = {}
def _setUp(self):
agent_cfg_fixture = config.OVSConfigFixture(
self.env_desc, self.host_desc, self.neutron_config.temp_dir,
self.local_ip)
self.useFixture(agent_cfg_fixture)
if self.env_desc.tunneling_enabled:
self.useFixture(
net_helpers.OVSBridgeFixture(
agent_cfg_fixture.get_br_tun_name())).bridge
self.connect_to_internal_network_via_tunneling()
else:
br_phys = self.useFixture(
net_helpers.OVSBridgeFixture(
agent_cfg_fixture.get_br_phys_name())).bridge
self.connect_to_internal_network_via_vlans(br_phys)
self.ovs_agent = self.useFixture(
process.OVSAgentFixture(
self.env_desc, self.host_desc,
self.test_name, self.neutron_config, agent_cfg_fixture))
if self.host_desc.l3_agent:
l3_agent_cfg_fixture = self.useFixture(
config.L3ConfigFixture(
self.env_desc, self.host_desc,
self.neutron_config.temp_dir,
self.ovs_agent.agent_cfg_fixture.get_br_int_name()))
br_ex = self.useFixture(
net_helpers.OVSBridgeFixture(
l3_agent_cfg_fixture.get_external_bridge())).bridge
self.connect_to_external_network(br_ex)
self.l3_agent = self.useFixture(
process.L3AgentFixture(
self.env_desc, self.host_desc,
self.test_name,
self.neutron_config,
l3_agent_cfg_fixture))
def connect_to_internal_network_via_tunneling(self):
veth_1, veth_2 = self.useFixture(
net_helpers.VethFixture()).ports
# NOTE: This sets an IP address on the host's root namespace
# which is cleaned up when the device is deleted.
veth_1.addr.add(common_utils.ip_to_cidr(self.local_ip, 32))
veth_1.link.set_up()
veth_2.link.set_up()
def connect_to_internal_network_via_vlans(self, host_data_bridge):
        # When using VLANs for segmentation, the per-host provider bridge must
        # be patched into the centralized, shared data bridge.
net_helpers.create_patch_ports(
self.central_data_bridge, host_data_bridge)
def connect_to_external_network(self, host_external_bridge):
net_helpers.create_patch_ports(
self.central_external_bridge, host_external_bridge)
@staticmethod
def get_random_ip(low, high):
parent_range = netaddr.IPRange(low, high)
return str(random.choice(parent_range))
@property
def hostname(self):
return self.neutron_config.config.DEFAULT.host
@property
def l3_agent(self):
return self.agents['l3']
@l3_agent.setter
def l3_agent(self, agent):
self.agents['l3'] = agent
@property
def ovs_agent(self):
return self.agents['ovs']
@ovs_agent.setter
def ovs_agent(self, agent):
self.agents['ovs'] = agent
class Environment(fixtures.Fixture):
"""Represents a deployment topology.
Environment is a collection of hosts. It starts a Neutron server
and a parametrized number of Hosts, each a collection of agents.
The Environment accepts a collection of HostDescription, each describing
the type of Host to create.
"""
def __init__(self, env_desc, hosts_desc):
"""
:param env_desc: An EnvironmentDescription instance.
:param hosts_desc: A list of HostDescription instances.
"""
super(Environment, self).__init__()
self.env_desc = env_desc
self.hosts_desc = hosts_desc
self.hosts = []
def wait_until_env_is_up(self):
utils.wait_until_true(self._processes_are_ready)
def _processes_are_ready(self):
try:
running_agents = self.neutron_server.client.list_agents()['agents']
agents_count = sum(len(host.agents) for host in self.hosts)
return len(running_agents) == agents_count
except nc_exc.NeutronClientException:
return False
def _create_host(self, host_desc):
temp_dir = self.useFixture(fixtures.TempDir()).path
neutron_config = config.NeutronConfigFixture(
self.env_desc, host_desc, temp_dir,
cfg.CONF.database.connection, self.rabbitmq_environment)
self.useFixture(neutron_config)
return self.useFixture(
Host(self.env_desc,
host_desc,
self.test_name,
neutron_config,
self.central_data_bridge,
self.central_external_bridge))
def _setUp(self):
self.temp_dir = self.useFixture(fixtures.TempDir()).path
self.rabbitmq_environment = self.useFixture(
process.RabbitmqEnvironmentFixture())
plugin_cfg_fixture = self.useFixture(
config.ML2ConfigFixture(
self.env_desc, None, self.temp_dir,
self.env_desc.network_type))
neutron_cfg_fixture = self.useFixture(
config.NeutronConfigFixture(
self.env_desc, None, self.temp_dir,
cfg.CONF.database.connection, self.rabbitmq_environment))
self.neutron_server = self.useFixture(
process.NeutronServerFixture(
self.env_desc, None,
self.test_name, neutron_cfg_fixture, plugin_cfg_fixture))
self.central_data_bridge = self.useFixture(
net_helpers.OVSBridgeFixture('cnt-data')).bridge
self.central_external_bridge = self.useFixture(
net_helpers.OVSBridgeFixture('cnt-ex')).bridge
self.hosts = [self._create_host(desc) for desc in self.hosts_desc]
self.wait_until_env_is_up()
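# Sketch of how these fixtures are typically composed from a fullstack test case
# (hedged: the exact base-class plumbing lives outside this module):
#
#     env = self.useFixture(Environment(
#         EnvironmentDescription(network_type='vlan', l2_pop=False),
#         [HostDescription(l3_agent=True), HostDescription()]))
#
# Each HostDescription becomes one Host fixture running its own OVS agent (and
# optionally an L3 agent), all registering against the shared Neutron server.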
|
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import calendar
import codecs
import datetime
import io
import os.path
import re
import subprocess
import sys
from pycoin.convention import tx_fee, satoshi_to_mbtc
from pycoin.encoding import hash160
from pycoin.key import Key
from pycoin.key.validate import is_address_valid
from pycoin.networks import address_prefix_for_netcode
from pycoin.serialize import b2h_rev, h2b, h2b_rev, stream_to_bytes
from pycoin.services import spendables_for_address, get_tx_db
from pycoin.services.providers import message_about_tx_cache_env, \
message_about_get_tx_env, message_about_spendables_for_address_env
from pycoin.tx import Spendable, Tx, TxOut, \
SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ANYONECANPAY
from pycoin.tx.Tx import BadSpendableError
from pycoin.tx.tx_utils import distribute_from_split_pool, sign_tx
from pycoin.tx.TxOut import standard_tx_out_script
from pycoin.tx.script.tools import opcode_list
from pycoin.tx.script.check_signature import parse_signature_blob
from pycoin.tx.script.der import UnexpectedDER
DEFAULT_VERSION = 1
DEFAULT_LOCK_TIME = 0
LOCKTIME_THRESHOLD = 500000000
def validate_bitcoind(tx, tx_db, bitcoind_url):
try:
from pycoin.services.bitcoind import bitcoind_agrees_on_transaction_validity
if bitcoind_agrees_on_transaction_validity(bitcoind_url, tx):
print("interop test passed for %s" % tx.id(), file=sys.stderr)
else:
print("tx ==> %s FAILED interop test" % tx.id(), file=sys.stderr)
except ImportError:
print("warning: can't talk to bitcoind due to missing library")
def sighash_type_to_string(sighash_type):
anyonecanpay = sighash_type & SIGHASH_ANYONECANPAY
sighash_type &= ~SIGHASH_ANYONECANPAY
if sighash_type == SIGHASH_ALL:
sighash_str = 'SIGHASH_ALL'
elif sighash_type == SIGHASH_NONE:
sighash_str = 'SIGHASH_NONE'
elif sighash_type == SIGHASH_SINGLE:
sighash_str = 'SIGHASH_SINGLE'
else:
sighash_str = 'SIGHASH_UNKNOWN'
if anyonecanpay:
sighash_str += ' | SIGHASH_ANYONECANPAY'
return sighash_str
def dump_tx(tx, netcode='BTC', verbose_signature=False):
address_prefix = address_prefix_for_netcode(netcode)
tx_bin = stream_to_bytes(tx.stream)
print("Version: %2d tx hash %s %d bytes " % (tx.version, tx.id(), len(tx_bin)))
print("TxIn count: %d; TxOut count: %d" % (len(tx.txs_in), len(tx.txs_out)))
if tx.lock_time == 0:
meaning = "valid anytime"
elif tx.lock_time < LOCKTIME_THRESHOLD:
meaning = "valid after block index %d" % tx.lock_time
else:
when = datetime.datetime.utcfromtimestamp(tx.lock_time)
meaning = "valid on or after %s utc" % when.isoformat()
print("Lock time: %d (%s)" % (tx.lock_time, meaning))
print("Input%s:" % ('s' if len(tx.txs_in) != 1 else ''))
missing_unspents = tx.missing_unspents()
for idx, tx_in in enumerate(tx.txs_in):
if tx.is_coinbase():
print("%4d: COINBASE %12.5f mBTC" % (idx, satoshi_to_mbtc(tx.total_in())))
else:
suffix = ""
if tx.missing_unspent(idx):
tx_out = None
address = tx_in.bitcoin_address(address_prefix=address_prefix)
else:
tx_out = tx.unspents[idx]
sig_result = " sig ok" if tx.is_signature_ok(idx) else " BAD SIG"
suffix = " %12.5f mBTC %s" % (satoshi_to_mbtc(tx_out.coin_value), sig_result)
address = tx_out.bitcoin_address(netcode=netcode)
t = "%4d: %34s from %s:%-4d%s" % (idx, address, b2h_rev(tx_in.previous_hash),
tx_in.previous_index, suffix)
print(t.rstrip())
if verbose_signature:
signatures = []
for opcode in opcode_list(tx_in.script):
if not opcode.startswith("OP_"):
try:
signatures.append(parse_signature_blob(h2b(opcode)))
except UnexpectedDER:
pass
if signatures:
                sig_types = [s for _, s in signatures]  # works under Python 3, where zip() is not subscriptable
                sig_types_identical = sig_types.count(signatures[0][1]) == len(signatures)
i = 1 if len(signatures) > 1 else ''
for sig_pair, sig_type in signatures:
print(" r{0}: {1:#x}\n s{0}: {2:#x}".format(i, *sig_pair))
if not sig_types_identical and tx_out:
print(" z{}: {:#x} {}".format(i, tx.signature_hash(tx_out.script, idx, sig_type),
sighash_type_to_string(sig_type)))
if i: i += 1
if sig_types_identical and tx_out:
print(" z:{} {:#x} {}".format(' ' if i else '', tx.signature_hash(tx_out.script, idx, sig_type),
sighash_type_to_string(sig_type)))
print("Output%s:" % ('s' if len(tx.txs_out) != 1 else ''))
for idx, tx_out in enumerate(tx.txs_out):
amount_mbtc = satoshi_to_mbtc(tx_out.coin_value)
address = tx_out.bitcoin_address(netcode=netcode) or "(unknown)"
print("%4d: %34s receives %12.5f mBTC" % (idx, address, amount_mbtc))
if not missing_unspents:
print("Total input %12.5f mBTC" % satoshi_to_mbtc(tx.total_in()))
print( "Total output %12.5f mBTC" % satoshi_to_mbtc(tx.total_out()))
if not missing_unspents:
print("Total fees %12.5f mBTC" % satoshi_to_mbtc(tx.fee()))
def check_fees(tx):
total_in, total_out = tx.total_in(), tx.total_out()
actual_tx_fee = total_in - total_out
recommended_tx_fee = tx_fee.recommended_fee_for_tx(tx)
print("warning: transaction fees recommendations casually calculated and estimates may be incorrect",
file=sys.stderr)
if actual_tx_fee > recommended_tx_fee:
print("warning: transaction fee of %s exceeds expected value of %s mBTC" %
(satoshi_to_mbtc(actual_tx_fee), satoshi_to_mbtc(recommended_tx_fee)),
file=sys.stderr)
elif actual_tx_fee < 0:
print("not enough source coins (%s mBTC) for destination (%s mBTC)."
" Short %s mBTC" %
(satoshi_to_mbtc(total_in),
satoshi_to_mbtc(total_out), satoshi_to_mbtc(-actual_tx_fee)),
file=sys.stderr)
elif actual_tx_fee < recommended_tx_fee:
print("warning: transaction fee lower than (casually calculated)"
" expected value of %s mBTC, transaction might not propogate" %
satoshi_to_mbtc(recommended_tx_fee), file=sys.stderr)
return actual_tx_fee
EARLIEST_DATE = datetime.datetime(year=2009, month=1, day=1)
def parse_locktime(s):
s = re.sub(r"[ ,:\-]+", r"-", s)
for fmt1 in ["%Y-%m-%dT", "%Y-%m-%d", "%b-%d-%Y", "%b-%d-%y", "%B-%d-%Y", "%B-%d-%y"]:
for fmt2 in ["T%H-%M-%S", "T%H-%M", "-%H-%M-%S", "-%H-%M", ""]:
fmt = fmt1 + fmt2
try:
when = datetime.datetime.strptime(s, fmt)
if when < EARLIEST_DATE:
raise ValueError("invalid date: must be after %s" % EARLIEST_DATE)
return calendar.timegm(when.timetuple())
except ValueError:
pass
return int(s)
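# For example (values worked out from the formats above and worth re-checking):
# parse_locktime("2014-01-01T15:00:00") normalizes the separators, matches
# "%Y-%m-%dT%H-%M-%S" and returns the UTC timestamp 1388588400, while a plain
# block height such as parse_locktime("350000") falls through to int().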
def parse_fee(fee):
if fee in ["standard"]:
return fee
return int(fee)
EPILOG = 'Files are binary by default unless they end with the suffix ".hex".'
def main():
parser = argparse.ArgumentParser(
description="Manipulate bitcoin (or alt coin) transactions.",
epilog=EPILOG)
parser.add_argument('-t', "--transaction-version", type=int,
help='Transaction version, either 1 (default) or 3 (not yet supported).')
parser.add_argument('-l', "--lock-time", type=parse_locktime, help='Lock time; either a block'
'index, or a date/time (example: "2014-01-01T15:00:00"')
parser.add_argument('-n', "--network", default="BTC",
                        help='Define network code (BTC=Bitcoin mainnet, XTN=Bitcoin testnet).')
parser.add_argument('-a', "--augment", action='store_true',
help='augment tx by adding any missing spendable metadata by fetching'
' inputs from cache and/or web services')
parser.add_argument('-s', "--verbose-signature", action='store_true',
help='Display technical signature details.')
parser.add_argument("-i", "--fetch-spendables", metavar="address", action="append",
help='Add all unspent spendables for the given bitcoin address. This information'
' is fetched from web services.')
parser.add_argument('-f', "--private-key-file", metavar="path-to-private-keys", action="append",
help='file containing WIF or BIP0032 private keys. If file name ends with .gpg, '
'"gpg -d" will be invoked automatically. File is read one line at a time, and if '
'the file contains only one WIF per line, it will also be scanned for a bitcoin '
'address, and any addresses found will be assumed to be public keys for the given'
' private key.',
type=argparse.FileType('r'))
parser.add_argument('-g', "--gpg-argument", help='argument to pass to gpg (besides -d).', default='')
parser.add_argument("--remove-tx-in", metavar="tx_in_index_to_delete", action="append", type=int,
help='remove a tx_in')
parser.add_argument("--remove-tx-out", metavar="tx_out_index_to_delete", action="append", type=int,
help='remove a tx_out')
parser.add_argument('-F', "--fee", help='fee, in satoshis, to pay on transaction, or '
'"standard" to auto-calculate. This is only useful if the "split pool" '
'is used; otherwise, the fee is automatically set to the unclaimed funds.',
default="standard", metavar="transaction-fee", type=parse_fee)
parser.add_argument('-C', "--cache", help='force the resultant transaction into the transaction cache.'
                        ' Mostly for testing.', action='store_true')
parser.add_argument('-u', "--show-unspents", action='store_true',
help='show TxOut items for this transaction in Spendable form.')
parser.add_argument('-b', "--bitcoind-url",
help='URL to bitcoind instance to validate against (http://user:pass@host:port).')
parser.add_argument('-o', "--output-file", metavar="path-to-output-file", type=argparse.FileType('wb'),
                        help='file to write transaction to. This suppresses most other output.')
parser.add_argument('-p', "--pay-to-script", metavar="pay-to-script", action="append",
help='a hex version of a script required for a pay-to-script input (a bitcoin address that starts with 3)')
parser.add_argument('-P', "--pay-to-script-file", metavar="pay-to-script-file", nargs=1, type=argparse.FileType('r'),
help='a file containing hex scripts (one per line) corresponding to pay-to-script inputs')
parser.add_argument("argument", nargs="+", help='generic argument: can be a hex transaction id '
'(exactly 64 characters) to be fetched from cache or a web service;'
' a transaction as a hex string; a path name to a transaction to be loaded;'
' a spendable 4-tuple of the form tx_id/tx_out_idx/script_hex/satoshi_count '
'to be added to TxIn list; an address/satoshi_count to be added to the TxOut '
'list; an address to be added to the TxOut list and placed in the "split'
' pool".')
args = parser.parse_args()
# defaults
txs = []
spendables = []
payables = []
key_iters = []
TX_ID_RE = re.compile(r"^[0-9a-fA-F]{64}$")
# there are a few warnings we might optionally print out, but only if
# they are relevant. We don't want to print them out multiple times, so we
# collect them here and print them at the end if they ever kick in.
warning_tx_cache = None
warning_get_tx = None
warning_spendables = None
if args.private_key_file:
wif_re = re.compile(r"[1-9a-km-zA-LMNP-Z]{51,111}")
# address_re = re.compile(r"[1-9a-kmnp-zA-KMNP-Z]{27-31}")
for f in args.private_key_file:
if f.name.endswith(".gpg"):
gpg_args = ["gpg", "-d"]
if args.gpg_argument:
gpg_args.extend(args.gpg_argument.split())
gpg_args.append(f.name)
popen = subprocess.Popen(gpg_args, stdout=subprocess.PIPE)
f = popen.stdout
for line in f.readlines():
# decode
if isinstance(line, bytes):
line = line.decode("utf8")
# look for WIFs
possible_keys = wif_re.findall(line)
def make_key(x):
try:
return Key.from_text(x)
except Exception:
return None
keys = [make_key(x) for x in possible_keys]
for key in keys:
if key:
key_iters.append((k.wif() for k in key.subkeys("")))
# if len(keys) == 1 and key.hierarchical_wallet() is None:
# # we have exactly 1 WIF. Let's look for an address
# potential_addresses = address_re.findall(line)
# update p2sh_lookup
p2sh_lookup = {}
if args.pay_to_script:
for p2s in args.pay_to_script:
try:
script = h2b(p2s)
p2sh_lookup[hash160(script)] = script
except Exception:
print("warning: error parsing pay-to-script value %s" % p2s)
if args.pay_to_script_file:
hex_re = re.compile(r"[0-9a-fA-F]+")
for f in args.pay_to_script_file:
count = 0
for l in f:
try:
m = hex_re.search(l)
if m:
p2s = m.group(0)
script = h2b(p2s)
p2sh_lookup[hash160(script)] = script
count += 1
except Exception:
print("warning: error parsing pay-to-script file %s" % f.name)
if count == 0:
print("warning: no scripts found in %s" % f.name)
# we create the tx_db lazily
tx_db = None
for arg in args.argument:
# hex transaction id
if TX_ID_RE.match(arg):
if tx_db is None:
warning_tx_cache = message_about_tx_cache_env()
warning_get_tx = message_about_get_tx_env()
tx_db = get_tx_db()
tx = tx_db.get(h2b_rev(arg))
if not tx:
for m in [warning_tx_cache, warning_get_tx, warning_spendables]:
if m:
print("warning: %s" % m, file=sys.stderr)
parser.error("can't find Tx with id %s" % arg)
txs.append(tx)
continue
# hex transaction data
try:
tx = Tx.from_hex(arg)
txs.append(tx)
continue
except Exception:
pass
is_valid = is_address_valid(arg, allowable_netcodes=[args.network])
if is_valid:
payables.append((arg, 0))
continue
try:
key = Key.from_text(arg)
# TODO: check network
if key.wif() is None:
payables.append((key.address(), 0))
continue
# TODO: support paths to subkeys
key_iters.append((k.wif() for k in key.subkeys("")))
continue
except Exception:
pass
if os.path.exists(arg):
try:
with open(arg, "rb") as f:
if f.name.endswith("hex"):
f = io.BytesIO(codecs.getreader("hex_codec")(f).read())
tx = Tx.parse(f)
txs.append(tx)
try:
tx.parse_unspents(f)
except Exception as ex:
pass
continue
except Exception:
pass
parts = arg.split("/")
if len(parts) == 4:
# spendable
try:
spendables.append(Spendable.from_text(arg))
continue
except Exception:
pass
if len(parts) == 2 and is_address_valid(parts[0], allowable_netcodes=[args.network]):
try:
payables.append(parts)
continue
except ValueError:
pass
parser.error("can't parse %s" % arg)
if args.fetch_spendables:
warning_spendables = message_about_spendables_for_address_env()
for address in args.fetch_spendables:
spendables.extend(spendables_for_address(address))
for tx in txs:
if tx.missing_unspents() and args.augment:
if tx_db is None:
warning_tx_cache = message_about_tx_cache_env()
warning_get_tx = message_about_get_tx_env()
tx_db = get_tx_db()
tx.unspents_from_db(tx_db, ignore_missing=True)
txs_in = []
txs_out = []
unspents = []
    # Keep each tx_in paired with the tx_out (and unspent) at the same index:
    # the first pass copies the overlapping prefix of inputs/outputs from each
    # source transaction, and the second pass appends whatever is left over.
for tx in txs:
smaller = min(len(tx.txs_in), len(tx.txs_out))
txs_in.extend(tx.txs_in[:smaller])
txs_out.extend(tx.txs_out[:smaller])
unspents.extend(tx.unspents[:smaller])
for tx in txs:
smaller = min(len(tx.txs_in), len(tx.txs_out))
txs_in.extend(tx.txs_in[smaller:])
txs_out.extend(tx.txs_out[smaller:])
unspents.extend(tx.unspents[smaller:])
for spendable in spendables:
txs_in.append(spendable.tx_in())
unspents.append(spendable)
for address, coin_value in payables:
script = standard_tx_out_script(address)
txs_out.append(TxOut(coin_value, script))
lock_time = args.lock_time
version = args.transaction_version
# if no lock_time is explicitly set, inherit from the first tx or use default
if lock_time is None:
if txs:
lock_time = txs[0].lock_time
else:
lock_time = DEFAULT_LOCK_TIME
# if no version is explicitly set, inherit from the first tx or use default
if version is None:
if txs:
version = txs[0].version
else:
version = DEFAULT_VERSION
if args.remove_tx_in:
s = set(args.remove_tx_in)
txs_in = [tx_in for idx, tx_in in enumerate(txs_in) if idx not in s]
if args.remove_tx_out:
s = set(args.remove_tx_out)
txs_out = [tx_out for idx, tx_out in enumerate(txs_out) if idx not in s]
tx = Tx(txs_in=txs_in, txs_out=txs_out, lock_time=lock_time, version=version, unspents=unspents)
fee = args.fee
try:
distribute_from_split_pool(tx, fee)
except ValueError as ex:
print("warning: %s" % ex.args[0], file=sys.stderr)
unsigned_before = tx.bad_signature_count()
if unsigned_before > 0 and key_iters:
def wif_iter(iters):
while len(iters) > 0:
for idx, iter in enumerate(iters):
try:
wif = next(iter)
yield wif
except StopIteration:
iters = iters[:idx] + iters[idx+1:]
break
print("signing...", file=sys.stderr)
sign_tx(tx, wif_iter(key_iters), p2sh_lookup=p2sh_lookup)
unsigned_after = tx.bad_signature_count()
if unsigned_after > 0 and key_iters:
print("warning: %d TxIn items still unsigned" % unsigned_after, file=sys.stderr)
if len(tx.txs_in) == 0:
print("warning: transaction has no inputs", file=sys.stderr)
if len(tx.txs_out) == 0:
print("warning: transaction has no outputs", file=sys.stderr)
include_unspents = (unsigned_after > 0)
tx_as_hex = tx.as_hex(include_unspents=include_unspents)
if args.output_file:
f = args.output_file
if f.name.endswith(".hex"):
f.write(tx_as_hex.encode("utf8"))
else:
tx.stream(f)
if include_unspents:
tx.stream_unspents(f)
f.close()
elif args.show_unspents:
for spendable in tx.tx_outs_as_spendable():
print(spendable.as_text())
else:
if not tx.missing_unspents():
check_fees(tx)
dump_tx(tx, args.network, args.verbose_signature)
if include_unspents:
print("including unspents in hex dump since transaction not fully signed")
print(tx_as_hex)
if args.cache:
if tx_db is None:
warning_tx_cache = message_about_tx_cache_env()
warning_get_tx = message_about_get_tx_env()
tx_db = get_tx_db()
tx_db.put(tx)
if args.bitcoind_url:
if tx_db is None:
warning_tx_cache = message_about_tx_cache_env()
warning_get_tx = message_about_get_tx_env()
tx_db = get_tx_db()
validate_bitcoind(tx, tx_db, args.bitcoind_url)
if tx.missing_unspents():
print("\n** can't validate transaction as source transactions missing", file=sys.stderr)
else:
try:
if tx_db is None:
warning_tx_cache = message_about_tx_cache_env()
warning_get_tx = message_about_get_tx_env()
tx_db = get_tx_db()
tx.validate_unspents(tx_db)
print('all incoming transaction values validated')
except BadSpendableError as ex:
print("\n**** ERROR: FEES INCORRECTLY STATED: %s" % ex.args[0], file=sys.stderr)
except Exception as ex:
print("\n*** can't validate source transactions as untampered: %s" %
ex.args[0], file=sys.stderr)
# print warnings
for m in [warning_tx_cache, warning_get_tx, warning_spendables]:
if m:
print("warning: %s" % m, file=sys.stderr)
if __name__ == '__main__':
main()
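# Illustrative invocations (hedged; the address strings and key file below are
# placeholders, not real values):
#
#   tx.py <64-hex-digit-tx-id> -a
#       fetch a transaction from the cache or a web service and augment it
#       with its missing unspent metadata
#
#   tx.py -i 1SourceAddr... 1DestAddr... -F standard -f keys.gpg -o spend.hex
#       pull spendables for the source address from web services, send the
#       remainder (minus a standard fee) to the destination in the "split
#       pool", sign with keys from keys.gpg and write the result to spend.hex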
|
|
import asyncio
import contextlib
import tempfile
import pytest
from py import path
from aiohttp.web import Application
from .test_utils import unused_port as _unused_port
from .test_utils import (RawTestServer, TestClient, TestServer,
loop_context, setup_test_loop, teardown_test_loop)
try:
import uvloop
except ImportError:
uvloop = None
def pytest_addoption(parser):
parser.addoption('--fast', action='store_true', default=False,
help='run tests faster by disabling extra checks')
parser.addoption('--with-uvloop-only', action='store_true', default=False,
help='run tests with uvloop only if available')
parser.addoption('--without-uvloop', action='store_true', default=False,
help='run tests without uvloop')
parser.addoption('--enable-loop-debug', action='store_true', default=False,
help='enable event loop debug mode')
@pytest.fixture
def fast(request):
""" --fast config option """
return request.config.getoption('--fast')
@contextlib.contextmanager
def _passthrough_loop_context(loop, fast=False):
if loop:
# loop already exists, pass it straight through
yield loop
else:
# this shadows loop_context's standard behavior
loop = setup_test_loop()
yield loop
teardown_test_loop(loop, fast=fast)
def pytest_pycollect_makeitem(collector, name, obj):
"""
Fix pytest collecting for coroutines.
"""
if collector.funcnamefilter(name) and asyncio.iscoroutinefunction(obj):
return list(collector._genfunctions(name, obj))
def pytest_pyfunc_call(pyfuncitem):
"""
Run coroutines in an event loop instead of a normal function call.
"""
fast = pyfuncitem.config.getoption("--fast")
if asyncio.iscoroutinefunction(pyfuncitem.function):
existing_loop = pyfuncitem.funcargs.get('loop', None)
with _passthrough_loop_context(existing_loop, fast=fast) as _loop:
testargs = {arg: pyfuncitem.funcargs[arg]
for arg in pyfuncitem._fixtureinfo.argnames}
task = _loop.create_task(pyfuncitem.obj(**testargs))
_loop.run_until_complete(task)
return True
def pytest_configure(config):
fast = config.getoption('--fast')
uvloop_only = config.getoption('--with-uvloop-only')
without_uvloop = False
if fast:
without_uvloop = True
if config.getoption('--without-uvloop'):
without_uvloop = True
LOOP_FACTORIES.clear()
if uvloop_only and uvloop is not None:
LOOP_FACTORIES.append(uvloop.new_event_loop)
elif without_uvloop:
LOOP_FACTORIES.append(asyncio.new_event_loop)
else:
LOOP_FACTORIES.append(asyncio.new_event_loop)
if uvloop is not None:
LOOP_FACTORIES.append(uvloop.new_event_loop)
asyncio.set_event_loop(None)
LOOP_FACTORIES = []
@pytest.yield_fixture(params=LOOP_FACTORIES)
def loop(request):
"""Return an instance of the event loop."""
fast = request.config.getoption('--fast')
debug = request.config.getoption('--enable-loop-debug')
with loop_context(request.param, fast=fast) as _loop:
if debug:
_loop.set_debug(True)
yield _loop
@pytest.fixture
def unused_port():
"""Return a port that is unused on the current host."""
return _unused_port
@pytest.yield_fixture
def test_server(loop):
"""Factory to create a TestServer instance, given an app.
test_server(app, **kwargs)
"""
servers = []
@asyncio.coroutine
def go(app, **kwargs):
assert app.loop is loop, \
"Application is attached to other event loop"
server = TestServer(app)
yield from server.start_server(**kwargs)
servers.append(server)
return server
yield go
@asyncio.coroutine
def finalize():
while servers:
yield from servers.pop().close()
loop.run_until_complete(finalize())
@pytest.yield_fixture
def raw_test_server(loop):
"""Factory to create a RawTestServer instance, given a web handler.
raw_test_server(handler, **kwargs)
"""
servers = []
@asyncio.coroutine
def go(handler, **kwargs):
server = RawTestServer(handler, loop=loop)
yield from server.start_server(**kwargs)
servers.append(server)
return server
yield go
@asyncio.coroutine
def finalize():
while servers:
yield from servers.pop().close()
loop.run_until_complete(finalize())
@pytest.yield_fixture
def test_client(loop):
"""Factory to create a TestClient instance.
test_client(app, **kwargs)
test_client(server, **kwargs)
test_client(raw_server, **kwargs)
"""
clients = []
@asyncio.coroutine
def go(__param, *args, **kwargs):
if isinstance(__param, Application):
assert not args, "args should be empty"
assert __param.loop is loop, \
"Application is attached to other event loop"
client = TestClient(__param, **kwargs)
elif isinstance(__param, TestServer):
assert not args, "args should be empty"
assert __param.app.loop is loop, \
"TestServer is attached to other event loop"
client = TestClient(__param, **kwargs)
elif isinstance(__param, RawTestServer):
assert not args, "args should be empty"
assert __param._loop is loop, \
"TestServer is attached to other event loop"
client = TestClient(__param, **kwargs)
else:
__param = __param(loop, *args, **kwargs)
client = TestClient(__param)
yield from client.start_server()
clients.append(client)
return client
yield go
@asyncio.coroutine
def finalize():
while clients:
yield from clients.pop().close()
loop.run_until_complete(finalize())
@pytest.fixture
def shorttmpdir():
"""Provides a temporary directory with a shorter file system path than the
tmpdir fixture.
"""
tmpdir = path.local(tempfile.mkdtemp())
yield tmpdir
tmpdir.remove(rec=1)
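# A hypothetical test using these fixtures (old-style coroutine tests; the
# handler, route and ``web`` import are assumptions, not part of this plugin):
#
#     from aiohttp import web
#
#     @asyncio.coroutine
#     def test_hello(loop, test_client):
#         @asyncio.coroutine
#         def hello(request):
#             return web.Response(text='Hello')
#         app = Application(loop=loop)
#         app.router.add_route('GET', '/', hello)
#         client = yield from test_client(app)
#         resp = yield from client.get('/')
#         assert resp.status == 200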
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import shutil
import tensorflow as tf
from tensorboard.backend.event_processing import event_file_inspector as efi
from tensorboard.compat.proto import event_pb2
from tensorboard.compat.proto import summary_pb2
from tensorboard.util import test_util
tf.compat.v1.disable_v2_behavior()
class EventFileInspectorTest(tf.test.TestCase):
def setUp(self):
self.logdir = os.path.join(self.get_temp_dir(), "tfevents")
self._MakeDirectoryIfNotExists(self.logdir)
def tearDown(self):
shutil.rmtree(self.logdir)
def _MakeDirectoryIfNotExists(self, path):
if not os.path.exists(path):
os.mkdir(path)
def _WriteScalarSummaries(self, data, subdirs=("",)):
        # Writes the given scalar/histogram/session-log data as event files
        # under self.logdir. If subdirs is given, writes the data identically
        # to every listed subdirectory.
for subdir_ in subdirs:
subdir = os.path.join(self.logdir, subdir_)
self._MakeDirectoryIfNotExists(subdir)
with test_util.FileWriterCache.get(subdir) as sw:
for datum in data:
summary = summary_pb2.Summary()
if "simple_value" in datum:
summary.value.add(
tag=datum["tag"], simple_value=datum["simple_value"]
)
sw.add_summary(summary, global_step=datum["step"])
elif "histo" in datum:
summary.value.add(
tag=datum["tag"], histo=summary_pb2.HistogramProto()
)
sw.add_summary(summary, global_step=datum["step"])
elif "session_log" in datum:
sw.add_session_log(
datum["session_log"], global_step=datum["step"]
)
def testEmptyLogdir(self):
# Nothing was written to logdir
units = efi.get_inspection_units(self.logdir)
self.assertEqual([], units)
def testGetAvailableTags(self):
data = [
{"tag": "c", "histo": 2, "step": 10},
{"tag": "c", "histo": 2, "step": 11},
{"tag": "c", "histo": 2, "step": 9},
{"tag": "b", "simple_value": 2, "step": 20},
{"tag": "b", "simple_value": 2, "step": 15},
{"tag": "a", "simple_value": 2, "step": 3},
]
self._WriteScalarSummaries(data)
units = efi.get_inspection_units(self.logdir)
tags = efi.get_unique_tags(units[0].field_to_obs)
self.assertEqual(["a", "b"], tags["scalars"])
self.assertEqual(["c"], tags["histograms"])
def testInspectAll(self):
data = [
{"tag": "c", "histo": 2, "step": 10},
{"tag": "c", "histo": 2, "step": 11},
{"tag": "c", "histo": 2, "step": 9},
{"tag": "b", "simple_value": 2, "step": 20},
{"tag": "b", "simple_value": 2, "step": 15},
{"tag": "a", "simple_value": 2, "step": 3},
]
self._WriteScalarSummaries(data)
units = efi.get_inspection_units(self.logdir)
printable = efi.get_dict_to_print(units[0].field_to_obs)
self.assertEqual(printable["histograms"]["max_step"], 11)
self.assertEqual(printable["histograms"]["min_step"], 9)
self.assertEqual(printable["histograms"]["num_steps"], 3)
self.assertEqual(printable["histograms"]["last_step"], 9)
self.assertEqual(printable["histograms"]["first_step"], 10)
self.assertEqual(printable["histograms"]["outoforder_steps"], [(11, 9)])
self.assertEqual(printable["scalars"]["max_step"], 20)
self.assertEqual(printable["scalars"]["min_step"], 3)
self.assertEqual(printable["scalars"]["num_steps"], 3)
self.assertEqual(printable["scalars"]["last_step"], 3)
self.assertEqual(printable["scalars"]["first_step"], 20)
self.assertEqual(
printable["scalars"]["outoforder_steps"], [(20, 15), (15, 3)]
)
def testInspectTag(self):
data = [
{"tag": "c", "histo": 2, "step": 10},
{"tag": "c", "histo": 2, "step": 11},
{"tag": "c", "histo": 2, "step": 9},
{"tag": "b", "histo": 2, "step": 20},
{"tag": "b", "simple_value": 2, "step": 15},
{"tag": "a", "simple_value": 2, "step": 3},
]
self._WriteScalarSummaries(data)
units = efi.get_inspection_units(self.logdir, tag="c")
printable = efi.get_dict_to_print(units[0].field_to_obs)
self.assertEqual(printable["histograms"]["max_step"], 11)
self.assertEqual(printable["histograms"]["min_step"], 9)
self.assertEqual(printable["histograms"]["num_steps"], 3)
self.assertEqual(printable["histograms"]["last_step"], 9)
self.assertEqual(printable["histograms"]["first_step"], 10)
self.assertEqual(printable["histograms"]["outoforder_steps"], [(11, 9)])
self.assertEqual(printable["scalars"], None)
def testSessionLogSummaries(self):
data = [
{
"session_log": event_pb2.SessionLog(
status=event_pb2.SessionLog.START
),
"step": 0,
},
{
"session_log": event_pb2.SessionLog(
status=event_pb2.SessionLog.CHECKPOINT
),
"step": 1,
},
{
"session_log": event_pb2.SessionLog(
status=event_pb2.SessionLog.CHECKPOINT
),
"step": 2,
},
{
"session_log": event_pb2.SessionLog(
status=event_pb2.SessionLog.CHECKPOINT
),
"step": 3,
},
{
"session_log": event_pb2.SessionLog(
status=event_pb2.SessionLog.STOP
),
"step": 4,
},
{
"session_log": event_pb2.SessionLog(
status=event_pb2.SessionLog.START
),
"step": 5,
},
{
"session_log": event_pb2.SessionLog(
status=event_pb2.SessionLog.STOP
),
"step": 6,
},
]
self._WriteScalarSummaries(data)
units = efi.get_inspection_units(self.logdir)
self.assertEqual(1, len(units))
printable = efi.get_dict_to_print(units[0].field_to_obs)
self.assertEqual(printable["sessionlog:start"]["steps"], [0, 5])
self.assertEqual(printable["sessionlog:stop"]["steps"], [4, 6])
self.assertEqual(printable["sessionlog:checkpoint"]["num_steps"], 3)
def testInspectAllWithNestedLogdirs(self):
data = [
{"tag": "c", "simple_value": 2, "step": 10},
{"tag": "c", "simple_value": 2, "step": 11},
{"tag": "c", "simple_value": 2, "step": 9},
{"tag": "b", "simple_value": 2, "step": 20},
{"tag": "b", "simple_value": 2, "step": 15},
{"tag": "a", "simple_value": 2, "step": 3},
]
subdirs = ["eval", "train"]
self._WriteScalarSummaries(data, subdirs=subdirs)
units = efi.get_inspection_units(self.logdir)
self.assertEqual(2, len(units))
directory_names = [os.path.join(self.logdir, name) for name in subdirs]
self.assertEqual(directory_names, sorted([unit.name for unit in units]))
for unit in units:
printable = efi.get_dict_to_print(unit.field_to_obs)["scalars"]
self.assertEqual(printable["max_step"], 20)
self.assertEqual(printable["min_step"], 3)
self.assertEqual(printable["num_steps"], 6)
self.assertEqual(printable["last_step"], 3)
self.assertEqual(printable["first_step"], 10)
self.assertEqual(
printable["outoforder_steps"], [(11, 9), (20, 15), (15, 3)]
)
if __name__ == "__main__":
tf.test.main()
|
|
import numpy
from matplotlib import pyplot
import advection
import weno_coefficients
from scipy.integrate import ode
def weno(order, q):
"""
Do WENO reconstruction
Parameters
----------
order : int
The stencil width
q : numpy array
Scalar data to reconstruct
Returns
-------
qL : numpy array
Reconstructed data - boundary points are zero
"""
C = weno_coefficients.C_all[order]
a = weno_coefficients.a_all[order]
sigma = weno_coefficients.sigma_all[order]
qL = numpy.zeros_like(q)
beta = numpy.zeros((order, len(q)))
w = numpy.zeros_like(beta)
np = len(q) - 2 * order
epsilon = 1e-16
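    # Jiang-Shu (WENO-JS) weights: for each candidate stencil k,
    #   beta_k  = sum_{l,m} sigma[k, l, m] * q[i+k-l] * q[i+k-m]   (smoothness)
    #   alpha_k = C_k / (epsilon + beta_k**2)
    #   w_k     = alpha_k / sum_j alpha_j
    # and qL[i] is the w-weighted combination of the per-stencil
    # reconstructions built from the a[k, l] coefficients.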
for i in range(order, np+order):
q_stencils = numpy.zeros(order)
alpha = numpy.zeros(order)
for k in range(order):
for l in range(order):
for m in range(l+1):
beta[k, i] += sigma[k, l, m] * q[i+k-l] * q[i+k-m]
alpha[k] = C[k] / (epsilon + beta[k, i]**2)
for l in range(order):
q_stencils[k] += a[k, l] * q[i+k-l]
w[:, i] = alpha / numpy.sum(alpha)
qL[i] = numpy.dot(w[:, i], q_stencils)
return qL
def weno_M(order, q):
"""
Do WENOM reconstruction following Gerolymos equation (18)
Parameters
----------
order : int
The stencil width
q : numpy array
Scalar data to reconstruct
Returns
-------
qL : numpy array
Reconstructed data - boundary points are zero
"""
C = weno_coefficients.C_all[order]
a = weno_coefficients.a_all[order]
sigma = weno_coefficients.sigma_all[order]
qL = numpy.zeros_like(q)
beta = numpy.zeros((order, len(q)))
w = numpy.zeros_like(beta)
np = len(q) - 2 * order
epsilon = 1e-16
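    # WENOM (mapped WENO): first compute the standard Jiang-Shu weights w_JS,
    # then apply the mapping
    #   g(w) = w * (C + C**2 - 3*C*w + w**2) / (C**2 + w*(1 - 2*C))
    # (Gerolymos eq. 18, following Henrick et al.) and renormalise; this
    # restores the design order of accuracy near critical points.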
for i in range(order, np+order):
q_stencils = numpy.zeros(order)
alpha_JS = numpy.zeros(order)
for k in range(order):
for l in range(order):
for m in range(l+1):
beta[k, i] += sigma[k, l, m] * q[i+k-l] * q[i+k-m]
alpha_JS[k] = C[k] / (epsilon + beta[k, i]**2)
for l in range(order):
q_stencils[k] += a[k, l] * q[i+k-l]
w_JS = alpha_JS / numpy.sum(alpha_JS)
alpha = w_JS * (C + C**2 - 3 * C * w_JS + w_JS**2) / \
(C**2 + w_JS * (1 - 2 * C))
w[:, i] = alpha / numpy.sum(alpha)
qL[i] = numpy.dot(w[:, i], q_stencils)
return qL
class WENOSimulation(advection.Simulation):
def __init__(self, grid, u, C=0.8, weno_order=3):
self.grid = grid
self.t = 0.0 # simulation time
self.u = u # the constant advective velocity
self.C = C # CFL number
self.weno_order = weno_order
def init_cond(self, type="tophat"):
""" initialize the data """
if type == "sine_sine":
self.grid.a[:] = numpy.sin(numpy.pi*self.grid.x -
numpy.sin(numpy.pi*self.grid.x) / numpy.pi)
else:
super().init_cond(type)
def rk_substep(self):
g = self.grid
g.fill_BCs()
f = self.u * g.a
alpha = abs(self.u)
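        # Lax-Friedrichs flux splitting: f = fp + fm with
        #   fp = (f + alpha*a)/2  (right-moving part, reconstructed from the left)
        #   fm = (f - alpha*a)/2  (left-moving part, reconstructed from the right
        #                          by feeding the reversed array to weno())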
fp = (f + alpha * g.a) / 2
fm = (f - alpha * g.a) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno(self.weno_order, fp[:-1])
fml[-1::-1] = weno(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
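        # Conservative update: rhs_i = (F_{i-1/2} - F_{i+1/2}) / dx, where
        # flux[i] holds the interface flux at the left edge of zone i.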
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
def evolve(self, num_periods=1):
""" evolve the linear advection equation using RK4 """
self.t = 0.0
g = self.grid
tmax = num_periods*self.period()
# main evolution loop
while self.t < tmax:
# fill the boundary conditions
g.fill_BCs()
# get the timestep
dt = self.timestep()
if self.t + dt > tmax:
dt = tmax - self.t
# RK4
# Store the data at the start of the step
a_start = g.a.copy()
k1 = dt * self.rk_substep()
g.a = a_start + k1 / 2
k2 = dt * self.rk_substep()
g.a = a_start + k2 / 2
k3 = dt * self.rk_substep()
g.a = a_start + k3
k4 = dt * self.rk_substep()
g.a = a_start + (k1 + 2 * (k2 + k3) + k4) / 6
self.t += dt
    def evolve_scipy(self, num_periods=1):
        """ evolve the linear advection equation using scipy """
self.t = 0.0
g = self.grid
def rk_substep_scipy(t, y):
# Periodic BCs
y[:g.ng] = y[-2*g.ng:-g.ng]
y[-g.ng:] = y[g.ng:2*g.ng]
f = self.u * y
alpha = abs(self.u)
fp = (f + alpha * y) / 2
fm = (f - alpha * y) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno(self.weno_order, fp[:-1])
fml[-1::-1] = weno(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
tmax = num_periods*self.period()
r = ode(rk_substep_scipy).set_integrator('dop853')
r.set_initial_value(g.a, 0)
dt = self.timestep()
# main evolution loop
while r.successful() and r.t < tmax:
dt = min(dt, tmax - r.t)
r.integrate(r.t+dt)
g.a[:] = r.y
class WENOMSimulation(WENOSimulation):
def rk_substep(self):
g = self.grid
g.fill_BCs()
f = self.u * g.a
alpha = abs(self.u)
fp = (f + alpha * g.a) / 2
fm = (f - alpha * g.a) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno_M(self.weno_order, fp[:-1])
fml[-1::-1] = weno_M(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
def evolve_scipy(self, num_periods=1):
""" evolve the linear advection equation using scipy """
self.t = 0.0
g = self.grid
def rk_substep_scipy(t, y):
# Periodic BCs
y[:g.ng] = y[-2*g.ng:-g.ng]
y[-g.ng:] = y[g.ng:2*g.ng]
f = self.u * y
alpha = abs(self.u)
fp = (f + alpha * y) / 2
fm = (f - alpha * y) / 2
fpr = g.scratch_array()
fml = g.scratch_array()
flux = g.scratch_array()
fpr[1:] = weno_M(self.weno_order, fp[:-1])
fml[-1::-1] = weno_M(self.weno_order, fm[-1::-1])
flux[1:-1] = fpr[1:-1] + fml[1:-1]
rhs = g.scratch_array()
rhs[1:-1] = 1/g.dx * (flux[1:-1] - flux[2:])
return rhs
tmax = num_periods*self.period()
r = ode(rk_substep_scipy).set_integrator('dop853')
r.set_initial_value(g.a, 0)
dt = self.timestep()
# main evolution loop
while r.successful() and r.t < tmax:
dt = min(dt, tmax - r.t)
r.integrate(r.t+dt)
g.a[:] = r.y
if __name__ == "__main__":
#-------------------------------------------------------------------------
# compute WENO3 case
xmin = 0.0
xmax = 1.0
nx = 64
order = 3
ng = order+1
g = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
u = 1.0
s = WENOSimulation(g, u, C=0.5, weno_order=3)
s.init_cond("gaussian")
ainit = s.grid.a.copy()
s.evolve(num_periods=1)
pyplot.plot(g.x[g.ilo:g.ihi+1], ainit[g.ilo:g.ihi+1],
ls=":", label="exact")
pyplot.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1],
label="WENO3")
# #-------------------------------------------------------------------------
# # convergence test
# # Note that WENO schemes with standard weights lose convergence at
# # critical points. For high degree critical points they lose more orders.
# # The suggestion in Gerolymos is that you may expect to drop down to
# # order r-1 in the limit.
# # The Gaussian has all odd derivatives vanishing at the origin, so
# # the higher order schemes will lose accuracy.
# # For the Gaussian:
# # This shows clean 5th order convergence for r=3
# # But for r=4-6 the best you get is ~6th order, and 5th order is more
# # realistic
# # For sin(x - sin(x)) type data Gerolymos expects better results
# # But the problem actually appears to be the time integrator
# # Switching to Dormand-Prince 8th order from scipy (a hack) will make it
# # work for all cases. With sin(.. sin) data you get 2r - 2 thanks to
# # the one critical point.
#
# problem = "sine_sine"
#
# xmin =-1.0
# xmax = 1.0
## orders = [4]
# orders = [3, 4, 5, 6]
## N1 = [2**4*3**i//2**i for i in range(5)]
## N2 = [2**5*3**i//2**i for i in range(6)]
## N3 = [3**4*4**i//3**i for i in range(5)]
## N4 = [2**(4+i) for i in range(4)]
## N = numpy.unique(numpy.array(N1+N2+N3+N4, dtype=numpy.int))
## N.sort()
## N = [32, 64, 128, 256, 512]
## N = [32, 64, 128]
# N = [24, 32, 54, 64, 81, 108, 128]
#
# errs = []
# errsM = []
#
# u = 1.0
#
# colors="bygrc"
#
# for order in orders:
# ng = order+1
# errs.append([])
# errsM.append([])
# for nx in N:
# print(order, nx)
# gu = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
# su = WENOSimulation(gu, u, C=0.5, weno_order=order)
## guM = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
## suM = WENOMSimulation(guM, u, C=0.5, weno_order=order)
#
# su.init_cond("sine_sine")
## suM.init_cond("sine_sine")
# ainit = su.grid.a.copy()
#
# su.evolve_scipy(num_periods=1)
## suM.evolve_scipy(num_periods=1)
#
# errs[-1].append(gu.norm(gu.a - ainit))
## errsM[-1].append(guM.norm(guM.a - ainit))
#
# pyplot.clf()
# N = numpy.array(N, dtype=numpy.float64)
# for n_order, order in enumerate(orders):
# pyplot.scatter(N, errs[n_order],
# color=colors[n_order],
# label=r"WENO, $r={}$".format(order))
## pyplot.scatter(N, errsM[n_order],
## color=colors[n_order],
## label=r"WENOM, $r={}$".format(order))
# pyplot.plot(N, errs[n_order][0]*(N[0]/N)**(2*order-2),
# linestyle="--", color=colors[n_order],
# label=r"$\mathcal{{O}}(\Delta x^{{{}}})$".format(2*order-2))
## pyplot.plot(N, errs[n_order][len(N)-1]*(N[len(N)-1]/N)**4,
## color="k", label=r"$\mathcal{O}(\Delta x^4)$")
#
# ax = pyplot.gca()
# ax.set_ylim(numpy.min(errs)/5, numpy.max(errs)*5)
# ax.set_xscale('log')
# ax.set_yscale('log')
#
# pyplot.xlabel("N")
# pyplot.ylabel(r"$\| a^\mathrm{final} - a^\mathrm{init} \|_2$",
# fontsize=16)
#
# pyplot.legend(frameon=False)
# pyplot.savefig("weno-converge-sine-sine.pdf")
## pyplot.show()
#-------------- RK4
problem = "gaussian"
xmin = 0.0
xmax = 1.0
orders = [3, 5]
N = [54, 64, 81, 108, 128]
errs = []
u = 1.0
colors="brc"
for order in orders:
ng = order+1
errs.append([])
for nx in N:
print(order, nx)
gu = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
su = WENOSimulation(gu, u, C=0.5, weno_order=order)
su.init_cond("gaussian")
ainit = su.grid.a.copy()
su.evolve(num_periods=5)
errs[-1].append(gu.norm(gu.a - ainit))
pyplot.clf()
N = numpy.array(N, dtype=numpy.float64)
for n_order, order in enumerate(orders):
pyplot.scatter(N, errs[n_order],
color=colors[n_order],
label=r"WENO, $r={}$".format(order))
pyplot.plot(N, errs[0][-1]*(N[-1]/N)**(5),
linestyle="--", color=colors[0],
label=r"$\mathcal{{O}}(\Delta x^{{{}}})$".format(5))
pyplot.plot(N, errs[n_order][len(N)-1]*(N[len(N)-1]/N)**4,
color="k", label=r"$\mathcal{O}(\Delta x^4)$")
ax = pyplot.gca()
ax.set_ylim(numpy.min(errs)/5, numpy.max(errs)*5)
ax.set_xscale('log')
ax.set_yscale('log')
pyplot.xlabel("N")
pyplot.ylabel(r"$\| a^\mathrm{final} - a^\mathrm{init} \|_2$",
fontsize=16)
pyplot.title("Convergence of Gaussian, RK4")
pyplot.legend(frameon=False)
pyplot.savefig("weno-converge-gaussian-rk4.pdf")
# pyplot.show()
#-------------- Gaussian
problem = "gaussian"
xmin = 0.0
xmax = 1.0
orders = [3, 4, 5, 6]
N = [24, 32, 54, 64, 81, 108, 128]
# N = [32, 64, 108, 128]
errs = []
errsM = []
u = 1.0
colors="bygrc"
for order in orders:
ng = order+1
errs.append([])
errsM.append([])
for nx in N:
print(order, nx)
gu = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
su = WENOSimulation(gu, u, C=0.5, weno_order=order)
# guM = advection.Grid1d(nx, ng, xmin=xmin, xmax=xmax)
# suM = WENOMSimulation(guM, u, C=0.5, weno_order=order)
su.init_cond("gaussian")
# suM.init_cond("gaussian")
ainit = su.grid.a.copy()
su.evolve_scipy(num_periods=1)
# suM.evolve_scipy(num_periods=1)
errs[-1].append(gu.norm(gu.a - ainit))
# errsM[-1].append(guM.norm(guM.a - ainit))
pyplot.clf()
N = numpy.array(N, dtype=numpy.float64)
for n_order, order in enumerate(orders):
pyplot.scatter(N, errs[n_order],
color=colors[n_order],
label=r"WENO, $r={}$".format(order))
# pyplot.scatter(N, errsM[n_order],
# color=colors[n_order],
# label=r"WENOM, $r={}$".format(order))
pyplot.plot(N, errs[n_order][0]*(N[0]/N)**(2*order-2),
linestyle="--", color=colors[n_order],
label=r"$\mathcal{{O}}(\Delta x^{{{}}})$".format(2*order-2))
# pyplot.plot(N, errs[n_order][len(N)-1]*(N[len(N)-1]/N)**4,
# color="k", label=r"$\mathcal{O}(\Delta x^4)$")
ax = pyplot.gca()
ax.set_ylim(numpy.min(errs)/5, numpy.max(errs)*5)
ax.set_xscale('log')
ax.set_yscale('log')
pyplot.xlabel("N")
pyplot.ylabel(r"$\| a^\mathrm{final} - a^\mathrm{init} \|_2$",
fontsize=16)
pyplot.title("Convergence of Gaussian, DOPRK8")
pyplot.legend(frameon=False)
pyplot.savefig("weno-converge-gaussian.pdf")
# pyplot.show()
|
|
import ConfigParser
import sys
import time
import logging
import logging.handlers
import os
import signal
import pwd
import shutil
from campus_factory.ClusterStatus import ClusterStatus
from campus_factory.util.ExternalCommands import RunExternal
from campus_factory.OfflineAds.OfflineAds import OfflineAds
from campus_factory.util.DaemonWrangler import DaemonWrangler
from campus_factory.Cluster import *
from campus_factory.util.StreamToLogger import StreamToLogger
from campus_factory.util.CampusConfig import get_option, set_config_file, set_option
BOSCO_CLUSTERLIST = "~/.bosco/.clusterlist"
class Factory:
"""
The main class of the factory. Designed to be run inside the condor scheduler.
@author: Derek Weitzel (dweitzel@cse.unl.edu)
"""
def __init__(self, options):
"""
Initialization function.
@param options: A set of options in the form of an options parser
Required options: config - location of configuration File
"""
self.options = options
def Intialize(self, signum=0, frame=None):
"""
Function to initialize the factory's variables such as configuration
and logging
"""
# Set the sighup signal handler
signal.signal(signal.SIGHUP, self.Intialize)
# Read in the configuration file
self.config_file = self.options.config
files_read = set_config_file(self.config_file)
# check if no files read in
if len(files_read) < 1:
sys.stderr.write("No configuration files found. Location = %s\n" % self.config_file)
sys.exit(1)
self._SetLogging()
if os.getuid() == 0 or get_option("factory_user"):
logging.info("Detected that factory should change user")
self._DropPriv()
if get_option("useoffline", "false").lower() == "true":
self.UseOffline = True
else:
self.UseOffline = False
self.cluster_list = []
# Get the cluster lists
        if get_option("clusterlist", "") != "":
logging.debug("Using the cluster list in the campus factory configuration.")
for cluster_id in get_option("clusterlist").split(','):
self.cluster_list.append(Cluster(cluster_id, useOffline = self.UseOffline))
else:
# Check for the bosco cluster command
(stdout, stderr) = RunExternal("bosco_cluster -l")
            if len(stdout) != 0 and stdout != "No clusters configured":
logging.debug("Using the cluster list installed with BOSCO")
for cluster_id in stdout.split("\n"):
if len(cluster_id) > 0 and cluster_id != "":
self.cluster_list.append(Cluster(cluster_id, useOffline = self.UseOffline))
else:
# Initialize as empty, which infers to submit 'here'
self.cluster_list = [ Cluster(get_option("CONDOR_HOST"), useOffline = self.UseOffline) ]
# Tar up the executables
wrangler = DaemonWrangler()
wrangler.Package()
def _DropPriv(self):
factory_user = get_option("factory_user")
current_uid = os.getuid()
if factory_user is None:
logging.warning("factory_user is not set in campus factory config file")
if get_option("CONDOR_IDS"):
                logging.info("CONDOR_IDS is set, will use it for dropping privileges")
(factory_uid, factory_gid) = get_option("CONDOR_IDS").split(".")
factory_uid = int(factory_uid)
factory_gid = int(factory_gid)
factory_user = pwd.getpwuid(factory_uid).pw_name
elif current_uid == 0:
                logging.error("We are running as root, which cannot submit condor jobs.")
                logging.error("Don't know who to drop privileges to.")
logging.error("I can't do my job!")
logging.error("Exiting...")
sys.exit(1)
else:
# If factory user is set
factory_uid = pwd.getpwnam(factory_user).pw_uid
factory_gid = pwd.getpwnam(factory_user).pw_gid
logging.debug("Using %i:%i for user:group" % (factory_uid, factory_gid))
# Some parts of bosco need the HOME directory and USER to be defined
os.environ["HOME"] = pwd.getpwnam(factory_user).pw_dir
os.environ["USER"] = factory_user
os.setgid(factory_gid)
os.setuid(factory_uid)
def _SetLogging(self):
"""
        Set the logging level and configure the log handlers.
"""
logging_levels = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
level = logging_levels.get(get_option("loglevel"))
logdirectory = get_option("logdirectory")
handler = logging.handlers.RotatingFileHandler(os.path.join(logdirectory, "campus_factory.log"),
maxBytes=10000000, backupCount=5)
root_logger = logging.getLogger()
# Clear out the logger
root_logger.handlers = []
root_logger.setLevel(level)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
root_logger.addHandler(handler)
# Send stdout to the log
stdout_logger = logging.getLogger()
sl = StreamToLogger(stdout_logger, logging.INFO)
sys.stdout = sl
stderr_logger = logging.getLogger()
sl = StreamToLogger(stderr_logger, logging.ERROR)
sys.stderr = sl
def Restart(self):
status = ClusterStatus()
# Get the factory id
factoryID = status.GetFactoryID()
# Hold then release the factory in the queue
(stderr, stdout) = RunExternal("condor_hold %s" % factoryID)
print "Stderr = %s" % stderr.strip()
#print "Stdout = %s" % stdout.strip()
(stderr, stdout) = RunExternal("condor_release %s" % factoryID)
print "Stderr = %s" % stderr.strip()
#print "Stdout = %s" % stdout.strip()
def Stop(self):
status = ClusterStatus()
# Get the factory id
factoryID = status.GetFactoryID()
# Remove the factory job
(stderr, stdout) = RunExternal("condor_rm %s" % factoryID)
print "Stderr = %s" % stderr.strip()
def Start(self):
"""
Start the Factory
"""
self.Intialize()
statuses = {}
status = ClusterStatus(status_constraint="IsUndefined(Offline)")
offline = OfflineAds()
# First, daemonize?
while 1:
logging.info("Starting iteration...")
# Check if there are any idle jobs
if not self.UseOffline:
user_idle = self.GetIdleJobs(ClusterStatus())
                if user_idle is None:
logging.info("Received None from idle jobs")
self.SleepFactory()
continue
idleuserjobs = 0
for user in user_idle.keys():
idleuserjobs += user_idle[user]
logging.debug("Idle jobs = %i" % idleuserjobs)
if idleuserjobs < 1:
logging.info("No idle jobs")
self.SleepFactory()
continue
# For each ssh'd blahp
for cluster in self.cluster_list:
idleslots = idlejobs = 0
if self.UseOffline:
idleuserjobs = cluster.GetIdleJobs()
# Check if the cluster is able to submit jobs
try:
(idleslots, idlejobs) = cluster.ClusterMeetPreferences()
except ClusterPreferenceException, e:
logging.debug("Received error from ClusterMeetPreferences")
logging.debug(e)
idleslots = idlejobs = None
# If the cluster preferences weren't met, then move on
                if idleslots is None or idlejobs is None:
continue
# Get the offline ads to update.
if self.UseOffline:
num_submit = cluster.GetIdleJobs()
# Determine how many glideins to submit
num_submit = self.GetNumSubmit(idleslots, idlejobs, idleuserjobs)
logging.info("Submitting %i glidein jobs", num_submit)
cluster.SubmitGlideins(num_submit)
self.SleepFactory()
def SleepFactory(self):
sleeptime = int(get_option("iterationtime"))
logging.info("Sleeping for %i seconds" % sleeptime)
time.sleep(sleeptime)
def GetNumSubmit(self, idleslots, idlejobs, idleuserjobs):
"""
Calculate the number of glideins to submit.
@param idleslots: Number of idle startd's
@param idlejobs: Number of glideins in queue, but not active
@param idleuserjobs: Number of idle user jobs from FLOCK_FROM
@return: int - Number of glideins to submit
"""
        # If we have already submitted enough glideins to fulfill the request,
# don't submit more.
if max([idlejobs, idleslots]) >= idleuserjobs:
            logging.debug("The number of idlejobs or idleslots fulfills the requested idleuserjobs, not submitting any glideins")
return 0
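        # Worked example (hypothetical config values, for illustration only):
        # with maxqueuedjobs=20, MaxIdleGlideins=10, idlejobs=4, idleslots=2
        # and idleuserjobs=8, the method returns min(20 - 4, 8, 10 - 2) = 8
        # glideins, provided the running glideins are reporting to the
        # collector (see the check below).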
status = ClusterStatus(status_constraint="IsUndefined(Offline)")
# Check that running glideins are reporting to the collector
running_glidein_jobs = status.GetRunningGlideinJobs()
logging.debug("Number of running_glidein_jobs = %i", running_glidein_jobs)
running_glideins = status.GetRunningGlideins()
logging.debug("Number of running glideins = %i", running_glideins)
if ((running_glidein_jobs * .9) > running_glideins):
logging.error("I'm guessing glideins are not reporting to the collector, not submitting")
return 0
# Ok, so now submit until we can't submit any more, or there are less user jobs
return min([int(get_option("maxqueuedjobs")) - idlejobs, \
idleuserjobs,\
int(get_option("MaxIdleGlideins")) - idleslots])
def GetIdleJobs(self, status):
"""
Get the number of idle jobs from configured flock from hosts.
@return: { user, int } - Number of idle jobs by user (dictionary)
"""
# Check for idle jobs to flock from
if not self.UseOffline:
schedds = []
# Get schedd's to query
if get_option("FLOCK_FROM"):
schedds = get_option("FLOCK_FROM").strip().split(",")
logging.debug("Schedds to query: %s" % str(schedds))
idleuserjobs = status.GetIdleJobs(schedds)
            if idleuserjobs is None:
logging.info("Received None from idle user jobs, going to try later")
return None
# Add all the idle jobs from all the schedds, unique on user (owner)
user_idle = {}
for schedd in idleuserjobs.keys():
for user in idleuserjobs[schedd].keys():
                    if user not in user_idle:
user_idle[user] = 0
user_idle[user] += idleuserjobs[schedd][user]
return user_idle
|
|
"""
Utilities to simplify the boilerplate for native lowering.
"""
from __future__ import print_function, absolute_import, division
import collections
import contextlib
import inspect
import functools
import warnings
from enum import Enum
from .. import typing, cgutils, types, utils, errors
from ..typing.templates import BaseRegistryLoader
class Registry(object):
"""
A registry of function and attribute implementations.
"""
def __init__(self):
self.functions = []
self.getattrs = []
self.setattrs = []
self.casts = []
self.constants = []
def lower(self, func, *argtys):
"""
Decorate an implementation of *func* for the given argument types.
*func* may be an actual global function object, or any
pseudo-function supported by Numba, such as "getitem".
The decorated implementation has the signature
(context, builder, sig, args).
"""
def decorate(impl):
self.functions.append((impl, func, argtys))
return impl
return decorate
def _decorate_attr(self, impl, ty, attr, impl_list, decorator):
real_impl = decorator(impl, ty, attr)
impl_list.append((real_impl, attr, real_impl.signature))
return impl
def lower_getattr(self, ty, attr):
"""
Decorate an implementation of __getattr__ for type *ty* and
the attribute *attr*.
The decorated implementation will have the signature
(context, builder, typ, val).
"""
def decorate(impl):
return self._decorate_attr(impl, ty, attr, self.getattrs,
_decorate_getattr)
return decorate
def lower_getattr_generic(self, ty):
"""
Decorate the fallback implementation of __getattr__ for type *ty*.
The decorated implementation will have the signature
(context, builder, typ, val, attr). The implementation is
called for attributes which haven't been explicitly registered
with lower_getattr().
"""
return self.lower_getattr(ty, None)
def lower_setattr(self, ty, attr):
"""
Decorate an implementation of __setattr__ for type *ty* and
the attribute *attr*.
The decorated implementation will have the signature
(context, builder, sig, args).
"""
def decorate(impl):
return self._decorate_attr(impl, ty, attr, self.setattrs,
_decorate_setattr)
return decorate
def lower_setattr_generic(self, ty):
"""
Decorate the fallback implementation of __setattr__ for type *ty*.
The decorated implementation will have the signature
(context, builder, sig, args, attr). The implementation is
called for attributes which haven't been explicitly registered
with lower_setattr().
"""
return self.lower_setattr(ty, None)
def lower_cast(self, fromty, toty):
"""
Decorate the implementation of implicit conversion between
*fromty* and *toty*.
The decorated implementation will have the signature
(context, builder, fromty, toty, val).
"""
def decorate(impl):
self.casts.append((impl, (fromty, toty)))
return impl
return decorate
def lower_constant(self, ty):
"""
Decorate the implementation for creating a constant of type *ty*.
The decorated implementation will have the signature
(context, builder, ty, pyval).
"""
def decorate(impl):
self.constants.append((impl, (ty,)))
return impl
return decorate
class RegistryLoader(BaseRegistryLoader):
"""
An incremental loader for a target registry.
"""
registry_items = ('functions', 'getattrs', 'setattrs', 'casts', 'constants')
# Global registry for implementations of builtin operations
# (functions, attributes, type casts)
builtin_registry = Registry()
lower_builtin = builtin_registry.lower
lower_getattr = builtin_registry.lower_getattr
lower_getattr_generic = builtin_registry.lower_getattr_generic
lower_setattr = builtin_registry.lower_setattr
lower_setattr_generic = builtin_registry.lower_setattr_generic
lower_cast = builtin_registry.lower_cast
lower_constant = builtin_registry.lower_constant
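# Usage sketch (names are illustrative, not part of this module): an
# implementation registered with lower_builtin receives
# (context, builder, sig, args) and returns the lowered LLVM value, e.g.
#
#     @lower_builtin(len, SomeType)
#     def sometype_len(context, builder, sig, args):
#         [val] = args
#         return context.get_constant(types.intp, 0)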
def _decorate_getattr(impl, ty, attr):
real_impl = impl
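    # Normalise the implementation to the generic 5-argument form
    # (context, builder, typ, value, attr): attribute-specific
    # implementations ignore the trailing attr argument, while the
    # generic fallback receives it.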
if attr is not None:
def res(context, builder, typ, value, attr):
return real_impl(context, builder, typ, value)
else:
def res(context, builder, typ, value, attr):
return real_impl(context, builder, typ, value, attr)
res.signature = (ty,)
res.attr = attr
return res
def _decorate_setattr(impl, ty, attr):
real_impl = impl
if attr is not None:
def res(context, builder, sig, args, attr):
return real_impl(context, builder, sig, args)
else:
def res(context, builder, sig, args, attr):
return real_impl(context, builder, sig, args, attr)
res.signature = (ty, types.Any)
res.attr = attr
return res
def fix_returning_optional(context, builder, sig, status, retval):
# Reconstruct optional return type
if isinstance(sig.return_type, types.Optional):
value_type = sig.return_type.type
optional_none = context.make_optional_none(builder, value_type)
retvalptr = cgutils.alloca_once_value(builder, optional_none)
with builder.if_then(builder.not_(status.is_none)):
optional_value = context.make_optional_value(
builder, value_type, retval,
)
builder.store(optional_value, retvalptr)
retval = builder.load(retvalptr)
return retval
def user_function(fndesc, libs):
"""
A wrapper inserting code calling Numba-compiled *fndesc*.
"""
def imp(context, builder, sig, args):
func = context.declare_function(builder.module, fndesc)
# env=None assumes this is a nopython function
status, retval = context.call_conv.call_function(
builder, func, fndesc.restype, fndesc.argtypes, args)
with cgutils.if_unlikely(builder, status.is_error):
context.call_conv.return_status_propagate(builder, status)
assert sig.return_type == fndesc.restype
# Reconstruct optional return type
retval = fix_returning_optional(context, builder, sig, status, retval)
# If the data representations don't match up
if retval.type != context.get_value_type(sig.return_type):
            msg = "function returned {0} but expected {1}"
raise TypeError(msg.format(retval.type, sig.return_type))
return impl_ret_new_ref(context, builder, fndesc.restype, retval)
imp.signature = fndesc.argtypes
imp.libs = tuple(libs)
return imp
def user_generator(gendesc, libs):
"""
A wrapper inserting code calling Numba-compiled *gendesc*.
"""
def imp(context, builder, sig, args):
func = context.declare_function(builder.module, gendesc)
# env=None assumes this is a nopython function
status, retval = context.call_conv.call_function(
builder, func, gendesc.restype, gendesc.argtypes, args)
# Return raw status for caller to process StopIteration
return status, retval
imp.libs = tuple(libs)
return imp
def iterator_impl(iterable_type, iterator_type):
"""
    Decorate a given class as implementing *iterator_type*
(by providing an `iternext()` method).
"""
def wrapper(cls):
# These are unbound methods
iternext = cls.iternext
@iternext_impl(RefType.BORROWED)
def iternext_wrapper(context, builder, sig, args, result):
(value,) = args
iterobj = cls(context, builder, value)
return iternext(iterobj, context, builder, result)
lower_builtin('iternext', iterator_type)(iternext_wrapper)
return cls
return wrapper
class _IternextResult(object):
"""
A result wrapper for iteration, passed by iternext_impl() into the
wrapped function.
"""
__slots__ = ('_context', '_builder', '_pairobj')
def __init__(self, context, builder, pairobj):
self._context = context
self._builder = builder
self._pairobj = pairobj
def set_exhausted(self):
"""
Mark the iterator as exhausted.
"""
self._pairobj.second = self._context.get_constant(types.boolean, False)
def set_valid(self, is_valid=True):
"""
Mark the iterator as valid according to *is_valid* (which must
be either a Python boolean or a LLVM inst).
"""
if is_valid in (False, True):
is_valid = self._context.get_constant(types.boolean, is_valid)
self._pairobj.second = is_valid
def yield_(self, value):
"""
Mark the iterator as yielding the given *value* (a LLVM inst).
"""
self._pairobj.first = value
def is_valid(self):
"""
Return whether the iterator is marked valid.
"""
return self._context.get_argument_value(self._builder,
types.boolean,
self._pairobj.second)
def yielded_value(self):
"""
Return the iterator's yielded value, if any.
"""
return self._pairobj.first
class RefType(Enum):
"""
Enumerate the reference type
"""
"""
A new reference
"""
NEW = 1
"""
A borrowed reference
"""
BORROWED = 2
"""
An untracked reference
"""
UNTRACKED = 3
def iternext_impl(ref_type=None):
"""
Wrap the given iternext() implementation so that it gets passed
an _IternextResult() object easing the returning of the iternext()
result pair.
ref_type: If the ref_type is a numba.targets.imputils.RefType value then the
reference type used is that specified through the RefType enum.
For backwards compatibility Numba will, for a short period of time, maintain
the old interface which assumes the reference type is borrowed. This is,
however, deprecated behaviour and users of this API are encouraged to update
their code.
The wrapped function will be called with the following signature:
(context, builder, sig, args, iternext_result)
"""
if ref_type is None:
raise ValueError("ref_type must be an enum member of imputils.RefType")
if ref_type not in [x for x in RefType]:
# this is to make it so the pre-0.44 behaviour of defaulting to a
# borrowed reference type and 4 arg call site continues to work
url = ("http://numba.pydata.org/numba-doc/latest/reference/"
"deprecation.html#"
"deprecation-of-iternext-impl-without-a-supplied-reftype")
msg = ("\nThe use of iternext_impl without specifying a "
"numba.targets.imputils.RefType is deprecated.\n\nFor more "
"information visit %s" % url)
warnings.warn(errors.NumbaDeprecationWarning(msg), stacklevel=2)
def wrapper(context, builder, sig, args):
pair_type = sig.return_type
pairobj = context.make_helper(builder, pair_type)
ref_type(context, builder, sig, args,
_IternextResult(context, builder, pairobj))
return impl_ret_borrowed(context, builder,
pair_type, pairobj._getvalue())
return wrapper
else:
def outer(func):
def wrapper(context, builder, sig, args):
pair_type = sig.return_type
pairobj = context.make_helper(builder, pair_type)
func(context, builder, sig, args,
_IternextResult(context, builder, pairobj))
if ref_type == RefType.NEW:
impl_ret = impl_ret_new_ref
elif ref_type == RefType.BORROWED:
impl_ret = impl_ret_borrowed
elif ref_type == RefType.UNTRACKED:
impl_ret = impl_ret_untracked
else:
raise ValueError("Unknown ref_type encountered")
return impl_ret(context, builder,
pair_type, pairobj._getvalue())
return wrapper
return outer
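# Usage sketch (with a hypothetical iterator type ``myitertype``):
#
#     @lower_builtin('iternext', myitertype)
#     @iternext_impl(RefType.BORROWED)
#     def iternext_myiter(context, builder, sig, args, result):
#         result.set_exhausted()   # nothing to yield in this sketch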
def call_getiter(context, builder, iterable_type, val):
"""
Call the `getiter()` implementation for the given *iterable_type*
of value *val*, and return the corresponding LLVM inst.
"""
getiter_sig = typing.signature(iterable_type.iterator_type, iterable_type)
getiter_impl = context.get_function('getiter', getiter_sig)
return getiter_impl(builder, (val,))
def call_iternext(context, builder, iterator_type, val):
"""
Call the `iternext()` implementation for the given *iterator_type*
of value *val*, and return a convenience _IternextResult() object
reflecting the results.
"""
itemty = iterator_type.yield_type
pair_type = types.Pair(itemty, types.boolean)
iternext_sig = typing.signature(pair_type, iterator_type)
iternext_impl = context.get_function('iternext', iternext_sig)
val = iternext_impl(builder, (val,))
pairobj = context.make_helper(builder, pair_type, val)
return _IternextResult(context, builder, pairobj)
def call_len(context, builder, ty, val):
"""
Call len() on the given value. Return None if len() isn't defined on
this type.
"""
try:
len_impl = context.get_function(len, typing.signature(types.intp, ty,))
except NotImplementedError:
return None
else:
return len_impl(builder, (val,))
_ForIterLoop = collections.namedtuple('_ForIterLoop',
('value', 'do_break'))
@contextlib.contextmanager
def for_iter(context, builder, iterable_type, val):
"""
Simulate a for loop on the given iterable. Yields a namedtuple with
the given members:
- `value` is the value being yielded
- `do_break` is a callable to early out of the loop
"""
iterator_type = iterable_type.iterator_type
iterval = call_getiter(context, builder, iterable_type, val)
bb_body = builder.append_basic_block('for_iter.body')
bb_end = builder.append_basic_block('for_iter.end')
def do_break():
builder.branch(bb_end)
builder.branch(bb_body)
with builder.goto_block(bb_body):
res = call_iternext(context, builder, iterator_type, iterval)
with builder.if_then(builder.not_(res.is_valid()), likely=False):
builder.branch(bb_end)
yield _ForIterLoop(res.yielded_value(), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
if context.enable_nrt:
context.nrt.decref(builder, iterator_type, iterval)
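# Usage sketch inside a lowering implementation (names are illustrative):
#
#     with for_iter(context, builder, iterable_type, iterable_val) as loop:
#         # loop.value is the LLVM value yielded for this iteration;
#         # emit the loop body here, and call loop.do_break() to exit early.
#         pass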
def impl_ret_new_ref(ctx, builder, retty, ret):
"""
The implementation returns a new reference.
"""
return ret
def impl_ret_borrowed(ctx, builder, retty, ret):
"""
The implementation returns a borrowed reference.
This function automatically incref so that the implementation is
returning a new reference.
"""
if ctx.enable_nrt:
ctx.nrt.incref(builder, retty, ret)
return ret
def impl_ret_untracked(ctx, builder, retty, ret):
"""
The return type is not a NRT object.
"""
return ret
@contextlib.contextmanager
def force_error_model(context, model_name='numpy'):
"""
Temporarily change the context's error model.
"""
from . import callconv
old_error_model = context.error_model
context.error_model = callconv.create_error_model(model_name, context)
try:
yield
finally:
context.error_model = old_error_model
def numba_typeref_ctor(*args, **kwargs):
"""A stub for use internally by Numba when a call is emitted
on a TypeRef.
"""
raise NotImplementedError("This function should not be executed.")
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.kfac.estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.kfac.python.ops import estimator
from tensorflow.contrib.kfac.python.ops import layer_collection as lc
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
_ALL_ESTIMATION_MODES = ["gradients", "empirical", "curvature_prop", "exact"]
class EstimatorTest(test.TestCase):
def setUp(self):
self._graph = ops.Graph()
with self._graph.as_default():
self.layer_collection = lc.LayerCollection()
self.inputs = random_ops.random_normal((2, 2), dtype=dtypes.float32)
self.weights = variable_scope.get_variable(
"w", shape=(2, 2), dtype=dtypes.float32)
self.bias = variable_scope.get_variable(
"b", initializer=init_ops.zeros_initializer(), shape=(2, 1))
self.output = math_ops.matmul(self.inputs, self.weights) + self.bias
# Only register the weights.
self.layer_collection.register_fully_connected(
params=(self.weights,), inputs=self.inputs, outputs=self.output)
self.outputs = math_ops.tanh(self.output)
self.targets = array_ops.zeros_like(self.outputs)
self.layer_collection.register_categorical_predictive_distribution(
logits=self.outputs, targets=self.targets)
def testEstimatorInitManualRegistration(self):
with self._graph.as_default():
# We should be able to build an estimator for only the registered vars.
estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection
)
# Check that we throw an error if we try to build an estimator for vars
# that were not manually registered.
with self.assertRaises(ValueError):
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights, self.bias],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection
)
est.make_ops_and_vars()
# Check that we throw an error if we don't include registered variables,
# i.e. self.weights
with self.assertRaises(ValueError):
est = estimator.FisherEstimatorRoundRobin(
variables=[],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection)
est.make_ops_and_vars()
@test.mock.patch.object(utils.SubGraph, "variable_uses", return_value=42)
def testVariableWrongNumberOfUses(self, mock_uses):
with self.assertRaises(ValueError):
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection)
est.make_ops_and_vars()
def testInvalidEstimationMode(self):
with self.assertRaises(ValueError):
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection,
estimation_mode="not_a_real_mode")
est.make_ops_and_vars()
def testGradientsModeBuild(self):
with self._graph.as_default():
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection,
estimation_mode="gradients")
est.make_ops_and_vars()
def testEmpiricalModeBuild(self):
with self._graph.as_default():
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection,
estimation_mode="empirical")
est.make_ops_and_vars()
def testCurvaturePropModeBuild(self):
with self._graph.as_default():
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection,
estimation_mode="curvature_prop")
est.make_ops_and_vars()
def testExactModeBuild(self):
with self._graph.as_default():
est = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
cov_ema_decay=0.1,
damping=0.2,
layer_collection=self.layer_collection,
estimation_mode="exact")
est.make_ops_and_vars()
def test_cov_update_thunks(self):
"""Ensures covariance update ops run once per global_step."""
with self._graph.as_default(), self.test_session() as sess:
fisher_estimator = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
layer_collection=self.layer_collection,
damping=0.2,
cov_ema_decay=0.0)
# Construct an op that executes one covariance update per step.
global_step = training_util.get_or_create_global_step()
(cov_variable_thunks, cov_update_op_thunks, _,
_) = fisher_estimator.create_ops_and_vars_thunks()
for thunk in cov_variable_thunks:
thunk()
cov_matrices = [
fisher_factor.get_cov()
for fisher_factor in self.layer_collection.get_factors()
]
cov_update_op = control_flow_ops.case(
[(math_ops.equal(global_step, i), thunk)
for i, thunk in enumerate(cov_update_op_thunks)])
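      # control_flow_ops.case dispatches to the single update thunk whose
      # index matches the current global_step, so exactly one covariance
      # matrix is updated per step.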
increment_global_step = global_step.assign_add(1)
sess.run(variables.global_variables_initializer())
initial_cov_values = sess.run(cov_matrices)
# Ensure there's one update per covariance matrix.
self.assertEqual(len(cov_matrices), len(cov_update_op_thunks))
# Test is no-op if only 1 covariance matrix.
assert len(cov_matrices) > 1
for i in range(len(cov_matrices)):
# Compare new and old covariance values
new_cov_values = sess.run(cov_matrices)
is_cov_equal = [
np.allclose(initial_cov_value, new_cov_value)
for (initial_cov_value,
new_cov_value) in zip(initial_cov_values, new_cov_values)
]
num_cov_equal = sum(is_cov_equal)
# Ensure exactly one covariance matrix changes per step.
self.assertEqual(num_cov_equal, len(cov_matrices) - i)
# Run all covariance update ops.
sess.run(cov_update_op)
sess.run(increment_global_step)
def test_round_robin_placement(self):
"""Check if the ops and variables are placed on devices correctly."""
with self._graph.as_default():
fisher_estimator = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
layer_collection=self.layer_collection,
damping=0.2,
cov_ema_decay=0.0,
cov_devices=["/cpu:{}".format(i) for i in range(2)],
inv_devices=["/cpu:{}".format(i) for i in range(2)])
# Construct an op that executes one covariance update per step.
(cov_update_ops, _, inv_update_ops, _, _,
_) = fisher_estimator.make_ops_and_vars(scope="test")
self.assertEqual(cov_update_ops[0].device, "/device:CPU:0")
self.assertEqual(cov_update_ops[1].device, "/device:CPU:1")
self.assertEqual(inv_update_ops[0].device, "/device:CPU:0")
self.assertEqual(inv_update_ops[1].device, "/device:CPU:1")
cov_matrices = [
fisher_factor.get_cov()
for fisher_factor in self.layer_collection.get_factors()
]
inv_matrices = [
matrix
for fisher_factor in self.layer_collection.get_factors()
for matrix in fisher_factor._matpower_by_exp_and_damping.values()
]
self.assertEqual(cov_matrices[0].device, "/device:CPU:0")
self.assertEqual(cov_matrices[1].device, "/device:CPU:1")
      # Inverse matrices are only placed when explicitly assigned, so their
      # device string is empty here.
self.assertEqual(inv_matrices[0].device, "")
self.assertEqual(inv_matrices[1].device, "")
def test_inv_update_thunks(self):
"""Ensures inverse update ops run once per global_step."""
with self._graph.as_default(), self.test_session() as sess:
fisher_estimator = estimator.FisherEstimatorRoundRobin(
variables=[self.weights],
layer_collection=self.layer_collection,
damping=0.2,
cov_ema_decay=0.0)
# Construct op that updates one inverse per global step.
global_step = training_util.get_or_create_global_step()
(cov_variable_thunks, _, inv_variable_thunks,
inv_update_op_thunks) = fisher_estimator.create_ops_and_vars_thunks()
for thunk in cov_variable_thunks:
thunk()
for thunk in inv_variable_thunks:
thunk()
inv_matrices = [
matrix
for fisher_factor in self.layer_collection.get_factors()
for matrix in fisher_factor._matpower_by_exp_and_damping.values()
]
inv_update_op = control_flow_ops.case(
[(math_ops.equal(global_step, i), thunk)
for i, thunk in enumerate(inv_update_op_thunks)])
increment_global_step = global_step.assign_add(1)
sess.run(variables.global_variables_initializer())
initial_inv_values = sess.run(inv_matrices)
# Ensure there's one update per inverse matrix. This is true as long as
# there's no fan-in/fan-out or parameter re-use.
self.assertEqual(len(inv_matrices), len(inv_update_op_thunks))
      # Test is no-op if only 1 inverse matrix.
assert len(inv_matrices) > 1
# Assign each covariance matrix a value other than the identity. This
# ensures that the inverse matrices are updated to something different as
# well.
cov_matrices = [
fisher_factor.get_cov()
for fisher_factor in self.layer_collection.get_factors()
]
sess.run([
cov_matrix.assign(2 * linalg_ops.eye(int(cov_matrix.shape[0])))
for cov_matrix in cov_matrices
])
for i in range(len(inv_matrices)):
# Compare new and old inverse values
new_inv_values = sess.run(inv_matrices)
is_inv_equal = [
np.allclose(initial_inv_value, new_inv_value)
for (initial_inv_value,
new_inv_value) in zip(initial_inv_values, new_inv_values)
]
num_inv_equal = sum(is_inv_equal)
# Ensure exactly one inverse matrix changes per step.
self.assertEqual(num_inv_equal, len(inv_matrices) - i)
# Run all inverse update ops.
sess.run(inv_update_op)
sess.run(increment_global_step)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from quantrocket.cli.utils.parse import dict_str
def add_subparser(subparsers):
_parser = subparsers.add_parser("history", description="QuantRocket historical market data CLI", help="Collect and query historical data")
_subparsers = _parser.add_subparsers(title="subcommands", dest="subcommand")
_subparsers.required = True
examples = """
Create a new database into which custom data can be loaded.
Examples:
Create a custom database for loading fundamental data:
quantrocket history create-custom-db custom-fundamentals --bar-size '1 day' --columns Revenue:int EPS:float Currency:str TotalAssets:int
Create a custom database for loading intraday OHLCV data:
quantrocket history create-custom-db custom-stk-1sec --bar-size '1 sec' --columns Open:float High:float Low:float Close:float Volume:int
"""
parser = _subparsers.add_parser(
"create-custom-db",
help="create a new database into which custom data can be loaded",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the code to assign to the database (lowercase alphanumerics and hyphens only)")
parser.add_argument(
"-z", "--bar-size",
metavar="BAR_SIZE",
help="the bar size that will be loaded. This isn't enforced but facilitates efficient "
"querying and provides a hint to other parts of the API. Use a Pandas timedelta "
"string, for example, '1 day' or '1 min' or '1 sec'.")
parser.add_argument(
"-c", "--columns",
metavar="NAME:TYPE",
nargs="*",
type=dict_str,
        help="the columns to create, specified as 'name:type'. For example, 'Close:float' "
        "or 'Volume:int'. Valid column types are 'int', 'float', 'str', 'date', and "
"'datetime'. Column names must start with a letter and include only letters, "
"numbers, and underscores. Sid and Date columns are automatically created and "
"need not be specified. For boolean columns, choose type 'int' and store 1 or 0. ")
parser.set_defaults(func="quantrocket.history._cli_create_custom_db")
examples = """
Create a new database for collecting historical data from EDI.
Examples:
Create a database for end-of-day China stock prices from EDI:
quantrocket history create-edi-db china-1d -e XSHG XSHE
"""
parser = _subparsers.add_parser(
"create-edi-db",
help="create a new database for collecting historical data from EDI",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the code to assign to the database (lowercase alphanumerics and hyphens only)")
parser.add_argument(
"-e", "--exchanges",
metavar="MIC",
nargs="*",
help="one or more exchange codes (MICs) which should be collected")
parser.set_defaults(func="quantrocket.history._cli_create_edi_db")
examples = """
Create a new database for collecting historical data from Interactive Brokers.
The historical data requirements you specify when you create a new database (bar size,
universes, etc.) are applied each time you collect data for that database.
Examples:
Create an end-of-day database called "arca-etf-eod" for a universe called "arca-etf":
quantrocket history create-ibkr-db 'arca-etf-eod' --universes 'arca-etf' --bar-size '1 day'
Create a similar end-of-day database, but collect primary exchange prices instead of
consolidated prices, adjust prices for dividends (=ADJUSTED_LAST), and use an explicit
start date:
quantrocket history create-ibkr-db 'arca-etf-eod' -u 'arca-etf' -z '1 day' --primary-exchange --bar-type 'ADJUSTED_LAST' -s 2010-01-01
Create a database of 1-minute bars showing the midpoint for a universe of FX pairs:
quantrocket history create-ibkr-db 'fx-1m' -u 'fx' -z '1 min' --bar-type MIDPOINT
Create a database of 1-second bars just before the open for a universe of Canadian energy
stocks in 2016:
quantrocket history create-ibkr-db 'tse-enr-929' -u 'tse-enr' -z '1 secs' --outside-rth --times 09:29:55 09:29:56 09:29:57 09:29:58 09:29:59 -s 2016-01-01 -e 2016-12-31
"""
parser = _subparsers.add_parser(
"create-ibkr-db",
help="create a new database for collecting historical data from Interactive Brokers",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the code to assign to the database (lowercase alphanumerics and hyphens only)")
parser.add_argument(
"-u", "--universes",
metavar="UNIVERSE",
nargs="*",
help="include these universes")
parser.add_argument(
"-i", "--sids",
metavar="SID",
nargs="*",
help="include these sids")
parser.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
help="collect history back to this start date (default is to collect as far back as data "
"is available)")
parser.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="collect history up to this end date (default is to collect up to the present)")
parser.add_argument(
"-z", "--bar-size",
metavar="BAR_SIZE",
choices=[
"1 secs", "5 secs", "10 secs", "15 secs", "30 secs",
"1 min", "2 mins", "3 mins", "5 mins", "10 mins", "15 mins", "20 mins", "30 mins",
"1 hour", "2 hours", "3 hours", "4 hours", "8 hours",
"1 day",
"1 week",
"1 month"],
help="the bar size to collect. Possible choices: %(choices)s")
parser.add_argument(
"-t", "--bar-type",
metavar="BAR_TYPE",
choices=["TRADES",
"ADJUSTED_LAST",
"MIDPOINT",
"BID",
"ASK",
"BID_ASK",
"HISTORICAL_VOLATILITY",
"OPTION_IMPLIED_VOLATILITY"],
help="the bar type to collect (if not specified, defaults to MIDPOINT for FX and "
"TRADES for everything else). Possible choices: %(choices)s")
parser.add_argument(
"-o", "--outside-rth",
action="store_true",
help="include data from outside regular trading hours (default is to limit to regular "
"trading hours)")
parser.add_argument(
"-p", "--primary-exchange",
action="store_true",
help="limit to data from the primary exchange")
times_group = parser.add_mutually_exclusive_group()
times_group.add_argument(
"--times",
nargs="*",
metavar="HH:MM:SS",
help="limit to these times (refers to the bar's start time; mutually exclusive "
"with --between-times)")
times_group.add_argument(
"--between-times",
nargs=2,
metavar="HH:MM:SS",
help="limit to times between these two times (refers to the bar's start time; "
"mutually exclusive with --times)")
parser.add_argument(
"--shard",
metavar="HOW",
choices=["year", "month", "day", "time", "sid", "sid,time", "off"],
        help="whether and how to shard the database, i.e. break it into smaller pieces. "
        "Required for intraday databases. Possible choices are `year` (separate "
        "database for each year), `month` (separate database for each year+month), "
        "`day` (separate database for each day), `time` (separate database for each bar "
        "time), `sid` (separate database for each security), `sid,time` (duplicate "
        "copies of database, one sharded by sid and the other by time), or `off` (no "
        "sharding). See http://qrok.it/h/shard for more help.")
parser.set_defaults(func="quantrocket.history._cli_create_ibkr_db")
examples = """
Create a new database for collecting historical data from Sharadar.
Examples:
Create a database for Sharadar US stocks and call it "sharadar-us-stk-1d":
quantrocket history create-sharadar-db sharadar-us-stk-1d --sec-type STK --country US
"""
parser = _subparsers.add_parser(
"create-sharadar-db",
help="create a new database for collecting historical data from Sharadar",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the code to assign to the database (lowercase alphanumerics and hyphens only)")
parser.add_argument(
"-t", "--sec-type",
metavar="SEC_TYPE",
choices=["STK","ETF"],
help="the security type to collect. Possible choices: %(choices)s")
parser.add_argument(
"-c", "--country",
metavar="COUNTRY",
choices=["US","FREE"],
default="US",
help="country to collect data for. Possible choices: %(choices)s")
parser.set_defaults(func="quantrocket.history._cli_create_sharadar_db")
examples = """
Create a new database for collecting historical US stock data from QuantRocket.
Examples:
Create a database for end-of-day US stock prices:
quantrocket history create-usstock-db usstock-1d
"""
parser = _subparsers.add_parser(
"create-usstock-db",
help="create a new database for collecting historical US stock data from QuantRocket",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the code to assign to the database (lowercase alphanumerics and hyphens only)")
parser.add_argument(
"-z", "--bar-size",
metavar="BAR_SIZE",
choices=["1 day"],
help="the bar size to collect. Possible choices: %(choices)s")
parser.add_argument(
"--free",
action="store_true",
help="limit to free sample data. Default is to collect the full dataset.")
parser.add_argument(
"-u", "--universe",
choices=[
"US",
"FREE"
],
help="[DEPRECATED] whether to collect free sample data or the full dataset. "
"This parameter is deprecated and will be removed in a future release. Please "
"use --free to request free sample data or omit --free to request the full dataset.")
parser.set_defaults(func="quantrocket.history._cli_create_usstock_db")
examples = """
List history databases.
Examples:
quantrocket history list
"""
parser = _subparsers.add_parser(
"list",
help="list history databases",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.set_defaults(func="quantrocket.history._cli_list_databases")
examples = """
Return the configuration for a history database.
Examples:
Return the configuration for a database called "jpn-lrg-15m":
quantrocket history config jpn-lrg-15m
"""
parser = _subparsers.add_parser(
"config",
help="return the configuration for a history database",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
help="the database code")
parser.set_defaults(func="quantrocket.history._cli_get_db_config")
examples = """
Delete a history database.
Deleting a history database deletes its configuration and data and is irreversible.
Examples:
Delete a database called "jpn-lrg-15m":
quantrocket history drop-db jpn-lrg-15m --confirm-by-typing-db-code-again jpn-lrg-15m
"""
parser = _subparsers.add_parser(
"drop-db",
help="delete a history database",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
help="the database code")
parser.add_argument(
"--confirm-by-typing-db-code-again",
metavar="CODE",
required=True,
help="enter the db code again to confirm you want to drop the database, its config, "
"and all its data")
parser.set_defaults(func="quantrocket.history._cli_drop_db")
examples = """
Collect historical market data from a vendor and save it to a history database.
The vendor and collection parameters are determined by the stored database
configuration as defined at the time the database was created. For certain
vendors, collection parameters can be overridden at the time of data collection.
Examples:
Collect historical data for a database of Chinese stock prices:
quantrocket history collect china-1d
Collect historical data for an IBKR database of US futures, using the priority
queue to jump in front of other queued IBKR collections:
quantrocket history collect globex-10m --priority
"""
parser = _subparsers.add_parser(
"collect",
help="collect historical market data from a vendor and save it to a history database",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"codes",
metavar="CODE",
nargs="+",
help="the database code(s) to collect data for")
parser.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="collect history for these sids, overriding config "
"(typically used to collect a subset of securities). Only "
"supported for IBKR databases.")
parser.add_argument(
"-u", "--universes",
nargs="*",
metavar="UNIVERSE",
help="collect history for these universes, overriding config "
"(typically used to collect a subset of securities). Only "
"supported for IBKR databases.")
parser.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
help="collect history back to this start date, overriding config. "
"Only supported for IBKR databases.")
parser.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="collect history up to this end date, overriding config. Only "
"supported for IBKR databases.")
parser.add_argument(
"-p", "--priority",
action="store_true",
help="use the priority queue (default is to use the standard queue). "
"Only applicable to IBKR databases.")
parser.set_defaults(func="quantrocket.history._cli_collect_history")
examples = """
Get the current queue of historical data collections.
Examples:
quantrocket history queue
"""
parser = _subparsers.add_parser(
"queue",
help="get the current queue of historical data collections",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.set_defaults(func="quantrocket.history._cli_get_history_queue")
examples = """
Cancel running or pending historical data collections.
Examples:
Cancel collections for a database called japan-1d:
quantrocket history cancel japan-1d
"""
parser = _subparsers.add_parser(
"cancel",
help="cancel running or pending historical data collections",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"codes",
metavar="CODE",
nargs="+",
help="the database code(s) to cancel collections for")
parser.set_defaults(func="quantrocket.history._cli_cancel_collections")
examples = """
Wait for historical data collection to finish.
Examples:
Wait at most 10 minutes for data collection to finish for a database called 'fx-1h':
quantrocket history wait 'fx-1h' -t 10min
"""
parser = _subparsers.add_parser(
"wait",
help="wait for historical data collection to finish",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"codes",
metavar="CODE",
nargs="+",
help="the database code(s) to wait for")
parser.add_argument(
"-t", "--timeout",
metavar="TIMEDELTA",
help="time out if data collection hasn't finished after this much time (use Pandas "
"timedelta string, e.g. 30sec or 5min or 2h)")
parser.set_defaults(func="quantrocket.history._cli_wait_for_collections")
examples = """
Query historical market data from a history database and download to file.
Examples:
Download a CSV of all historical market data since 2015 from a database called
"arca-eod" to a file called arca.csv:
quantrocket history get arca-eod --start-date 2015-01-01 -o arca.csv
"""
parser = _subparsers.add_parser(
"get",
help="query historical market data from a history database and download to file",
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"code",
metavar="CODE",
help="the code of the database to query")
filters = parser.add_argument_group("filtering options")
filters.add_argument(
"-s", "--start-date",
metavar="YYYY-MM-DD",
help="limit to history on or after this date")
filters.add_argument(
"-e", "--end-date",
metavar="YYYY-MM-DD",
help="limit to history on or before this date")
filters.add_argument(
"-u", "--universes",
nargs="*",
metavar="UNIVERSE",
help="limit to these universes")
filters.add_argument(
"-i", "--sids",
nargs="*",
metavar="SID",
help="limit to these sids")
filters.add_argument(
"--exclude-universes",
nargs="*",
metavar="UNIVERSE",
help="exclude these universes")
filters.add_argument(
"--exclude-sids",
nargs="*",
metavar="SID",
help="exclude these sids")
filters.add_argument(
"-t", "--times",
nargs="*",
metavar="HH:MM:SS",
help="limit to these times")
outputs = parser.add_argument_group("output options")
outputs.add_argument(
"-o", "--outfile",
metavar="OUTFILE",
dest="filepath_or_buffer",
help="filename to write the data to (default is stdout)")
output_format_group = outputs.add_mutually_exclusive_group()
output_format_group.add_argument(
"-j", "--json",
action="store_const",
const="json",
dest="output",
help="format output as JSON (default is CSV)")
outputs.add_argument(
"-f", "--fields",
metavar="FIELD",
nargs="*",
help="only return these fields (pass '?' or any invalid fieldname to see "
"available fields)")
outputs.add_argument(
"-c", "--cont-fut",
choices=["concat"],
metavar="HOW",
help="stitch futures into continuous contracts using this method "
"(default is not to stitch together). Possible choices: %(choices)s")
parser.set_defaults(func="quantrocket.history._cli_download_history_file")
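# A hedged sketch of the equivalent Python API usage. The function names below
# are inferred from the "_cli_*" targets registered above and are not verified
# here; parameter names mirror the argparse dests shown above.
#
#   from quantrocket.history import collect_history, download_history_file
#
#   # ~ `quantrocket history collect china-1d`
#   collect_history("china-1d")
#
#   # ~ `quantrocket history get arca-eod --start-date 2015-01-01 -o arca.csv`
#   download_history_file("arca-eod",
#                         start_date="2015-01-01",
#                         filepath_or_buffer="arca.csv")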
|
|
from datetime import datetime
from unittest import skipIf
import copy
import uuid
from cassandra.cqlengine import ValidationError as CQLValidationError
from django.core import validators
from django.forms import fields
from common.models import CassandraFamilyMember
from django_cassandra_engine.test import TestCase as CassandraTestCase
class TestDjangoCassandraModel(CassandraTestCase):
def setUp(self):
self.some_uuid = uuid.uuid4()
self.family_member = CassandraFamilyMember.objects.create(
id=self.some_uuid,
first_name="Homer",
last_name="Simpson",
is_real=False,
favourite_number=666,
favourite_float_number=43.4,
created_on=datetime.now(),
)
def test_model_is_hashable(self):
models = set()
models.add(self.family_member)
self.assertEqual(1, len(models))
def test_serializable_value(self):
self.assertEqual(self.some_uuid, self.family_member.serializable_value("id"))
self.assertEqual(
self.family_member.first_name,
self.family_member.serializable_value("first_name"),
)
def test_clone_queryset(self):
qset = CassandraFamilyMember.objects.filter(id=self.some_uuid)
self.assertNotEqual(id(qset._clone()), id(qset))
def test_create(self):
family_member = self.family_member
self.assertEqual(family_member.first_name, "Homer")
self.assertEqual(family_member.last_name, "Simpson")
self.assertEqual(family_member.is_real, False)
self.assertEqual(family_member.favourite_number, 666)
self.assertEqual(family_member.favourite_float_number, 43.4)
def test_get_by_pk(self):
got_family_member = CassandraFamilyMember.objects.allow_filtering().get(
pk=self.family_member.id
)
self.assertIsNotNone(got_family_member)
def test_exclude(self):
results = CassandraFamilyMember.objects.exclude(id=self.some_uuid)
for model in results:
self.assertNotEqual(model.id, self.some_uuid)
def test_exclude_after_filter(self):
results = CassandraFamilyMember.objects.filter(id=self.some_uuid).exclude(
last_name="Simpson"
)
self.assertEqual(len(results), 0)
def test_exclude_after_all(self):
keeper = CassandraFamilyMember.objects.create(
id=uuid.uuid4(),
first_name="Ned",
last_name="Flanders",
is_real=False,
favourite_number=666,
favourite_float_number=43.4,
created_on=datetime.now(),
)
results = CassandraFamilyMember.objects.all().exclude(last_name="Simpson")
self.assertEqual(len(results), 1)
self.assertEqual(results[0].id, keeper.id)
def test_get_by_pk_returns_primary_key_instead_of_partition_key(self):
got_family_member = CassandraFamilyMember.objects.allow_filtering().get(
pk=self.family_member.id
)
self.assertEqual(got_family_member.pk, self.family_member.id)
def test_default_manager_is_set(self):
self.assertTrue(
isinstance(
CassandraFamilyMember._default_manager,
type(CassandraFamilyMember.objects),
)
)
self.assertTrue(
isinstance(
CassandraFamilyMember._base_manager,
type(CassandraFamilyMember.objects),
)
)
self.assertTrue(hasattr(CassandraFamilyMember._default_manager, "all"))
self.assertTrue(hasattr(CassandraFamilyMember._default_manager, "filter"))
def test_get_queryset(self):
results = CassandraFamilyMember.objects.get_queryset()
self.assertEqual(results[0].id, self.some_uuid)
def test_calling_queryset_methods_not_through_manager_raises(self):
with self.assertRaises(AttributeError):
CassandraFamilyMember.all()
with self.assertRaises(AttributeError):
CassandraFamilyMember.get()
with self.assertRaises(AttributeError):
CassandraFamilyMember.filter()
def test_manager_has_a_name(self):
self.assertEqual(CassandraFamilyMember._default_manager.name, "objects")
def test_can_migrate(self):
self.assertFalse(CassandraFamilyMember._meta.can_migrate(connection=None))
def test_get_all_related_objects_with_model(self):
self.assertEqual(
CassandraFamilyMember._meta.get_all_related_objects_with_model(),
[],
)
def test_related_objects_property(self):
self.assertEqual(CassandraFamilyMember._meta.related_objects, [])
def test_db_table(self):
self.assertEqual(
CassandraFamilyMember._meta.db_table,
"common_cassandrafamilymember",
)
def test_pk_attribute(self):
self.assertEqual(
CassandraFamilyMember._meta.pk,
CassandraFamilyMember._meta.get_field("id"),
)
def test_get_fields(self):
expected_field_names = [
"id",
"first_name",
"last_name",
"is_real",
"favourite_number",
"favourite_float_number",
"created_on",
]
fields = CassandraFamilyMember._meta._get_fields()
self.assertEqual(len(fields), len(expected_field_names))
self.assertEqual([f.name for f in fields], expected_field_names)
def test_meta_attrs(self):
self.assertEqual(
CassandraFamilyMember._meta.model_name, "cassandrafamilymember"
)
self.assertEqual(CassandraFamilyMember._meta.swappable, False)
self.assertEqual(CassandraFamilyMember._meta.managed, False)
def test_values_list_with_id_pk_field_returns_it(self):
all_things = CassandraFamilyMember.objects.allow_filtering().filter(
id=self.some_uuid
)
self.assertEqual(
list(all_things.values_list("id", flat=True)), [self.some_uuid]
)
def test_values_list_with_pk_returns_the_primary_key_field_uuid(self):
all_things = CassandraFamilyMember.objects.allow_filtering().filter(
id=self.some_uuid
)
model = all_things[0]
self.assertEqual(
list(all_things.values_list("pk")),
[
[
model.id,
model.first_name,
model.last_name,
model.favourite_float_number,
]
],
)
def test_values_list_with_pk_can_return_multiple_pks(self):
some_uuid = uuid.uuid4()
family_member = CassandraFamilyMember.objects.create(
id=some_uuid,
first_name="Homer",
last_name="Simpson",
is_real=False,
favourite_number=666,
favourite_float_number=43.4,
created_on=datetime.now(),
)
all_things = CassandraFamilyMember.objects.allow_filtering().filter(
id=some_uuid
)
expected = [
[
family_member.id,
family_member.first_name,
family_member.last_name,
family_member.favourite_float_number,
]
]
self.assertEqual(len(all_things.values_list("pk")), len(expected))
def test_private_fields_are_set(self):
private_fields = [f.name for f in CassandraFamilyMember._meta.private_fields]
expected_private_fields = [
"id",
"first_name",
"last_name",
"is_real",
"favourite_number",
"favourite_float_number",
"created_on",
]
self.assertEqual(private_fields, expected_private_fields)
def test_model_doesnotexist_is_raised_when_record_not_found(self):
with self.assertRaises(CassandraFamilyMember.DoesNotExist):
not_found_uuid = uuid.uuid4()
CassandraFamilyMember.objects.allow_filtering().get(id=not_found_uuid)
class TestDjangoCassandraField(CassandraTestCase):
def setUp(self):
self.some_uuid = uuid.uuid4()
self.family_member = CassandraFamilyMember.objects.create(
id=self.some_uuid,
first_name="Homer",
last_name="Simpson",
is_real=False,
favourite_number=666,
favourite_float_number=43.4,
created_on=datetime.now(),
)
def test_attributes(self):
model_fields = self.family_member._meta._get_fields()
for field in model_fields:
allow_null = (
not field.required
and not field.is_primary_key
and not field.partition_key
) or field.has_default
self.assertEqual(field.unique_for_date, None)
self.assertEqual(field.unique_for_month, None)
self.assertEqual(field.unique_for_year, None)
self.assertEqual(field.db_column, None)
self.assertEqual(field.db_index, field.index)
self.assertEqual(field.null, allow_null)
self.assertEqual(field.blank, allow_null)
self.assertEqual(field.choices, [])
self.assertEqual(field.flatchoices, [])
self.assertEqual(field.help_text, "")
self.assertEqual(field.concrete, True)
self.assertEqual(field.editable, True)
self.assertEqual(field.many_to_many, False)
self.assertEqual(field.many_to_one, False)
self.assertEqual(field.one_to_many, False)
self.assertEqual(field.one_to_one, False)
self.assertEqual(field.hidden, False)
self.assertEqual(field.serialize, True)
self.assertEqual(field.name, field.db_field_name)
self.assertEqual(field.verbose_name, field.db_field_name)
self.assertEqual(field._verbose_name, field.db_field_name)
self.assertEqual(field.field, field)
self.assertEqual(field.model, type(self.family_member))
self.assertEqual(field.related_query_name(), None)
self.assertEqual(field.auto_created, False)
self.assertEqual(field.is_relation, False)
self.assertEqual(field.remote_field, None)
self.assertEqual(field.rel, None)
self.assertEqual(field.unique, field.is_primary_key)
self.assertEqual(field.attname, field.column_name)
self.assertEqual(field.validators, [])
self.assertEqual(field.empty_values, list(validators.EMPTY_VALUES))
def test_methods(self):
model_fields = self.family_member._meta._get_fields()
for field in model_fields:
self.assertEqual(field.get_attname(), field.attname)
self.assertEqual(field.get_cache_name(), "_{}_cache".format(field.name))
self.assertEqual(
field.value_to_string(self.family_member),
str(getattr(self.family_member, field.name)),
)
self.assertEqual(
field.pre_save(self.family_member, True),
getattr(self.family_member, field.name),
)
self.assertEqual(
field.get_prep_value(self.family_member.id), self.some_uuid
)
self.assertEqual(
field.get_db_prep_save(self.family_member.id, connection=None),
self.some_uuid,
)
self.assertTrue(isinstance(field.formfield(), fields.CharField))
self.assertEqual(field.get_internal_type(), field.__class__.__name__)
self.assertEqual(
field.get_attname_column(),
(field.db_field_name, field.db_field_name),
)
self.assertEqual(field.get_db_converters(), [])
field_with_default = self.family_member._meta.get_field("id")
self.assertTrue(
isinstance(field_with_default.get_default(), type(self.family_member.id))
)
# In Django, 'has_default' is a method, while in the python-driver
# it is, unfortunately, a property.
self.assertEqual(field_with_default.has_default, True)
text_field = self.family_member._meta.get_field("last_name")
text_field.save_form_data(instance=self.family_member, data="new data")
self.assertEqual(self.family_member.last_name, "new data")
self.assertIsNone(field.run_validators(text_field.value))
def test_methods_which_are_not_implemented_raise(self):
model_fields = self.family_member._meta._get_fields()
methods_that_should_raise = (
"get_choices",
"get_choices_default",
"select_format",
"deconstruct",
"db_type_suffix",
"get_prep_lookup",
"get_db_prep_lookup",
"set_attributes_from_name",
"db_parameters",
"get_col",
)
for field in model_fields:
for method_name in methods_that_should_raise:
with self.assertRaises(NotImplementedError):
getattr(field, method_name)()
def test_get_pk_value_on_save_returns_true_if_field_has_default(self):
field_with_default = self.family_member._meta.get_field("id")
self.assertEqual(
field_with_default.get_pk_value_on_save(instance=self.family_member),
self.family_member.id,
)
def test_get_pk_value_on_save_returns_none_if_field_no_default(self):
field_without_default = self.family_member._meta.get_field("last_name")
self.assertIsNone(
field_without_default.get_pk_value_on_save(instance=self.family_member),
)
def test_formfield_uses_specified_form_class(self):
text_field = self.family_member._meta.get_field("last_name")
form_field = text_field.formfield(form_class=fields.BooleanField)
self.assertTrue(isinstance(form_field, fields.BooleanField))
def test_field_check_returns_error_when_name_is_pk(self):
text_field = copy.deepcopy(self.family_member._meta.get_field("last_name"))
text_field.name = "pk"
check_errors = text_field.check()
self.assertEqual(len(check_errors), 1)
def test_field_check_returns_error_when_name_ends_underscore(self):
text_field = copy.deepcopy(self.family_member._meta.get_field("last_name"))
text_field.name = "name_"
check_errors = text_field.check()
self.assertEqual(len(check_errors), 1)
def test_field_check_returns_error_when_name_contains_double_under(self):
text_field = copy.deepcopy(self.family_member._meta.get_field("last_name"))
text_field.name = "some__name"
check_errors = text_field.check()
self.assertEqual(len(check_errors), 1)
def test_field_clean(self):
text_field = copy.deepcopy(self.family_member._meta.get_field("last_name"))
self.assertEqual(text_field.clean("some val", self.family_member), "some val")
def test_field_client_raises_when_value_is_not_valid(self):
text_field = copy.deepcopy(self.family_member._meta.get_field("last_name"))
with self.assertRaises(CQLValidationError):
text_field.clean(123, self.family_member)
def test_get_filter_kwargs_for_object(self):
text_field = self.family_member._meta.get_field("last_name")
self.assertEqual(
text_field.get_filter_kwargs_for_object(obj=self.family_member),
{"last_name": self.family_member.last_name},
)
id_field = self.family_member._meta.get_field("id")
self.assertEqual(
id_field.get_filter_kwargs_for_object(obj=self.family_member),
{"id": self.family_member.id},
)
|
|
##########################################################################
#
# Copyright 2011 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
'''Describe GL types.'''
import platform
from stdapi import *
GLboolean = Enum("GLboolean", [
"GL_TRUE",
"GL_FALSE",
])
GLvoid = Alias("GLvoid", Void)
GLbyte = Alias("GLbyte", SChar)
GLshort = Alias("GLshort", Short)
GLint = Alias("GLint", Int)
GLint64 = Alias("GLint64", Int64)
GLubyte = Alias("GLubyte", UChar)
GLushort = Alias("GLushort", UShort)
GLuint = Alias("GLuint", UInt)
GLuint64 = Alias("GLuint64", UInt64)
GLsizei = Alias("GLsizei", Int)
GLintptr = Alias("GLintptr", Int)
GLsizeiptr = Alias("GLsizeiptr", Int)
GLfloat = Alias("GLfloat", Float)
GLclampf = Alias("GLclampf", Float)
GLdouble = Alias("GLdouble", Double)
GLclampd = Alias("GLclampd", Double)
GLchar = Alias("GLchar", Char)
GLcharARB = Alias("GLcharARB", SChar)
GLintptrARB = Alias("GLintptrARB", Int)
GLsizeiptrARB = Alias("GLsizeiptrARB", Int)
if platform.system() == 'Darwin':
    GLhandleARB = Handle("handleARB", Alias("GLhandleARB", OpaquePointer(Void)))
else:
    GLhandleARB = Handle("handleARB", Alias("GLhandleARB", UInt))
GLhalfARB = Alias("GLhalfARB", UShort)
GLhalfNV = Alias("GLhalfNV", UShort)
GLint64EXT = Alias("GLint64EXT", Int64)
GLuint64EXT = Alias("GLuint64EXT", UInt64)
GLDEBUGPROC = Opaque("GLDEBUGPROC")
GLDEBUGPROCARB = Opaque("GLDEBUGPROCARB")
GLDEBUGPROCAMD = Opaque("GLDEBUGPROCAMD")
GLstring = String(GLchar)
GLstringConst = String(Const(GLchar))
GLstringARB = String(GLcharARB)
GLstringConstARB = String(Const(GLcharARB))
GLpointer = OpaquePointer(GLvoid)
GLpointerConst = OpaquePointer(Const(GLvoid))
GLlist = Handle("list", GLuint)
GLtexture = Handle("texture", GLuint)
GLbuffer = Handle("buffer", GLuint)
GLquery = Handle("query", GLuint)
GLfenceNV = Handle("fenceNV", GLuint)
GLprogram = Handle("program", GLuint)
GLshader = Handle("shader", GLuint)
# Share the same mapping table for uniform locations of both core and
# GL_ARB_shader_objects programs. For a combination of reasons:
#
# - all OpenGL implementations appear to alias the names for both kinds of
# programs;
#
# - most applications will use only one kind of shader programs;
#
# - some applications actually mix glUniformXxx calls with
# GL_ARB_shader_objects programs and glUniformXxxARB calls with core
# programs, and therefore, rely on a joint implementation.
#
# We use GLhandleARB as program key, since it is wider (void *) on MacOSX.
#
GLlocation = Handle("location", GLint, key=('program', GLhandleARB))
GLlocationARB = Handle("location", GLint, key=('programObj', GLhandleARB))
contextKey = ('reinterpret_cast<uintptr_t>(glretrace::getCurrentContext())', UIntPtr)
GLprogramARB = Handle("programARB", GLuint)
GLframebuffer = Handle("framebuffer", GLuint)
GLrenderbuffer = Handle("renderbuffer", GLuint)
GLfragmentShaderATI = Handle("fragmentShaderATI", GLuint)
GLarray = Handle("array", GLuint, key=contextKey) # per-context
GLarrayAPPLE = Handle("arrayAPPLE", GLuint) # shared
GLregion = Handle("region", GLuint)
GLpipeline = Handle("pipeline", GLuint)
GLsampler = Handle("sampler", GLuint)
GLfeedback = Handle("feedback", GLuint)
GLfence = Handle("fence", GLuint)
# GL mappings are pointers to linear memory regions.
#
# The map length is not always available in the function prototype, and must be
# reconstructed from other state.
GLmap = LinearPointer(GLvoid, "length")
GLsync = Handle("sync", IntPointer("GLsync"))
GLenum = Enum("GLenum", [
# Parameters are added later from glparams.py's parameter table
])
# Some functions take GLenum disguised as GLint, and need special treatment so
# that symbolic names are traced correctly.
GLenum_int = Alias("GLint", GLenum)
GLenum_mode = FakeEnum(GLenum, [
"GL_POINTS", # 0x0000
"GL_LINES", # 0x0001
"GL_LINE_LOOP", # 0x0002
"GL_LINE_STRIP", # 0x0003
"GL_TRIANGLES", # 0x0004
"GL_TRIANGLE_STRIP", # 0x0005
"GL_TRIANGLE_FAN", # 0x0006
"GL_QUADS", # 0x0007
"GL_QUAD_STRIP", # 0x0008
"GL_POLYGON", # 0x0009
"GL_LINES_ADJACENCY", # 0x000A
"GL_LINE_STRIP_ADJACENCY", # 0x000B
"GL_TRIANGLES_ADJACENCY", # 0x000C
"GL_TRIANGLE_STRIP_ADJACENCY", # 0x000D
"GL_PATCHES", # 0x000E
])
GLenum_error = FakeEnum(GLenum, [
"GL_NO_ERROR", # 0x0
"GL_INVALID_ENUM", # 0x0500
"GL_INVALID_VALUE", # 0x0501
"GL_INVALID_OPERATION", # 0x0502
"GL_STACK_OVERFLOW", # 0x0503
"GL_STACK_UNDERFLOW", # 0x0504
"GL_OUT_OF_MEMORY", # 0x0505
"GL_INVALID_FRAMEBUFFER_OPERATION", # 0x0506
"GL_TABLE_TOO_LARGE", # 0x8031
])
GLbitfield = Alias("GLbitfield", UInt)
GLbitfield_attrib = Flags(GLbitfield, [
"GL_ALL_ATTRIB_BITS", # 0x000FFFFF
"GL_CURRENT_BIT", # 0x00000001
"GL_POINT_BIT", # 0x00000002
"GL_LINE_BIT", # 0x00000004
"GL_POLYGON_BIT", # 0x00000008
"GL_POLYGON_STIPPLE_BIT", # 0x00000010
"GL_PIXEL_MODE_BIT", # 0x00000020
"GL_LIGHTING_BIT", # 0x00000040
"GL_FOG_BIT", # 0x00000080
"GL_DEPTH_BUFFER_BIT", # 0x00000100
"GL_ACCUM_BUFFER_BIT", # 0x00000200
"GL_STENCIL_BUFFER_BIT", # 0x00000400
"GL_VIEWPORT_BIT", # 0x00000800
"GL_TRANSFORM_BIT", # 0x00001000
"GL_ENABLE_BIT", # 0x00002000
"GL_COLOR_BUFFER_BIT", # 0x00004000
"GL_HINT_BIT", # 0x00008000
"GL_EVAL_BIT", # 0x00010000
"GL_LIST_BIT", # 0x00020000
"GL_TEXTURE_BIT", # 0x00040000
"GL_SCISSOR_BIT", # 0x00080000
"GL_MULTISAMPLE_BIT", # 0x20000000
])
GLbitfield_client_attrib = Flags(GLbitfield, [
"GL_CLIENT_ALL_ATTRIB_BITS", # 0xFFFFFFFF
"GL_CLIENT_PIXEL_STORE_BIT", # 0x00000001
"GL_CLIENT_VERTEX_ARRAY_BIT", # 0x00000002
])
GLbitfield_shader = Flags(GLbitfield, [
"GL_ALL_SHADER_BITS", # 0xFFFFFFFF
"GL_VERTEX_SHADER_BIT", # 0x00000001
"GL_FRAGMENT_SHADER_BIT", # 0x00000002
"GL_GEOMETRY_SHADER_BIT", # 0x00000004
"GL_TESS_CONTROL_SHADER_BIT", # 0x00000008
"GL_TESS_EVALUATION_SHADER_BIT", # 0x00000010
"GL_COMPUTE_SHADER_BIT", # 0x00000020
])
GLbitfield_access = Flags(GLbitfield, [
"GL_MAP_READ_BIT", # 0x0001
"GL_MAP_WRITE_BIT", # 0x0002
"GL_MAP_INVALIDATE_RANGE_BIT", # 0x0004
"GL_MAP_INVALIDATE_BUFFER_BIT", # 0x0008
"GL_MAP_FLUSH_EXPLICIT_BIT", # 0x0010
"GL_MAP_UNSYNCHRONIZED_BIT", # 0x0020
])
GLbitfield_sync_flush = Flags(GLbitfield, [
"GL_SYNC_FLUSH_COMMANDS_BIT", # 0x00000001
])
GLbitfield_barrier = Flags(GLbitfield, [
"GL_ALL_BARRIER_BITS", # 0xFFFFFFFF
"GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT", # 0x00000001
"GL_ELEMENT_ARRAY_BARRIER_BIT", # 0x00000002
"GL_UNIFORM_BARRIER_BIT", # 0x00000004
"GL_TEXTURE_FETCH_BARRIER_BIT", # 0x00000008
"GL_SHADER_GLOBAL_ACCESS_BARRIER_BIT_NV", # 0x00000010
"GL_SHADER_IMAGE_ACCESS_BARRIER_BIT", # 0x00000020
"GL_COMMAND_BARRIER_BIT", # 0x00000040
"GL_PIXEL_BUFFER_BARRIER_BIT", # 0x00000080
"GL_TEXTURE_UPDATE_BARRIER_BIT", # 0x00000100
"GL_BUFFER_UPDATE_BARRIER_BIT", # 0x00000200
"GL_FRAMEBUFFER_BARRIER_BIT", # 0x00000400
"GL_TRANSFORM_FEEDBACK_BARRIER_BIT", # 0x00000800
"GL_ATOMIC_COUNTER_BARRIER_BIT", # 0x00001000
])
# GL_ARB_vertex_array_bgra
size_bgra = FakeEnum(GLint, [
"GL_BGRA",
])
def GLindexBuffer(countExpr, typeExpr):
# Indices arguments are polymorphic:
# - offsets when element array buffer is bound
# - or a blob otherwise.
sizeExpr = '%s*_gl_type_size(%s)' % (countExpr, typeExpr)
return Polymorphic('_element_array_buffer_binding()', [
('0', Blob(Const(GLvoid), sizeExpr)),
],
IntPointer("const GLvoid *"),
contextLess=False,
)
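# Illustrative sketch (not taken from this file): in the API description
# tables, an element-array-aware argument would typically be declared by
# naming the sibling count/type parameters, e.g. a glDrawElements-style
# prototype could describe its indices argument as
#
#   GLindexBuffer("count", "type")
#
# so that, when no element array buffer is bound, the blob size can be
# computed as count * _gl_type_size(type).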
# Polymorphic object name
def GLname(targetExpr):
return Polymorphic(targetExpr, [
('GL_BUFFER', GLbuffer),
('GL_SHADER', GLshader),
('GL_PROGRAM', GLprogram),
('GL_VERTEX_ARRAY', GLarray),
('GL_QUERY', GLquery),
('GL_PROGRAM_PIPELINE', GLpipeline),
('GL_TRANSFORM_FEEDBACK', GLuint),
('GL_SAMPLER', GLsampler),
('GL_TEXTURE', GLtexture),
('GL_TEXTURE_1D', GLtexture),
('GL_TEXTURE_1D_ARRAY', GLtexture),
('GL_TEXTURE_2D', GLtexture),
('GL_TEXTURE_2D_MULTISAMPLE', GLtexture),
('GL_TEXTURE_2D_ARRAY', GLtexture),
('GL_TEXTURE_RECTANGLE', GLtexture),
('GL_TEXTURE_CUBE_MAP', GLtexture),
('GL_TEXTURE_CUBE_MAP_POSITIVE_X', GLtexture),
('GL_TEXTURE_CUBE_MAP_NEGATIVE_X', GLtexture),
('GL_TEXTURE_CUBE_MAP_POSITIVE_Y', GLtexture),
('GL_TEXTURE_CUBE_MAP_NEGATIVE_Y', GLtexture),
('GL_TEXTURE_CUBE_MAP_POSITIVE_Z', GLtexture),
('GL_TEXTURE_CUBE_MAP_NEGATIVE_Z', GLtexture),
('GL_TEXTURE_CUBE_MAP_ARRAY', GLtexture),
('GL_TEXTURE_3D', GLtexture),
('GL_RENDERBUFFER', GLrenderbuffer),
('GL_FRAMEBUFFER', GLframebuffer),
('GL_DISPLAY_LIST', GLlist),
('GL_FENCE_APPLE', GLfence),
('GL_DRAW_PIXELS_APPLE', GLuint), # GL_APPLE_fence
], GLuint)
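# Hypothetical usage sketch: GLname is intended for entry points that take an
# object-type enum alongside an object name. A glObjectLabel-style prototype
# could describe its name argument as GLname("identifier"), where
# "identifier" names the sibling parameter holding the GL_* object type, so
# the name is traced with the handle type selected above.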
# GL_AMD_performance_monitor
GLperfMonitorCounterInfoAMD = Polymorphic('pname', [
('GL_COUNTER_TYPE_AMD', Pointer(GLenum)),
('GL_PERCENTAGE_AMD', Pointer(Float)),
('GL_COUNTER_RANGE_AMD', Array(Float, 2)),
],
OpaquePointer(GLvoid),
)
|
|
#!/usr/bin/env python
import datetime
import random
import os
import logging
import unittest2
from dateutil.relativedelta import relativedelta
class KardboardTestCase(unittest2.TestCase):
def setUp(self):
if os.environ.get('KARDBOARD_SETTINGS'):
os.environ['KARDBOARD_SETTINGS'] = ''
from kardboard import default_settings
default_settings.TEMPLATE_DEBUG = True
from kardboard.views import app
from flask.ext.mongoengine import MongoEngine
from kardboard.util import now
delattr(app, 'db')
from mongoengine.connection import connect, disconnect
disconnect()
app.config.from_object('kardboard.default_settings')
app.config['MONGODB_DB'] = 'kardboard_unittest'
app.config['TESTING'] = True
app.config['CELERY_ALWAYS_EAGER'] = True
connect(app.config['MONGODB_DB'])
app.db = MongoEngine(app)
self.config = app.config
self.app = app.test_client()
self.flask_app = app
self.used_keys = []
self._setup_logging()
self.now = now
super(KardboardTestCase, self).setUp()
def tearDown(self):
if hasattr(self.config, 'TICKET_HELPER'):
del self.config['TICKET_HELPER']
self._flush_db()
self.flask_app.logger.handlers = self._old_logging_handlers
def _setup_logging(self):
self._old_logging_handlers = self.flask_app.logger.handlers
del self.flask_app.logger.handlers[:]
new_handler = logging.StreamHandler()
new_handler.setLevel(logging.CRITICAL)
new_handler.setFormatter(logging.Formatter(self.flask_app.debug_log_format))
self.flask_app.logger.addHandler(new_handler)
def _flush_db(self):
from mongoengine.connection import _get_db
db = _get_db()
# Truncate/wipe the test database
names = [name for name in db.collection_names()
if 'system.' not in name]
[db.drop_collection(name) for name in names]
def _get_target_url(self):
raise NotImplementedError
def _get_target_class(self):
raise NotImplementedError
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
def _get_card_class(self):
from kardboard.models import Kard
return Kard
def _get_record_class(self):
from kardboard.models import DailyRecord
return DailyRecord
def _get_person_class(self):
from kardboard.models import Person
return Person
def _make_unique_key(self):
key = random.randint(1, 10000)
if key not in self.used_keys:
self.used_keys.append(key)
return key
return self._make_unique_key()
def _date(self, dtype, date=None, days=0):
from kardboard.util import make_end_date, make_start_date
from kardboard.util import now
if not date:
date = now()
if dtype == 'start':
date = make_start_date(date=date)
elif dtype == 'end':
date = make_end_date(date=date)
date = date + relativedelta(days=days)
return date
def make_card(self, **kwargs):
from kardboard.util import now
key = self._make_unique_key()
fields = {
'key': "CMSAD-%s" % key,
'title': "Theres always money in the banana stand",
'backlog_date': now()
}
fields.update(**kwargs)
k = self._get_card_class()(**fields)
return k
def delete_all_cards(self):
self._get_card_class().objects.all().delete()
def make_record(self, date, **kwargs):
fields = {
'date': date,
'backlog': 3,
'in_progress': 8,
'done': 10,
'completed': 1,
'moving_cycle_time': 12,
'moving_lead_time': 16,
'moving_std_dev': 69,
'moving_median_abs_dev': 30,
}
fields.update(**kwargs)
r = self._get_record_class()(**fields)
return r
def make_person(self, **kwargs):
key = self._make_unique_key()
fields = {
'name': 'cheisel-%s' % key,
}
fields.update(**kwargs)
p = self._get_person_class()(**fields)
return p
def assertEqualDateTimes(self, expected, actual):
expected = (expected.year, expected.month, expected.day, expected.hour, expected.minute)
actual = (actual.year, actual.month, actual.day, actual.hour, actual.minute)
self.assertEqual(expected, actual)
class DashboardTestCase(KardboardTestCase):
def setUp(self):
super(DashboardTestCase, self).setUp()
from kardboard.models import Kard, DailyRecord
self.Kard = Kard
self.DailyRecord = DailyRecord
self.year = datetime.datetime.now().year - 1
self.month = 6
self.day = 15
self.team1 = 'Team 1'
self.team2 = 'Team 2'
self.backlogged_date = datetime.datetime(
year=self.year, month=self.month, day=12)
for i in xrange(0, 5):
#board will have 5 cards in elabo, started, and done
k = self.make_card(backlog_date=self.backlogged_date, team=self.team1) # elabo
k.save()
k = self.make_card(start_date=datetime.datetime(
year=self.year, month=self.month, day=12), team=self.team1)
k.save()
k = self.make_card(
start_date=datetime.datetime(year=self.year,
month=self.month, day=12),
done_date=datetime.datetime(year=self.year,
month=self.month, day=19), team=self.team1)
k.save()
for i in xrange(0, 3):
#board will have 3 cards in elabo, started, and done
k = self.make_card(backlog_date=self.backlogged_date, team=self.team2) # backlogged
k.save()
k = self.make_card(start_date=datetime.datetime(
year=2011, month=6, day=12), team=self.team2)
k.save()
k = self.make_card(
start_date=datetime.datetime(year=2011, month=6, day=12),
done_date=datetime.datetime(year=2011, month=6, day=19), team=self.team2)
k.save()
def _set_up_records(self):
from kardboard.util import make_start_date
from kardboard.util import make_end_date
start_date = datetime.datetime(2011, 1, 1)
end_date = datetime.datetime(2011, 6, 30)
start_date = make_start_date(date=start_date)
end_date = make_end_date(date=end_date)
current_date = start_date
while current_date <= end_date:
r = self.make_record(current_date)
r.save()
current_date = current_date + relativedelta(days=1)
class FormTests(KardboardTestCase):
pass
|
|
import unittest
from reactivex import operators as ops
from reactivex.testing import ReactiveTest, TestScheduler
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestSkip(unittest.TestCase):
def test_skip_complete_after(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 6),
on_next(150, 4),
on_next(210, 9),
on_next(230, 13),
on_next(270, 7),
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
on_completed(690),
)
def create():
return xs.pipe(ops.skip(20))
results = scheduler.start(create)
assert results.messages == [on_completed(690)]
assert xs.subscriptions == [subscribe(200, 690)]
def test_skip_complete_same(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 6),
on_next(150, 4),
on_next(210, 9),
on_next(230, 13),
on_next(270, 7),
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
on_completed(690),
)
def create():
return xs.pipe(ops.skip(17))
results = scheduler.start(create)
assert results.messages == [on_completed(690)]
assert xs.subscriptions == [subscribe(200, 690)]
def test_skip_complete_before(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 6),
on_next(150, 4),
on_next(210, 9),
on_next(230, 13),
on_next(270, 7),
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
on_completed(690),
)
def create():
return xs.pipe(ops.skip(10))
results = scheduler.start(create)
assert results.messages == [
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
on_completed(690),
]
assert xs.subscriptions == [subscribe(200, 690)]
def test_skip_complete_zero(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 6),
on_next(150, 4),
on_next(210, 9),
on_next(230, 13),
on_next(270, 7),
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
on_completed(690),
)
def create():
return xs.pipe(ops.skip(0))
results = scheduler.start(create)
assert results.messages == [
on_next(210, 9),
on_next(230, 13),
on_next(270, 7),
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
on_completed(690),
]
assert xs.subscriptions == [subscribe(200, 690)]
def test_skip_error_after(self):
ex = "ex"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 6),
on_next(150, 4),
on_next(210, 9),
on_next(230, 13),
on_next(270, 7),
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
on_error(690, ex),
)
def create():
return xs.pipe(ops.skip(20))
results = scheduler.start(create)
assert results.messages == [on_error(690, ex)]
assert xs.subscriptions == [subscribe(200, 690)]
def test_skip_error_same(self):
ex = "ex"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 6),
on_next(150, 4),
on_next(210, 9),
on_next(230, 13),
on_next(270, 7),
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
on_error(690, ex),
)
def create():
return xs.pipe(ops.skip(17))
results = scheduler.start(create)
assert results.messages == [on_error(690, ex)]
assert xs.subscriptions == [subscribe(200, 690)]
def test_skip_error_before(self):
ex = "ex"
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 6),
on_next(150, 4),
on_next(210, 9),
on_next(230, 13),
on_next(270, 7),
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
on_error(690, ex),
)
def create():
return xs.pipe(ops.skip(3))
results = scheduler.start(create)
assert results.messages == [
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
on_error(690, ex),
]
assert xs.subscriptions == [subscribe(200, 690)]
def test_skip_dispose_before(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 6),
on_next(150, 4),
on_next(210, 9),
on_next(230, 13),
on_next(270, 7),
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
)
def create():
return xs.pipe(ops.skip(3))
results = scheduler.start(create, disposed=250)
assert results.messages == []
assert xs.subscriptions == [subscribe(200, 250)]
def test_skip_dispose_after(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 6),
on_next(150, 4),
on_next(210, 9),
on_next(230, 13),
on_next(270, 7),
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
on_next(410, 15),
on_next(415, 16),
on_next(460, 72),
on_next(510, 76),
on_next(560, 32),
on_next(570, -100),
on_next(580, -3),
on_next(590, 5),
on_next(630, 10),
)
def create():
return xs.pipe(ops.skip(3))
results = scheduler.start(create, disposed=400)
assert results.messages == [
on_next(280, 1),
on_next(300, -1),
on_next(310, 3),
on_next(340, 8),
on_next(370, 11),
]
assert xs.subscriptions == [subscribe(200, 400)]
if __name__ == "__main__":
unittest.main()
|
|
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Backup manager manages volume backups.
Volume backups are full copies of persistent volumes stored in a backup
store, e.g. an object store or any other backup store if and when support is
added. They are usable without the original object being available. A
volume backup can be restored to the original volume it was created from or
to any other available volume that is at least as large as the original volume.
Volume backups can be created, restored, deleted and listed.
**Related Flags**
:backup_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.backup.manager.Manager`).
"""
import contextlib
import os
from castellan import key_manager
from eventlet import tpool
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.keymgr import migration as key_migration
from cinder import manager
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
backup_manager_opts = [
cfg.StrOpt('backup_driver',
default='cinder.backup.drivers.swift.SwiftBackupDriver',
help='Driver to use for backups.',),
cfg.IntOpt('backup_driver_init_check_interval',
default=60,
min=5,
help='Time in seconds between checks to see if the backup '
'driver has been successfully initialized, any time '
'the driver is restarted.'),
cfg.IntOpt('backup_driver_stats_polling_interval',
default=60,
min=10,
deprecated_name='backup_driver_status_check_interval',
help='Time in seconds between checks of the backup driver '
'status. If it does not report as working, it is '
'restarted.'),
cfg.BoolOpt('backup_service_inithost_offload',
default=True,
help='Offload pending backup delete during '
'backup service startup. If false, the backup service '
'will remain down until all pending backups are '
'deleted.',),
cfg.IntOpt('backup_native_threads_pool_size',
default=60,
min=20,
help='Size of the native threads pool for the backups. '
'Most backup drivers rely heavily on this, it can be '
'decreased for specific drivers that don\'t.'),
]
CONF = cfg.CONF
CONF.register_opts(backup_manager_opts)
CONF.import_opt('use_multipath_for_image_xfer', 'cinder.volume.driver')
CONF.import_opt('num_volume_device_scan_tries', 'cinder.volume.driver')
QUOTAS = quota.QUOTAS
MAPPING = {
# Module name "google" conflicts with google library namespace inside the
# driver when it imports google.auth
'cinder.backup.drivers.google.GoogleBackupDriver':
'cinder.backup.drivers.gcs.GoogleBackupDriver',
}
SERVICE_PGRP = '' if os.name == 'nt' else os.getpgrp()
# TODO(geguileo): Once Eventlet issue #432 gets fixed we can just tpool.execute
# the whole call to the driver's backup and restore methods instead of proxy
# wrapping the device_file and having the drivers also proxy wrap their
# writes/reads and the compression/decompression calls.
# (https://github.com/eventlet/eventlet/issues/432)
class BackupManager(manager.SchedulerDependentManager):
"""Manages backup of block storage devices."""
RPC_API_VERSION = backup_rpcapi.BackupAPI.RPC_API_VERSION
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, *args, **kwargs):
self.az = CONF.storage_availability_zone
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
super(BackupManager, self).__init__(*args, **kwargs)
self.is_initialized = False
self._set_tpool_size(CONF.backup_native_threads_pool_size)
self._process_number = kwargs.get('process_number', 1)
self._semaphore = kwargs.get('semaphore', contextlib.suppress())
self.driver_name = CONF.backup_driver
if self.driver_name in MAPPING:
new_name = MAPPING[self.driver_name]
LOG.warning('Backup driver path %s is deprecated, update your '
'configuration to the new path %s',
self.driver_name, new_name)
self.driver_name = new_name
self.service = importutils.import_class(self.driver_name)
self.message_api = message_api.API()
def init_host(self, **kwargs):
"""Run initialization needed for a standalone service."""
ctxt = context.get_admin_context()
self.setup_backup_backend(ctxt)
try:
self._cleanup_incomplete_backup_operations(ctxt)
except Exception:
# Don't block startup of the backup service.
LOG.exception("Problem cleaning incomplete backup operations.")
# Migrate any ConfKeyManager keys based on fixed_key to the currently
# configured key manager.
backups = objects.BackupList.get_all_by_host(ctxt, self.host)
self._add_to_threadpool(key_migration.migrate_fixed_key,
backups=backups)
self.publish_service_capabilities(ctxt)
def _setup_backup_driver(self, ctxt):
backup_service = self.service(context=ctxt)
backup_service.check_for_setup_error()
self.is_initialized = True
raise loopingcall.LoopingCallDone()
def setup_backup_backend(self, ctxt):
try:
init_loop = loopingcall.FixedIntervalLoopingCall(
self._setup_backup_driver, ctxt)
init_loop.start(interval=CONF.backup_driver_init_check_interval)
except loopingcall.LoopingCallDone:
LOG.info("Backup driver was successfully initialized.")
except Exception:
LOG.exception("Failed to initialize driver.",
resource={'type': 'driver',
'id': self.__class__.__name__})
def reset(self):
super(BackupManager, self).reset()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
@utils.synchronized('cleanup_incomplete_backups_%s' % SERVICE_PGRP,
external=True, delay=0.1)
def _cleanup_incomplete_backup_operations(self, ctxt):
# Only the first launched process should do the cleanup, the others
# have waited on the lock for the first one to finish the cleanup and
# can now continue with the start process.
if self._process_number != 1:
LOG.debug("Process #%s %sskips cleanup.",
self._process_number,
'(pgid=%s) ' % SERVICE_PGRP if SERVICE_PGRP else '')
return
LOG.info("Cleaning up incomplete backup operations.")
# TODO(smulcahy) implement full resume of backup and restore
# operations on restart (rather than simply resetting)
backups = objects.BackupList.get_all_by_host(ctxt, self.host)
for backup in backups:
try:
self._cleanup_one_backup(ctxt, backup)
except Exception:
LOG.exception("Problem cleaning up backup %(bkup)s.",
{'bkup': backup['id']})
try:
self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt,
backup)
except Exception:
LOG.exception("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s.",
{'bkup': backup['id']})
def _cleanup_one_volume(self, ctxt, volume):
if volume['status'] == 'backing-up':
self._detach_all_attachments(ctxt, volume)
LOG.info('Resetting volume %(vol_id)s to previous '
'status %(status)s (was backing-up).',
{'vol_id': volume['id'],
'status': volume['previous_status']})
self.db.volume_update(ctxt, volume['id'],
{'status': volume['previous_status']})
elif volume['status'] == 'restoring-backup':
self._detach_all_attachments(ctxt, volume)
LOG.info('Setting volume %s to error_restoring '
'(was restoring-backup).', volume['id'])
self.db.volume_update(ctxt, volume['id'],
{'status': 'error_restoring'})
def _cleanup_one_backup(self, ctxt, backup):
if backup['status'] == fields.BackupStatus.CREATING:
LOG.info('Resetting backup %s to error (was creating).',
backup['id'])
volume = objects.Volume.get_by_id(ctxt, backup.volume_id)
self._cleanup_one_volume(ctxt, volume)
err = 'incomplete backup reset on manager restart'
volume_utils.update_backup_error(backup, err)
elif backup['status'] == fields.BackupStatus.RESTORING:
LOG.info('Resetting backup %s to '
'available (was restoring).',
backup['id'])
volume = objects.Volume.get_by_id(ctxt, backup.restore_volume_id)
self._cleanup_one_volume(ctxt, volume)
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
elif backup['status'] == fields.BackupStatus.DELETING:
# Don't resume deleting the backup of an encrypted volume. The
# admin context won't be sufficient to delete the backup's copy
# of the encryption key ID (a real user context is required).
if backup.encryption_key_id is None:
LOG.info('Resuming delete on backup: %s.', backup.id)
if CONF.backup_service_inithost_offload:
# Offload all the pending backup delete operations to the
# threadpool to prevent the main backup service thread
# from being blocked.
self._add_to_threadpool(self.delete_backup, ctxt, backup)
else:
# Delete backups sequentially
self.delete_backup(ctxt, backup)
else:
LOG.info('Unable to resume deleting backup of an encrypted '
'volume, resetting backup %s to error_deleting '
'(was deleting).',
backup.id)
backup.status = fields.BackupStatus.ERROR_DELETING
backup.save()
def _detach_all_attachments(self, ctxt, volume):
attachments = volume['volume_attachment'] or []
for attachment in attachments:
if (attachment['attached_host'] == self.host and
attachment['instance_uuid'] is None):
try:
rpcapi = self.volume_rpcapi
rpcapi.detach_volume(ctxt, volume, attachment['id'])
except Exception:
LOG.exception("Detach attachment %(attach_id)s failed.",
{'attach_id': attachment['id']},
resource=volume)
def _delete_temp_volume(self, ctxt, backup):
try:
temp_volume = objects.Volume.get_by_id(
ctxt, backup.temp_volume_id)
self.volume_rpcapi.delete_volume(ctxt, temp_volume)
except exception.VolumeNotFound:
LOG.debug("Could not find temp volume %(vol)s to clean up "
"for backup %(backup)s.",
{'vol': backup.temp_volume_id,
'backup': backup.id})
backup.temp_volume_id = None
backup.save()
def _delete_temp_snapshot(self, ctxt, backup):
try:
temp_snapshot = objects.Snapshot.get_by_id(
ctxt, backup.temp_snapshot_id)
# We may want to consider routing those calls through the
# cinder API.
temp_snapshot.status = fields.SnapshotStatus.DELETING
temp_snapshot.save()
self.volume_rpcapi.delete_snapshot(ctxt, temp_snapshot)
except exception.SnapshotNotFound:
LOG.debug("Could not find temp snapshot %(snap)s to clean "
"up for backup %(backup)s.",
{'snap': backup.temp_snapshot_id,
'backup': backup.id})
backup.temp_snapshot_id = None
backup.save()
def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup):
# NOTE(xyang): If the service crashes or gets restarted during the
# backup operation, there could be temporary volumes or snapshots
# that are not deleted. Make sure any temporary volumes or snapshots
# create by the backup job are deleted when service is started.
if (backup.temp_volume_id
and backup.status == fields.BackupStatus.ERROR):
self._delete_temp_volume(ctxt, backup)
if (backup.temp_snapshot_id
and backup.status == fields.BackupStatus.ERROR):
self._delete_temp_snapshot(ctxt, backup)
def _cleanup_temp_volumes_snapshots_when_backup_created(
self, ctxt, backup):
# Delete temp volumes or snapshots when backup creation is completed.
if backup.temp_volume_id:
self._delete_temp_volume(ctxt, backup)
if backup.temp_snapshot_id:
self._delete_temp_snapshot(ctxt, backup)
@utils.limit_operations
def create_backup(self, context, backup):
"""Create volume backups using configured backup service."""
volume_id = backup.volume_id
snapshot_id = backup.snapshot_id
volume = objects.Volume.get_by_id(context, volume_id)
snapshot = objects.Snapshot.get_by_id(
context, snapshot_id) if snapshot_id else None
previous_status = volume.get('previous_status', None)
context.message_resource_id = backup.id
context.message_resource_type = message_field.Resource.VOLUME_BACKUP
context.message_action = message_field.Action.BACKUP_CREATE
if snapshot_id:
log_message = ('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s snapshot: %(snapshot_id)s.'
% {'backup_id': backup.id,
'volume_id': volume_id,
'snapshot_id': snapshot_id})
else:
log_message = ('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'
% {'backup_id': backup.id,
'volume_id': volume_id})
LOG.info(log_message)
self._notify_about_backup_usage(context, backup, "create.start")
expected_status = "backing-up"
if snapshot_id:
actual_status = snapshot['status']
if actual_status != expected_status:
err = _('Create backup aborted, expected snapshot status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
volume_utils.update_backup_error(backup, err)
raise exception.InvalidSnapshot(reason=err)
else:
actual_status = volume['status']
if actual_status != expected_status:
err = _('Create backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
volume_utils.update_backup_error(backup, err)
raise exception.InvalidVolume(reason=err)
expected_status = fields.BackupStatus.CREATING
actual_status = backup.status
if actual_status != expected_status:
err = _('Create backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
volume_utils.update_backup_error(backup, err)
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.BACKUP_INVALID_STATE)
raise exception.InvalidBackup(reason=err)
try:
if not self.is_working():
err = _('Create backup aborted because the backup service is down.')
volume_utils.update_backup_error(backup, err)
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.BACKUP_SERVICE_DOWN)
raise exception.InvalidBackup(reason=err)
backup.service = self.driver_name
backup.save()
# Start backup, then continue_backup, then finish_backup
self._start_backup(context, backup, volume)
except Exception as err:
with excutils.save_and_reraise_exception():
if snapshot_id:
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
else:
self.db.volume_update(
context, volume_id,
{'status': previous_status,
'previous_status': 'error_backing-up'})
volume_utils.update_backup_error(backup, str(err))
def _start_backup(self, context, backup, volume):
"""This starts the backup process.
First we have to get the backup device from the volume manager.
This can take a long time to complete. Once the volume manager
is done creating/getting the backup device, then we get a callback
to complete the process of backing up the volume.
"""
# Save a copy of the encryption key ID in case the volume is deleted.
if (volume.encryption_key_id is not None and
backup.encryption_key_id is None):
backup.encryption_key_id = volume_utils.clone_encryption_key(
context,
key_manager.API(CONF),
volume.encryption_key_id)
backup.save()
# This is an async call to the volume manager. We will get a
# callback from the volume manager to continue once it's done.
LOG.info("Call Volume Manager to get_backup_device for %s", backup)
self.volume_rpcapi.get_backup_device(context, backup, volume)
def continue_backup(self, context, backup, backup_device):
"""This is the callback from the volume manager to continue."""
message_created = False
volume_id = backup.volume_id
volume = objects.Volume.get_by_id(context, volume_id)
snapshot_id = backup.snapshot_id
snapshot = objects.Snapshot.get_by_id(
context, snapshot_id) if snapshot_id else None
previous_status = volume.get('previous_status', None)
backup_service = self.service(context)
properties = volume_utils.brick_get_connector_properties()
updates = {}
try:
try:
attach_info = self._attach_device(context,
backup_device.device_obj,
properties,
backup_device.is_snapshot)
except Exception:
with excutils.save_and_reraise_exception():
if not message_created:
message_created = True
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.ATTACH_ERROR)
try:
device_path = attach_info['device']['path']
if (isinstance(device_path, str) and
not os.path.isdir(device_path)):
if backup_device.secure_enabled:
with open(device_path, 'rb') as device_file:
updates = backup_service.backup(
backup, tpool.Proxy(device_file))
else:
with utils.temporary_chown(device_path):
with open(device_path, 'rb') as device_file:
updates = backup_service.backup(
backup, tpool.Proxy(device_file))
# device_path is already file-like so no need to open it
else:
updates = backup_service.backup(backup,
tpool.Proxy(device_path))
except Exception:
with excutils.save_and_reraise_exception():
if not message_created:
message_created = True
self.message_api.create_from_request_context(
context,
detail=
message_field.Detail.BACKUP_CREATE_DRIVER_ERROR)
finally:
try:
self._detach_device(context, attach_info,
backup_device.device_obj, properties,
backup_device.is_snapshot, force=True,
ignore_errors=True)
except Exception:
with excutils.save_and_reraise_exception():
if not message_created:
message_created = True
self.message_api.create_from_request_context(
context,
detail=
message_field.Detail.DETACH_ERROR)
except Exception as err:
with excutils.save_and_reraise_exception():
if snapshot_id:
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
else:
self.db.volume_update(
context, volume_id,
{'status': previous_status,
'previous_status': 'error_backing-up'})
volume_utils.update_backup_error(backup, str(err))
finally:
with backup.as_read_deleted():
backup.refresh()
try:
self._cleanup_temp_volumes_snapshots_when_backup_created(
context, backup)
except Exception:
with excutils.save_and_reraise_exception():
if not message_created:
self.message_api.create_from_request_context(
context,
detail=
message_field.Detail.BACKUP_CREATE_CLEANUP_ERROR)
self._finish_backup(context, backup, volume, updates)
def _finish_backup(self, context, backup, volume, updates):
volume_id = backup.volume_id
snapshot_id = backup.snapshot_id
previous_status = volume.get('previous_status', None)
# Restore the original status.
if snapshot_id:
self.db.snapshot_update(
context, snapshot_id,
{'status': fields.SnapshotStatus.AVAILABLE})
else:
self.db.volume_update(context, volume_id,
{'status': previous_status,
'previous_status': 'backing-up'})
# continue_backup above refreshed the backup, so its status reflects the
# latest state, even if the backup was deleted in the meantime
completion_msg = 'finished'
if backup.status in (fields.BackupStatus.DELETING,
fields.BackupStatus.DELETED):
completion_msg = 'aborted'
else:
backup.status = fields.BackupStatus.AVAILABLE
backup.size = volume['size']
if updates:
backup.update(updates)
backup.save()
# Update the parent backup's num_dependent_backups once the child
# backup has been created successfully.
if backup.parent_id:
parent_backup = objects.Backup.get_by_id(context,
backup.parent_id)
parent_backup.num_dependent_backups += 1
parent_backup.save()
LOG.info('Create backup %s. backup: %s.', completion_msg, backup.id)
self._notify_about_backup_usage(context, backup, "create.end")
def _is_our_backup(self, backup):
# Accept both strings and Backup OVOs (using their service field)
if not isinstance(backup, str):
backup = backup.service
if not backup:
return True
# TODO(tommylikehu): We upgraded the 'driver_name' from module
# to class name, so we use a prefix match here to accept both
# namings; this can be replaced with an equality check in the
# next release (Rocky).
if self.driver_name.startswith(backup):
return True
# We support renaming of drivers, so check old names as well
for key, value in MAPPING.items():
if key.startswith(backup) and self.driver_name.startswith(value):
return True
return False
@utils.limit_operations
def restore_backup(self, context, backup, volume_id):
"""Restore volume backups from configured backup service."""
context.message_resource_id = backup.id
context.message_resource_type = message_field.Resource.VOLUME_BACKUP
context.message_action = message_field.Action.BACKUP_RESTORE
LOG.info('Restore backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.',
{'backup_id': backup.id, 'volume_id': volume_id})
volume = objects.Volume.get_by_id(context, volume_id)
self._notify_about_backup_usage(context, backup, "restore.start")
expected_status = [fields.VolumeStatus.RESTORING_BACKUP,
fields.VolumeStatus.CREATING]
volume_previous_status = volume['status']
if volume_previous_status not in expected_status:
err = (_('Restore backup aborted, expected volume status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': ','.join(expected_status),
'actual_status': volume_previous_status})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
self.db.volume_update(
context, volume_id,
{'status':
(fields.VolumeStatus.ERROR if
volume_previous_status == fields.VolumeStatus.CREATING else
fields.VolumeStatus.ERROR_RESTORING)})
self.message_api.create(
context,
action=message_field.Action.BACKUP_RESTORE,
resource_type=message_field.Resource.VOLUME_BACKUP,
resource_uuid=volume.id,
detail=message_field.Detail.VOLUME_INVALID_STATE)
raise exception.InvalidVolume(reason=err)
expected_status = fields.BackupStatus.RESTORING
actual_status = backup['status']
if actual_status != expected_status:
err = (_('Restore backup aborted: expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
volume_utils.update_backup_error(backup, err)
self.db.volume_update(context, volume_id,
{'status': fields.VolumeStatus.ERROR})
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.BACKUP_INVALID_STATE)
raise exception.InvalidBackup(reason=err)
if volume['size'] > backup['size']:
LOG.info('Volume: %(vol_id)s, size: %(vol_size)d is '
'larger than backup: %(backup_id)s, '
'size: %(backup_size)d, continuing with restore.',
{'vol_id': volume['id'],
'vol_size': volume['size'],
'backup_id': backup['id'],
'backup_size': backup['size']})
if not self._is_our_backup(backup):
err = _('Restore backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].') % {
'configured_service': self.driver_name,
'backup_service': backup.service,
}
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
self.db.volume_update(context, volume_id,
{'status': fields.VolumeStatus.ERROR})
raise exception.InvalidBackup(reason=err)
canceled = False
try:
self._run_restore(context, backup, volume)
except exception.BackupRestoreCancel:
canceled = True
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(
context, volume_id,
{'status': (fields.VolumeStatus.ERROR if
volume_previous_status == fields.VolumeStatus.CREATING
else fields.VolumeStatus.ERROR_RESTORING)})
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
if canceled:
volume.status = fields.VolumeStatus.ERROR
else:
volume.status = fields.VolumeStatus.AVAILABLE
# NOTE(tommylikehu): If previous status is 'creating', this is
# just a new created volume and we need update the 'launched_at'
# attribute as well.
if volume_previous_status == fields.VolumeStatus.CREATING:
volume['launched_at'] = timeutils.utcnow()
old_src_backup_id = self.db.volume_metadata_get(
context, volume_id).get("src_backup_id", None)
if backup.volume_id != volume.id or (
old_src_backup_id and old_src_backup_id != backup.id):
self.db.volume_metadata_update(
context,
volume.id,
{'src_backup_id': backup.id},
False)
volume.save()
backup.status = fields.BackupStatus.AVAILABLE
backup.save()
LOG.info('%(result)s restoring backup %(backup_id)s to volume '
'%(volume_id)s.',
{'result': 'Canceled' if canceled else 'Finished',
'backup_id': backup.id,
'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
def _run_restore(self, context, backup, volume):
message_created = False
orig_key_id = volume.encryption_key_id
backup_service = self.service(context)
properties = volume_utils.brick_get_connector_properties()
secure_enabled = (
self.volume_rpcapi.secure_file_operations_enabled(context,
volume))
try:
attach_info = self._attach_device(context, volume, properties)
except Exception:
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.ATTACH_ERROR)
raise
# NOTE(geguileo): Not all I/O disk operations properly do greenthread
# context switching and may end up blocking the greenthread, so we go
# with native threads proxy-wrapping the device file object.
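# For illustration (hedged, mirroring the calls below): tpool.Proxy runs
# each read()/write() on the wrapped file object in a native thread,
#
#   backup_service.restore(backup, volume.id, tpool.Proxy(device_file))
#
# so a blocking disk operation cannot starve the eventlet hub.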
try:
device_path = attach_info['device']['path']
open_mode = 'rb+' if os.name == 'nt' else 'wb'
if (isinstance(device_path, str) and
not os.path.isdir(device_path)):
if secure_enabled:
with open(device_path, open_mode) as device_file:
backup_service.restore(backup, volume.id,
tpool.Proxy(device_file))
else:
with utils.temporary_chown(device_path):
with open(device_path, open_mode) as device_file:
backup_service.restore(backup, volume.id,
tpool.Proxy(device_file))
# device_path is already file-like so no need to open it
else:
backup_service.restore(backup, volume.id,
tpool.Proxy(device_path))
except exception.BackupRestoreCancel:
raise
except Exception:
LOG.exception('Restoring backup %(backup_id)s to volume '
'%(volume_id)s failed.', {'backup_id': backup.id,
'volume_id': volume.id})
# We set message_created to True before creating the
# message because if the message create call fails
# and is caught by the base/outer exception handler
# then we would end up storing a wrong message
message_created = True
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.BACKUP_RESTORE_ERROR)
raise
finally:
try:
self._detach_device(context, attach_info, volume, properties,
force=True)
except Exception:
if not message_created:
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.DETACH_ERROR)
raise
# Regardless of whether the restore was successful, do some
# housekeeping to ensure the restored volume's encryption key ID is
# unique, and any previous key ID is deleted. Start by fetching fresh
# info on the restored volume.
restored_volume = objects.Volume.get_by_id(context, volume.id)
restored_key_id = restored_volume.encryption_key_id
if restored_key_id != orig_key_id:
LOG.info('Updating encryption key ID for volume %(volume_id)s '
'from backup %(backup_id)s.',
{'volume_id': volume.id, 'backup_id': backup.id})
key_mgr = key_manager.API(CONF)
if orig_key_id is not None:
LOG.debug('Deleting original volume encryption key ID.')
volume_utils.delete_encryption_key(context,
key_mgr,
orig_key_id)
if backup.encryption_key_id is None:
# This backup predates the current code that stores the cloned
# key ID in the backup database. Fortunately, the key ID
# restored from the backup data _is_ a clone of the original
# volume's key ID, so grab it.
LOG.debug('Gleaning backup encryption key ID from metadata.')
backup.encryption_key_id = restored_key_id
backup.save()
# Clone the key ID again to ensure every restored volume has
# a unique key ID. The volume's key ID should not be the same
# as the backup.encryption_key_id (the copy made when the backup
# was first created).
new_key_id = volume_utils.clone_encryption_key(
context,
key_mgr,
backup.encryption_key_id)
restored_volume.encryption_key_id = new_key_id
restored_volume.save()
else:
LOG.debug('Encryption key ID for volume %(volume_id)s already '
'matches encryption key ID in backup %(backup_id)s.',
{'volume_id': volume.id, 'backup_id': backup.id})
def delete_backup(self, context, backup):
"""Delete volume backup from configured backup service."""
LOG.info('Delete backup started, backup: %s.', backup.id)
self._notify_about_backup_usage(context, backup, "delete.start")
context.message_resource_id = backup.id
context.message_resource_type = message_field.Resource.VOLUME_BACKUP
context.message_action = message_field.Action.BACKUP_DELETE
expected_status = fields.BackupStatus.DELETING
actual_status = backup.status
if actual_status != expected_status:
err = _('Delete_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') \
% {'expected_status': expected_status,
'actual_status': actual_status}
volume_utils.update_backup_error(backup, err)
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.BACKUP_INVALID_STATE)
raise exception.InvalidBackup(reason=err)
if backup.service and not self.is_working():
err = _('Delete backup aborted because the backup service is down.')
status = fields.BackupStatus.ERROR_DELETING
volume_utils.update_backup_error(backup, err, status)
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.BACKUP_SERVICE_DOWN)
raise exception.InvalidBackup(reason=err)
if not self._is_our_backup(backup):
err = _('Delete backup aborted, the backup service currently'
' configured [%(configured_service)s] is not the'
' backup service that was used to create this'
' backup [%(backup_service)s].')\
% {'configured_service': self.driver_name,
'backup_service': backup.service}
volume_utils.update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err)
if backup.service:
try:
backup_service = self.service(context)
backup_service.delete_backup(backup)
except Exception as err:
with excutils.save_and_reraise_exception():
volume_utils.update_backup_error(backup, str(err))
self.message_api.create_from_request_context(
context,
detail=message_field.Detail.BACKUP_DELETE_DRIVER_ERROR)
# Get reservations
try:
reserve_opts = {
'backups': -1,
'backup_gigabytes': -backup.size,
}
reservations = QUOTAS.reserve(context,
project_id=backup.project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception("Failed to update usages deleting backup")
if backup.encryption_key_id is not None:
volume_utils.delete_encryption_key(context,
key_manager.API(CONF),
backup.encryption_key_id)
backup.encryption_key_id = None
backup.save()
backup.destroy()
# If this is an incremental backup, update the parent backup's
# num_dependent_backups accordingly.
if backup.parent_id:
parent_backup = objects.Backup.get_by_id(context,
backup.parent_id)
if parent_backup.has_dependent_backups:
parent_backup.num_dependent_backups -= 1
parent_backup.save()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations,
project_id=backup.project_id)
LOG.info('Delete backup finished, backup %s deleted.', backup.id)
self._notify_about_backup_usage(context, backup, "delete.end")
def _notify_about_backup_usage(self,
context,
backup,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_backup_usage(
context, backup, event_suffix,
extra_usage_info=extra_usage_info,
host=self.host)
def export_record(self, context, backup):
"""Export all volume backup metadata details to allow clean import.
Export backup metadata so it can be re-imported into the database
without any prerequisite in the backup database.
:param context: running context
:param backup: backup object to export
:returns: backup_record - a description of how to import the backup,
containing 'backup_url' (how to import the backup) and
'backup_service' (the needed driver).
:raises InvalidBackup:
"""
LOG.info('Export record started, backup: %s.', backup.id)
expected_status = fields.BackupStatus.AVAILABLE
actual_status = backup.status
if actual_status != expected_status:
err = (_('Export backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
raise exception.InvalidBackup(reason=err)
backup_record = {'backup_service': backup.service}
if not self._is_our_backup(backup):
err = (_('Export record aborted, the backup service currently '
'configured [%(configured_service)s] is not the '
'backup service that was used to create this '
'backup [%(backup_service)s].') %
{'configured_service': self.driver_name,
'backup_service': backup.service})
raise exception.InvalidBackup(reason=err)
# Call driver to create backup description string
try:
backup_service = self.service(context)
driver_info = backup_service.export_record(backup)
backup_url = backup.encode_record(driver_info=driver_info)
backup_record['backup_url'] = backup_url
except Exception as err:
msg = str(err)
raise exception.InvalidBackup(reason=msg)
LOG.info('Export record finished, backup %s exported.', backup.id)
return backup_record
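# A hedged example of the record returned above (values are illustrative):
#
#   {'backup_service': 'cinder.backup.drivers.swift.SwiftBackupDriver',
#    'backup_url': '<base64-encoded backup metadata>'}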
def import_record(self,
context,
backup,
backup_service,
backup_url,
backup_hosts):
"""Import all volume backup metadata details to the backup db.
:param context: running context
:param backup: The new backup object for the import
:param backup_service: The needed backup driver for import
:param backup_url: An identifier string to locate the backup
:param backup_hosts: Potential hosts to execute the import
:raises InvalidBackup:
:raises ServiceNotFound:
"""
LOG.info('Import record started, backup_url: %s.', backup_url)
# Can we import this backup?
if not self._is_our_backup(backup_service):
# No, are there additional potential backup hosts in the list?
if len(backup_hosts) > 0:
# try the next host on the list, maybe it can import
first_host = backup_hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
backup,
backup_service,
backup_url,
backup_hosts)
else:
# empty list - we are the last host on the list, fail
err = _('Import record failed, cannot find backup '
'service to perform the import. Requested service '
'%(service)s.') % {'service': backup_service}
volume_utils.update_backup_error(backup, err)
raise exception.ServiceNotFound(service_id=backup_service)
else:
# Yes...
try:
# Deserialize backup record information
backup_options = backup.decode_record(backup_url)
# Extract driver specific info and pass it to the driver
driver_options = backup_options.pop('driver_info', {})
backup_service = self.service(context)
backup_service.import_record(backup, driver_options)
except Exception as err:
msg = str(err)
volume_utils.update_backup_error(backup, msg)
raise exception.InvalidBackup(reason=msg)
required_import_options = {
'display_name',
'display_description',
'container',
'size',
'service_metadata',
'object_count',
'id'
}
# Check for missing fields in imported data
missing_opts = required_import_options - set(backup_options)
if missing_opts:
msg = (_('Driver successfully decoded imported backup data, '
'but there are missing fields (%s).') %
', '.join(missing_opts))
volume_utils.update_backup_error(backup, msg)
raise exception.InvalidBackup(reason=msg)
# Confirm the ID from the record in the DB is the right one
backup_id = backup_options['id']
if backup_id != backup.id:
msg = (_('Trying to import backup metadata from id %(meta_id)s'
' into backup %(id)s.') %
{'meta_id': backup_id, 'id': backup.id})
volume_utils.update_backup_error(backup, msg)
raise exception.InvalidBackup(reason=msg)
# Overwrite some fields
backup_options['service'] = self.driver_name
backup_options['availability_zone'] = self.az
backup_options['host'] = self.host
# Remove some values which are not actual fields and some that
# were set by the API node
for key in ('name', 'user_id', 'project_id', 'deleted_at',
'deleted', 'fail_reason', 'status'):
backup_options.pop(key, None)
# Update the database
backup.update(backup_options)
backup.save()
# Update the backup's status
backup.update({"status": fields.BackupStatus.AVAILABLE})
backup.save()
LOG.info('Import record id %s metadata from driver '
'finished.', backup.id)
def reset_status(self, context, backup, status):
"""Reset volume backup status.
:param context: running context
:param backup: The backup object for reset status operation
:param status: The status to be set
:raises InvalidBackup:
:raises AttributeError:
"""
LOG.info('Reset backup status started, backup_id: '
'%(backup_id)s, status: %(status)s.',
{'backup_id': backup.id,
'status': status})
LOG.info('Backup service: %s.', backup.service)
if not self._is_our_backup(backup):
err = _('Reset backup status aborted, the backup service'
' currently configured [%(configured_service)s] '
'is not the backup service that was used to create'
' this backup [%(backup_service)s].') % \
{'configured_service': self.driver_name,
'backup_service': backup.service}
raise exception.InvalidBackup(reason=err)
if backup.service is not None:
backup.status = status
backup.save()
# Needs to clean temporary volumes and snapshots.
try:
self._cleanup_temp_volumes_snapshots_for_one_backup(
context, backup)
except Exception:
LOG.exception("Problem cleaning temp volumes and "
"snapshots for backup %(bkup)s.",
{'bkup': backup.id})
volume_utils.notify_about_backup_usage(context, backup,
'reset_status.end')
def check_support_to_force_delete(self, context):
"""Check if the backup driver supports force delete operation.
:param context: running context
"""
backup_service = self.service(context)
return backup_service.support_force_delete
def _attach_device(self, ctxt, backup_device,
properties, is_snapshot=False):
"""Attach backup device."""
if not is_snapshot:
return self._attach_volume(ctxt, backup_device, properties)
else:
return self._attach_snapshot(ctxt, backup_device, properties)
def _attach_volume(self, context, volume, properties):
"""Attach a volume."""
try:
conn = self.volume_rpcapi.initialize_connection(context,
volume,
properties)
return self._connect_device(conn)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.volume_rpcapi.terminate_connection(context, volume,
properties,
force=True)
except Exception:
LOG.warning("Failed to terminate the connection "
"of volume %(volume_id)s, but it is "
"acceptable.",
{'volume_id': volume.id})
def _attach_snapshot(self, ctxt, snapshot, properties):
"""Attach a snapshot."""
try:
conn = self.volume_rpcapi.initialize_connection_snapshot(
ctxt, snapshot, properties)
return self._connect_device(conn)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.volume_rpcapi.terminate_connection_snapshot(
ctxt, snapshot, properties, force=True)
except Exception:
LOG.warning("Failed to terminate the connection "
"of snapshot %(snapshot_id)s, but it is "
"acceptable.",
{'snapshot_id': snapshot.id})
def _connect_device(self, conn):
"""Establish connection to device."""
use_multipath = CONF.use_multipath_for_image_xfer
device_scan_attempts = CONF.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = volume_utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn,
expect_raw_disk=True)
vol_handle = connector.connect_volume(conn['data'])
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _detach_device(self, ctxt, attach_info, device,
properties, is_snapshot=False, force=False,
ignore_errors=False):
"""Disconnect the volume or snapshot from the host. """
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'],
force=force, ignore_errors=ignore_errors)
rpcapi = self.volume_rpcapi
if not is_snapshot:
rpcapi.terminate_connection(ctxt, device, properties,
force=force)
rpcapi.remove_export(ctxt, device, sync=True)
else:
rpcapi.terminate_connection_snapshot(ctxt, device,
properties, force=force)
rpcapi.remove_export_snapshot(ctxt, device, sync=True)
def is_working(self):
return self.is_initialized
@periodic_task.periodic_task(
spacing=CONF.backup_driver_stats_polling_interval)
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _report_driver_status(self, context):
backup_stats = {
'backend_state': self.is_working(),
'driver_name': self.driver_name,
'availability_zone': self.az
}
self.update_service_capabilities(backup_stats)
|
|
from django.test import TestCase
from django.utils.unittest import skipIf
import reversion
import yawf
from yawf.exceptions import MessageSpecNotRegisteredError, UnhandledMessageError
import yawf.creation
import yawf.dispatch
from yawf.handlers import Handler
from yawf.revision.utils import (
diff_fields, versions_diff, deserialize_revision, previous_version)
from yawf.message_log.models import main_record_for_revision
from yawf.messages.spec import MessageSpec
from yawf.allowed import get_allowed
yawf.autodiscover()
from .models import Window, WINDOW_OPEN_STATUS
class WorkflowTestMixin(object):
workflow_id = None
def get_workflow(self):
return yawf.get_workflow(self.workflow_id)
def test_workflow_registered(self):
workflow = self.get_workflow()
self.assertIsNotNone(workflow)
def test_initial_handler(self):
w = self.get_workflow()
if isinstance(w.start_workflow, basestring):
start_handler = w.get_handler(w.initial_state, w.start_workflow)
self._test_handler_obj(start_handler)
def test_initial_message(self):
w = self.get_workflow()
if isinstance(w.start_workflow, basestring):
spec = w.get_message_spec(w.start_workflow)
self._test_message_spec(spec)
def _test_handler_obj(self, handler):
self.assertIsInstance(handler, yawf.handlers.Handler)
def _test_message_spec(self, spec):
self.assertIsInstance(spec, yawf.messages.spec.MessageSpec)
self.assertIsInstance(spec.id, basestring)
self.assertTrue(hasattr(spec, 'validator_cls'))
self.assertTrue(hasattr(spec.validator_cls, 'is_valid'))
def test_validate(self):
w = self.get_workflow()
w.validate()
class SimpleWorkflowTest(WorkflowTestMixin, TestCase):
workflow_id = 'simple'
sender = '__sender__'
def test_creation(self):
self.assertRaises(
yawf.exceptions.CreateValidationError,
lambda: yawf.creation.create(self.workflow_id, self.sender, {}))
window = yawf.creation.create(
self.workflow_id, self.sender,
{
'title': 'Main window',
'width': 500,
'height': 300,
})
new_instance, handler_effect, side_effect = yawf.creation.start_workflow(window, self.sender)
self.assertEqual(window.id, new_instance.id)
self.assertFalse(window is new_instance)
def test_grouped_action(self):
window, _, _ = self._new_window(width=500, height=300)
self.assertEqual(window.width, 500)
self.assertEqual(window.height, 300)
resized_window, handler_effects, effects = yawf.dispatch.dispatch(window, self.sender,
'edit__resize', dict(width=200, height=400))
self.assertEqual(resized_window.width, 200)
self.assertEqual(resized_window.height, 400)
resized_window = Window.objects.get(id=window.id)
self.assertEqual(resized_window.width, 200)
self.assertEqual(resized_window.height, 400)
self.assertListEqual(effects, ['edit_effect', 'resize_effect'])
def test_multimessage(self):
window, _, _ = self._new_window(width=500, height=300)
child1, _, _ = self._new_window(parent=window)
child2, _, _ = self._new_window(parent=window)
window, handler_effects, _ = yawf.dispatch.dispatch(
window, self.sender, 'minimize_all')
self.assertTrue(isinstance(handler_effects, list))
self.assertEqual(len(handler_effects), 3)
window, child1, child2 = handler_effects
self.assertEqual(window.open_status, WINDOW_OPEN_STATUS.MINIMIZED)
self.assertEqual(child1.open_status, WINDOW_OPEN_STATUS.MINIMIZED)
self.assertEqual(child2.open_status, WINDOW_OPEN_STATUS.MINIMIZED)
def test_revision_deserialization(self):
window, _, _ = self._new_window(width=500, height=300)
self.assertEqual(window.revision, 2)
child1, _, _ = self._new_window(parent=window)
child2, _, _ = self._new_window(parent=window)
window, _, _ = yawf.dispatch.dispatch(
window, self.sender, 'minimize_all')
versions = reversion.get_for_object(window)
self.assertEqual(len(versions), 2)
last_version = versions[0]
message_revision = last_version.revision
# checking log record
log_record = main_record_for_revision(message_revision)
self.assertEqual(log_record.message, 'minimize_all')
self.assertEqual(log_record.object_id, window.id)
# checking revision
rev = deserialize_revision(message_revision)
versions = rev.get_versions_for_record(log_record)
self.assertEqual(len(versions), 1)
version = versions[0]
self.assertEqual(last_version, version)
previous = previous_version(version)
diff = versions_diff(previous, version, full=True)
self.assertItemsEqual(diff[0],
{
'field_name': 'open_status',
'old': 'normal',
'new': 'minimized',
'field_verbose_name': 'open status'
})
self.assertItemsEqual(diff[1],
{
'field_name': 'revision',
'old': '2',
'new': '3',
'field_verbose_name': 'revision'
})
def test_revision_diff(self):
window, _, _ = self._new_window(width=500, height=300)
self.assertEqual(window.revision, 2)
resized_window, _, _ = yawf.dispatch.dispatch(window, self.sender,
'edit__resize', dict(width=200, height=300))
self.assertEqual(resized_window.revision, 3)
versions = reversion.get_for_object(window)
self.assertEqual(len(versions), 2)
new_rev = versions[0]
old_rev = versions[1]
diff = list(diff_fields(old_rev, new_rev))
self.assertItemsEqual(diff, ['width'])
diff = versions_diff(old_rev, new_rev)
self.assertItemsEqual(diff,
[
{
'field_name': 'width',
'old': 500,
'new': 200,
'field_verbose_name': 'width'
},
])
def test_allowed(self):
window, _, _ = self._new_window()
allowed = get_allowed(self.sender, window)
self.assertItemsEqual(allowed.keys(), ['allowed_messages', 'allowed_resources'])
def test_view_handling(self):
window, _, _ = self._new_window(width=500, height=300)
self.assertEqual(window.width, 500)
self.assertEqual(window.height, 300)
self.client.post(
'/simple/window/%d/resize/' % window.id,
{
'width': 200,
'height': 400,
})
window = Window.objects.get(pk=window.id)
self.assertEqual(window.width, 200)
self.assertEqual(window.height, 400)
self.assertEqual(window.open_status, 'normal')
r = self.client.post('/simple/window/%d/maximize/' % window.id)
self.assertEqual(r.status_code, 200)
window = Window.objects.get(pk=window.id)
self.assertEqual(window.open_status, 'maximized')
def _new_window(self, title='Main window', width=500, height=300,
parent=None):
window = yawf.creation.create(
self.workflow_id, self.sender,
{
'title': title,
'width': width,
'height': height,
'parent': parent.id if parent is not None else None,
})
return yawf.creation.start_workflow(window, self.sender)
def which(name):
"""Searches for name in exec path and returns full path (from pygraphviz)"""
import os
import glob
paths = os.environ["PATH"]
if os.name == "nt":
exe = ".exe"
else:
exe = ""
for path in paths.split(os.pathsep):
match = glob.glob(os.path.join(path, name + exe))
if match:
return match[0]
return None
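# Note: on Python 3.3+ the standard library offers an equivalent via
# shutil.which(name); the local helper above is kept for the Python 2
# environments these tests target.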
class BuiltinViewTest(TestCase):
def test_describe(self):
response = self.client.get('/describe/simple/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/describe/some_nonexist_workflow/')
self.assertEqual(response.status_code, 404)
@skipIf(which('dot') is None,
"graphviz is not installed")
def test_handlers_graph(self):
response = self.client.get('/describe/simple/graph/handlers/')
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header('Content-Type'))
self.assertEqual(response['Content-Type'], 'image/png')
self.assertTrue(len(response.content) > 1024)
@skipIf(which('dot') is None,
"graphviz is not installed")
def test_effects_graph(self):
response = self.client.get('/describe/simple/graph/effects/')
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header('Content-Type'))
self.assertEqual(response['Content-Type'], 'image/png')
self.assertTrue(len(response.content) > 1024)
class MessageGroupsTest(WorkflowTestMixin, TestCase):
workflow_id = 'message_groups'
sender = '__sender__'
def test_grouped_handlers_registration(self):
wf = self.get_workflow()
spec = wf.get_message_spec('hover')
self.assertIsInstance(spec, MessageSpec)
with self.assertRaises(MessageSpecNotRegisteredError):
wf.get_message_spec('hover__subhover')
spec = wf.get_message_spec('edit')
self.assertIsInstance(spec, MessageSpec)
spec = wf.get_message_spec('edit__title')
self.assertIsInstance(spec, MessageSpec)
handler = wf.get_handler(WINDOW_OPEN_STATUS.NORMAL, 'hover')
self.assertIsInstance(handler, Handler)
with self.assertRaises(UnhandledMessageError):
wf.get_handler(WINDOW_OPEN_STATUS.NORMAL, 'hover__subhover')
handler = wf.get_handler(WINDOW_OPEN_STATUS.NORMAL, 'edit__title')
self.assertIsInstance(handler, Handler)
handler = wf.get_handler(WINDOW_OPEN_STATUS.NORMAL, 'edit')
self.assertIsInstance(handler, Handler)
class MinimalWorkflowTest(WorkflowTestMixin, TestCase):
workflow_id = 'minimal'
sender = '__sender__'
|
|
"""Module containing the application logic for Flake8."""
from __future__ import print_function
import logging
import sys
import time
import flake8
from flake8 import checker
from flake8 import defaults
from flake8 import exceptions
from flake8 import style_guide
from flake8 import utils
from flake8.main import options
from flake8.options import aggregator
from flake8.options import manager
from flake8.plugins import manager as plugin_manager
LOG = logging.getLogger(__name__)
class Application(object):
"""Abstract our application into a class."""
def __init__(self, program='flake8', version=flake8.__version__):
# type: (str, str) -> NoneType
"""Initialize our application.
:param str program:
The name of the program/application that we're executing.
:param str version:
The version of the program/application we're executing.
"""
#: The timestamp when the Application instance was instantiated.
self.start_time = time.time()
#: The timestamp when the Application finished reporting errors.
self.end_time = None
#: The name of the program being run
self.program = program
#: The version of the program being run
self.version = version
#: The instance of :class:`flake8.options.manager.OptionManager` used
#: to parse and handle the options and arguments passed by the user
self.option_manager = manager.OptionManager(
prog='flake8', version=flake8.__version__
)
options.register_default_options(self.option_manager)
# We haven't found or registered our plugins yet, so let's defer
# printing the version until we aggregate options from config files
# and the command-line. First, let's clone our arguments on the CLI,
# then we'll attempt to remove ``--version`` so that we can avoid
# triggering the "version" action in optparse. If it's not there, we
# do not need to worry and we can continue. If it is, we successfully
# defer printing the version until just a little bit later.
# Similarly we have to defer printing the help text until later.
args = sys.argv[:]
try:
args.remove('--version')
except ValueError:
pass
try:
args.remove('--help')
except ValueError:
pass
try:
args.remove('-h')
except ValueError:
pass
preliminary_opts, _ = self.option_manager.parse_known_args(args)
# Set the verbosity of the program
flake8.configure_logging(preliminary_opts.verbose,
preliminary_opts.output_file)
#: The instance of :class:`flake8.plugins.manager.Checkers`
self.check_plugins = None
#: The instance of :class:`flake8.plugins.manager.Listeners`
self.listening_plugins = None
#: The instance of :class:`flake8.plugins.manager.ReportFormatters`
self.formatting_plugins = None
#: The user-selected formatter from :attr:`formatting_plugins`
self.formatter = None
#: The :class:`flake8.plugins.notifier.Notifier` for listening plugins
self.listener_trie = None
#: The :class:`flake8.style_guide.StyleGuide` built from the user's
#: options
self.guide = None
#: The :class:`flake8.checker.Manager` that will handle running all of
#: the checks selected by the user.
self.file_checker_manager = None
#: The user-supplied options parsed into an instance of
#: :class:`optparse.Values`
self.options = None
#: The left over arguments that were not parsed by
#: :attr:`option_manager`
self.args = None
#: The number of errors, warnings, and other messages after running
#: flake8 and taking into account ignored errors and lines.
self.result_count = 0
#: The total number of errors before accounting for ignored errors and
#: lines.
self.total_result_count = 0
#: Whether the program is processing a diff or not
self.running_against_diff = False
#: The parsed diff information
self.parsed_diff = {}
def exit(self):
# type: () -> NoneType
"""Handle finalization and exiting the program.
This should be the last thing called on the application instance. It
will check certain options and exit appropriately.
"""
if self.options.count:
print(self.result_count)
if not self.options.exit_zero:
raise SystemExit(self.result_count > 0)
def find_plugins(self):
# type: () -> NoneType
"""Find and load the plugins for this application.
If :attr:`check_plugins`, :attr:`listening_plugins`, or
:attr:`formatting_plugins` are ``None`` then this method will update
them with the appropriate plugin manager instance. Given the expense
of finding plugins (via :mod:`pkg_resources`) we want this to be
idempotent and so only update those attributes if they are ``None``.
"""
if self.check_plugins is None:
self.check_plugins = plugin_manager.Checkers()
if self.listening_plugins is None:
self.listening_plugins = plugin_manager.Listeners()
if self.formatting_plugins is None:
self.formatting_plugins = plugin_manager.ReportFormatters()
self.check_plugins.load_plugins()
self.listening_plugins.load_plugins()
self.formatting_plugins.load_plugins()
def register_plugin_options(self):
# type: () -> NoneType
"""Register options provided by plugins to our option manager."""
self.check_plugins.register_options(self.option_manager)
self.check_plugins.register_plugin_versions(self.option_manager)
self.listening_plugins.register_options(self.option_manager)
self.formatting_plugins.register_options(self.option_manager)
def parse_configuration_and_cli(self, argv=None):
# type: (Union[NoneType, List[str]]) -> NoneType
"""Parse configuration files and the CLI options.
:param list argv:
Command-line arguments passed in directly.
"""
if self.options is None and self.args is None:
self.options, self.args = aggregator.aggregate_options(
self.option_manager, argv
)
self.running_against_diff = self.options.diff
if self.running_against_diff:
self.parsed_diff = utils.parse_unified_diff()
self.check_plugins.provide_options(self.option_manager, self.options,
self.args)
self.listening_plugins.provide_options(self.option_manager,
self.options,
self.args)
self.formatting_plugins.provide_options(self.option_manager,
self.options,
self.args)
def make_formatter(self, formatter_class=None):
# type: () -> NoneType
"""Initialize a formatter based on the parsed options."""
if self.formatter is None:
format_plugin = self.options.format
if 1 <= self.options.quiet < 2:
format_plugin = 'quiet-filename'
elif 2 <= self.options.quiet:
format_plugin = 'quiet-nothing'
if formatter_class is None:
formatter_class = self.formatting_plugins.get(
format_plugin, self.formatting_plugins['default']
).execute
self.formatter = formatter_class(self.options)
def make_notifier(self):
# type: () -> NoneType
"""Initialize our listener Notifier."""
if self.listener_trie is None:
self.listener_trie = self.listening_plugins.build_notifier()
def make_guide(self):
# type: () -> NoneType
"""Initialize our StyleGuide."""
if self.guide is None:
self.guide = style_guide.StyleGuide(
self.options, self.listener_trie, self.formatter
)
if self.running_against_diff:
self.guide.add_diff_ranges(self.parsed_diff)
def make_file_checker_manager(self):
# type: () -> NoneType
"""Initialize our FileChecker Manager."""
if self.file_checker_manager is None:
self.file_checker_manager = checker.Manager(
style_guide=self.guide,
arguments=self.args,
checker_plugins=self.check_plugins,
)
def run_checks(self, files=None):
# type: (Union[List[str], NoneType]) -> NoneType
"""Run the actual checks with the FileChecker Manager.
This method encapsulates the logic to make a
:class:`~flake8.checker.Manager` instance run the checks it is
managing.
:param list files:
List of filenames to process
"""
if self.running_against_diff:
files = list(sorted(self.parsed_diff.keys()))
self.file_checker_manager.start(files)
self.file_checker_manager.run()
LOG.info('Finished running')
self.file_checker_manager.stop()
self.end_time = time.time()
def report_benchmarks(self):
"""Aggregate, calculate, and report benchmarks for this run."""
if not self.options.benchmark:
return
time_elapsed = self.end_time - self.start_time
statistics = [('seconds elapsed', time_elapsed)]
add_statistic = statistics.append
for statistic in (defaults.STATISTIC_NAMES + ('files',)):
value = self.file_checker_manager.statistics[statistic]
total_description = 'total ' + statistic + ' processed'
add_statistic((total_description, value))
per_second_description = statistic + ' processed per second'
add_statistic((per_second_description, int(value / time_elapsed)))
self.formatter.show_benchmarks(statistics)
def report_errors(self):
# type: () -> NoneType
"""Report all the errors found by flake8 3.0.
This also updates the :attr:`result_count` attribute with the total
number of errors, warnings, and other messages found.
"""
LOG.info('Reporting errors')
results = self.file_checker_manager.report()
self.total_result_count, self.result_count = results
LOG.info('Found a total of %d violations and reported %d',
self.total_result_count, self.result_count)
def report_statistics(self):
"""Aggregate and report statistics from this run."""
if not self.options.statistics:
return
self.formatter.show_statistics(self.guide.stats)
def initialize(self, argv):
# type: () -> NoneType
"""Initialize the application to be run.
This finds the plugins, registers their options, and parses the
command-line arguments.
"""
# NOTE(sigmavirus24): When updating this, make sure you also update
# our legacy API calls to these same methods.
self.find_plugins()
self.register_plugin_options()
self.parse_configuration_and_cli(argv)
self.make_formatter()
self.make_notifier()
self.make_guide()
self.make_file_checker_manager()
def _run(self, argv):
# type: (Union[NoneType, List[str]]) -> NoneType
self.initialize(argv)
self.run_checks()
self.formatter.start()
self.report_errors()
self.report_statistics()
self.report_benchmarks()
self.formatter.stop()
def run(self, argv=None):
# type: (Union[NoneType, List[str]]) -> NoneType
"""Run our application.
This method will also handle KeyboardInterrupt exceptions for the
entirety of the flake8 application. If it sees a KeyboardInterrupt it
will forcibly clean up the :class:`~flake8.checker.Manager`.
"""
try:
self._run(argv)
except KeyboardInterrupt as exc:
print('... stopped')
LOG.critical('Caught keyboard interrupt from user')
LOG.exception(exc)
self.file_checker_manager._force_cleanup()
except exceptions.EarlyQuit:
print('... stopped while processing files')
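# A minimal usage sketch (assumption: driven from a wrapper script rather
# than flake8's own console entry point; 'some_module.py' is a hypothetical
# target):
#
#   app = Application()
#   app.run(['some_module.py'])
#   app.exit()  # raises SystemExit based on the result count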
|
|
import json
import copy
import time
import signal
from benchmark.monitor import TestMonitor
from benchmark.launcher import TestLauncher
def _recursive_macro_replace(val, macros):
"""
Recursively iterate over the items of val and replace the macros with
the values from the macros dictionary
"""
# Iterate over dicts
if isinstance(val, dict):
for k,v in val.iteritems():
val[k] = _recursive_macro_replace(v, macros)
return val
# Iterate over lists
elif isinstance(val, list):
for i in range(0, len(val)):
val[i] = _recursive_macro_replace(val[i], macros)
return val
# Replace only strings
elif (type(val) is str) or (type(val) is unicode):
if '%' in val:
return val % macros
return val
# Everything else passes through
else:
return val
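# A minimal usage sketch (hypothetical values):
#
#   conf = {'bind': '%(local_ip)s:%(port)s', 'peers': ['%(remote_ip)s']}
#   _recursive_macro_replace(conf, {'local_ip': '10.0.0.1',
#                                   'port': 5555,
#                                   'remote_ip': '10.0.0.2'})
#   # -> {'bind': '10.0.0.1:5555', 'peers': ['10.0.0.2']}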
class TestRunner:
def __init__(self, test_case):
"""
Initialize the runner for the specified test case
"""
# Reset properties
self.lastError = ""
self.testCase = test_case
# Prepare launcher for monitor machine
self.m_monitor = self.prepareLauncherConfig(
test_case.local, test_case.remote, test_case.values
)
# Prepare launcher for assistant machines
self.m_assist = []
for m in test_case.remote:
self.m_assist.append(
self.prepareLauncherConfig(
m, [test_case.local], test_case.values
)
)
def prepareLauncherConfig(self, machine, remote_machines, values):
"""
Prepare launcher configuration for the specified machine and test-case
"""
# Collect information from various sources to build known macros
km = machine.app['env'].copy()
km.update( values )
if 'globals' in self.testCase.config:
km.update( self.testCase.config['globals'] )
# Define local info
km['local_ip'] = machine.ip
km['local_name'] = machine.name
# Define remote info
for i in range(0, len(remote_machines)):
# Unprefixed keys refer to the last endpoint (EP)
km['remote_ip'] = remote_machines[i].ip
km['remote_name'] = remote_machines[i].name
# But also include a list of EPs
km['remote_%i_ip' % i] = remote_machines[i].ip
km['remote_%i_name' % i] = remote_machines[i].name
# Calculate some derivatives (allow several pending messages on the queue)
mul = 1
if 'queue-size' in self.testCase.config['globals']:
mul = int(self.testCase.config['globals']['queue-size'])
km['rxtx_size_plus'] = int(km['rxtx_size']) * mul
km['rxtx_size_minus'] = int(km['rxtx_size']) / mul
############################
# Compile environment
############################
# Start with empty
l_env = {}
# Iterate over environment variables
for k,v in machine.app['env'].iteritems():
# Replace macros in the value
value = v
if '%' in value:
value = value % km
# Update env variable AND the known macros
l_env[k] = value
km[k] = value
############################
# Compile Configuration
############################
# Recursively replace macros
l_conf = copy.deepcopy( machine.app['config'] )
_recursive_macro_replace( l_conf, km )
############################
# Compile command-line
############################
# Just clone the base command-line
l_cmdline = list(machine.app['cmdline'])
# Prepend the executable to run (if specified)
if ('exec' in machine.app) and machine.app['exec']:
l_cmdline.insert(0, machine.app['exec'])
# Append configuration file flag
l_cmdline.append( machine.app['config_arg'] )
# Convert macros
l_cmdline = _recursive_macro_replace( l_cmdline, km )
print "DEBUG: Executing %s <config>" % " ".join(l_cmdline)
print "DEBUG: Config:\n%s" % json.dumps(l_conf, indent=4, separators=(',', ': '))
############################
# Compile bootstrap script
############################
# Prepare script
l_script = "\n".join([
"#!/bin/bash",
# Create a temporary file to hold the config
"CONF=/tmp/fairmq-benchmark.json",
# Write down the config
"cat <<EOF > $CONF",
json.dumps(l_conf, indent=4, separators=(',', ': ')),
"EOF",
# Prepare environment
"\n".join(map(lambda kv: "export %s=%s" % kv, l_env.iteritems())),
# Start memory monitor
#"function mem_mon {",
#" while true; do",
#" free",
#"free -m",
#"top -bn1 | grep load",
#" sleep 1",
#" done",
#"}",
#"mem_mon&",
"function mem_monitor {",
"local MONITOR_PID=0",
"local PS_DETAILS=0",
"local STATUS_DETAILS=0",
"while true; do",
"MONITOR_PID=$(pidof " + l_cmdline[0] + ")",
"PID_NAME=$(ps -p $MONITOR_PID -o comm=)",
"echo " + l_cmdline[0],
"echo $PID_NAME",
"[ -z \"$MONITOR_PID\" ] && continue",
"local PS_DETAILS=$(ps up $MONITOR_PID | tail -n1)",
"echo PS_INFO: $PS_DETAILS",
#Memory stats
"local STATUS_DETAILS=$(smem -P $PID_NAME | tail -n1)",
"echo STAT_INFO: $STATUS_DETAILS",
"sleep 1",
#Network stats
"local NET_STAT_RX=$(netstat -Ienp7s0f0 -e | awk 'NR==6{print $5}')",
"local NET_STAT_TX=$(netstat -Ienp7s0f0 -e | awk 'NR==8{print $5}')",
"echo \"NTSTAT_RX: $NET_STAT_RX\"",
"echo \"NTSTAT_TX: $NET_STAT_TX\"",
#CPU stats
"local CPU_PERC=$(ps -p $MONITOR_PID -o %cpu,%mem,cmd)",
"echo CPU: $CPU_PERC",
"done",
"}",
"mem_monitor&",
"PIDS=\"$!\"",
# Execute command-line
"stdbuf -i0 -o0 -e0 " + " ".join(l_cmdline) + " $CONF&",
"APPPID=$!",
"PIDS=\"$PIDS $APPPID\"",
# Register a SIGINT handler
"function cleanup {",
"echo \"Killing monitor and process $PIDS\" >&2",
"kill -SIGINT $PIDS",
"sleep 1",
"kill $PIDS",
"exit 1",
"}",
"trap cleanup SIGINT SIGHUP",
# Wait for main process to finish
"echo \"Waiting for main application ($APPPID) to exit\"",
"wait $APPPID",
"cleanup",
])
############################
# Prepare bootstrap command
############################
# Prepare command-line
l_bootstrap = ["bash"]
# In case of SSH, prefix with SSH
if 'ssh' in machine.config:
_ssh = machine.config['ssh']
# Calculate ssh info to prepend
ssh_cmdline = [ 'ssh', '-t' ]
# Check for identity file
if 'key' in _ssh:
ssh_cmdline.append( "-i" )
ssh_cmdline.append( _ssh['key'] )
# Get host
if 'host' in _ssh:
host = _ssh['host']
else:
host = machine.ip
# Get user
if 'user' in _ssh:
host = "%s@%s" % (_ssh['user'], host)
# Finalize cmdline
ssh_cmdline.append( host )
ssh_cmdline.append( "--" )
# Prepend to l_bootstrap
ssh_cmdline.extend( l_bootstrap )
l_bootstrap = ssh_cmdline
# Return config
return (machine.name, l_bootstrap, l_script)
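# The triple returned above is unpacked into TestLauncher(name, cmdline,
# script) in run() below; a hedged example of its shape:
#
#   ('node-1', ['ssh', '-t', 'user@10.0.0.1', '--', 'bash'],
#    '#!/bin/bash\n...')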
def run(self):
"""
Start the test and return the results, or None if an error occurred
"""
print "--[ %s ]-----" % self.testCase.name
# Create a test monitor
monitor = TestMonitor( self.testCase )
# Create launchers
launchers = [ TestLauncher( *self.m_monitor, monitor=monitor ) ]
for m in self.m_assist:
launchers.append( TestLauncher( *m ) )
# Start launchers
monitor.start()
for l in launchers:
print "INFO: Starting app on %s" % l.name
l.start()
time.sleep(0.5)
# Wait for head process to exit
print "INFO: Waiting head worker to complete"
launchers[0].join()
print "INFO: Head worker completed"
# Wait 5 seconds for other threads to exit
hasAlive = True
timeout = time.time() + 5
while (time.time() < timeout) and hasAlive:
hasAlive = False
for i in range(1,len(launchers)):
if launchers[i].poll() is None:
hasAlive = True
time.sleep(0.5)
# Kill incomplete threads
if hasAlive:
print "INFO: Forcefully stopping remaining workers"
for i in range(1,len(launchers)):
if launchers[i].poll() is None:
launchers[i].interrupt()
# Join all threads
print "INFO: Joining remaining workers"
for i in range(1,len(launchers)):
if launchers[i].poll() is None:
launchers[i].join()
# Collect monitor results
monitor.close()
return monitor.metrics()
|
|
from sys import argv
import minimum_vertex_cover as mvc
from Genetic import Pop, Solucao
def run():
# open File
try:
file_name = argv[1]
except IndexError:
file_name = input("Enter a file name: ")
graph, n_vertices, n_arestas = read_file(file_name)
full_graph = {**graph[0], **graph[1]} # Python 3.5 syntax (merges the two dicts into one)
show_graph(full_graph)
op = menu()
if op == '1':
sol_opt = pesquisa_local(graph)
print("\nSOLUCAO: ", sol_opt)
sol_bin = sol_in_bin(n_vertices, sol_opt)
print("SOL BIN: ", sol_bin)
elif op == '2':
alg_genetico(n_vertices, full_graph)
elif op == '3':
hybrid(n_vertices, graph)
else:
print("Invalid option")
def alg_genetico(n_geracoes, graph):
runs = int(input("N. runs: "))
settings = get_settings()
mbf = 0.0
best_ever = None
for r in range(runs):
pop = Pop(settings, n_geracoes)
pop.evaluate(graph)
gen_actual = 1
best_run = pop.get_best()
while gen_actual < n_geracoes:
parents = pop.tournament()
pop = pop.genetic_operators(parents)
pop.evaluate(graph)
best_run = pop.get_best()
gen_actual += 1
invalids = 0
for s in pop.pop:
if not s.valido:
invalids += 1
print("\nRepeticao", r)
print(best_run)
print("\nPercentagem Invalidos:", invalids/20*100) # pop_size / 100
mbf += best_run.fitness
print("MBF = ", mbf)
if r == 0 or best_run.fitness > best_ever.fitness:
best_ever = best_run
print("MBF: ", mbf)
print("MELHOR SOLUCAO ENCONTRADA:")
print(best_ever.sol)
def hybrid(n_geracoes, graph):
"""
:param n_geracoes: igual a numero de vertices no grapho
:param graph: tuple dos dois mapas retirados de read file
:return:
O Algoritmo hibrido e exactamente igual ao genetico, no entanto, introduz a solucao optima
conseguida pela pesquisa local na populacao inicial.
"""
sol_bin = sol_in_bin(n_geracoes, pesquisa_local(graph))
full_graph = {**graph[0], **graph[1]} # Python 3.5 syntax (merges the two dicts into one)
runs = int(input("N. runs: "))
settings = get_settings()
mbf = 0.0
best_ever = None
for r in range(runs):
pop = Pop(settings, n_geracoes)
pop.pop[0].sol = sol_bin
pop.evaluate(full_graph)
gen_actual = 1
best_run = pop.get_best()
while gen_actual < n_geracoes:
parents = pop.tournament()
pop = pop.genetic_operators(parents)
pop.evaluate(full_graph)
best_run = pop.get_best()
gen_actual += 1
invalids = 0
for s in pop.pop:
if not s.valido:
invalids += 1
print("\nRepeticao", r)
print(best_run)
print("\nPercentagem Invalidos:", invalids/20*100) # pop_size / 100
mbf += best_run.fitness
print("MBF = ", mbf)
if r == 0 or best_run.fitness > best_ever.fitness:
best_ever = best_run
print("MBF: ", mbf)
print("MELHOR SOLUCAO ENCONTRADA:")
print(best_ever.sol)
def pesquisa_local(graph):
# MINIMUM VERTEX COVER
print("\nMVC")
# compute the minimum vertex cover
m = mvc.min_vertex_cover(graph[0], graph[1])
show_graph(m)
"""
Sendo A - conjunto de todos os vertices
I - conjunto de todos os vertices independentes
EC - total vertex cover
MVC - Minimum vertex cover
A = I + EC
I = A - MVC
"""
# remover da lista que contem todos os vertices
# os valors que estao no mvc
sol_list = all_vert(graph)
for v in m:
for u in m[v]:
try:
sol_list.remove(u)
except ValueError:
pass
return sol_list
def all_vert(graph):
# Build a list with all vertices
keys = list(graph[0].keys()) + list(graph[1].keys())
all_vertices = []
for v in keys:
if v not in all_vertices:
all_vertices.append(v)
return all_vertices
def sol_in_bin(n_vertices, sol_list):
sol = []
for v in range(n_vertices):
sol.append(0)
for v in sol_list:
sol[v-1] = 1
return sol
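# Example: with 5 vertices and cover [2, 4],
#   sol_in_bin(5, [2, 4]) -> [0, 1, 0, 1, 0]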
def menu():
print("1 - Pesquisa Local")
print("2 - Algoritmo Genetico")
print("3 - Algoritmo hibrido")
op = input("Option: ")
return op
def get_settings():
op = input("Enter 1 for default settings 2 for costumized: ")
if op == '1':
pop_size = 20
prob_mut = 0.01
prob_rec = 0.7
max_gen = 100
else:
pop_size = int(input("Pop size: "))
prob_mut = float(input("Mutation probability: "))
prob_rec = float(input("Crossover probability: "))
max_gen = int(input("Max. generations: "))
return [pop_size, prob_mut, prob_rec, max_gen]
def show_graph(graph):
string = ""
for k in graph:
string += str(k) + ": ["
for v in graph[k]:
string += str(v) + ' '
string += "]\n"
print(string)
def read_file(file_name):
"""
:param file_name: nome do ficheiro a abrir
:return: left_v, right_v, n_vertices, n_arestas
Le o ficheiro extraindo o numero de vertices e numero de arestas
na linha que lhe corresponde (aquela que comeca por 'p')
Nas linhas que comecam por 'e', tira as ligacoes e cria 2 dicionarios, left_v e right_v.
left_v: dict em que as keys sao os vertices que se encontram a esquerda nas ligacoes
e o 'value' e uma lista de vertices que se encontram a direita nas ligacoes
right_v: mesmo que o left mas da direita para a esquerda
"""
left_v = {} # dicionario que contem para cada vertice a esquerda da ligacao todos os que lhe correspondem a direita
right_v = {} # mesmo que left_v, mas da direita para a esquerda
with open(file_name, 'r') as graph:
for edge in graph: # for each line
if edge.startswith('p'):
l = edge.split(' ')
n_vertices = int(l[2])
n_arestas = int(l[3])
if edge.startswith('e'):
e = edge.split(' ')
try:
left_v[int(e[1])].append(int(e[2]))
except KeyError:
left_v[int(e[1])] = [int(e[2])]
try:
right_v[int(e[2])].append(int(e[1]))
except KeyError:
right_v[int(e[2])] = [int(e[1])]
return [left_v, right_v], n_vertices, n_arestas
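# Example (hypothetical DIMACS-like file contents):
#   p edge 4 3
#   e 1 2
#   e 2 3
#   e 3 4
# read_file() on this returns left_v == {1: [2], 2: [3], 3: [4]},
# right_v == {2: [1], 3: [2], 4: [3]}, n_vertices == 4 and n_arestas == 3.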
if __name__ == '__main__':
run()
|
|
from __future__ import division
import numpy as np
from scipy import ndimage as nd
from ..morphology import dilation, erosion, square
from ..util import img_as_float, view_as_windows, pad
from ..color import gray2rgb
def _find_boundaries_subpixel(label_img):
"""See ``find_boundaries(..., mode='subpixel')``.
Notes
-----
This function puts in an empty row and column between each *actual*
row and column of the image, for a corresponding shape of $2s - 1$
for every image dimension of size $s$. These "interstitial" rows
and columns are filled as ``True`` if they separate two labels in
`label_img`, ``False`` otherwise.
We use ``view_as_windows`` to get the neighborhood of each pixel,
then check whether that neighborhood contains two or more distinct
labels.
"""
ndim = label_img.ndim
max_label = np.iinfo(label_img.dtype).max
label_img_expanded = np.zeros([(2 * s - 1) for s in label_img.shape],
label_img.dtype)
pixels = [slice(None, None, 2)] * ndim
label_img_expanded[pixels] = label_img
edges = np.ones(label_img_expanded.shape, dtype=bool)
edges[pixels] = False
label_img_expanded[edges] = max_label
windows = view_as_windows(pad(label_img_expanded, 1,
mode='constant', constant_values=0),
(3,) * ndim)
boundaries = np.zeros_like(edges)
for index in np.ndindex(label_img_expanded.shape):
if edges[index]:
values = np.unique(windows[index].ravel())
if len(values) > 2: # single value and max_label
boundaries[index] = True
return boundaries
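# For example, a (3, 3) label image expands to shape (5, 5): the original
# pixels sit at even indices, and an interstitial position is marked True
# only when the labels it separates differ.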
def find_boundaries(label_img, connectivity=1, mode='thick', background=0):
"""Return bool array where boundaries between labeled regions are True.
Parameters
----------
label_img : array of int
An array in which different regions are labeled with different
integers.
connectivity: int in {1, ..., `label_img.ndim`}, optional
A pixel is considered a boundary pixel if any of its neighbors
has a different label. `connectivity` controls which pixels are
considered neighbors. A connectivity of 1 (default) means
pixels sharing an edge (in 2D) or a face (in 3D) will be
considered neighbors. A connectivity of `label_img.ndim` means
pixels sharing a corner will be considered neighbors.
mode: string in {'thick', 'inner', 'outer', 'subpixel'}
How to mark the boundaries:
- thick: any pixel not completely surrounded by pixels of the
same label (defined by `connectivity`) is marked as a boundary.
This results in boundaries that are 2 pixels thick.
- inner: outline the pixels *just inside* of objects, leaving
background pixels untouched.
- outer: outline pixels in the background around object
boundaries. When two objects touch, their boundary is also
marked.
- subpixel: return a doubled image, with pixels *between* the
original pixels marked as boundary where appropriate.
background: int, optional
For modes 'inner' and 'outer', a definition of a background
label is required. See `mode` for descriptions of these two.
Returns
-------
boundaries : array of bool, same shape as `label_img`
A bool image where ``True`` represents a boundary pixel. For
`mode` equal to 'subpixel', ``boundaries.shape[i]`` is equal
to ``2 * label_img.shape[i] - 1`` for all ``i`` (a pixel is
inserted in between all other pairs of pixels).
Examples
--------
>>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 1, 1, 1, 5, 5, 5, 0, 0],
... [0, 0, 0, 0, 0, 5, 5, 5, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)
>>> find_boundaries(labels, mode='thick').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels, mode='inner').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels, mode='outer').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> labels_small = labels[::2, ::3]
>>> labels_small
array([[0, 0, 0, 0],
[0, 0, 5, 0],
[0, 1, 5, 0],
[0, 0, 5, 0],
[0, 0, 0, 0]], dtype=uint8)
>>> find_boundaries(labels_small, mode='subpixel').astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
"""
ndim = label_img.ndim
selem = nd.generate_binary_structure(ndim, connectivity)
if mode != 'subpixel':
boundaries = dilation(label_img, selem) != erosion(label_img, selem)
if mode == 'inner':
foreground_image = (label_img != background)
boundaries &= foreground_image
elif mode == 'outer':
max_label = np.iinfo(label_img.dtype).max
background_image = (label_img == background)
selem = nd.generate_binary_structure(ndim, ndim)
inverted_background = np.array(label_img, copy=True)
inverted_background[background_image] = max_label
adjacent_objects = ((dilation(label_img, selem) !=
erosion(inverted_background, selem)) &
~background_image)
boundaries &= (background_image | adjacent_objects)
return boundaries
else:
boundaries = _find_boundaries_subpixel(label_img)
return boundaries
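# A minimal illustrative sketch (not part of the library API): for any label
# image, 'inner' and 'outer' together cover exactly the 'thick' mask, since
# 'inner' keeps boundary pixels on objects and 'outer' keeps the rest
# (the two can overlap where labeled objects touch).
def _find_boundaries_demo():
    labels = np.array([[0, 0, 1, 1],
                       [0, 0, 1, 1],
                       [2, 2, 1, 1]], dtype=np.uint8)
    thick = find_boundaries(labels, mode='thick')
    inner = find_boundaries(labels, mode='inner')
    outer = find_boundaries(labels, mode='outer')
    assert np.array_equal(thick, inner | outer)
    # 'subpixel' interposes a pixel between every pair: shape 2*s - 1.
    assert find_boundaries(labels, mode='subpixel').shape == (5, 7)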
def mark_boundaries(image, label_img, color=(1, 1, 0),
outline_color=None, mode='outer', background_label=0):
"""Return image with boundaries between labeled regions highlighted.
Parameters
----------
image : (M, N[, 3]) array
Grayscale or RGB image.
label_img : (M, N) array of int
Label array where regions are marked by different integer values.
color : length-3 sequence, optional
RGB color of boundaries in the output image.
outline_color : length-3 sequence, optional
RGB color surrounding boundaries in the output image. If None, no
outline is drawn.
mode : string in {'thick', 'inner', 'outer', 'subpixel'}, optional
The mode for finding boundaries.
background_label : int, optional
Which label to consider background (this is only useful for
modes ``inner`` and ``outer``).
Returns
-------
marked : (M, N, 3) array of float
An image in which the boundaries between labels are
superimposed on the original image.
See Also
--------
find_boundaries
"""
marked = img_as_float(image, force_copy=True)
if marked.ndim == 2:
marked = gray2rgb(marked)
if mode == 'subpixel':
# Here, we want to interpose an extra line of pixels between
# each original line - except for the last axis which holds
# the RGB information. ``nd.zoom`` then performs the (cubic)
# interpolation, filling in the values of the interposed pixels
marked = nd.zoom(marked, [2 - 1/s for s in marked.shape[:-1]] + [1],
mode='reflect')
boundaries = find_boundaries(label_img, mode=mode,
background=background_label)
if outline_color is not None:
outlines = dilation(boundaries, square(3))
marked[outlines] = outline_color
marked[boundaries] = color
return marked
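# Illustrative sketch (not a doctest from the library): mark_boundaries
# always returns a float RGB image; in 'subpixel' mode the spatial axes
# double to 2*s - 1 to match find_boundaries' interposed-pixel grid.
def _mark_boundaries_demo():
    image = np.zeros((5, 5))
    labels = np.zeros((5, 5), dtype=np.uint8)
    labels[1:4, 1:4] = 1
    marked = mark_boundaries(image, labels, color=(1, 0, 0), mode='inner')
    assert marked.shape == (5, 5, 3)
    doubled = mark_boundaries(image, labels, mode='subpixel')
    assert doubled.shape == (9, 9, 3)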
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class LocalNetworkGatewaysOperations(object):
"""LocalNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-10-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'LocalNetworkGateway')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, local_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a local network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param parameters: Parameters supplied to the create or update local
network gateway operation.
:type parameters:
~azure.mgmt.network.v2017_10_01.models.LocalNetworkGateway
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
LocalNetworkGateway or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.LocalNetworkGateway]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified local network gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LocalNetworkGateway or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_10_01.models.LocalNetworkGateway or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _delete_initial(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, local_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified local network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def _update_tags_initial(
self, resource_group_name, local_network_gateway_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'localNetworkGatewayName': self._serialize.url("local_network_gateway_name", local_network_gateway_name, 'str', min_length=1),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, local_network_gateway_name, tags=None, custom_headers=None, raw=False, **operation_config):
"""Updates a local network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param local_network_gateway_name: The name of the local network
gateway.
:type local_network_gateway_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
LocalNetworkGateway or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.LocalNetworkGateway]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
local_network_gateway_name=local_network_gateway_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('LocalNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the local network gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator-like instance of LocalNetworkGateway
:rtype:
~azure.mgmt.network.v2017_10_01.models.LocalNetworkGatewayPaged[~azure.mgmt.network.v2017_10_01.models.LocalNetworkGateway]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.LocalNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
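# Illustrative usage sketch, not generated code. Assumes ``network_client``
# is an authenticated NetworkManagementClient whose ``local_network_gateways``
# attribute is an instance of the operations class above.
def _example_usage(network_client, resource_group_name):
    gateways = network_client.local_network_gateways
    # Long-running operations return an AzureOperationPoller; .result()
    # blocks until the service reports a terminal state.
    poller = gateways.update_tags(resource_group_name, 'example-lgw',
                                  tags={'env': 'test'})
    gateway = poller.result()
    # list() returns a paged iterable that follows next links lazily.
    names = [gw.name for gw in gateways.list(resource_group_name)]
    return gateway, names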
|
|
"""Test UniFi Controller."""
from collections import deque
from copy import deepcopy
from datetime import timedelta
import aiounifi
from asynctest import Mock, patch
import pytest
from homeassistant.components import unifi
from homeassistant.components.unifi.const import (
CONF_CONTROLLER,
CONF_SITE_ID,
UNIFI_WIRELESS_CLIENTS,
)
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
CONTROLLER_HOST = {
"hostname": "controller_host",
"ip": "1.2.3.4",
"is_wired": True,
"last_seen": 1562600145,
"mac": "10:00:00:00:00:01",
"name": "Controller host",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 1,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
}
CONTROLLER_DATA = {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_SITE_ID: "site_id",
CONF_VERIFY_SSL: False,
}
ENTRY_CONFIG = {CONF_CONTROLLER: CONTROLLER_DATA}
ENTRY_OPTIONS = {}
CONFIGURATION = []
SITES = {"Site name": {"desc": "Site name", "name": "site_id", "role": "admin"}}
async def setup_unifi_integration(
hass,
config=ENTRY_CONFIG,
options=ENTRY_OPTIONS,
sites=SITES,
clients_response=None,
devices_response=None,
clients_all_response=None,
known_wireless_clients=None,
controllers=None,
):
"""Create the UniFi controller."""
configuration = {}
if controllers:
configuration = {unifi.DOMAIN: {unifi.CONF_CONTROLLERS: controllers}}
assert await async_setup_component(hass, unifi.DOMAIN, configuration)
config_entry = MockConfigEntry(
domain=unifi.DOMAIN,
data=deepcopy(config),
options=deepcopy(options),
entry_id=1,
)
config_entry.add_to_hass(hass)
if known_wireless_clients:
hass.data[UNIFI_WIRELESS_CLIENTS].update_data(
known_wireless_clients, config_entry
)
mock_client_responses = deque()
if clients_response:
mock_client_responses.append(clients_response)
mock_device_responses = deque()
if devices_response:
mock_device_responses.append(devices_response)
mock_client_all_responses = deque()
if clients_all_response:
mock_client_all_responses.append(clients_all_response)
mock_requests = []
async def mock_request(self, method, path, json=None):
mock_requests.append({"method": method, "path": path, "json": json})
if path == "s/{site}/stat/sta" and mock_client_responses:
return mock_client_responses.popleft()
if path == "s/{site}/stat/device" and mock_device_responses:
return mock_device_responses.popleft()
if path == "s/{site}/rest/user" and mock_client_all_responses:
return mock_client_all_responses.popleft()
return {}
# "aiounifi.Controller.start_websocket", return_value=True
with patch("aiounifi.Controller.login", return_value=True), patch(
"aiounifi.Controller.sites", return_value=sites
), patch("aiounifi.Controller.request", new=mock_request), patch.object(
aiounifi.websocket.WSClient, "start", return_value=True
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
controller_id = unifi.get_controller_id_from_config_entry(config_entry)
if controller_id not in hass.data[unifi.DOMAIN]:
return None
controller = hass.data[unifi.DOMAIN][controller_id]
controller.mock_client_responses = mock_client_responses
controller.mock_device_responses = mock_device_responses
controller.mock_client_all_responses = mock_client_all_responses
controller.mock_requests = mock_requests
return controller
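# Illustrative sketch (not part of the original suite): the helper exposes
# the recorded mock requests, so tests can assert on the controller traffic.
# The path literal below mirrors the one intercepted in mock_request above.
async def test_setup_requests_client_list(hass):
    controller = await setup_unifi_integration(hass)
    paths = [request["path"] for request in controller.mock_requests]
    assert "s/{site}/stat/sta" in paths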
async def test_controller_setup(hass):
"""Successful setup."""
with patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
) as forward_entry_setup:
controller = await setup_unifi_integration(hass)
entry = controller.config_entry
assert len(forward_entry_setup.mock_calls) == len(
unifi.controller.SUPPORTED_PLATFORMS
)
assert forward_entry_setup.mock_calls[0][1] == (entry, "device_tracker")
assert forward_entry_setup.mock_calls[1][1] == (entry, "sensor")
assert forward_entry_setup.mock_calls[2][1] == (entry, "switch")
assert controller.host == CONTROLLER_DATA[CONF_HOST]
assert controller.site == CONTROLLER_DATA[CONF_SITE_ID]
assert controller.site_name in SITES
assert controller.site_role == SITES[controller.site_name]["role"]
assert (
controller.option_allow_bandwidth_sensors
== unifi.const.DEFAULT_ALLOW_BANDWIDTH_SENSORS
)
assert controller.option_block_clients == unifi.const.DEFAULT_BLOCK_CLIENTS
assert controller.option_track_clients == unifi.const.DEFAULT_TRACK_CLIENTS
assert controller.option_track_devices == unifi.const.DEFAULT_TRACK_DEVICES
assert (
controller.option_track_wired_clients == unifi.const.DEFAULT_TRACK_WIRED_CLIENTS
)
assert controller.option_detection_time == timedelta(
seconds=unifi.const.DEFAULT_DETECTION_TIME
)
assert controller.option_ssid_filter == unifi.const.DEFAULT_SSID_FILTER
assert controller.mac is None
assert controller.signal_update == "unifi-update-1.2.3.4-site_id"
assert controller.signal_options_update == "unifi-options-1.2.3.4-site_id"
async def test_controller_mac(hass):
"""Test that it is possible to identify controller mac."""
controller = await setup_unifi_integration(hass, clients_response=[CONTROLLER_HOST])
assert controller.mac == "10:00:00:00:00:01"
async def test_controller_import_config(hass):
"""Test that import configuration.yaml instructions work."""
controllers = [
{
CONF_HOST: "1.2.3.4",
CONF_SITE_ID: "Site name",
unifi.CONF_BLOCK_CLIENT: ["random mac"],
unifi.CONF_DONT_TRACK_CLIENTS: True,
unifi.CONF_DONT_TRACK_DEVICES: True,
unifi.CONF_DONT_TRACK_WIRED_CLIENTS: True,
unifi.CONF_DETECTION_TIME: 150,
unifi.CONF_SSID_FILTER: ["SSID"],
}
]
controller = await setup_unifi_integration(hass, controllers=controllers)
assert controller.option_allow_bandwidth_sensors is False
assert controller.option_block_clients == ["random mac"]
assert controller.option_track_clients is False
assert controller.option_track_devices is False
assert controller.option_track_wired_clients is False
assert controller.option_detection_time == timedelta(seconds=150)
assert controller.option_ssid_filter == ["SSID"]
async def test_controller_not_accessible(hass):
"""Retry to login gets scheduled when connection fails."""
with patch.object(
unifi.controller, "get_controller", side_effect=unifi.errors.CannotConnect
):
await setup_unifi_integration(hass)
assert hass.data[unifi.DOMAIN] == {}
async def test_controller_unknown_error(hass):
"""Unknown errors are handled."""
with patch.object(unifi.controller, "get_controller", side_effect=Exception):
await setup_unifi_integration(hass)
assert hass.data[unifi.DOMAIN] == {}
async def test_reset_after_successful_setup(hass):
"""Calling reset when the entry has been setup."""
controller = await setup_unifi_integration(hass)
assert len(controller.listeners) == 5
result = await controller.async_reset()
await hass.async_block_till_done()
assert result is True
assert len(controller.listeners) == 0
async def test_wireless_client_event_calls_update_wireless_devices(hass):
"""Call update_wireless_devices method when receiving wireless client event."""
controller = await setup_unifi_integration(hass)
with patch(
"homeassistant.components.unifi.controller.UniFiController.update_wireless_clients",
return_value=None,
) as wireless_clients_mock:
controller.api.websocket._data = {
"meta": {"rc": "ok", "message": "events"},
"data": [
{
"datetime": "2020-01-20T19:37:04Z",
"key": aiounifi.events.WIRELESS_CLIENT_CONNECTED,
"msg": "User[11:22:33:44:55:66] has connected to WLAN",
"time": 1579549024893,
}
],
}
controller.api.session_handler("data")
wireless_clients_mock.assert_called_once()
async def test_get_controller(hass):
"""Successful call."""
with patch("aiounifi.Controller.login", return_value=Mock()):
assert await unifi.controller.get_controller(hass, **CONTROLLER_DATA)
async def test_get_controller_verify_ssl_false(hass):
"""Successful call with verify ssl set to false."""
controller_data = dict(CONTROLLER_DATA)
controller_data[CONF_VERIFY_SSL] = False
with patch("aiounifi.Controller.login", return_value=Mock()):
assert await unifi.controller.get_controller(hass, **controller_data)
async def test_get_controller_login_failed(hass):
"""Check that get_controller can handle a failed login."""
with patch(
"aiounifi.Controller.login", side_effect=aiounifi.Unauthorized
), pytest.raises(unifi.errors.AuthenticationRequired):
await unifi.controller.get_controller(hass, **CONTROLLER_DATA)
async def test_get_controller_controller_unavailable(hass):
"""Check that get_controller can handle controller being unavailable."""
with patch(
"aiounifi.Controller.login", side_effect=aiounifi.RequestError
), pytest.raises(unifi.errors.CannotConnect):
await unifi.controller.get_controller(hass, **CONTROLLER_DATA)
async def test_get_controller_unknown_error(hass):
"""Check that get_controller can handle unknown errors."""
with patch(
"aiounifi.Controller.login", side_effect=aiounifi.AiounifiException
), pytest.raises(unifi.errors.AuthenticationRequired):
await unifi.controller.get_controller(hass, **CONTROLLER_DATA)
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pure Python zipfile importer.
This approximates the standard zipimport module, which isn't supported
by Google App Engine. See PEP 302 for more information about the API
for import hooks.
Usage:
import py_zipimport
As a side effect of importing, the module overrides sys.path_hooks,
and also creates an alias 'zipimport' for itself. When your app is
running in Google App Engine production, you don't even need to import
it, since this is already done for you. In the Google App Engine SDK
this module is not used; instead, the standard zipimport module is
used.
"""
__all__ = ['ZipImportError', 'zipimporter']
import os
import sys
import types
import UserDict
import zipfile
_SEARCH_ORDER = [
('.py', False),
('/__init__.py', True),
]
_zipfile_cache = {}
class ZipImportError(ImportError):
"""Exception raised by zipimporter objects."""
class zipimporter:
"""A PEP-302-style importer that can import from a zipfile.
Just insert or append this class (not an instance) to sys.path_hooks
and you're in business. Instances satisfy both the 'importer' and
'loader' APIs specified in PEP 302.
"""
def __init__(self, path_entry):
"""Constructor.
Args:
path_entry: The entry in sys.path. This should be the name of an
existing zipfile possibly with a path separator and a prefix
path within the archive appended, e.g. /x/django.zip or
/x/django.zip/foo/bar.
Raises:
ZipImportError if the path_entry does not represent a valid
zipfile with optional prefix.
"""
archive = path_entry
prefix = ''
while not os.path.lexists(archive):
head, tail = os.path.split(archive)
if head == archive:
msg = 'Nothing found for %r' % path_entry
raise ZipImportError(msg)
archive = head
prefix = os.path.join(tail, prefix)
if not os.path.isfile(archive):
msg = 'Non-file %r found for %r' % (archive, path_entry)
raise ZipImportError(msg)
self.archive = archive
self.prefix = os.path.join(prefix, '')
self.zipfile = _zipfile_cache.get(archive)
if self.zipfile is None:
try:
self.zipfile = zipfile.ZipFile(self.archive)
except (EnvironmentError, zipfile.BadZipfile), err:
msg = 'Can\'t open zipfile %s: %s: %s' % (self.archive,
err.__class__.__name__, err)
import logging
logging.warn(msg)
raise ZipImportError(msg)
else:
_zipfile_cache[archive] = self.zipfile
import logging
logging.info('zipimporter(%r, %r)', archive, prefix)
def __repr__(self):
"""Return a string representation matching zipimport.c."""
name = self.archive
if self.prefix:
name = os.path.join(name, self.prefix)
return '<zipimporter object "%s">' % name
def _get_info(self, fullmodname):
"""Internal helper for find_module() and load_module().
Args:
fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
Returns:
A tuple (submodname, is_package, relpath) where:
submodname: The final component of the module name, e.g. 'mail'.
is_package: A bool indicating whether this is a package.
relpath: The path to the module's source code within the zipfile.
Raises:
ImportError if the module is not found in the archive.
"""
parts = fullmodname.split('.')
submodname = parts[-1]
for suffix, is_package in _SEARCH_ORDER:
relpath = os.path.join(self.prefix,
submodname + suffix.replace('/', os.sep))
try:
self.zipfile.getinfo(relpath.replace(os.sep, '/'))
except KeyError:
pass
else:
return submodname, is_package, relpath
msg = ('Can\'t find module %s in zipfile %s with prefix %r' %
(fullmodname, self.archive, self.prefix))
raise ZipImportError(msg)
def _get_source(self, fullmodname):
"""Internal helper for load_module().
Args:
fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
Returns:
A tuple (submodname, is_package, fullpath, source) where:
submodname: The final component of the module name, e.g. 'mail'.
is_package: A bool indicating whether this is a package.
fullpath: The path to the module's source code including the
zipfile's filename.
source: The module's source code.
Raises:
ImportError if the module is not found in the archive.
"""
submodname, is_package, relpath = self._get_info(fullmodname)
fullpath = '%s%s%s' % (self.archive, os.sep, relpath)
source = self.zipfile.read(relpath.replace(os.sep, '/'))
source = source.replace('\r\n', '\n')
source = source.replace('\r', '\n')
return submodname, is_package, fullpath, source
def find_module(self, fullmodname, path=None):
"""PEP-302-compliant find_module() method.
Args:
fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
path: Optional and ignored; present for API compatibility only.
Returns:
None if the module isn't found in the archive; self if it is found.
"""
try:
submodname, is_package, relpath = self._get_info(fullmodname)
except ImportError:
return None
else:
return self
def load_module(self, fullmodname):
"""PEP-302-compliant load_module() method.
Args:
fullmodname: The dot-separated full module name, e.g. 'django.core.mail'.
Returns:
The module object constructed from the source code.
Raises:
SyntaxError if the module's source code is syntactically incorrect.
ImportError if there was a problem accessing the source code.
Whatever else can be raised by executing the module's source code.
"""
submodname, is_package, fullpath, source = self._get_source(fullmodname)
code = compile(source, fullpath, 'exec')
mod = sys.modules.get(fullmodname)
try:
if mod is None:
mod = sys.modules[fullmodname] = types.ModuleType(fullmodname)
mod.__loader__ = self
mod.__file__ = fullpath
mod.__name__ = fullmodname
if is_package:
mod.__path__ = [os.path.dirname(mod.__file__)]
exec code in mod.__dict__
except:
if fullmodname in sys.modules:
del sys.modules[fullmodname]
raise
return mod
def get_data(self, fullpath):
"""Return (binary) content of a data file in the zipfile."""
prefix = os.path.join(self.archive, '')
if fullpath.startswith(prefix):
relpath = fullpath[len(prefix):]
elif os.path.isabs(fullpath):
raise IOError('Absolute path %r doesn\'t start with zipfile name %r' %
(fullpath, prefix))
else:
relpath = fullpath
try:
return self.zipfile.read(relpath)
except KeyError:
raise IOError('Path %r not found in zipfile %r' %
(relpath, self.archive))
def is_package(self, fullmodname):
"""Return whether a module is a package."""
submodname, is_package, relpath = self._get_info(fullmodname)
return is_package
def get_code(self, fullmodname):
"""Return bytecode for a module."""
submodname, is_package, fullpath, source = self._get_source(fullmodname)
return compile(source, fullpath, 'exec')
def get_source(self, fullmodname):
"""Return source code for a module."""
submodname, is_package, fullpath, source = self._get_source(fullmodname)
return source
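# Illustrative sketch (not part of the original module): drive the importer
# directly, bypassing sys.path. The archive path and module name are
# caller-supplied assumptions, e.g. '/x/django.zip' and 'django.core.mail'.
def _manual_import(archive_path, fullmodname):
    importer = zipimporter(archive_path)
    if importer.find_module(fullmodname) is None:
        raise ZipImportError('%s not found in %s' % (fullmodname, archive_path))
    return importer.load_module(fullmodname)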
class ZipFileCache(UserDict.DictMixin):
"""Helper class to export archive data in _zip_directory_cache.
Just take the info from _zipfile_cache and convert it as required.
"""
def __init__(self, archive):
_zipfile_cache[archive]  # raise KeyError now if the archive isn't cached
self._archive = archive
def keys(self):
return _zipfile_cache[self._archive].namelist()
def __getitem__(self, filename):
info = _zipfile_cache[self._archive].getinfo(filename)
dt = info.date_time
# Pack (hour, minute, second//2) and (year-1980, month, day) into the
# 16-bit MS-DOS time and date words that zipimport.c reports.
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
return (os.path.join(self._archive, info.filename), info.compress_type,
info.compress_size, info.file_size, info.header_offset, dostime,
dosdate, info.CRC)
class ZipDirectoryCache(UserDict.DictMixin):
"""Helper class to export _zip_directory_cache."""
def keys(self):
return _zipfile_cache.keys()
def __getitem__(self, archive):
return ZipFileCache(archive)
_zip_directory_cache = ZipDirectoryCache()
sys.modules['zipimport'] = sys.modules[__name__]
sys.path_hooks[:] = [zipimporter]
|
|
import asyncio
import weakref
from bokeh.layouts import row, column
from bokeh.models import (
ColumnDataSource,
DataRange1d,
HoverTool,
Range1d,
Button,
Select,
NumeralTickFormatter,
)
from bokeh.palettes import Spectral9
from bokeh.plotting import figure
import dask
from tornado import gen
import tlz as toolz
from distributed.dashboard.components import DashboardComponent
from distributed.dashboard.utils import (
without_property_validation,
BOKEH_VERSION,
update,
)
from distributed import profile
from distributed.utils import log_errors, parse_timedelta
from distributed.compatibility import WINDOWS
if dask.config.get("distributed.dashboard.export-tool"):
from distributed.dashboard.export_tool import ExportTool
else:
ExportTool = None
profile_interval = dask.config.get("distributed.worker.profile.interval")
profile_interval = parse_timedelta(profile_interval, default="ms")
class Processing(DashboardComponent):
""" Processing and distribution per core
This shows how many tasks are actively running on each worker and how many
tasks are enqueued for each worker and how many are in the common pool
"""
def __init__(self, **kwargs):
data = self.processing_update({"processing": {}, "nthreads": {}})
self.source = ColumnDataSource(data)
x_range = Range1d(-1, 1)
fig = figure(
title="Processing and Pending",
tools="",
x_range=x_range,
id="bk-processing-stacks-plot",
**kwargs
)
fig.quad(
source=self.source,
left=0,
right="right",
color=Spectral9[0],
top="top",
bottom="bottom",
)
fig.xaxis.minor_tick_line_alpha = 0
fig.yaxis.visible = False
fig.ygrid.visible = False
hover = HoverTool()
fig.add_tools(hover)
hover = fig.select(HoverTool)
hover.tooltips = """
<div>
<span style="font-size: 14px; font-weight: bold;">Host:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@name</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Processing:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@processing</span>
</div>
"""
hover.point_policy = "follow_mouse"
self.root = fig
@without_property_validation
def update(self, messages):
with log_errors():
msg = messages["processing"]
if not msg.get("nthreads"):
return
data = self.processing_update(msg)
x_range = self.root.x_range
max_right = max(data["right"])
cores = max(data["nthreads"])
if x_range.end < max_right:
x_range.end = max_right + 2
elif x_range.end > 2 * max_right + cores: # way out there, walk back
x_range.end = x_range.end * 0.95 + max_right * 0.05
update(self.source, data)
@staticmethod
def processing_update(msg):
with log_errors():
names = sorted(msg["processing"])
processing = msg["processing"]
processing = [processing[name] for name in names]
nthreads = msg["nthreads"]
nthreads = [nthreads[name] for name in names]
n = len(names)
d = {
"name": list(names),
"processing": processing,
"right": list(processing),
"top": list(range(n, 0, -1)),
"bottom": list(range(n - 1, -1, -1)),
"nthreads": nthreads,
}
d["alpha"] = [0.7] * n
return d
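# Illustrative payload sketch (worker addresses are assumptions): the update
# stream delivers per-worker task counts, which processing_update converts
# into the parallel name/right/top/bottom columns driving the quad glyphs.
_EXAMPLE_PROCESSING_MSG = {
    "processing": {"tcp://10.0.0.1:8786": 3, "tcp://10.0.0.2:8786": 1},
    "nthreads": {"tcp://10.0.0.1:8786": 4, "tcp://10.0.0.2:8786": 4},
}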
class ProfilePlot(DashboardComponent):
""" Time plots of the current resource usage on the cluster
This is two plots, one for CPU and Memory and another for Network I/O
"""
def __init__(self, **kwargs):
state = profile.create()
data = profile.plot_data(state, profile_interval)
self.states = data.pop("states")
self.root, self.source = profile.plot_figure(data, **kwargs)
@without_property_validation
def cb(attr, old, new):
with log_errors():
try:
selected = new.indices
except AttributeError:
selected = new["1d"]["indices"]
try:
ind = selected[0]
except IndexError:
return
data = profile.plot_data(self.states[ind], profile_interval)
del self.states[:]
self.states.extend(data.pop("states"))
update(self.source, data)
self.source.selected = old
if BOKEH_VERSION >= "1.0.0":
self.source.selected.on_change("indices", cb)
else:
self.source.on_change("selected", cb)
@without_property_validation
def update(self, state):
with log_errors():
self.state = state
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
update(self.source, data)
class ProfileTimePlot(DashboardComponent):
""" Time plots of the current resource usage on the cluster
This is two plots, one for CPU and Memory and another for Network I/O
"""
def __init__(self, server, doc=None, **kwargs):
if doc is not None:
self.doc = weakref.ref(doc)
try:
self.key = doc.session_context.request.arguments.get("key", None)
except AttributeError:
self.key = None
if isinstance(self.key, list):
self.key = self.key[0]
if isinstance(self.key, bytes):
self.key = self.key.decode()
self.task_names = ["All", self.key]
else:
self.key = None
self.task_names = ["All"]
self.server = server
self.start = None
self.stop = None
self.ts = {"count": [], "time": []}
self.state = profile.create()
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
self.profile_plot, self.source = profile.plot_figure(data, **kwargs)
changing = [False] # avoid repeated changes from within callback
@without_property_validation
def cb(attr, old, new):
if changing[0]:
return
with log_errors():
if isinstance(new, list): # bokeh >= 1.0
selected = new
else:
selected = new["1d"]["indices"]
try:
ind = selected[0]
except IndexError:
return
data = profile.plot_data(self.states[ind], profile_interval)
del self.states[:]
self.states.extend(data.pop("states"))
changing[0] = True # don't recursively trigger callback
update(self.source, data)
if isinstance(new, list): # bokeh >= 1.0
self.source.selected.indices = old
else:
self.source.selected = old
changing[0] = False
if BOKEH_VERSION >= "1.0.0":
self.source.selected.on_change("indices", cb)
else:
self.source.on_change("selected", cb)
self.ts_source = ColumnDataSource({"time": [], "count": []})
self.ts_plot = figure(
title="Activity over time",
height=150,
x_axis_type="datetime",
active_drag="xbox_select",
y_range=[0, 1 / profile_interval],
tools="xpan,xwheel_zoom,xbox_select,reset",
sizing_mode="stretch_width",
)
self.ts_plot.line("time", "count", source=self.ts_source)
self.ts_plot.circle(
"time", "count", source=self.ts_source, color=None, selection_color="orange"
)
self.ts_plot.yaxis.visible = False
self.ts_plot.grid.visible = False
def ts_change(attr, old, new):
with log_errors():
try:
selected = self.ts_source.selected.indices
except AttributeError:
selected = self.ts_source.selected["1d"]["indices"]
if selected:
start = self.ts_source.data["time"][min(selected)] / 1000
stop = self.ts_source.data["time"][max(selected)] / 1000
self.start, self.stop = min(start, stop), max(start, stop)
else:
self.start = self.stop = None
self.trigger_update(update_metadata=False)
if BOKEH_VERSION >= "1.0.0":
self.ts_source.selected.on_change("indices", ts_change)
else:
self.ts_source.on_change("selected", ts_change)
self.reset_button = Button(label="Reset", button_type="success")
self.reset_button.on_click(lambda: self.update(self.state))
self.update_button = Button(label="Update", button_type="success")
self.update_button.on_click(self.trigger_update)
self.select = Select(value=self.task_names[-1], options=self.task_names)
def select_cb(attr, old, new):
if new == "All":
new = None
self.key = new
self.trigger_update(update_metadata=False)
self.select.on_change("value", select_cb)
self.root = column(
row(
self.select,
self.reset_button,
self.update_button,
sizing_mode="scale_width",
height=250,
),
self.profile_plot,
self.ts_plot,
**kwargs
)
@without_property_validation
def update(self, state, metadata=None):
with log_errors():
self.state = state
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
update(self.source, data)
if metadata is not None and metadata["counts"]:
self.task_names = ["All"] + sorted(metadata["keys"])
self.select.options = self.task_names
if self.key:
ts = metadata["keys"][self.key]
else:
ts = metadata["counts"]
times, counts = zip(*ts)
self.ts = {"count": counts, "time": [t * 1000 for t in times]}
self.ts_source.data.update(self.ts)
@without_property_validation
def trigger_update(self, update_metadata=True):
async def cb():
with log_errors():
prof = await self.server.get_profile(
key=self.key, start=self.start, stop=self.stop
)
if update_metadata:
metadata = await self.server.get_profile_metadata()
else:
metadata = None
if isinstance(prof, gen.Future):
prof, metadata = await asyncio.gather(prof, metadata)
self.doc().add_next_tick_callback(lambda: self.update(prof, metadata))
self.server.loop.add_callback(cb)
class ProfileServer(DashboardComponent):
""" Time plots of the current resource usage on the cluster
This is two plots, one for CPU and Memory and another for Network I/O
"""
def __init__(self, server, doc=None, **kwargs):
if doc is not None:
self.doc = weakref.ref(doc)
self.server = server
self.log = self.server.io_loop.profile
self.start = None
self.stop = None
self.ts = {"count": [], "time": []}
self.state = profile.get_profile(self.log)
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
self.profile_plot, self.source = profile.plot_figure(data, **kwargs)
changing = [False] # avoid repeated changes from within callback
@without_property_validation
def cb(attr, old, new):
if changing[0]:
return
with log_errors():
if isinstance(new, list): # bokeh >= 1.0
selected = new
else:
selected = new["1d"]["indices"]
try:
ind = selected[0]
except IndexError:
return
data = profile.plot_data(self.states[ind], profile_interval)
del self.states[:]
self.states.extend(data.pop("states"))
changing[0] = True # don't recursively trigger callback
update(self.source, data)
if isinstance(new, list): # bokeh >= 1.0
self.source.selected.indices = old
else:
self.source.selected = old
changing[0] = False
if BOKEH_VERSION >= "1.0.0":
self.source.selected.on_change("indices", cb)
else:
self.source.on_change("selected", cb)
self.ts_source = ColumnDataSource({"time": [], "count": []})
self.ts_plot = figure(
title="Activity over time",
height=150,
x_axis_type="datetime",
active_drag="xbox_select",
y_range=[0, 1 / profile_interval],
tools="xpan,xwheel_zoom,xbox_select,reset",
sizing_mode="stretch_width",
)
self.ts_plot.line("time", "count", source=self.ts_source)
self.ts_plot.circle(
"time", "count", source=self.ts_source, color=None, selection_color="orange"
)
self.ts_plot.yaxis.visible = False
self.ts_plot.grid.visible = False
def ts_change(attr, old, new):
with log_errors():
try:
selected = self.ts_source.selected.indices
except AttributeError:
selected = self.ts_source.selected["1d"]["indices"]
if selected:
start = self.ts_source.data["time"][min(selected)] / 1000
stop = self.ts_source.data["time"][max(selected)] / 1000
self.start, self.stop = min(start, stop), max(start, stop)
else:
self.start = self.stop = None
self.trigger_update()
if BOKEH_VERSION >= "1.0.0":
self.ts_source.selected.on_change("indices", ts_change)
else:
self.ts_source.on_change("selected", ts_change)
self.reset_button = Button(label="Reset", button_type="success")
self.reset_button.on_click(lambda: self.update(self.state))
self.update_button = Button(label="Update", button_type="success")
self.update_button.on_click(self.trigger_update)
self.root = column(
row(self.reset_button, self.update_button, sizing_mode="scale_width"),
self.profile_plot,
self.ts_plot,
**kwargs
)
@without_property_validation
def update(self, state):
with log_errors():
self.state = state
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
update(self.source, data)
@without_property_validation
def trigger_update(self):
self.state = profile.get_profile(self.log, start=self.start, stop=self.stop)
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
update(self.source, data)
# self.log holds (timestamp, profile-dict) pairs; pull the per-sample
# call counts out of the nested dicts for the activity timeline.
times = [t * 1000 for t, _ in self.log]
counts = list(toolz.pluck("count", toolz.pluck(1, self.log)))
self.ts_source.data.update({"time": times, "count": counts})
class SystemMonitor(DashboardComponent):
""" Time series of this worker's resource usage
Plots CPU percentage, memory and network bandwidth over time, plus the
number of open file descriptors on non-Windows platforms.
"""
def __init__(self, worker, height=150, **kwargs):
self.worker = worker
names = worker.monitor.quantities
self.last = 0
self.source = ColumnDataSource({name: [] for name in names})
update(self.source, self.get_data())
x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
tools = "reset,xpan,xwheel_zoom"
self.cpu = figure(
title="CPU",
x_axis_type="datetime",
height=height,
tools=tools,
x_range=x_range,
**kwargs
)
self.cpu.line(source=self.source, x="time", y="cpu")
self.cpu.yaxis.axis_label = "Percentage"
self.mem = figure(
title="Memory",
x_axis_type="datetime",
height=height,
tools=tools,
x_range=x_range,
**kwargs
)
self.mem.line(source=self.source, x="time", y="memory")
self.mem.yaxis.axis_label = "Bytes"
self.bandwidth = figure(
title="Bandwidth",
x_axis_type="datetime",
height=height,
x_range=x_range,
tools=tools,
**kwargs
)
self.bandwidth.line(source=self.source, x="time", y="read_bytes", color="red")
self.bandwidth.line(source=self.source, x="time", y="write_bytes", color="blue")
self.bandwidth.yaxis.axis_label = "Bytes / second"
# self.cpu.yaxis[0].formatter = NumeralTickFormatter(format='0%')
self.bandwidth.yaxis[0].formatter = NumeralTickFormatter(format="0.0b")
self.mem.yaxis[0].formatter = NumeralTickFormatter(format="0.0b")
plots = [self.cpu, self.mem, self.bandwidth]
if not WINDOWS:
self.num_fds = figure(
title="Number of File Descriptors",
x_axis_type="datetime",
height=height,
x_range=x_range,
tools=tools,
**kwargs
)
self.num_fds.line(source=self.source, x="time", y="num_fds")
plots.append(self.num_fds)
if "sizing_mode" in kwargs:
kw = {"sizing_mode": kwargs["sizing_mode"]}
else:
kw = {}
if not WINDOWS:
self.num_fds.y_range.start = 0
self.mem.y_range.start = 0
self.cpu.y_range.start = 0
self.bandwidth.y_range.start = 0
self.root = column(*plots, **kw)
self.worker.monitor.update()
def get_data(self):
d = self.worker.monitor.range_query(start=self.last)
d["time"] = [x * 1000 for x in d["time"]]
self.last = self.worker.monitor.count
return d
@without_property_validation
def update(self):
with log_errors():
self.source.stream(self.get_data(), 1000)
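# Illustrative embedding sketch (``doc`` and ``worker`` are assumptions,
# e.g. a bokeh Document and a distributed Worker): render the monitor and
# refresh it twice a second.
def _attach_system_monitor(doc, worker):
    monitor = SystemMonitor(worker, sizing_mode="stretch_both")
    doc.add_root(monitor.root)
    doc.add_periodic_callback(monitor.update, 500)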
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
class _CustomMapping(collections.Mapping):
def __init__(self, *args, **kwargs):
self._wrapped = dict(*args, **kwargs)
def __getitem__(self, key):
return self._wrapped[key]
def __iter__(self):
return iter(self._wrapped)
def __len__(self):
return len(self._wrapped)
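# Minimal sketch (illustrative, not an original test): nest treats any
# collections.Mapping like a dict, flattening values in sorted-key order.
def _custom_mapping_demo():
    assert nest.flatten(_CustomMapping({"b": 2, "a": 1})) == [1, 2]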
class NestTest(parameterized.TestCase, test.TestCase):
PointXY = collections.namedtuple("Point", ["x", "y"]) # pylint: disable=invalid-name
if attr:
class BadAttr(object):
"""Class that has a non-iterable __attrs_attrs__."""
__attrs_attrs__ = None
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsFlattenAndPack(self):
if attr is None:
self.skipTest("attr module is unavailable.")
field_values = [1, 2]
sample_attr = NestTest.SampleAttr(*field_values)
self.assertFalse(nest._is_attrs(field_values))
self.assertTrue(nest._is_attrs(sample_attr))
flat = nest.flatten(sample_attr)
self.assertEqual(field_values, flat)
restructured_from_flat = nest.pack_sequence_as(sample_attr, flat)
self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr)
self.assertEqual(restructured_from_flat, sample_attr)
# Check that flatten fails if attributes are not iterable
with self.assertRaisesRegexp(TypeError, "object is not iterable"):
flat = nest.flatten(NestTest.BadAttr())
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
structure = (NestTest.PointXY(x=4, y=2),
((NestTest.PointXY(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, "flat_sequence"):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenDictOrder(self, mapping_type):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
def testPackDictOrder(self, mapping_type):
"""Packing orders dicts by key, including OrderedDicts."""
custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
custom_reconstruction = nest.pack_sequence_as(custom, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertIsInstance(custom_reconstruction, mapping_type)
self.assertIsInstance(plain_reconstruction, dict)
self.assertEqual(
mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
custom_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack_withDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
mess = [
"z",
NestTest.Abc(3, 4), {
"d": _CustomMapping({
41: 4
}),
"c": [
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
],
"b": 5
}, 17
]
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17])
structure_of_mess = [
14,
NestTest.Abc("a", True),
{
"d": _CustomMapping({
41: 42
}),
"c": [
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
],
"b": 3
},
"hi everybody",
]
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
# Check also that the OrderedDict was created, with the correct key order.
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
unflattened_custom_mapping = unflattened[2]["d"]
self.assertIsInstance(unflattened_custom_mapping, _CustomMapping)
self.assertEqual(list(unflattened_custom_mapping.keys()), [41])
def testFlatten_numpyIsNotFlattened(self):
structure = np.array([1, 2, 3])
flattened = nest.flatten(structure)
self.assertEqual(len(flattened), 1)
def testFlatten_stringIsNotFlattened(self):
structure = "lots of letters"
flattened = nest.flatten(structure)
self.assertEqual(len(flattened), 1)
unflattened = nest.pack_sequence_as("goodbye", flattened)
self.assertEqual(structure, unflattened)
def testPackSequenceAs_notIterableError(self):
with self.assertRaisesRegexp(TypeError,
"flat_sequence must be a sequence"):
nest.pack_sequence_as("hi", "bye")
def testPackSequenceAs_wrongLengthsError(self):
with self.assertRaisesRegexp(
ValueError,
"Structure had 2 elements, but flat_sequence had 3 elements."):
nest.pack_sequence_as(["hello", "world"],
["and", "goodbye", "again"])
@test_util.assert_no_new_pyobjects_executing_eagerly
def testIsSequence(self):
self.assertFalse(nest.is_sequence("1234"))
self.assertTrue(nest.is_sequence([1, 3, [4, 5]]))
self.assertTrue(nest.is_sequence(((7, 8), (5, 6))))
self.assertTrue(nest.is_sequence([]))
self.assertTrue(nest.is_sequence({"a": 1, "b": 2}))
self.assertFalse(nest.is_sequence(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_sequence(ones))
self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))
self.assertFalse(nest.is_sequence(np.ones((4, 5))))
@parameterized.parameters({"mapping_type": _CustomMapping},
{"mapping_type": dict})
def testFlattenDictItems(self, mapping_type):
dictionary = mapping_type({(4, 5, (6, 8)): ("a", "b", ("c", "d"))})
flat = {4: "a", 5: "b", 6: "c", 8: "d"}
self.assertEqual(nest.flatten_dict_items(dictionary), flat)
with self.assertRaises(TypeError):
nest.flatten_dict_items(4)
bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
with self.assertRaisesRegexp(ValueError, "not unique"):
nest.flatten_dict_items(bad_dictionary)
another_bad_dictionary = mapping_type({
(4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
})
with self.assertRaisesRegexp(
ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
nest.flatten_dict_items(another_bad_dictionary)
# pylint does not correctly recognize these as class names and
# suggests to use variable style under_score naming.
# pylint: disable=invalid-name
Named0ab = collections.namedtuple("named_0", ("a", "b"))
Named1ab = collections.namedtuple("named_1", ("a", "b"))
SameNameab = collections.namedtuple("same_name", ("a", "b"))
SameNameab2 = collections.namedtuple("same_name", ("a", "b"))
SameNamexy = collections.namedtuple("same_name", ("x", "y"))
SameName1xy = collections.namedtuple("same_name_1", ("x", "y"))
SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y"))
NotSameName = collections.namedtuple("not_same_name", ("a", "b"))
# pylint: enable=invalid-name
class SameNamedType1(SameNameab):
pass
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
"More specifically: Substructure "
r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while '
'substructure "type=str str=spam" is not\n'
"Entire first structure:\n"
r"\(\(\(\., \.\), \.\), \., \(\., \.\)\)\n"
"Entire second structure:\n"
r"\(\., \.\)")):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
r'is a sequence, while substructure "type=ndarray str=\[0 1\]" '
"is not")):
nest.assert_same_structure([0, 1], np.array([0, 1]))
with self.assertRaisesRegexp(
ValueError,
("The two structures don't have the same nested structure\\.\n\n"
"First structure:.*?\n\n"
"Second structure:.*\n\n"
r'More specifically: Substructure "type=list str=\[0, 1\]" '
'is a sequence, while substructure "type=int str=0" '
"is not")):
nest.assert_same_structure(0, [0, 1])
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(structure1, structure_different_nesting)
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
NestTest.Named0ab("a", "b"))
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure(NestTest.Named0ab(3, 4),
NestTest.Named0ab([3], 4))
with self.assertRaisesRegexp(
ValueError,
("don't have the same nested structure\\.\n\n"
"First structure: .*?\n\nSecond structure: ")):
nest.assert_same_structure([[3], 4], [3, [4]])
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
with self.assertRaisesRegexp(ValueError,
"don't have the same set of keys"):
nest.assert_same_structure({"a": 1}, {"b": 1})
nest.assert_same_structure(NestTest.SameNameab(0, 1),
NestTest.SameNameab2(2, 3))
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
nest.assert_same_structure(
NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2),
NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4))
expected_message = "The two structures don't have the same.*"
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_same_structure(
NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)),
NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3))
self.assertRaises(TypeError, nest.assert_same_structure,
NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3))
EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name
def testHeterogeneousComparison(self):
nest.assert_same_structure({"a": 4}, _CustomMapping(a=3))
nest.assert_same_structure(_CustomMapping(b=3), {"b": 4})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
# Empty structures
self.assertEqual((), nest.map_structure(lambda x: x + 1, ()))
self.assertEqual([], nest.map_structure(lambda x: x + 1, []))
self.assertEqual({}, nest.map_structure(lambda x: x + 1, {}))
self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1,
NestTest.EmptyNT()))
# This is checking actual equality of types, empty list != empty tuple
self.assertNotEqual((), nest.map_structure(lambda x: x + 1, []))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "at least one structure"):
nest.map_structure(lambda x: x)
with self.assertRaisesRegexp(ValueError, "same number of elements"):
nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
structure1_list = [[[1, 2], 3], 4, [5, 6]]
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, structure1, structure1_list)
nest.map_structure(lambda x, y: None, structure1, structure1_list,
check_types=False)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name
@test_util.assert_no_new_pyobjects_executing_eagerly
def testMapStructureWithStrings(self):
inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz"))
inp_b = NestTest.ABTuple(a=2, b=(1, 3))
out = nest.map_structure(lambda string, repeats: string * repeats,
inp_a,
inp_b)
self.assertEqual("foofoo", out.a)
self.assertEqual("bar", out.b[0])
self.assertEqual("bazbazbaz", out.b[1])
nt = NestTest.ABTuple(a=("something", "something_else"),
b="yet another thing")
rev_nt = nest.map_structure(lambda x: x[::-1], nt)
# Check the output is the correct structure, and all strings are reversed.
nest.assert_same_structure(nt, rev_nt)
self.assertEqual(nt.a[0][::-1], rev_nt.a[0])
self.assertEqual(nt.a[1][::-1], rev_nt.a[1])
self.assertEqual(nt.b[::-1], rev_nt.b)
def testMapStructureOverPlaceholders(self):
inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
array_ops.placeholder(dtypes.float32, shape=[3, 7]))
output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b)
nest.assert_same_structure(output, inp_a)
self.assertShapeEqual(np.zeros((3, 4)), output[0])
self.assertShapeEqual(np.zeros((3, 7)), output[1])
feed_dict = {
inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)),
inp_b: (np.random.randn(3, 4), np.random.randn(3, 7))
}
with self.cached_session() as sess:
output_np = sess.run(output, feed_dict=feed_dict)
self.assertAllClose(output_np[0],
feed_dict[inp_a][0] + feed_dict[inp_b][0])
self.assertAllClose(output_np[1],
feed_dict[inp_a][1] + feed_dict[inp_b][1])
def testAssertShallowStructure(self):
inp_ab = ["a", "b"]
inp_abc = ["a", "b", "c"]
expected_message = (
"The two structures don't have the same sequence length. Input "
"structure has length 2, while shallow structure has length 3.")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = [(1, 1), (2, 2)]
inp_ab2 = [[1, 1], [2, 2]]
expected_message = (
"The two structures don't have the same sequence type. Input structure "
"has type <(type|class) 'tuple'>, while shallow structure has type "
"<(type|class) 'list'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
expected_message = (
r"The two structures don't have the same keys. Input "
r"structure has keys \['c'\], while shallow structure has "
r"keys \['d'\].")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
# This assertion is expected to pass: two namedtuples with the same
# name and field names are considered to be identical.
inp_shallow = NestTest.SameNameab(1, 2)
inp_deep = NestTest.SameNameab2(1, [1, 2, 3])
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
def testFlattenUpTo(self):
# Shallow tree ends at scalar.
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
# Shallow tree ends at string.
input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
# Make sure dicts are correctly flattened, yielding values, not keys.
input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[1, {"c": 2}, 3, (4, 5)])
# Namedtuples.
ab_tuple = NestTest.ABTuple
input_tree = ab_tuple(a=[0, 1], b=2)
shallow_tree = ab_tuple(a=0, b=1)
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[[0, 1], 2])
# Nested dicts, OrderedDicts and namedtuples.
input_tree = collections.OrderedDict(
[("a", ab_tuple(a=[0, {"b": 1}], b=2)),
("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
shallow_tree = input_tree
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
3,
collections.OrderedDict([("f", 4)])])
shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[ab_tuple(a=[0, {"b": 1}], b=2),
{"d": 3, "e": collections.OrderedDict([("f", 4)])}])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ["input_tree_0", "input_tree_1"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = [0]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = [0, 1]
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ["shallow_tree"]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = "input_tree"
shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
# Using non-iterable elements.
input_tree = 0
shallow_tree = [9]
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
input_tree = 0
shallow_tree = [9, 8]
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, shallow_tree)
def testMapStructureUpTo(self):
# Named tuples.
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
# Lists.
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ["evens", ["odds", "primes"]]
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]])
# Dicts.
inp_val = dict(a=2, b=3)
inp_ops = dict(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
self.assertEqual(out["a"], 6)
self.assertEqual(out["b"], 15)
# Non-equal dicts.
inp_val = dict(a=2, b=3)
inp_ops = dict(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
with self.assertRaisesRegexp(ValueError, "same keys"):
nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
# Dict+custom mapping.
inp_val = dict(a=2, b=3)
inp_ops = _CustomMapping(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
self.assertEqual(out["a"], 6)
self.assertEqual(out["b"], 15)
# Non-equal dict/mapping.
inp_val = dict(a=2, b=3)
inp_ops = _CustomMapping(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
with self.assertRaisesRegexp(ValueError, "same keys"):
nest.map_structure_up_to(
inp_val,
lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
def testGetTraverseShallowStructure(self):
scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []]
scalar_traverse_r = nest.get_traverse_shallow_structure(
lambda s: not isinstance(s, tuple),
scalar_traverse_input)
self.assertEqual(scalar_traverse_r,
[True, True, False, [True, True], {"a": False}, []])
nest.assert_shallow_structure(scalar_traverse_r,
scalar_traverse_input)
structure_traverse_input = [(1, [2]), ([1], 2)]
structure_traverse_r = nest.get_traverse_shallow_structure(
lambda s: (True, False) if isinstance(s, tuple) else True,
structure_traverse_input)
self.assertEqual(structure_traverse_r,
[(True, False), ([True], False)])
nest.assert_shallow_structure(structure_traverse_r,
structure_traverse_input)
with self.assertRaisesRegexp(TypeError, "returned structure"):
nest.get_traverse_shallow_structure(lambda _: [True], 0)
with self.assertRaisesRegexp(TypeError, "returned a non-bool scalar"):
nest.get_traverse_shallow_structure(lambda _: 1, [1])
with self.assertRaisesRegexp(
TypeError, "didn't return a depth=1 structure of bools"):
nest.get_traverse_shallow_structure(lambda _: [1], [1])
def testYieldFlatStringPaths(self):
for inputs_expected in ({"inputs": [], "expected": []},
{"inputs": 3, "expected": [()]},
{"inputs": [3], "expected": [(0,)]},
{"inputs": {"a": 3}, "expected": [("a",)]},
{"inputs": {"a": {"b": 4}},
"expected": [("a", "b")]},
{"inputs": [{"a": 2}], "expected": [(0, "a")]},
{"inputs": [{"a": [2]}], "expected": [(0, "a", 0)]},
{"inputs": [{"a": [(23, 42)]}],
"expected": [(0, "a", 0, 0), (0, "a", 0, 1)]},
{"inputs": [{"a": ([23], 42)}],
"expected": [(0, "a", 0, 0), (0, "a", 1)]},
{"inputs": {"a": {"a": 2}, "c": [[[4]]]},
"expected": [("a", "a"), ("c", 0, 0, 0)]},
{"inputs": {"0": [{"1": 23}]},
"expected": [("0", 0, "1")]}):
inputs = inputs_expected["inputs"]
expected = inputs_expected["expected"]
self.assertEqual(list(nest.yield_flat_paths(inputs)), expected)
def testFlattenWithStringPaths(self):
for inputs_expected in (
{"inputs": [], "expected": []},
{"inputs": [23, "42"], "expected": [("0", 23), ("1", "42")]},
{"inputs": [[[[108]]]], "expected": [("0/0/0/0", 108)]}):
inputs = inputs_expected["inputs"]
expected = inputs_expected["expected"]
self.assertEqual(
nest.flatten_with_joined_string_paths(inputs, separator="/"),
expected)
# Need a separate test for namedtuple as we can't declare tuple definitions
# in the @parameterized arguments.
def testFlattenNamedTuple(self):
# pylint: disable=invalid-name
Foo = collections.namedtuple("Foo", ["a", "b"])
Bar = collections.namedtuple("Bar", ["c", "d"])
# pylint: enable=invalid-name
test_cases = [
(Foo(a=3, b=Bar(c=23, d=42)),
[("a", 3), ("b/c", 23), ("b/d", 42)]),
(Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="something")),
[("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "something")]),
(Bar(c=42, d=43),
[("c", 42), ("d", 43)]),
(Bar(c=[42], d=43),
[("c/0", 42), ("d", 43)]),
]
for inputs, expected in test_cases:
self.assertEqual(
list(nest.flatten_with_joined_string_paths(inputs)), expected)
@parameterized.named_parameters(
("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))),
("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True,
{"a": ("a", 4), "b": ("b", 6)}),
("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))),
("nested",
{"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True,
{"a": [("a/0", 10), ("a/1", 12)],
"b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]}))
def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected):
def format_sum(path, *values):
return (path, sum(values))
result = nest.map_structure_with_paths(format_sum, s1, s2,
check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters(
("tuples", (1, 2), (3, 4, 5), ValueError),
("dicts", {"a": 1}, {"b": 2}, ValueError),
("mixed", (1, 2), [3, 4], TypeError),
("nested",
{"a": [2, 3], "b": [1, 3]},
{"b": [5, 6, 7], "a": [8, 9]},
ValueError
))
def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_paths(lambda path, *s: 0, s1, s2)
class NestBenchmark(test.Benchmark):
def run_and_report(self, s1, s2, name):
burn_iter, test_iter = 100, 30000
for _ in xrange(burn_iter):
nest.assert_same_structure(s1, s2)
t0 = time.time()
for _ in xrange(test_iter):
nest.assert_same_structure(s1, s2)
t1 = time.time()
self.report_benchmark(iters=test_iter, wall_time=(t1 - t0) / test_iter,
name=name)
def benchmark_assert_structure(self):
s1 = (((1, 2), 3), 4, (5, 6))
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
self.run_and_report(s1, s2, "assert_same_structure_6_elem")
s1 = (((1, 2), 3), 4, (5, 6)) * 10
s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6")) * 10
self.run_and_report(s1, s2, "assert_same_structure_60_elem")
if __name__ == "__main__":
test.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from barbicanclient import client as barbicanclient
from glanceclient.v2 import client as glanceclient
from heatclient.v1 import client as heatclient
import mock
from oslo_config import cfg
from magnum.common import clients
from magnum.common import exception
from magnum.tests import base
class ClientsTest(base.BaseTestCase):
def setUp(self):
super(ClientsTest, self).setUp()
cfg.CONF.set_override('auth_uri', 'http://server.test:5000/v2.0',
group='keystone_authtoken')
@mock.patch.object(clients.OpenStackClients, 'keystone')
def test_url_for(self, mock_keystone):
obj = clients.OpenStackClients(None)
obj.url_for(service_type='fake_service', endpoint_type='fake_endpoint')
mock_cat = mock_keystone.return_value.client.service_catalog
mock_cat.url_for.assert_called_once_with(service_type='fake_service',
endpoint_type='fake_endpoint')
@mock.patch.object(clients.OpenStackClients, 'keystone')
def test_magnum_url(self, mock_keystone):
fake_region = 'fake_region'
fake_endpoint = 'fake_endpoint'
cfg.CONF.set_override('region_name', fake_region,
group='magnum_client')
cfg.CONF.set_override('endpoint_type', fake_endpoint,
group='magnum_client')
obj = clients.OpenStackClients(None)
obj.magnum_url()
mock_cat = mock_keystone.return_value.client.service_catalog
mock_cat.url_for.assert_called_once_with(region_name=fake_region,
service_type='container',
endpoint_type=fake_endpoint)
@mock.patch.object(heatclient, 'Client')
@mock.patch.object(clients.OpenStackClients, 'url_for')
@mock.patch.object(clients.OpenStackClients, 'auth_url')
def _test_clients_heat(self, expected_region_name, mock_auth, mock_url,
mock_call):
mock_auth.__get__ = mock.Mock(return_value="keystone_url")
con = mock.MagicMock()
con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
con.auth_url = "keystone_url"
mock_url.return_value = "url_from_keystone"
obj = clients.OpenStackClients(con)
obj._heat = None
obj.heat()
mock_call.assert_called_once_with(
endpoint='url_from_keystone', username=None,
cert_file=None, token='3bcc3d3a03f44e3d8377f9247b0ad155',
auth_url='keystone_url', ca_file=None, key_file=None,
password=None, insecure=False)
mock_url.assert_called_once_with(service_type='orchestration',
endpoint_type='publicURL',
region_name=expected_region_name)
def test_clients_heat(self):
self._test_clients_heat(None)
def test_clients_heat_region(self):
cfg.CONF.set_override('region_name', 'myregion', group='heat_client')
self._test_clients_heat('myregion')
def test_clients_heat_noauth(self):
con = mock.MagicMock()
con.auth_token = None
con.auth_token_info = None
auth_url = mock.PropertyMock(name="auth_url",
return_value="keystone_url")
type(con).auth_url = auth_url
con.get_url_for = mock.Mock(name="get_url_for")
con.get_url_for.return_value = "url_from_keystone"
obj = clients.OpenStackClients(con)
obj._heat = None
self.assertRaises(exception.AuthorizationFailure, obj.heat)
@mock.patch.object(clients.OpenStackClients, 'url_for')
@mock.patch.object(clients.OpenStackClients, 'auth_url')
def test_clients_heat_cached(self, mock_auth, mock_url):
mock_auth.__get__ = mock.Mock(return_value="keystone_url")
con = mock.MagicMock()
con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
con.auth_url = "keystone_url"
mock_url.return_value = "url_from_keystone"
obj = clients.OpenStackClients(con)
obj._heat = None
heat = obj.heat()
heat_cached = obj.heat()
self.assertEqual(heat, heat_cached)
@mock.patch.object(glanceclient, 'Client')
@mock.patch.object(clients.OpenStackClients, 'url_for')
@mock.patch.object(clients.OpenStackClients, 'auth_url')
def _test_clients_glance(self, expected_region_name, mock_auth, mock_url,
mock_call):
mock_auth.__get__ = mock.Mock(return_value="keystone_url")
con = mock.MagicMock()
con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
con.auth_url = "keystone_url"
mock_url.return_value = "url_from_keystone"
obj = clients.OpenStackClients(con)
obj._glance = None
obj.glance()
mock_call.assert_called_once_with(
endpoint='url_from_keystone', username=None,
token='3bcc3d3a03f44e3d8377f9247b0ad155',
auth_url='keystone_url',
password=None)
mock_url.assert_called_once_with(service_type='image',
endpoint_type='publicURL',
region_name=expected_region_name)
def test_clients_glance(self):
self._test_clients_glance(None)
def test_clients_glance_region(self):
cfg.CONF.set_override('region_name', 'myregion', group='glance_client')
self._test_clients_glance('myregion')
def test_clients_glance_noauth(self):
con = mock.MagicMock()
con.auth_token = None
con.auth_token_info = None
auth_url = mock.PropertyMock(name="auth_url",
return_value="keystone_url")
type(con).auth_url = auth_url
con.get_url_for = mock.Mock(name="get_url_for")
con.get_url_for.return_value = "url_from_keystone"
obj = clients.OpenStackClients(con)
obj._glance = None
self.assertRaises(exception.AuthorizationFailure, obj.glance)
@mock.patch.object(clients.OpenStackClients, 'url_for')
@mock.patch.object(clients.OpenStackClients, 'auth_url')
def test_clients_glance_cached(self, mock_auth, mock_url):
mock_auth.__get__ = mock.Mock(return_value="keystone_url")
con = mock.MagicMock()
con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
con.auth_url = "keystone_url"
mock_url.return_value = "url_from_keystone"
obj = clients.OpenStackClients(con)
obj._glance = None
glance = obj.glance()
glance_cached = obj.glance()
self.assertEqual(glance, glance_cached)
@mock.patch.object(clients.OpenStackClients, 'keystone')
@mock.patch.object(barbicanclient, 'Client')
@mock.patch.object(clients.OpenStackClients, 'url_for')
def _test_clients_barbican(self, expected_region_name, mock_url,
mock_call, mock_keystone):
con = mock.MagicMock()
con.auth_url = "keystone_url"
mock_url.return_value = "url_from_keystone"
keystone = mock.MagicMock()
keystone.session = mock.MagicMock()
mock_keystone.return_value = keystone
obj = clients.OpenStackClients(con)
obj._barbican = None
obj.barbican()
mock_call.assert_called_once_with(
endpoint='url_from_keystone',
session=keystone.session)
mock_keystone.assert_called_once_with()
mock_url.assert_called_once_with(service_type='key-manager',
endpoint_type='publicURL',
region_name=expected_region_name)
def test_clients_barbican(self):
self._test_clients_barbican(None)
def test_clients_barbican_region(self):
cfg.CONF.set_override('region_name', 'myregion',
group='barbican_client')
self._test_clients_barbican('myregion')
def test_clients_barbican_noauth(self):
con = mock.MagicMock()
con.auth_token = None
con.auth_token_info = None
auth_url = mock.PropertyMock(name="auth_url",
return_value="keystone_url")
type(con).auth_url = auth_url
con.get_url_for = mock.Mock(name="get_url_for")
con.get_url_for.return_value = "url_from_keystone"
obj = clients.OpenStackClients(con)
obj._barbican = None
self.assertRaises(exception.AuthorizationFailure, obj.barbican)
@mock.patch.object(clients.OpenStackClients, 'keystone')
@mock.patch.object(clients.OpenStackClients, 'url_for')
def test_clients_barbican_cached(self, mock_url, mock_keystone):
con = mock.MagicMock()
con.auth_url = "keystone_url"
mock_url.return_value = "url_from_keystone"
keystone = mock.MagicMock()
keystone.session = mock.MagicMock()
mock_keystone.return_value = keystone
obj = clients.OpenStackClients(con)
obj._barbican = None
barbican = obj.barbican()
barbican_cached = obj.barbican()
self.assertEqual(barbican, barbican_cached)
|
|
# Note that the dataset must already be downloaded for this script to work; do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# Get the current script's base file name (sys.argv[0] without its extension)
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
# FLAG indicating whether this run is the training process or not.
FLAG = 'train'
N_HIDDEN_CONFIG = 32
save_path_name = file_name + "/model.ckpt"
print(datetime.datetime.now())
# Write to file: time to start, type, time to end
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
    # These are the separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
    # Output classes the network learns to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
                row.replace('  ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
>> > x = np.arange(4).reshape((2, 2))
>> > x
array([[0, 1],
[2, 3]])
>> > np.transpose(x)
array([[0, 2],
[1, 3]])
>> > x = np.ones((1, 2, 3))
>> > np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
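    # The transposed result has shape [n_samples, n_timesteps, n_signals],
    # i.e. [7352, 128, 9] for the training set loaded below (see the prints).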
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
    X_test = load_X(X_test_signals_paths)  # [2947, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label ,shape: [sample_num,1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
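    # Illustrative example (values assumed for demonstration):
    #   one_hot(np.array([[0], [2]])) -> [[1., 0., 0.], [0., 0., 1.]]
    # since np.max(...) + 1 = 3 columns are created from the largest label.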
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
                row.replace('  ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
        # Subtract 1 from each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
"""
define a class to store parameters,
the input should be feature mat of training and testing
"""
def __init__(self, X_train, X_test):
# Input data
            self.train_count = len(X_train)  # 7352 training series
            self.test_data_count = len(X_test)  # 2947 testing series
            self.n_steps = len(X_train[0])  # 32 time steps per series (inputs were reshaped to [-1, 32, 36])
# Training
self.learning_rate = 0.0025
self.lambda_loss_amount = 0.0015
self.training_epochs = 300
self.batch_size = 1000
# LSTM structure
            self.n_inputs = len(X_train[0][0])  # 36 features per time step after the reshape (originally 9: three 3D sensors)
self.n_hidden = N_HIDDEN_CONFIG # nb of neurons inside the neural network
self.n_classes = 6 # Final output classes
self.W = {
'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [9, 32]
'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [32, 6]
}
self.biases = {
'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [32]
'output': tf.Variable(tf.random_normal([self.n_classes])) # [6]
}
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
    # [None, 32, 36] after the reshape (config.n_steps=32, config.n_inputs=36)
    X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
    # [None, 6]
    Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
        # Returns a random value drawn from a truncated normal distribution
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape, name='Bias')
        return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
        # For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1] `.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1], padding='SAME', name='max_pool')
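    # With stride 1 and 'SAME' padding, both conv2d and max_pool_2x2 preserve
    # spatial dimensions, so tensors below keep their [batch, 32, 36, C] layout.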
def LSTM_Network(feature_mat, config):
"""model a LSTM Network,
it stacks 2 LSTM layers, each layer has n_hidden=32 cells
and 1 output layer, it is a full connet layer
argument:
feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
config: class containing config of network
return:
: matrix output shape [batch_size,n_classes]
"""
W_conv1 = weight_variable([3, 3, 1, 64])
        b_conv1 = bias_variable([64])
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
print("----feature_mat_image-----")
print(feature_mat_image.get_shape())
h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
h_pool1 = h_conv1
# Second Convolutional Layer
W_conv2 = weight_variable([3, 3, 64, 1])
b_conv2 = weight_variable([1])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = h_conv2
h_pool2 = tf.reshape(h_pool2, shape=[-1, 32, 36])
feature_mat = h_pool2
print("----feature_mat-----")
print(feature_mat)
# exit()
# W_fc1 = weight_variable([8 * 9 * 1, 1024])
        # b_fc1 = bias_variable([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print("----h_fc1_drop-----")
# print(h_fc1)
# exit()
#
# # keep_prob = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(1.0)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
# print("----h_fc1_drop-----")
# print(h_fc1_drop)
# exit()
#
# W_fc2 = weight_variable([1024, 10])
        # b_fc2 = bias_variable([10])
#
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print("----y_conv-----")
# print(y_conv)
# exit()
        # Exchange dim 0 and dim 1
        # Shape here: [batch_size, 32, 36] (the conv stack preserved the reshaped input's layout)
        feature_mat = tf.transpose(feature_mat, [1, 0, 2])
        # New feature_mat shape: [time_steps, batch_size, n_inputs] = [32, batch_size, 36]
print("----feature_mat-----")
print(feature_mat)
# exit()
# Temporarily crush the feature_mat's dimensions
        feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs])  # n_inputs = 36
        # New feature_mat shape: [time_steps*batch_size, n_inputs] = [32*batch_size, 36]
# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
feature_mat, config.W['hidden']
) + config.biases['hidden'])
        # New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] = [32*batch_size, 32]
print("--n_steps--")
print(config.n_steps)
print("--hidden--")
print(hidden)
        # Split the series because the rnn cell needs a list of time_steps tensors, each of shape [batch_size, n_hidden]:
        hidden = tf.split(0, config.n_steps, hidden)  # splits axis 0 into n_steps=32 tensors of [batch_size, 32]
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
        # Stack two LSTM layers; both layers have the same shape
        lstm_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
        # Get LSTM outputs; the states are internal to the LSTM cells and not needed here
        outputs, _ = tf.nn.rnn(lstm_layers, hidden, dtype=tf.float32)
        # outputs' shape: a list of length "time_steps" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32]
print("------------------last outputs-------------------")
        print(lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
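    # Pipeline summary (shapes inferred from the code above): input [batch, 32, 36]
    # -> shape-preserving conv stack -> transpose to [32, batch, 36] -> dense
    # 'hidden' layer -> 32 steps of [batch, 32] -> 2-layer LSTM -> last step's
    # output -> logits of shape [batch, 6].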
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
    # Loss, train_step, evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if FLAG == 'train':  # training mode
with tf.Session() as sess:
# tf.initialize_all_variables().run()
sess.run(init) # .run()
f.write("---Save model \n")
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1500)
range(config.batch_size, config.train_count + 1,
config.batch_size)): # (1500, 7353, 1500)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("traing iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
# Save the model in this session
save_path = saver.save(sess, file_name + "/model.ckpt")
print("Model saved in file: %s" % save_path)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else :
# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
f.write("---Restore model \n")
# Restore model weights from previously saved model
saver.restore(sess, file_name+ "/model.ckpt")
print("Model restored from file: %s" % save_path_name)
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
# print("traing iter: {}," + \
# " test accuracy : {},".format(accuracy_out) + \
# " loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
f.write("Ended at \n")
f.write(str(datetime.datetime.now())+'\n')
f.write("------------- \n")
f.close()
|
|
from datetime import datetime, timedelta
from random import shuffle
from bin import yt_title
import re
def create_yt_url(_youtube_id):
return 'https://www.youtube.com/watch?v={}'.format(_youtube_id)
def usage_msg(_cmd, _mention, _cmdchar):
return ':warning: {} **USAGE:** {}{}'.format(_mention, _cmdchar, _cmd)
def get_youtube_id(_url):
regex = re.compile(r'(https?://)?(www\.)?(youtube|youtu|youtube-nocookie)\.'
r'(com|be)/(watch\?v=|embed/|v/|.+\?v=)?(?P<id>[A-Za-z0-9\-=_]{11})')
match = regex.match(_url)
if match:
return match.group('id')
return None
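# Illustrative: both long and short URL forms resolve to the same 11-character id, e.g.
#   get_youtube_id('https://www.youtube.com/watch?v=dQw4w9WgXcQ') -> 'dQw4w9WgXcQ'
#   get_youtube_id('https://youtu.be/dQw4w9WgXcQ') -> 'dQw4w9WgXcQ'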
def name_exists(_db_ex, _name):
for row in _db_ex.execute('SELECT name FROM playlist WHERE name=?;', (_name, )):
return True
return False
def song_exists(_db_ex, _youtube_id):
for row in _db_ex.execute('SELECT name FROM song WHERE youtube_id=?;', (_youtube_id, )):
return True
return False
def song_exists_in_playlist(_db_ex, _name, _youtube_id):
for row in _db_ex.execute('SELECT * FROM playlist_song WHERE name=? AND youtube_id=?;', (_name, _youtube_id)):
return True
return False
def name_valid(_name):
    if re.match(r'^[\w\d_-]*$', _name):
return True
return False
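# Illustrative: name_valid('late-night_mix2') -> True; name_valid('no spaces!') -> False.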
def has_permission(_db_ex, _name, _channel, _author):
owner = ''
for row in _db_ex.execute('SELECT owner FROM playlist WHERE name=?;', (_name, )):
owner = row[0]
if owner == _author.id:
return True
elif _channel.permissions_for(_author).administrator:
return True
return False
def announce_song(_status, _curr_song_duration, _curr_song_end, _curr_song_title):
diff = _curr_song_duration - (_curr_song_end - datetime.now()).seconds
diff_min, diff_sec = divmod(diff, 60)
dur_min, dur_sec = divmod(_curr_song_duration, 60)
if _status == 0: # Not playing
return '''**Song Paused:**
```markdown
[{}:{:02}/{}:{:02}]: {}```'''.format(diff_min, diff_sec, dur_min, dur_sec, _curr_song_title)
elif _status == 1: # Is playing
return '''**Song Playing:**
```markdown
[{}:{:02}/{}:{:02}]: {}```'''.format(diff_min, diff_sec, dur_min, dur_sec, _curr_song_title)
else: # Playing next
return '''**Playing Next Song:**
```markdown
[{}:{:02}/{}:{:02}]: {}```'''.format(diff_min, diff_sec, dur_min, dur_sec, _curr_song_title)
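# Illustrative output for _status == 1, 42 seconds into a 3:05 song titled 'Example Song':
#   **Song Playing:**
#   ```markdown
#   [0:42/3:05]: Example Song```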
class YTSong:
def __init__(self):
self.youtube_id = None
self.title = None
def set(self, _youtube_id):
valid, title = yt_title.get_yt_title(_youtube_id)
if valid:
self.youtube_id = _youtube_id
self.title = title
return True
return False
def full_set(self, _youtube_id, _title):
self.youtube_id = _youtube_id
self.title = _title
def get_youtube(self):
return create_yt_url(self.youtube_id)
class MusicPlayer:
def __init__(self):
self.dclient = None
self.text_channel = None
self.voice_channel = None
self.voice_manager = None
self.playing = False
self.player = None
self.curr_song = None
self.curr_song_duration = -1
self.curr_song_end = None
self.queue_spot = -1
self.song_queue = list()
self.mus_playlistnamecharlimit = None
self.cmdchar = None
self.db = None
self.db_ex = None
async def set(self, _dclient, _text_channel, _voice_channel, _mus_playlistnamecharlimit, _cmdchar, _db, _db_ex):
try:
self.dclient = _dclient
self.text_channel = _text_channel
self.voice_channel = _voice_channel
self.voice_manager = await self.dclient.join_voice_channel(self.voice_channel)
self.mus_playlistnamecharlimit = _mus_playlistnamecharlimit
self.cmdchar = _cmdchar
self.db = _db
self.db_ex = _db_ex
return True
except Exception:
return False
async def auto_switch_song(self):
if self.playing:
if self.player is not None:
if self.player.is_done():
if len(self.song_queue) > 0:
self.queue_spot += 1
if self.queue_spot >= len(self.song_queue):
self.queue_spot = 0
self.player.stop()
self.curr_song = self.song_queue[self.queue_spot]
self.player = None
self.player = await self.voice_manager.create_ytdl_player(self.curr_song.get_youtube())
self.curr_song_duration = self.player.duration
self.curr_song_end = datetime.now() + timedelta(seconds=self.player.duration)
self.player.start()
await self.dclient.send_message(self.text_channel, announce_song(2, self.curr_song_duration,
self.curr_song_end,
self.curr_song.title))
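    # auto_switch_song is meant to be polled externally: when the current player
    # finishes, it advances queue_spot (wrapping to 0 at the end of the queue),
    # spins up a fresh ytdl player, and announces the new track.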
# -play, -play <youtube_url>
async def play(self, _msg, _mention):
_msg = _msg.split(' ')
# -play
if len(_msg) == 1:
if self.playing:
if self.player.is_playing():
await self.dclient.send_message(self.text_channel, ':warning: {} There\'s already a song playing '
'right now!'.format(_mention))
else:
if self.player.is_done():
if len(self.song_queue) > 0:
self.player.stop()
self.curr_song = self.song_queue[self.queue_spot]
self.player = None
self.player = await self.voice_manager.create_ytdl_player(self.curr_song.get_youtube())
self.curr_song_duration = self.player.duration
self.curr_song_end = datetime.now() + timedelta(seconds=self.player.duration)
self.player.start()
self.playing = True
await self.dclient.send_message(self.text_channel, announce_song(1, self.curr_song_duration,
self.curr_song_end,
self.curr_song.title))
else:
await self.dclient.send_message(self.text_channel,
':warning: {} There\'s no queued songs to '
'play!'.format(_mention))
else:
self.player.resume()
await self.dclient.send_message(self.text_channel, announce_song(1, self.curr_song_duration,
self.curr_song_end,
self.curr_song.title))
else:
if len(self.song_queue) > 0:
self.curr_song = self.song_queue[self.queue_spot]
self.player = None
self.player = await self.voice_manager.create_ytdl_player(self.curr_song.get_youtube())
self.curr_song_duration = self.player.duration
self.curr_song_end = datetime.now() + timedelta(seconds=self.player.duration)
self.player.start()
self.playing = True
await self.dclient.send_message(self.text_channel, announce_song(1, self.curr_song_duration,
self.curr_song_end,
self.curr_song.title))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} There\'s no queued songs to '
'play!'.format(_mention))
# -play <youtube_url>
elif len(_msg) == 2:
youtube_id = get_youtube_id(_msg[1])
if youtube_id is None:
await self.dclient.send_message(self.text_channel, ':warning: {} Invalid YouTube URL!'.format(_mention))
else:
new_song = YTSong()
if new_song.set(youtube_id):
if self.player is not None:
self.player.stop()
self.curr_song = new_song
self.player = None
self.player = await self.voice_manager.create_ytdl_player(self.curr_song.get_youtube())
self.curr_song_duration = self.player.duration
self.curr_song_end = datetime.now() + timedelta(seconds=self.player.duration)
self.player.start()
self.playing = True
await self.dclient.send_message(self.text_channel, announce_song(1, self.curr_song_duration,
self.curr_song_end,
self.curr_song.title))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} This YouTube video doesn\'t exist!'
.format(_mention))
else:
await self.dclient.send_message(self.text_channel, usage_msg('play <youtube_url>', _mention, self.cmdchar))
# -playing
async def play_status(self, _mention):
if self.playing:
if self.player.is_playing():
await self.dclient.send_message(self.text_channel, announce_song(1, self.curr_song_duration,
self.curr_song_end,
self.curr_song.title))
else:
await self.dclient.send_message(self.text_channel, announce_song(0, self.curr_song_duration,
self.curr_song_end,
self.curr_song.title))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} There\'s no song playing right now!'
.format(_mention))
# -pause
async def pause(self, _mention):
if self.playing:
if self.player.is_playing():
self.player.pause()
await self.dclient.send_message(self.text_channel, ':pause_button: Current song paused!')
else:
await self.dclient.send_message(self.text_channel, ':warning: {} Current song already paused!'
.format(_mention))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} There\'s no song in play/pause mode right '
'now!'.format(_mention))
# -stop
async def stop(self, _mention):
if self.playing:
self.playing = False
self.player.stop()
self.player = None
self.song_queue = list()
self.queue_spot = -1
self.curr_song = None
            self.curr_song_duration = -1
self.curr_song_end = None
await self.dclient.send_message(self.text_channel, ':octagonal_sign: Current song has been stopped and '
'removed!')
else:
await self.dclient.send_message(self.text_channel, ':warning: {} There\'s no song in play/pause mode right '
'now!'.format(_mention))
# -shuffle
async def shuffle(self, _mention):
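        """Shuffle the queued list, keeping the song at the current queue
        position at the front. Note: the shuffle only runs while nothing is
        playing; a shuffle request during playback is silently ignored.
        """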
if len(self.song_queue) > 2:
if not self.playing:
self.curr_song = self.song_queue[self.queue_spot]
del self.song_queue[self.queue_spot]
shuffle(self.song_queue)
self.song_queue.insert(0, self.curr_song)
self.queue_spot = 0
await self.dclient.send_message(self.text_channel, 'Active playlist has been shuffled!')
else:
            await self.dclient.send_message(self.text_channel, ':warning: {} Can\'t shuffle a queued list with '
                                                               'fewer than 3 songs!'.format(_mention))
# -queue <youtube_url>
async def queue(self, _msg, _mention):
_msg = _msg.split(' ')
if len(_msg) == 2:
youtube_id = get_youtube_id(_msg[1])
if youtube_id is None:
                await self.dclient.send_message(self.text_channel, ':warning: {} Invalid YouTube URL!'
                                                .format(_mention))
else:
new_song = YTSong()
if new_song.set(youtube_id):
self.song_queue.append(new_song)
await self.dclient.send_message(self.text_channel, '''**Next song in queue:**
```markdown
{}```'''.format(new_song.title))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} This YouTube video doesn\'t exist!'
.format(_mention))
else:
await self.dclient.send_message(self.text_channel, usage_msg('queue <youtube_url>', _mention, self.cmdchar))
# -clear
async def clear(self):
if self.player is None:
self.song_queue = list()
await self.dclient.send_message(self.text_channel, 'Active playlist/queued list has been cleared!')
else:
self.song_queue = list()
self.song_queue.append(self.curr_song)
self.queue_spot = 0
await self.dclient.send_message(self.text_channel, 'Active playlist/queued list has been cleared except for'
' the current song in play/pause mode!')
# -skip
async def skip(self, _mention):
if self.playing:
if len(self.song_queue) > 1:
if self.player.is_playing():
self.player.stop()
self.queue_spot += 1
if self.queue_spot >= len(self.song_queue):
self.queue_spot = 0
self.curr_song = self.song_queue[self.queue_spot]
self.player = None
self.player = await self.voice_manager.create_ytdl_player(self.curr_song.get_youtube())
self.curr_song_duration = self.player.duration
self.curr_song_end = datetime.now() + timedelta(seconds=self.player.duration)
self.player.start()
await self.dclient.send_message(self.text_channel, announce_song(1, self.curr_song_duration,
self.curr_song_end,
self.curr_song.title))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} There\'s only one song in the queued '
'list!'.format(_mention))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} There are no songs playing right now!'
.format(_mention))
# -volume <#>
async def volume(self, _msg, _mention):
_msg = _msg.split(' ')
if len(_msg) == 2:
try:
level = int(_msg[1])
if 0 <= level <= 200:
self.player.volume = level / 100.0
await self.dclient.send_message(self.text_channel, 'Volume has been set to `{}%`!'.format(level))
                else:
                    await self.dclient.send_message(self.text_channel, ':warning: {} `{}` % value must be between '
                                                                       '`0` and `200`!'.format(_mention, level))
except ValueError:
await self.dclient.send_message(self.text_channel, ':warning: {} Invalid input! The value must be '
'numeric.'.format(_mention))
else:
await self.dclient.send_message(self.text_channel, usage_msg('volume <#>', _mention, self.cmdchar))
# -playlist, -playlist all, -playlist list <name>, -playlist create <name>, -playlist remove <name>,
# -playlist edit <name> add <youtube_url>, -playlist edit <name> remove <youtube_id>, -playlist empty <name>,
# -playlist load <name>
async def playlist(self, _msg, _mention, _author):
_msg = _msg.split(' ')
# -playlist
if len(_msg) == 1:
if len(self.song_queue) > 0:
response = '''**Active Playlist/Queued List:**
```css
'''
count = 0
for song in self.song_queue:
if count == self.queue_spot:
response += '''> {:5} {}
'''.format('[' + str(count + 1) + ']:', song.title)
else:
response += ''' {:5} {}
'''.format('[' + str(count + 1) + ']:', song.title)
count += 1
response += '''```'''
await self.dclient.send_message(self.text_channel, response)
else:
await self.dclient.send_message(self.text_channel, ':warning: {} There are no songs in the active '
'playlist/queued list!'.format(_mention))
else:
# -playlist all
if _msg[1].lower() == 'all':
names = list()
for row in self.db_ex.execute('SELECT name FROM playlist;'):
names.append(row[0])
if len(names) > 0:
await self.dclient.send_message(self.text_channel, '''**List of Playlists:**
```{}```'''.format(', '.join(names)))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} There\'s no playlists made!'
.format(_mention))
# -playlist list <name>
elif _msg[1].lower() == 'list':
if len(_msg) == 3:
name = _msg[2].lower()
if name_exists(self.db_ex, name):
songs = {}
for row in self.db_ex.execute('SELECT song.youtube_id, song.name FROM song JOIN playlist_song '
'ON song.youtube_id=playlist_song.youtube_id AND '
'playlist_song.name=?;', (name, )):
songs[row[0]] = row[1]
if len(songs) > 0:
response = '''**Playlist `{}`:**
```css
'''.format(name)
for youtube_id, title in songs.items():
response += '''{:13} {}
'''.format('[' + youtube_id + ']:', title)
response += '''```'''
await self.dclient.send_message(self.text_channel, response)
else:
await self.dclient.send_message(self.text_channel, ':warning: {} Playlist `{}` is empty!'
.format(_mention, name))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} Playlist `{}` doesn\'t exist!'
.format(_mention, name))
else:
await self.dclient.send_message(self.text_channel, usage_msg('playlist list <name>', _mention,
self.cmdchar))
# -playlist load <name>
elif _msg[1].lower() == 'load':
if len(_msg) == 3:
name = _msg[2].lower()
if name_exists(self.db_ex, name):
songs = {}
for row in self.db_ex.execute('SELECT song.youtube_id, song.name FROM song JOIN playlist_song '
'ON song.youtube_id=playlist_song.youtube_id AND '
'playlist_song.name=?;', (name,)):
songs[row[0]] = row[1]
if len(songs) > 0:
self.song_queue = list()
for youtube_id, title in songs.items():
song = YTSong()
song.full_set(youtube_id, title)
self.song_queue.append(song)
self.queue_spot = 0
if self.player is not None:
self.player.stop()
self.curr_song = self.song_queue[self.queue_spot]
self.player = await self.voice_manager.create_ytdl_player(self.curr_song.get_youtube())
self.curr_song_duration = self.player.duration
self.curr_song_end = datetime.now() + timedelta(seconds=self.player.duration)
self.player.start()
self.playing = True
await self.dclient.send_message(self.text_channel, 'Playlist `{}` has been loaded!'
.format(name))
await self.dclient.send_message(self.text_channel, announce_song(1, self.curr_song_duration,
self.curr_song_end,
self.curr_song.title))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} Playlist `{}` is empty!'
.format(_mention, name))
else:
await self.dclient.send_message(self.text_channel, ':warning: {} Playlist `{}` doesn\'t exist!'
.format(_mention, name))
else:
await self.dclient.send_message(self.text_channel, usage_msg('playlist load <name>', _mention,
self.cmdchar))
# -playlist create <name>
elif _msg[1].lower() == 'create':
if len(_msg) == 3:
name = _msg[2].lower()
if name_exists(self.db_ex, name):
await self.dclient.send_message(self.text_channel, ':warning: {} Playlist `{}` already exists!'
.format(_mention, name))
else:
                        if len(name) <= self.mus_playlistnamecharlimit:
                            if name_valid(name):
                                self.db_ex.execute('INSERT INTO playlist VALUES (?, ?);', (name, _author.id))
                                self.db.commit()
                                await self.dclient.send_message(self.text_channel, 'Playlist `{}` has been created!'
                                                                .format(name))
                            else:
                                await self.dclient.send_message(self.text_channel,
                                                                ':warning: {} Playlist names can only contain '
                                                                '`numbers`, `letters`, and `_` & `-`.'
                                                                .format(_mention))
                        else:
                            await self.dclient.send_message(self.text_channel, ':warning: {} Name `{}` must be at '
                                                                               'most `{}` characters!'
                                                            .format(_mention, name, self.mus_playlistnamecharlimit))
else:
await self.dclient.send_message(self.text_channel, usage_msg('playlist create <name>', _mention,
self.cmdchar))
# -playlist remove <name>
elif _msg[1].lower() == 'remove':
if len(_msg) == 3:
name = _msg[2].lower()
                    if name_exists(self.db_ex, name):
                        # TODO use transaction
                        self.db_ex.execute('DELETE FROM playlist_song WHERE name=?;', (name, ))
                        self.db_ex.execute('DELETE FROM playlist WHERE name=? AND owner=?;', (name, _author.id))
                        self.db.commit()
                        await self.dclient.send_message(self.text_channel, 'Playlist `{}` has been removed!'
                                                        .format(name))
                    else:
                        await self.dclient.send_message(self.text_channel, ':warning: {} Playlist `{}` doesn\'t '
                                                                           'exist!'.format(_mention, name))
else:
await self.dclient.send_message(self.text_channel, usage_msg('playlist remove <name>', _mention,
self.cmdchar))
# -playlist empty <name>
elif _msg[1].lower() == 'empty':
if len(_msg) == 3:
name = _msg[2].lower()
                    if name_exists(self.db_ex, name):
                        if has_permission(self.db_ex, name, self.text_channel, _author):
                            self.db_ex.execute('DELETE FROM playlist_song WHERE name=?;', (name, ))
                            self.db.commit()
                            await self.dclient.send_message(self.text_channel, 'Playlist `{}` has been emptied!'
                                                            .format(name))
                        else:
                            await self.dclient.send_message(self.text_channel, ':warning: {} You do not own `{}` '
                                                                               'playlist!'.format(_mention, name))
                    else:
                        await self.dclient.send_message(self.text_channel, ':warning: {} Playlist `{}` doesn\'t '
                                                                           'exist!'.format(_mention, name))
else:
await self.dclient.send_message(self.text_channel, usage_msg('playlist empty <name>', _mention,
self.cmdchar))
# -playlist edit <name> add <youtube_url>
# -playlist edit <name> remove <youtube_id>
elif _msg[1].lower() == 'edit':
if len(_msg) == 5:
name = _msg[2].lower()
                    if name_exists(self.db_ex, name):
                        if has_permission(self.db_ex, name, self.text_channel, _author):
                            # -playlist edit <name> add <youtube_url>
                            if _msg[3].lower() == 'add':
                                youtube_id = get_youtube_id(_msg[4])
                                if youtube_id is None:
                                    await self.dclient.send_message(self.text_channel,
                                                                    ':warning: {} Invalid YouTube URL!'
                                                                    .format(_mention))
                                else:
                                    song = YTSong()
                                    if song.set(youtube_id):
                                        if not song_exists(self.db_ex, youtube_id):
                                            self.db_ex.execute('INSERT INTO song VALUES (?, ?);',
                                                               (youtube_id, song.title))
                                            self.db.commit()
                                        if song_exists_in_playlist(self.db_ex, name, youtube_id):
                                            await self.dclient.send_message(self.text_channel,
                                                                            ':warning: {} Song `{}` already exists '
                                                                            'in the `{}` playlist!'
                                                                            .format(_mention, song.title, name))
                                        else:
                                            self.db_ex.execute('INSERT INTO playlist_song VALUES (?, ?);',
                                                               (name, youtube_id))
                                            self.db.commit()
                                            await self.dclient.send_message(self.text_channel,
                                                                            'Song `{}` has been added to `{}` '
                                                                            'playlist!'.format(song.title, name))
                                    else:
                                        await self.dclient.send_message(self.text_channel,
                                                                        ':warning: {} This YouTube video doesn\'t '
                                                                        'exist!'.format(_mention))
                            # -playlist edit <name> remove <youtube_id>
                            elif _msg[3].lower() == 'remove':
                                youtube_id = _msg[4]
                                if song_exists_in_playlist(self.db_ex, name, youtube_id):
                                    # Delete the (name, youtube_id) row linking the song to this playlist.
                                    self.db_ex.execute('DELETE FROM playlist_song WHERE name=? AND youtube_id=?;',
                                                       (name, youtube_id))
                                    self.db.commit()
                                    valid, title = yt_title.get_yt_title(youtube_id)
                                    await self.dclient.send_message(self.text_channel,
                                                                    'Song `{}` has been removed from `{}` playlist!'
                                                                    .format(title, name))
                                else:
                                    await self.dclient.send_message(self.text_channel,
                                                                    ':warning: {} YouTube ID `{}` doesn\'t exist in'
                                                                    ' `{}` playlist!'.format(_mention, youtube_id,
                                                                                             name))
                            else:
                                await self.dclient.send_message(self.text_channel,
                                                                usage_msg('playlist edit <name> <add|remove> '
                                                                          '<youtube_url|youtube_id>', _mention,
                                                                          self.cmdchar))
                        else:
                            await self.dclient.send_message(self.text_channel, ':warning: {} You do not own `{}` '
                                                                               'playlist!'.format(_mention, name))
                    else:
                        await self.dclient.send_message(self.text_channel, ':warning: {} Playlist `{}` doesn\'t '
                                                                           'exist!'.format(_mention, name))
else:
await self.dclient.send_message(self.text_channel, usage_msg('playlist edit <name> <add|remove> '
'<youtube_url|youtube_id>', _mention,
self.cmdchar))
else:
await self.dclient.send_message(self.text_channel, usage_msg(
'playlist <all|list|load|create|remove|edit>', _mention, self.cmdchar))
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import sys
import mock
from mox3 import mox
from oslo_concurrency import processutils
from oslo_config import cfg
import testtools
from nova import exception
from nova import manager
from nova import objects
from nova.openstack.common import service as _service
from nova import rpc
from nova import service
from nova import test
from nova.tests.unit import utils
from nova import wsgi
test_service_opts = [
cfg.StrOpt("fake_manager",
default="nova.tests.unit.test_service.FakeManager",
help="Manager for testing"),
cfg.StrOpt("test_service_listen",
default='127.0.0.1',
help="Host to bind test service to"),
cfg.IntOpt("test_service_listen_port",
default=0,
help="Port number to bind test service to"),
]
CONF = cfg.CONF
CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
"""Fake manager for tests."""
def test_method(self):
return 'manager'
class ExtendedService(service.Service):
def test_method(self):
return 'service'
class ServiceManagerTestCase(test.NoDBTestCase):
"""Test cases for Services."""
def test_message_gets_to_manager(self):
serv = service.Service('test',
'test',
'test',
'nova.tests.unit.test_service.FakeManager')
self.assertEqual('manager', serv.test_method())
def test_override_manager_method(self):
serv = ExtendedService('test',
'test',
'test',
'nova.tests.unit.test_service.FakeManager')
self.assertEqual('service', serv.test_method())
def test_service_with_min_down_time(self):
# TODO(hanlind): This really tests code in the servicegroup api.
self.flags(service_down_time=10, report_interval=10)
service.Service('test',
'test',
'test',
'nova.tests.unit.test_service.FakeManager')
self.assertEqual(25, CONF.service_down_time)
class ServiceTestCase(test.NoDBTestCase):
"""Test cases for Services."""
def setUp(self):
super(ServiceTestCase, self).setUp()
self.host = 'foo'
self.binary = 'nova-fake'
self.topic = 'fake'
self.flags(use_local=True, group='conductor')
def test_create(self):
# NOTE(vish): Create was moved out of mox replay to make sure that
# the looping calls are created in StartService.
app = service.Service.create(host=self.host, binary=self.binary,
topic=self.topic)
self.assertTrue(app)
def _service_start_mocks(self):
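        # Stub out the service-record lookup/creation that Service.start()
        # performs before spinning up the RPC server.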
self.mox.StubOutWithMock(objects.Service, 'create')
self.mox.StubOutWithMock(objects.Service, 'get_by_host_and_binary')
objects.Service.get_by_host_and_binary(mox.IgnoreArg(), self.host,
self.binary)
objects.Service.create()
def test_init_and_start_hooks(self):
self.manager_mock = self.mox.CreateMock(FakeManager)
self.mox.StubOutWithMock(sys.modules[__name__],
'FakeManager', use_mock_anything=True)
self.mox.StubOutWithMock(self.manager_mock, 'init_host')
self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
FakeManager(host=self.host).AndReturn(self.manager_mock)
self.manager_mock.service_name = self.topic
self.manager_mock.additional_endpoints = []
# init_host is called before any service record is created
self.manager_mock.init_host()
self._service_start_mocks()
# pre_start_hook is called after service record is created,
# but before RPC consumer is created
self.manager_mock.pre_start_hook()
# post_start_hook is called after RPC consumer is created.
self.manager_mock.post_start_hook()
self.mox.ReplayAll()
serv = service.Service(self.host,
self.binary,
self.topic,
'nova.tests.unit.test_service.FakeManager')
serv.start()
def _test_service_check_create_race(self, ex):
self.manager_mock = self.mox.CreateMock(FakeManager)
self.mox.StubOutWithMock(sys.modules[__name__], 'FakeManager',
use_mock_anything=True)
self.mox.StubOutWithMock(self.manager_mock, 'init_host')
self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
self.mox.StubOutWithMock(objects.Service, 'create')
self.mox.StubOutWithMock(objects.Service, 'get_by_host_and_binary')
FakeManager(host=self.host).AndReturn(self.manager_mock)
# init_host is called before any service record is created
self.manager_mock.init_host()
objects.Service.get_by_host_and_binary(mox.IgnoreArg(), self.host,
self.binary)
objects.Service.create().AndRaise(ex)
class TestException(Exception):
pass
objects.Service.get_by_host_and_binary(
mox.IgnoreArg(), self.host, self.binary).AndRaise(TestException())
self.mox.ReplayAll()
serv = service.Service(self.host,
self.binary,
self.topic,
'nova.tests.unit.test_service.FakeManager')
self.assertRaises(TestException, serv.start)
def test_service_check_create_race_topic_exists(self):
ex = exception.ServiceTopicExists(host='foo', topic='bar')
self._test_service_check_create_race(ex)
def test_service_check_create_race_binary_exists(self):
ex = exception.ServiceBinaryExists(host='foo', binary='bar')
self._test_service_check_create_race(ex)
def test_parent_graceful_shutdown(self):
self.manager_mock = self.mox.CreateMock(FakeManager)
self.mox.StubOutWithMock(sys.modules[__name__],
'FakeManager', use_mock_anything=True)
self.mox.StubOutWithMock(self.manager_mock, 'init_host')
self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
self.mox.StubOutWithMock(_service.Service, 'stop')
FakeManager(host=self.host).AndReturn(self.manager_mock)
self.manager_mock.service_name = self.topic
self.manager_mock.additional_endpoints = []
# init_host is called before any service record is created
self.manager_mock.init_host()
self._service_start_mocks()
# pre_start_hook is called after service record is created,
# but before RPC consumer is created
self.manager_mock.pre_start_hook()
# post_start_hook is called after RPC consumer is created.
self.manager_mock.post_start_hook()
_service.Service.stop()
self.mox.ReplayAll()
serv = service.Service(self.host,
self.binary,
self.topic,
'nova.tests.unit.test_service.FakeManager')
serv.start()
serv.stop()
@mock.patch('nova.servicegroup.API')
@mock.patch('nova.objects.service.Service.get_by_host_and_binary')
def test_parent_graceful_shutdown_with_cleanup_host(
self, mock_svc_get_by_host_and_binary, mock_API):
mock_manager = mock.Mock()
serv = service.Service(self.host,
self.binary,
self.topic,
'nova.tests.unit.test_service.FakeManager')
serv.manager = mock_manager
serv.manager.additional_endpoints = []
serv.start()
serv.manager.init_host.assert_called_with()
serv.stop()
serv.manager.cleanup_host.assert_called_with()
@mock.patch('nova.servicegroup.API')
@mock.patch('nova.objects.service.Service.get_by_host_and_binary')
@mock.patch.object(rpc, 'get_server')
def test_service_stop_waits_for_rpcserver(
self, mock_rpc, mock_svc_get_by_host_and_binary, mock_API):
serv = service.Service(self.host,
self.binary,
self.topic,
'nova.tests.unit.test_service.FakeManager')
serv.start()
serv.stop()
serv.rpcserver.start.assert_called_once_with()
serv.rpcserver.stop.assert_called_once_with()
serv.rpcserver.wait.assert_called_once_with()
class TestWSGIService(test.TestCase):
def setUp(self):
super(TestWSGIService, self).setUp()
self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
def test_service_random_port(self):
test_service = service.WSGIService("test_service")
test_service.start()
self.assertNotEqual(0, test_service.port)
test_service.stop()
def test_workers_set_default(self):
test_service = service.WSGIService("osapi_compute")
self.assertEqual(test_service.workers, processutils.get_worker_count())
def test_workers_set_good_user_setting(self):
CONF.set_override('osapi_compute_workers', 8)
test_service = service.WSGIService("osapi_compute")
self.assertEqual(test_service.workers, 8)
def test_workers_set_zero_user_setting(self):
CONF.set_override('osapi_compute_workers', 0)
test_service = service.WSGIService("osapi_compute")
# If a value less than 1 is used, defaults to number of procs available
self.assertEqual(test_service.workers, processutils.get_worker_count())
def test_service_start_with_illegal_workers(self):
CONF.set_override("osapi_compute_workers", -1)
self.assertRaises(exception.InvalidInput,
service.WSGIService, "osapi_compute")
def test_openstack_compute_api_workers_set_default(self):
test_service = service.WSGIService("openstack_compute_api_v2")
self.assertEqual(test_service.workers, processutils.get_worker_count())
def test_openstack_compute_api_workers_set_good_user_setting(self):
CONF.set_override('osapi_compute_workers', 8)
test_service = service.WSGIService("openstack_compute_api_v2")
self.assertEqual(test_service.workers, 8)
def test_openstack_compute_api_workers_set_zero_user_setting(self):
CONF.set_override('osapi_compute_workers', 0)
test_service = service.WSGIService("openstack_compute_api_v2")
# If a value less than 1 is used, defaults to number of procs available
self.assertEqual(test_service.workers, processutils.get_worker_count())
def test_openstack_compute_api_service_start_with_illegal_workers(self):
CONF.set_override("osapi_compute_workers", -1)
self.assertRaises(exception.InvalidInput,
service.WSGIService, "openstack_compute_api_v2")
@testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
def test_service_random_port_with_ipv6(self):
CONF.set_default("test_service_listen", "::1")
test_service = service.WSGIService("test_service")
test_service.start()
self.assertEqual("::1", test_service.host)
self.assertNotEqual(0, test_service.port)
test_service.stop()
def test_reset_pool_size_to_default(self):
test_service = service.WSGIService("test_service")
test_service.start()
# Stopping the service, which in turn sets pool size to 0
test_service.stop()
self.assertEqual(test_service.server._pool.size, 0)
# Resetting pool size to default
test_service.reset()
test_service.start()
self.assertEqual(test_service.server._pool.size,
CONF.wsgi_default_pool_size)
class TestLauncher(test.NoDBTestCase):
@mock.patch.object(_service, 'launch')
def test_launch_app(self, mock_launch):
service._launcher = None
service.serve(mock.sentinel.service)
mock_launch.assert_called_once_with(mock.sentinel.service,
workers=None)
@mock.patch.object(_service, 'launch')
def test_launch_app_with_workers(self, mock_launch):
service._launcher = None
service.serve(mock.sentinel.service, workers=mock.sentinel.workers)
mock_launch.assert_called_once_with(mock.sentinel.service,
workers=mock.sentinel.workers)
@mock.patch.object(_service, 'launch')
def test_launch_app_more_than_once_raises(self, mock_launch):
service._launcher = None
service.serve(mock.sentinel.service)
self.assertRaises(RuntimeError, service.serve, mock.sentinel.service)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.testing import assert_allclose
from tvm import topi
from tvm.topi.util import get_const_tuple
import pytest
import numpy as np
def check_grad(
out, inputs, args=[], data_range=(-10, 10), desired_grads=None, assert_no_jacobian=True
):
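    """Build `out`, differentiate it w.r.t. `inputs` with te.gradient, and
    compare the computed gradients against `desired_grads` when given,
    otherwise against numerical gradients of out.sum().
    """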
inputs = inputs if isinstance(inputs, list) else [inputs]
def check_device(device, host="llvm"):
ctx = tvm.context(device, 0)
if not tvm.testing.device_enabled(host):
return
sout = te.create_schedule(out.op)
mout = tvm.build(sout, [out] + inputs + args)
out_shape = get_const_tuple(out.shape)
l, h = data_range
input_data = [
tvm.nd.array(
np.random.uniform(l, h, size=get_const_tuple(input.shape)).astype(input.dtype)
)
for input in inputs
]
arg_vals = [
tvm.nd.array(np.random.uniform(l, h, size=get_const_tuple(arg.shape)).astype(arg.dtype))
for arg in args
]
ones = topi.full_like(out, 1.0)
# we provide head to sum and reduce the output dimension,
# which equals to grad(out.sum(), inputs)
grads = te.gradient(out, inputs, head=ones)
grad_sched = te.create_schedule([grad.op for grad in grads])
mgrad = tvm.build(grad_sched, list(grads) + inputs + args)
if assert_no_jacobian:
# TODO(yzhliu): it is better to visit the expression and do assertion
lowered_ir = str(tvm.lower(grad_sched, list(grads) + inputs + args, simple_mode=True))
assert "jacobian" not in lowered_ir, lowered_ir
grad_data = [tvm.nd.empty(get_const_tuple(i.shape), g.dtype) for i, g in zip(inputs, grads)]
mgrad(*grad_data, *input_data, *arg_vals)
g_res = [g.asnumpy() for g in grad_data]
if desired_grads:
assert isinstance(desired_grads, list)
for actual, desired in zip(g_res, desired_grads):
assert_allclose(actual, desired, rtol=0.1, atol=1e-2)
else:
def forward(*in_data):
out_data = tvm.nd.empty(out_shape, out.dtype)
mout(out_data, *[tvm.nd.array(d) for d in list(in_data)])
return out_data.asnumpy().sum()
tvm.testing.check_numerical_grads(
forward, [d.asnumpy() for d in input_data + arg_vals], g_res
)
check_device("cpu")
def test_basic_operation():
np.random.seed(0)
shape = (10, 10)
x = te.var("x", dtype="float32")
k = te.reduce_axis((0, 10), name="k")
l = te.reduce_axis((0, 10), name="l")
A0 = te.placeholder(shape, name="A0")
A1 = te.placeholder(shape, name="A1")
zeros = np.zeros(shape)
B = te.compute(shape, lambda i, j: A0[i, j], name="B")
check_grad(B, [A0])
B = te.compute(shape, lambda i, j: A0[i, j] + A1[i, j], name="B")
check_grad(B, [A0, A1])
B = te.compute(shape, lambda i, j: A0[i, j] + A0[j, i], name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.floor(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.ceil(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.trunc(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: te.round(A0[i, j]), name="B")
check_grad(B, A0, desired_grads=[zeros])
B = te.compute(shape, lambda i, j: A0[i, j] + te.exp(A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.log(0.1 + te.abs(A0[i, j] + te.exp(A0[j, i]))), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sigmoid(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.tanh(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sqrt(A0[i, j] * A0[i, j] * A0[j, i]), name="B")
check_grad(B, A0, data_range=(0.1, 10))
B = te.compute(shape, lambda i, j: te.power(te.abs(A0[i, j]), A0[j, i]), name="B")
check_grad(B, A0, data_range=(-4, 4))
B = te.compute(shape, lambda i, j: A0[i, j] * A0[j, i], name="B")
check_grad(B, A0)
B = te.compute((10,), lambda i: te.sum(A0[i, k] * A0[k, i], axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.sum(A0[i, k] * A0[k, i] + 5, axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: te.max(A0[i, k] * A0[k, j] + 5, axis=k), name="B")
check_grad(B, A0)
B = te.compute(shape, lambda i, j: A0[i, j] * (A1[j, i] + A0[j, i]), name="B")
check_grad(B, [A0, A1])
B = te.compute(
shape, lambda i, j: te.sum(A0[k, k] - A0[te.min(j + k, 9), j] * A0[i, k], axis=k), name="B"
)
check_grad(B, A0)
def fcombine(x, y):
return x * y
def fidentity(t0):
return tvm.tir.const(1, t0)
prod = te.comm_reducer(fcombine, fidentity, name="prod")
B = te.compute((10, 10), lambda i, j: prod(A0[i, k] + A0[k, i], axis=k), name="B")
check_grad(B, A0)
X = te.placeholder((10,), name="X")
A = te.compute((10,), lambda i: X[i] + X[9 - i])
B = te.compute((10,), lambda i: X[i] * X[9 - i])
Y = topi.tensordot(A, B, 1)
check_grad(Y, X)
def test_topi():
X = te.placeholder((1, 2, 4, 4), name="X")
W = te.placeholder((5, 2, 3, 3), name="W")
W1 = te.placeholder((2, 5, 3, 3), name="W1")
W2 = te.placeholder((1,), name="W2")
R = topi.nn.conv2d(X, W, 1, 1, 1)
check_grad(R, [X, W])
R1 = topi.nn.conv2d(topi.nn.relu(R), W1, 1, 0, 1)
check_grad(R1, [X, W, W1])
R = topi.broadcast_to(W2, (5, 2, 3, 3))
check_grad(R, [W2])
R = topi.nn.conv2d(X, topi.broadcast_to(W2, (5, 2, 3, 3)), 1, 1, 1)
check_grad(R, [X, W2])
R = topi.nn.pool(X, [2, 2], [2, 2], [0, 0, 0, 0], "avg")
check_grad(R, X)
R = topi.nn.pool(X, [2, 2], [2, 2], [0, 0, 0, 0], "max")
check_grad(R, X)
X = te.placeholder((1, 2, 5, 5), name="X")
R = topi.reshape(X, (1, 32))
check_grad(R, [X])
X = te.placeholder((1, 2, 5, 5), name="X")
W = te.placeholder((2, 2, 3, 3), name="W")
S = topi.reshape(X, (1, 50))
check_grad(S, [X])
R = X + topi.nn.conv2d(X + topi.nn.conv2d(X, W, 1, 1, 1), W, 1, 1, 1)
check_grad(R, [X, W])
S = topi.nn.softmax(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.sigmoid(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.tanh(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
S = topi.nn.log_softmax(topi.reshape(R, (1, 50)))
check_grad(S, [X, W])
check_grad(S, [W], [X])
X = te.placeholder((1, 2, 3, 5), name="X")
Y = te.placeholder((1, 2, 7, 5), name="Y")
S = topi.concatenate((X, Y), 2)
check_grad(S, [X, Y])
X = te.placeholder((1, 2, 6, 5), name="X")
(S, R) = topi.split(X, 2, 2)
check_grad(S, [X])
check_grad(R, [X])
R1 = topi.concatenate((S, R), 2)
check_grad(R1, [X])
R2 = topi.concatenate((R, S), 2)
check_grad(R2, [X])
X = te.placeholder((4, 5), name="X")
I = te.placeholder((100,), name="I", dtype="int32")
R = topi.take(X, topi.abs(I))
check_grad(R, [X], [I])
W = te.placeholder((5, 5), name="W")
exps = topi.exp(topi.nn.dense(X, W))
sumexps = topi.sum(exps, axis=-1, keepdims=True)
R = exps / sumexps
check_grad(R, [X, W], data_range=(-1, 1))
def test_stride_dilation():
X = te.placeholder((1, 2, 10, 10), name="X")
W = te.placeholder((2, 2, 1, 1), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
W = te.placeholder((2, 2, 2, 2), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
W = te.placeholder((2, 2, 3, 3), name="W")
Y = topi.nn.conv2d(X, W, 1, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 1)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 2)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 1, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 2, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.conv2d(X, W, 3, 0, 3)
check_grad(Y, [X, W])
Y = topi.nn.pool(X, [1, 1], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool(X, [1, 1], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool(X, [1, 1], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool(X, [2, 2], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool(X, [2, 2], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool(X, [2, 2], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool(X, [3, 3], [1, 1], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool(X, [3, 3], [2, 2], [0, 0, 0, 0], "max")
check_grad(Y, [X])
Y = topi.nn.pool(X, [3, 3], [3, 3], [0, 0, 0, 0], "max")
check_grad(Y, [X])
@pytest.mark.xfail
def test_reduction_init():
np.random.seed(0)
shape = (10, 10)
k = te.reduce_axis((0, 10), name="k")
A0 = te.placeholder(shape, name="A0")
B = te.compute((10,), lambda i: te.sum(A0[i, k] * A0[k, i], axis=k, init=0.0), name="B")
check_grad(B, A0)
if __name__ == "__main__":
test_basic_operation()
test_topi()
test_stride_dilation()
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
try:
from unittest import mock
except ImportError: # pragma: NO PY3 COVER
import mock
import pytest
from google.api_core import exceptions as core_exceptions
from google.cloud.ndb import context as context_module
from google.cloud.ndb import exceptions
from google.cloud.ndb import tasklets
from google.cloud.ndb import _transaction
class Test_in_transaction:
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_false():
assert _transaction.in_transaction() is False
@staticmethod
def test_true(in_context):
with in_context.new(transaction=b"tx123").use():
assert _transaction.in_transaction() is True
class Test_transaction:
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_propagation_nested():
with pytest.raises(exceptions.BadRequestError):
_transaction.transaction(
None, propagation=context_module.TransactionOptions.NESTED
)
@staticmethod
def test_already_in_transaction(in_context):
with in_context.new(transaction=b"tx123").use():
with pytest.raises(NotImplementedError):
_transaction.transaction(None)
@staticmethod
def test_transaction_inherits_and_merges_cache(in_context):
original_cache = in_context.cache
in_context.cache["test"] = "original value"
with in_context.new(transaction=b"tx123").use() as new_context:
assert new_context.cache is not original_cache
assert new_context.cache["test"] == original_cache["test"]
new_context.cache["test"] = "new_value"
assert new_context.cache["test"] != original_cache["test"]
assert in_context.cache["test"] == "new_value"
@staticmethod
@mock.patch("google.cloud.ndb._transaction.transaction_async")
def test_success(transaction_async):
transaction_async.return_value.result.return_value = 42
assert _transaction.transaction("callback") == 42
transaction_async.assert_called_once_with(
"callback",
read_only=False,
retries=3,
join=False,
xg=True,
propagation=None,
)
class Test_transaction_async:
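    """Tests for transaction_async(), driving begin/commit/rollback through
    mocked _datastore_api futures."""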
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_success(_datastore_api):
context_module.get_context().cache["foo"] = "bar"
def callback():
# The transaction uses its own in-memory cache, which should be empty in
# the transaction context and not include the key set above.
context = context_module.get_context()
assert not context.cache
return "I tried, momma."
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
future = _transaction.transaction_async(callback)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_success_w_callbacks(_datastore_api):
context_module.get_context().cache["foo"] = "bar"
on_commit_callback = mock.Mock()
transaction_complete_callback = mock.Mock()
def callback():
# The transaction uses its own in-memory cache, which should be empty in
# the transaction context and not include the key set above.
context = context_module.get_context()
assert not context.cache
context.call_on_commit(on_commit_callback)
context.call_on_transaction_complete(transaction_complete_callback)
return "I tried, momma."
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
future = _transaction.transaction_async(callback)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
on_commit_callback.assert_called_once_with()
transaction_complete_callback.assert_called_once_with()
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_failure_w_callbacks(_datastore_api):
class SpuriousError(Exception):
pass
context_module.get_context().cache["foo"] = "bar"
on_commit_callback = mock.Mock()
transaction_complete_callback = mock.Mock()
def callback():
context = context_module.get_context()
assert not context.cache
context.call_on_commit(on_commit_callback)
context.call_on_transaction_complete(transaction_complete_callback)
raise SpuriousError()
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
rollback_future = tasklets.Future("rollback transaction")
_datastore_api.rollback.return_value = rollback_future
future = _transaction.transaction_async(callback)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
_datastore_api.commit.assert_not_called()
_datastore_api.rollback.assert_called_once_with(b"tx123")
rollback_future.set_result(None)
with pytest.raises(SpuriousError):
future.result()
on_commit_callback.assert_not_called()
transaction_complete_callback.assert_called_once_with()
@staticmethod
def test_success_join(in_context):
def callback():
return "I tried, momma."
with in_context.new(transaction=b"tx123").use():
future = _transaction.transaction_async(callback, join=True)
assert future.result() == "I tried, momma."
@staticmethod
def test_success_join_callback_returns_future(in_context):
future = tasklets.Future()
def callback():
return future
with in_context.new(transaction=b"tx123").use():
future = _transaction.transaction_async(callback, join=True)
future.set_result("I tried, momma.")
assert future.result() == "I tried, momma."
@staticmethod
def test_success_propagation_mandatory(in_context):
def callback():
return "I tried, momma."
with mock.patch(
"google.cloud.ndb._transaction.transaction_async_",
side_effect=_transaction.transaction_async_,
) as transaction_async_:
with in_context.new(transaction=b"tx123").use():
future = _transaction.transaction_async(
callback,
join=False,
propagation=context_module.TransactionOptions.MANDATORY,
)
assert future.result() == "I tried, momma."
transaction_async_.assert_called_once_with(
callback,
3,
False,
True,
True,
None,
)
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_failure_propagation_mandatory():
with pytest.raises(exceptions.BadRequestError):
_transaction.transaction_async(
None,
join=False,
propagation=context_module.TransactionOptions.MANDATORY,
)
@staticmethod
def test_invalid_propagation():
with pytest.raises(ValueError):
_transaction.transaction_async(
None,
propagation=99,
)
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_invalid_join(caplog, in_context):
def callback():
return "I tried, momma."
provided_join_arg = False
with mock.patch(
"google.cloud.ndb._transaction.transaction_async_",
side_effect=_transaction.transaction_async_,
) as transaction_async_:
with in_context.new(transaction=b"tx123").use():
with caplog.at_level(logging.WARNING):
future = _transaction.transaction_async(
callback,
join=provided_join_arg,
propagation=context_module.TransactionOptions.MANDATORY,
)
assert future.result() == "I tried, momma."
assert "Modifying join behaviour to maintain old NDB behaviour" in caplog.text
transaction_async_.assert_called_once_with(
callback,
3,
False,
True,
(not provided_join_arg),
None,
)
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_propagation_with_no_join_arg(caplog):
with caplog.at_level(logging.WARNING):
ctx, join = _transaction._Propagation(
context_module.TransactionOptions.ALLOWED
).handle_propagation()
assert (
"Modifying join behaviour to maintain old NDB behaviour" not in caplog.text
)
assert ctx is None
assert join
@staticmethod
@pytest.mark.usefixtures("in_context")
def test_failure_propagation():
with pytest.raises(exceptions.NoLongerImplementedError):
_transaction.transaction_async_(
None,
propagation=context_module.TransactionOptions.ALLOWED,
)
@staticmethod
def test_propagation_allowed_already_in_transaction(in_context):
def callback():
return "I tried, momma."
with mock.patch(
"google.cloud.ndb._transaction.transaction_async_",
side_effect=_transaction.transaction_async_,
) as transaction_async_:
with in_context.new(transaction=b"tx123").use():
future = _transaction.transaction_async(
callback,
join=False,
propagation=context_module.TransactionOptions.ALLOWED,
)
assert future.result() == "I tried, momma."
transaction_async_.assert_called_once_with(
callback,
3,
False,
True,
True,
None,
)
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_propagation_allowed_not_yet_in_transaction(_datastore_api):
def callback():
return "I tried, momma."
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
with mock.patch(
"google.cloud.ndb._transaction.transaction_async_",
side_effect=_transaction.transaction_async_,
) as transaction_async_:
future = _transaction.transaction_async(
callback,
join=False,
propagation=context_module.TransactionOptions.ALLOWED,
)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
transaction_async_.assert_called_once_with(
callback,
3,
False,
True,
True,
None,
)
@staticmethod
@mock.patch("google.cloud.ndb._datastore_api")
def test_propagation_independent_already_in_transaction(_datastore_api, in_context):
def callback():
return "I tried, momma."
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
with mock.patch(
"google.cloud.ndb._transaction.transaction_async_",
side_effect=_transaction.transaction_async_,
) as transaction_async_:
with in_context.new(transaction=b"tx123").use():
future = _transaction.transaction_async(
callback,
join=True,
propagation=context_module.TransactionOptions.INDEPENDENT,
)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx456")
_datastore_api.commit.assert_called_once_with(b"tx456", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
transaction_async_.assert_called_once_with(
callback,
3,
False,
False,
True,
None,
)
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_propagation_independent_not_yet_in_transaction(_datastore_api):
def callback():
return "I tried, momma."
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
with mock.patch(
"google.cloud.ndb._transaction.transaction_async_",
side_effect=_transaction.transaction_async_,
) as transaction_async_:
future = _transaction.transaction_async(
callback,
join=False,
propagation=context_module.TransactionOptions.INDEPENDENT,
)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
transaction_async_.assert_called_once_with(
callback,
3,
False,
False,
True,
None,
)
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_success_no_retries(_datastore_api):
def callback():
return "I tried, momma."
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
future = _transaction.transaction_async(callback, retries=0)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_success_callback_is_tasklet(_datastore_api):
tasklet = tasklets.Future("tasklet")
def callback():
return tasklet
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
future = _transaction.transaction_async(callback)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
tasklet.set_result("I tried, momma.")
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
commit_future.set_result(None)
assert future.result() == "I tried, momma."
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_run_inner_loop(_datastore_api):
begin_futures = [
tasklets.Future("begin transaction 1"),
tasklets.Future("begin transaction 2"),
]
_datastore_api.begin_transaction.side_effect = begin_futures
commit_futures = [
tasklets.Future("commit transaction 1"),
tasklets.Future("commit transaction 2"),
]
_datastore_api.commit.side_effect = commit_futures
@tasklets.tasklet
def callback():
# Scheduling the sleep call here causes control to go back up to
# the main loop before this tasklet, running in the transaction
# loop, has finished, forcing a call to run_inner_loop via the idle
# handler.
yield tasklets.sleep(0)
@tasklets.tasklet
def some_tasklet():
# This tasklet runs in the main loop. In order to get results back
# from the transaction_async calls, the run_inner_loop idle handler
# will have to be run.
yield [
_transaction.transaction_async(callback),
_transaction.transaction_async(callback),
]
# Scheduling this sleep call forces the run_inner_loop idle handler
# to be run again so we can run it in the case when there is no
# more work to be done in the transaction. (Branch coverage.)
yield tasklets.sleep(0)
raise tasklets.Return("I tried, momma.")
future = some_tasklet()
begin_futures[0].set_result(b"tx123")
begin_futures[1].set_result(b"tx234")
commit_futures[0].set_result(None)
commit_futures[1].set_result(None)
assert future.result() == "I tried, momma."
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_error(_datastore_api):
error = Exception("Spurious error.")
def callback():
raise error
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
rollback_future = tasklets.Future("rollback transaction")
_datastore_api.rollback.return_value = rollback_future
future = _transaction.transaction_async(callback)
_datastore_api.begin_transaction.assert_called_once_with(False, retries=0)
begin_future.set_result(b"tx123")
_datastore_api.rollback.assert_called_once_with(b"tx123")
rollback_future.set_result(None)
assert future.exception() is error
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb.tasklets.sleep")
@mock.patch("google.cloud.ndb._retry.core_retry")
@mock.patch("google.cloud.ndb._datastore_api")
def test_transient_error(_datastore_api, core_retry, sleep):
core_retry.exponential_sleep_generator.return_value = itertools.count()
core_retry.if_transient_error.return_value = True
callback = mock.Mock(side_effect=[Exception("Spurious error."), "foo"])
begin_future = tasklets.Future("begin transaction")
begin_future.set_result(b"tx123")
_datastore_api.begin_transaction.return_value = begin_future
rollback_future = tasklets.Future("rollback transaction")
_datastore_api.rollback.return_value = rollback_future
rollback_future.set_result(None)
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
commit_future.set_result(None)
sleep_future = tasklets.Future("sleep")
sleep_future.set_result(None)
sleep.return_value = sleep_future
future = _transaction.transaction_async(callback)
assert future.result() == "foo"
        assert _datastore_api.begin_transaction.call_count == 2
_datastore_api.rollback.assert_called_once_with(b"tx123")
sleep.assert_called_once_with(0)
_datastore_api.commit.assert_called_once_with(b"tx123", retries=0)
@staticmethod
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb.tasklets.sleep")
@mock.patch("google.cloud.ndb._retry.core_retry")
@mock.patch("google.cloud.ndb._datastore_api")
def test_too_many_transient_errors(_datastore_api, core_retry, sleep):
core_retry.exponential_sleep_generator.return_value = itertools.count()
core_retry.if_transient_error.return_value = True
error = Exception("Spurious error.")
def callback():
raise error
begin_future = tasklets.Future("begin transaction")
begin_future.set_result(b"tx123")
_datastore_api.begin_transaction.return_value = begin_future
rollback_future = tasklets.Future("rollback transaction")
_datastore_api.rollback.return_value = rollback_future
rollback_future.set_result(None)
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
commit_future.set_result(None)
sleep_future = tasklets.Future("sleep")
sleep_future.set_result(None)
sleep.return_value = sleep_future
future = _transaction.transaction_async(callback)
with pytest.raises(core_exceptions.RetryError) as error_context:
future.check_success()
assert error_context.value.cause is error
assert _datastore_api.begin_transaction.call_count == 4
assert _datastore_api.rollback.call_count == 4
assert sleep.call_count == 4
_datastore_api.commit.assert_not_called()
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_transactional(_datastore_api):
@_transaction.transactional()
def simple_function(a, b):
return a + b
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
begin_future.set_result(b"tx123")
commit_future.set_result(None)
res = simple_function(100, 42)
assert res == 142
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_transactional_async(_datastore_api):
@_transaction.transactional_async()
def simple_function(a, b):
return a + b
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
begin_future.set_result(b"tx123")
commit_future.set_result(None)
res = simple_function(100, 42)
assert res.result() == 142
@pytest.mark.usefixtures("in_context")
@mock.patch("google.cloud.ndb._datastore_api")
def test_transactional_tasklet(_datastore_api):
@_transaction.transactional_tasklet()
def generator_function(dependency):
value = yield dependency
raise tasklets.Return(value + 42)
begin_future = tasklets.Future("begin transaction")
_datastore_api.begin_transaction.return_value = begin_future
commit_future = tasklets.Future("commit transaction")
_datastore_api.commit.return_value = commit_future
begin_future.set_result(b"tx123")
commit_future.set_result(None)
dependency = tasklets.Future()
dependency.set_result(100)
res = generator_function(dependency)
assert res.result() == 142
@pytest.mark.usefixtures("in_context")
def test_non_transactional_out_of_transaction():
@_transaction.non_transactional()
def simple_function(a, b):
return a + b
res = simple_function(100, 42)
assert res == 142
@pytest.mark.usefixtures("in_context")
def test_non_transactional_in_transaction(in_context):
with in_context.new(transaction=b"tx123").use():
def simple_function(a, b):
return a + b
wrapped_function = _transaction.non_transactional()(simple_function)
res = wrapped_function(100, 42)
assert res == 142
with pytest.raises(exceptions.BadRequestError):
wrapped_function = _transaction.non_transactional(allow_existing=False)(
simple_function
)
wrapped_function(100, 42)
|
|
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.contrib.admin.utils import (
NotRelationField, flatten, get_fields_from_path,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Combinable, F, OrderBy
from django.forms.models import (
BaseModelForm, BaseModelFormSet, _get_foreign_key,
)
from django.template import engines
from django.template.backends.django import DjangoTemplates
from django.utils.module_loading import import_string
def _issubclass(cls, classinfo):
"""
issubclass() variant that doesn't raise an exception if cls isn't a
class.
"""
try:
return issubclass(cls, classinfo)
except TypeError:
return False
def _contains_subclass(class_path, candidate_paths):
"""
Return whether or not a dotted class path (or a subclass of that class) is
found in a list of candidate paths.
"""
cls = import_string(class_path)
for path in candidate_paths:
try:
candidate_cls = import_string(path)
except ImportError:
# ImportErrors are raised elsewhere.
continue
if _issubclass(candidate_cls, cls):
return True
return False
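# Illustrative use of _contains_subclass, mirroring how the checks below call
# it (the settings value is whatever the project configures):
#
#   _contains_subclass(
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       settings.MIDDLEWARE,
#   )
#   # -> True if MIDDLEWARE names that class or any subclass of it.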
def check_admin_app(app_configs, **kwargs):
from django.contrib.admin.sites import all_sites
errors = []
for site in all_sites:
errors.extend(site.check(app_configs))
return errors
def check_dependencies(**kwargs):
"""
Check that the admin's dependencies are correctly installed.
"""
if not apps.is_installed('django.contrib.admin'):
return []
errors = []
app_dependencies = (
('django.contrib.contenttypes', 401),
('django.contrib.auth', 405),
('django.contrib.messages', 406),
('django.contrib.sessions', 407),
)
for app_name, error_code in app_dependencies:
if not apps.is_installed(app_name):
errors.append(checks.Error(
"'%s' must be in INSTALLED_APPS in order to use the admin "
"application." % app_name,
id='admin.E%d' % error_code,
))
for engine in engines.all():
if isinstance(engine, DjangoTemplates):
django_templates_instance = engine.engine
break
else:
django_templates_instance = None
if not django_templates_instance:
errors.append(checks.Error(
"A 'django.template.backends.django.DjangoTemplates' instance "
"must be configured in TEMPLATES in order to use the admin "
"application.",
id='admin.E403',
))
else:
if ('django.contrib.auth.context_processors.auth'
not in django_templates_instance.context_processors and
_contains_subclass('django.contrib.auth.backends.ModelBackend', settings.AUTHENTICATION_BACKENDS)):
errors.append(checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id='admin.E402',
))
if ('django.contrib.messages.context_processors.messages'
not in django_templates_instance.context_processors):
errors.append(checks.Error(
"'django.contrib.messages.context_processors.messages' must "
"be enabled in DjangoTemplates (TEMPLATES) in order to use "
"the admin application.",
id='admin.E404',
))
if not _contains_subclass('django.contrib.auth.middleware.AuthenticationMiddleware', settings.MIDDLEWARE):
errors.append(checks.Error(
"'django.contrib.auth.middleware.AuthenticationMiddleware' must "
"be in MIDDLEWARE in order to use the admin application.",
id='admin.E408',
))
if not _contains_subclass('django.contrib.messages.middleware.MessageMiddleware', settings.MIDDLEWARE):
errors.append(checks.Error(
"'django.contrib.messages.middleware.MessageMiddleware' must "
"be in MIDDLEWARE in order to use the admin application.",
id='admin.E409',
))
return errors
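# These functions are registered elsewhere with Django's system check
# framework, so the errors above surface through the standard entry point:
#
#   python manage.py check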
class BaseModelAdminChecks:
def check(self, admin_obj, **kwargs):
return [
*self._check_autocomplete_fields(admin_obj),
*self._check_raw_id_fields(admin_obj),
*self._check_fields(admin_obj),
*self._check_fieldsets(admin_obj),
*self._check_exclude(admin_obj),
*self._check_form(admin_obj),
*self._check_filter_vertical(admin_obj),
*self._check_filter_horizontal(admin_obj),
*self._check_radio_fields(admin_obj),
*self._check_prepopulated_fields(admin_obj),
*self._check_view_on_site_url(admin_obj),
*self._check_ordering(admin_obj),
*self._check_readonly_fields(admin_obj),
]
def _check_autocomplete_fields(self, obj):
"""
Check that `autocomplete_fields` is a list or tuple of model fields.
"""
if not isinstance(obj.autocomplete_fields, (list, tuple)):
return must_be('a list or tuple', option='autocomplete_fields', obj=obj, id='admin.E036')
else:
return list(chain.from_iterable([
self._check_autocomplete_fields_item(obj, field_name, 'autocomplete_fields[%d]' % index)
for index, field_name in enumerate(obj.autocomplete_fields)
]))
def _check_autocomplete_fields_item(self, obj, field_name, label):
"""
Check that an item in `autocomplete_fields` is a ForeignKey or a
ManyToManyField and that the item has a related ModelAdmin with
search_fields defined.
"""
try:
field = obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E037')
else:
if not field.many_to_many and not isinstance(field, models.ForeignKey):
return must_be(
'a foreign key or a many-to-many field',
option=label, obj=obj, id='admin.E038'
)
related_admin = obj.admin_site._registry.get(field.remote_field.model)
if related_admin is None:
return [
checks.Error(
'An admin for model "%s" has to be registered '
'to be referenced by %s.autocomplete_fields.' % (
field.remote_field.model.__name__,
type(obj).__name__,
),
obj=obj.__class__,
id='admin.E039',
)
]
elif not related_admin.search_fields:
return [
checks.Error(
'%s must define "search_fields", because it\'s '
'referenced by %s.autocomplete_fields.' % (
related_admin.__class__.__name__,
type(obj).__name__,
),
obj=obj.__class__,
id='admin.E040',
)
]
return []
def _check_raw_id_fields(self, obj):
""" Check that `raw_id_fields` only contains field names that are listed
on the model. """
if not isinstance(obj.raw_id_fields, (list, tuple)):
return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001')
else:
return list(chain.from_iterable(
self._check_raw_id_fields_item(obj, field_name, 'raw_id_fields[%d]' % index)
for index, field_name in enumerate(obj.raw_id_fields)
))
def _check_raw_id_fields_item(self, obj, field_name, label):
""" Check an item of `raw_id_fields`, i.e. check that field named
`field_name` exists in model `model` and is a ForeignKey or a
ManyToManyField. """
try:
field = obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E002')
else:
if not field.many_to_many and not isinstance(field, models.ForeignKey):
return must_be('a foreign key or a many-to-many field', option=label, obj=obj, id='admin.E003')
else:
return []
def _check_fields(self, obj):
""" Check that `fields` only refer to existing fields, doesn't contain
duplicates. Check if at most one of `fields` and `fieldsets` is defined.
"""
if obj.fields is None:
return []
elif not isinstance(obj.fields, (list, tuple)):
return must_be('a list or tuple', option='fields', obj=obj, id='admin.E004')
elif obj.fieldsets:
return [
checks.Error(
"Both 'fieldsets' and 'fields' are specified.",
obj=obj.__class__,
id='admin.E005',
)
]
fields = flatten(obj.fields)
if len(fields) != len(set(fields)):
return [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
obj=obj.__class__,
id='admin.E006',
)
]
return list(chain.from_iterable(
self._check_field_spec(obj, field_name, 'fields')
for field_name in obj.fields
))
def _check_fieldsets(self, obj):
""" Check that fieldsets is properly formatted and doesn't contain
duplicates. """
if obj.fieldsets is None:
return []
elif not isinstance(obj.fieldsets, (list, tuple)):
return must_be('a list or tuple', option='fieldsets', obj=obj, id='admin.E007')
else:
seen_fields = []
return list(chain.from_iterable(
self._check_fieldsets_item(obj, fieldset, 'fieldsets[%d]' % index, seen_fields)
for index, fieldset in enumerate(obj.fieldsets)
))
def _check_fieldsets_item(self, obj, fieldset, label, seen_fields):
""" Check an item of `fieldsets`, i.e. check that this is a pair of a
set name and a dictionary containing "fields" key. """
if not isinstance(fieldset, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E008')
elif len(fieldset) != 2:
return must_be('of length 2', option=label, obj=obj, id='admin.E009')
elif not isinstance(fieldset[1], dict):
return must_be('a dictionary', option='%s[1]' % label, obj=obj, id='admin.E010')
elif 'fields' not in fieldset[1]:
return [
checks.Error(
"The value of '%s[1]' must contain the key 'fields'." % label,
obj=obj.__class__,
id='admin.E011',
)
]
elif not isinstance(fieldset[1]['fields'], (list, tuple)):
return must_be('a list or tuple', option="%s[1]['fields']" % label, obj=obj, id='admin.E008')
seen_fields.extend(flatten(fieldset[1]['fields']))
if len(seen_fields) != len(set(seen_fields)):
return [
checks.Error(
"There are duplicate field(s) in '%s[1]'." % label,
obj=obj.__class__,
id='admin.E012',
)
]
return list(chain.from_iterable(
self._check_field_spec(obj, fieldset_fields, '%s[1]["fields"]' % label)
for fieldset_fields in fieldset[1]['fields']
))
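    # For reference, a well-formed fieldset entry is a (name, options) pair
    # whose options dict carries a 'fields' key (names here are illustrative):
    #
    #   fieldsets = [
    #       ('Advanced options', {
    #           'fields': ('slug', ('pub_date', 'status')),
    #       }),
    #   ]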
def _check_field_spec(self, obj, fields, label):
""" `fields` should be an item of `fields` or an item of
fieldset[1]['fields'] for any `fieldset` in `fieldsets`. It should be a
field name or a tuple of field names. """
if isinstance(fields, tuple):
return list(chain.from_iterable(
self._check_field_spec_item(obj, field_name, "%s[%d]" % (label, index))
for index, field_name in enumerate(fields)
))
else:
return self._check_field_spec_item(obj, fields, label)
def _check_field_spec_item(self, obj, field_name, label):
if field_name in obj.readonly_fields:
            # Items in `fields` don't have to be actual model fields as long
            # as they appear in `readonly_fields`; the readonly_fields checks
            # handle the validation of such entries.
return []
else:
try:
field = obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
# If we can't find a field on the model that matches, it could
# be an extra field on the form.
return []
else:
if (isinstance(field, models.ManyToManyField) and
not field.remote_field.through._meta.auto_created):
return [
checks.Error(
"The value of '%s' cannot include the ManyToManyField '%s', "
"because that field manually specifies a relationship model."
% (label, field_name),
obj=obj.__class__,
id='admin.E013',
)
]
else:
return []
def _check_exclude(self, obj):
""" Check that exclude is a sequence without duplicates. """
if obj.exclude is None: # default value is None
return []
elif not isinstance(obj.exclude, (list, tuple)):
return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014')
elif len(obj.exclude) > len(set(obj.exclude)):
return [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
obj=obj.__class__,
id='admin.E015',
)
]
else:
return []
def _check_form(self, obj):
""" Check that form subclasses BaseModelForm. """
if not _issubclass(obj.form, BaseModelForm):
return must_inherit_from(parent='BaseModelForm', option='form',
obj=obj, id='admin.E016')
else:
return []
def _check_filter_vertical(self, obj):
""" Check that filter_vertical is a sequence of field names. """
if not isinstance(obj.filter_vertical, (list, tuple)):
return must_be('a list or tuple', option='filter_vertical', obj=obj, id='admin.E017')
else:
return list(chain.from_iterable(
self._check_filter_item(obj, field_name, "filter_vertical[%d]" % index)
for index, field_name in enumerate(obj.filter_vertical)
))
def _check_filter_horizontal(self, obj):
""" Check that filter_horizontal is a sequence of field names. """
if not isinstance(obj.filter_horizontal, (list, tuple)):
return must_be('a list or tuple', option='filter_horizontal', obj=obj, id='admin.E018')
else:
return list(chain.from_iterable(
self._check_filter_item(obj, field_name, "filter_horizontal[%d]" % index)
for index, field_name in enumerate(obj.filter_horizontal)
))
def _check_filter_item(self, obj, field_name, label):
""" Check one item of `filter_vertical` or `filter_horizontal`, i.e.
check that given field exists and is a ManyToManyField. """
try:
field = obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E019')
else:
if not field.many_to_many:
return must_be('a many-to-many field', option=label, obj=obj, id='admin.E020')
else:
return []
def _check_radio_fields(self, obj):
""" Check that `radio_fields` is a dictionary. """
if not isinstance(obj.radio_fields, dict):
return must_be('a dictionary', option='radio_fields', obj=obj, id='admin.E021')
else:
return list(chain.from_iterable(
self._check_radio_fields_key(obj, field_name, 'radio_fields') +
self._check_radio_fields_value(obj, val, 'radio_fields["%s"]' % field_name)
for field_name, val in obj.radio_fields.items()
))
def _check_radio_fields_key(self, obj, field_name, label):
""" Check that a key of `radio_fields` dictionary is name of existing
field and that the field is a ForeignKey or has `choices` defined. """
try:
field = obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E022')
else:
if not (isinstance(field, models.ForeignKey) or field.choices):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an "
"instance of ForeignKey, and does not have a 'choices' definition." % (
label, field_name
),
obj=obj.__class__,
id='admin.E023',
)
]
else:
return []
def _check_radio_fields_value(self, obj, val, label):
""" Check type of a value of `radio_fields` dictionary. """
from django.contrib.admin.options import HORIZONTAL, VERTICAL
if val not in (HORIZONTAL, VERTICAL):
return [
checks.Error(
"The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL." % label,
obj=obj.__class__,
id='admin.E024',
)
]
else:
return []
def _check_view_on_site_url(self, obj):
if not callable(obj.view_on_site) and not isinstance(obj.view_on_site, bool):
return [
checks.Error(
"The value of 'view_on_site' must be a callable or a boolean value.",
obj=obj.__class__,
id='admin.E025',
)
]
else:
return []
def _check_prepopulated_fields(self, obj):
""" Check that `prepopulated_fields` is a dictionary containing allowed
field types. """
if not isinstance(obj.prepopulated_fields, dict):
return must_be('a dictionary', option='prepopulated_fields', obj=obj, id='admin.E026')
else:
return list(chain.from_iterable(
self._check_prepopulated_fields_key(obj, field_name, 'prepopulated_fields') +
self._check_prepopulated_fields_value(obj, val, 'prepopulated_fields["%s"]' % field_name)
for field_name, val in obj.prepopulated_fields.items()
))
def _check_prepopulated_fields_key(self, obj, field_name, label):
""" Check a key of `prepopulated_fields` dictionary, i.e. check that it
is a name of existing field and the field is one of the allowed types.
"""
try:
field = obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E027')
else:
if isinstance(field, (models.DateTimeField, models.ForeignKey, models.ManyToManyField)):
return [
checks.Error(
"The value of '%s' refers to '%s', which must not be a DateTimeField, "
"a ForeignKey, a OneToOneField, or a ManyToManyField." % (label, field_name),
obj=obj.__class__,
id='admin.E028',
)
]
else:
return []
def _check_prepopulated_fields_value(self, obj, val, label):
""" Check a value of `prepopulated_fields` dictionary, i.e. it's an
iterable of existing fields. """
if not isinstance(val, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E029')
else:
return list(chain.from_iterable(
self._check_prepopulated_fields_value_item(obj, subfield_name, "%s[%r]" % (label, index))
for index, subfield_name in enumerate(val)
))
def _check_prepopulated_fields_value_item(self, obj, field_name, label):
""" For `prepopulated_fields` equal to {"slug": ("title",)},
`field_name` is "title". """
try:
obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E030')
else:
return []
def _check_ordering(self, obj):
""" Check that ordering refers to existing fields or is random. """
# ordering = None
if obj.ordering is None: # The default value is None
return []
elif not isinstance(obj.ordering, (list, tuple)):
return must_be('a list or tuple', option='ordering', obj=obj, id='admin.E031')
else:
return list(chain.from_iterable(
self._check_ordering_item(obj, field_name, 'ordering[%d]' % index)
for index, field_name in enumerate(obj.ordering)
))
def _check_ordering_item(self, obj, field_name, label):
""" Check that `ordering` refers to existing fields. """
if isinstance(field_name, (Combinable, OrderBy)):
if not isinstance(field_name, OrderBy):
field_name = field_name.asc()
if isinstance(field_name.expression, F):
field_name = field_name.expression.name
else:
return []
if field_name == '?' and len(obj.ordering) != 1:
return [
checks.Error(
"The value of 'ordering' has the random ordering marker '?', "
"but contains other fields as well.",
hint='Either remove the "?", or remove the other fields.',
obj=obj.__class__,
id='admin.E032',
)
]
elif field_name == '?':
return []
elif LOOKUP_SEP in field_name:
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
return []
else:
if field_name.startswith('-'):
field_name = field_name[1:]
if field_name == 'pk':
return []
try:
obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E033')
else:
return []
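    # Examples of `ordering` values these checks accept (field names are
    # illustrative):
    #
    #   ordering = ['-created', 'author__name', F('rank').desc()]
    #   ordering = ['?']   # the random marker must stand alone (admin.E032)
    #   ordering = ['pk']  # 'pk' is always accepted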
def _check_readonly_fields(self, obj):
""" Check that readonly_fields refers to proper attribute or field. """
if obj.readonly_fields == ():
return []
elif not isinstance(obj.readonly_fields, (list, tuple)):
return must_be('a list or tuple', option='readonly_fields', obj=obj, id='admin.E034')
else:
return list(chain.from_iterable(
self._check_readonly_fields_item(obj, field_name, "readonly_fields[%d]" % index)
for index, field_name in enumerate(obj.readonly_fields)
))
def _check_readonly_fields_item(self, obj, field_name, label):
if callable(field_name):
return []
elif hasattr(obj, field_name):
return []
elif hasattr(obj.model, field_name):
return []
else:
try:
obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
return [
checks.Error(
"The value of '%s' is not a callable, an attribute of '%s', or an attribute of '%s.%s'." % (
label, obj.__class__.__name__, obj.model._meta.app_label, obj.model._meta.object_name
),
obj=obj.__class__,
id='admin.E035',
)
]
else:
return []
class ModelAdminChecks(BaseModelAdminChecks):
def check(self, admin_obj, **kwargs):
return [
*super().check(admin_obj),
*self._check_save_as(admin_obj),
*self._check_save_on_top(admin_obj),
*self._check_inlines(admin_obj),
*self._check_list_display(admin_obj),
*self._check_list_display_links(admin_obj),
*self._check_list_filter(admin_obj),
*self._check_list_select_related(admin_obj),
*self._check_list_per_page(admin_obj),
*self._check_list_max_show_all(admin_obj),
*self._check_list_editable(admin_obj),
*self._check_search_fields(admin_obj),
*self._check_date_hierarchy(admin_obj),
*self._check_action_permission_methods(admin_obj),
*self._check_actions_uniqueness(admin_obj),
]
def _check_save_as(self, obj):
""" Check save_as is a boolean. """
if not isinstance(obj.save_as, bool):
return must_be('a boolean', option='save_as',
obj=obj, id='admin.E101')
else:
return []
def _check_save_on_top(self, obj):
""" Check save_on_top is a boolean. """
if not isinstance(obj.save_on_top, bool):
return must_be('a boolean', option='save_on_top',
obj=obj, id='admin.E102')
else:
return []
def _check_inlines(self, obj):
""" Check all inline model admin classes. """
if not isinstance(obj.inlines, (list, tuple)):
return must_be('a list or tuple', option='inlines', obj=obj, id='admin.E103')
else:
return list(chain.from_iterable(
self._check_inlines_item(obj, item, "inlines[%d]" % index)
for index, item in enumerate(obj.inlines)
))
def _check_inlines_item(self, obj, inline, label):
""" Check one inline model admin. """
try:
inline_label = inline.__module__ + '.' + inline.__name__
except AttributeError:
return [
checks.Error(
"'%s' must inherit from 'InlineModelAdmin'." % obj,
obj=obj.__class__,
id='admin.E104',
)
]
from django.contrib.admin.options import InlineModelAdmin
if not _issubclass(inline, InlineModelAdmin):
return [
checks.Error(
"'%s' must inherit from 'InlineModelAdmin'." % inline_label,
obj=obj.__class__,
id='admin.E104',
)
]
elif not inline.model:
return [
checks.Error(
"'%s' must have a 'model' attribute." % inline_label,
obj=obj.__class__,
id='admin.E105',
)
]
elif not _issubclass(inline.model, models.Model):
return must_be('a Model', option='%s.model' % inline_label, obj=obj, id='admin.E106')
else:
return inline(obj.model, obj.admin_site).check()
def _check_list_display(self, obj):
""" Check that list_display only contains fields or usable attributes.
"""
if not isinstance(obj.list_display, (list, tuple)):
return must_be('a list or tuple', option='list_display', obj=obj, id='admin.E107')
else:
return list(chain.from_iterable(
self._check_list_display_item(obj, item, "list_display[%d]" % index)
for index, item in enumerate(obj.list_display)
))
def _check_list_display_item(self, obj, item, label):
if callable(item):
return []
elif hasattr(obj, item):
return []
elif hasattr(obj.model, item):
try:
field = obj.model._meta.get_field(item)
except FieldDoesNotExist:
return []
else:
if isinstance(field, models.ManyToManyField):
return [
checks.Error(
"The value of '%s' must not be a ManyToManyField." % label,
obj=obj.__class__,
id='admin.E109',
)
]
return []
else:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not a callable, "
"an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__,
obj.model._meta.app_label, obj.model._meta.object_name,
),
obj=obj.__class__,
id='admin.E108',
)
]
def _check_list_display_links(self, obj):
""" Check that list_display_links is a unique subset of list_display.
"""
from django.contrib.admin.options import ModelAdmin
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110')
# Check only if ModelAdmin.get_list_display() isn't overridden.
elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:
return list(chain.from_iterable(
self._check_list_display_links_item(obj, field_name, "list_display_links[%d]" % index)
for index, field_name in enumerate(obj.list_display_links)
))
return []
def _check_list_display_links_item(self, obj, field_name, label):
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not defined in 'list_display'." % (
label, field_name
),
obj=obj.__class__,
id='admin.E111',
)
]
else:
return []
def _check_list_filter(self, obj):
if not isinstance(obj.list_filter, (list, tuple)):
return must_be('a list or tuple', option='list_filter', obj=obj, id='admin.E112')
else:
return list(chain.from_iterable(
self._check_list_filter_item(obj, item, "list_filter[%d]" % index)
for index, item in enumerate(obj.list_filter)
))
def _check_list_filter_item(self, obj, item, label):
"""
Check one item of `list_filter`, i.e. check if it is one of three options:
        1. 'field' -- a basic field filter, possibly with relationships (e.g.
        'field__rel')
2. ('field', SomeFieldListFilter) - a field-based list filter class
3. SomeListFilter - a non-field list filter class
"""
from django.contrib.admin import ListFilter, FieldListFilter
if callable(item) and not isinstance(item, models.Field):
# If item is option 3, it should be a ListFilter...
if not _issubclass(item, ListFilter):
return must_inherit_from(parent='ListFilter', option=label,
obj=obj, id='admin.E113')
# ... but not a FieldListFilter.
elif issubclass(item, FieldListFilter):
return [
checks.Error(
"The value of '%s' must not inherit from 'FieldListFilter'." % label,
obj=obj.__class__,
id='admin.E114',
)
]
else:
return []
elif isinstance(item, (tuple, list)):
# item is option #2
field, list_filter_class = item
if not _issubclass(list_filter_class, FieldListFilter):
return must_inherit_from(parent='FieldListFilter', option='%s[1]' % label, obj=obj, id='admin.E115')
else:
return []
else:
# item is option #1
field = item
# Validate the field string
try:
get_fields_from_path(obj.model, field)
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of '%s' refers to '%s', which does not refer to a Field." % (label, field),
obj=obj.__class__,
id='admin.E116',
)
]
else:
return []
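    # An illustrative `list_filter` covering the three accepted forms (field
    # and custom class names are hypothetical):
    #
    #   list_filter = (
    #       'status',                                  # 1. field name/path
    #       ('author', admin.RelatedFieldListFilter),  # 2. (field, FieldListFilter)
    #       RecentlyUpdatedListFilter,                 # 3. ListFilter subclass
    #   )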
def _check_list_select_related(self, obj):
""" Check that list_select_related is a boolean, a list or a tuple. """
if not isinstance(obj.list_select_related, (bool, list, tuple)):
return must_be('a boolean, tuple or list', option='list_select_related', obj=obj, id='admin.E117')
else:
return []
def _check_list_per_page(self, obj):
""" Check that list_per_page is an integer. """
if not isinstance(obj.list_per_page, int):
return must_be('an integer', option='list_per_page', obj=obj, id='admin.E118')
else:
return []
def _check_list_max_show_all(self, obj):
""" Check that list_max_show_all is an integer. """
if not isinstance(obj.list_max_show_all, int):
return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119')
else:
return []
def _check_list_editable(self, obj):
""" Check that list_editable is a sequence of editable fields from
list_display without first element. """
if not isinstance(obj.list_editable, (list, tuple)):
return must_be('a list or tuple', option='list_editable', obj=obj, id='admin.E120')
else:
return list(chain.from_iterable(
self._check_list_editable_item(obj, item, "list_editable[%d]" % index)
for index, item in enumerate(obj.list_editable)
))
def _check_list_editable_item(self, obj, field_name, label):
try:
field = obj.model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label, obj=obj, id='admin.E121')
else:
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not "
"contained in 'list_display'." % (label, field_name),
obj=obj.__class__,
id='admin.E122',
)
]
elif obj.list_display_links and field_name in obj.list_display_links:
return [
checks.Error(
"The value of '%s' cannot be in both 'list_editable' and 'list_display_links'." % field_name,
obj=obj.__class__,
id='admin.E123',
)
]
# If list_display[0] is in list_editable, check that
# list_display_links is set. See #22792 and #26229 for use cases.
elif (obj.list_display[0] == field_name and not obj.list_display_links and
obj.list_display_links is not None):
return [
checks.Error(
"The value of '%s' refers to the first field in 'list_display' ('%s'), "
"which cannot be used unless 'list_display_links' is set." % (
label, obj.list_display[0]
),
obj=obj.__class__,
id='admin.E124',
)
]
elif not field.editable:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not editable through the admin." % (
label, field_name
),
obj=obj.__class__,
id='admin.E125',
)
]
else:
return []
def _check_search_fields(self, obj):
""" Check search_fields is a sequence. """
if not isinstance(obj.search_fields, (list, tuple)):
return must_be('a list or tuple', option='search_fields', obj=obj, id='admin.E126')
else:
return []
def _check_date_hierarchy(self, obj):
""" Check that date_hierarchy refers to DateField or DateTimeField. """
if obj.date_hierarchy is None:
return []
else:
try:
field = get_fields_from_path(obj.model, obj.date_hierarchy)[-1]
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of 'date_hierarchy' refers to '%s', which "
"does not refer to a Field." % obj.date_hierarchy,
obj=obj.__class__,
id='admin.E127',
)
]
else:
if not isinstance(field, (models.DateField, models.DateTimeField)):
return must_be('a DateField or DateTimeField', option='date_hierarchy', obj=obj, id='admin.E128')
else:
return []
def _check_action_permission_methods(self, obj):
"""
Actions with an allowed_permission attribute require the ModelAdmin to
implement a has_<perm>_permission() method for each permission.
"""
actions = obj._get_base_actions()
errors = []
for func, name, _ in actions:
if not hasattr(func, 'allowed_permissions'):
continue
for permission in func.allowed_permissions:
method_name = 'has_%s_permission' % permission
if not hasattr(obj, method_name):
errors.append(
checks.Error(
'%s must define a %s() method for the %s action.' % (
obj.__class__.__name__,
method_name,
func.__name__,
),
obj=obj.__class__,
id='admin.E129',
)
)
return errors
def _check_actions_uniqueness(self, obj):
"""Check that every action has a unique __name__."""
names = [name for _, name, _ in obj._get_base_actions()]
if len(names) != len(set(names)):
return [checks.Error(
'__name__ attributes of actions defined in %s must be '
'unique.' % obj.__class__,
obj=obj.__class__,
id='admin.E130',
)]
return []
class InlineModelAdminChecks(BaseModelAdminChecks):
def check(self, inline_obj, **kwargs):
parent_model = inline_obj.parent_model
return [
*super().check(inline_obj),
*self._check_relation(inline_obj, parent_model),
*self._check_exclude_of_parent_model(inline_obj, parent_model),
*self._check_extra(inline_obj),
*self._check_max_num(inline_obj),
*self._check_min_num(inline_obj),
*self._check_formset(inline_obj),
]
def _check_exclude_of_parent_model(self, obj, parent_model):
# Do not perform more specific checks if the base checks result in an
# error.
errors = super()._check_exclude(obj)
if errors:
return []
# Skip if `fk_name` is invalid.
if self._check_relation(obj, parent_model):
return []
if obj.exclude is None:
return []
fk = _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
if fk.name in obj.exclude:
return [
checks.Error(
"Cannot exclude the field '%s', because it is the foreign key "
"to the parent model '%s.%s'." % (
fk.name, parent_model._meta.app_label, parent_model._meta.object_name
),
obj=obj.__class__,
id='admin.E201',
)
]
else:
return []
def _check_relation(self, obj, parent_model):
try:
_get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
except ValueError as e:
return [checks.Error(e.args[0], obj=obj.__class__, id='admin.E202')]
else:
return []
def _check_extra(self, obj):
""" Check that extra is an integer. """
if not isinstance(obj.extra, int):
return must_be('an integer', option='extra', obj=obj, id='admin.E203')
else:
return []
def _check_max_num(self, obj):
""" Check that max_num is an integer. """
if obj.max_num is None:
return []
elif not isinstance(obj.max_num, int):
return must_be('an integer', option='max_num', obj=obj, id='admin.E204')
else:
return []
def _check_min_num(self, obj):
""" Check that min_num is an integer. """
if obj.min_num is None:
return []
elif not isinstance(obj.min_num, int):
return must_be('an integer', option='min_num', obj=obj, id='admin.E205')
else:
return []
def _check_formset(self, obj):
""" Check formset is a subclass of BaseModelFormSet. """
if not _issubclass(obj.formset, BaseModelFormSet):
return must_inherit_from(parent='BaseModelFormSet', option='formset', obj=obj, id='admin.E206')
else:
return []
def must_be(type, option, obj, id):
return [
checks.Error(
"The value of '%s' must be %s." % (option, type),
obj=obj.__class__,
id=id,
),
]
def must_inherit_from(parent, option, obj, id):
return [
checks.Error(
"The value of '%s' must inherit from '%s'." % (option, parent),
obj=obj.__class__,
id=id,
),
]
def refer_to_missing_field(field, option, obj, id):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an attribute of '%s.%s'." % (
option, field, obj.model._meta.app_label, obj.model._meta.object_name
),
obj=obj.__class__,
id=id,
),
]
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Entity state machines. These are state machines that perform CRUD operations
on entities.
"""
import hszinc
import fysom
from ...util import state
from ...util.asyncexc import AsynchronousException
from ...exception import HaystackError
class EntityRetrieveOperation(state.HaystackOperation):
"""
Base class for retrieving entity instances.
"""
def __init__(self, session, single):
"""
        Initialise an entity retrieval request.
        :param session: Haystack HTTP session object.
        :param single: If True, the result is a single entity rather than a
            dict of entities keyed by ID.
"""
single = bool(single)
super(EntityRetrieveOperation, self).__init__(
result_deepcopy=False, result_copy=not single)
self._session = session
self._entities = {}
self._single = single
def _on_read(self, operation, **kwargs):
"""
Process the grid, updating existing items.
"""
try:
# See if the read succeeded.
try:
grid = operation.result
except HaystackError as e:
# Is this a "not found" error?
if str(e).startswith('HNotFoundError'):
raise NameError('No matching entity found')
raise
self._log.debug('Received grid: %s', grid)
# Iterate over each row:
for row in grid:
# Ignore rows that don't specify an ID.
if 'id' not in row:
continue
row = row.copy()
entity_ref = row.pop('id')
# This entity does not exist
if entity_ref is None:
continue
entity_id = entity_ref.name
try:
entity = self._entities[entity_id]
entity._update_tags(row)
except KeyError:
try:
entity = self._session._entities[entity_id]
entity._update_tags(row)
except KeyError:
entity = self._session._tagging_model.create_entity(
entity_id, row)
# Stash/update entity references.
self._session._entities[entity_id] = entity
self._entities[entity_id] = entity
if self._single:
try:
result = list(self._entities.values())[0]
except IndexError:
raise NameError('No matching entity found')
else:
result = self._entities
self._state_machine.read_done(result=result)
except: # Catch all exceptions to pass to caller.
self._state_machine.exception(result=AsynchronousException())
def _do_done(self, event):
"""
Return the result from the state machine.
"""
self._done(event.result)
class GetEntityOperation(EntityRetrieveOperation):
"""
    Operation for retrieving entity instances by ID. This operation performs
the following steps::
If refresh_all is False:
# State: init
For each entity_id in entity_ids:
If entity_id exists in cache:
Retrieve and store entity from cache.
Add entity_id to list got_ids.
For each entity_id in got_ids:
Discard entity_id from entity_ids.
If entity_ids is not empty:
# State: read
Perform a low-level read of the IDs.
For each row returned in grid:
If entity is not in cache:
Create new Entity instances for each row returned.
Else:
Update existing Entity instance with new row data.
Add the new entity instances to cache and store.
Return the stored entities.
# State: done
"""
def __init__(self, session, entity_ids, refresh_all, single):
"""
Initialise a request for the named IDs.
:param session: Haystack HTTP session object.
:param entity_ids: A list of IDs to request.
:param refresh_all: Refresh all entities, ignore existing content.
"""
self._log = session._log.getChild('get_entity')
super(GetEntityOperation, self).__init__(session, single)
        self._entity_ids = set(map(
            lambda r: r.name if isinstance(r, hszinc.Ref) else r,
            entity_ids))
self._todo = self._entity_ids.copy()
self._refresh_all = refresh_all
self._state_machine = fysom.Fysom(
initial='init', final='done',
events=[
# Event Current State New State
('cache_checked', 'init', 'read'),
('read_done', 'read', 'done'),
('exception', '*', 'done'),
], callbacks={
'onenterread': self._do_read,
'onenterdone': self._do_done,
})
def go(self):
"""
Start the request, check cache for existing entities.
"""
# See what is in cache.
for entity_id in self._entity_ids:
if isinstance(entity_id, hszinc.Ref):
entity_id = entity_id.name
try:
self._entities[entity_id] = \
self._session._entities[entity_id]
except KeyError:
pass
if not self._refresh_all:
            # Drop the IDs already satisfied from cache.
list(map(self._todo.discard, list(self._entities.keys())))
self._state_machine.cache_checked()
def _do_read(self, event):
"""
        Read the entities that remain to be fetched.
"""
try:
if bool(self._todo):
self._session.read(ids=list(self._todo),
callback=self._on_read)
else:
# Nothing needed to read.
if self._single:
try:
result = list(self._entities.values())[0]
except IndexError:
raise NameError('No matching entity found')
else:
result = self._entities
self._state_machine.read_done(result=result)
except: # Catch all exceptions to pass to caller.
self._state_machine.exception(result=AsynchronousException())
class FindEntityOperation(EntityRetrieveOperation):
"""
Operation for retrieving entity instances by filter.
    This operation performs the following steps::
Issue a read instruction with the given filter:
For each row returned in grid:
If entity is not in cache:
Create new Entity instances for each row returned.
Else:
Update existing Entity instance with new row data.
Add the new entity instances to cache and store.
Return the stored entities.
# State: done
"""
def __init__(self, session, filter_expr, limit, single):
"""
        Initialise a request for entities matching a filter expression.
:param session: Haystack HTTP session object.
:param filter_expr: Filter expression.
:param limit: Maximum number of entities to fetch.
"""
self._log = session._log.getChild('find_entity')
super(FindEntityOperation, self).__init__(session, single)
self._filter_expr = filter_expr
self._limit = limit
self._state_machine = fysom.Fysom(
initial='init', final='done',
events=[
# Event Current State New State
('go', 'init', 'read'),
('read_done', 'read', 'done'),
('exception', '*', 'done'),
], callbacks={
'onenterdone': self._do_done,
})
def go(self):
"""
        Start the request by issuing the filtered read (no cache check here).
"""
self._state_machine.go()
self._session.read(filter_expr=self._filter_expr, limit=self._limit,
callback=self._on_read)
|
|
from sympy import S, sin, cos, pi, sqrt, symbols
from sympy.physics.mechanics import (Dyadic, Particle, Point, ReferenceFrame,
RigidBody, Vector)
from sympy.physics.mechanics import (angular_momentum, cross, dot,
dynamicsymbols, express, inertia,
inertia_of_point_mass,
kinematic_equations, kinetic_energy,
linear_momentum, outer, partial_velocity,
potential_energy)
from sympy.utilities.pytest import raises
Vector.simp = True
q1, q2, q3, q4, q5 = symbols('q1 q2 q3 q4 q5')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.z])
B = A.orientnew('B', 'Axis', [q2, A.x])
C = B.orientnew('C', 'Axis', [q3, B.y])
def test_dot():
assert dot(A.x, A.x) == 1
assert dot(A.x, A.y) == 0
assert dot(A.x, A.z) == 0
assert dot(A.y, A.x) == 0
assert dot(A.y, A.y) == 1
assert dot(A.y, A.z) == 0
assert dot(A.z, A.x) == 0
assert dot(A.z, A.y) == 0
assert dot(A.z, A.z) == 1
def test_dot_different_frames():
assert dot(N.x, A.x) == cos(q1)
assert dot(N.x, A.y) == -sin(q1)
assert dot(N.x, A.z) == 0
assert dot(N.y, A.x) == sin(q1)
assert dot(N.y, A.y) == cos(q1)
assert dot(N.y, A.z) == 0
assert dot(N.z, A.x) == 0
assert dot(N.z, A.y) == 0
assert dot(N.z, A.z) == 1
assert dot(N.x, A.x + A.y) == sqrt(2)*cos(q1 + pi/4) == dot(A.x + A.y, N.x)
assert dot(A.x, C.x) == cos(q3)
assert dot(A.x, C.y) == 0
assert dot(A.x, C.z) == sin(q3)
assert dot(A.y, C.x) == sin(q2)*sin(q3)
assert dot(A.y, C.y) == cos(q2)
assert dot(A.y, C.z) == -sin(q2)*cos(q3)
assert dot(A.z, C.x) == -cos(q2)*sin(q3)
assert dot(A.z, C.y) == sin(q2)
assert dot(A.z, C.z) == cos(q2)*cos(q3)
def test_cross():
assert cross(A.x, A.x) == 0
assert cross(A.x, A.y) == A.z
assert cross(A.x, A.z) == -A.y
assert cross(A.y, A.x) == -A.z
assert cross(A.y, A.y) == 0
assert cross(A.y, A.z) == A.x
assert cross(A.z, A.x) == A.y
assert cross(A.z, A.y) == -A.x
assert cross(A.z, A.z) == 0
def test_cross_different_frames():
assert cross(N.x, A.x) == sin(q1)*A.z
assert cross(N.x, A.y) == cos(q1)*A.z
assert cross(N.x, A.z) == -sin(q1)*A.x - cos(q1)*A.y
assert cross(N.y, A.x) == -cos(q1)*A.z
assert cross(N.y, A.y) == sin(q1)*A.z
assert cross(N.y, A.z) == cos(q1)*A.x - sin(q1)*A.y
assert cross(N.z, A.x) == A.y
assert cross(N.z, A.y) == -A.x
assert cross(N.z, A.z) == 0
assert cross(N.x, A.x) == sin(q1)*A.z
assert cross(N.x, A.y) == cos(q1)*A.z
assert cross(N.x, A.x + A.y) == sin(q1)*A.z + cos(q1)*A.z
assert cross(A.x + A.y, N.x) == -sin(q1)*A.z - cos(q1)*A.z
assert cross(A.x, C.x) == sin(q3)*C.y
assert cross(A.x, C.y) == -sin(q3)*C.x + cos(q3)*C.z
assert cross(A.x, C.z) == -cos(q3)*C.y
assert cross(C.x, A.x) == -sin(q3)*C.y
assert cross(C.y, A.x) == sin(q3)*C.x - cos(q3)*C.z
assert cross(C.z, A.x) == cos(q3)*C.y
def test_operator_match():
"""Test that the output of dot, cross, outer functions match
operator behavior.
"""
A = ReferenceFrame('A')
v = A.x + A.y
d = v | v
zerov = Vector(0)
zerod = Dyadic(0)
# dot products
assert d & d == dot(d, d)
assert d & zerod == dot(d, zerod)
assert zerod & d == dot(zerod, d)
assert d & v == dot(d, v)
assert v & d == dot(v, d)
assert d & zerov == dot(d, zerov)
assert zerov & d == dot(zerov, d)
raises(TypeError, lambda: dot(d, S(0)))
raises(TypeError, lambda: dot(S(0), d))
raises(TypeError, lambda: dot(d, 0))
raises(TypeError, lambda: dot(0, d))
assert v & v == dot(v, v)
assert v & zerov == dot(v, zerov)
assert zerov & v == dot(zerov, v)
raises(TypeError, lambda: dot(v, S(0)))
raises(TypeError, lambda: dot(S(0), v))
raises(TypeError, lambda: dot(v, 0))
raises(TypeError, lambda: dot(0, v))
# cross products
raises(TypeError, lambda: cross(d, d))
raises(TypeError, lambda: cross(d, zerod))
raises(TypeError, lambda: cross(zerod, d))
assert d ^ v == cross(d, v)
assert v ^ d == cross(v, d)
assert d ^ zerov == cross(d, zerov)
assert zerov ^ d == cross(zerov, d)
assert zerov ^ d == cross(zerov, d)
raises(TypeError, lambda: cross(d, S(0)))
raises(TypeError, lambda: cross(S(0), d))
raises(TypeError, lambda: cross(d, 0))
raises(TypeError, lambda: cross(0, d))
assert v ^ v == cross(v, v)
assert v ^ zerov == cross(v, zerov)
assert zerov ^ v == cross(zerov, v)
raises(TypeError, lambda: cross(v, S(0)))
raises(TypeError, lambda: cross(S(0), v))
raises(TypeError, lambda: cross(v, 0))
raises(TypeError, lambda: cross(0, v))
# outer products
raises(TypeError, lambda: outer(d, d))
raises(TypeError, lambda: outer(d, zerod))
raises(TypeError, lambda: outer(zerod, d))
raises(TypeError, lambda: outer(d, v))
raises(TypeError, lambda: outer(v, d))
raises(TypeError, lambda: outer(d, zerov))
raises(TypeError, lambda: outer(zerov, d))
raises(TypeError, lambda: outer(zerov, d))
raises(TypeError, lambda: outer(d, S(0)))
raises(TypeError, lambda: outer(S(0), d))
raises(TypeError, lambda: outer(d, 0))
raises(TypeError, lambda: outer(0, d))
assert v | v == outer(v, v)
assert v | zerov == outer(v, zerov)
assert zerov | v == outer(zerov, v)
raises(TypeError, lambda: outer(v, S(0)))
raises(TypeError, lambda: outer(S(0), v))
raises(TypeError, lambda: outer(v, 0))
raises(TypeError, lambda: outer(0, v))
def test_express():
assert express(A.x, C) == cos(q3)*C.x + sin(q3)*C.z
assert express(A.y, C) == sin(q2)*sin(q3)*C.x + cos(q2)*C.y - \
sin(q2)*cos(q3)*C.z
assert express(A.z, C) == -sin(q3)*cos(q2)*C.x + sin(q2)*C.y + \
cos(q2)*cos(q3)*C.z
assert express(A.x, N) == cos(q1)*N.x + sin(q1)*N.y
assert express(A.y, N) == -sin(q1)*N.x + cos(q1)*N.y
assert express(A.z, N) == N.z
assert express(A.x, A) == A.x
assert express(A.y, A) == A.y
assert express(A.z, A) == A.z
assert express(A.x, B) == B.x
assert express(A.y, B) == cos(q2)*B.y - sin(q2)*B.z
assert express(A.z, B) == sin(q2)*B.y + cos(q2)*B.z
assert express(A.x, C) == cos(q3)*C.x + sin(q3)*C.z
assert express(A.y, C) == sin(q2)*sin(q3)*C.x + cos(q2)*C.y - \
sin(q2)*cos(q3)*C.z
assert express(A.z, C) == -sin(q3)*cos(q2)*C.x + sin(q2)*C.y + \
cos(q2)*cos(q3)*C.z
# Check to make sure UnitVectors get converted properly
assert express(N.x, N) == N.x
assert express(N.y, N) == N.y
assert express(N.z, N) == N.z
assert express(N.x, A) == (cos(q1)*A.x - sin(q1)*A.y)
assert express(N.y, A) == (sin(q1)*A.x + cos(q1)*A.y)
assert express(N.z, A) == A.z
assert express(N.x, B) == (cos(q1)*B.x - sin(q1)*cos(q2)*B.y +
sin(q1)*sin(q2)*B.z)
assert express(N.y, B) == (sin(q1)*B.x + cos(q1)*cos(q2)*B.y -
sin(q2)*cos(q1)*B.z)
assert express(N.z, B) == (sin(q2)*B.y + cos(q2)*B.z)
assert express(N.x, C) == (
(cos(q1)*cos(q3) - sin(q1)*sin(q2)*sin(q3))*C.x -
sin(q1)*cos(q2)*C.y +
(sin(q3)*cos(q1) + sin(q1)*sin(q2)*cos(q3))*C.z)
assert express(N.y, C) == (
(sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*C.x +
cos(q1)*cos(q2)*C.y +
(sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*C.z)
assert express(N.z, C) == (-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
cos(q2)*cos(q3)*C.z)
assert express(A.x, N) == (cos(q1)*N.x + sin(q1)*N.y)
assert express(A.y, N) == (-sin(q1)*N.x + cos(q1)*N.y)
assert express(A.z, N) == N.z
assert express(A.x, A) == A.x
assert express(A.y, A) == A.y
assert express(A.z, A) == A.z
assert express(A.x, B) == B.x
assert express(A.y, B) == (cos(q2)*B.y - sin(q2)*B.z)
assert express(A.z, B) == (sin(q2)*B.y + cos(q2)*B.z)
assert express(A.x, C) == (cos(q3)*C.x + sin(q3)*C.z)
assert express(A.y, C) == (sin(q2)*sin(q3)*C.x + cos(q2)*C.y -
sin(q2)*cos(q3)*C.z)
assert express(A.z, C) == (-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
cos(q2)*cos(q3)*C.z)
assert express(B.x, N) == (cos(q1)*N.x + sin(q1)*N.y)
assert express(B.y, N) == (-sin(q1)*cos(q2)*N.x +
cos(q1)*cos(q2)*N.y + sin(q2)*N.z)
assert express(B.z, N) == (sin(q1)*sin(q2)*N.x -
sin(q2)*cos(q1)*N.y + cos(q2)*N.z)
assert express(B.x, A) == A.x
assert express(B.y, A) == (cos(q2)*A.y + sin(q2)*A.z)
assert express(B.z, A) == (-sin(q2)*A.y + cos(q2)*A.z)
assert express(B.x, B) == B.x
assert express(B.y, B) == B.y
assert express(B.z, B) == B.z
assert express(B.x, C) == (cos(q3)*C.x + sin(q3)*C.z)
assert express(B.y, C) == C.y
assert express(B.z, C) == (-sin(q3)*C.x + cos(q3)*C.z)
assert express(C.x, N) == (
(cos(q1)*cos(q3) - sin(q1)*sin(q2)*sin(q3))*N.x +
(sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*N.y -
sin(q3)*cos(q2)*N.z)
assert express(C.y, N) == (
-sin(q1)*cos(q2)*N.x + cos(q1)*cos(q2)*N.y + sin(q2)*N.z)
assert express(C.z, N) == (
(sin(q3)*cos(q1) + sin(q1)*sin(q2)*cos(q3))*N.x +
(sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*N.y +
cos(q2)*cos(q3)*N.z)
assert express(C.x, A) == (cos(q3)*A.x + sin(q2)*sin(q3)*A.y -
sin(q3)*cos(q2)*A.z)
assert express(C.y, A) == (cos(q2)*A.y + sin(q2)*A.z)
assert express(C.z, A) == (sin(q3)*A.x - sin(q2)*cos(q3)*A.y +
cos(q2)*cos(q3)*A.z)
assert express(C.x, B) == (cos(q3)*B.x - sin(q3)*B.z)
assert express(C.y, B) == B.y
assert express(C.z, B) == (sin(q3)*B.x + cos(q3)*B.z)
assert express(C.x, C) == C.x
assert express(C.y, C) == C.y
    assert express(C.z, C) == C.z
# Check to make sure Vectors get converted back to UnitVectors
assert N.x == express((cos(q1)*A.x - sin(q1)*A.y), N)
assert N.y == express((sin(q1)*A.x + cos(q1)*A.y), N)
assert N.x == express((cos(q1)*B.x - sin(q1)*cos(q2)*B.y +
sin(q1)*sin(q2)*B.z), N)
assert N.y == express((sin(q1)*B.x + cos(q1)*cos(q2)*B.y -
sin(q2)*cos(q1)*B.z), N)
assert N.z == express((sin(q2)*B.y + cos(q2)*B.z), N)
"""
These don't really test our code, they instead test the auto simplification
(or lack thereof) of SymPy.
assert N.x == express((
(cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*C.x -
sin(q1)*cos(q2)*C.y +
(sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*C.z), N)
assert N.y == express((
(sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*C.x +
cos(q1)*cos(q2)*C.y +
(sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*C.z), N)
assert N.z == express((-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
cos(q2)*cos(q3)*C.z), N)
"""
assert A.x == express((cos(q1)*N.x + sin(q1)*N.y), A)
assert A.y == express((-sin(q1)*N.x + cos(q1)*N.y), A)
assert A.y == express((cos(q2)*B.y - sin(q2)*B.z), A)
assert A.z == express((sin(q2)*B.y + cos(q2)*B.z), A)
assert A.x == express((cos(q3)*C.x + sin(q3)*C.z), A)
    # Trigsimp messes up here too.
#print express((sin(q2)*sin(q3)*C.x + cos(q2)*C.y -
# sin(q2)*cos(q3)*C.z), A)
assert A.y == express((sin(q2)*sin(q3)*C.x + cos(q2)*C.y -
sin(q2)*cos(q3)*C.z), A)
assert A.z == express((-sin(q3)*cos(q2)*C.x + sin(q2)*C.y +
cos(q2)*cos(q3)*C.z), A)
assert B.x == express((cos(q1)*N.x + sin(q1)*N.y), B)
assert B.y == express((-sin(q1)*cos(q2)*N.x +
cos(q1)*cos(q2)*N.y + sin(q2)*N.z), B)
assert B.z == express((sin(q1)*sin(q2)*N.x -
sin(q2)*cos(q1)*N.y + cos(q2)*N.z), B)
assert B.y == express((cos(q2)*A.y + sin(q2)*A.z), B)
assert B.z == express((-sin(q2)*A.y + cos(q2)*A.z), B)
assert B.x == express((cos(q3)*C.x + sin(q3)*C.z), B)
assert B.z == express((-sin(q3)*C.x + cos(q3)*C.z), B)
"""
assert C.x == express((
(cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*N.x +
(sin(q1)*cos(q3)+sin(q2)*sin(q3)*cos(q1))*N.y -
sin(q3)*cos(q2)*N.z), C)
assert C.y == express((
-sin(q1)*cos(q2)*N.x + cos(q1)*cos(q2)*N.y + sin(q2)*N.z), C)
assert C.z == express((
(sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*N.x +
(sin(q1)*sin(q3)-sin(q2)*cos(q1)*cos(q3))*N.y +
cos(q2)*cos(q3)*N.z), C)
"""
assert C.x == express((cos(q3)*A.x + sin(q2)*sin(q3)*A.y -
sin(q3)*cos(q2)*A.z), C)
assert C.y == express((cos(q2)*A.y + sin(q2)*A.z), C)
assert C.z == express((sin(q3)*A.x - sin(q2)*cos(q3)*A.y +
cos(q2)*cos(q3)*A.z), C)
assert C.x == express((cos(q3)*B.x - sin(q3)*B.z), C)
assert C.z == express((sin(q3)*B.x + cos(q3)*B.z), C)
def test_inertia():
N = ReferenceFrame('N')
ixx, iyy, izz = symbols('ixx iyy izz')
ixy, iyz, izx = symbols('ixy iyz izx')
assert inertia(N, ixx, iyy, izz) == (ixx * (N.x | N.x) + iyy *
(N.y | N.y) + izz * (N.z | N.z))
assert inertia(N, 0, 0, 0) == 0 * (N.x | N.x)
assert inertia(N, ixx, iyy, izz, ixy, iyz, izx) == (ixx * (N.x | N.x) +
ixy * (N.x | N.y) + izx * (N.x | N.z) + ixy * (N.y | N.x) + iyy *
(N.y | N.y) + iyz * (N.y | N.z) + izx * (N.z | N.x) + iyz * (N.z |
N.y) + izz * (N.z | N.z))
def test_kin_eqs():
q0, q1, q2, q3 = dynamicsymbols('q0 q1 q2 q3')
q0d, q1d, q2d, q3d = dynamicsymbols('q0 q1 q2 q3', 1)
u1, u2, u3 = dynamicsymbols('u1 u2 u3')
kds = kinematic_equations([u1, u2, u3], [q0, q1, q2, q3], 'quaternion')
assert kds == [-0.5 * q0 * u1 - 0.5 * q2 * u3 + 0.5 * q3 * u2 + q1d,
-0.5 * q0 * u2 + 0.5 * q1 * u3 - 0.5 * q3 * u1 + q2d,
-0.5 * q0 * u3 - 0.5 * q1 * u2 + 0.5 * q2 * u1 + q3d,
0.5 * q1 * u1 + 0.5 * q2 * u2 + 0.5 * q3 * u3 + q0d]
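# The residuals above are the component form of the standard quaternion
# kinematic relation qdot = 0.5 * E(q) * u (each listed expression equals
# zero), with u1..u3 the body-frame angular velocity measure numbers.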
def test_inertia_of_point_mass():
r, s, t, m = symbols('r s t m')
N = ReferenceFrame('N')
px = r * N.x
I = inertia_of_point_mass(m, px, N)
assert I == m * r**2 * (N.y | N.y) + m * r**2 * (N.z | N.z)
py = s * N.y
I = inertia_of_point_mass(m, py, N)
assert I == m * s**2 * (N.x | N.x) + m * s**2 * (N.z | N.z)
pz = t * N.z
I = inertia_of_point_mass(m, pz, N)
assert I == m * t**2 * (N.x | N.x) + m * t**2 * (N.y | N.y)
p = px + py + pz
I = inertia_of_point_mass(m, p, N)
assert I == (m * (s**2 + t**2) * (N.x | N.x) -
m * r * s * (N.x | N.y) -
m * r * t * (N.x | N.z) -
m * r * s * (N.y | N.x) +
m * (r**2 + t**2) * (N.y | N.y) -
m * s * t * (N.y | N.z) -
m * r * t * (N.z | N.x) -
m * s * t * (N.z | N.y) +
m * (r**2 + s**2) * (N.z | N.z))
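# The expected dyadics follow the point-mass inertia formula
# I = m * (|r|**2 * U - (r | r)), with U the unit dyadic; the final assertion
# is just this expanded for r = r*N.x + s*N.y + t*N.z.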
def test_partial_velocity():
q1, q2, q3, u1, u2, u3 = dynamicsymbols('q1 q2 q3 u1 u2 u3')
u4, u5 = dynamicsymbols('u4, u5')
r = symbols('r')
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
L = Y.orientnew('L', 'Axis', [q2, Y.x])
R = L.orientnew('R', 'Axis', [q3, L.y])
R.set_ang_vel(N, u1 * L.x + u2 * L.y + u3 * L.z)
C = Point('C')
C.set_vel(N, u4 * L.x + u5 * (Y.z ^ L.x))
Dmc = C.locatenew('Dmc', r * L.z)
Dmc.v2pt_theory(C, N, R)
vel_list = [Dmc.vel(N), C.vel(N), R.ang_vel_in(N)]
u_list = [u1, u2, u3, u4, u5]
assert (partial_velocity(vel_list, u_list, N) ==
[[- r*L.y, r*L.x, 0, L.x, cos(q2)*L.y - sin(q2)*L.z],
[0, 0, 0, L.x, cos(q2)*L.y - sin(q2)*L.z],
[L.x, L.y, L.z, 0, 0]])
def test_linear_momentum():
N = ReferenceFrame('N')
Ac = Point('Ac')
Ac.set_vel(N, 25 * N.y)
I = outer(N.x, N.x)
A = RigidBody('A', Ac, N, 20, (I, Ac))
P = Point('P')
Pa = Particle('Pa', P, 1)
Pa.point.set_vel(N, 10 * N.x)
assert linear_momentum(N, A, Pa) == 10 * N.x + 500 * N.y
def test_angular_momentum_and_linear_momentum():
m, M, l1 = symbols('m M l1')
q1d = dynamicsymbols('q1d')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, q1d * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
assert linear_momentum(
N, A, Pa) == 2 * m * q1d* l1 * N.y + M * l1 * q1d * N.y
assert angular_momentum(
O, N, A, Pa) == 4 * m * q1d * l1**2 * N.z + q1d * N.z
def test_kinetic_energy():
m, M, l1 = symbols('m M l1')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
assert 0 == kinetic_energy(N, Pa, A) - (M*l1**2*omega**2/2
+ 2*l1**2*m*omega**2 + omega**2/2)
def test_potential_energy():
m, M, l1, g, h, H = symbols('m M l1 g h H')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
O = Point('O')
O.set_vel(N, 0 * N.x)
Ac = O.locatenew('Ac', l1 * N.x)
P = Ac.locatenew('P', l1 * N.x)
a = ReferenceFrame('a')
a.set_ang_vel(N, omega * N.z)
Ac.v2pt_theory(O, N, a)
P.v2pt_theory(O, N, a)
Pa = Particle('Pa', P, m)
I = outer(N.z, N.z)
A = RigidBody('A', Ac, a, M, (I, Ac))
Pa.set_potential_energy(m * g * h)
A.set_potential_energy(M * g * H)
assert potential_energy(A, Pa) == m * g * h + M * g * H
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Virtual power driver
from oslo.config import cfg
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
import nova.virt.powervm.common as connection
opts = [
cfg.StrOpt('virtual_power_ssh_host',
default='',
               help='IP address or name of the virtual power host'),
cfg.StrOpt('virtual_power_ssh_port',
default='22',
help='Port to use for ssh to virtual power host'),
cfg.StrOpt('virtual_power_type',
default='virsh',
               help='base command to use for virtual power (vbox, virsh)'),
cfg.StrOpt('virtual_power_host_user',
default='',
help='user to execute virtual power commands as'),
cfg.StrOpt('virtual_power_host_pass',
default='',
help='password for virtual power host_user'),
cfg.StrOpt('virtual_power_host_key',
default=None,
help='ssh key for virtual power host_user'),
]
baremetal_vp = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_vp)
CONF.register_opts(opts, baremetal_vp)
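# Example nova.conf fragment consumed by the options above (values are
# illustrative):
#
#   [baremetal]
#   virtual_power_ssh_host = 192.168.1.10
#   virtual_power_type = virsh
#   virtual_power_host_user = stack
#   virtual_power_host_key = /home/stack/.ssh/id_rsa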
_conn = None
_virtual_power_settings = None
_vp_cmd = None
_cmds = None
LOG = logging.getLogger(__name__)
class VirtualPowerManager(base.PowerManager):
"""Virtual Power Driver for Baremetal Nova Compute
    This PowerManager class provides a mechanism for controlling the power
    state of VMs based on their name and MAC address. It uses SSH to connect
    to the VM's host and issue commands.
    Nodes are matched based on MAC address.
NOTE: for use in dev/test environments only!
"""
def __init__(self, **kwargs):
global _conn
global _virtual_power_settings
global _cmds
if _cmds is None:
LOG.debug("Setting up %s commands." %
CONF.baremetal.virtual_power_type)
_vpc = 'nova.virt.baremetal.virtual_power_driver_settings.%s' % \
CONF.baremetal.virtual_power_type
_cmds = importutils.import_class(_vpc)
self._vp_cmd = _cmds()
self.connection_data = _conn
node = kwargs.pop('node', {})
instance = kwargs.pop('instance', {})
self._node_name = instance.get('hostname', "")
self._mac_address = node.get('prov_mac_address', "")
self._mac_address = self._mac_address.replace(':', '')
self._connection = None
self._matched_name = ''
self.state = None
def _get_conn(self):
if not CONF.baremetal.virtual_power_ssh_host:
raise exception.NovaException(
                _('virtual_power_ssh_host not defined. Cannot start.'))
if not CONF.baremetal.virtual_power_host_user:
raise exception.NovaException(
                _('virtual_power_host_user not defined. Cannot start.'))
if not CONF.baremetal.virtual_power_host_pass:
# it is ok to not have a password if you have a keyfile
if CONF.baremetal.virtual_power_host_key is None:
raise exception.NovaException(
                    _('virtual_power_host_pass/key not set. Cannot start.'))
_conn = connection.Connection(
CONF.baremetal.virtual_power_ssh_host,
CONF.baremetal.virtual_power_host_user,
CONF.baremetal.virtual_power_host_pass,
CONF.baremetal.virtual_power_ssh_port,
CONF.baremetal.virtual_power_host_key)
return _conn
def _set_connection(self):
if self._connection is None:
if self.connection_data is None:
self.connection_data = self._get_conn()
self._connection = connection.ssh_connect(self.connection_data)
def _get_full_node_list(self):
LOG.debug("Getting full node list.")
cmd = self._vp_cmd.list_cmd
full_list = self._run_command(cmd)
return full_list
def _check_for_node(self):
LOG.debug("Looking up Name for Mac address %s." % self._mac_address)
self._matched_name = ''
full_node_list = self._get_full_node_list()
for node in full_node_list:
cmd = self._vp_cmd.get_node_macs.replace('{_NodeName_}', node)
mac_address_list = self._run_command(cmd)
for mac in mac_address_list:
if self._mac_address.lower() in mac.lower():
self._matched_name = ('"%s"' % node)
break
if self._matched_name:
break
return self._matched_name
def activate_node(self):
LOG.info("activate_node name %s" % self._node_name)
if self._check_for_node():
cmd = self._vp_cmd.start_cmd
self._run_command(cmd)
if self.is_power_on():
self.state = baremetal_states.ACTIVE
else:
self.state = baremetal_states.ERROR
return self.state
def reboot_node(self):
LOG.info("reset node: %s" % self._node_name)
if self._check_for_node():
cmd = self._vp_cmd.reboot_cmd
self._run_command(cmd)
if self.is_power_on():
self.state = baremetal_states.ACTIVE
else:
self.state = baremetal_states.ERROR
return self.state
def deactivate_node(self):
LOG.info("deactivate_node name %s" % self._node_name)
if self._check_for_node():
if self.is_power_on():
cmd = self._vp_cmd.stop_cmd
self._run_command(cmd)
if self.is_power_on():
self.state = baremetal_states.ERROR
else:
self.state = baremetal_states.DELETED
return self.state
def is_power_on(self):
LOG.debug("Checking if %s is running" % self._node_name)
if not self._check_for_node():
return False
cmd = self._vp_cmd.list_running_cmd
running_node_list = self._run_command(cmd)
for node in running_node_list:
if self._matched_name in node:
return True
return False
def start_console(self):
pass
def stop_console(self):
pass
def _run_command(self, cmd, check_exit_code=True):
"""Run a remote command using an active ssh connection.
:param command: String with the command to run.
If {_NodeName_} is in the command it will get replaced by
the _matched_name value.
base_cmd will also get prepended to the command.
"""
self._set_connection()
cmd = cmd.replace('{_NodeName_}', self._matched_name)
cmd = '%s %s' % (self._vp_cmd.base_cmd, cmd)
try:
stdout, stderr = utils.ssh_execute(self._connection, cmd,
check_exit_code=check_exit_code)
result = stdout.strip().splitlines()
LOG.debug('Result for run_command: %s' % result)
except exception.ProcessExecutionError:
result = []
LOG.exception("Error running command: %s" % cmd)
return result
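# Illustrative usage sketch, not part of the original driver: the node and
# instance dicts below are hypothetical, and in a real deployment Nova's
# baremetal layer constructs and drives the PowerManager itself.
#
#   pm = VirtualPowerManager(
#       node={'prov_mac_address': 'aa:bb:cc:dd:ee:ff'},
#       instance={'hostname': 'test-vm'})
#   if pm.activate_node() == baremetal_states.ACTIVE:
#       assert pm.is_power_on()
#   pm.deactivate_node()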
|
|
from scipy.linalg import norm
import numpy as np
from .result import LucasKanadeAlgorithmResult
# TODO: implement Inverse Additive Algorithm?
# TODO: implement sampling?
class LucasKanade(object):
r"""
Abstract class for a Lucas-Kanade optimization algorithm.
Parameters
----------
template : `menpo.image.Image` or subclass
The image template.
transform : `subclass` of :map:`DP` and :map:`DX`, optional
A differential affine transform object, e.g.
:map:`DifferentiableAlignmentAffine`.
residual : `class` subclass, optional
The residual that will get applied. All possible residuals are:
========================== ============================================
Class Description
========================== ============================================
:map:`SSD` Sum of Squared Differences
:map:`FourierSSD` Sum of Squared Differences on Fourier domain
:map:`ECC` Enhanced Correlation Coefficient
:map:`GradientImages` Image Gradient
:map:`GradientCorrelation` Gradient Correlation
========================== ============================================
eps : `float`, optional
Value for checking the convergence of the optimization.
"""
def __init__(self, template, transform, residual, eps=10**-10):
self.template = template
self.transform = transform
self.residual = residual
self.eps = eps
def warped_images(self, image, shapes):
r"""
Given an input test image and a list of shapes, it warps the image
into the shapes. This is useful for generating the warped images of a
fitting procedure stored within a :map:`LucasKanadeResult`.
Parameters
----------
image : `menpo.image.Image` or `subclass`
The input image to be warped.
shapes : `list` of `menpo.shape.PointCloud`
The list of shapes in which the image will be warped. The shapes
are obtained during the iterations of a fitting procedure.
Returns
-------
warped_images : `list` of `menpo.image.MaskedImage` or `ndarray`
The warped images.
"""
warped_images = []
for s in shapes:
self.transform.set_target(s)
warped_images.append(image.warp_to_mask(
self.template.mask, self.transform, warp_landmarks=False))
return warped_images
class ForwardAdditive(LucasKanade):
r"""
Forward Additive (FA) Lucas-Kanade algorithm.
"""
def run(self, image, initial_shape, gt_shape=None, max_iters=20,
return_costs=False):
r"""
Execute the optimization algorithm.
Parameters
----------
image : `menpo.image.Image`
The input test image.
initial_shape : `menpo.shape.PointCloud`
The initial shape from which the optimization will start.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape of the image. It is only needed in order
to get passed in the optimization result object, which has the
ability to compute the fitting error.
max_iters : `int`, optional
The maximum number of iterations. Note that the algorithm may
converge, and thus stop, earlier.
return_costs : `bool`, optional
If ``True``, then the cost function values will be computed
during the fitting procedure. Then these cost values will be
assigned to the returned `fitting_result`. *Note that the costs
computation increases the computational cost of the fitting. The
additional computation cost depends on the fitting method. Only
use this option for research purposes.*
Returns
-------
fitting_result : :map:`LucasKanadeAlgorithmResult`
The parametric iterative fitting result.
"""
# initialize transform
self.transform.set_target(initial_shape)
p_list = [self.transform.as_vector()]
shapes = [self.transform.target]
costs = None
if return_costs:
costs = []
# initialize iteration counter and epsilon
k = 0
eps = np.Inf
        # Forward Additive Algorithm
while k < max_iters and eps > self.eps:
# warp image
IWxp = image.warp_to_mask(self.template.mask, self.transform,
warp_landmarks=False)
# compute warp jacobian
dW_dp = np.rollaxis(
self.transform.d_dp(self.template.indices()), -1)
dW_dp = dW_dp.reshape(dW_dp.shape[:1] + self.template.shape +
dW_dp.shape[-1:])
# compute steepest descent images
filtered_J, J = self.residual.steepest_descent_images(
image, dW_dp, forward=(self.template, self.transform))
# compute hessian
H = self.residual.hessian(filtered_J, sdi2=J)
# compute steepest descent parameter updates.
sd_dp = self.residual.steepest_descent_update(
filtered_J, IWxp, self.template)
# compute gradient descent parameter updates
dp = -np.real(np.linalg.solve(H, sd_dp))
# Update warp weights
self.transform.from_vector_inplace(self.transform.as_vector() + dp)
p_list.append(self.transform.as_vector())
shapes.append(self.transform.target)
# update costs
if return_costs:
costs.append(self.residual.cost_closure())
# test convergence
eps = np.abs(norm(dp))
# increase iteration counter
k += 1
# return algorithm result
return LucasKanadeAlgorithmResult(
shapes=shapes, homogeneous_parameters=p_list,
initial_shape=initial_shape, image=image, gt_shape=gt_shape,
costs=costs)
def __str__(self):
return "Forward Additive Algorithm"
class ForwardCompositional(LucasKanade):
r"""
Forward Compositional (FC) Lucas-Kanade algorithm
Parameters
----------
template : `menpo.image.Image` or subclass
The image template.
transform : `subclass` of :map:`DP` and :map:`DX`, optional
A differential affine transform object, e.g.
:map:`DifferentiableAlignmentAffine`.
residual : `class` subclass, optional
The residual that will get applied. All possible residuals are:
========================== ============================================
Class Description
========================== ============================================
:map:`SSD` Sum of Squared Differences
:map:`FourierSSD` Sum of Squared Differences on Fourier domain
:map:`ECC` Enhanced Correlation Coefficient
:map:`GradientImages` Image Gradient
:map:`GradientCorrelation` Gradient Correlation
========================== ============================================
eps : `float`, optional
Value for checking the convergence of the optimization.
"""
def __init__(self, template, transform, residual, eps=10**-10):
super(ForwardCompositional, self).__init__(
template, transform, residual, eps=eps)
self._precompute()
def _precompute(self):
# compute warp jacobian
dW_dp = np.rollaxis(
self.transform.d_dp(self.template.indices()), -1)
self.dW_dp = dW_dp.reshape(dW_dp.shape[:1] + self.template.shape +
dW_dp.shape[-1:])
def run(self, image, initial_shape, gt_shape=None, max_iters=20,
return_costs=False):
r"""
Execute the optimization algorithm.
Parameters
----------
image : `menpo.image.Image`
The input test image.
initial_shape : `menpo.shape.PointCloud`
The initial shape from which the optimization will start.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape of the image. It is only needed in order
to get passed in the optimization result object, which has the
ability to compute the fitting error.
max_iters : `int`, optional
The maximum number of iterations. Note that the algorithm may
converge, and thus stop, earlier.
return_costs : `bool`, optional
If ``True``, then the cost function values will be computed
during the fitting procedure. Then these cost values will be
assigned to the returned `fitting_result`. *Note that the costs
computation increases the computational cost of the fitting. The
additional computation cost depends on the fitting method. Only
use this option for research purposes.*
Returns
-------
fitting_result : :map:`LucasKanadeAlgorithmResult`
The parametric iterative fitting result.
"""
# initialize transform
self.transform.set_target(initial_shape)
p_list = [self.transform.as_vector()]
shapes = [self.transform.target]
costs = None
if return_costs:
costs = []
# initialize iteration counter and epsilon
k = 0
eps = np.Inf
# Forward Compositional Algorithm
while k < max_iters and eps > self.eps:
# warp image
IWxp = image.warp_to_mask(self.template.mask, self.transform,
warp_landmarks=False)
# compute steepest descent images
filtered_J, J = self.residual.steepest_descent_images(
IWxp, self.dW_dp)
# compute hessian
H = self.residual.hessian(filtered_J, sdi2=J)
# compute steepest descent parameter updates.
sd_dp = self.residual.steepest_descent_update(
filtered_J, IWxp, self.template)
# compute gradient descent parameter updates
dp = -np.real(np.linalg.solve(H, sd_dp))
# Update warp weights
self.transform.compose_after_from_vector_inplace(dp)
p_list.append(self.transform.as_vector())
shapes.append(self.transform.target)
# update cost
if return_costs:
costs.append(self.residual.cost_closure())
# test convergence
eps = np.abs(norm(dp))
# increase iteration counter
k += 1
# return algorithm result
return LucasKanadeAlgorithmResult(
shapes=shapes, homogeneous_parameters=p_list,
initial_shape=initial_shape, image=image, gt_shape=gt_shape,
costs=costs)
def __str__(self):
return "Forward Compositional Algorithm"
class InverseCompositional(LucasKanade):
r"""
Inverse Compositional (IC) Lucas-Kanade algorithm
Parameters
----------
template : `menpo.image.Image` or subclass
The image template.
transform : `subclass` of :map:`DP` and :map:`DX`, optional
A differential affine transform object, e.g.
:map:`DifferentiableAlignmentAffine`.
residual : `class` subclass, optional
The residual that will get applied. All possible residuals are:
========================== ============================================
Class Description
========================== ============================================
:map:`SSD` Sum of Squared Differences
:map:`FourierSSD` Sum of Squared Differences on Fourier domain
:map:`ECC` Enhanced Correlation Coefficient
:map:`GradientImages` Image Gradient
:map:`GradientCorrelation` Gradient Correlation
========================== ============================================
eps : `float`, optional
Value for checking the convergence of the optimization.
"""
def __init__(self, template, transform, residual, eps=10**-10):
super(InverseCompositional, self).__init__(
template, transform, residual, eps=eps)
self._precompute()
def _precompute(self):
# compute warp jacobian
dW_dp = np.rollaxis(self.transform.d_dp(self.template.indices()), -1)
dW_dp = dW_dp.reshape(dW_dp.shape[:1] + self.template.shape +
dW_dp.shape[-1:])
# compute steepest descent images
self.filtered_J, J = self.residual.steepest_descent_images(
self.template, dW_dp)
# compute hessian
self.H = self.residual.hessian(self.filtered_J, sdi2=J)
def run(self, image, initial_shape, gt_shape=None, max_iters=20,
return_costs=False):
r"""
Execute the optimization algorithm.
Parameters
----------
image : `menpo.image.Image`
The input test image.
initial_shape : `menpo.shape.PointCloud`
The initial shape from which the optimization will start.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape of the image. It is only needed in order
to get passed in the optimization result object, which has the
ability to compute the fitting error.
max_iters : `int`, optional
The maximum number of iterations. Note that the algorithm may
converge, and thus stop, earlier.
return_costs : `bool`, optional
If ``True``, then the cost function values will be computed
during the fitting procedure. Then these cost values will be
assigned to the returned `fitting_result`. *Note that the costs
computation increases the computational cost of the fitting. The
additional computation cost depends on the fitting method. Only
use this option for research purposes.*
Returns
-------
fitting_result : :map:`LucasKanadeAlgorithmResult`
The parametric iterative fitting result.
"""
# initialize transform
self.transform.set_target(initial_shape)
p_list = [self.transform.as_vector()]
shapes = [self.transform.target]
costs = None
if return_costs:
costs = []
# initialize iteration counter and epsilon
k = 0
eps = np.Inf
# Baker-Matthews, Inverse Compositional Algorithm
while k < max_iters and eps > self.eps:
# warp image
IWxp = image.warp_to_mask(self.template.mask, self.transform,
warp_landmarks=False)
# compute steepest descent parameter updates.
sd_dp = self.residual.steepest_descent_update(
self.filtered_J, IWxp, self.template)
# compute gradient descent parameter updates
dp = np.real(np.linalg.solve(self.H, sd_dp))
# update warp
inv_dp = self.transform.pseudoinverse_vector(dp)
self.transform.compose_after_from_vector_inplace(inv_dp)
p_list.append(self.transform.as_vector())
shapes.append(self.transform.target)
# update cost
if return_costs:
costs.append(self.residual.cost_closure())
# test convergence
eps = np.abs(norm(dp))
# increase iteration counter
k += 1
# return algorithm result
return LucasKanadeAlgorithmResult(
shapes=shapes, homogeneous_parameters=p_list,
initial_shape=initial_shape, image=image, gt_shape=gt_shape,
costs=costs)
def __str__(self):
return "Inverse Compositional Algorithm"
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for backpropagation using the tape utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import imperative_grad
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_op_attr_type_cache = {}
def op_attr_type(op_type, attr_name):
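  """Returns the attr type of `attr_name` on ops of type `op_type` (cached)."""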
try:
return _op_attr_type_cache[(op_type, attr_name)]
except KeyError:
h = context.context()._handle # pylint: disable=protected-access
attr_type = pywrap_tensorflow.TFE_OpNameGetAttrType(h, op_type, attr_name)
_op_attr_type_cache[(op_type, attr_name)] = attr_type
return attr_type
def make_attr(attr_type, value):
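  """Converts `value` into the form expected for attrs of type `attr_type`."""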
if attr_type == pywrap_tensorflow.TF_ATTR_TYPE:
return dtypes.as_dtype(value)
elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]:
return [dtypes.as_dtype(v) for v in value]
elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE:
return tensor_shape.as_shape(value).as_proto()
elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]:
return [tensor_shape.as_shape(v).as_proto() for v in value]
return value
class _MockOp(object):
"""Pretends to be a tf.Operation for the gradient functions."""
def __init__(self, attrs, inputs, outputs, typ):
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.type = typ
def get_attr(self, attr):
typ = op_attr_type(self.type, attr)
for i in range(0, len(self.attrs), 2):
if self.attrs[i] == attr:
return make_attr(typ, self.attrs[i + 1])
raise KeyError(attr)
def _get_control_flow_context(self):
raise NotImplementedError(
"tf.GradientTape.gradients() does not support graph control flow "
"operations like tf.cond or tf.while at this time. Use tf.gradients() "
"instead. If you need this feature, please file a feature request at "
"https://github.com/tensorflow/tensorflow/issues/new"
)
def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,
out_grads):
"""Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
Returns:
The gradients with respect to the inputs of the function, as a list.
"""
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name)
grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access
if grad_fn is None:
return [None] * num_inputs
return grad_fn(mock_op, *out_grads)
pywrap_tensorflow.TFE_Py_RegisterGradientFunction(_gradient_function)
_tracing = False
# TODO(agarwal): use an automatic mechanism for handling None arguments to
# gradient functions.
# Some gradient functions can accept None arguments for gradients. The following
# maps the operation name to the indices at which the corresponding gradient
# function can accept None values.
# e.g. FusedBatchNorm outputs 5 values and hence receives 5 gradient values
# during backprop. However the gradient function uses only the first of those
# values and ignores the rest. The entry, "FusedBatchNorm": [1, 2, 3, 4],
# indicates that only the gradient corresponding to index 0 is used, and the
# gradient values at indices 1-4 are ignored (and hence can be None). The
# backprop algorithm can then leverage this by not constructing zeros to
# pass for those indices.
_grad_fn_accepts_none_for_indices = {
"SoftmaxCrossEntropyWithLogits": [1],
"FusedBatchNorm": [1, 2, 3, 4]
}
def _record_gradient(op_name, inputs, attrs, results, name):
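  """Hook that records an op execution on the active tape via the C API."""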
return pywrap_tensorflow.TFE_Py_RecordGradient(op_name, inputs, attrs,
results, name)
execute.record_gradient = _record_gradient
def implicit_val_and_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the value and the gradient of f when called with
the same arguments. The gradient is with respect to all trainable TFE
variables accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
val_grad_fn = tfe.implicit_value_and_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
value, grads_and_vars = val_grad_fn(x, y)
print('Value of loss: %s' % value)
# Apply the gradients to Variables.
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
    A function which, when called, returns a two-element tuple.
    Its first element is the value to which the function evaluates.
    Its second element is a list of (gradient, variable) pairs.
Raises:
ValueError: if `f` returns None.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
this_tape = tape.push_new_tape()
try:
end_node = f(*args, **kwds)
if end_node is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
finally:
tape.pop_tape(this_tape)
# Note: variables are returned in construction order. This ensures unique
# order across executions.
variables = this_tape.watched_variables()
if not variables:
raise ValueError("No trainable variables were accessed while the "
"function was being computed.")
sources = [v.handle for v in variables]
grad = imperative_grad.imperative_grad(_default_vspace,
this_tape,
nest.flatten(end_node),
sources)
return end_node, list(zip(grad, variables))
return grad_fn
def implicit_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the gradient of f when called with the same
arguments. The gradient is with respect to all trainable TFE variables
accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
grad_fn = tfe.implicit_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
grads_and_vars = grad_fn(x, y)
# Apply the gradients to Variables.
optimizer = tf.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a list of (gradient, variable) pairs.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
return implicit_val_and_grad(f)(*args, **kwds)[1]
return grad_fn
def _get_arg_spec(f, params, param_args):
"""The positions of the parameters of f to be differentiated in param_args."""
try:
args = tf_inspect.getargspec(f).args
except TypeError as e:
# TypeError can happen when f is a callable object.
if params is None:
return range(len(param_args))
elif all(isinstance(x, int) for x in params):
return params
raise ValueError("Either callable provided is not a function or could not "
"inspect its arguments by name: %s. Original error: %s"
% (f, e))
if params is None:
if not args:
return range(len(param_args))
return range(len(args))
elif all(isinstance(x, six.string_types) for x in params):
return [args.index(n) for n in params]
elif all(isinstance(x, int) for x in params):
return params
else:
raise ValueError(
"params must be all strings or all integers; got %s." % params)
def gradients_function(f, params=None):
"""Returns a function which differentiates f with respect to params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
# The 2nd order derivatives with respect to x is:
# d^2 f / (dx)^2 = 6 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns 1st order gradients.
grad_fn = tfe.gradients_function(f)
x = 2.0
y = 3.0
# Invoke the 1st order gradient function.
x_grad, y_grad = grad_fn(x, y)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# Obtain a function that returns the 2nd order gradient with respect to x.
gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])
# Invoke the 2nd order gradient function.
x_gradgrad = gradgrad_fn(x, y)[0]
assert x_gradgrad.numpy() == 6 * 2 * 3
# To obtain a callable that returns the gradient(s) of `f` with respect to a
# subset of its inputs, use the `params` keyword argument with
# `gradients_function()`.
ygrad_fn = tfe.gradients_function(f, params=[1])
(y_grad,) = ygrad_fn(x, y)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Note that only tensors with real or complex dtypes are differentiable.
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing None
differentiates with respect to all parameters.
Returns:
    function which, when called, returns the gradient of f with respect to
    all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the gradient of the decorated function."""
_, grad = val_and_grad_function(f, params=params)(*args, **kwds)
return grad
return decorated
def _ensure_unique_tensor_objects(parameter_positions, args):
"""Make each of the parameter_positions in args a unique ops.Tensor object.
Ensure that each parameter is treated independently.
For example:
def f(x, y): return x * y
g = gradients_function(f)
one = tf.constant(1.)
g(one, one) should return [1., 1.]
(even though the two arguments are the same Tensor object).
Args:
parameter_positions: List of indices into args defining the arguments to
differentiate against.
args: A list of arguments to the function to be differentiated.
Returns:
args, possibly edited in-place.
"""
s = set()
for (i, t) in enumerate(args):
if i in parameter_positions:
tid = ops.tensor_id(t)
if tid in s:
args[i] = gen_array_ops.identity(args[i])
else:
s.add(tid)
return args
def val_and_grad_function(f, params=None):
"""Returns a function that computes f and its derivative w.r.t. params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns the function value and the 1st order
# gradients.
val_grads_fn = tfe.value_and_gradients_function(f)
x = 2.0
y = 3.0
# Invoke the value-and-gradients function.
f_val, (x_grad, y_grad) = val_grads_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# To obtain a callable that returns the value of `f` and the gradient(s) of
# `f` with respect to a subset of its inputs, use the `params` keyword
# argument with `value_and_gradients_function()`.
val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])
f_val, (y_grad,) = val_ygrad_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing `None`
differentiates with respect to all parameters.
  Returns:
    function which, when called, returns the value of f and the gradient
    of f with respect to all of `params`. The function takes an extra optional
    keyword argument "dy". Setting it allows computation of vector jacobian
    products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
dy = kwds.pop("dy", None)
if kwds:
raise ValueError("Functions to be differentiated cannot "
"receive keyword arguments.")
val, vjp = make_vjp(f, params)(*args, **kwds)
return val, vjp(dy=dy)
return decorated
def make_vjp(f, params=None, persistent=True):
"""Returns a function that computes f and is vjp w.r.t. params.
The term "vjp" here is an abbreviation for vector-jacobian product.
Args:
f: the function to be differentiated.
params: the parameters (numbers or names) to differentiate with respect to.
A value of None will differentiate with respect to all parameters.
persistent: Boolean controlling whether the VJP function can be re-used.
Must be True or False.
Returns:
A function, which when called, returns a tuple (value, vjp), where:
- value is the result of calling f.
- vjp is a function, which takes a vector as an argument and
returns the product of that vector with the Jacobian of f.
Providing no argument to vjp is equivalent to providing a
vector of ones.
For example,
```python
def f(x):
return x * x
wrapped_fn = tfe.make_vjp(f)
result, vjp = wrapped_fn(tf.constant(3.0))
# result is 9.0
  vjp()  # the vjp function returns 6.0
  ```
Raises:
ValueError: if `f` returns None.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
parameter_positions = _get_arg_spec(f, params, args)
assert not kwds, "The gradient function can't take keyword arguments."
this_tape = tape.push_new_tape(persistent=persistent)
try:
sources = []
args = [
ops.convert_to_tensor(args[i])
if i in parameter_positions else args[i]
for i in range(len(args))
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
sources.append(args[i])
tape.watch(args[i])
result = f(*args)
if result is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
flat_result = nest.flatten(result)
flat_result = [gen_array_ops.identity(x) for x in flat_result]
result = nest.pack_sequence_as(result, flat_result)
finally:
tape.pop_tape(this_tape)
def vjp(dy=None):
if dy is not None:
dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
return imperative_grad.imperative_grad(
_default_vspace, this_tape, nest.flatten(result), sources,
output_gradients=dy)
return result, vjp
return decorated
def _aggregate_grads(gradients):
"""Aggregate gradients from multiple sources.
Args:
gradients: A list of 'Tensor' or 'IndexedSlices' gradients.
Returns:
If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.
Otherwise returns an aggregated 'IndexedSlices'.
"""
assert gradients, "No gradients to aggregate"
if len(gradients) == 1:
return gradients[0]
if all([isinstance(g, ops.Tensor) for g in gradients]):
return math_ops.add_n(gradients)
else:
assert all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in gradients])
indexed_slices_list = []
for grad in gradients:
# TODO(xpan): Support nested IndexedSlices and core IndexedSlices
if isinstance(grad, ops.Tensor):
indexed_slices = ops.IndexedSlices(
grad,
math_ops.range(grad.shape[0]),
constant_op.constant(grad.shape.as_list()))
indexed_slices_list.append(indexed_slices)
else:
indexed_slices_list.append(grad)
# Dense shapes from all gradients should be the same.
dense_shape = indexed_slices_list[0].dense_shape
# For simplicity now, always cast to int64.
indices = array_ops.concat([math_ops.cast(x.indices, dtypes.int64)
for x in indexed_slices_list], 0)
values = array_ops.concat([x.values for x in indexed_slices_list], 0)
return ops.IndexedSlices(values, indices, dense_shape)
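# Illustrative note, not from the original source: aggregating a dense [3, 2]
# gradient with an IndexedSlices gradient for row 1 of the same variable yields
# an IndexedSlices with indices [0, 1, 2, 1]; summing the duplicate index is
# left to whatever later consumes (e.g. densifies) the IndexedSlices.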
def _num_elements(grad):
"""The number of elements in the `grad` tensor."""
if isinstance(grad, ops.Tensor):
return functools.reduce(operator.mul, grad._shape_tuple(), 1) # pylint: disable=protected-access
if isinstance(grad, ops.IndexedSlices):
return functools.reduce(operator.mul, grad.values._shape_tuple(), 1) # pylint: disable=protected-access
raise ValueError("`grad` not a Tensor or IndexedSlices.")
_zeros_cache = context._TensorCache() # pylint: disable=protected-access
def _fast_fill(value, shape, dtype):
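  """Returns a tensor of `shape` filled with `value`, cast to `dtype`."""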
return array_ops.fill(shape, constant_op.constant(value, dtype=dtype))
def _zeros(shape, dtype):
"""Wraps array_ops.zeros to cache last zero for a given shape and dtype."""
device = context.context().device_name
if dtype == dtypes.variant:
# TODO(apassos): need to save enough information about variant tensors to do
# a zeros
return None
# pylint: disable=protected-access
cache_key = shape, dtype, device, context.context()._eager_context.mode
# pylint: enable=protected-access
cached = _zeros_cache.get(cache_key)
if cached is None:
cached = _fast_fill(0, shape, dtype)
_zeros_cache.put(cache_key, cached)
return cached
def _ones(shape, dtype):
if shape == (): # pylint: disable=g-explicit-bool-comparison
return constant_op.constant(1, dtype=dtype)
return _fast_fill(1, shape, dtype)
_default_vspace = imperative_grad.VSpace(
num_elements_fn=_num_elements,
aggregate_fn=_aggregate_grads,
tensor_id=ops.tensor_id,
zeros=_zeros,
ones=_ones)
def _handle_or_self(x):
"""If x is ResourceVariable, return its handle, else x."""
if isinstance(x, resource_variable_ops.ResourceVariable):
x = x.handle
return x
@tf_export("GradientTape")
class GradientTape(object):
"""Record operations for automatic differentiation.
Operations are recorded if they are executed within this context manager and
at least one of their inputs is being "watched".
Trainable variables (created by `tf.contrib.eager.Variable` or
  @{tf.get_variable}, where trainable=True is the default in both cases) are automatically
watched. Tensors can be manually watched by invoking the `watch` method on
this context manager.
For example, consider the function `y = x * x`. The gradient at `x = 3.0` can
be computed as:
```python
x = tf.constant(3.0)
with tf.GradientTape() as g:
g.watch(x)
y = x * x
dy_dx = g.gradient(y, x) # Will compute to 6.0
```
GradientTapes can be nested to compute higher-order derivatives. For example,
```python
x = tf.constant(3.0)
with tf.GradientTape() as g:
with tf.GradientTape() as gg:
gg.watch(x)
y = x * x
dy_dx = gg.gradient(y, x) # Will compute to 6.0
d2y_dx2 = g.gradient(dy_dx, x) # Will compute to 2.0
```
By default, the resources held by a GradientTape are released as soon as
  the GradientTape.gradient() method is called. To compute multiple gradients
  over
the same computation, create a persistent gradient tape. This allows multiple
calls to the gradient() method as resources are released when the tape object
is garbage collected. For example:
```python
x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as g:
g.watch(x)
y = x * x
z = y * y
dz_dx = g.gradient(z, x) # 108.0 (4*x^3 at x = 3)
dy_dx = g.gradient(y, x) # 6.0
del g # Drop the reference to the tape
```
Note that only tensors with real or complex dtypes are differentiable.
"""
def __init__(self, persistent=False):
"""Creates a new GradientTape.
Args:
persistent: Boolean controlling whether a persistent gradient tape
is created. False by default, which means at most one call can
be made to the gradient() method on this object.
"""
self._tape = None
self._persistent = persistent
self._recording = False
def __enter__(self):
"""Enters a context inside which operations are recorded on this tape."""
self._push_tape()
return self
def __exit__(self, typ, value, traceback):
"""Exits the recording context, no further operations are traced."""
if self._recording:
self._pop_tape()
def _push_tape(self, existing_tape=False):
if self._recording:
raise ValueError("Tape is already recording.")
if existing_tape:
if self._tape is None:
raise ValueError("There is no existing tape.")
tape.push_tape(self._tape)
else:
self._tape = tape.push_new_tape(persistent=self._persistent)
self._recording = True
def _pop_tape(self):
if not self._recording:
raise ValueError("Tape is not recording.")
tape.pop_tape(self._tape)
self._recording = False
def watch(self, tensor):
"""Ensures that `tensor` is being traced by this tape.
Args:
tensor: a Tensor or list of Tensors.
"""
for t in nest.flatten(tensor):
tape.watch(_handle_or_self(t))
@tf_contextlib.contextmanager
def stop_recording(self):
"""Temporarily stops recording operations on this tape.
Operations executed while this context manager is active will not be
recorded on the tape. This is useful for reducing the memory used by tracing
all computations.
For example:
```
with tf.GradientTape(persistent=True) as t:
loss = compute_loss(model)
with t.stop_recording():
# The gradient computation below is not traced, saving memory.
grads = t.gradient(loss, model.variables)
```
Yields:
None
Raises:
RuntimeError: if the tape is not currently recording.
"""
if self._tape is None:
raise RuntimeError(
"Trying to stop recording a tape which is not recording.")
self._pop_tape()
try:
yield
finally:
self._push_tape(existing_tape=True)
def reset(self):
"""Clears all information stored in this tape.
Equivalent to exiting and reentering the tape context manager with a new
tape. For example, the two following code blocks are equivalent:
```
with tf.GradientTape() as t:
loss = loss_fn()
with tf.GradientTape() as t:
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
# The following is equivalent to the above
with tf.GradientTape() as t:
loss = loss_fn()
t.reset()
loss += other_loss_fn()
t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn
```
This is useful if you don't want to exit the context manager for the tape,
or can't because the desired reset point is inside a control flow construct:
```
with tf.GradientTape() as t:
loss = ...
if loss > k:
t.reset()
```
"""
self._pop_tape()
self._push_tape()
def watched_variables(self):
"""Returns variables watched by this tape in order of construction."""
return self._tape.watched_variables()
def gradient(self, target, sources, output_gradients=None):
"""Computes the gradient using operations recorded in context of this tape.
Args:
target: Tensor (or list of tensors) to be differentiated.
sources: a list or nested structure of Tensors or Variables. `target`
will be differentiated against elements in `sources`.
output_gradients: a list of gradients, one for each element of
target. Defaults to None.
Returns:
a list or nested structure of Tensors (or IndexedSlices, or None),
one for each element in `sources`. Returned structure is the same as
the structure of `sources`.
Raises:
RuntimeError: if called inside the context of the tape, or if called more
than once on a non-persistent tape.
"""
if self._tape is None:
raise RuntimeError("GradientTape.gradient can only be called once on "
"non-persistent tapes.")
if self._recording:
if not self._persistent:
self._pop_tape()
else:
logging.log_first_n(logging.WARN,
"Calling GradientTape.gradient on a persistent "
"tape inside it's context is significantly less "
"efficient than calling it outside the context (it "
"causes the gradient ops to be recorded on the "
"tape, leading to increased CPU and memory usage). "
"Only call GradientTape.gradient inside the "
"context if you actually want to trace the "
"gradient in order to compute higher order "
"derrivatives.", 1)
flat_sources = nest.flatten(sources)
flat_sources = [_handle_or_self(x) for x in flat_sources]
if output_gradients is not None:
output_gradients = [None if x is None else ops.convert_to_tensor(x)
for x in nest.flatten(output_gradients)]
flat_grad = imperative_grad.imperative_grad(
_default_vspace, self._tape, nest.flatten(target), flat_sources,
output_gradients=output_gradients)
if not self._persistent:
self._tape = None
grad = nest.pack_sequence_as(sources, flat_grad)
return grad
|
|
"""Support for Tesla cars."""
import asyncio
from collections import defaultdict
from datetime import timedelta
import logging
import async_timeout
import httpx
from teslajsonpy import Controller as TeslaAPI
from teslajsonpy.exceptions import IncompleteCredentials, TeslaException
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_BATTERY_CHARGING,
ATTR_BATTERY_LEVEL,
CONF_ACCESS_TOKEN,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_TOKEN,
CONF_USERNAME,
EVENT_HOMEASSISTANT_CLOSE,
HTTP_UNAUTHORIZED,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.httpx_client import SERVER_SOFTWARE, USER_AGENT
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from homeassistant.util import slugify
from .config_flow import CannotConnect, InvalidAuth, validate_input
from .const import (
CONF_EXPIRATION,
CONF_WAKE_ON_START,
DATA_LISTENER,
DEFAULT_SCAN_INTERVAL,
DEFAULT_WAKE_ON_START,
DOMAIN,
ICONS,
MIN_SCAN_INTERVAL,
PLATFORMS,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): vol.All(cv.positive_int, vol.Clamp(min=MIN_SCAN_INTERVAL)),
}
)
},
extra=vol.ALLOW_EXTRA,
)
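# Example configuration.yaml entry matching CONFIG_SCHEMA above; the values
# shown are illustrative only:
#
#   tesla:
#     username: user@example.com
#     password: !secret tesla_password
#     scan_interval: 300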
@callback
def _async_save_tokens(hass, config_entry, access_token, refresh_token):
hass.config_entries.async_update_entry(
config_entry,
data={
**config_entry.data,
CONF_ACCESS_TOKEN: access_token,
CONF_TOKEN: refresh_token,
},
)
@callback
def _async_configured_emails(hass):
"""Return a set of configured Tesla emails."""
return {
entry.data[CONF_USERNAME]
for entry in hass.config_entries.async_entries(DOMAIN)
if CONF_USERNAME in entry.data
}
async def async_setup(hass, base_config):
"""Set up of Tesla component."""
def _update_entry(email, data=None, options=None):
data = data or {}
options = options or {
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_WAKE_ON_START: DEFAULT_WAKE_ON_START,
}
for entry in hass.config_entries.async_entries(DOMAIN):
if email != entry.title:
continue
hass.config_entries.async_update_entry(entry, data=data, options=options)
config = base_config.get(DOMAIN)
if not config:
return True
email = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
scan_interval = config[CONF_SCAN_INTERVAL]
if email in _async_configured_emails(hass):
try:
info = await validate_input(hass, config)
except (CannotConnect, InvalidAuth):
return False
_update_entry(
email,
data={
CONF_USERNAME: email,
CONF_PASSWORD: password,
CONF_ACCESS_TOKEN: info[CONF_ACCESS_TOKEN],
CONF_TOKEN: info[CONF_TOKEN],
CONF_EXPIRATION: info[CONF_EXPIRATION],
},
options={CONF_SCAN_INTERVAL: scan_interval},
)
else:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: email, CONF_PASSWORD: password},
)
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][email] = {CONF_SCAN_INTERVAL: scan_interval}
return True
async def async_setup_entry(hass, config_entry):
"""Set up Tesla as config entry."""
hass.data.setdefault(DOMAIN, {})
config = config_entry.data
# Because users can have multiple accounts, we always create a new session so they have separate cookies
async_client = httpx.AsyncClient(headers={USER_AGENT: SERVER_SOFTWARE}, timeout=60)
email = config_entry.title
if email in hass.data[DOMAIN] and CONF_SCAN_INTERVAL in hass.data[DOMAIN][email]:
scan_interval = hass.data[DOMAIN][email][CONF_SCAN_INTERVAL]
hass.config_entries.async_update_entry(
config_entry, options={CONF_SCAN_INTERVAL: scan_interval}
)
hass.data[DOMAIN].pop(email)
try:
controller = TeslaAPI(
async_client,
email=config.get(CONF_USERNAME),
password=config.get(CONF_PASSWORD),
refresh_token=config[CONF_TOKEN],
access_token=config[CONF_ACCESS_TOKEN],
expiration=config.get(CONF_EXPIRATION, 0),
update_interval=config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
)
result = await controller.connect(
wake_if_asleep=config_entry.options.get(
CONF_WAKE_ON_START, DEFAULT_WAKE_ON_START
)
)
refresh_token = result["refresh_token"]
access_token = result["access_token"]
except IncompleteCredentials as ex:
await async_client.aclose()
raise ConfigEntryAuthFailed from ex
except httpx.ConnectTimeout as ex:
await async_client.aclose()
raise ConfigEntryNotReady from ex
except TeslaException as ex:
await async_client.aclose()
if ex.code == HTTP_UNAUTHORIZED:
raise ConfigEntryAuthFailed from ex
_LOGGER.error("Unable to communicate with Tesla API: %s", ex.message)
return False
async def _async_close_client(*_):
await async_client.aclose()
@callback
def _async_create_close_task():
asyncio.create_task(_async_close_client())
config_entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _async_close_client)
)
config_entry.async_on_unload(_async_create_close_task)
_async_save_tokens(hass, config_entry, access_token, refresh_token)
coordinator = TeslaDataUpdateCoordinator(
hass, config_entry=config_entry, controller=controller
)
# Fetch initial data so we have data when entities subscribe
entry_data = hass.data[DOMAIN][config_entry.entry_id] = {
"coordinator": coordinator,
"devices": defaultdict(list),
DATA_LISTENER: [config_entry.add_update_listener(update_listener)],
}
_LOGGER.debug("Connected to the Tesla API")
await coordinator.async_config_entry_first_refresh()
all_devices = controller.get_homeassistant_components()
if not all_devices:
return False
for device in all_devices:
entry_data["devices"][device.hass_type].append(device)
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
return True
async def async_unload_entry(hass, config_entry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
for listener in hass.data[DOMAIN][config_entry.entry_id][DATA_LISTENER]:
listener()
username = config_entry.title
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
_LOGGER.debug("Unloaded entry for %s", username)
return True
return False
async def update_listener(hass, config_entry):
"""Update when config_entry options update."""
controller = hass.data[DOMAIN][config_entry.entry_id]["coordinator"].controller
old_update_interval = controller.update_interval
controller.update_interval = config_entry.options.get(CONF_SCAN_INTERVAL)
if old_update_interval != controller.update_interval:
_LOGGER.debug(
"Changing scan_interval from %s to %s",
old_update_interval,
controller.update_interval,
)
class TeslaDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Tesla data."""
def __init__(self, hass, *, config_entry, controller):
"""Initialize global Tesla data updater."""
self.controller = controller
self.config_entry = config_entry
update_interval = timedelta(seconds=MIN_SCAN_INTERVAL)
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=update_interval,
)
async def _async_update_data(self):
"""Fetch data from API endpoint."""
if self.controller.is_token_refreshed():
result = self.controller.get_tokens()
refresh_token = result["refresh_token"]
access_token = result["access_token"]
_async_save_tokens(
self.hass, self.config_entry, access_token, refresh_token
)
_LOGGER.debug("Saving new tokens in config_entry")
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(30):
return await self.controller.update()
except TeslaException as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
class TeslaDevice(CoordinatorEntity):
"""Representation of a Tesla device."""
def __init__(self, tesla_device, coordinator):
"""Initialise the Tesla device."""
super().__init__(coordinator)
self.tesla_device = tesla_device
self._name = self.tesla_device.name
self._unique_id = slugify(self.tesla_device.uniq_name)
self._attributes = self.tesla_device.attrs.copy()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def icon(self):
"""Return the icon of the sensor."""
if self.device_class:
return None
return ICONS.get(self.tesla_device.type)
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
attr = self._attributes
if self.tesla_device.has_battery():
attr[ATTR_BATTERY_LEVEL] = self.tesla_device.battery_level()
attr[ATTR_BATTERY_CHARGING] = self.tesla_device.battery_charging()
return attr
@property
def device_info(self):
"""Return the device_info of the device."""
return {
"identifiers": {(DOMAIN, self.tesla_device.id())},
"name": self.tesla_device.car_name(),
"manufacturer": "Tesla",
"model": self.tesla_device.car_type,
"sw_version": self.tesla_device.car_version,
}
async def async_added_to_hass(self):
"""Register state update callback."""
self.async_on_remove(self.coordinator.async_add_listener(self.refresh))
@callback
def refresh(self) -> None:
"""Refresh the state of the device.
This assumes the coordinator has updated the controller.
"""
self.tesla_device.refresh()
self._attributes = self.tesla_device.attrs.copy()
self.async_write_ha_state()
|
|
#!/usr/bin/env python
import unittest
from .. import Parser
from .. import Interpreter
from ..tokens import Bool, Cons, EmptyList, Identifier, Number, String
def parseOne(arg):
return Parser.parse(arg)[0]
def run(arg):
return Interpreter.Interpreter().run(Parser.parse(arg))
def runOne(arg):
return Interpreter.Interpreter().evalOne(parseOne(arg))
class TestParser(unittest.TestCase):
def testPositiveInteger(self):
result = parseOne("7")
self.assertEqual(result, Number(7))
def testReallyPositiveInteger(self):
result = parseOne("+7")
self.assertEqual(result, Number(7))
def testNegativeInteger(self):
result = parseOne("-42")
self.assertEqual(result, Number(-42))
def testRejectIntegerContainingAlphas(self):
with self.assertRaises(Parser.BadTokenError):
parseOne("123abc")
def testRejectIntegerWithInterposedPlusSign(self):
with self.assertRaises(Parser.BadTokenError):
parseOne("123+456")
def testString(self):
result = parseOne('"Hello mom"')
self.assertEqual(result, String("Hello mom"))
def testIgnoreWhitespace(self):
result = parseOne(' \t\r\n\f\v"Hello mom" \t\r\n\f\v')
self.assertEqual(result, String("Hello mom"))
def testEscapeBackslash(self):
result = parseOne(r'"Hello\\\\mom"')
self.assertEqual(result, String(r"Hello\\mom"))
def testEscapeDoublequote(self):
result = parseOne(r'"Hello\"mom"')
self.assertEqual(result, String('Hello"mom'))
def testBadEscape(self):
with self.assertRaises(Parser.EscapeError):
parseOne(r'"Hello\mom"')
def testUnterminatedString(self):
with self.assertRaises(Parser.UnterminatedError):
parseOne(r'"Hello mom')
def testUnterminatedStringAfterBackslash(self):
# I can't decide whether this should raise UnterminatedError or
# EscapeError. What matters here is that an error is raised, so
# I'll accept SyntaxError (the superclass of both).
with self.assertRaises(Parser.SyntaxError):
parseOne('"Hello mom\\')
def testIgnoreComments(self):
result = parseOne(";Comment\n7")
self.assertEqual(result, Number(7))
def testJustACommentProducesEmptyExpressionList(self):
self.assertEqual(Parser.parse(";Comment"), [])
def testSimpleDotPair(self):
result = parseOne("(7 . 2)")
self.assertEqual(result, Cons(Number(7), Number(2)))
def testUnterminatedDotPair(self):
with self.assertRaises(Parser.UnterminatedError):
parseOne("(7 . 2")
def testRejectStrayParen(self):
# @TODO@ - more specific exception type?
with self.assertRaises(Parser.BadTokenError):
parseOne(")")
def testRejectStrayDotOperator(self):
# @TODO@ - more specific exception type?
with self.assertRaises(Parser.BadTokenError):
parseOne(".")
def testImproperList(self):
result = parseOne("(1 2 . 3)")
self.assertEqual(result, Cons(Number(1), Cons(Number(2), Number(3))))
def testRejectMoreThanOneExpressionAfterDotOperator(self):
with self.assertRaises(Parser.BadDotPairError):
parseOne("(1 2 . 3 4)")
def testRejectDotPairWithNoCar(self):
with self.assertRaises(Parser.BadDotPairError):
parseOne("(. 7)")
def testSimpleList(self):
result = parseOne("(1 2 3)")
self.assertEqual(result, Cons(Number(1), Cons(Number(2), Cons(Number(3), EmptyList()))))
def testIdentifier(self):
result = parseOne("identifier")
self.assertEqual(result, Identifier("identifier"))
def testRejectIdentifierStartingWithPlus(self):
with self.assertRaises(Parser.BadTokenError):
result = parseOne("+id")
def testRejectIdentifierStartingWithMinus(self):
with self.assertRaises(Parser.BadTokenError):
result = parseOne("-id")
def testRejectIdentifierStartingWithDot(self):
with self.assertRaises(Parser.BadTokenError):
result = parseOne(".id")
def testBarePlusIsAnIdentifier(self):
result = parseOne("+")
self.assertEqual(result, Identifier("+"))
def testBareMinusIsAnIdentifier(self):
result = parseOne("-")
self.assertEqual(result, Identifier("-"))
def testEmptyList(self):
self.assertEqual(parseOne("()"), EmptyList())
def testParseTrue(self):
self.assertEqual(parseOne("#t"), Bool(True))
def testParseFalse(self):
self.assertEqual(parseOne("#f"), Bool(False))
def testUnexpectedCharAfterHash(self):
with self.assertRaises(Parser.BadHashError):
parseOne("#q")
def testEOFAfterHash(self):
with self.assertRaises(Parser.UnexpectedEOFError):
parseOne("#")
class TestBareInterpreter(unittest.TestCase):
def testInteger(self):
self.assertEqual(runOne("-1337"), Number(-1337))
def testQuoteInteger(self):
self.assertEqual(runOne("(quote 7)"), Number(7))
def testRejectQuoteWithMultipleArguments(self):
with self.assertRaises(Interpreter.TooManyArgumentsError):
runOne("(quote 7 2)")
def testRejectQuoteWithNoArguments(self):
with self.assertRaises(Interpreter.NotEnoughArgumentsError):
runOne("(quote)")
def testQuoteList(self):
self.assertEqual(runOne("(quote (1 2 3))"), Cons(Number(1), Cons(Number(2), Cons(Number(3), EmptyList()))))
def testAdditionOfTwoOperands(self):
self.assertEqual(runOne("(+ 123 456)"), Number(579))
def testAdditionOfThreeOperands(self):
self.assertEqual(runOne("(+ 1 2 3)"), Number(6))
def testAdditionOfNoOperands(self):
self.assertEqual(runOne("(+)"), Number(0))
def testNestedAddition(self):
self.assertEqual(runOne("(+ (+ 1 2) (+ 3 4))"), Number(10))
def testRejectNullProcedure(self):
with self.assertRaises(Interpreter.NotCallableError):
runOne("()")
def testRejectNumberAsProcedure(self):
with self.assertRaises(Interpreter.NotCallableError):
runOne("(27)")
def testRejectProcedureCallWithDot(self):
with self.assertRaises(Interpreter.ImproperListCallError):
runOne("(+ 1 . 2)")
def testRejectUnknownIdentifier(self):
with self.assertRaises(Interpreter.UnknownIdentifier):
runOne("this-does-not-exist")
def testRejectCallingUnknownIdentifier(self):
with self.assertRaises(Interpreter.UnknownIdentifier):
runOne("(this-does-not-exist)")
def testDefine(self):
result = run("(define the-answer 42) the-answer")
self.assertEqual(result, [None, Number(42)])
def testRejectDefineWithTooManyArguments(self):
with self.assertRaises(Interpreter.TooManyArgumentsError):
runOne("(define hello 2 3)")
def testRejectDefineWithNoArguments(self):
with self.assertRaises(Interpreter.NotEnoughArgumentsError):
runOne("(define)")
def testRejectDefineWithOneArgument(self):
with self.assertRaises(Interpreter.NotEnoughArgumentsError):
runOne("(define foo)")
def testDefineRequiresAnIdentifierAsFirstArgument(self):
with self.assertRaises(Interpreter.BadArgumentError):
runOne("(define 7 2)")
def testSubtraction(self):
self.assertEqual(runOne("(- 2 7)"), Number(-5))
def testRejectSubtractionWithNoOperands(self):
with self.assertRaises(Interpreter.NotEnoughArgumentsError):
runOne("(-)")
def testSubtractionWithOneOperandIsNegation(self):
self.assertEqual(runOne("(- 8)"), Number(-8))
def testMultiplication(self):
self.assertEqual(runOne("(* 7 2)"), Number(14))
def testMultiplicationWithNoOperands(self):
self.assertEqual(runOne("(*)"), Number(1))
def testAdditionWithDefine(self):
result = run("(define one 1) (+ one one)")
self.assertEqual(result, [None, Number(2)])
def testDefiningAliasForProcedure(self):
result = run("(define plus +) (plus 7 2)")
self.assertEqual(result, [None, Number(9)])
def testCaseInsensitivityOfBoundVariables(self):
result = run("(define FOO 42) FoO")
self.assertEqual(result, [None, Number(42)])
def testIfTrue(self):
self.assertEqual(runOne("(if #t 1 2)"), Number(1))
def testIfFalse(self):
self.assertEqual(runOne("(if #f 1 2)"), Number(2))
def testRejectIfWithTooFewArguments(self):
with self.assertRaises(Interpreter.NotEnoughArgumentsError):
runOne("(if #t)")
def testRejectIfWithTooManyArguments(self):
with self.assertRaises(Interpreter.TooManyArgumentsError):
runOne("(if #t 1 2 3)")
def testIfWithNoFalseBranchAndYetItsFalse(self):
self.assertEqual(runOne("(if #f 42)"), None)
def testIfEvaluatesCondition(self):
self.assertEqual(runOne("(if (if #t #f) 1 2)"), Number(2))
def testIfEvaluatesFirstBranch(self):
self.assertEqual(runOne("(if #t (+ 0 1) 2)"), Number(1))
def testIfEvaluatesSecondBranch(self):
self.assertEqual(runOne("(if #f 1 (+ 0 2))"), Number(2))
# The real difference between Lisp and Scheme! ;)
def testEmptyListIsTrue(self):
self.assertEqual(runOne("(if (quote ()) 1 2)"), Number(1))
def testTrivialLambda(self):
result = run("""(define get-the-answer
(lambda ()
(* 21 2)))
(get-the-answer)""")
self.assertEqual(result, [None, Number(42)])
def testRejectLambdaWithTooFewArguments(self):
with self.assertRaises(Interpreter.NotEnoughArgumentsError):
runOne("(lambda ())")
def testDefineProcedureThatTakesOneArgument(self):
result = run("""(define add2
(lambda (x)
(+ x 2)))
(add2 40)""")
self.assertEqual(result, [None, Number(42)])
def testDefineProcedureThatTakesTwoArguments(self):
result = run("""(define add
(lambda (x y)
(+ x y)))
(add 32 10)""")
self.assertEqual(result, [None, Number(42)])
if __name__ == '__main__':
unittest.main()
#
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from __future__ import division, print_function, absolute_import
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
'pinv', 'pinv2', 'pinvh']
import warnings
import numpy as np
from .flinalg import get_flinalg_funcs
from .lapack import get_lapack_funcs, _compute_lwork
from .misc import LinAlgError, _datacopied
from .decomp import _asarray_validated
from . import decomp, decomp_svd
from ._solve_toeplitz import levinson
# Linear equations
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
overwrite_b=False, debug=False, check_finite=True):
"""
Solve the equation ``a x = b`` for ``x``.
Parameters
----------
a : (M, M) array_like
A square matrix.
b : (M,) or (M, N) array_like
Right-hand side matrix in ``a x = b``.
sym_pos : bool, optional
Assume `a` is symmetric and positive definite.
lower : bool, optional
Use only data contained in the lower triangle of `a`, if `sym_pos` is
true. Default is to use upper triangle.
overwrite_a : bool, optional
Allow overwriting data in `a` (may enhance performance).
Default is False.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance).
Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system ``a x = b``. Shape of the return matches the
shape of `b`.
Raises
------
LinAlgError
If `a` is singular.
ValueError
If `a` is not square
Examples
--------
Given `a` and `b`, solve for `x`:
>>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
>>> b = np.array([2, 4, -1])
>>> from scipy import linalg
>>> x = linalg.solve(a, b)
>>> x
array([ 2., -2., 9.])
>>> np.dot(a, x) == b
array([ True, True, True], dtype=bool)
"""
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
if a1.shape[0] != b1.shape[0]:
raise ValueError('incompatible dimensions')
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if debug:
print('solve:overwrite_a=', overwrite_a)
print('solve:overwrite_b=', overwrite_b)
if sym_pos:
posv, = get_lapack_funcs(('posv',), (a1, b1))
c, x, info = posv(a1, b1, lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
else:
gesv, = get_lapack_funcs(('gesv',), (a1, b1))
lu, piv, x, info = gesv(a1, b1, overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix")
raise ValueError('illegal value in %d-th argument of internal gesv|posv' %
-info)
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
overwrite_b=False, debug=False, check_finite=True):
"""
Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : (M, M) array_like
A triangular matrix
b : (M,) or (M, N) array_like
Right-hand side matrix in `a x = b`
lower : bool, optional
Use only data contained in the lower triangle of `a`.
Default is to use upper triangle.
trans : {0, 1, 2, 'N', 'T', 'C'}, optional
Type of system to solve:
======== =========
trans system
======== =========
0 or 'N' a x = b
1 or 'T' a^T x = b
2 or 'C' a^H x = b
======== =========
unit_diagonal : bool, optional
If True, diagonal elements of `a` are assumed to be 1 and
will not be referenced.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system `a x = b`. Shape of return matches `b`.
Raises
------
LinAlgError
If `a` is singular
Notes
-----
.. versionadded:: 0.9.0
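Examples
--------
An illustrative lower triangular system (solution verified by
substitution):
>>> from scipy.linalg import solve_triangular
>>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
>>> b = np.array([4, 2, 4, 2])
>>> x = solve_triangular(a, b, lower=True)
>>> x
array([ 1.33333333, -0.66666667,  2.66666667, -1.33333333])
>>> a.dot(x)  # Check the result
array([ 4.,  2.,  4.,  2.])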
"""
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
if a1.shape[0] != b1.shape[0]:
raise ValueError('incompatible dimensions')
overwrite_b = overwrite_b or _datacopied(b1, b)
if debug:
print('solve:overwrite_b=', overwrite_b)
trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
trtrs, = get_lapack_funcs(('trtrs',), (a1, b1))
x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
trans=trans, unitdiag=unit_diagonal)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
(info-1))
raise ValueError('illegal value in %d-th argument of internal trtrs' %
(-info))
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
debug=False, check_finite=True):
"""
Solve the equation a x = b for x, assuming a is a banded matrix.
The matrix a is stored in `ab` using the matrix diagonal ordered form::
ab[u + i - j, j] == a[i,j]
Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::
* a01 a12 a23 a34 a45
a00 a11 a22 a33 a44 a55
a10 a21 a32 a43 a54 *
a20 a31 a42 a53 * *
Parameters
----------
(l, u) : (integer, integer)
Number of non-zero lower and upper diagonals
ab : (`l` + `u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Returned shape depends on the
shape of `b`.
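Examples
--------
An illustrative tridiagonal system (``l`` = ``u`` = 1), with the
solution checked against values worked out by hand:
>>> from scipy.linalg import solve_banded
>>> ab = np.array([[0, 1, 1],
...                [2, 2, 2],
...                [1, 1, 0]])
>>> b = np.array([1, 2, 3])
>>> x = solve_banded((1, 1), ab, b)
>>> np.allclose(x, [0.5, 0., 1.5])
True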
"""
a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
(l, u) = l_and_u
if l + u + 1 != a1.shape[0]:
raise ValueError("invalid values for the number of lower and upper "
"diagonals: l+u+1 (%d) does not equal ab.shape[0] "
"(%d)" % (l+u+1, ab.shape[0]))
overwrite_b = overwrite_b or _datacopied(b1, b)
if a1.shape[-1] == 1:
b2 = np.array(b1, copy=(not overwrite_b))
b2 /= a1[1, 0]
return b2
if l == u == 1:
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
du = a1[0, 1:]
d = a1[1, :]
dl = a1[2, :-1]
du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
overwrite_ab, overwrite_b)
else:
gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
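# LAPACK ?gbsv expects l extra rows above the band (2*l+u+1 rows
# in total) to hold fill-in produced during the LU factorization.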
a2 = np.zeros((2*l+u+1, a1.shape[1]), dtype=gbsv.dtype)
a2[l:, :] = a1
lu, piv, x, info = gbsv(l, u, a2, b1, overwrite_ab=True,
overwrite_b=overwrite_b)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix")
raise ValueError('illegal value in %d-th argument of internal gbsv/gtsv' %
-info)
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
check_finite=True):
"""
Solve the equation a x = b, where a is a Hermitian positive-definite
banded matrix.
The matrix a is stored in `ab` either in lower diagonal or upper
diagonal ordered form:
ab[u + i - j, j] == a[i,j] (if upper form; i <= j)
ab[ i - j, j] == a[i,j] (if lower form; i >= j)
Example of `ab` (shape of a is (6, 6), `u` =2)::
upper form:
* * a02 a13 a24 a35
* a01 a12 a23 a34 a45
a00 a11 a22 a33 a44 a55
lower form:
a00 a11 a22 a33 a44 a55
a10 a21 a32 a43 a54 *
a20 a31 a42 a53 * *
Cells marked with * are not used.
Parameters
----------
ab : (`u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
lower : bool, optional
Whether the matrix is in the lower form (default is upper form).
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Shape of return matches shape
of `b`.
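Examples
--------
An illustrative positive-definite tridiagonal system (upper form,
``u`` = 1), checked against values worked out by hand:
>>> from scipy.linalg import solveh_banded
>>> ab = np.array([[0., -1., -1.],
...                [2., 2., 2.]])
>>> b = np.array([1., 2., 3.])
>>> x = solveh_banded(ab, b)
>>> np.allclose(x, [2.5, 4., 3.5])
True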
"""
a1 = _asarray_validated(ab, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
overwrite_b = overwrite_b or _datacopied(b1, b)
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
if a1.shape[0] == 2:
ptsv, = get_lapack_funcs(('ptsv',), (a1, b1))
if lower:
d = a1[0, :].real
e = a1[1, :-1]
else:
d = a1[1, :].real
e = a1[0, 1:].conj()
d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab,
overwrite_b)
else:
pbsv, = get_lapack_funcs(('pbsv',), (a1, b1))
c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab,
overwrite_b=overwrite_b)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite" % info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal pbsv' %
-info)
return x
def solve_toeplitz(c_or_cr, b, check_finite=True):
"""Solve a Toeplitz system using Levinson Recursion
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system ``T x = b``. Shape of return matches shape
of `b`.
Notes
-----
The solution is computed using Levinson-Durbin recursion, which is faster
than generic least-squares methods, but can be less numerically stable.
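Examples
--------
An illustrative example: solve ``T x = b`` for the Toeplitz matrix
with first column `c` and first row `r`, then check the result
against the dense matrix built by `scipy.linalg.toeplitz`:
>>> from scipy.linalg import solve_toeplitz, toeplitz
>>> c = np.array([1, 3, 6, 10])    # First column of T
>>> r = np.array([1, -1, -2, -3])  # First row of T
>>> b = np.array([1, 2, 2, 5])
>>> x = solve_toeplitz((c, r), b)
>>> x
array([ 1.66666667, -1.        , -2.66666667,  2.33333333])
>>> T = toeplitz(c, r)
>>> T.dot(x)
array([ 1.,  2.,  2.,  5.])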
"""
# If numerical stability of this algorithm is a problem, a future
# developer might consider implementing other O(N^2) Toeplitz solvers,
# such as GKO (http://www.jstor.org/stable/2153371) or Bareiss.
if isinstance(c_or_cr, tuple):
c, r = c_or_cr
c = _asarray_validated(c, check_finite=check_finite).ravel()
r = _asarray_validated(r, check_finite=check_finite).ravel()
else:
c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
r = c.conjugate()
# Form a 1D array of values to be used in the matrix, containing a reversed
# copy of r[1:], followed by c.
vals = np.concatenate((r[-1:0:-1], c))
if b is None:
raise ValueError('illegal value, `b` is a required argument')
if vals.shape[0] != (2*b.shape[0] - 1):
raise ValueError('incompatible dimensions')
b = _asarray_validated(b)
if np.iscomplexobj(vals) or np.iscomplexobj(b):
vals = np.asarray(vals, dtype=np.complex128, order='c')
b = np.asarray(b, dtype=np.complex128)
else:
vals = np.asarray(vals, dtype=np.double, order='c')
b = np.asarray(b, dtype=np.double)
if b.ndim == 1:
x, _ = levinson(vals, np.ascontiguousarray(b))
else:
b_shape = b.shape
b = b.reshape(b.shape[0], -1)
# Pass a list (not a generator) to column_stack; generator input
# is not reliably supported.
x = np.column_stack(
[levinson(vals, np.ascontiguousarray(b[:, i]))[0]
for i in range(b.shape[1])])
x = x.reshape(*b_shape)
return x
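# Helper: length of array `a` along `axis`, accepting negative axis
# values; raises ValueError when the axis is out of bounds.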
def _get_axis_len(aname, a, axis):
ax = axis
if ax < 0:
ax += a.ndim
if 0 <= ax < a.ndim:
return a.shape[ax]
raise ValueError("'%saxis' entry is out of bounds" % (aname,))
def solve_circulant(c, b, singular='raise', tol=None,
caxis=-1, baxis=0, outaxis=0):
"""Solve C x = b for x, where C is a circulant matrix.
`C` is the circulant matrix associated with the vector `c`.
The system is solved by doing division in Fourier space. The
calculation is::
x = ifft(fft(b) / fft(c))
where `fft` and `ifft` are the fast Fourier transform and its inverse,
respectively. For a large vector `c`, this is *much* faster than
solving the system with the full circulant matrix.
Parameters
----------
c : array_like
The coefficients of the circulant matrix.
b : array_like
Right-hand side matrix in ``a x = b``.
singular : str, optional
This argument controls how a near singular circulant matrix is
handled. If `singular` is "raise" and the circulant matrix is
near singular, a `LinAlgError` is raised. If `singular` is
"lstsq", the least squares solution is returned. Default is "raise".
tol : float, optional
If any eigenvalue of the circulant matrix has an absolute value
that is less than or equal to `tol`, the matrix is considered to be
near singular. If not given, `tol` is set to::
tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps
where `abs_eigs` is the array of absolute values of the eigenvalues
of the circulant matrix.
caxis : int
When `c` has dimension greater than 1, it is viewed as a collection
of circulant vectors. In this case, `caxis` is the axis of `c` that
holds the vectors of circulant coefficients.
baxis : int
When `b` has dimension greater than 1, it is viewed as a collection
of vectors. In this case, `baxis` is the axis of `b` that holds the
right-hand side vectors.
outaxis : int
When `c` or `b` are multidimensional, the value returned by
`solve_circulant` is multidimensional. In this case, `outaxis` is
the axis of the result that holds the solution vectors.
Returns
-------
x : ndarray
Solution to the system ``C x = b``.
Raises
------
LinAlgError
If the circulant matrix associated with `c` is near singular.
See Also
--------
circulant
Notes
-----
For a one-dimensional vector `c` with length `m`, and an array `b`
with shape ``(m, ...)``,
solve_circulant(c, b)
returns the same result as
solve(circulant(c), b)
where `solve` and `circulant` are from `scipy.linalg`.
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.linalg import solve_circulant, solve, circulant, lstsq
>>> c = np.array([2, 2, 4])
>>> b = np.array([1, 2, 3])
>>> solve_circulant(c, b)
array([ 0.75, -0.25, 0.25])
Compare that result to solving the system with `scipy.linalg.solve`:
>>> solve(circulant(c), b)
array([ 0.75, -0.25, 0.25])
A singular example:
>>> c = np.array([1, 1, 0, 0])
>>> b = np.array([1, 2, 3, 4])
Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`. For the
least-squares solution, use the option ``singular='lstsq'``:
>>> solve_circulant(c, b, singular='lstsq')
array([ 0.25, 1.25, 2.25, 1.25])
Compare to `scipy.linalg.lstsq`:
>>> x, resid, rnk, s = lstsq(circulant(c), b)
>>> x
array([ 0.25, 1.25, 2.25, 1.25])
A broadcasting example:
Suppose we have the vectors of two circulant matrices stored in an array
with shape (2, 5), and three `b` vectors stored in an array with shape
(3, 5). For example,
>>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]])
>>> b = np.arange(15).reshape(-1, 5)
We want to solve all combinations of circulant matrices and `b` vectors,
with the result stored in an array with shape (2, 3, 5). When we
disregard the axes of `c` and `b` that hold the vectors of coefficients,
the shapes of the collections are (2,) and (3,), respectively, which are
not compatible for broadcasting. To have a broadcast result with shape
(2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has
shape (2, 1, 5). The last dimension holds the coefficients of the
circulant matrices, so when we call `solve_circulant`, we can use the
default ``caxis=-1``. The coefficients of the `b` vectors are in the last
dimension of the array `b`, so we use ``baxis=-1``. If we use the
default `outaxis`, the result will have shape (5, 2, 3), so we'll use
``outaxis=-1`` to put the solution vectors in the last dimension.
>>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1)
>>> x.shape
(2, 3, 5)
>>> np.set_printoptions(precision=3) # For compact output of numbers.
>>> x
array([[[-0.118, 0.22 , 1.277, -0.142, 0.302],
[ 0.651, 0.989, 2.046, 0.627, 1.072],
[ 1.42 , 1.758, 2.816, 1.396, 1.841]],
[[ 0.401, 0.304, 0.694, -0.867, 0.377],
[ 0.856, 0.758, 1.149, -0.412, 0.831],
[ 1.31 , 1.213, 1.603, 0.042, 1.286]]])
Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``):
>>> solve_circulant(c[1], b[1, :])
array([ 0.856, 0.758, 1.149, -0.412, 0.831])
"""
c = np.atleast_1d(c)
nc = _get_axis_len("c", c, caxis)
b = np.atleast_1d(b)
nb = _get_axis_len("b", b, baxis)
if nc != nb:
raise ValueError('Incompatible c and b axis lengths')
fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
abs_fc = np.abs(fc)
if tol is None:
# This is the same tolerance as used in np.linalg.matrix_rank.
tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps
if tol.shape != ():
tol.shape = tol.shape + (1,)
else:
tol = np.atleast_1d(tol)
near_zeros = abs_fc <= tol
is_near_singular = np.any(near_zeros)
if is_near_singular:
if singular == 'raise':
raise LinAlgError("near singular circulant matrix.")
else:
# Replace the small values with 1 to avoid errors in the
# division fb/fc below.
fc[near_zeros] = 1
fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)
q = fb / fc
if is_near_singular:
# `near_zeros` is a boolean array, same shape as `c`, that is
# True where `fc` is (near) zero. `q` is the broadcasted result
# of fb / fc, so to set the values of `q` to 0 where `fc` is near
# zero, we use a mask that is the broadcast result of an array
# of True values shaped like `b` with `near_zeros`.
mask = np.ones_like(b, dtype=bool) & near_zeros
q[mask] = 0
x = np.fft.ifft(q, axis=-1)
if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
x = x.real
if outaxis != -1:
x = np.rollaxis(x, -1, outaxis)
return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
"""
Compute the inverse of a matrix.
Parameters
----------
a : array_like
Square matrix to be inverted.
overwrite_a : bool, optional
Discard data in `a` (may improve performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
ainv : ndarray
Inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular.
ValueError
If `a` is not square, or not 2-dimensional.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1., 2.], [3., 4.]])
>>> linalg.inv(a)
array([[-2. , 1. ],
[ 1.5, -0.5]])
>>> np.dot(a, linalg.inv(a))
array([[ 1., 0.],
[ 0., 1.]])
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
#XXX: I found no advantage or disadvantage of using finv.
## finv, = get_flinalg_funcs(('inv',),(a1,))
## if finv is not None:
## a_inv,info = finv(a1,overwrite_a=overwrite_a)
## if info==0:
## return a_inv
## if info>0: raise LinAlgError, "singular matrix"
## if info<0: raise ValueError,\
## 'illegal value in %d-th argument of internal inv.getrf|getri'%(-info)
getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri',
'getri_lwork'),
(a1,))
lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
if info == 0:
lwork = _compute_lwork(getri_lwork, a1.shape[0])
# XXX: the following line fixes curious SEGFAULT when
# benchmarking 500x500 matrix inverse. This seems to
# be a bug in LAPACK ?getri routine because if lwork is
# minimal (when using lwork[0] instead of lwork[1]) then
# all tests pass. Further investigation is required if
# more such SEGFAULTs occur.
lwork = int(1.01 * lwork)
inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
if info > 0:
raise LinAlgError("singular matrix")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'getrf|getri' % -info)
return inv_a
### Determinant
def det(a, overwrite_a=False, check_finite=True):
"""
Compute the determinant of a matrix.
The determinant of a square matrix is a value derived arithmetically
from the coefficients of the matrix.
The determinant for a 3x3 matrix, for example, is computed as follows::
a b c
d e f = A
g h i
det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h
Parameters
----------
a : (M, M) array_like
A square matrix.
overwrite_a : bool, optional
Allow overwriting data in a (may enhance performance).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
det : float or complex
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization, LAPACK routine z/dgetrf.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
0.0
>>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
3.0
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
fdet, = get_flinalg_funcs(('det',), (a1,))
a_det, info = fdet(a1, overwrite_a=overwrite_a)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'det.getrf' % -info)
return a_det
### Linear Least Squares
class LstsqLapackError(LinAlgError):
pass
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
check_finite=True, lapack_driver=None):
"""
Compute least-squares solution to equation Ax = b.
Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.
Parameters
----------
a : (M, N) array_like
Left hand side matrix (2-D array).
b : (M,) or (M, K) array_like
Right hand side matrix or vector (1-D or 2-D array).
cond : float, optional
Cutoff for 'small' singular values; used to determine effective
rank of a. Singular values smaller than
``cond * largest_singular_value`` are considered zero.
overwrite_a : bool, optional
Discard data in `a` (may enhance performance). Default is False.
overwrite_b : bool, optional
Discard data in `b` (may enhance performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
lapack_driver : str, optional
Which LAPACK driver is used to solve the least-squares problem.
Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
(``'gelsd'``) is a good choice. However, ``'gelsy'`` can be slightly
faster on many problems. ``'gelss'`` was used historically. It is
generally slow but uses less memory.
.. versionadded:: 0.17.0
Returns
-------
x : (N,) or (N, K) ndarray
Least-squares solution. Return shape matches shape of `b`.
residues : () or (1,) or (K,) ndarray
Sums of residues, squared 2-norm for each column in ``b - a x``.
If rank of matrix a is ``< N`` or ``> M``, or ``'gelsy'`` is used,
this is an empty array. If b was 1-D, this is an (1,) shape array,
otherwise the shape is (K,).
rank : int
Effective rank of matrix `a`.
s : (min(M,N),) ndarray or None
Singular values of `a`. The condition number of a is
``abs(s[0] / s[-1])``. None is returned when ``'gelsy'`` is used.
Raises
------
LinAlgError
If computation does not converge.
ValueError
When parameters are wrong.
See Also
--------
optimize.nnls : linear least squares with non-negativity constraint
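Examples
--------
An illustrative straight-line fit ``y = c0 + c1*x`` on exactly
consistent data:
>>> from scipy.linalg import lstsq
>>> x = np.array([0., 1., 2., 3.])
>>> y = np.array([1., 3., 5., 7.])   # exactly y = 1 + 2*x
>>> A = np.vstack([np.ones_like(x), x]).T
>>> coef, resids, rank, sv = lstsq(A, y)
>>> np.allclose(coef, [1., 2.])
True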
"""
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2:
raise ValueError('expected matrix')
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
if m != b1.shape[0]:
raise ValueError('incompatible dimensions')
driver = lapack_driver
if driver is None:
driver = lstsq.default_lapack_driver
if driver not in ('gelsd', 'gelsy', 'gelss'):
raise ValueError('LAPACK driver "%s" is not found' % driver)
lapack_func, lapack_lwork = get_lapack_funcs((driver,
'%s_lwork' % driver), (a1, b1))
real_data = (lapack_func.dtype.kind == 'f')
if m < n:
# need to extend b matrix as it will be filled with
# a larger solution matrix
if len(b1.shape) == 2:
b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
b2[:m, :] = b1
else:
b2 = np.zeros(n, dtype=lapack_func.dtype)
b2[:m] = b1
b1 = b2
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if cond is None:
cond = np.finfo(lapack_func.dtype).eps
if driver in ('gelss', 'gelsd'):
if driver == 'gelss':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
elif driver == 'gelsd':
if real_data:
lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
if iwork == 0:
# this is LAPACK bug 0038: dgelsd does not provide the
# size of the iwork array in query mode. This bug was
# fixed in LAPACK 3.2.2, released July 21, 2010.
mesg = ("internal gelsd driver lwork query error, "
"required iwork dimension not returned. "
"This is likely the result of LAPACK bug "
"0038, fixed in LAPACK 3.2.2 (released "
"July 21, 2010). ")
if lapack_driver is None:
# restart with gelss
lstsq.default_lapack_driver = 'gelss'
mesg += "Falling back to 'gelss' driver."
warnings.warn(mesg, RuntimeWarning)
return lstsq(a, b, cond, overwrite_a, overwrite_b,
check_finite, lapack_driver='gelss')
# can't proceed, bail out
mesg += ("Use a different lapack_driver when calling lstsq "
"or upgrade LAPACK.")
raise LstsqLapackError(mesg)
x, s, rank, info = lapack_func(a1, b1, lwork,
iwork, cond, False, False)
else: # complex data
lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
nrhs, cond)
x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
cond, False, False)
if info > 0:
raise LinAlgError("SVD did not converge in Linear Least Squares")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal %s'
% (-info, lapack_driver))
resids = np.asarray([], dtype=x.dtype)
if m > n:
x1 = x[:n]
if rank == n:
resids = np.sum(np.abs(x[n:])**2, axis=0)
x = x1
return x, resids, rank, s
elif driver == 'gelsy':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
lwork, False, False)
if info < 0:
raise ValueError("illegal value in %d-th argument of internal "
"gelsy" % -info)
if m > n:
x1 = x[:n]
x = x1
return x, np.array([], x.dtype), rank, None
lstsq.default_lapack_driver = 'gelsd'
def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using a least-squares
solver.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
cond, rcond : float, optional
Cutoff for 'small' singular values in the least-squares solver.
Singular values smaller than ``rcond * largest_singular_value``
are considered zero.
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If computation does not converge.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> B = linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
b = np.identity(a.shape[0], dtype=a.dtype)
if rcond is not None:
cond = rcond
x, resids, rank, s = lstsq(a, b, cond=cond, check_finite=False)
if return_rank:
return x, rank
else:
return x
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using its
singular-value decomposition and including all 'large' singular
values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
cond, rcond : float or None
Cutoff for 'small' singular values.
Singular values smaller than ``rcond*largest_singular_value``
are considered zero.
If None or -1, suitable machine precision is used.
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If SVD computation does not converge.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> B = linalg.pinv2(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
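# Economy-size pseudo-inverse: keep only the singular values above
# the cutoff and form B = V_r * diag(1/s_r) * U_r^H.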
rank = np.sum(s > cond * np.max(s))
u = u[:, :rank]
u /= s[:rank]
B = np.transpose(np.conjugate(np.dot(u, vh[:rank])))
if return_rank:
return B, rank
else:
return B
def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False,
check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
Calculate a generalized inverse of a Hermitian or real symmetric matrix
using its eigenvalue decomposition and including all eigenvalues with
'large' absolute value.
Parameters
----------
a : (N, N) array_like
Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' eigenvalues.
Eigenvalues with absolute value smaller than
``rcond * largest_eigenvalue`` are considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, N) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
Examples
--------
>>> from scipy.linalg import pinvh
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
s, u = decomp.eigh(a, lower=lower, check_finite=False)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# For Hermitian matrices, singular values equal abs(eigenvalues)
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = 1.0 / s[above_cutoff]
u = u[:, above_cutoff]
B = np.dot(u * psigma_diag, np.conjugate(u).T)
if return_rank:
return B, len(psigma_diag)
else:
return B
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import errno
from os.path import isdir, isfile, join, dirname
import random
import shutil
import time
import itertools
from six import viewkeys
import six.moves.cPickle as pickle
from swift import gettext_ as _
import eventlet
from eventlet import GreenPool, tpool, Timeout, sleep, hubs
from eventlet.green import subprocess
from eventlet.support.greenlets import GreenletExit
from swift.common.ring.utils import is_local_device
from swift.common.utils import whataremyips, unlink_older_than, \
compute_eta, get_logger, dump_recon_cache, ismount, \
rsync_module_interpolation, mkdirs, config_true_value, list_from_csv, \
get_hub, tpool_reraise, config_auto_int_value, storage_directory
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
from swift.obj import ssync_sender
from swift.obj.diskfile import DiskFileManager, get_data_dir, get_tmp_dir
from swift.common.storage_policy import POLICIES, REPL_POLICY
DEFAULT_RSYNC_TIMEOUT = 900
hubs.use_hub(get_hub())
class ObjectReplicator(Daemon):
"""
Replicate objects.
Encapsulates most logic and data needed by the object replication process.
Each call to .replicate() performs one replication pass. It's up to the
caller to do this in a loop.
"""
def __init__(self, conf, logger=None):
"""
:param conf: configuration object obtained from ConfigParser
:param logger: logging object
"""
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-replicator')
self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.bind_ip = conf.get('bind_ip', '0.0.0.0')
self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
self.port = None if self.servers_per_port else \
int(conf.get('bind_port', 6200))
self.concurrency = int(conf.get('concurrency', 1))
self.stats_interval = int(conf.get('stats_interval', '300'))
self.ring_check_interval = int(conf.get('ring_check_interval', 15))
self.next_check = time.time() + self.ring_check_interval
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
self.partition_times = []
self.interval = int(conf.get('interval') or
conf.get('run_pause') or 30)
self.rsync_timeout = int(conf.get('rsync_timeout',
DEFAULT_RSYNC_TIMEOUT))
self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
self.rsync_compress = config_true_value(
conf.get('rsync_compress', 'no'))
self.rsync_module = conf.get('rsync_module', '').rstrip('/')
if not self.rsync_module:
self.rsync_module = '{replication_ip}::object'
if config_true_value(conf.get('vm_test_mode', 'no')):
self.logger.warning('Option object-replicator/vm_test_mode '
'is deprecated and will be removed in a '
'future version. Update your '
'configuration to use option '
'object-replicator/rsync_module.')
self.rsync_module += '{replication_port}'
self.http_timeout = int(conf.get('http_timeout', 60))
self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "object.recon")
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.node_timeout = float(conf.get('node_timeout', 10))
self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.default_headers = {
'Content-Length': '0',
'user-agent': 'object-replicator %s' % os.getpid()}
self.rsync_error_log_line_length = \
int(conf.get('rsync_error_log_line_length', 0))
self.handoffs_first = config_true_value(conf.get('handoffs_first',
False))
self.handoff_delete = config_auto_int_value(
conf.get('handoff_delete', 'auto'), 0)
if any((self.handoff_delete, self.handoffs_first)):
self.logger.warning('Handoff only mode is not intended for normal '
'operation, please disable handoffs_first and '
'handoff_delete before the next '
'normal rebalance')
self._diskfile_mgr = DiskFileManager(conf, self.logger)
def _zero_stats(self):
"""Zero out the stats."""
self.stats = {'attempted': 0, 'success': 0, 'failure': 0,
'hashmatch': 0, 'rsync': 0, 'remove': 0,
'start': time.time(), 'failure_nodes': {}}
def _add_failure_stats(self, failure_devs_info):
for node, dev in failure_devs_info:
self.stats['failure'] += 1
failure_devs = self.stats['failure_nodes'].setdefault(node, {})
failure_devs.setdefault(dev, 0)
failure_devs[dev] += 1
def _get_my_replication_ips(self):
my_replication_ips = set()
ips = whataremyips()
for policy in POLICIES:
self.load_object_ring(policy)
for local_dev in [dev for dev in policy.object_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] == self.port]:
my_replication_ips.add(local_dev['replication_ip'])
return list(my_replication_ips)
# Just exists for doc anchor point
def sync(self, node, job, suffixes, *args, **kwargs):
"""
Synchronize local suffix directories from a partition with a remote
node.
:param node: the "dev" entry for the remote node to sync with
:param job: information about the partition being synced
:param suffixes: a list of suffixes which need to be pushed
:returns: boolean and dictionary, boolean indicating success or failure
"""
return self.sync_method(node, job, suffixes, *args, **kwargs)
def load_object_ring(self, policy):
"""
Make sure the policy's rings are loaded.
:param policy: the StoragePolicy instance
:returns: appropriate ring object
"""
policy.load_ring(self.swift_dir)
return policy.object_ring
def _rsync(self, args):
"""
Execute the rsync binary to replicate a partition.
:returns: return code of rsync process. 0 is successful
"""
start_time = time.time()
ret_val = None
try:
with Timeout(self.rsync_timeout):
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
results = proc.stdout.read()
ret_val = proc.wait()
except Timeout:
self.logger.error(_("Killing long-running rsync: %s"), str(args))
proc.kill()
return 1 # failure response code
total_time = time.time() - start_time
for result in results.split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
if not ret_val:
self.logger.info(result)
else:
self.logger.error(result)
if ret_val:
error_line = _('Bad rsync return code: %(ret)d <- %(args)s') % \
{'args': str(args), 'ret': ret_val}
if self.rsync_error_log_line_length:
error_line = error_line[:self.rsync_error_log_line_length]
self.logger.error(error_line)
else:
log_method = self.logger.info if results else self.logger.debug
log_method(
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
return ret_val
def rsync(self, node, job, suffixes):
"""
Uses rsync to implement the sync method. This was the first
sync method in Swift.
"""
if not os.path.exists(job['path']):
return False, {}
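# Build the rsync argument list. The --exclude pattern skips
# temporary files named ".<name>.<6 alphanumerics>", e.g. partial
# files left behind by interrupted rsync runs.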
args = [
'rsync',
'--recursive',
'--whole-file',
'--human-readable',
'--xattrs',
'--itemize-changes',
'--ignore-existing',
'--timeout=%s' % self.rsync_io_timeout,
'--contimeout=%s' % self.rsync_io_timeout,
'--bwlimit=%s' % self.rsync_bwlimit,
'--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6))
]
if self.rsync_compress and \
job['region'] != node['region']:
# Allow for compression, but only if the remote node is in
# a different region than the local one.
args.append('--compress')
rsync_module = rsync_module_interpolation(self.rsync_module, node)
had_any = False
for suffix in suffixes:
spath = join(job['path'], suffix)
if os.path.exists(spath):
args.append(spath)
had_any = True
if not had_any:
return False, {}
data_dir = get_data_dir(job['policy'])
args.append(join(rsync_module, node['device'],
data_dir, job['partition']))
return self._rsync(args) == 0, {}
def ssync(self, node, job, suffixes, remote_check_objs=None):
return ssync_sender.Sender(
self, node, job, suffixes, remote_check_objs)()
def check_ring(self, object_ring):
"""
Check to see if the ring has been updated
:param object_ring: the ring to check
:returns: False if the ring has changed (the current pass should
be aborted), True otherwise
"""
if time.time() > self.next_check:
self.next_check = time.time() + self.ring_check_interval
if object_ring.has_changed():
return False
return True
def update_deleted(self, job):
"""
High-level method that replicates a single partition that doesn't
belong on this node.
:param job: a dict containing info about the partition to be replicated
"""
def tpool_get_suffixes(path):
return [suff for suff in os.listdir(path)
if len(suff) == 3 and isdir(join(path, suff))]
self.replication_count += 1
self.logger.increment('partition.delete.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
failure_devs_info = set()
begin = time.time()
handoff_partition_deleted = False
try:
responses = []
suffixes = tpool.execute(tpool_get_suffixes, job['path'])
synced_remote_regions = {}
delete_objs = None
if suffixes:
for node in job['nodes']:
self.stats['rsync'] += 1
kwargs = {}
if node['region'] in synced_remote_regions and \
self.conf.get('sync_method', 'rsync') == 'ssync':
kwargs['remote_check_objs'] = \
synced_remote_regions[node['region']]
# candidates is a dict(hash=>timestamp) of objects
# for deletion
success, candidates = self.sync(
node, job, suffixes, **kwargs)
if success:
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'],
node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes), headers=headers)
conn.getresponse().read()
if node['region'] != job['region']:
synced_remote_regions[node['region']] = viewkeys(
candidates)
else:
failure_devs_info.add((node['replication_ip'],
node['device']))
responses.append(success)
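# Intersect the per-region candidate sets so an object is only
# considered for local deletion once every synced remote region
# has reported holding it.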
for region, cand_objs in synced_remote_regions.items():
if delete_objs is None:
delete_objs = cand_objs
else:
delete_objs = delete_objs & cand_objs
if self.handoff_delete:
# delete handoff if we have had handoff_delete successes
delete_handoff = len([resp for resp in responses if resp]) >= \
self.handoff_delete
else:
# delete handoff if all syncs were successful
delete_handoff = len(responses) == len(job['nodes']) and \
all(responses)
if delete_handoff:
self.stats['remove'] += 1
if (self.conf.get('sync_method', 'rsync') == 'ssync' and
delete_objs is not None):
self.logger.info(_("Removing %s objects"),
len(delete_objs))
_junk, error_paths = self.delete_handoff_objs(
job, delete_objs)
# If replication from a hand-off device succeeded but the
# cleanup afterwards failed, mark the remote devices that
# were the targets of the replication: the leftover data
# means this partition must be replicated again with the
# same info.
if error_paths:
failure_devs_info.update(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
else:
self.delete_partition(job['path'])
handoff_partition_deleted = True
elif not suffixes:
self.delete_partition(job['path'])
handoff_partition_deleted = True
except (Exception, Timeout):
self.logger.exception(_("Error syncing handoff partition"))
self._add_failure_stats(failure_devs_info)
finally:
target_devs_info = set([(target_dev['replication_ip'],
target_dev['device'])
for target_dev in job['nodes']])
self.stats['success'] += len(target_devs_info - failure_devs_info)
if not handoff_partition_deleted:
self.handoffs_remaining += 1
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.delete.timing', begin)
def delete_partition(self, path):
self.logger.info(_("Removing partition: %s"), path)
tpool.execute(shutil.rmtree, path)
def delete_handoff_objs(self, job, delete_objs):
success_paths = []
error_paths = []
for object_hash in delete_objs:
object_path = storage_directory(job['obj_path'], job['partition'],
object_hash)
tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
suffix_dir = dirname(object_path)
try:
os.rmdir(suffix_dir)
success_paths.append(object_path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
error_paths.append(object_path)
self.logger.exception(
"Unexpected error trying to cleanup suffix dir:%r",
suffix_dir)
return success_paths, error_paths
def update(self, job):
"""
High-level method that replicates a single partition.
:param job: a dict containing info about the partition to be replicated
"""
self.replication_count += 1
self.logger.increment('partition.update.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
target_devs_info = set()
failure_devs_info = set()
begin = time.time()
try:
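# Hash this partition's suffix dirs; force a full listdir on
# roughly every tenth job to pick up newly created suffixes.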
hashed, local_hash = tpool_reraise(
self._diskfile_mgr._get_hashes, job['path'],
do_listdir=(self.replication_count % 10) == 0,
reclaim_age=self.reclaim_age)
self.suffix_hash += hashed
self.logger.update_stats('suffix.hashes', hashed)
attempts_left = len(job['nodes'])
synced_remote_regions = set()
random.shuffle(job['nodes'])
nodes = itertools.chain(
job['nodes'],
job['policy'].object_ring.get_more_nodes(
int(job['partition'])))
while attempts_left > 0:
# If this throws StopIteration it will be caught way below
node = next(nodes)
target_devs_info.add((node['replication_ip'], node['device']))
attempts_left -= 1
# if we have already synced to this remote region,
# don't sync again on this replication pass
if node['region'] in synced_remote_regions:
continue
try:
with Timeout(self.http_timeout):
resp = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'', headers=headers).getresponse()
if resp.status == HTTP_INSUFFICIENT_STORAGE:
self.logger.error(
_('%(replication_ip)s/%(device)s '
'responded as unmounted'), node)
attempts_left += 1
failure_devs_info.add((node['replication_ip'],
node['device']))
continue
if resp.status != HTTP_OK:
self.logger.error(_("Invalid response %(resp)s "
"from %(ip)s"),
{'resp': resp.status,
'ip': node['replication_ip']})
failure_devs_info.add((node['replication_ip'],
node['device']))
continue
remote_hash = pickle.loads(resp.read())
del resp
suffixes = [suffix for suffix in local_hash if
local_hash[suffix] !=
remote_hash.get(suffix, -1)]
if not suffixes:
self.stats['hashmatch'] += 1
continue
hashed, recalc_hash = tpool_reraise(
self._diskfile_mgr._get_hashes,
job['path'], recalculate=suffixes,
reclaim_age=self.reclaim_age)
self.logger.update_stats('suffix.hashes', hashed)
local_hash = recalc_hash
suffixes = [suffix for suffix in local_hash if
local_hash[suffix] !=
remote_hash.get(suffix, -1)]
self.stats['rsync'] += 1
success, _junk = self.sync(node, job, suffixes)
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes),
headers=headers)
conn.getresponse().read()
if not success:
failure_devs_info.add((node['replication_ip'],
node['device']))
# add only remote region when replicate succeeded
if success and node['region'] != job['region']:
synced_remote_regions.add(node['region'])
self.suffix_sync += len(suffixes)
self.logger.update_stats('suffix.syncs', len(suffixes))
except (Exception, Timeout):
failure_devs_info.add((node['replication_ip'],
node['device']))
self.logger.exception(_("Error syncing with node: %s") %
node)
self.suffix_count += len(local_hash)
except (Exception, Timeout):
failure_devs_info.update(target_devs_info)
self._add_failure_stats(failure_devs_info)
self.logger.exception(_("Error syncing partition"))
finally:
self.stats['success'] += len(target_devs_info - failure_devs_info)
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.update.timing', begin)
def stats_line(self):
"""
Logs various stats for the currently running replication pass.
"""
if self.replication_count:
elapsed = (time.time() - self.start) or 0.000001
rate = self.replication_count / elapsed
self.logger.info(
_("%(replicated)d/%(total)d (%(percentage).2f%%)"
" partitions replicated in %(time).2fs (%(rate).2f/sec, "
"%(remaining)s remaining)"),
{'replicated': self.replication_count, 'total': self.job_count,
'percentage': self.replication_count * 100.0 / self.job_count,
'time': time.time() - self.start, 'rate': rate,
'remaining': '%d%s' % compute_eta(self.start,
self.replication_count,
self.job_count)})
self.logger.info(_('%(success)s successes, %(failure)s failures')
% self.stats)
if self.suffix_count:
self.logger.info(
_("%(checked)d suffixes checked - "
"%(hashed).2f%% hashed, %(synced).2f%% synced"),
{'checked': self.suffix_count,
'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
'synced': (self.suffix_sync * 100.0) / self.suffix_count})
self.partition_times.sort()
self.logger.info(
_("Partition times: max %(max).4fs, "
"min %(min).4fs, med %(med).4fs"),
{'max': self.partition_times[-1],
'min': self.partition_times[0],
'med': self.partition_times[
len(self.partition_times) // 2]})
else:
self.logger.info(
_("Nothing replicated for %s seconds."),
(time.time() - self.start))
def kill_coros(self):
"""Utility function that kills all coroutines currently running."""
for coro in list(self.run_pool.coroutines_running):
try:
coro.kill(GreenletExit)
except GreenletExit:
pass
def heartbeat(self):
"""
Loop that runs in the background during replication. It periodically
logs progress.
"""
while True:
eventlet.sleep(self.stats_interval)
self.stats_line()
def detect_lockups(self):
"""
In testing, the pool.waitall() call very occasionally failed to return.
This is an attempt to make sure the replicator finishes its replication
pass in some eventuality.
"""
while True:
eventlet.sleep(self.lockup_timeout)
if self.replication_count == self.last_replication_count:
self.logger.error(_("Lockup detected.. killing live coros."))
self.kill_coros()
self.last_replication_count = self.replication_count
def build_replication_jobs(self, policy, ips, override_devices=None,
override_partitions=None):
"""
Helper function for collect_jobs to build jobs for replication
using replication style storage policy
"""
jobs = []
self.all_devs_info.update(
[(dev['replication_ip'], dev['device'])
for dev in policy.object_ring.devs if dev])
data_dir = get_data_dir(policy)
found_local = False
for local_dev in [dev for dev in policy.object_ring.devs
if (dev
and is_local_device(ips,
self.port,
dev['replication_ip'],
dev['replication_port'])
and (override_devices is None
or dev['device'] in override_devices))]:
found_local = True
dev_path = join(self.devices_dir, local_dev['device'])
obj_path = join(dev_path, data_dir)
tmp_path = join(dev_path, get_tmp_dir(policy))
if self.mount_check and not ismount(dev_path):
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
self.logger.warning(
_('%s is not mounted'), local_dev['device'])
continue
unlink_older_than(tmp_path, time.time() - self.reclaim_age)
if not os.path.exists(obj_path):
try:
mkdirs(obj_path)
except Exception:
self.logger.exception('ERROR creating %s' % obj_path)
continue
for partition in os.listdir(obj_path):
if (override_partitions is not None
and partition not in override_partitions):
continue
if (partition.startswith('auditor_status_') and
partition.endswith('.json')):
# ignore auditor status files
continue
part_nodes = None
try:
job_path = join(obj_path, partition)
part_nodes = policy.object_ring.get_part_nodes(
int(partition))
nodes = [node for node in part_nodes
if node['id'] != local_dev['id']]
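# When the local device is not one of the partition's primary
# nodes, nothing is filtered out above, so len(nodes) ==
# len(part_nodes) and the job is marked as a hand-off
# (delete=True) below.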
jobs.append(
dict(path=job_path,
device=local_dev['device'],
obj_path=obj_path,
nodes=nodes,
delete=len(nodes) > len(part_nodes) - 1,
policy=policy,
partition=partition,
region=local_dev['region']))
except ValueError:
if part_nodes:
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in nodes])
else:
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
continue
if not found_local:
self.logger.error("Can't find itself in policy with index %d with"
" ips %s and with port %s in ring file, not"
" replicating",
int(policy), ", ".join(ips), self.port)
return jobs
def collect_jobs(self, override_devices=None, override_partitions=None,
override_policies=None):
"""
Returns a sorted list of jobs (dictionaries) that specify the
        partitions, nodes, etc. to be rsynced.
:param override_devices: if set, only jobs on these devices
will be returned
:param override_partitions: if set, only jobs on these partitions
will be returned
:param override_policies: if set, only jobs in these storage
policies will be returned
"""
jobs = []
ips = whataremyips(self.bind_ip)
for policy in POLICIES:
if policy.policy_type == REPL_POLICY:
if (override_policies is not None and
str(policy.idx) not in override_policies):
continue
# ensure rings are loaded for policy
self.load_object_ring(policy)
jobs += self.build_replication_jobs(
policy, ips, override_devices=override_devices,
override_partitions=override_partitions)
random.shuffle(jobs)
if self.handoffs_first:
# Move the handoff parts to the front of the list
jobs.sort(key=lambda job: not job['delete'])
self.job_count = len(jobs)
return jobs
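    # A minimal sketch of the handoffs_first ordering above: the key
    # `not job['delete']` is False for handoff jobs and True for primary
    # jobs, and False sorts first:
    #
    #     _jobs = [{'delete': False}, {'delete': True}]
    #     _jobs.sort(key=lambda job: not job['delete'])
    #     # -> [{'delete': True}, {'delete': False}]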
def replicate(self, override_devices=None, override_partitions=None,
override_policies=None):
"""Run a replication pass"""
self.start = time.time()
self.suffix_count = 0
self.suffix_sync = 0
self.suffix_hash = 0
self.replication_count = 0
self.last_replication_count = -1
self.partition_times = []
self.my_replication_ips = self._get_my_replication_ips()
self.all_devs_info = set()
self.handoffs_remaining = 0
stats = eventlet.spawn(self.heartbeat)
lockup_detector = eventlet.spawn(self.detect_lockups)
eventlet.sleep() # Give spawns a cycle
current_nodes = None
try:
self.run_pool = GreenPool(size=self.concurrency)
jobs = self.collect_jobs(override_devices=override_devices,
override_partitions=override_partitions,
override_policies=override_policies)
for job in jobs:
current_nodes = job['nodes']
if override_devices and job['device'] not in override_devices:
continue
if override_partitions and \
job['partition'] not in override_partitions:
continue
dev_path = join(self.devices_dir, job['device'])
if self.mount_check and not ismount(dev_path):
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
self.logger.warning(_('%s is not mounted'), job['device'])
continue
if self.handoffs_first and not job['delete']:
# in handoffs first mode, we won't process primary
# partitions until rebalance was successful!
if self.handoffs_remaining:
self.logger.warning(_(
"Handoffs first mode still has handoffs "
"remaining. Aborting current "
"replication pass."))
break
if not self.check_ring(job['policy'].object_ring):
self.logger.info(_("Ring change detected. Aborting "
"current replication pass."))
return
try:
if isfile(job['path']):
# Clean up any (probably zero-byte) files where a
# partition should be.
self.logger.warning(
'Removing partition directory '
'which was a file: %s', job['path'])
os.remove(job['path'])
continue
except OSError:
continue
if job['delete']:
self.run_pool.spawn(self.update_deleted, job)
else:
self.run_pool.spawn(self.update, job)
current_nodes = None
with Timeout(self.lockup_timeout):
self.run_pool.waitall()
except (Exception, Timeout):
if current_nodes:
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in current_nodes])
else:
self._add_failure_stats(self.all_devs_info)
self.logger.exception(_("Exception in top-level replication loop"))
self.kill_coros()
finally:
stats.kill()
lockup_detector.kill()
self.stats_line()
self.stats['attempted'] = self.replication_count
def run_once(self, *args, **kwargs):
self._zero_stats()
self.logger.info(_("Running object replicator in script mode."))
override_devices = list_from_csv(kwargs.get('devices'))
override_partitions = list_from_csv(kwargs.get('partitions'))
override_policies = list_from_csv(kwargs.get('policies'))
if not override_devices:
override_devices = None
if not override_partitions:
override_partitions = None
if not override_policies:
override_policies = None
self.replicate(
override_devices=override_devices,
override_partitions=override_partitions,
override_policies=override_policies)
total = (time.time() - self.stats['start']) / 60
self.logger.info(
_("Object replication complete (once). (%.02f minutes)"), total)
if not (override_partitions or override_devices):
replication_last = time.time()
dump_recon_cache({'replication_stats': self.stats,
'replication_time': total,
'replication_last': replication_last,
'object_replication_time': total,
'object_replication_last': replication_last},
self.rcache, self.logger)
def run_forever(self, *args, **kwargs):
self.logger.info(_("Starting object replicator in daemon mode."))
# Run the replicator continually
while True:
self._zero_stats()
self.logger.info(_("Starting object replication pass."))
# Run the replicator
self.replicate()
total = (time.time() - self.stats['start']) / 60
self.logger.info(
_("Object replication complete. (%.02f minutes)"), total)
replication_last = time.time()
dump_recon_cache({'replication_stats': self.stats,
'replication_time': total,
'replication_last': replication_last,
'object_replication_time': total,
'object_replication_last': replication_last},
self.rcache, self.logger)
self.logger.debug('Replication sleeping for %s seconds.',
self.interval)
sleep(self.interval)
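# Illustrative driver code (a sketch; assumes the surrounding class is
# swift's ObjectReplicator and that a parsed config dict `conf` and a
# logger have been set up elsewhere):
#
#     replicator = ObjectReplicator(conf, logger)
#     replicator.run_once(devices='sda,sdb', partitions='1234')  # one pass
#     replicator.run_forever()  # loop, sleeping self.interval between passes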
|
|
import logging
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.exceptions import (PermissionDenied,
ObjectDoesNotExist,
ValidationError)
from django.db.models import Q
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST,
INVALID_FORM_DATA,
NOT_LOGGED_IN,
PERMISSION_DENIED)
from djblets.webapi.fields import (BooleanFieldType,
ChoiceFieldType,
DateTimeFieldType,
DictFieldType,
IntFieldType,
ListFieldType,
ResourceFieldType,
ResourceListFieldType,
StringFieldType)
from reviewboard.admin.server import build_server_url
from reviewboard.diffviewer.errors import (DiffTooBigError,
DiffParserError,
EmptyDiffError)
from reviewboard.diffviewer.features import dvcs_feature
from reviewboard.hostingsvcs.errors import HostingServiceError
from reviewboard.reviews.errors import (CloseError,
PermissionError,
PublishError,
ReopenError)
from reviewboard.reviews.fields import get_review_request_field
from reviewboard.reviews.models import ReviewRequest
from reviewboard.scmtools.errors import (AuthenticationError,
ChangeNumberInUseError,
EmptyChangeSetError,
InvalidChangeNumberError,
SCMError,
RepositoryNotFoundError)
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.ssh.errors import SSHError
from reviewboard.scmtools.models import Repository
from reviewboard.webapi.base import ImportExtraDataError, WebAPIResource
from reviewboard.webapi.decorators import (webapi_check_local_site,
webapi_check_login_required)
from reviewboard.webapi.errors import (CHANGE_NUMBER_IN_USE,
CLOSE_ERROR,
COMMIT_ID_ALREADY_EXISTS,
DIFF_EMPTY,
DIFF_TOO_BIG,
DIFF_PARSE_ERROR,
EMPTY_CHANGESET,
INVALID_CHANGE_NUMBER,
INVALID_REPOSITORY,
INVALID_USER,
MISSING_REPOSITORY,
PUBLISH_ERROR,
REOPEN_ERROR,
REPO_AUTHENTICATION_ERROR,
REPO_INFO_ERROR)
from reviewboard.webapi.mixins import MarkdownFieldsMixin
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.repository import RepositoryResource
from reviewboard.webapi.resources.review_group import ReviewGroupResource
from reviewboard.webapi.resources.review_request_draft import \
ReviewRequestDraftResource
from reviewboard.webapi.resources.user import UserResource
logger = logging.getLogger(__name__)
class ReviewRequestResource(MarkdownFieldsMixin, WebAPIResource):
"""Provides information on review requests.
Review requests are one of the central concepts in Review Board. They
represent code or files that are being placed up for review.
A review request has a number of fields that can be filled out, indicating
the summary, description of the change, testing that was done, affected
bugs, and more. These must be filled out through the associated Review
Request Draft resource.
When a review request is published, it can be reviewed by users. It can
then be updated, again through the Review Request Draft resource, or closed
as submitted or discarded.
"""
model = ReviewRequest
name = 'review_request'
fields = {
'id': {
'type': IntFieldType,
'description': 'The numeric ID of the review request.',
},
'approved': {
'type': BooleanFieldType,
'description': 'Whether the review request has been approved '
'by reviewers.\n'
'\n'
'On a default install, a review request is '
'approved if it has at least one Ship It! and '
'no open issues. Extensions may change these '
'requirements.',
'added_in': '2.0',
},
'approval_failure': {
'type': StringFieldType,
'description': 'The reason why the review request was not '
'approved. This will be ``null`` if approved.',
'added_in': '2.0',
},
'blocks': {
'type': ResourceListFieldType,
'resource': 'reviewboard.webapi.resources.review_request.'
'ReviewRequestResource',
'description': 'The list of review requests that this '
'review request is blocking.',
'added_in': '1.7.9',
},
'close_description': {
'type': StringFieldType,
'description': 'The text describing the closing of the review '
'request.',
'added_in': '2.0.12',
'supports_text_types': True,
},
'close_description_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The current or forced text type for the '
'``close_description`` field.',
'added_in': '2.0.12',
},
'created_with_history': {
'type': BooleanFieldType,
'description': 'Whether or not the review request was created '
'with history support.\n'
'\n'
'A value of true indicates that the review request '
'will have commits attached.',
'added_in': '4.0',
},
'depends_on': {
'type': ResourceListFieldType,
'resource': 'reviewboard.webapi.resources.review_request.'
'ReviewRequestResource',
'description': 'The list of review requests that this '
'review request depends on.',
'added_in': '1.7.9',
},
'extra_data': {
'type': DictFieldType,
'description': 'Extra data as part of the review request. '
'This can be set by the API or extensions.',
'added_in': '2.0',
},
'issue_dropped_count': {
'type': IntFieldType,
'description': 'The number of dropped issues on this '
'review request',
'added_in': '2.0',
},
'issue_open_count': {
'type': IntFieldType,
'description': 'The number of open issues on this review request',
'added_in': '2.0',
},
'issue_resolved_count': {
'type': IntFieldType,
'description': 'The number of resolved issues on this '
'review request',
'added_in': '2.0',
},
'issue_verifying_count': {
'type': IntFieldType,
'description': 'The number of issues waiting for verification to '
'resolve or drop on this review request',
'added_in': '3.0.3',
},
'submitter': {
'type': ResourceFieldType,
'resource': UserResource,
'description': 'The user who submitted the review request.',
},
'time_added': {
'type': DateTimeFieldType,
'description': 'The date and time that the review request was '
'added.',
},
'last_updated': {
'type': DateTimeFieldType,
'description': 'The date and time that the review request was '
'last updated.',
},
'text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'Formerly responsible for indicating the text '
'type for text fields. Replaced by '
'``close_description_text_type``, '
'``description_text_type``, and '
'``testing_done_text_type`` in 2.0.12.',
'added_in': '2.0',
'deprecated_in': '2.0.12',
},
'status': {
'type': ChoiceFieldType,
'choices': ('discarded', 'pending', 'submitted'),
'description': 'The current status of the review request.',
},
'public': {
'type': BooleanFieldType,
'description': 'Whether or not the review request is currently '
'visible to other users.',
},
'changenum': {
'type': IntFieldType,
'description': 'The change number that the review request '
'represents. These are server-side repository-'
'specific change numbers, and are not supported '
'by all types of repositories. It may be '
'``null``.\n'
'\n'
'This is deprecated in favor of the ``commit_id`` '
'field.',
'deprecated_in': '2.0',
},
'commit_id': {
'type': StringFieldType,
'description': 'The commit that the review request represents. '
'This obsoletes the ``changenum`` field.',
'added_in': '2.0',
},
'repository': {
'type': ResourceFieldType,
'resource': RepositoryResource,
'description': "The repository that the review request's code "
"is stored on.",
},
'ship_it_count': {
'type': IntFieldType,
'description': 'The number of Ship Its given to this '
'review request.',
'added_in': '2.0',
},
'summary': {
'type': StringFieldType,
'description': "The review request's brief summary.",
},
'description': {
'type': StringFieldType,
'description': "The review request's description.",
'supports_text_types': True,
},
'description_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The current or forced text type for the '
'``description`` field.',
'added_in': '2.0.12',
},
'testing_done': {
'type': StringFieldType,
'description': 'The information on the testing that was done '
'for the change.',
'supports_text_types': True,
},
'testing_done_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The current or forced text type for the '
'``testing_done`` field.',
'added_in': '2.0.12',
},
'bugs_closed': {
'type': ListFieldType,
'items': {
'type': StringFieldType,
},
'description': 'The list of bugs closed or referenced by this '
'change.',
},
'branch': {
'type': StringFieldType,
'description': 'The branch that the code was changed on or that '
'the code will be committed to. This is a '
'free-form field that can store any text.',
},
'target_groups': {
'type': ResourceListFieldType,
'resource': ReviewGroupResource,
'description': 'The list of review groups who were requested '
'to review this change.',
},
'target_people': {
'type': ResourceListFieldType,
'resource': UserResource,
'description': 'The list of users who were requested to review '
'this change.',
},
'url': {
'type': StringFieldType,
'description': "The URL to the review request's page on the site. "
"This is deprecated and will be removed in a "
"future version.",
'added_in': '1.7.8',
'deprecated_in': '2.0',
},
'absolute_url': {
'type': StringFieldType,
'description': "The absolute URL to the review request's page on "
"the site.",
'added_in': '2.0',
},
}
uri_object_key = 'review_request_id'
model_object_key = 'display_id'
item_child_resources = [
resources.change,
resources.diff,
resources.diff_context,
resources.file_attachment,
resources.review,
resources.review_request_draft,
resources.review_request_last_update,
resources.screenshot,
resources.status_update,
]
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
_close_type_map = {
'submitted': ReviewRequest.SUBMITTED,
'discarded': ReviewRequest.DISCARDED,
}
def get_related_links(self, obj=None, request=None, *args,
**kwargs):
"""Return related links for the resource.
This will serialize the ``latest_diff`` link when called for the
item resource with a resource that has associated diffs.
Args:
obj (reviewboard.reviews.models.review_request.ReviewRequest, optional):
The review request.
request (django.http.HttpRequest, optional):
The current HTTP request.
*args (tuple):
Additional positional arguments.
**kwargs (dict):
Additional keyword arguments.
Returns:
dict:
A dictionary of links related to the resource.
"""
links = super(ReviewRequestResource, self).get_related_links(
obj=obj, request=request, *args, **kwargs)
if obj:
# We already have the diffsets due to get_queryset(), so we aren't
# performing another query here.
diffsets = list(obj.diffset_history.diffsets.all())
if diffsets:
latest_diffset = diffsets[-1]
links['latest_diff'] = {
'href': build_server_url(local_site_reverse(
'diff-resource',
request,
kwargs={
'review_request_id': obj.display_id,
'diff_revision': latest_diffset.revision,
})),
'method': 'GET',
}
return links
def get_queryset(self, request, is_list=False, local_site_name=None,
*args, **kwargs):
"""Returns a queryset for ReviewRequest models.
By default, this returns all published or formerly published
review requests.
If the queryset is being used for a list of review request
resources, then it can be further filtered by one or more arguments
in the URL. These are listed in @webapi_request_fields for get_list().
Some arguments accept dates. The handling of dates is quite flexible,
accepting a variety of date/time formats, but we recommend sticking
with ISO8601 format.
ISO8601 format defines a date as being in ``{yyyy}-{mm}-{dd}`` format,
and a date/time as being in ``{yyyy}-{mm}-{dd}T{HH}:{MM}:{SS}``.
A timezone can also be appended to this, using ``-{HH:MM}``.
The following examples are valid dates and date/times:
* ``2010-06-27``
* ``2010-06-27T16:26:30``
* ``2010-06-27T16:26:30-08:00``
"""
local_site = self._get_local_site(local_site_name)
if is_list:
q = Q()
if 'to-groups' in request.GET:
for group_name in request.GET.get('to-groups').split(','):
q = q & self.model.objects.get_to_group_query(group_name,
local_site)
if 'to-users' in request.GET:
for username in request.GET.get('to-users').split(','):
q = q & self.model.objects.get_to_user_query(username)
if 'to-users-directly' in request.GET:
to_users_directly = \
request.GET.get('to-users-directly').split(',')
for username in to_users_directly:
q = q & self.model.objects.get_to_user_directly_query(
username)
if 'to-users-groups' in request.GET:
for username in request.GET.get('to-users-groups').split(','):
q = q & self.model.objects.get_to_user_groups_query(
username)
if 'from-user' in request.GET:
q = q & self.model.objects.get_from_user_query(
request.GET.get('from-user'))
if 'repository' in request.GET:
q = q & Q(repository=int(request.GET.get('repository')))
commit_q = Q()
if 'changenum' in request.GET:
try:
commit_q = Q(changenum=int(request.GET.get('changenum')))
except (TypeError, ValueError):
pass
commit_id = request.GET.get('commit-id', None)
if commit_id is not None:
commit_q = commit_q | Q(commit_id=commit_id)
if commit_q:
q = q & commit_q
if 'branch' in kwargs:
q &= Q(branch=kwargs['branch'])
if 'ship-it' in request.GET:
ship_it = request.GET.get('ship-it')
if ship_it in ('1', 'true', 'True'):
q = q & Q(shipit_count__gt=0)
elif ship_it in ('0', 'false', 'False'):
q = q & Q(shipit_count=0)
q = q & self.build_queries_for_int_field(
request, 'shipit_count', 'ship-it-count')
for issue_field in ('issue_open_count',
'issue_dropped_count',
'issue_resolved_count',
'issue_verifying_count'):
q = q & self.build_queries_for_int_field(
request, issue_field)
if 'time-added-from' in kwargs:
q = q & Q(time_added__gte=kwargs['time-added-from'])
if 'time-added-to' in kwargs:
q = q & Q(time_added__lt=kwargs['time-added-to'])
if 'last-updated-from' in kwargs:
q = q & Q(last_updated__gte=kwargs['last-updated-from'])
if 'last-updated-to' in kwargs:
q = q & Q(last_updated__lt=kwargs['last-updated-to'])
status = ReviewRequest.string_to_status(
request.GET.get('status', 'pending'))
can_submit_as = request.user.has_perm(
'reviews.can_submit_as_another_user', local_site)
request_unpublished = request.GET.get('show-all-unpublished', '0')
if request_unpublished in ('0', 'false', 'False'):
request_unpublished = False
else:
request_unpublished = True
show_all_unpublished = (request_unpublished and
(can_submit_as or
request.user.is_superuser))
queryset = self.model.objects.public(
user=request.user,
status=status,
local_site=local_site,
extra_query=q,
show_all_unpublished=show_all_unpublished)
# Only select/prefetch these for list resources, since we want to
# reduce the number of queries. We don't want to do this when
# retrieving individual items, as they'd end up stuck with
# prefetched state, which could impact things when handling
# PUT/DELETE operations.
#
# Here's a real-world example (which is interesting enough to
# talk about): We had a bug before when the prefetching was done
# for item resources where a publish on the draft resource would
# fetch the review request from this resource (going through this
# function and therefore prefetching), and then the publish
# operation would associate the new diffset and then emit the
# review_request_published signal. Handlers listening to this that
# tried to fetch diffsets (Review Bot, in our case) would not see
# the new diffset.
#
            # By having this only in the list condition, we get the performance
# benefits we wanted without triggering that sort of bug.
queryset = (
queryset
.select_related('diffset_history')
.prefetch_related('changedescs',
'diffset_history__diffsets')
)
else:
queryset = self.model.objects.filter(local_site=local_site)
return queryset
def has_access_permissions(self, request, review_request, *args, **kwargs):
return review_request.is_accessible_by(request.user)
def has_modify_permissions(self, request, review_request, *args, **kwargs):
return review_request.is_mutable_by(request.user)
def has_delete_permissions(self, request, review_request, *args, **kwargs):
return review_request.is_deletable_by(request.user)
def get_extra_data_field_supports_markdown(self, review_request, key):
field_cls = get_review_request_field(key)
return field_cls and getattr(field_cls, 'enable_markdown', False)
def get_is_close_description_rich_text(self, obj):
if obj.status in (obj.SUBMITTED, obj.DISCARDED):
if hasattr(obj, '_close_description'):
# This was set when updating the description in a POST, so
# use that instead of looking up from the database again.
return obj._close_description_rich_text
else:
return obj.get_close_info()['is_rich_text']
else:
return False
def serialize_object(self, obj, request=None, *args, **kwargs):
"""Serialize a review request.
This method excludes fields from features that are not enabled.
Args:
obj (reviewboard.reviews.models.review_request.ReviewRequest):
The review request to serialize.
request (django.http.HttpRequest, optional):
The HTTP request from the client.
*args (tuple):
Additional positional arguments.
**kwargs (dict):
Additional keyword arguments.
Returns:
dict:
The serialized review request.
"""
result = super(ReviewRequestResource, self).serialize_object(
obj, request=request, *args, **kwargs)
if not dvcs_feature.is_enabled(request=request):
# The field may not have been serialized (e.g., if `only-fields`
# was set to a subset of fields that excludes
# `created_with_history`).
result.pop('created_with_history', None)
return result
def serialize_bugs_closed_field(self, obj, **kwargs):
return obj.get_bug_list()
def serialize_close_description_field(self, obj, **kwargs):
if obj.status in (obj.SUBMITTED, obj.DISCARDED):
if hasattr(obj, '_close_description'):
# This was set when updating the description in a POST, so
# use that instead of looking up from the database again.
return obj._close_description
else:
return obj.get_close_info()['close_description']
else:
return None
def serialize_close_description_text_type_field(self, obj, **kwargs):
# This will be overridden by MarkdownFieldsMixin.
return None
def serialize_description_text_type_field(self, obj, **kwargs):
# This will be overridden by MarkdownFieldsMixin.
return None
def serialize_ship_it_count_field(self, obj, **kwargs):
return obj.shipit_count
def serialize_status_field(self, obj, **kwargs):
return ReviewRequest.status_to_string(obj.status)
def serialize_testing_done_text_type_field(self, obj, **kwargs):
# This will be overridden by MarkdownFieldsMixin.
return None
def serialize_id_field(self, obj, **kwargs):
return obj.display_id
def serialize_url_field(self, obj, **kwargs):
return obj.get_absolute_url()
def serialize_absolute_url_field(self, obj, request, **kwargs):
return request.build_absolute_uri(obj.get_absolute_url())
def serialize_commit_id_field(self, obj, **kwargs):
return obj.commit
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(NOT_LOGGED_IN, PERMISSION_DENIED, INVALID_USER,
INVALID_REPOSITORY, CHANGE_NUMBER_IN_USE,
INVALID_CHANGE_NUMBER, EMPTY_CHANGESET,
REPO_AUTHENTICATION_ERROR, REPO_INFO_ERROR,
MISSING_REPOSITORY, DIFF_EMPTY, DIFF_TOO_BIG,
DIFF_PARSE_ERROR)
@webapi_request_fields(
optional={
'changenum': {
'type': IntFieldType,
'description': 'The optional change number to look up for the '
'review request details. This only works with '
'repositories that support server-side '
'changesets.\n'
'\n'
'This is deprecated in favor of the '
'``commit_id`` field.',
'deprecated_in': '2.0',
},
'commit_id': {
'type': StringFieldType,
'description': 'The optional commit to create the review '
'request for. This should be used in place of '
'the ``changenum`` field.\n'
'\n'
'If ``create_from_commit_id=1`` is passed, '
'then the review request information and diff '
'will be based on this commit ID.',
'added_in': '2.0',
},
'create_from_commit_id': {
'type': BooleanFieldType,
'description': 'If true, and if ``commit_id`` is provided, '
'the review request information and (when '
                           'supported) the diff will be based on the '
'commit ID.\n'
'\n'
'This field cannot be set if '
'"create_with_history" is set.',
'added_in': '2.0',
},
'create_with_history': {
'type': BooleanFieldType,
'description': 'Whether or not to create the review request '
'with support for history.\n'
'\n'
'This field cannot be set if '
'"create_from_commit_id" is set.',
'added_in': '4.0',
},
'force_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The text type, if any, to force for returned '
'text fields. The contents will be converted '
'to the requested type in the payload, but '
'will not be saved as that type.',
'added_in': '2.0.9',
},
'repository': {
'type': StringFieldType,
'description': 'The path or ID of the repository that the '
'review request is for.',
},
'submit_as': {
'type': StringFieldType,
'description': 'The optional user to submit the review '
'request as. This requires that the actual '
'logged in user is either a superuser or has '
'the ``reviews.can_submit_as_another_user`` '
'permission.',
},
},
allow_unknown=True
)
def create(self, request, repository=None, submit_as=None, changenum=None,
commit_id=None, local_site_name=None,
create_from_commit_id=False, create_with_history=False,
extra_fields={}, *args, **kwargs):
"""Creates a new review request.
The new review request will start off as private and pending, and
will normally be blank. However, if ``changenum`` or both
``commit_id`` and ``create_from_commit_id=1`` is passed and the given
repository both supports server-side changesets and has changeset
support in Review Board, some details (Summary, Description and
Testing Done sections, for instance) may be automatically filled in
from the server.
Any new review request will have an associated draft (reachable
through the ``draft`` link). All the details of the review request
must be set through the draft. The new review request will be public
when that first draft is published.
A repository can be passed. This is required for diffs associated
with a review request. A valid repository is in the form of a numeric
repository ID, the name of a repository, or the path to a repository
(matching exactly the registered repository's Path or Mirror Path
        fields in the administration interface).
If a repository is not passed, this review request can only be
used for attached files.
Clients can create review requests on behalf of another user by setting
the ``submit_as`` parameter to the username of the desired user. This
requires that the client is currently logged in as a user that has the
``reviews.can_submit_as_another_user`` permission set. This capability
is useful when writing automation scripts, such as post-commit hooks,
that need to create review requests for another user.
        Extra data can be stored for later lookup. See
:ref:`webapi2.0-extra-data` for more information.
"""
user = request.user
local_site = self._get_local_site(local_site_name)
changenum = changenum or None
commit_id = commit_id or None
if changenum is not None and commit_id is None:
commit_id = str(changenum)
# Preserve the old changenum behavior.
create_from_commit_id = True
if submit_as and user.username != submit_as:
if not user.has_perm('reviews.can_submit_as_another_user',
local_site):
return self.get_no_access_error(request)
user = self._find_user(submit_as, local_site, request)
if not user:
return INVALID_USER
if not dvcs_feature.is_enabled(request=request):
create_with_history = False
if repository is not None:
try:
repository = Repository.objects.get_best_match(
repo_identifier=repository,
local_site=local_site)
except Repository.DoesNotExist:
return INVALID_REPOSITORY, {
'repository': repository
}
except Repository.MultipleObjectsReturned:
msg = ('Too many repositories matched "%s". '
'Try specifying the repository by name instead.'
% repository)
return INVALID_REPOSITORY.with_message(msg), {
'repository': repository,
}
if not repository.is_accessible_by(request.user):
return self.get_no_access_error(request)
try:
review_request = ReviewRequest.objects.create(
user, repository, commit_id, local_site,
create_from_commit_id=create_from_commit_id,
create_with_history=create_with_history)
if extra_fields:
try:
self.import_extra_data(review_request,
review_request.extra_data,
extra_fields)
except ImportExtraDataError as e:
return e.error_payload
review_request.save(update_fields=['extra_data'])
return 201, {
self.item_result_key: review_request
}
except AuthenticationError:
return REPO_AUTHENTICATION_ERROR
except RepositoryNotFoundError:
return MISSING_REPOSITORY
except ChangeNumberInUseError as e:
return CHANGE_NUMBER_IN_USE, {
'review_request': e.review_request
}
except InvalidChangeNumberError:
return INVALID_CHANGE_NUMBER
except EmptyChangeSetError:
return EMPTY_CHANGESET
except DiffTooBigError:
return DIFF_TOO_BIG
except EmptyDiffError:
return DIFF_EMPTY
except DiffParserError as e:
return DIFF_PARSE_ERROR, {
'linenum': e.linenum,
'message': str(e),
}
        except HostingServiceError as e:
            logger.exception('Got unexpected HostingServiceError when '
                             'creating review request: %s',
                             e,
                             request=request)
return REPO_INFO_ERROR.with_message(str(e))
except SSHError as e:
logger.exception('Got unexpected SSHError when creating '
'review request: %s',
e,
request=request)
return REPO_INFO_ERROR.with_message('SSH Error: %s' % e)
except SCMError as e:
return REPO_INFO_ERROR.with_message(str(e))
except ValidationError:
return COMMIT_ID_ALREADY_EXISTS
except ValueError as e:
return INVALID_FORM_DATA, {
'reason': str(e),
}
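    # Illustrative client-side call for create() (a sketch; the host,
    # credentials, and commit ID are hypothetical):
    #
    #     import requests
    #     requests.post(
    #         'https://reviews.example.com/api/review-requests/',
    #         data={'repository': 'myrepo',
    #               'commit_id': 'abc123',
    #               'create_from_commit_id': '1'},
    #         auth=('user', 'password'))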
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED,
REPO_INFO_ERROR)
@webapi_request_fields(
optional={
'status': {
'type': ChoiceFieldType,
'choices': ('discarded', 'pending', 'submitted'),
'description': 'The status of the review request. This can '
'be changed to close or reopen the review '
'request',
},
'changenum': {
'type': IntFieldType,
'description': 'The optional change number to set or update.\n'
'\n'
'This can be used to re-associate with a new '
'change number, or to create/update a draft '
'with new information from the current '
'change number.\n'
'\n'
'This only works with repositories that '
'support server-side changesets.\n'
'\n'
'This is deprecated. Instead, set '
'``commit_id`` and ``update_from_commit_id=1`` '
                               'on the draft.',
'added_in': '1.5.4',
'deprecated_in': '2.0',
},
'close_description': {
'type': StringFieldType,
'description': 'The description of the update. Should only be '
                           'used if the review request has been '
'submitted or discarded.\n'
'\n'
'This replaces the old ``description`` field.',
'added_in': '2.0.9',
'supports_text_types': True,
},
'close_description_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
            'description': 'The text type for the '
                           '``close_description`` field.',
'added_in': '2.0',
'deprecated_in': '2.0.12',
},
'description': {
'type': StringFieldType,
'description': 'The description of the update. Should only be '
                           'used if the review request has been '
'submitted or discarded.\n'
'\n'
'This is deprecated. Instead, set '
'``close_description``.',
'added_in': '1.6',
'deprecated_in': '2.0.9',
'supports_text_types': True,
},
'force_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The text type, if any, to force for returned '
'text fields. The contents will be converted '
'to the requested type in the payload, but '
'will not be saved as that type.',
'added_in': '2.0.9',
},
'text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
            'description': 'The text type for the '
                           '``close_description`` field.\n'
'\n'
'This is deprecated. Please use '
'``close_description_text_type`` instead.',
'added_in': '2.0',
'deprecated_in': '2.0.12',
},
},
allow_unknown=True
)
def update(self, request, status=None, changenum=None,
close_description=None, close_description_text_type=None,
description=None, text_type=None,
extra_fields={}, *args, **kwargs):
"""Updates the status of the review request.
        The only supported updates to a review request's resource are to
        change the status, to change the associated server-side change
        number, or to update information from the existing change number.
The status can be set in order to close the review request as
discarded or submitted, or to reopen as pending.
For Perforce, a change number can either be changed to a new number, or
the current change number can be passed. In either case, a new draft
will be created or an existing one updated to include information from
the server based on the change number. This behavior is deprecated,
and instead, the commit_id field should be set on the draft.
Changes to a review request's fields, such as the summary or the
        list of reviewers, are made on the Review Request Draft resource.
This can be accessed through the ``draft`` link. Only when that
draft is published will the changes end up back in this resource.
        Extra data can be stored for later lookup. See
:ref:`webapi2.0-extra-data` for more information.
"""
try:
review_request = \
resources.review_request.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
is_mutating_field = (
changenum is not None or
extra_fields
)
if ((is_mutating_field and
not self.has_modify_permissions(request, review_request)) or
(status is not None and
not review_request.is_status_mutable_by(request.user))):
return self.get_no_access_error(request)
if (status is not None and
(review_request.status != ReviewRequest.string_to_status(status) or
review_request.status != ReviewRequest.PENDING_REVIEW)):
try:
if status in self._close_type_map:
close_description = close_description or description
close_description_text_type = \
close_description_text_type or text_type
close_description_rich_text = (
close_description_text_type ==
self.TEXT_TYPE_MARKDOWN)
try:
review_request.close(
self._close_type_map[status],
request.user,
close_description,
rich_text=close_description_rich_text)
except CloseError as e:
return CLOSE_ERROR.with_message(str(e))
# Set this so that we'll return this new value when
# serializing the object.
review_request._close_description = close_description
review_request._close_description_rich_text = \
close_description_rich_text
elif status == 'pending':
try:
review_request.reopen(request.user)
except ReopenError as e:
return REOPEN_ERROR.with_message(str(e))
else:
raise AssertionError("Code path for invalid status '%s' "
"should never be reached." % status)
except PermissionError:
return self.get_no_access_error(request)
except PublishError as e:
return PUBLISH_ERROR.with_message(str(e))
# Preserve the old changenum behavior.
changed_fields = []
if changenum is not None:
if review_request.repository is None:
return INVALID_CHANGE_NUMBER
if changenum != review_request.changenum:
review_request.commit = str(changenum)
changed_fields.append('changenum')
changed_fields.append('commit_id')
try:
review_request.reopen(request.user)
except ReopenError as e:
return REOPEN_ERROR.with_message(str(e))
try:
draft = ReviewRequestDraftResource.prepare_draft(
request, review_request)
except PermissionDenied:
return PERMISSION_DENIED
try:
draft.update_from_commit_id(str(changenum))
except InvalidChangeNumberError:
return INVALID_CHANGE_NUMBER
except EmptyChangeSetError:
return EMPTY_CHANGESET
except HostingServiceError as e:
return REPO_INFO_ERROR.with_message(str(e))
except SCMError as e:
return REPO_INFO_ERROR.with_message(str(e))
draft.save()
if extra_fields:
try:
self.import_extra_data(review_request,
review_request.extra_data,
extra_fields)
except ImportExtraDataError as e:
return e.error_payload
changed_fields.append('extra_data')
if changed_fields:
review_request.save(update_fields=changed_fields)
return 200, {
self.item_result_key: review_request,
}
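    # Illustrative client-side call for update() (a sketch; the host and
    # review request ID are hypothetical):
    #
    #     import requests
    #     requests.put(
    #         'https://reviews.example.com/api/review-requests/42/',
    #         data={'status': 'submitted',
    #               'close_description': 'Pushed to master.'},
    #         auth=('user', 'password'))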
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def delete(self, *args, **kwargs):
"""Deletes the review request permanently.
This is a dangerous call to make, as it will delete the review
request, associated screenshots, diffs, and reviews. There is no
going back after this call is made.
Only users who have been granted the ``reviews.delete_reviewrequest``
permission (which includes administrators) can perform a delete on
the review request.
After a successful delete, this will return :http:`204`.
"""
pass
@webapi_check_login_required
@webapi_check_local_site
@webapi_request_fields(
optional={
'branch': {
                'type': StringFieldType,
'description': 'The branch field on a review request to '
'filter by.',
'added_in': '3.0.16',
},
'changenum': {
'type': IntFieldType,
'description': 'The change number the review requests must '
'have set. This will only return one review '
'request per repository, and only works for '
'repository types that support server-side '
'changesets. This is deprecated in favor of '
'the ``commit_id`` field.',
},
'commit-id': {
'type': StringFieldType,
'description': 'The commit that review requests must have '
'set. This will only return one review request '
'per repository.\n'
'\n'
'This obsoletes the ``changenum`` field.',
'added_in': '2.0',
},
'time-added-to': {
'type': DateTimeFieldType,
'description': 'The date/time that all review requests must '
'be added before. This is compared against the '
'review request\'s ``time_added`` field. This '
'must be a valid :term:`date/time format`.',
},
'time-added-from': {
'type': DateTimeFieldType,
'description': 'The earliest date/time the review request '
'could be added. This is compared against the '
'review request\'s ``time_added`` field. This '
'must be a valid :term:`date/time format`.',
},
'last-updated-to': {
'type': DateTimeFieldType,
'description': 'The date/time that all review requests must '
'be last updated before. This is compared '
'against the review request\'s '
'``last_updated`` field. This must be a valid '
':term:`date/time format`.',
},
'last-updated-from': {
'type': DateTimeFieldType,
'description': 'The earliest date/time the review request '
'could be last updated. This is compared '
'against the review request\'s '
'``last_updated`` field. This must be a valid '
':term:`date/time format`.',
},
'from-user': {
'type': StringFieldType,
'description': 'The username that the review requests must '
'be owned by.',
},
'repository': {
'type': IntFieldType,
'description': 'The ID of the repository that the review '
'requests must be on.',
},
'show-all-unpublished': {
'type': BooleanFieldType,
'description': 'If set, and if the user is an admin or has '
'the "reviews.can_submit_as_another_user" '
'permission, unpublished review requests '
'will also be returned.',
'added_in': '2.0.8',
},
'issue-dropped-count': {
'type': IntFieldType,
'description': 'The review request must have exactly the '
'provided number of dropped issues.',
'added_in': '2.0',
},
'issue-dropped-count-lt': {
'type': IntFieldType,
'description': 'The review request must have less than the '
'provided number of dropped issues.',
'added_in': '2.0',
},
'issue-dropped-count-lte': {
'type': IntFieldType,
'description': 'The review request must have at most the '
'provided number of dropped issues.',
'added_in': '2.0',
},
'issue-dropped-count-gt': {
'type': IntFieldType,
'description': 'The review request must have more than the '
'provided number of dropped issues.',
'added_in': '2.0',
},
'issue-dropped-count-gte': {
'type': IntFieldType,
'description': 'The review request must have at least the '
'provided number of dropped issues.',
'added_in': '2.0',
},
'issue-open-count': {
'type': IntFieldType,
'description': 'The review request must have exactly the '
'provided number of open issues.',
'added_in': '2.0',
},
'issue-open-count-lt': {
'type': IntFieldType,
'description': 'The review request must have less than the '
'provided number of open issues.',
'added_in': '2.0',
},
'issue-open-count-lte': {
'type': IntFieldType,
'description': 'The review request must have at most the '
'provided number of open issues.',
'added_in': '2.0',
},
'issue-open-count-gt': {
'type': IntFieldType,
'description': 'The review request must have more than the '
'provided number of open issues.',
'added_in': '2.0',
},
'issue-open-count-gte': {
'type': IntFieldType,
'description': 'The review request must have at least the '
'provided number of open issues.',
'added_in': '2.0',
},
'issue-resolved-count': {
'type': IntFieldType,
'description': 'The review request must have exactly the '
'provided number of resolved issues.',
'added_in': '2.0',
},
'issue-resolved-count-lt': {
'type': IntFieldType,
'description': 'The review request must have less than the '
'provided number of resolved issues.',
'added_in': '2.0',
},
'issue-resolved-count-lte': {
'type': IntFieldType,
'description': 'The review request must have at most the '
'provided number of resolved issues.',
'added_in': '2.0',
},
'issue-resolved-count-gt': {
'type': IntFieldType,
'description': 'The review request must have more than the '
'provided number of resolved issues.',
'added_in': '2.0',
},
'issue-resolved-count-gte': {
'type': IntFieldType,
'description': 'The review request must have at least the '
'provided number of resolved issues.',
'added_in': '2.0',
},
'ship-it': {
'type': BooleanFieldType,
'description': 'The review request must have at least one '
'review with Ship It set, if this is 1. '
'Otherwise, if 0, it must not have any marked '
'Ship It.',
'added_in': '1.6',
'deprecated_in': '2.0',
},
'ship-it-count': {
'type': IntFieldType,
'description': 'The review request must have exactly the '
'provided number of Ship Its.',
'added_in': '2.0',
},
'ship-it-count-lt': {
'type': IntFieldType,
'description': 'The review request must have less than the '
'provided number of Ship Its.',
'added_in': '2.0',
},
'ship-it-count-lte': {
'type': IntFieldType,
'description': 'The review request must have at most the '
'provided number of Ship Its.',
'added_in': '2.0',
},
'ship-it-count-gt': {
'type': IntFieldType,
'description': 'The review request must have more than the '
'provided number of Ship Its.',
'added_in': '2.0',
},
'ship-it-count-gte': {
'type': IntFieldType,
'description': 'The review request must have at least the '
'provided number of Ship Its.',
'added_in': '2.0',
},
'status': {
'type': ChoiceFieldType,
'choices': ('all', 'discarded', 'pending', 'submitted'),
'description': 'The status of the review requests.',
},
'to-groups': {
'type': StringFieldType,
'description': 'A comma-separated list of review group names '
'that the review requests must have in the '
'reviewer list.',
},
'to-user-groups': {
'type': StringFieldType,
'description': 'A comma-separated list of usernames who are '
'in groups that the review requests must have '
'in the reviewer list.',
},
'to-users': {
'type': StringFieldType,
'description': 'A comma-separated list of usernames that the '
'review requests must either have in the '
'reviewer list specifically or by way of '
'a group.',
},
'to-users-directly': {
'type': StringFieldType,
'description': 'A comma-separated list of usernames that the '
'review requests must have in the reviewer '
'list specifically.',
}
},
allow_unknown=True
)
@augment_method_from(WebAPIResource)
def get_list(self, request, *args, **kwargs):
"""Returns all review requests that the user has read access to.
By default, this returns all published or formerly published
review requests.
The resulting list can be filtered down through the many
request parameters.
"""
pass
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
"""Returns information on a particular review request.
This contains full information on the latest published review request.
If the review request is not public, then the client's logged in user
must either be the owner of the review request or must have the
``reviews.can_edit_reviewrequest`` permission set. Otherwise, an
error will be returned.
"""
pass
def get_object(self, request, local_site_name=None, *args, **kwargs):
"""Returns an object, given captured parameters from a URL.
This is an override of the djblets WebAPIResource get_object, which
knows about local_id and local_site_name.
"""
if local_site_name:
id_field = 'local_id'
else:
id_field = 'pk'
return super(ReviewRequestResource, self).get_object(
request, id_field=id_field, local_site_name=local_site_name,
*args, **kwargs)
def get_href(self, obj, request, *args, **kwargs):
"""Returns the URL for this object.
This is an override of WebAPIResource.get_href which will use the
local_id instead of the pk.
"""
if obj.local_site_id:
local_site_name = obj.local_site.name
else:
local_site_name = None
href_kwargs = {
self.uri_object_key: obj.display_id,
}
href_kwargs.update(self.get_href_parent_ids(obj))
return request.build_absolute_uri(
self.get_item_url(local_site_name=local_site_name, **href_kwargs))
def _find_user(self, username, local_site, request):
"""Finds a User object matching ``username``.
This will search all authentication backends, and may create the
User object if the authentication backend knows that the user exists.
"""
username = username.strip()
if local_site:
users = local_site.users
else:
users = User.objects
try:
user = users.get(username=username)
except User.DoesNotExist:
user = None
if not local_site:
for backend in auth.get_backends():
try:
return backend.get_or_create_user(username, request)
except Exception as e:
logger.error('Error when calling get_or_create_user '
'for auth backend %r: %s',
backend, e, exc_info=1)
return user
review_request_resource = ReviewRequestResource()
|
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for comparing proto2 messages in Python.
Forked from tensorflow.python.util.protobuf.
ProtoEq() compares two proto2 messages for equality.
ClearDefaultValuedFields() recursively clears the fields that are set to their
default values. This is useful for comparing protocol buffers where the
semantics of unset fields and default valued fields are the same.
assertProtoEqual() is useful for unit tests. It produces much more helpful
output than assertEqual() for proto2 messages, e.g. this:
outer {
inner {
- strings: "x"
? ^
+ strings: "y"
? ^
}
}
...compared to the default output from assertEqual() that looks like this:
AssertionError: <my.Msg object at 0x9fb353c> != <my.Msg object at 0x9fb35cc>
Call it inside your unit test's googletest.TestCase subclasses like this:
from tensorflow.python.util.protobuf import compare
class MyTest(googletest.TestCase):
...
def testXXX(self):
...
compare.assertProtoEqual(self, a, b)
Alternatively:
from tensorflow.python.util.protobuf import compare
class MyTest(compare.ProtoAssertions, googletest.TestCase):
...
def testXXX(self):
...
self.assertProtoEqual(a, b)
"""
import collections.abc as collections_abc
import difflib
from google.protobuf import descriptor
from google.protobuf import descriptor_pool
from google.protobuf import message
from google.protobuf import text_format
def assertProtoEqual(self, a, b, check_initialized=True, # pylint: disable=invalid-name
normalize_numbers=False, msg=None):
"""Fails with a useful error if a and b aren't equal.
Comparison of repeated fields matches the semantics of
  unittest.TestCase.assertEqual(), i.e. order and extra duplicate fields matter.
Args:
self: googletest.TestCase
a: proto2 PB instance, or text string representing one.
b: proto2 PB instance -- message.Message or subclass thereof.
check_initialized: boolean, whether to fail if either a or b isn't
initialized.
normalize_numbers: boolean, whether to normalize types and precision of
numbers before comparison.
msg: if specified, is used as the error message on failure.
"""
pool = descriptor_pool.Default()
if isinstance(a, str):
a = text_format.Parse(a, b.__class__(), descriptor_pool=pool)
for pb in a, b:
if check_initialized:
errors = pb.FindInitializationErrors()
if errors:
self.fail('Initialization errors: %s\n%s' % (errors, pb))
if normalize_numbers:
NormalizeNumberFields(pb)
a_str = text_format.MessageToString(a, descriptor_pool=pool)
b_str = text_format.MessageToString(b, descriptor_pool=pool)
# Some Python versions would perform regular diff instead of multi-line
# diff if string is longer than 2**16. We substitute this behavior
# with a call to unified_diff instead to have easier-to-read diffs.
# For context, see: https://bugs.python.org/issue11763.
if len(a_str) < 2**16 and len(b_str) < 2**16:
self.assertMultiLineEqual(a_str, b_str, msg=msg)
else:
diff = '\n' + ''.join(difflib.unified_diff(a_str.splitlines(True),
b_str.splitlines(True)))
self.fail('%s : %s' % (msg, diff))
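# Sketch: because `a` may be a text-format string (parsed above against
# b's class), a test can compare against an inline expectation:
#
#     compare.assertProtoEqual(self, 'inner { strings: "x" }', actual_pb)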
def NormalizeNumberFields(pb):
"""Normalizes types and precisions of number fields in a protocol buffer.
Due to subtleties in the python protocol buffer implementation, it is possible
for values to have different types and precision depending on whether they
were set and retrieved directly or deserialized from a protobuf. This function
normalizes integer values to ints and longs based on width, 32-bit floats to
six decimal digits of precision to account for python always storing them as
64-bit,
and ensures doubles are floating point for when they're set to integers.
Modifies pb in place. Recurses into nested objects.
Args:
pb: proto2 message.
Returns:
the given pb, modified in place.
"""
for desc, values in pb.ListFields():
is_repeated = True
if desc.label != descriptor.FieldDescriptor.LABEL_REPEATED:
is_repeated = False
values = [values]
normalized_values = None
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
if desc.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_SINT64):
normalized_values = [int(x) for x in values]
elif desc.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_ENUM):
normalized_values = [int(x) for x in values]
elif desc.type == descriptor.FieldDescriptor.TYPE_FLOAT:
normalized_values = [round(x, 6) for x in values]
elif desc.type == descriptor.FieldDescriptor.TYPE_DOUBLE:
normalized_values = [round(float(x), 7) for x in values]
if normalized_values is not None:
if is_repeated:
pb.ClearField(desc.name)
getattr(pb, desc.name).extend(normalized_values)
else:
setattr(pb, desc.name, normalized_values[0])
if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE or
desc.type == descriptor.FieldDescriptor.TYPE_GROUP):
if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
desc.message_type.has_options and
desc.message_type.GetOptions().map_entry):
# This is a map, only recurse if the values have a message type.
if (desc.message_type.fields_by_number[2].type ==
descriptor.FieldDescriptor.TYPE_MESSAGE):
for v in values.items():
NormalizeNumberFields(v)
else:
for v in values:
# recursive step
NormalizeNumberFields(v)
return pb
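# Sketch of the float normalization above: a 32-bit float widened to
# 64 bits carries noise past ~7 significant digits, which round(x, 6)
# strips, e.g.:
#
#     round(0.30000001192092896, 6) == round(0.3, 6)  # True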
def _IsMap(value):
return isinstance(value, collections_abc.Mapping)
def _IsRepeatedContainer(value):
if isinstance(value, str):
return False
try:
iter(value)
return True
except TypeError:
return False
def ProtoEq(a, b):
"""Compares two proto2 objects for equality.
Recurses into nested messages. Uses list (not set) semantics for comparing
repeated fields, ie duplicates and order matter.
Args:
a: A proto2 message or a primitive.
b: A proto2 message or a primitive.
Returns:
`True` if the messages are equal.
"""
def Format(pb):
"""Returns a dictionary or unchanged pb bases on its type.
Specifically, this function returns a dictionary that maps tag
number (for messages) or element index (for repeated fields) to
value, or just pb unchanged if it's neither.
Args:
pb: A proto2 message or a primitive.
Returns:
A dict or unchanged pb.
"""
if isinstance(pb, message.Message):
return dict((desc.number, value) for desc, value in pb.ListFields())
elif _IsMap(pb):
return dict(pb.items())
elif _IsRepeatedContainer(pb):
return dict(enumerate(list(pb)))
else:
return pb
a, b = Format(a), Format(b)
# Base case
if not isinstance(a, dict) or not isinstance(b, dict):
return a == b
  # This loop performs double duty: it compares two messages by tag value
  # *or* two repeated fields by element, in order. The magic is in the
  # Format() function, which converts them both to the same easily
  # comparable format.
for tag in sorted(set(a.keys()) | set(b.keys())):
if tag not in a or tag not in b:
return False
else:
# Recursive step
if not ProtoEq(a[tag], b[tag]):
return False
# Didn't find any values that differed, so they're equal!
return True
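# Sketch of Format() at work inside ProtoEq(): repeated fields become
# index-keyed dicts, so they are compared element-by-element, in order:
#
#     Format([10, 20]) -> {0: 10, 1: 20}
#     Format('abc')    -> 'abc'  (strings are not repeated containers)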
class ProtoAssertions(object):
"""Mix this into a googletest.TestCase class to get proto2 assertions.
Usage:
class SomeTestCase(compare.ProtoAssertions, googletest.TestCase):
...
def testSomething(self):
...
self.assertProtoEqual(a, b)
See module-level definitions for method documentation.
"""
# pylint: disable=invalid-name
def assertProtoEqual(self, *args, **kwargs):
return assertProtoEqual(self, *args, **kwargs)
|
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__all__ = ['MultiSlotDataGenerator', 'MultiSlotStringDataGenerator']
class DataGenerator(object):
"""
    DataGenerator is a general base class for users to inherit from.
    A user who wants to define custom Python processing logic
    for paddle.fluid.dataset should inherit this class.
"""
def __init__(self):
self._proto_info = None
self.batch_size_ = 32
def _set_line_limit(self, line_limit):
if not isinstance(line_limit, int):
raise ValueError("line_limit%s must be in int type" %
type(line_limit))
if line_limit < 1:
raise ValueError("line_limit can not less than 1")
self._line_limit = line_limit
def set_batch(self, batch_size):
'''
Set batch size of current DataGenerator
        This is necessary only if a user wants to define generate_batch
Example:
.. code-block:: python
import paddle.fluid.incubate.data_generator as dg
class MyData(dg.DataGenerator):
def generate_sample(self, line):
def local_iter():
int_words = [int(x) for x in line.split()]
yield ("words", int_words)
return local_iter
def generate_batch(self, samples):
def local_iter():
for s in samples:
yield ("words", s[1].extend([s[1][0]]))
mydata = MyData()
mydata.set_batch(128)
'''
self.batch_size_ = batch_size
def run_from_memory(self):
'''
        This function generates data from memory. It is usually used for
        debugging and benchmarking.
Example:
.. code-block:: python
import paddle.fluid.incubate.data_generator as dg
class MyData(dg.DataGenerator):
def generate_sample(self, line):
def local_iter():
yield ("words", [1, 2, 3, 4])
return local_iter
mydata = MyData()
mydata.run_from_memory()
'''
batch_samples = []
line_iter = self.generate_sample(None)
for user_parsed_line in line_iter():
            if user_parsed_line is None:
continue
batch_samples.append(user_parsed_line)
if len(batch_samples) == self.batch_size_:
batch_iter = self.generate_batch(batch_samples)
for sample in batch_iter():
sys.stdout.write(self._gen_str(sample))
batch_samples = []
if len(batch_samples) > 0:
batch_iter = self.generate_batch(batch_samples)
for sample in batch_iter():
sys.stdout.write(self._gen_str(sample))
def run_from_stdin(self):
'''
        This function reads data rows from stdin, parses them with the
        generate_sample function, and further parses the return value of the
        generate_sample function with the _gen_str function. The parsed data
        will be written to stdout and the corresponding protofile will be
        generated.
Example:
.. code-block:: python
import paddle.fluid.incubate.data_generator as dg
class MyData(dg.DataGenerator):
def generate_sample(self, line):
def local_iter():
int_words = [int(x) for x in line.split()]
yield ("words", [int_words])
return local_iter
mydata = MyData()
mydata.run_from_stdin()
'''
batch_samples = []
for line in sys.stdin:
line_iter = self.generate_sample(line)
for user_parsed_line in line_iter():
                if user_parsed_line is None:
continue
batch_samples.append(user_parsed_line)
if len(batch_samples) == self.batch_size_:
batch_iter = self.generate_batch(batch_samples)
for sample in batch_iter():
sys.stdout.write(self._gen_str(sample))
batch_samples = []
if len(batch_samples) > 0:
batch_iter = self.generate_batch(batch_samples)
for sample in batch_iter():
sys.stdout.write(self._gen_str(sample))
def _gen_str(self, line):
'''
        Further processes the output of the process() function rewritten by
        the user, outputs data that can be directly read by the datafeed, and
        updates the proto_info information.
Args:
line(str): the output of the process() function rewritten by user.
Returns:
            A string that can be read directly by the datafeed.
'''
raise NotImplementedError(
"pls use MultiSlotDataGenerator or PairWiseDataGenerator")
def generate_sample(self, line):
'''
This function needs to be overridden by the user to process the
original data row into a list or tuple.
Args:
line(str): the original data row
Returns:
Returns the data processed by the user.
The data format is list or tuple:
[(name, [feasign, ...]), ...]
or ((name, [feasign, ...]), ...)
For example:
[("words", [1926, 08, 17]), ("label", [1])]
or (("words", [1926, 08, 17]), ("label", [1]))
Note:
The type of feasigns must be in int or float. Once the float
element appears in the feasign, the type of that slot will be
processed into a float.
Example:
.. code-block:: python
import paddle.fluid.incubate.data_generator as dg
class MyData(dg.DataGenerator):
def generate_sample(self, line):
def local_iter():
int_words = [int(x) for x in line.split()]
yield ("words", [int_words])
return local_iter
'''
raise NotImplementedError(
"Please rewrite this function to return a list or tuple: " +
"[(name, [feasign, ...]), ...] or ((name, [feasign, ...]), ...)")
def generate_batch(self, samples):
'''
This function needs to be overridden by the user to process the
generated samples from generate_sample(self, str) function
It is usually used as batch processing when a user wants to
do preprocessing on a batch of samples, e.g. padding according to
the max length of a sample in the batch
Args:
            samples(list): the samples generated by generate_sample
Returns:
a python generator, the same format as return value of generate_sample
Example:
.. code-block:: python
import paddle.fluid.incubate.data_generator as dg
class MyData(dg.DataGenerator):
def generate_sample(self, line):
def local_iter():
int_words = [int(x) for x in line.split()]
yield ("words", int_words)
return local_iter
def generate_batch(self, samples):
def local_iter():
for s in samples:
yield ("words", s[1].extend([s[1][0]]))
mydata = MyData()
mydata.set_batch(128)
'''
def local_iter():
for sample in samples:
yield sample
return local_iter
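    # An overriding sketch for generate_batch (illustrative, assuming each
    # sample is a ("words", id_list) tuple and 0 is a valid pad id): pad every
    # sample to the batch's max length.
    #
    #   def generate_batch(self, samples):
    #       def local_iter():
    #           max_len = max(len(ids) for _, ids in samples)
    #           for name, ids in samples:
    #               yield [(name, ids + [0] * (max_len - len(ids)))]
    #       return local_iter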
# TODO: guru4elephant
# add more generalized DataGenerator that can adapt user-defined slot
# for example, [(name, float_list), (name, str_list), (name, int_list)]
class MultiSlotStringDataGenerator(DataGenerator):
def _gen_str(self, line):
'''
        Further processes the output of the process() function rewritten by
        the user, outputs data that can be directly read by the
        MultiSlotDataFeed, and updates the proto_info information.
The input line will be in this format:
>>> [(name, [str(feasign), ...]), ...]
>>> or ((name, [str(feasign), ...]), ...)
The output will be in this format:
>>> [ids_num id1 id2 ...] ...
For example, if the input is like this:
>>> [("words", ["1926", "08", "17"]), ("label", ["1"])]
>>> or (("words", ["1926", "08", "17"]), ("label", ["1"]))
the output will be:
            >>> 3 1926 08 17 1 1
Args:
line(str): the output of the process() function rewritten by user.
Returns:
            A string that can be read directly by the MultiSlotDataFeed.
'''
        if not isinstance(line, (list, tuple)):
            raise ValueError(
                "the output of process() must be in list or tuple type. "
                "Example: [('words', ['1926', '08', '17']), ('label', ['1'])]")
output = ""
        for item in line:
name, elements = item
if output:
output += " "
out_str = []
out_str.append(str(len(elements)))
out_str.extend(elements)
output += " ".join(out_str)
return output + "\n"
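    # Conversion sketch (illustrative): a single record with two string slots
    # is flattened to "<len> <ids...>" per slot, space-joined.
    #
    #   g = MultiSlotStringDataGenerator()
    #   g._gen_str([("words", ["1926", "08", "17"]), ("label", ["1"])])
    #   # -> '3 1926 08 17 1 1\n'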
class MultiSlotDataGenerator(DataGenerator):
def _gen_str(self, line):
'''
        Further processes the output of the process() function rewritten by
        the user, outputs data that can be directly read by the
        MultiSlotDataFeed, and updates the proto_info information.
The input line will be in this format:
>>> [(name, [feasign, ...]), ...]
>>> or ((name, [feasign, ...]), ...)
The output will be in this format:
>>> [ids_num id1 id2 ...] ...
The proto_info will be in this format:
>>> [(name, type), ...]
For example, if the input is like this:
>>> [("words", [1926, 08, 17]), ("label", [1])]
>>> or (("words", [1926, 08, 17]), ("label", [1]))
the output will be:
            >>> 3 1926 8 17 1 1
the proto_info will be:
>>> [("words", "uint64"), ("label", "uint64")]
Args:
line(str): the output of the process() function rewritten by user.
Returns:
            A string that can be read directly by the MultiSlotDataFeed.
'''
        if not isinstance(line, (list, tuple)):
            raise ValueError(
                "the output of process() must be in list or tuple type. "
                "Example: [('words', [1926, 8, 17]), ('label', [1])]")
output = ""
if self._proto_info is None:
self._proto_info = []
for item in line:
name, elements = item
if not isinstance(name, str):
raise ValueError("name%s must be in str type" % type(name))
if not isinstance(elements, list):
raise ValueError("elements%s must be in list type" %
type(elements))
if not elements:
raise ValueError(
"the elements of each field can not be empty, you need padding it in process()."
)
self._proto_info.append((name, "uint64"))
if output:
output += " "
output += str(len(elements))
for elem in elements:
if isinstance(elem, float):
self._proto_info[-1] = (name, "float")
elif not isinstance(elem, int) and not isinstance(elem,
long):
raise ValueError(
"the type of element%s must be in int or float" %
type(elem))
output += " " + str(elem)
else:
if len(line) != len(self._proto_info):
raise ValueError(
"the complete field set of two given line are inconsistent.")
for index, item in enumerate(line):
name, elements = item
if not isinstance(name, str):
raise ValueError("name%s must be in str type" % type(name))
if not isinstance(elements, list):
raise ValueError("elements%s must be in list type" %
type(elements))
if not elements:
raise ValueError(
"the elements of each field can not be empty, you need padding it in process()."
)
if name != self._proto_info[index][0]:
raise ValueError(
"the field name of two given line are not match: require<%s>, get<%s>."
% (self._proto_info[index][0], name))
if output:
output += " "
output += str(len(elements))
for elem in elements:
if self._proto_info[index][1] != "float":
if isinstance(elem, float):
self._proto_info[index] = (name, "float")
elif not isinstance(elem, int) and not isinstance(elem,
long):
raise ValueError(
"the type of element%s must be in int or float"
% type(elem))
output += " " + str(elem)
return output + "\n"
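# A minimal end-to-end sketch (illustrative; MyWordsGenerator and the shell
# command are hypothetical, not part of this module):
#
#   class MyWordsGenerator(MultiSlotDataGenerator):
#       def generate_sample(self, line):
#           def local_iter():
#               yield [("words", [int(x) for x in line.split()]),
#                      ("label", [1])]
#           return local_iter
#
#   MyWordsGenerator().run_from_stdin()  # e.g. echo "1 2 3" | python gen.py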
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import unittest
import datetime
from libcloud.utils.iso8601 import UTC
try:
import simplejson as json
except ImportError:
import json
from mock import Mock, patch
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import method_type
from libcloud.utils.py3 import u
from libcloud.common.types import InvalidCredsError, MalformedResponseError, \
LibcloudError
from libcloud.compute.types import Provider, KeyPairDoesNotExistError
from libcloud.compute.providers import get_driver
from libcloud.compute.drivers.openstack import (
OpenStack_1_0_NodeDriver, OpenStack_1_0_Response,
OpenStack_1_1_NodeDriver, OpenStackSecurityGroup,
OpenStackSecurityGroupRule, OpenStack_1_1_FloatingIpPool,
OpenStack_1_1_FloatingIpAddress, OpenStackKeyPair
)
from libcloud.compute.base import Node, NodeImage, NodeSize
from libcloud.pricing import set_pricing, clear_pricing_data
from libcloud.test import MockResponse, MockHttpTestCase, XML_HEADERS
from libcloud.test.file_fixtures import ComputeFileFixtures, OpenStackFixtures
from libcloud.test.compute import TestCaseMixin
from libcloud.test.secrets import OPENSTACK_PARAMS
BASE_DIR = os.path.abspath(os.path.split(__file__)[0])
class OpenStack_1_0_ResponseTestCase(unittest.TestCase):
XML = """<?xml version="1.0" encoding="UTF-8"?><root/>"""
def test_simple_xml_content_type_handling(self):
http_response = MockResponse(
200, OpenStack_1_0_ResponseTestCase.XML, headers={'content-type': 'application/xml'})
body = OpenStack_1_0_Response(http_response, None).parse_body()
self.assertTrue(hasattr(body, 'tag'), "Body should be parsed as XML")
def test_extended_xml_content_type_handling(self):
http_response = MockResponse(200,
OpenStack_1_0_ResponseTestCase.XML,
headers={'content-type': 'application/xml; charset=UTF-8'})
body = OpenStack_1_0_Response(http_response, None).parse_body()
self.assertTrue(hasattr(body, 'tag'), "Body should be parsed as XML")
def test_non_xml_content_type_handling(self):
RESPONSE_BODY = "Accepted"
http_response = MockResponse(
202, RESPONSE_BODY, headers={'content-type': 'text/html'})
body = OpenStack_1_0_Response(http_response, None).parse_body()
self.assertEqual(
body, RESPONSE_BODY, "Non-XML body should be returned as is")
class OpenStack_1_0_Tests(unittest.TestCase, TestCaseMixin):
should_list_locations = False
should_list_volumes = False
driver_klass = OpenStack_1_0_NodeDriver
driver_args = OPENSTACK_PARAMS
driver_kwargs = {}
# driver_kwargs = {'ex_force_auth_version': '1.0'}
@classmethod
    def create_driver(cls):
        if cls is not OpenStack_1_0_FactoryMethodTests:
            cls.driver_type = cls.driver_klass
        return cls.driver_type(*cls.driver_args, **cls.driver_kwargs)
def setUp(self):
        # Monkeypatch get_endpoint because the base OpenStack driver doesn't
        # actually work with old devstack, but this class and its tests are
        # still used by the Rackspace driver.
def get_endpoint(*args, **kwargs):
return "https://servers.api.rackspacecloud.com/v1.0/slug"
self.driver_klass.connectionCls.get_endpoint = get_endpoint
self.driver_klass.connectionCls.conn_classes = (OpenStackMockHttp,
OpenStackMockHttp)
self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com"
OpenStackMockHttp.type = None
self.driver = self.create_driver()
# normally authentication happens lazily, but we force it here
self.driver.connection._populate_hosts_and_request_paths()
clear_pricing_data()
@patch('libcloud.common.openstack.OpenStackServiceCatalog')
def test_populate_hosts_and_requests_path(self, _):
tomorrow = datetime.datetime.today() + datetime.timedelta(1)
cls = self.driver_klass.connectionCls
count = 5
# Test authentication and token re-use
con = cls('username', 'key')
osa = con.get_auth_class()
mocked_auth_method = Mock()
osa.authenticate = mocked_auth_method
# Valid token returned on first call, should be reused.
for i in range(0, count):
con._populate_hosts_and_request_paths()
if i == 0:
osa.auth_token = '1234'
osa.auth_token_expires = tomorrow
self.assertEqual(mocked_auth_method.call_count, 1)
osa.auth_token = None
osa.auth_token_expires = None
# ex_force_auth_token provided, authenticate should never be called
con = cls('username', 'key', ex_force_base_url='http://ponies',
ex_force_auth_token='1234')
osa = con.get_auth_class()
mocked_auth_method = Mock()
osa.authenticate = mocked_auth_method
for i in range(0, count):
con._populate_hosts_and_request_paths()
self.assertEqual(mocked_auth_method.call_count, 0)
def test_auth_token_is_set(self):
self.driver.connection._populate_hosts_and_request_paths()
self.assertEqual(
self.driver.connection.auth_token, "aaaaaaaaaaaa-bbb-cccccccccccccc")
def test_auth_token_expires_is_set(self):
self.driver.connection._populate_hosts_and_request_paths()
expires = self.driver.connection.auth_token_expires
self.assertEqual(expires.isoformat(), "2031-11-23T21:00:14-06:00")
def test_auth(self):
if self.driver.connection._auth_version == '2.0':
return
OpenStackMockHttp.type = 'UNAUTHORIZED'
try:
self.driver = self.create_driver()
self.driver.list_nodes()
except InvalidCredsError:
e = sys.exc_info()[1]
self.assertEqual(True, isinstance(e, InvalidCredsError))
else:
self.fail('test should have thrown')
def test_auth_missing_key(self):
if self.driver.connection._auth_version == '2.0':
return
OpenStackMockHttp.type = 'UNAUTHORIZED_MISSING_KEY'
try:
self.driver = self.create_driver()
self.driver.list_nodes()
except MalformedResponseError:
e = sys.exc_info()[1]
self.assertEqual(True, isinstance(e, MalformedResponseError))
else:
self.fail('test should have thrown')
def test_auth_server_error(self):
if self.driver.connection._auth_version == '2.0':
return
OpenStackMockHttp.type = 'INTERNAL_SERVER_ERROR'
try:
self.driver = self.create_driver()
self.driver.list_nodes()
except MalformedResponseError:
e = sys.exc_info()[1]
self.assertEqual(True, isinstance(e, MalformedResponseError))
else:
self.fail('test should have thrown')
def test_error_parsing_when_body_is_missing_message(self):
OpenStackMockHttp.type = 'NO_MESSAGE_IN_ERROR_BODY'
try:
self.driver.list_images()
except Exception:
e = sys.exc_info()[1]
self.assertEqual(True, isinstance(e, Exception))
else:
self.fail('test should have thrown')
def test_list_locations(self):
locations = self.driver.list_locations()
self.assertEqual(len(locations), 1)
def test_list_nodes(self):
OpenStackMockHttp.type = 'EMPTY'
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 0)
OpenStackMockHttp.type = None
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 1)
node = ret[0]
self.assertEqual('67.23.21.33', node.public_ips[0])
self.assertTrue('10.176.168.218' in node.private_ips)
self.assertEqual(node.extra.get('flavorId'), '1')
self.assertEqual(node.extra.get('imageId'), '11')
self.assertEqual(type(node.extra.get('metadata')), type(dict()))
OpenStackMockHttp.type = 'METADATA'
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 1)
node = ret[0]
self.assertEqual(type(node.extra.get('metadata')), type(dict()))
self.assertEqual(node.extra.get('metadata').get('somekey'),
'somevalue')
OpenStackMockHttp.type = None
def test_list_images(self):
ret = self.driver.list_images()
expected = {10: {'serverId': None,
'status': 'ACTIVE',
'created': '2009-07-20T09:14:37-05:00',
'updated': '2009-07-20T09:14:37-05:00',
'progress': None,
'minDisk': None,
'minRam': None},
11: {'serverId': '91221',
'status': 'ACTIVE',
'created': '2009-11-29T20:22:09-06:00',
'updated': '2009-11-29T20:24:08-06:00',
'progress': '100',
'minDisk': '5',
'minRam': '256'}}
for ret_idx, extra in list(expected.items()):
for key, value in list(extra.items()):
self.assertEqual(ret[ret_idx].extra[key], value)
def test_create_node(self):
image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)',
driver=self.driver)
size = NodeSize(1, '256 slice', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='racktest', image=image, size=size)
self.assertEqual(node.name, 'racktest')
self.assertEqual(node.extra.get('password'), 'racktestvJq7d3')
def test_create_node_without_adminPass(self):
OpenStackMockHttp.type = 'NO_ADMIN_PASS'
image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)',
driver=self.driver)
size = NodeSize(1, '256 slice', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='racktest', image=image, size=size)
self.assertEqual(node.name, 'racktest')
self.assertEqual(node.extra.get('password'), None)
def test_create_node_ex_shared_ip_group(self):
OpenStackMockHttp.type = 'EX_SHARED_IP_GROUP'
image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)',
driver=self.driver)
size = NodeSize(1, '256 slice', None, None, None, None,
driver=self.driver)
node = self.driver.create_node(name='racktest', image=image, size=size,
ex_shared_ip_group_id='12345')
self.assertEqual(node.name, 'racktest')
self.assertEqual(node.extra.get('password'), 'racktestvJq7d3')
def test_create_node_with_metadata(self):
OpenStackMockHttp.type = 'METADATA'
image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)',
driver=self.driver)
size = NodeSize(1, '256 slice', None, None, None, None,
driver=self.driver)
metadata = {'a': 'b', 'c': 'd'}
files = {'/file1': 'content1', '/file2': 'content2'}
node = self.driver.create_node(name='racktest', image=image, size=size,
metadata=metadata, files=files)
self.assertEqual(node.name, 'racktest')
self.assertEqual(node.extra.get('password'), 'racktestvJq7d3')
self.assertEqual(node.extra.get('metadata'), metadata)
def test_reboot_node(self):
node = Node(id=72258, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
ret = node.reboot()
self.assertTrue(ret is True)
def test_destroy_node(self):
node = Node(id=72258, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
ret = node.destroy()
self.assertTrue(ret is True)
def test_ex_limits(self):
limits = self.driver.ex_limits()
self.assertTrue("rate" in limits)
self.assertTrue("absolute" in limits)
def test_create_image(self):
node = Node(id=444222, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
image = self.driver.create_image(node, "imgtest")
self.assertEqual(image.name, "imgtest")
self.assertEqual(image.id, "12345")
def test_delete_image(self):
image = NodeImage(id=333111, name='Ubuntu 8.10 (intrepid)',
driver=self.driver)
ret = self.driver.delete_image(image)
self.assertTrue(ret)
def test_ex_list_ip_addresses(self):
ret = self.driver.ex_list_ip_addresses(node_id=72258)
self.assertEqual(2, len(ret.public_addresses))
self.assertTrue('67.23.10.131' in ret.public_addresses)
self.assertTrue('67.23.10.132' in ret.public_addresses)
self.assertEqual(1, len(ret.private_addresses))
self.assertTrue('10.176.42.16' in ret.private_addresses)
def test_ex_list_ip_groups(self):
ret = self.driver.ex_list_ip_groups()
self.assertEqual(2, len(ret))
self.assertEqual('1234', ret[0].id)
self.assertEqual('Shared IP Group 1', ret[0].name)
self.assertEqual('5678', ret[1].id)
self.assertEqual('Shared IP Group 2', ret[1].name)
self.assertTrue(ret[0].servers is None)
def test_ex_list_ip_groups_detail(self):
ret = self.driver.ex_list_ip_groups(details=True)
self.assertEqual(2, len(ret))
self.assertEqual('1234', ret[0].id)
self.assertEqual('Shared IP Group 1', ret[0].name)
self.assertEqual(2, len(ret[0].servers))
self.assertEqual('422', ret[0].servers[0])
self.assertEqual('3445', ret[0].servers[1])
self.assertEqual('5678', ret[1].id)
self.assertEqual('Shared IP Group 2', ret[1].name)
self.assertEqual(3, len(ret[1].servers))
self.assertEqual('23203', ret[1].servers[0])
self.assertEqual('2456', ret[1].servers[1])
self.assertEqual('9891', ret[1].servers[2])
def test_ex_create_ip_group(self):
ret = self.driver.ex_create_ip_group('Shared IP Group 1', '5467')
self.assertEqual('1234', ret.id)
self.assertEqual('Shared IP Group 1', ret.name)
self.assertEqual(1, len(ret.servers))
self.assertEqual('422', ret.servers[0])
def test_ex_delete_ip_group(self):
ret = self.driver.ex_delete_ip_group('5467')
self.assertEqual(True, ret)
def test_ex_share_ip(self):
ret = self.driver.ex_share_ip('1234', '3445', '67.23.21.133')
self.assertEqual(True, ret)
def test_ex_unshare_ip(self):
ret = self.driver.ex_unshare_ip('3445', '67.23.21.133')
self.assertEqual(True, ret)
def test_ex_resize(self):
node = Node(id=444222, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
size = NodeSize(1, '256 slice', None, None, None, None,
driver=self.driver)
self.assertTrue(self.driver.ex_resize(node=node, size=size))
def test_ex_confirm_resize(self):
node = Node(id=444222, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
self.assertTrue(self.driver.ex_confirm_resize(node=node))
def test_ex_revert_resize(self):
node = Node(id=444222, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
self.assertTrue(self.driver.ex_revert_resize(node=node))
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 7, 'Wrong sizes count')
for size in sizes:
self.assertTrue(isinstance(size.price, float),
'Wrong size price type')
if self.driver.api_name == 'openstack':
self.assertEqual(size.price, 0,
'Size price should be zero by default')
def test_list_sizes_with_specified_pricing(self):
if self.driver.api_name != 'openstack':
return
pricing = dict((str(i), i) for i in range(1, 8))
set_pricing(driver_type='compute', driver_name='openstack',
pricing=pricing)
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 7, 'Wrong sizes count')
for size in sizes:
self.assertTrue(isinstance(size.price, float),
'Wrong size price type')
self.assertEqual(float(size.price), float(pricing[size.id]))
class OpenStack_1_0_FactoryMethodTests(OpenStack_1_0_Tests):
should_list_locations = False
should_list_volumes = False
driver_klass = OpenStack_1_0_NodeDriver
driver_type = get_driver(Provider.OPENSTACK)
driver_args = OPENSTACK_PARAMS + ('1.0',)
def test_factory_method_invalid_version(self):
try:
self.driver_type(*(OPENSTACK_PARAMS + ('15.5',)))
except NotImplementedError:
pass
else:
self.fail('Exception was not thrown')
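# Factory-method usage sketch (illustrative; 'user' and 'apikey' are
# placeholder credentials, not fixtures from this file): the API version is
# passed positionally after the credentials, as in driver_args above.
#
#   cls = get_driver(Provider.OPENSTACK)
#   driver = cls('user', 'apikey', '1.1', ex_force_auth_version='2.0')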
class OpenStackMockHttp(MockHttpTestCase):
fixtures = ComputeFileFixtures('openstack')
auth_fixtures = OpenStackFixtures()
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
# fake auth token response
def _v1_0(self, method, url, body, headers):
headers = {
'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug',
'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06',
'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06',
'x-storage-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06',
'x-storage-url': 'https://storage4.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'}
return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT])
def _v1_0_UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])
def _v1_0_INTERNAL_SERVER_ERROR(self, method, url, body, headers):
return (httplib.INTERNAL_SERVER_ERROR, "<h1>500: Internal Server Error</h1>", {},
httplib.responses[httplib.INTERNAL_SERVER_ERROR])
def _v1_0_slug_images_detail_NO_MESSAGE_IN_ERROR_BODY(self, method, url, body, headers):
body = self.fixtures.load('300_multiple_choices.json')
return (httplib.MULTIPLE_CHOICES, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_0_UNAUTHORIZED_MISSING_KEY(self, method, url, body, headers):
headers = {
'x-server-management-url': 'https://servers.api.rackspacecloud.com/v1.0/slug',
'x-auth-tokenx': 'FE011C19-CF86-4F87-BE5D-9229145D7A06',
'x-cdn-management-url': 'https://cdn.clouddrive.com/v1/MossoCloudFS_FE011C19-CF86-4F87-BE5D-9229145D7A06'}
return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT])
def _v2_0_tokens(self, method, url, body, headers):
body = self.auth_fixtures.load('_v2_0__auth.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail_EMPTY(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_servers_detail_empty.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_servers_detail.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers_detail_METADATA(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_servers_detail_metadata.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_images_333111(self, method, url, body, headers):
if method != "DELETE":
raise NotImplementedError()
# this is currently used for deletion of an image
# as such it should not accept GET/POST
        return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT])
def _v1_0_slug_images(self, method, url, body, headers):
if method != "POST":
raise NotImplementedError()
# this is currently used for creation of new image with
# POST request, don't handle GET to avoid possible confusion
body = self.fixtures.load('v1_slug_images_post.xml')
return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED])
def _v1_0_slug_images_detail(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_images_detail.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_servers.xml')
return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED])
def _v1_0_slug_servers_NO_ADMIN_PASS(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_servers_no_admin_pass.xml')
return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED])
def _v1_0_slug_servers_EX_SHARED_IP_GROUP(self, method, url, body, headers):
# test_create_node_ex_shared_ip_group
# Verify that the body contains sharedIpGroupId XML element
body = u(body)
self.assertTrue(body.find('sharedIpGroupId="12345"') != -1)
body = self.fixtures.load('v1_slug_servers.xml')
return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED])
def _v1_0_slug_servers_METADATA(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_servers_metadata.xml')
return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED])
def _v1_0_slug_servers_72258_action(self, method, url, body, headers):
if method != "POST" or body[:8] != "<reboot ":
raise NotImplementedError()
# only used by reboot() right now, but we will need to parse body
# someday !!!!
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
def _v1_0_slug_limits(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_limits.xml')
return (httplib.ACCEPTED, body, XML_HEADERS, httplib.responses[httplib.ACCEPTED])
def _v1_0_slug_servers_72258(self, method, url, body, headers):
if method != "DELETE":
raise NotImplementedError()
# only used by destroy node()
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
def _v1_0_slug_servers_72258_ips(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_servers_ips.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_shared_ip_groups_5467(self, method, url, body, headers):
if method != 'DELETE':
raise NotImplementedError()
return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT])
def _v1_0_slug_shared_ip_groups(self, method, url, body, headers):
fixture = 'v1_slug_shared_ip_group.xml' if method == 'POST' else 'v1_slug_shared_ip_groups.xml'
body = self.fixtures.load(fixture)
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_shared_ip_groups_detail(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_shared_ip_groups_detail.xml')
return (httplib.OK, body, XML_HEADERS, httplib.responses[httplib.OK])
def _v1_0_slug_servers_3445_ips_public_67_23_21_133(self, method, url, body, headers):
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
def _v1_0_slug_servers_444222_action(self, method, url, body, headers):
body = u(body)
        if body.find('resize') != -1:
            # test_ex_resize
            return (httplib.ACCEPTED, "", headers, httplib.responses[httplib.ACCEPTED])
        elif body.find('confirmResize') != -1:
            # test_ex_confirm_resize
            return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT])
        elif body.find('revertResize') != -1:
            # test_ex_revert_resize
            return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT])
        raise NotImplementedError()
def _v1_0_slug_flavors_detail(self, method, url, body, headers):
body = self.fixtures.load('v1_slug_flavors_detail.xml')
headers = {
'date': 'Tue, 14 Jun 2011 09:43:55 GMT', 'content-length': '529'}
headers.update(XML_HEADERS)
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _v1_1_auth(self, method, url, body, headers):
body = self.auth_fixtures.load('_v1_1__auth.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_auth_UNAUTHORIZED(self, method, url, body, headers):
body = self.auth_fixtures.load('_v1_1__auth_unauthorized.json')
return (httplib.UNAUTHORIZED, body, self.json_content_headers, httplib.responses[httplib.UNAUTHORIZED])
def _v1_1_auth_UNAUTHORIZED_MISSING_KEY(self, method, url, body, headers):
body = self.auth_fixtures.load('_v1_1__auth_mssing_token.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_auth_INTERNAL_SERVER_ERROR(self, method, url, body, headers):
return (httplib.INTERNAL_SERVER_ERROR, "<h1>500: Internal Server Error</h1>", {'content-type': 'text/html'},
httplib.responses[httplib.INTERNAL_SERVER_ERROR])
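    # Dispatch note (descriptive, inferred from the handler names above): the
    # mock transport maps a request path to a handler by replacing
    # non-alphanumeric characters with underscores and appending the
    # class-level `type` suffix, so GET /v1.0/slug/servers/detail with
    # type 'EMPTY' is served by _v1_0_slug_servers_detail_EMPTY.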
class OpenStack_1_1_Tests(unittest.TestCase, TestCaseMixin):
should_list_locations = False
should_list_volumes = True
driver_klass = OpenStack_1_1_NodeDriver
driver_type = OpenStack_1_1_NodeDriver
driver_args = OPENSTACK_PARAMS
driver_kwargs = {'ex_force_auth_version': '2.0'}
@classmethod
    def create_driver(cls):
        if cls is not OpenStack_1_1_FactoryMethodTests:
            cls.driver_type = cls.driver_klass
        return cls.driver_type(*cls.driver_args, **cls.driver_kwargs)
def setUp(self):
self.driver_klass.connectionCls.conn_classes = (
OpenStack_2_0_MockHttp, OpenStack_2_0_MockHttp)
self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com"
OpenStackMockHttp.type = None
OpenStack_1_1_MockHttp.type = None
OpenStack_2_0_MockHttp.type = None
self.driver = self.create_driver()
# normally authentication happens lazily, but we force it here
self.driver.connection._populate_hosts_and_request_paths()
clear_pricing_data()
self.node = self.driver.list_nodes()[1]
def _force_reauthentication(self):
"""
Trash current auth token so driver will be forced to re-authentication
on next request.
"""
self.driver.connection._ex_force_base_url = 'http://ex_force_base_url.com:666/forced_url'
self.driver.connection.auth_token = None
self.driver.connection.auth_token_expires = None
self.driver.connection._osa.auth_token = None
self.driver.connection._osa.auth_token_expires = None
def test_auth_token_is_set(self):
self._force_reauthentication()
self.driver.connection._populate_hosts_and_request_paths()
self.assertEqual(
self.driver.connection.auth_token, "aaaaaaaaaaaa-bbb-cccccccccccccc")
def test_auth_token_expires_is_set(self):
self._force_reauthentication()
self.driver.connection._populate_hosts_and_request_paths()
expires = self.driver.connection.auth_token_expires
self.assertEqual(expires.isoformat(), "2031-11-23T21:00:14-06:00")
def test_ex_force_base_url(self):
# change base url and trash the current auth token so we can
# re-authenticate
self.driver.connection._ex_force_base_url = 'http://ex_force_base_url.com:666/forced_url'
self.driver.connection.auth_token = None
self.driver.connection._populate_hosts_and_request_paths()
# assert that we use the base url and not the auth url
self.assertEqual(self.driver.connection.host, 'ex_force_base_url.com')
self.assertEqual(self.driver.connection.port, '666')
self.assertEqual(self.driver.connection.request_path, '/forced_url')
def test_get_endpoint_populates_host_port_and_request_path(self):
# simulate a subclass overriding this method
self.driver.connection.get_endpoint = lambda: 'http://endpoint_auth_url.com:1555/service_url'
self.driver.connection.auth_token = None
self.driver.connection._ex_force_base_url = None
self.driver.connection._populate_hosts_and_request_paths()
# assert that we use the result of get endpoint
self.assertEqual(self.driver.connection.host, 'endpoint_auth_url.com')
self.assertEqual(self.driver.connection.port, '1555')
self.assertEqual(self.driver.connection.request_path, '/service_url')
def test_set_auth_token_populates_host_port_and_request_path(self):
# change base url and trash the current auth token so we can
# re-authenticate
self.driver.connection._ex_force_base_url = 'http://some_other_ex_force_base_url.com:1222/some-service'
self.driver.connection.auth_token = "preset-auth-token"
self.driver.connection._populate_hosts_and_request_paths()
# assert that we use the base url and not the auth url
self.assertEqual(
self.driver.connection.host, 'some_other_ex_force_base_url.com')
self.assertEqual(self.driver.connection.port, '1222')
self.assertEqual(self.driver.connection.request_path, '/some-service')
def test_auth_token_without_base_url_raises_exception(self):
kwargs = {
'ex_force_auth_version': '2.0',
'ex_force_auth_token': 'preset-auth-token'
}
try:
self.driver_type(*self.driver_args, **kwargs)
self.fail('Expected failure setting auth token without base url')
except LibcloudError:
pass
else:
self.fail('Expected failure setting auth token without base url')
def test_ex_force_auth_token_passed_to_connection(self):
base_url = 'https://servers.api.rackspacecloud.com/v1.1/slug'
kwargs = {
'ex_force_auth_version': '2.0',
'ex_force_auth_token': 'preset-auth-token',
'ex_force_base_url': base_url
}
driver = self.driver_type(*self.driver_args, **kwargs)
driver.list_nodes()
self.assertEqual(kwargs['ex_force_auth_token'],
driver.connection.auth_token)
self.assertEqual('servers.api.rackspacecloud.com',
driver.connection.host)
self.assertEqual('/v1.1/slug', driver.connection.request_path)
self.assertEqual(443, driver.connection.port)
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 2)
node = nodes[0]
self.assertEqual('12065', node.id)
# test public IPv4
self.assertTrue('12.16.18.28' in node.public_ips)
self.assertTrue('50.57.94.35' in node.public_ips)
# floating ip
self.assertTrue('192.168.3.3' in node.public_ips)
# test public IPv6
self.assertTrue(
'2001:4801:7808:52:16:3eff:fe47:788a' in node.public_ips)
# test private IPv4
self.assertTrue('10.182.64.34' in node.private_ips)
# floating ip
self.assertTrue('10.3.3.3' in node.private_ips)
# test private IPv6
self.assertTrue(
'fec0:4801:7808:52:16:3eff:fe60:187d' in node.private_ips)
self.assertEqual(node.extra.get('flavorId'), '2')
self.assertEqual(node.extra.get('imageId'), '7')
self.assertEqual(node.extra.get('metadata'), {})
self.assertEqual(node.extra['updated'], '2011-10-11T00:50:04Z')
self.assertEqual(node.extra['created'], '2011-10-11T00:51:39Z')
def test_list_nodes_no_image_id_attribute(self):
        # Regression test for LIBCLOUD-455
self.driver_klass.connectionCls.conn_classes[0].type = 'ERROR_STATE_NO_IMAGE_ID'
self.driver_klass.connectionCls.conn_classes[1].type = 'ERROR_STATE_NO_IMAGE_ID'
nodes = self.driver.list_nodes()
self.assertEqual(nodes[0].extra['imageId'], None)
def test_list_volumes(self):
volumes = self.driver.list_volumes()
self.assertEqual(len(volumes), 2)
volume = volumes[0]
self.assertEqual('cd76a3a1-c4ce-40f6-9b9f-07a61508938d', volume.id)
self.assertEqual('test_volume_2', volume.name)
self.assertEqual(2, volume.size)
self.assertEqual(volume.extra, {
'description': '',
'attachments': [{
'id': 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d',
"device": "/dev/vdb",
"serverId": "12065",
"volumeId": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d",
}],
'snapshot_id': None,
'state': 'available',
'location': 'nova',
'volume_type': 'None',
'metadata': {},
'created_at': '2013-06-24T11:20:13.000000',
})
volume = volumes[1]
self.assertEqual('cfcec3bc-b736-4db5-9535-4c24112691b5', volume.id)
self.assertEqual('test_volume', volume.name)
self.assertEqual(50, volume.size)
self.assertEqual(volume.extra, {
'description': 'some description',
'attachments': [],
'snapshot_id': '01f48111-7866-4cd2-986a-e92683c4a363',
'state': 'available',
'location': 'nova',
'volume_type': 'None',
'metadata': {},
'created_at': '2013-06-21T12:39:02.000000',
})
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 8, 'Wrong sizes count')
for size in sizes:
self.assertTrue(isinstance(size.price, float),
'Wrong size price type')
self.assertEqual(sizes[0].vcpus, 8)
def test_list_sizes_with_specified_pricing(self):
pricing = dict((str(i), i * 5.0) for i in range(1, 9))
set_pricing(driver_type='compute',
driver_name=self.driver.api_name, pricing=pricing)
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 8, 'Wrong sizes count')
for size in sizes:
self.assertTrue(isinstance(size.price, float),
'Wrong size price type')
self.assertEqual(size.price, pricing[size.id],
'Size price should match')
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), 13, 'Wrong images count')
image = images[0]
self.assertEqual(image.id, '13')
self.assertEqual(image.name, 'Windows 2008 SP2 x86 (B24)')
self.assertEqual(image.extra['updated'], '2011-08-06T18:14:02Z')
self.assertEqual(image.extra['created'], '2011-08-06T18:13:11Z')
self.assertEqual(image.extra['status'], 'ACTIVE')
self.assertEqual(image.extra['metadata']['os_type'], 'windows')
self.assertEqual(
image.extra['serverId'], '52415800-8b69-11e0-9b19-734f335aa7b3')
self.assertEqual(image.extra['minDisk'], 0)
self.assertEqual(image.extra['minRam'], 0)
def test_create_node(self):
image = NodeImage(
id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver)
size = NodeSize(
1, '256 slice', None, None, None, None, driver=self.driver)
node = self.driver.create_node(name='racktest', image=image, size=size)
self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe')
self.assertEqual(node.name, 'racktest')
self.assertEqual(node.extra['password'], 'racktestvJq7d3')
self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1')
def test_create_node_with_ex_keyname_and_ex_userdata(self):
image = NodeImage(
id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver)
size = NodeSize(
1, '256 slice', None, None, None, None, driver=self.driver)
node = self.driver.create_node(name='racktest', image=image, size=size,
ex_keyname='devstack',
ex_userdata='sample data')
self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe')
self.assertEqual(node.name, 'racktest')
self.assertEqual(node.extra['password'], 'racktestvJq7d3')
self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1')
self.assertEqual(node.extra['key_name'], 'devstack')
def test_create_node_with_availability_zone(self):
image = NodeImage(
id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver)
size = NodeSize(
1, '256 slice', None, None, None, None, driver=self.driver)
node = self.driver.create_node(name='racktest', image=image, size=size,
availability_zone='testaz')
self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe')
self.assertEqual(node.name, 'racktest')
self.assertEqual(node.extra['password'], 'racktestvJq7d3')
self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1')
self.assertEqual(node.extra['availability_zone'], 'testaz')
def test_create_node_with_ex_disk_config(self):
OpenStack_1_1_MockHttp.type = 'EX_DISK_CONFIG'
image = NodeImage(
id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver)
size = NodeSize(
1, '256 slice', None, None, None, None, driver=self.driver)
node = self.driver.create_node(name='racktest', image=image, size=size,
ex_disk_config='AUTO')
self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe')
self.assertEqual(node.name, 'racktest')
self.assertEqual(node.extra['disk_config'], 'AUTO')
def test_create_node_with_ex_config_drive(self):
OpenStack_1_1_MockHttp.type = 'EX_CONFIG_DRIVE'
image = NodeImage(
id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver)
size = NodeSize(
1, '256 slice', None, None, None, None, driver=self.driver)
node = self.driver.create_node(name='racktest', image=image, size=size,
ex_config_drive=True)
self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe')
self.assertEqual(node.name, 'racktest')
self.assertTrue(node.extra['config_drive'])
def test_destroy_node(self):
self.assertTrue(self.node.destroy())
def test_reboot_node(self):
self.assertTrue(self.node.reboot())
def test_create_volume(self):
volume = self.driver.create_volume(1, 'test')
self.assertEqual(volume.name, 'test')
self.assertEqual(volume.size, 1)
def test_destroy_volume(self):
volume = self.driver.ex_get_volume(
'cd76a3a1-c4ce-40f6-9b9f-07a61508938d')
self.assertEqual(self.driver.destroy_volume(volume), True)
def test_attach_volume(self):
node = self.driver.list_nodes()[0]
volume = self.driver.ex_get_volume(
'cd76a3a1-c4ce-40f6-9b9f-07a61508938d')
self.assertEqual(
self.driver.attach_volume(node, volume, '/dev/sdb'), True)
def test_detach_volume(self):
node = self.driver.list_nodes()[0]
volume = self.driver.ex_get_volume(
'cd76a3a1-c4ce-40f6-9b9f-07a61508938d')
self.assertEqual(
self.driver.attach_volume(node, volume, '/dev/sdb'), True)
self.assertEqual(self.driver.detach_volume(volume), True)
def test_ex_set_password(self):
self.assertTrue(self.driver.ex_set_password(self.node, 'New1&53jPass'))
def test_ex_rebuild(self):
image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)',
driver=self.driver)
success = self.driver.ex_rebuild(self.node, image=image)
self.assertTrue(success)
def test_ex_rebuild_with_ex_disk_config(self):
image = NodeImage(id=58, name='Ubuntu 10.10 (intrepid)',
driver=self.driver)
node = Node(id=12066, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
success = self.driver.ex_rebuild(node, image=image,
ex_disk_config='MANUAL')
self.assertTrue(success)
def test_ex_rebuild_with_ex_config_drive(self):
image = NodeImage(id=58, name='Ubuntu 10.10 (intrepid)',
driver=self.driver)
node = Node(id=12066, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
success = self.driver.ex_rebuild(node, image=image,
ex_disk_config='MANUAL',
ex_config_drive=True)
self.assertTrue(success)
def test_ex_resize(self):
size = NodeSize(1, '256 slice', None, None, None, None,
driver=self.driver)
try:
self.driver.ex_resize(self.node, size)
except Exception:
e = sys.exc_info()[1]
self.fail('An error was raised: ' + repr(e))
def test_ex_confirm_resize(self):
try:
self.driver.ex_confirm_resize(self.node)
except Exception:
e = sys.exc_info()[1]
self.fail('An error was raised: ' + repr(e))
def test_ex_revert_resize(self):
try:
self.driver.ex_revert_resize(self.node)
except Exception:
e = sys.exc_info()[1]
self.fail('An error was raised: ' + repr(e))
def test_create_image(self):
image = self.driver.create_image(self.node, 'new_image')
self.assertEqual(image.name, 'new_image')
self.assertEqual(image.id, '4949f9ee-2421-4c81-8b49-13119446008b')
def test_ex_set_server_name(self):
old_node = Node(
id='12064', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver,
)
new_node = self.driver.ex_set_server_name(old_node, 'Bob')
self.assertEqual('Bob', new_node.name)
def test_ex_set_metadata(self):
old_node = Node(
id='12063', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver,
)
metadata = {'Image Version': '2.1', 'Server Label': 'Web Head 1'}
returned_metadata = self.driver.ex_set_metadata(old_node, metadata)
self.assertEqual(metadata, returned_metadata)
def test_ex_get_metadata(self):
node = Node(
id='12063', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver,
)
metadata = {'Image Version': '2.1', 'Server Label': 'Web Head 1'}
returned_metadata = self.driver.ex_get_metadata(node)
self.assertEqual(metadata, returned_metadata)
def test_ex_update_node(self):
old_node = Node(
id='12064',
name=None, state=None, public_ips=None, private_ips=None, driver=self.driver,
)
new_node = self.driver.ex_update_node(old_node, name='Bob')
self.assertTrue(new_node)
self.assertEqual('Bob', new_node.name)
self.assertEqual('50.57.94.30', new_node.public_ips[0])
def test_ex_get_node_details(self):
node_id = '12064'
node = self.driver.ex_get_node_details(node_id)
self.assertEqual(node.id, '12064')
self.assertEqual(node.name, 'lc-test')
def test_ex_get_size(self):
size_id = '7'
size = self.driver.ex_get_size(size_id)
self.assertEqual(size.id, size_id)
self.assertEqual(size.name, '15.5GB slice')
def test_get_image(self):
image_id = '13'
image = self.driver.get_image(image_id)
self.assertEqual(image.id, image_id)
self.assertEqual(image.name, 'Windows 2008 SP2 x86 (B24)')
self.assertEqual(image.extra['serverId'], None)
self.assertEqual(image.extra['minDisk'], "5")
self.assertEqual(image.extra['minRam'], "256")
def test_delete_image(self):
image = NodeImage(
id='26365521-8c62-11f9-2c33-283d153ecc3a', name='My Backup', driver=self.driver)
result = self.driver.delete_image(image)
self.assertTrue(result)
def test_extract_image_id_from_url(self):
url = 'http://127.0.0.1/v1.1/68/images/1d4a8ea9-aae7-4242-a42d-5ff4702f2f14'
url_two = 'http://127.0.0.1/v1.1/68/images/13'
image_id = self.driver._extract_image_id_from_url(url)
image_id_two = self.driver._extract_image_id_from_url(url_two)
self.assertEqual(image_id, '1d4a8ea9-aae7-4242-a42d-5ff4702f2f14')
self.assertEqual(image_id_two, '13')
def test_ex_rescue_with_password(self):
node = Node(id=12064, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
n = self.driver.ex_rescue(node, 'foo')
self.assertEqual(n.extra['password'], 'foo')
def test_ex_rescue_no_password(self):
node = Node(id=12064, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
n = self.driver.ex_rescue(node)
self.assertEqual(n.extra['password'], 'foo')
def test_ex_unrescue(self):
node = Node(id=12064, name=None, state=None, public_ips=None,
private_ips=None, driver=self.driver)
result = self.driver.ex_unrescue(node)
self.assertTrue(result)
def test_ex_get_node_security_groups(self):
node = Node(id='1c01300f-ef97-4937-8f03-ac676d6234be', name=None,
state=None, public_ips=None, private_ips=None, driver=self.driver)
security_groups = self.driver.ex_get_node_security_groups(node)
self.assertEqual(
len(security_groups), 2, 'Wrong security groups count')
security_group = security_groups[1]
self.assertEqual(security_group.id, 4)
self.assertEqual(security_group.tenant_id, '68')
self.assertEqual(security_group.name, 'ftp')
self.assertEqual(
security_group.description, 'FTP Client-Server - Open 20-21 ports')
self.assertEqual(security_group.rules[0].id, 1)
self.assertEqual(security_group.rules[0].parent_group_id, 4)
self.assertEqual(security_group.rules[0].ip_protocol, "tcp")
self.assertEqual(security_group.rules[0].from_port, 20)
self.assertEqual(security_group.rules[0].to_port, 21)
self.assertEqual(security_group.rules[0].ip_range, '0.0.0.0/0')
def test_ex_list_security_groups(self):
security_groups = self.driver.ex_list_security_groups()
self.assertEqual(
len(security_groups), 2, 'Wrong security groups count')
security_group = security_groups[1]
self.assertEqual(security_group.id, 4)
self.assertEqual(security_group.tenant_id, '68')
self.assertEqual(security_group.name, 'ftp')
self.assertEqual(
security_group.description, 'FTP Client-Server - Open 20-21 ports')
self.assertEqual(security_group.rules[0].id, 1)
self.assertEqual(security_group.rules[0].parent_group_id, 4)
self.assertEqual(security_group.rules[0].ip_protocol, "tcp")
self.assertEqual(security_group.rules[0].from_port, 20)
self.assertEqual(security_group.rules[0].to_port, 21)
self.assertEqual(security_group.rules[0].ip_range, '0.0.0.0/0')
def test_ex_create_security_group(self):
name = 'test'
description = 'Test Security Group'
security_group = self.driver.ex_create_security_group(
name, description)
self.assertEqual(security_group.id, 6)
self.assertEqual(security_group.tenant_id, '68')
self.assertEqual(security_group.name, name)
self.assertEqual(security_group.description, description)
self.assertEqual(len(security_group.rules), 0)
def test_ex_delete_security_group(self):
security_group = OpenStackSecurityGroup(
id=6, tenant_id=None, name=None, description=None, driver=self.driver)
result = self.driver.ex_delete_security_group(security_group)
self.assertTrue(result)
def test_ex_create_security_group_rule(self):
security_group = OpenStackSecurityGroup(
id=6, tenant_id=None, name=None, description=None, driver=self.driver)
security_group_rule = self.driver.ex_create_security_group_rule(
security_group, 'tcp', 14, 16, '0.0.0.0/0')
self.assertEqual(security_group_rule.id, 2)
self.assertEqual(security_group_rule.parent_group_id, 6)
self.assertEqual(security_group_rule.ip_protocol, 'tcp')
self.assertEqual(security_group_rule.from_port, 14)
self.assertEqual(security_group_rule.to_port, 16)
self.assertEqual(security_group_rule.ip_range, '0.0.0.0/0')
self.assertEqual(security_group_rule.tenant_id, None)
def test_ex_delete_security_group_rule(self):
security_group_rule = OpenStackSecurityGroupRule(
id=2, parent_group_id=None, ip_protocol=None, from_port=None, to_port=None, driver=self.driver)
result = self.driver.ex_delete_security_group_rule(security_group_rule)
self.assertTrue(result)
def test_list_key_pairs(self):
keypairs = self.driver.list_key_pairs()
self.assertEqual(len(keypairs), 2, 'Wrong keypairs count')
keypair = keypairs[1]
self.assertEqual(keypair.name, 'key2')
self.assertEqual(
keypair.fingerprint, '5d:66:33:ae:99:0f:fb:cb:86:f2:bc:ae:53:99:b6:ed')
self.assertTrue(len(keypair.public_key) > 10)
self.assertEqual(keypair.private_key, None)
def test_get_key_pair(self):
key_pair = self.driver.get_key_pair(name='test-key-pair')
self.assertEqual(key_pair.name, 'test-key-pair')
def test_get_key_pair_doesnt_exist(self):
self.assertRaises(KeyPairDoesNotExistError,
self.driver.get_key_pair,
name='doesnt-exist')
def test_create_key_pair(self):
name = 'key0'
keypair = self.driver.create_key_pair(name=name)
self.assertEqual(keypair.name, name)
self.assertEqual(keypair.fingerprint,
'80:f8:03:a7:8e:c1:c3:b1:7e:c5:8c:50:04:5e:1c:5b')
self.assertTrue(len(keypair.public_key) > 10)
self.assertTrue(len(keypair.private_key) > 10)
def test_import_key_pair_from_file(self):
name = 'key3'
path = os.path.join(
os.path.dirname(__file__), 'fixtures', 'misc', 'dummy_rsa.pub')
pub_key = open(path, 'r').read()
keypair = self.driver.import_key_pair_from_file(name=name,
key_file_path=path)
self.assertEqual(keypair.name, name)
self.assertEqual(
keypair.fingerprint, '97:10:a6:e7:92:65:7e:69:fe:e6:81:8f:39:3c:8f:5a')
self.assertEqual(keypair.public_key, pub_key)
self.assertEqual(keypair.private_key, None)
def test_import_key_pair_from_string(self):
name = 'key3'
path = os.path.join(
os.path.dirname(__file__), 'fixtures', 'misc', 'dummy_rsa.pub')
pub_key = open(path, 'r').read()
keypair = self.driver.import_key_pair_from_string(name=name,
key_material=pub_key)
self.assertEqual(keypair.name, name)
self.assertEqual(
keypair.fingerprint, '97:10:a6:e7:92:65:7e:69:fe:e6:81:8f:39:3c:8f:5a')
self.assertEqual(keypair.public_key, pub_key)
self.assertEqual(keypair.private_key, None)
def test_delete_key_pair(self):
keypair = OpenStackKeyPair(
name='key1', fingerprint=None, public_key=None, driver=self.driver)
result = self.driver.delete_key_pair(key_pair=keypair)
self.assertTrue(result)
def test_ex_list_floating_ip_pools(self):
ret = self.driver.ex_list_floating_ip_pools()
self.assertEqual(ret[0].name, 'public')
self.assertEqual(ret[1].name, 'foobar')
def test_ex_attach_floating_ip_to_node(self):
image = NodeImage(
id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver)
size = NodeSize(
1, '256 slice', None, None, None, None, driver=self.driver)
node = self.driver.create_node(name='racktest', image=image, size=size)
node.id = 4242
ip = '42.42.42.42'
self.assertTrue(self.driver.ex_attach_floating_ip_to_node(node, ip))
def test_detach_floating_ip_from_node(self):
image = NodeImage(
id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver)
size = NodeSize(
1, '256 slice', None, None, None, None, driver=self.driver)
node = self.driver.create_node(name='racktest', image=image, size=size)
node.id = 4242
ip = '42.42.42.42'
self.assertTrue(self.driver.ex_detach_floating_ip_from_node(node, ip))
def test_OpenStack_1_1_FloatingIpPool_list_floating_ips(self):
pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection)
ret = pool.list_floating_ips()
self.assertEqual(ret[0].id, '09ea1784-2f81-46dc-8c91-244b4df75bde')
self.assertEqual(ret[0].pool, pool)
self.assertEqual(ret[0].ip_address, '10.3.1.42')
self.assertEqual(ret[0].node_id, None)
self.assertEqual(ret[1].id, '04c5336a-0629-4694-ba30-04b0bdfa88a4')
self.assertEqual(ret[1].pool, pool)
self.assertEqual(ret[1].ip_address, '10.3.1.1')
self.assertEqual(
ret[1].node_id, 'fcfc96da-19e2-40fd-8497-f29da1b21143')
def test_OpenStack_1_1_FloatingIpPool_get_floating_ip(self):
pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection)
ret = pool.get_floating_ip('10.3.1.42')
self.assertEqual(ret.id, '09ea1784-2f81-46dc-8c91-244b4df75bde')
self.assertEqual(ret.pool, pool)
self.assertEqual(ret.ip_address, '10.3.1.42')
self.assertEqual(ret.node_id, None)
def test_OpenStack_1_1_FloatingIpPool_create_floating_ip(self):
pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection)
ret = pool.create_floating_ip()
self.assertEqual(ret.id, '09ea1784-2f81-46dc-8c91-244b4df75bde')
self.assertEqual(ret.pool, pool)
self.assertEqual(ret.ip_address, '10.3.1.42')
self.assertEqual(ret.node_id, None)
def test_OpenStack_1_1_FloatingIpPool_delete_floating_ip(self):
pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection)
ip = OpenStack_1_1_FloatingIpAddress('foo-bar-id', '42.42.42.42', pool)
self.assertTrue(pool.delete_floating_ip(ip))
def test_OpenStack_1_1_FloatingIpAddress_delete(self):
pool = OpenStack_1_1_FloatingIpPool('foo', self.driver.connection)
pool.delete_floating_ip = Mock()
ip = OpenStack_1_1_FloatingIpAddress('foo-bar-id', '42.42.42.42', pool)
ip.pool.delete_floating_ip()
self.assertEqual(pool.delete_floating_ip.call_count, 1)
def test_ex_list_network(self):
networks = self.driver.ex_list_networks()
network = networks[0]
self.assertEqual(len(networks), 3)
self.assertEqual(network.name, 'test1')
self.assertEqual(network.cidr, '127.0.0.0/24')
def test_ex_create_network(self):
network = self.driver.ex_create_network(name='test1',
cidr='127.0.0.0/24')
self.assertEqual(network.name, 'test1')
self.assertEqual(network.cidr, '127.0.0.0/24')
def test_ex_delete_network(self):
network = self.driver.ex_list_networks()[0]
self.assertTrue(self.driver.ex_delete_network(network=network))
def test_ex_get_metadata_for_node(self):
image = NodeImage(id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver)
size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver)
node = self.driver.create_node(name='foo',
image=image,
size=size)
metadata = self.driver.ex_get_metadata_for_node(node)
self.assertEqual(metadata['My Server Name'], 'Apache1')
self.assertEqual(len(metadata), 1)
def test_ex_pause_node(self):
node = Node(
id='12063', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver,
)
ret = self.driver.ex_pause_node(node)
self.assertTrue(ret is True)
def test_ex_unpause_node(self):
node = Node(
id='12063', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver,
)
ret = self.driver.ex_unpause_node(node)
self.assertTrue(ret is True)
def test_ex_suspend_node(self):
node = Node(
id='12063', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver,
)
ret = self.driver.ex_suspend_node(node)
self.assertTrue(ret is True)
def test_ex_resume_node(self):
node = Node(
id='12063', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver,
)
ret = self.driver.ex_resume_node(node)
self.assertTrue(ret is True)
def test_ex_get_console_output(self):
node = Node(
id='12086', name=None, state=None,
public_ips=None, private_ips=None, driver=self.driver,
)
resp = self.driver.ex_get_console_output(node)
expected_output = 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'
self.assertEqual(resp['output'], expected_output)
def test_ex_list_snapshots(self):
if self.driver_type.type == 'rackspace':
self.conn_classes[0].type = 'RACKSPACE'
self.conn_classes[1].type = 'RACKSPACE'
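        # with the connection type set, MockHttp appends '_RACKSPACE' when
        # resolving handler names, so the *_RACKSPACE fixture variants below
        # serve these requests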
snapshots = self.driver.ex_list_snapshots()
self.assertEqual(len(snapshots), 3)
self.assertEqual(snapshots[0].created, datetime.datetime(2012, 2, 29, 3, 50, 7, tzinfo=UTC))
self.assertEqual(snapshots[0].extra['created'], "2012-02-29T03:50:07Z")
self.assertEqual(snapshots[0].extra['name'], 'snap-001')
        # an invalid date is parsed as None
        self.assertIsNone(snapshots[2].created)
def test_list_volume_snapshots(self):
volume = self.driver.list_volumes()[0]
# rackspace needs a different mocked response for snapshots, but not for volumes
if self.driver_type.type == 'rackspace':
self.conn_classes[0].type = 'RACKSPACE'
self.conn_classes[1].type = 'RACKSPACE'
snapshots = self.driver.list_volume_snapshots(volume)
self.assertEqual(len(snapshots), 1)
self.assertEqual(snapshots[0].id, '4fbbdccf-e058-6502-8844-6feeffdf4cb5')
def test_ex_create_snapshot(self):
volume = self.driver.list_volumes()[0]
if self.driver_type.type == 'rackspace':
self.conn_classes[0].type = 'RACKSPACE'
self.conn_classes[1].type = 'RACKSPACE'
ret = self.driver.ex_create_snapshot(volume,
'Test Volume',
'This is a test')
self.assertEqual(ret.id, '3fbbcccf-d058-4502-8844-6feeffdf4cb5')
def test_ex_delete_snapshot(self):
if self.driver_type.type == 'rackspace':
self.conn_classes[0].type = 'RACKSPACE'
self.conn_classes[1].type = 'RACKSPACE'
snapshot = self.driver.ex_list_snapshots()[0]
ret = self.driver.ex_delete_snapshot(snapshot)
self.assertTrue(ret)
class OpenStack_1_1_FactoryMethodTests(OpenStack_1_1_Tests):
should_list_locations = False
should_list_volumes = True
driver_klass = OpenStack_1_1_NodeDriver
driver_type = get_driver(Provider.OPENSTACK)
driver_args = OPENSTACK_PARAMS + ('1.1',)
driver_kwargs = {'ex_force_auth_version': '2.0'}
class OpenStack_1_1_MockHttp(MockHttpTestCase):
fixtures = ComputeFileFixtures('openstack_v1.1')
auth_fixtures = OpenStackFixtures()
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
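    # MockHttp dispatches by URL path: '/', '.' and '-' are mapped to '_', so
    # a GET to /v1.1/slug/servers/detail is routed to the method named
    # _v1_1_slug_servers_detail below. Each handler mocks one URL.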
def _v2_0_tokens(self, method, url, body, headers):
body = self.auth_fixtures.load('_v2_0__auth.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_0(self, method, url, body, headers):
headers = {
'x-auth-token': 'FE011C19-CF86-4F87-BE5D-9229145D7A06',
'x-server-management-url': 'https://api.example.com/v1.1/slug',
}
return (httplib.NO_CONTENT, "", headers, httplib.responses[httplib.NO_CONTENT])
def _v1_1_slug_servers_detail(self, method, url, body, headers):
body = self.fixtures.load('_servers_detail.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_servers_detail_ERROR_STATE_NO_IMAGE_ID(self, method, url, body, headers):
body = self.fixtures.load('_servers_detail_ERROR_STATE.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_flavors_detail(self, method, url, body, headers):
body = self.fixtures.load('_flavors_detail.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_images_detail(self, method, url, body, headers):
body = self.fixtures.load('_images_detail.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_servers(self, method, url, body, headers):
if method == "POST":
body = self.fixtures.load('_servers_create.json')
elif method == "GET":
body = self.fixtures.load('_servers.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
'_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_servers_12065_action(self, method, url, body, headers):
if method != "POST":
self.fail('HTTP method other than POST to action URL')
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
def _v1_1_slug_servers_12064_action(self, method, url, body, headers):
if method != "POST":
self.fail('HTTP method other than POST to action URL')
if "createImage" in json.loads(body):
return (httplib.ACCEPTED, "",
{"location": "http://127.0.0.1/v1.1/68/images/4949f9ee-2421-4c81-8b49-13119446008b"},
httplib.responses[httplib.ACCEPTED])
elif "rescue" in json.loads(body):
return (httplib.OK, '{"adminPass": "foo"}', {},
httplib.responses[httplib.OK])
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
def _v1_1_slug_servers_12066_action(self, method, url, body, headers):
if method != "POST":
self.fail('HTTP method other than POST to action URL')
if "rebuild" not in json.loads(body):
self.fail("Did not get expected action (rebuild) in action URL")
self.assertTrue('\"OS-DCF:diskConfig\": \"MANUAL\"' in body,
msg="Manual disk configuration option was not specified in rebuild body: " + body)
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
def _v1_1_slug_servers_12065(self, method, url, body, headers):
if method == "DELETE":
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
else:
raise NotImplementedError()
def _v1_1_slug_servers_12064(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_servers_12064.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
elif method == "PUT":
body = self.fixtures.load('_servers_12064_updated_name_bob.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
elif method == "DELETE":
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
else:
raise NotImplementedError()
def _v1_1_slug_servers_12062(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_servers_12064.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_servers_12063_metadata(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_servers_12063_metadata_two_keys.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
elif method == "PUT":
body = self.fixtures.load('_servers_12063_metadata_two_keys.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_servers_EX_DISK_CONFIG(self, method, url, body, headers):
if method == "POST":
body = u(body)
            self.assertIn('"OS-DCF:diskConfig": "AUTO"', body)
body = self.fixtures.load('_servers_create_disk_config.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_flavors_7(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_flavors_7.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
else:
raise NotImplementedError()
def _v1_1_slug_images_13(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_images_13.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
else:
raise NotImplementedError()
def _v1_1_slug_images_26365521_8c62_11f9_2c33_283d153ecc3a(self, method, url, body, headers):
if method == "DELETE":
return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT])
else:
raise NotImplementedError()
def _v1_1_slug_images_4949f9ee_2421_4c81_8b49_13119446008b(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
'_images_4949f9ee_2421_4c81_8b49_13119446008b.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
else:
raise NotImplementedError()
def _v1_1_slug_servers_1c01300f_ef97_4937_8f03_ac676d6234be_os_security_groups(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
'_servers_1c01300f-ef97-4937-8f03-ac676d6234be_os-security-groups.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_os_security_groups(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_os_security_groups.json')
elif method == "POST":
body = self.fixtures.load('_os_security_groups_create.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_os_security_groups_6(self, method, url, body, headers):
if method == "DELETE":
return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT])
else:
raise NotImplementedError()
def _v1_1_slug_os_security_group_rules(self, method, url, body, headers):
if method == "POST":
body = self.fixtures.load('_os_security_group_rules_create.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_os_security_group_rules_2(self, method, url, body, headers):
if method == "DELETE":
return (httplib.NO_CONTENT, "", {}, httplib.responses[httplib.NO_CONTENT])
else:
raise NotImplementedError()
def _v1_1_slug_os_keypairs(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_os_keypairs.json')
elif method == "POST":
if 'public_key' in body:
body = self.fixtures.load('_os_keypairs_create_import.json')
else:
body = self.fixtures.load('_os_keypairs_create.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_os_keypairs_test_key_pair(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('_os_keypairs_get_one.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_os_keypairs_doesnt_exist(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('_os_keypairs_not_found.json')
else:
raise NotImplementedError()
return (httplib.NOT_FOUND, body, self.json_content_headers,
httplib.responses[httplib.NOT_FOUND])
def _v1_1_slug_os_keypairs_key1(self, method, url, body, headers):
if method == "DELETE":
return (httplib.ACCEPTED, "", {}, httplib.responses[httplib.ACCEPTED])
else:
raise NotImplementedError()
def _v1_1_slug_os_volumes(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_os_volumes.json')
elif method == "POST":
body = self.fixtures.load('_os_volumes_create.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load(
'_os_volumes_cd76a3a1_c4ce_40f6_9b9f_07a61508938d.json')
elif method == "DELETE":
body = ''
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_servers_12065_os_volume_attachments(self, method, url, body, headers):
if method == "POST":
body = self.fixtures.load(
'_servers_12065_os_volume_attachments.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_servers_12065_os_volume_attachments_cd76a3a1_c4ce_40f6_9b9f_07a61508938d(self, method, url, body,
headers):
if method == "DELETE":
body = ''
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_os_floating_ip_pools(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_floating_ip_pools.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
else:
raise NotImplementedError()
def _v1_1_slug_os_floating_ips_foo_bar_id(self, method, url, body, headers):
if method == "DELETE":
body = ''
            return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.ACCEPTED])
else:
raise NotImplementedError()
def _v1_1_slug_os_floating_ips(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_floating_ips.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
elif method == "POST":
body = self.fixtures.load('_floating_ip.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
else:
raise NotImplementedError()
    def _v1_1_slug_servers_4242_action(self, method, url, body, headers):
        if method == "POST":
            body = ''
            return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.ACCEPTED])
        else:
            raise NotImplementedError()
def _v1_1_slug_os_networks(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('_os_networks.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
elif method == 'POST':
body = self.fixtures.load('_os_networks_POST.json')
            return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError()
def _v1_1_slug_os_networks_f13e5051_feea_416b_827a_1a0acc2dad14(self, method, url, body, headers):
if method == 'DELETE':
body = ''
            return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.ACCEPTED])
raise NotImplementedError()
    def _v1_1_slug_servers_72258_action(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('_servers_suspend.json')
            return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.ACCEPTED])
        else:
            raise NotImplementedError()
    def _v1_1_slug_servers_12063_action(self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load('_servers_unpause.json')
            return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.ACCEPTED])
        else:
            raise NotImplementedError()
def _v1_1_slug_servers_12086_action(self, method, url, body, headers):
if method == 'POST':
body = self.fixtures.load('_servers_12086_console_output.json')
            return (httplib.ACCEPTED, body, self.json_content_headers, httplib.responses[httplib.ACCEPTED])
else:
raise NotImplementedError()
def _v1_1_slug_os_snapshots(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('_os_snapshots.json')
elif method == 'POST':
body = self.fixtures.load('_os_snapshots_create.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_os_snapshots_RACKSPACE(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('_os_snapshots_rackspace.json')
elif method == 'POST':
body = self.fixtures.load('_os_snapshots_create_rackspace.json')
else:
raise NotImplementedError()
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
def _v1_1_slug_os_snapshots_3fbbcccf_d058_4502_8844_6feeffdf4cb5(self, method, url, body, headers):
if method == 'DELETE':
body = ''
status_code = httplib.NO_CONTENT
else:
raise NotImplementedError()
        return (status_code, body, self.json_content_headers, httplib.responses[status_code])
def _v1_1_slug_os_snapshots_3fbbcccf_d058_4502_8844_6feeffdf4cb5_RACKSPACE(self, method, url, body, headers):
if method == 'DELETE':
body = ''
status_code = httplib.NO_CONTENT
else:
raise NotImplementedError()
        return (status_code, body, self.json_content_headers, httplib.responses[status_code])
# This exists because the nova compute url in devstack has v2 in there but the v1.1 fixtures
# work fine.
class OpenStack_2_0_MockHttp(OpenStack_1_1_MockHttp):
def __init__(self, *args, **kwargs):
super(OpenStack_2_0_MockHttp, self).__init__(*args, **kwargs)
methods1 = OpenStack_1_1_MockHttp.__dict__
names1 = [m for m in methods1 if m.find('_v1_1') == 0]
for name in names1:
method = methods1[name]
new_name = name.replace('_v1_1_slug_', '_v2_1337_')
setattr(self, new_name, method_type(method, self,
OpenStack_2_0_MockHttp))
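        # e.g. _v1_1_slug_servers_detail is re-bound as _v2_1337_servers_detail,
        # so the v1.1 fixtures above also answer requests under the /v2/1337
        # URL prefix handed back by the 2.0 auth catalog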
class OpenStack_1_1_Auth_2_0_Tests(OpenStack_1_1_Tests):
driver_args = OPENSTACK_PARAMS + ('1.1',)
driver_kwargs = {'ex_force_auth_version': '2.0'}
def setUp(self):
self.driver_klass.connectionCls.conn_classes = \
(OpenStack_2_0_MockHttp, OpenStack_2_0_MockHttp)
self.driver_klass.connectionCls.auth_url = "https://auth.api.example.com"
OpenStackMockHttp.type = None
OpenStack_1_1_MockHttp.type = None
OpenStack_2_0_MockHttp.type = None
self.driver = self.create_driver()
# normally authentication happens lazily, but we force it here
self.driver.connection._populate_hosts_and_request_paths()
clear_pricing_data()
self.node = self.driver.list_nodes()[1]
def test_auth_user_info_is_set(self):
self.driver.connection._populate_hosts_and_request_paths()
self.assertEqual(self.driver.connection.auth_user_info, {
'id': '7',
'name': 'testuser',
'roles': [{'description': 'Default Role.',
'id': 'identity:default',
'name': 'identity:default'}]})
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
from __future__ import absolute_import
from django.conf import settings
from django.contrib.auth import authenticate, login, get_backends
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.core.urlresolvers import reverse
from zerver.decorator import authenticated_json_post_view, require_post
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.middleware.csrf import get_token
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.utils.translation import ugettext as _
from six import text_type
from six.moves import urllib
from typing import Any, Dict, Optional
from confirmation.models import Confirmation
from zerver.forms import OurAuthenticationForm, WRONG_SUBDOMAIN_ERROR
from zerver.lib.request import REQ, has_request_variables, JsonableError
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import get_subdomain
from zerver.models import PreregistrationUser, UserProfile, remote_user_to_email
from zerver.views import create_homepage_form, create_preregistration_user
from zproject.backends import password_auth_enabled, dev_auth_enabled, google_auth_enabled
from zproject.jinja2 import render_to_response
import hashlib
import hmac
import jwt
import logging
import requests
import time
def maybe_send_to_registration(request, email, full_name=''):
# type: (HttpRequest, text_type, text_type) -> HttpResponse
form = create_homepage_form(request, user_info={'email': email})
request.verified_email = None
if form.is_valid():
# Construct a PreregistrationUser object and send the user over to
# the confirmation view.
prereg_user = None
if settings.ONLY_SSO:
try:
prereg_user = PreregistrationUser.objects.filter(email__iexact=email).latest("invited_at")
except PreregistrationUser.DoesNotExist:
prereg_user = create_preregistration_user(email, request)
else:
prereg_user = create_preregistration_user(email, request)
return redirect("".join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
"/",
# Split this so we only get the part after the /
Confirmation.objects.get_link_for_object(prereg_user).split("/", 3)[3],
'?full_name=',
            # urllib does not handle Unicode, so coerce to an encoded byte string
# Explanation: http://stackoverflow.com/a/5605354/90777
urllib.parse.quote_plus(full_name.encode('utf8')))))
else:
url = reverse('register')
return render_to_response('zerver/accounts_home.html',
{'form': form, 'current_url': lambda: url},
request=request)
def redirect_to_subdomain_login_url():
# type: () -> HttpResponseRedirect
login_url = reverse('django.contrib.auth.views.login')
redirect_url = login_url + '?subdomain=1'
return HttpResponseRedirect(redirect_url)
def login_or_register_remote_user(request, remote_username, user_profile, full_name='',
invalid_subdomain=False):
# type: (HttpRequest, text_type, UserProfile, text_type, Optional[bool]) -> HttpResponse
if invalid_subdomain:
# Show login page with an error message
return redirect_to_subdomain_login_url()
elif user_profile is None or user_profile.is_mirror_dummy:
# Since execution has reached here, the client specified a remote user
# but no associated user account exists. Send them over to the
# PreregistrationUser flow.
return maybe_send_to_registration(request, remote_user_to_email(remote_username), full_name)
else:
login(request, user_profile)
if settings.OPEN_REALM_CREATION and user_profile.realm.subdomain is not None:
return HttpResponseRedirect("%s%s.%s" % (settings.EXTERNAL_URI_SCHEME,
user_profile.realm.subdomain,
settings.EXTERNAL_HOST))
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
def remote_user_sso(request):
# type: (HttpRequest) -> HttpResponse
try:
remote_user = request.META["REMOTE_USER"]
except KeyError:
raise JsonableError(_("No REMOTE_USER set."))
user_profile = authenticate(remote_user=remote_user, realm_subdomain=get_subdomain(request))
return login_or_register_remote_user(request, remote_user, user_profile)
@csrf_exempt
def remote_user_jwt(request):
# type: (HttpRequest) -> HttpResponse
try:
json_web_token = request.POST["json_web_token"]
payload, signing_input, header, signature = jwt.load(json_web_token)
except KeyError:
raise JsonableError(_("No JSON web token passed in request"))
except jwt.DecodeError:
raise JsonableError(_("Bad JSON web token"))
remote_user = payload.get("user", None)
if remote_user is None:
raise JsonableError(_("No user specified in JSON web token claims"))
domain = payload.get('realm', None)
if domain is None:
raise JsonableError(_("No domain specified in JSON web token claims"))
email = "%s@%s" % (remote_user, domain)
try:
jwt.verify_signature(payload, signing_input, header, signature,
settings.JWT_AUTH_KEYS[domain])
# We do all the authentication we need here (otherwise we'd have to
# duplicate work), but we need to call authenticate with some backend so
# that the request.backend attribute gets set.
return_data = {} # type: Dict[str, bool]
user_profile = authenticate(username=email,
realm_subdomain=get_subdomain(request),
return_data=return_data,
use_dummy_backend=True)
if return_data.get('invalid_subdomain'):
logging.warning("User attempted to JWT login to wrong subdomain %s: %s" % (get_subdomain(request), email,))
raise JsonableError(_("Wrong subdomain"))
except (jwt.DecodeError, jwt.ExpiredSignature):
raise JsonableError(_("Bad JSON web token signature"))
except KeyError:
raise JsonableError(_("Realm not authorized for JWT login"))
except UserProfile.DoesNotExist:
user_profile = None
return login_or_register_remote_user(request, email, user_profile, remote_user)
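# A minimal client-side sketch for the endpoint above (assumes PyJWT and a
# shared secret registered under settings.JWT_AUTH_KEYS['example.com']; the
# posting URL is hypothetical):
#
#   token = jwt.encode({'user': 'hamlet', 'realm': 'example.com'}, 'secret')
#   requests.post(login_url, data={'json_web_token': token})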
def google_oauth2_csrf(request, value):
# type: (HttpRequest, str) -> HttpResponse
return hmac.new(get_token(request).encode('utf-8'), value.encode("utf-8"), hashlib.sha256).hexdigest()
def start_google_oauth2(request):
# type: (HttpRequest) -> HttpResponse
uri = 'https://accounts.google.com/o/oauth2/auth?'
cur_time = str(int(time.time()))
csrf_state = '{}:{}'.format(
cur_time,
google_oauth2_csrf(request, cur_time),
)
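    # csrf_state thus looks like '1461171712:9f3c...'; finish_google_oauth2
    # splits it on ':' and recomputes the HMAC over the timestamp half.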
    params = {
'response_type': 'code',
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.auth.finish_google_oauth2'),
)),
'scope': 'profile email',
'state': csrf_state,
}
    return redirect(uri + urllib.parse.urlencode(params))
def finish_google_oauth2(request):
# type: (HttpRequest) -> HttpResponse
error = request.GET.get('error')
if error == 'access_denied':
return redirect('/')
elif error is not None:
logging.warning('Error from google oauth2 login: %s' % (request.GET.get("error"),))
return HttpResponse(status=400)
csrf_state = request.GET.get('state')
if csrf_state is None or len(csrf_state.split(':')) != 2:
logging.warning('Missing Google oauth2 CSRF state')
return HttpResponse(status=400)
value, hmac_value = csrf_state.split(':')
if hmac_value != google_oauth2_csrf(request, value):
logging.warning('Google oauth2 CSRF error')
return HttpResponse(status=400)
resp = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'code': request.GET.get('code'),
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.auth.finish_google_oauth2'),
)),
'grant_type': 'authorization_code',
},
)
if resp.status_code == 400:
logging.warning('User error converting Google oauth2 login to token: %s' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
logging.error('Could not convert google oauth2 code to access_token: %s' % (resp.text,))
return HttpResponse(status=400)
access_token = resp.json()['access_token']
resp = requests.get(
'https://www.googleapis.com/plus/v1/people/me',
params={'access_token': access_token}
)
if resp.status_code == 400:
logging.warning('Google login failed making info API call: %s' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
logging.error('Google login failed making API call: %s' % (resp.text,))
return HttpResponse(status=400)
body = resp.json()
try:
full_name = body['name']['formatted']
except KeyError:
        # Only Google+ users have a formatted name; name-ordering i18n is ignored here.
full_name = u'{} {}'.format(
body['name']['givenName'], body['name']['familyName']
)
for email in body['emails']:
if email['type'] == 'account':
break
else:
logging.error('Google oauth2 account email not found: %s' % (body,))
return HttpResponse(status=400)
email_address = email['value']
return_data = {} # type: Dict[str, bool]
user_profile = authenticate(username=email_address,
realm_subdomain=get_subdomain(request),
use_dummy_backend=True,
return_data=return_data)
invalid_subdomain = bool(return_data.get('invalid_subdomain'))
return login_or_register_remote_user(request, email_address, user_profile, full_name,
invalid_subdomain)
def login_page(request, **kwargs):
# type: (HttpRequest, **Any) -> HttpResponse
extra_context = kwargs.pop('extra_context', {})
if dev_auth_enabled():
# Development environments usually have only a few users, but
# it still makes sense to limit how many users we render to
# support performance testing with DevAuthBackend.
MAX_DEV_BACKEND_USERS = 100
users_query = UserProfile.objects.select_related().filter(is_bot=False, is_active=True)
users = users_query.order_by('email')[0:MAX_DEV_BACKEND_USERS]
extra_context['direct_admins'] = [u.email for u in users if u.is_realm_admin]
extra_context['direct_users'] = [u.email for u in users if not u.is_realm_admin]
template_response = django_login_page(
request, authentication_form=OurAuthenticationForm,
extra_context=extra_context, **kwargs)
try:
template_response.context_data['email'] = request.GET['email']
except KeyError:
pass
try:
template_response.context_data['subdomain'] = request.GET['subdomain']
template_response.context_data['wrong_subdomain_error'] = WRONG_SUBDOMAIN_ERROR
except KeyError:
pass
return template_response
def dev_direct_login(request, **kwargs):
# type: (HttpRequest, **Any) -> HttpResponse
# This function allows logging in without a password and should only be called in development environments.
# It may be called if the DevAuthBackend is included in settings.AUTHENTICATION_BACKENDS
if (not dev_auth_enabled()) or settings.PRODUCTION:
# This check is probably not required, since authenticate would fail without an enabled DevAuthBackend.
raise Exception('Direct login not supported.')
email = request.POST['direct_email']
user_profile = authenticate(username=email, realm_subdomain=get_subdomain(request))
if user_profile is None:
raise Exception("User cannot login")
login(request, user_profile)
if settings.OPEN_REALM_CREATION and settings.DEVELOPMENT:
if user_profile.realm.subdomain is not None:
return HttpResponseRedirect("%s%s.%s" % (settings.EXTERNAL_URI_SCHEME,
user_profile.realm.subdomain,
settings.EXTERNAL_HOST))
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
@csrf_exempt
@require_post
@has_request_variables
def api_dev_fetch_api_key(request, username=REQ()):
# type: (HttpRequest, str) -> HttpResponse
"""This function allows logging in without a password on the Zulip
mobile apps when connecting to a Zulip development environment. It
requires DevAuthBackend to be included in settings.AUTHENTICATION_BACKENDS.
"""
if not dev_auth_enabled() or settings.PRODUCTION:
return json_error(_("Dev environment not enabled."))
return_data = {} # type: Dict[str, bool]
user_profile = authenticate(username=username,
realm_subdomain=get_subdomain(request),
return_data=return_data)
if return_data.get("inactive_realm") == True:
return json_error(_("Your realm has been deactivated."),
data={"reason": "realm deactivated"}, status=403)
if return_data.get("inactive_user") == True:
return json_error(_("Your account has been disabled."),
data={"reason": "user disable"}, status=403)
login(request, user_profile)
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@csrf_exempt
def api_dev_get_emails(request):
# type: (HttpRequest) -> HttpResponse
if not dev_auth_enabled() or settings.PRODUCTION:
return json_error(_("Dev environment not enabled."))
MAX_DEV_BACKEND_USERS = 100 # type: int
users_query = UserProfile.objects.select_related().filter(is_bot=False, is_active=True)
users = users_query.order_by('email')[0:MAX_DEV_BACKEND_USERS]
return json_success(dict(direct_admins=[u.email for u in users if u.is_realm_admin],
direct_users=[u.email for u in users if not u.is_realm_admin]))
@csrf_exempt
@require_post
@has_request_variables
def api_fetch_api_key(request, username=REQ(), password=REQ()):
# type: (HttpRequest, str, str) -> HttpResponse
return_data = {} # type: Dict[str, bool]
if username == "google-oauth2-token":
user_profile = authenticate(google_oauth2_token=password,
realm_subdomain=get_subdomain(request),
return_data=return_data)
else:
user_profile = authenticate(username=username,
password=password,
realm_subdomain=get_subdomain(request),
return_data=return_data)
if return_data.get("inactive_user") == True:
return json_error(_("Your account has been disabled."),
data={"reason": "user disable"}, status=403)
if return_data.get("inactive_realm") == True:
return json_error(_("Your realm has been deactivated."),
data={"reason": "realm deactivated"}, status=403)
if return_data.get("password_auth_disabled") == True:
return json_error(_("Password auth is disabled in your team."),
data={"reason": "password auth disabled"}, status=403)
if user_profile is None:
if return_data.get("valid_attestation") == True:
# We can leak that the user is unregistered iff they present a valid authentication string for the user.
return json_error(_("This user is not registered; do so from a browser."),
data={"reason": "unregistered"}, status=403)
return json_error(_("Your username or password is incorrect."),
data={"reason": "incorrect_creds"}, status=403)
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@csrf_exempt
def api_get_auth_backends(request):
# type: (HttpRequest) -> HttpResponse
# May return a false positive for password auth if it's been disabled
# for a specific realm. Currently only happens for zulip.com on prod
return json_success({"password": password_auth_enabled(None),
"dev": dev_auth_enabled(),
"google": google_auth_enabled(),
})
@authenticated_json_post_view
@has_request_variables
def json_fetch_api_key(request, user_profile, password=REQ(default='')):
# type: (HttpRequest, UserProfile, str) -> HttpResponse
if password_auth_enabled(user_profile.realm):
if not authenticate(username=user_profile.email, password=password,
realm_subdomain=get_subdomain(request)):
return json_error(_("Your username or password is incorrect."))
return json_success({"api_key": user_profile.api_key})
@csrf_exempt
def api_fetch_google_client_id(request):
# type: (HttpRequest) -> HttpResponse
if not settings.GOOGLE_CLIENT_ID:
return json_error(_("GOOGLE_CLIENT_ID is not configured"), status=400)
return json_success({"google_client_id": settings.GOOGLE_CLIENT_ID})
@require_post
def logout_then_login(request, **kwargs):
# type: (HttpRequest, **Any) -> HttpResponse
return django_logout_then_login(request, kwargs)
|
|
import os
import os.path as op
import platform
import re
import shutil
from operator import itemgetter
from werkzeug import secure_filename
from flask import flash, url_for, redirect, abort, request
from wtforms import fields, validators
from flask.ext.admin import form, helpers
from flask.ext.admin._compat import urljoin
from flask.ext.admin.base import BaseView, expose
from flask.ext.admin.actions import action, ActionsMixin
from flask.ext.admin.babel import gettext, lazy_gettext
class NameForm(form.BaseForm):
"""
Form with a filename input field.
Validates if provided name is valid for *nix and Windows systems.
"""
name = fields.TextField()
regexp = re.compile(r'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$')
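    # e.g. 'notes' and 'my_dir-2' pass; reserved Windows device names such as
    # 'CON' or 'LPT1', names starting with '.', and names containing
    # characters like '/', '?' or '"' are rejected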
def validate_name(self, field):
if not self.regexp.match(field.data):
raise validators.ValidationError(gettext('Invalid directory name'))
class UploadForm(form.BaseForm):
"""
    File upload form. Works with a FileAdmin instance to check whether a file
    with the given extension may be uploaded.
"""
upload = fields.FileField(lazy_gettext('File to upload'))
def __init__(self, admin):
self.admin = admin
super(UploadForm, self).__init__(helpers.get_form_data())
def validate_upload(self, field):
if not self.upload.data:
raise validators.ValidationError(gettext('File required.'))
filename = self.upload.data.filename
if not self.admin.is_file_allowed(filename):
raise validators.ValidationError(gettext('Invalid file type.'))
class EditForm(form.BaseForm):
content = fields.TextAreaField(lazy_gettext('Content'),
(validators.required(),))
class FileAdmin(BaseView, ActionsMixin):
"""
Simple file-management interface.
Requires two parameters:
:param path:
Path to the directory which will be managed
:param url:
Base URL for the directory. Will be used to generate
static links to the files.
Sample usage::
admin = Admin()
path = op.join(op.dirname(__file__), 'static')
admin.add_view(FileAdmin(path, '/static/', name='Static Files'))
admin.setup_app(app)
"""
can_upload = True
"""
Is file upload allowed.
"""
can_delete = True
"""
Is file deletion allowed.
"""
can_delete_dirs = True
"""
    Is recursive directory deletion allowed.
"""
can_mkdir = True
"""
Is directory creation allowed.
"""
can_rename = True
"""
Is file and directory renaming allowed.
"""
allowed_extensions = None
"""
List of allowed extensions for uploads, in lower case.
Example::
class MyAdmin(FileAdmin):
allowed_extensions = ('swf', 'jpg', 'gif', 'png')
"""
editable_extensions = tuple()
"""
List of editable extensions, in lower case.
Example::
class MyAdmin(FileAdmin):
editable_extensions = ('md', 'html', 'txt')
"""
list_template = 'admin/file/list.html'
"""
File list template
"""
upload_template = 'admin/file/form.html'
"""
File upload template
"""
mkdir_template = 'admin/file/form.html'
"""
Directory creation (mkdir) template
"""
rename_template = 'admin/file/rename.html'
"""
Rename template
"""
edit_template = 'admin/file/edit.html'
"""
Edit template
"""
def __init__(self, base_path, base_url,
name=None, category=None, endpoint=None, url=None,
verify_path=True):
"""
Constructor.
:param base_path:
Base file storage location
:param base_url:
Base URL for the files
:param name:
Name of this view. If not provided, will default to the class name.
:param category:
View category
:param endpoint:
Endpoint name for the view
:param url:
URL for view
:param verify_path:
            Verify if path exists. If set to `True` and the path does not
            exist, an exception will be raised.
"""
self.base_path = base_path
self.base_url = base_url
self.init_actions()
self._on_windows = platform.system() == 'Windows'
# Convert allowed_extensions to set for quick validation
if (self.allowed_extensions and
not isinstance(self.allowed_extensions, set)):
self.allowed_extensions = set(self.allowed_extensions)
# Convert editable_extensions to set for quick validation
if (self.editable_extensions and
not isinstance(self.editable_extensions, set)):
self.editable_extensions = set(self.editable_extensions)
# Check if path exists
if not op.exists(base_path):
raise IOError('FileAdmin path "%s" does not exist or is not accessible' % base_path)
super(FileAdmin, self).__init__(name, category, endpoint, url)
def is_accessible_path(self, path):
"""
Verify if the provided path is accessible for the current user.
Override to customize behavior.
:param path:
Relative path to the root
"""
return True
def get_base_path(self):
"""
Return base path. Override to customize behavior (per-user
directories, etc)
"""
return op.normpath(self.base_path)
def get_base_url(self):
"""
Return base URL. Override to customize behavior (per-user
directories, etc)
"""
return self.base_url
def is_file_allowed(self, filename):
"""
Verify if file can be uploaded.
Override to customize behavior.
:param filename:
Source file name
"""
ext = op.splitext(filename)[1].lower()
if ext.startswith('.'):
ext = ext[1:]
if self.allowed_extensions and ext not in self.allowed_extensions:
return False
return True
def is_file_editable(self, filename):
"""
Determine if the file can be edited.
Override to customize behavior.
:param filename:
Source file name
"""
ext = op.splitext(filename)[1].lower()
if ext.startswith('.'):
ext = ext[1:]
if not self.editable_extensions or ext not in self.editable_extensions:
return False
return True
def is_in_folder(self, base_path, directory):
"""
Verify that `directory` is in `base_path` folder
:param base_path:
Base directory path
:param directory:
Directory path to check
"""
return op.normpath(directory).startswith(base_path)
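    # Note: a bare startswith() also accepts sibling paths that share the
    # prefix (base '/srv/files' matches '/srv/files-old'). A stricter variant,
    # as a sketch rather than this library's behavior, would be:
    #
    #   norm = op.normpath(directory)
    #   return norm == base_path or norm.startswith(base_path + os.sep)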
def save_file(self, path, file_data):
"""
Save uploaded file to the disk
:param path:
Path to save to
:param file_data:
Werkzeug `FileStorage` object
"""
file_data.save(path)
def _get_dir_url(self, endpoint, path, **kwargs):
"""
Return prettified URL
:param endpoint:
Endpoint name
:param path:
Directory path
:param kwargs:
Additional arguments
"""
if not path:
return url_for(endpoint)
else:
if self._on_windows:
path = path.replace('\\', '/')
kwargs['path'] = path
return url_for(endpoint, **kwargs)
def _get_file_url(self, path):
"""
Return static file url
:param path:
Static file path
"""
if self.is_file_editable(path):
return url_for(".edit", path=path)
else:
base_url = self.get_base_url()
return urljoin(base_url, path)
def _normalize_path(self, path):
"""
Verify and normalize path.
If the path is not relative to the base directory, will raise a 404 exception.
If the path does not exist, this will also raise a 404 exception.
"""
base_path = self.get_base_path()
if path is None:
directory = base_path
path = ''
else:
path = op.normpath(path)
directory = op.normpath(op.join(base_path, path))
if not self.is_in_folder(base_path, directory):
abort(404)
if not op.exists(directory):
abort(404)
return base_path, directory, path
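    # e.g. with base '/srv/files' and path 'docs/a.txt' this yields
    # ('/srv/files', '/srv/files/docs/a.txt', 'docs/a.txt'); a traversal
    # attempt such as '../etc' normalizes outside the base and aborts with 404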
def is_action_allowed(self, name):
if name == 'delete' and not self.can_delete:
return False
return True
def on_rename(self, full_path, dir_base, filename):
"""
Perform some actions after a file or directory has been renamed.
Called from rename method
By default do nothing.
"""
pass
def on_edit_file(self, full_path, path):
"""
Perform some actions after a file has been successfully changed.
Called from edit method
By default do nothing.
"""
pass
def on_file_upload(self, directory, path, filename):
"""
Perform some actions after a file has been successfully uploaded.
Called from upload method
By default do nothing.
"""
pass
def on_mkdir(self, parent_dir, dir_name):
"""
Perform some actions after a directory has successfully been created.
Called from mkdir method
By default do nothing.
"""
pass
def on_directory_delete(self, full_path, dir_name):
"""
Perform some actions after a directory has successfully been deleted.
Called from delete method
By default do nothing.
"""
pass
def on_file_delete(self, full_path, filename):
"""
Perform some actions after a file has successfully been deleted.
Called from delete method
By default do nothing.
"""
pass
@expose('/')
@expose('/b/<path:path>')
def index(self, path=None):
"""
Index view method
:param path:
Optional directory path. If not provided, will use the base directory
"""
# Get path and verify if it is valid
base_path, directory, path = self._normalize_path(path)
# Get directory listing
items = []
# Parent directory
if directory != base_path:
parent_path = op.normpath(op.join(path, '..'))
if parent_path == '.':
parent_path = None
items.append(('..', parent_path, True, 0))
for f in os.listdir(directory):
fp = op.join(directory, f)
items.append((f, op.join(path, f), op.isdir(fp), op.getsize(fp)))
# Sort by name
items.sort(key=itemgetter(0))
# Sort by type
items.sort(key=itemgetter(2), reverse=True)
# Generate breadcrumbs
accumulator = []
breadcrumbs = []
for n in path.split(os.sep):
accumulator.append(n)
breadcrumbs.append((n, op.join(*accumulator)))
# Actions
actions, actions_confirmation = self.get_actions_list()
return self.render(self.list_template,
dir_path=path,
breadcrumbs=breadcrumbs,
get_dir_url=self._get_dir_url,
get_file_url=self._get_file_url,
items=items,
actions=actions,
actions_confirmation=actions_confirmation)
@expose('/upload/', methods=('GET', 'POST'))
@expose('/upload/<path:path>', methods=('GET', 'POST'))
def upload(self, path=None):
"""
Upload view method
:param path:
Optional directory path. If not provided, will use the base directory
"""
# Get path and verify if it is valid
base_path, directory, path = self._normalize_path(path)
if not self.can_upload:
flash(gettext('File uploading is disabled.'), 'error')
return redirect(self._get_dir_url('.index', path))
form = UploadForm(self)
if helpers.validate_form_on_submit(form):
filename = op.join(directory,
secure_filename(form.upload.data.filename))
if op.exists(filename):
flash(gettext('File "%(name)s" already exists.', name=filename),
'error')
else:
try:
self.save_file(filename, form.upload.data)
self.on_file_upload(directory, path, filename)
return redirect(self._get_dir_url('.index', path))
except Exception as ex:
flash(gettext('Failed to save file: %(error)s', error=ex))
return self.render(self.upload_template, form=form)
@expose('/mkdir/', methods=('GET', 'POST'))
@expose('/mkdir/<path:path>', methods=('GET', 'POST'))
def mkdir(self, path=None):
"""
Directory creation view method
:param path:
Optional directory path. If not provided, will use the base directory
"""
# Get path and verify if it is valid
base_path, directory, path = self._normalize_path(path)
dir_url = self._get_dir_url('.index', path)
if not self.can_mkdir:
flash(gettext('Directory creation is disabled.'), 'error')
return redirect(dir_url)
form = NameForm(helpers.get_form_data())
if helpers.validate_form_on_submit(form):
try:
os.mkdir(op.join(directory, form.name.data))
self.on_mkdir(directory, form.name.data)
return redirect(dir_url)
except Exception as ex:
                flash(gettext('Failed to create directory: %(error)s', error=ex), 'error')
return self.render(self.mkdir_template,
form=form,
dir_url=dir_url)
@expose('/delete/', methods=('POST',))
def delete(self):
"""
Delete view method
"""
path = request.form.get('path')
if not path:
return redirect(url_for('.index'))
# Get path and verify if it is valid
base_path, full_path, path = self._normalize_path(path)
return_url = self._get_dir_url('.index', op.dirname(path))
if not self.can_delete:
flash(gettext('Deletion is disabled.'))
return redirect(return_url)
if op.isdir(full_path):
if not self.can_delete_dirs:
flash(gettext('Directory deletion is disabled.'))
return redirect(return_url)
try:
shutil.rmtree(full_path)
self.on_directory_delete(full_path, path)
                flash(gettext('Directory "%(path)s" was successfully deleted.', path=path))
except Exception as ex:
flash(gettext('Failed to delete directory: %(error)s', error=ex), 'error')
else:
try:
os.remove(full_path)
self.on_file_delete(full_path, path)
flash(gettext('File "%(name)s" was successfully deleted.', name=path))
except Exception as ex:
                flash(gettext('Failed to delete file: %(error)s', error=ex), 'error')
return redirect(return_url)
@expose('/rename/', methods=('GET', 'POST'))
def rename(self):
"""
Rename view method
"""
path = request.args.get('path')
if not path:
return redirect(url_for('.index'))
base_path, full_path, path = self._normalize_path(path)
return_url = self._get_dir_url('.index', op.dirname(path))
if not self.can_rename:
flash(gettext('Renaming is disabled.'))
return redirect(return_url)
if not op.exists(full_path):
flash(gettext('Path does not exist.'))
return redirect(return_url)
form = NameForm(helpers.get_form_data(), name=op.basename(path))
if helpers.validate_form_on_submit(form):
try:
dir_base = op.dirname(full_path)
filename = secure_filename(form.name.data)
os.rename(full_path, op.join(dir_base, filename))
self.on_rename(full_path, dir_base, filename)
flash(gettext('Successfully renamed "%(src)s" to "%(dst)s"',
src=op.basename(path),
dst=filename))
except Exception as ex:
flash(gettext('Failed to rename: %(error)s', error=ex), 'error')
return redirect(return_url)
return self.render(self.rename_template,
form=form,
path=op.dirname(path),
name=op.basename(path),
dir_url=return_url)
@expose('/edit/', methods=('GET', 'POST'))
def edit(self):
"""
Edit view method
"""
path = request.args.getlist('path')
next_url = None
if not path:
return redirect(url_for('.index'))
if len(path) > 1:
next_url = url_for('.edit', path=path[1:])
path = path[0]
base_path, full_path, path = self._normalize_path(path)
        dir_url = self._get_dir_url('.index', op.dirname(path))
next_url = next_url or dir_url
form = EditForm(helpers.get_form_data())
error = False
if helpers.validate_form_on_submit(form):
form.process(request.form, content='')
if form.validate():
try:
with open(full_path, 'w') as f:
f.write(request.form['content'])
except IOError:
flash(gettext("Error saving changes to %(name)s.", name=path), 'error')
error = True
else:
self.on_edit_file(full_path, path)
flash(gettext("Changes to %(name)s saved successfully.", name=path))
return redirect(next_url)
else:
try:
with open(full_path, 'r') as f:
content = f.read()
except IOError:
flash(gettext("Error reading %(name)s.", name=path), 'error')
error = True
            except Exception:
flash(gettext("Unexpected error while reading from %(name)s", name=path), 'error')
error = True
else:
try:
content.decode('utf8')
except UnicodeDecodeError:
flash(gettext("Cannot edit %(name)s.", name=path), 'error')
error = True
                except Exception:
flash(gettext("Unexpected error while reading from %(name)s", name=path), 'error')
error = True
else:
form.content.data = content
return self.render(self.edit_template, dir_url=dir_url, path=path,
form=form, error=error)
@expose('/action/', methods=('POST',))
def action_view(self):
return self.handle_action()
# Actions
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete these files?'))
def action_delete(self, items):
for path in items:
base_path, full_path, path = self._normalize_path(path)
try:
os.remove(full_path)
flash(gettext('File "%(name)s" was successfully deleted.', name=path))
except Exception as ex:
                flash(gettext('Failed to delete file: %(error)s', error=ex), 'error')
@action('edit', lazy_gettext('Edit'))
def action_edit(self, items):
return redirect(url_for('.edit', path=items))
|
|
# -*- coding: utf8 -*-
"""
unpack ipynb
Tested 2015 March
"""
import json
import os
import re
def dont_do_anything(fw, cell_dict):
print("won't do anything about", cell_dict["cell_type"])
print(cell_dict)
def handle_heading(fw, cell_dict):
output = convert_heading(cell_dict)
fw.write(output)
return output
def convert_heading(cell_dict):
"""
string to be written to file
:param cell_dict:
:return:
>>> cd = {'source': ["Fraud detection with Benford's law"], 'cell_type': 'heading', 'level': 1, 'metadata': {}}
    >>> print(convert_heading(cd))
    ############################################################
    # Fraud detection with Benford's law
    ############################################################
    <BLANKLINE>
"""
'''
sample heading cell
{'cell_type': 'heading',
'level': 1,
'metadata': {},
'source': ['HYPOTHESIS TESTING EXERCISES - SOLUTION']}
'''
# decoration for heading
output = ('#' * 60 + '\n')
# print lines in 'source' field
for line in cell_dict['source']:
output += '# ' + line + '\n'
# decoration for heading
output += '#'.ljust(60, '#') + '\n'
return output
def handle_markdown(fw, cell_dict):
""""""
'''
{'cell_type': 'markdown',
'metadata': {},
'source': ["Verify the validity of Benford's law when applied to 1)
the population of a country; 2) the number of breast cancer
cases in each country.\n",
'\n',
'1. Collect a count of the first digits of all the numbers in the data sets\n',
"2. Use a statistical tests to compare the observed count to the one
expected by Benford's law"]}
'''
code_list = cell_dict['source']
fw.write('"""\n')
for code in code_list:
fw.write(code)
fw.write('\n"""\n')
def handle_code(fw, cell_dict):
""""""
'''
{'cell_type': 'code',
'collapsed': False,
'input': ['%matplotlib inline\n',
'\n',
'import numpy as np\n',
'import pandas as pd\n',
'import matplotlib.pyplot as plt\n',
'import statsmodels.api as sm\n',
'from scipy import stats'],
'language': 'python',
'metadata': {},
'outputs': [],
'prompt_number': 1}
'''
'''handle input cell'''
process_input_source(cell_dict, fw, "input")
'''handle source cell'''
process_input_source(cell_dict, fw, "source")
    '''handle output cell'''
    # nbformat stores outputs under the "outputs" key; entries may be dicts,
    # so render them as strings and write them out commented
    output = cell_dict.get("outputs", [])
    for code in output:
        fw.write('## ')
        fw.write(str(code))
        fw.write('\n')
fw.write('#'.ljust(20, '#'))
fw.write('\n\n')
def process_input_source(cell_dict, fw, marker):
for code in cell_dict.get(marker, []):
if code:
'''magic command'''
py_name = find_py_name_from_run_magic_cmd(code)
if py_name:
fw.write('from %.124s import *\n' % py_name)
else:
if '%' == code[0]:
fw.write('#')
code_strip = code.strip()
if code_strip and ('?' == code_strip[-1]) and ('#' != code_strip[0]):
fw.write('help(')
fw.write(code.strip()[:-1])
fw.write(')\n')
else:
fw.write(code)
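# Examples of the rewriting performed by process_input_source:
#   '%run phugoid.py'     ->  'from phugoid import *'
#   'np.linspace?'        ->  'help(np.linspace)'
#   '%matplotlib inline'  ->  '#%matplotlib inline' (commented out)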
def find_py_name_from_run_magic_cmd(code):
"""
find ??? of magic command '%run ???.py'
:param code:
:return:
>>> find_py_name_from_run_magic_cmd('%run phugoid.py')
'phugoid'
"""
    result = re.findall(r'%run\s+(.*)\.py', code)
if result:
result = result[0]
return result
# lookup table of cell handlers
handler = {'heading': handle_heading,
'code': handle_code,
'markdown': handle_markdown,
'raw': handle_markdown,
}
def unpack(filename, b_verbose=False):
"""
:param filename:
:param b_verbose: if True print more detailed information
:return:
"""
split_ext = os.path.splitext(filename)
if ".ipynb" != split_ext[1]:
filename = split_ext[0] + ".ipynb"
py_name = split_ext[0] + ".py"
    if os.path.exists(py_name):
        # never overwrite an existing .py file
        return
    fw = open(py_name, 'w', encoding='utf8')
''' read file '''
if os.path.exists(filename):
f = open(filename, 'r', encoding='utf8')
txt = f.read()
f.close()
        ''' replace all triple double quotes with triple single quotes to
        avoid breaking the docstring markers that handle_markdown writes '''
        txt = txt.replace('"""', "'''")
        ''' decompose: an .ipynb file is JSON '''
        d = json.loads(txt)
try:
worksheets = d.get('worksheets', [])
if worksheets:
for worksheet in worksheets:
cells = worksheet.get('cells', [])
process_cells(cells, fw, b_verbose)
else:
cells = d.get('cells', [])
process_cells(cells, fw, b_verbose)
if b_verbose and not cells:
print("No worksheet to process")
except:
print(filename)
raise
fw.close()
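# Usage sketch:
#   unpack('lesson01.ipynb')   # writes lesson01.py (skipped if it exists)
#   convert_tree('notebooks')  # converts every .ipynb found under a directory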
def process_cells(cells, fw, b_verbose=False):
if cells:
fw.write("# -*- coding: utf8 -*-\nfrom pylab import *\n")
for cell in cells:
# process cell, or don't do anything
process_one_cell(fw, cell)
# to present the result at least at the end
        fw.write('print(" Presented results may overlap. ".center(60, "*"))')
fw.write("\nshow()\n")
else:
if b_verbose:
print("No cell to process")
def process_one_cell(fw, cell):
call_this = handler.get(cell['cell_type'],
dont_do_anything)
call_this(fw, cell)
def convert_tree(full_path=os.getcwd()):
for dir_path, dir_names, file_names in os.walk(full_path):
for filename in file_names:
if filename.endswith(".ipynb"):
full_path = os.path.join(dir_path, filename)
unpack(full_path)
if "__main__" == __name__:
import sys
if 2 <= len(sys.argv):
unpack(sys.argv[1])
else:
convert_tree(os.getcwd())
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import json
import logging
import os
import re
import sys
import tempfile
import time
import traceback
import zipfile
from django.conf import settings
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods
import django.views.debug
import desktop.conf
import desktop.log.log_buffer
from desktop.api import massaged_tags_for_json, massaged_documents_for_json, _get_docs
from desktop.lib import django_mako
from desktop.lib.conf import GLOBAL_CONFIG, BoundConfig
from desktop.lib.django_util import JsonResponse, login_notrequired, render_json, render
from desktop.lib.i18n import smart_str
from desktop.lib.paths import get_desktop_root
from desktop.lib.thread_util import dump_traceback
from desktop.log.access import access_log_level, access_warn
from desktop.models import UserPreferences, Settings, Document2
from desktop import appmanager
LOG = logging.getLogger(__name__)
@require_http_methods(['HEAD'])
def is_alive(request):
return HttpResponse('')
def home(request):
docs = _get_docs(request.user)
apps = appmanager.get_apps_dict(request.user)
return render('home.mako', request, {
'apps': apps,
'json_documents': json.dumps(massaged_documents_for_json(docs, request.user)),
'json_tags': json.dumps(massaged_tags_for_json(docs, request.user)),
'tours_and_tutorials': Settings.get_settings().tours_and_tutorials
})
def home2(request):
docs = Document2.objects.filter(owner=request.user)
apps = appmanager.get_apps_dict(request.user)
return render('home2.mako', request, {
'apps': apps,
'json_documents': json.dumps([doc.to_dict() for doc in docs]),
'tours_and_tutorials': Settings.get_settings().tours_and_tutorials
})
@access_log_level(logging.WARN)
def log_view(request):
"""
We have a log handler that retains the last X characters of log messages.
If it is attached to the root logger, this view will display that history,
otherwise it will report that it can't be found.
"""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
l = logging.getLogger()
for h in l.handlers:
if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
      return render('logs.mako', request, dict(log=list(h.buf), query=request.GET.get("q", "")))
return render('logs.mako', request, dict(log=[_("No logs found!")]))
@access_log_level(logging.WARN)
def download_log_view(request):
"""
Zip up the log buffer and then return as a file attachment.
"""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
l = logging.getLogger()
for h in l.handlers:
if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
try:
# We want to avoid doing a '\n'.join of the entire log in memory
# in case it is rather big. So we write it to a file line by line
# and pass that file to zipfile, which might follow a more efficient path.
tmp = tempfile.NamedTemporaryFile()
log_tmp = tempfile.NamedTemporaryFile("w+t")
for line in h.buf:
log_tmp.write(smart_str(line, errors='replace') + '\n')
# This is not just for show - without flush, we often get truncated logs
log_tmp.flush()
t = time.time()
zip = zipfile.ZipFile(tmp, "w", zipfile.ZIP_DEFLATED)
zip.write(log_tmp.name, "hue-logs/hue-%s.log" % t)
zip.close()
length = tmp.tell()
# if we don't seek to start of file, no bytes will be written
tmp.seek(0)
wrapper = FileWrapper(tmp)
response = HttpResponse(wrapper, content_type="application/zip")
response['Content-Disposition'] = 'attachment; filename=hue-logs-%s.zip' % t
response['Content-Length'] = length
return response
except Exception:
LOG.exception("Couldn't construct zip file to write logs")
return log_view(request)
return render_to_response("logs.mako", dict(log=[_("No logs found.")]))
@access_log_level(logging.DEBUG)
def prefs(request, key=None):
"""Get or set preferences."""
if key is None:
d = dict( (x.key, x.value) for x in UserPreferences.objects.filter(user=request.user))
return render_json(d)
else:
if "set" in request.REQUEST:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
except UserPreferences.DoesNotExist:
x = UserPreferences(user=request.user, key=key)
x.value = request.REQUEST["set"]
x.save()
return render_json(True)
if "delete" in request.REQUEST:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
x.delete()
return render_json(True)
except UserPreferences.DoesNotExist:
return render_json(False)
else:
try:
x = UserPreferences.objects.get(user=request.user, key=key)
return render_json(x.value)
except UserPreferences.DoesNotExist:
return render_json(None)
def bootstrap(request):
"""Concatenates bootstrap.js files from all installed Hue apps."""
# May contain None entries for apps that don't have bootstraps.
all_bootstraps = [(app, app.get_bootstrap_file()) for app in appmanager.DESKTOP_APPS if request.user.has_hue_permission(action="access", app=app.name)]
# Iterate over the streams.
concatenated = ["\n/* %s */\n%s" % (app.name, b.read()) for app, b in all_bootstraps if b is not None]
# HttpResponse can take an iterable as the first argument, which
# is what happens here.
return HttpResponse(concatenated, content_type='text/javascript')
_status_bar_views = []
def register_status_bar_view(view):
global _status_bar_views
_status_bar_views.append(view)
@access_log_level(logging.DEBUG)
def status_bar(request):
"""
Concatenates multiple views together to build up the "status bar".
These views are registered using register_status_bar_view above.
"""
resp = ""
for view in _status_bar_views:
try:
r = view(request)
if r.status_code == 200:
resp += r.content
else:
LOG.warning("Failed to execute status_bar view %s" % (view,))
except:
LOG.exception("Failed to execute status_bar view %s" % (view,))
return HttpResponse(resp)
def dump_config(request):
# Note that this requires login (as do most apps).
show_private = False
conf_dir = os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf")))
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
if request.GET.get("private"):
show_private = True
apps = sorted(appmanager.DESKTOP_MODULES, key=lambda app: app.name)
apps_names = [app.name for app in apps]
top_level = sorted(GLOBAL_CONFIG.get().values(), key=lambda obj: apps_names.index(obj.config.key))
return render("dump_config.mako", request, dict(
show_private=show_private,
top_level=top_level,
conf_dir=conf_dir,
apps=apps))
@access_log_level(logging.WARN)
def threads(request):
"""Dumps out server threads. Useful for debugging."""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
out = StringIO.StringIO()
dump_traceback(file=out)
return HttpResponse(out.getvalue(), content_type="text/plain")
@access_log_level(logging.WARN)
def memory(request):
"""Dumps out server threads. Useful for debugging."""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
if not hasattr(settings, 'MEMORY_PROFILER'):
return HttpResponse(_("You must enable the memory profiler via the memory_profiler config in the hue.ini."))
# type, from, to, index
command_order = {
'type': 0,
'from': 1,
'to': 2,
'index': 3
}
default_command = [None, None, None, None]
commands = []
for item in request.GET:
res = re.match(r'(?P<command>\w+)\.(?P<count>\d+)', item)
if res:
d = res.groupdict()
count = int(d['count'])
command = str(d['command'])
while len(commands) <= count:
commands.append(default_command[:])
commands[count][command_order.get(command)] = request.GET.get(item)
heap = settings.MEMORY_PROFILER.heap()
for command in commands:
if command[0] is not None:
heap = getattr(heap, command[0])
if command[1] is not None and command[2] is not None:
heap = heap[int(command[1]):int(command[2])]
if command[3] is not None:
heap = heap[int(command[3])]
return HttpResponse(str(heap), content_type="text/plain")
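# Editor's note, a hedged illustration that is not part of the original code:
# the view above expects GET parameters of the form "<command>.<index>", where
# <command> is one of type/from/to/index. For example (hypothetical URL path):
#   /desktop/debug/memory?type.0=bytype&from.1=0&to.1=10
# yields commands == [['bytype', None, None, None], [None, '0', '10', None]],
# i.e. take getattr(heap, 'bytype') on the profiler's heap() object, then
# slice the result with heap[0:10].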
def jasmine(request):
return render('jasmine.mako', request, None)
@login_notrequired
def unsupported(request):
return render('unsupported.mako', request, None)
def index(request):
if request.user.is_superuser and request.COOKIES.get('hueLandingPage') != 'home':
return redirect(reverse('about:index'))
else:
return home(request)
def csrf_failure(request, reason=None):
"""Registered handler for CSRF."""
access_warn(request, reason)
return render("403_csrf.mako", request, dict(uri=request.build_absolute_uri()), status=403)
def serve_403_error(request, *args, **kwargs):
"""Registered handler for 403. We just return a simple error"""
access_warn(request, "403 access forbidden")
return render("403.mako", request, dict(uri=request.build_absolute_uri()), status=403)
def serve_404_error(request, *args, **kwargs):
"""Registered handler for 404. We just return a simple error"""
access_warn(request, "404 not found")
return render("404.mako", request, dict(uri=request.build_absolute_uri()), status=404)
def serve_500_error(request, *args, **kwargs):
"""Registered handler for 500. We use the debug view to make debugging easier."""
try:
exc_info = sys.exc_info()
if exc_info:
if desktop.conf.HTTP_500_DEBUG_MODE.get() and exc_info[0] and exc_info[1]:
# exc_info may be (None, None, None); only show the debug view for a real exception.
return django.views.debug.technical_500_response(request, *exc_info)
else:
# Could have an empty traceback
return render("500.mako", request, {'traceback': traceback.extract_tb(exc_info[2])})
else:
# exc_info could be empty
return render("500.mako", request, {})
finally:
# Fallback to default 500 response if ours fails
# Will end up here:
# - Middleware or authentication backends problems
# - Certain missing imports
# - Packaging and install issues
pass
_LOG_LEVELS = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG
}
_MAX_LOG_FRONTEND_EVENT_LENGTH = 1024
_LOG_FRONTEND_LOGGER = logging.getLogger("desktop.views.log_frontend_event")
@login_notrequired
def log_frontend_event(request):
"""
Logs arguments to server's log. Returns an
empty string.
Parameters (specified via either GET or POST) are
"logname", "level" (one of "debug", "info", "warning",
"error", or "critical"), and "message".
"""
def get(param, default=None):
return request.REQUEST.get(param, default)
level = _LOG_LEVELS.get(get("level"), logging.INFO)
msg = "Untrusted log event from user %s: %s" % (
request.user,
get("message", "")[:_MAX_LOG_FRONTEND_EVENT_LENGTH])
_LOG_FRONTEND_LOGGER.log(level, msg)
return HttpResponse("")
def commonheader(title, section, user, padding="90px"):
"""
Returns the rendered common header
"""
current_app = None
other_apps = []
if user.is_authenticated():
apps = appmanager.get_apps(user)
apps_list = appmanager.get_apps_dict(user)
for app in apps:
if app.display_name not in [
'beeswax', 'impala', 'pig', 'jobsub', 'jobbrowser', 'metastore', 'hbase', 'sqoop', 'oozie', 'filebrowser',
'useradmin', 'search', 'help', 'about', 'zookeeper', 'proxy', 'rdbms', 'spark', 'indexer', 'security', 'notebook']:
other_apps.append(app)
if section == app.display_name:
current_app = app
else:
apps_list = []
return django_mako.render_to_string("common_header.mako", {
'current_app': current_app,
'apps': apps_list,
'other_apps': other_apps,
'title': title,
'section': section,
'padding': padding,
'user': user,
'is_demo': desktop.conf.DEMO_ENABLED.get(),
'is_ldap_setup': 'desktop.auth.backend.LdapBackend' in desktop.conf.AUTH.BACKEND.get()
})
def commonshare():
return django_mako.render_to_string("common_share.mako", {})
def commonimportexport(request):
return django_mako.render_to_string("common_import_export.mako", {'request': request})
def commonfooter(messages=None):
"""
Returns the rendered common footer
"""
if messages is None:
messages = {}
hue_settings = Settings.get_settings()
return django_mako.render_to_string("common_footer.mako", {
'messages': messages,
'version': settings.HUE_DESKTOP_VERSION,
'collect_usage': collect_usage(),
'tours_and_tutorials': hue_settings.tours_and_tutorials
})
def collect_usage():
return desktop.conf.COLLECT_USAGE.get() and Settings.get_settings().collect_usage
# If the app's conf.py has a config_validator() method, call it.
CONFIG_VALIDATOR = 'config_validator'
#
# Cache config errors because (1) they mostly don't go away until restart,
# and (2) they can be costly to compute. So don't stress the system just because
# the dock bar wants to refresh every n seconds.
#
# The actual viewing of all errors may choose to disregard the cache.
#
_CONFIG_ERROR_LIST = None
def _get_config_errors(request, cache=True):
"""Returns a list of (confvar, err_msg) tuples."""
global _CONFIG_ERROR_LIST
if not cache or _CONFIG_ERROR_LIST is None:
error_list = [ ]
for module in appmanager.DESKTOP_MODULES:
# Get the config_validator() function
try:
validator = getattr(module.conf, CONFIG_VALIDATOR)
except AttributeError:
continue
if not callable(validator):
LOG.warn("Auto config validation: %s.%s is not a function" %
(module.conf.__name__, CONFIG_VALIDATOR))
continue
try:
for confvar, error in validator(request.user):
error = {
'name': confvar if isinstance(confvar, str) else confvar.get_fully_qualifying_key(),
'message': error,
}
if isinstance(confvar, BoundConfig):
error['value'] = confvar.get()
error_list.append(error)
except Exception as ex:
LOG.exception("Error in config validation by %s: %s" % (module.nice_name, ex))
_CONFIG_ERROR_LIST = error_list
return _CONFIG_ERROR_LIST
def check_config(request):
"""Check config and view for the list of errors"""
if not request.user.is_superuser:
return HttpResponse(_("You must be a superuser."))
context = {
'conf_dir': os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf"))),
'error_list': _get_config_errors(request, cache=False),
}
if request.GET.get('format') == 'json':
return JsonResponse(context)
else:
return render('check_config.mako', request, context, force_template=True)
def check_config_ajax(request):
"""Alert administrators about configuration problems."""
if not request.user.is_superuser:
return HttpResponse('')
error_list = _get_config_errors(request)
if not error_list:
# Return an empty response, rather than using the mako template, for performance.
return HttpResponse('')
return render('config_alert_dock.mako',
request,
dict(error_list=error_list),
force_template=True)
# This is a global non-view for inline KO i18n
def _ko(str=""):
return _(str).replace("'", "\\'")
# This is a global Mako filter option; use it with ${ yourvalue | n,antixss }
def antixss(value):
xss_regex = re.compile(r'<[^>]+>')
return xss_regex.sub('', value)
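# Illustrative doctest-style sketch of the two helpers above (not from the
# original source):
#   >>> print(_ko("Don't"))        # single quotes escaped for inline JS
#   Don\'t
#   >>> print(antixss('<b>bold</b> text'))
#   bold text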
|
|
#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs Util - Interactive Mode Helpers
===========================================
Provides auxiliary methods for the interactive mode.
"""
import os
import inspect
# Debug mode: Changed to true from interactive.py if specified by the user
# when starting the runtime. Enables the explicit prints.
DEBUG = False
SEPARATORS = { # for user defined lines in the entire/global scope
'globals_separator': "### GLOBALS ###",
# for user defined classes
'classes_separator': '### CLASSES ###',
# for user defined functions (that are not decorated)
'functions_separator': "### FUNCTIONS ###",
# for user defined tasks
'tasks_separator': "### TASKS ###"}
PREFIXES = ("@implement", "@constraint", "@decaf", "@mpi",
"@ompss", "@binary", "@opencl")
# ################################################################# #
# ################# MAIN FUNCTION ################################# #
# ################################################################# #
def update_tasks_code_file(f, file_path):
"""
Main interactive helper function.
Analyses the user code that has been executed and parses it looking for:
- imports
- tasks
- functions
Builds a file containing the necessary contents for the worker.
Also updates the old code with the new if functions or tasks are redefined.
:param f: new task function
:param file_path: file where the code is stored
:return: None
"""
if not os.path.exists(file_path):
_create_tasks_code_file(file_path)
if DEBUG:
print("Task definition detected.")
# Intercept the code
imports = _get_ipython_imports() # [import\n, import\n, ...]
global_code = _get_ipython_globals() # [var\n, var\n, ...]
classes_code = _get_classes() # {'name': str(line\nline\n...)}
functions_code = _get_functions() # {'name': str(line\nline\n...)}
task_code = _get_task_code(f) # {'name': str(line\nline\n...)}
old_code = _get_old_code(file_path) # old_code structure:
# {'imports':[import\n, import\n, ...],
# 'tasks':{'name':str(line\nline\n...),
# 'name':str(line\nline\n...), ...}}
# Look for new/modified pieces of code and compares the existing code with
# the new additions.
new_imports = _update_imports(imports, old_code['imports'])
new_globals = _update_globals(global_code, old_code['globals'])
new_classes = _update_classes(classes_code, old_code['classes'])
# Check that no function has the same name as a newly defined
# task
for k in task_code.keys():
functions_code.pop(k, None)
old_code['functions'].pop(k, None)
# Continue with comparisons
new_functions = _update_functions(functions_code, old_code['functions'])
new_tasks = _update_tasks(task_code, old_code['tasks'])
# Update the file where the code is stored.
_update_code_file(new_imports,
new_globals,
new_classes,
new_functions,
new_tasks,
file_path)
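# For reference, an editor's sketch of the layout of the file written by
# _update_code_file, matching the SEPARATORS defined above:
#   import foo
#   ### GLOBALS ###
#   MY_GLOBAL = 1
#   ### CLASSES ###
#   class MyClass:
#       ...
#   ### FUNCTIONS ###
#   def helper():
#       ...
#   ### TASKS ###
#   @task(returns=int)
#   def my_task(v):
#       return v + 1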
# ###################################################################
# ############### AUXILIAR METHODS ##################################
# ###################################################################
# CODE INTERCEPTION FUNCTIONS
def _create_tasks_code_file(file_path):
"""
Creates a file where to store the user code.
:param file_path: File location and name
:return: None
"""
user_code_file = open(file_path, 'a')
user_code_file.write('\n')
user_code_file.write(SEPARATORS['globals_separator'] + "\n")
user_code_file.write('\n')
user_code_file.write(SEPARATORS['classes_separator'] + "\n")
user_code_file.write('\n')
user_code_file.write(SEPARATORS['functions_separator'] + "\n")
user_code_file.write('\n')
user_code_file.write(SEPARATORS['tasks_separator'] + "\n")
user_code_file.write('\n')
user_code_file.close()
def _get_raw_code():
"""
Retrieve the raw code from interactive session.
:return: the list of the blocks defined by the user that are currently
loaded in globals
"""
import IPython
ipython = IPython.get_ipython()
raw_code = ipython.user_ns['In']
return raw_code
def _get_ipython_imports():
"""
Finds the user imports.
:return: A list of imports: [import\n, import\n, ...]
"""
raw_code = _get_raw_code()
imports = []
for i in raw_code:
# Each i can have more than one line (jupyter-notebook block)
# We only get the lines that start with from or import and do not
# have blank spaces before.
lines = i.split('\n')
for l in lines:
if l.startswith("from") or l.startswith("import"):
imports.append(l + '\n')
return imports
def _get_ipython_globals():
"""
Finds the user global variables.
WARNING: Assignations that use any of the master api calls will be
ignored so that the worker does not try to call the runtime.
WARNING2: Only global variables whose names are written in capital
letters will be considered as variables that must be seen by
the workers.
Please, take caution when modifying global variables, since they
can lead to race conditions in the user code execution.
:return: A list of lines: [var\n, var\n, ...]
"""
api_calls = ['compss_open',
'compss_delete_file',
'compss_wait_on_file',
'compss_delete_object',
'compss_wait_on']
raw_code = _get_raw_code()
glob_lines = {}
for i in raw_code:
# Each i can have more than one line (jupyter-notebook block)
# We only keep top-level variable assignations (lines that do not
# start with blank spaces).
lines = i.split('\n')
found_one = False
for l in lines:
# if the line starts without spaces and is a variable assignation
glob_name = ''
if not (l.startswith(' ') or l.startswith('\t')) and \
_is_variable_assignation(l):
line_parts = l.split()
glob_name = line_parts[0]
if not glob_name.isupper():
# It is an assignation where the variable name is not in
# capital letters
found_one = False
elif any(call in line_parts[2:] for call in api_calls):
# It is an assignation that contains a master api
# call --> ignore
found_one = False
else:
glob_lines[glob_name] = l.strip()
found_one = True
continue
# if the next line(s) start with a space or tab, they also belong
# to the global variable
if found_one and (l.startswith(' ') or l.startswith('\t')):
# It is a multi-line global variable definition
glob_lines[glob_name] += l.strip()
else:
found_one = False
return glob_lines
def _is_variable_assignation(line):
"""
This function is used to check if a line of code represents a variable
assignation:
* if contains a '=' (assignation) and does not start with import, nor @,
nor def, nor class.
* then it is ==> is a global variable assignation.
:param line: Line to parse
:return: <Boolean>
"""
if '=' in line:
parts = line.split()
if not (line.startswith("from") or
line.startswith("import") or
line.startswith("@") or
line.startswith("def") or
line.startswith("class")) \
and len(parts) >= 3 and parts[1] == '=':
# It is actually an assignation
return True
else:
# It is an import/function/decorator/class definition
return False
else:
# Not an assignation if does not contain '='
return False
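# Illustrative doctest-style examples (not in the original source):
#   >>> _is_variable_assignation('A = 1')
#   True
#   >>> _is_variable_assignation('import os')
#   False
#   >>> _is_variable_assignation('A=1')   # no spaces: parts[1] is not '='
#   False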
def _get_classes():
"""
Finds the user defined classes in the code.
:return: A dictionary with the user classes code:
{'name': str(line\nline\n...)}
"""
raw_code = _get_raw_code()
classes = {}
for block in raw_code:
lines = block.split('\n')
# Look for classes in the block
class_found = False
for line in lines:
class_name = ''
if line.startswith('class'):
# Class header: find name and include it in the classes dict
# split and remove empty spaces
header = [name for name in line.split(" ") if name]
# the name may be followed by the parameters parenthesis
class_name = header[1].split("(")[0].strip()
# create an entry in the classes dict
classes[class_name] = [line + '\n']
class_found = True
elif (line.startswith(" ") or
(line.startswith("\t")) or
(line.startswith('\n')) or
(line == '')) and class_found:
# class body: append
classes[class_name].append(line + '\n')
else:
class_found = False
# Plain classes content (from {key: [line, line,...]} to {key: line\nline})
for k, v in list(classes.items()):
# Collapse all lines into a single one
classes[k] = ''.join(v).strip()
return classes
def _get_functions():
"""
Finds the user defined functions in the code.
:return: A dictionary with the user functions code:
{'name': str(line\nline\n...)}
"""
raw_code = _get_raw_code()
functions = {}
for block in raw_code:
lines = block.split('\n')
# Look for functions in the block
is_task = False
is_function = False
function_found = False
func_name = ''
decorators = ''
for line in lines:
if line.startswith('@task'):
# The following function detected will be a task --> ignore
is_task = True
if line.startswith("@") and \
not any(map(line.startswith, PREFIXES)) and \
not is_task:
# It is a function preceded by a decorator
is_function = True
is_task = False
if line.startswith("def") and not is_task:
# A function which is not a task has been defined --> capture
# with is_function boolean
# Restore the is_task boolean to control if another task is
# defined in the same block.
is_function = True
is_task = False
if is_function:
if line.startswith("@"):
decorators += line + '\n'
if line.startswith("def"):
# Function header: find name and include it in the
# functions dict. Split and remove empty spaces
header = [name for name in line.split(" ") if name]
# the name may be followed by the parameters parenthesis
func_name = header[1].split("(")[0].strip()
# create an entry in the functions dict
functions[func_name] = [decorators + line + '\n']
decorators = ''
function_found = True
elif (line.startswith(" ") or
(line.startswith("\t")) or
(line.startswith('\n')) or
(line == '')) and function_found:
# Function body: append
functions[func_name].append(line + '\n')
else:
function_found = False
# Plain functions content:
# from {key: [line, line,...]} to {key: line\nline}
for k, v in list(functions.items()):
functions[k] = ''.join(v).strip() # Collapse all lines into one
return functions
def _get_task_code(f):
"""
Finds the task code.
:param f: Task function
:return: A dictionary with the task code:
{'name': str(line\nline\n...)}
"""
try:
task_code = inspect.getsource(f)
except TypeError:
# This is a numba jit declared task
task_code = inspect.getsource(f.py_func)
if task_code.startswith((' ', '\t')):
return {}
else:
name = ''
lines = task_code.split('\n')
for line in lines:
# Ignore the decorator stack
if line.strip().startswith('def'):
name = line.replace('(', ' (').split(' ')[1].strip()
break # Just need the first
return {name: task_code}
def _clean(lines_list):
"""
Removes the blank lines from a list of strings.
* _get_old_code auxiliary method - Clean imports list.
:param lines_list: List of strings
:return: The list without '\n' strings.
"""
result = []
if len(lines_list) == 1 and lines_list[0].strip() == '':
# If the lines_list only contains a single newline, remove it
return result
else:
# If it is longer, remove all single \n appearances
for l in lines_list:
if l.strip() != '':
result.append(l)
return result
def _get_old_code(file_path):
"""
Retrieve the old code from a file.
:param file_path: The file where the code is located.
:return: A dictionary with the imports and existing tasks.
"""
# Read the entire file
code_file = open(file_path, 'r')
contents = code_file.readlines()
code_file.close()
# Separate imports from tasks
file_imports = []
file_globals = []
file_classes = []
file_functions = []
file_tasks = []
found_glob_separator = False
found_class_separator = False
found_func_separator = False
found_task_separator = False
for line in contents:
if line == SEPARATORS['globals_separator'] + '\n':
found_glob_separator = True
elif line == SEPARATORS['classes_separator'] + '\n':
found_class_separator = True
elif line == SEPARATORS['functions_separator'] + '\n':
found_func_separator = True
elif line == SEPARATORS['tasks_separator'] + '\n':
found_task_separator = True
else:
if not found_glob_separator and \
not found_class_separator and \
not found_func_separator and \
not found_task_separator:
file_imports.append(line)
elif found_glob_separator and \
not found_class_separator and \
not found_func_separator and \
not found_task_separator:
file_globals.append(line)
elif found_glob_separator and \
found_class_separator and \
not found_func_separator and \
not found_task_separator:
file_classes.append(line)
elif found_glob_separator and \
found_class_separator and \
found_func_separator and \
not found_task_separator:
file_functions.append(line)
else:
file_tasks.append(line)
file_imports = _clean(file_imports)
file_globals = _clean(file_globals)
# file_classes = _clean(file_classes)
# file_functions = _clean(file_functions)
# file_tasks = _clean(file_tasks)
# Process globals
globs = {}
if len(file_globals) != 0:
# Collapse all lines into a single one
collapsed = ''.join(file_globals).strip()
scattered = collapsed.split('\n')
# Add globals to dictionary by variable name:
for g in scattered:
glob_code = g.strip()
glob_name = g.split()[0].strip()
globs[glob_name] = glob_code
file_globals = globs
# Process classes
classes = {}
# Collapse all lines into a single one
collapsed = ''.join(file_classes).strip()
# Then split by "class" and filter the empty results, then iterate
# concatenating "class" to all results.
cls = [('class ' + l) for l in
[name for name in collapsed.split('class ') if name]]
# Add classes to dictionary by class name:
for c in cls:
class_code = c.strip()
class_name = c.replace('(', ' (').split(' ')[1].strip()
classes[class_name] = class_code
# Process functions
functions = {}
# Clean empty lines
clean_functions = [l for l in file_functions if l]
# Iterate over the lines splitting by the ones that start with def
funcs = []
f = ''
for line in clean_functions:
if line.startswith('def'):
if f:
funcs.append(f)
f = line
else:
f += line
# Add functions to dictionary by function name:
for f in funcs:
func_code = f.strip()
func_name = f.replace('(', ' (').split(' ')[1].strip()
functions[func_name] = func_code
# Process tasks
tasks = {}
# Collapse all lines into a single one
collapsed = ''.join(file_tasks).strip()
# Then split by "@" and filter the empty results, then iterate
# concatenating "@" to all results.
tsks = [('@' + l) for l in [deco for deco in collapsed.split('@') if deco]]
# Take into account that other decorators may be placed above @task,
# so it is necessary to collapse the decorator stack
tasks_list = []
tsk = ""
for t in tsks:
if any(map(t.startswith, PREFIXES)):
tsk += t
if t.startswith("@task"):
tsk += t
tasks_list.append(tsk)
tsk = ""
elif not any(map(t.startswith, PREFIXES)):
# If a decorator over the function is provided, it will
# have to be included in the last task
tasks_list[-1] += t
# Add tasks to dictionary by task name:
for t in tasks_list:
# Example: '@task(returns=int)\ndef mytask(v):\n return v+1'
task_code = t.strip()
task_header = t.split('\ndef')[1]
task_name = task_header.replace('(', ' (').split(' ')[1].strip()
tasks[task_name] = task_code
old = {'imports': file_imports,
'globals': file_globals,
'classes': classes,
'functions': functions,
'tasks': tasks}
return old
# #######################
# CODE UPDATE FUNCTIONS #
# #######################
def _update_imports(new_imports, old_imports):
"""
Compares the old imports against the new ones and returns the old
imports plus the new imports that did not exist previously.
:param new_imports: All new imports <List>
:param old_imports: All old imports <List>
:return: A list of imports as strings.
"""
not_in_imports = []
for i in new_imports:
already = False
for j in old_imports:
if i == j:
already = True
if not already:
not_in_imports.append(i)
# Merge the minimum imports
imports = old_imports + not_in_imports
return imports
def _update_globals(new_globals, old_globals):
"""
Compares the old globals against the new ones and returns the old
globals updated with the new globals that did not exist previously.
:param new_globals: All new globals <Dictionary>
:param old_globals: All old globals <Dictionary>
:return: A dictionary of globals (variable name -> code).
"""
if len(old_globals) == 0:
return new_globals
else:
for gName in list(new_globals.keys()):
if DEBUG and gName in old_globals and \
(not new_globals[gName] == old_globals[gName]):
print("WARNING! Global variable " + gName +
" has been redefined (the previous will be deprecated).")
old_globals[gName] = new_globals[gName]
return old_globals
def _update_classes(new_classes, old_classes):
"""
Compare the old classes against the new ones. This function is essential
due to the fact that a jupyter-notebook user may rewrite a class and
the latest version is the one that needs to be kept.
:param new_classes: dictionary containing all classes (last version)
:param old_classes: dictionary containing the existing classes.
:return: dictionary with the merging result (keeping all classes and
updating the old ones).
"""
if len(old_classes) == 0:
return new_classes
else:
for cName in list(new_classes.keys()):
if DEBUG and cName in old_classes and \
(not new_classes[cName] == old_classes[cName]):
print("WARNING! Class " + cName +
" has been redefined (the previous will be deprecated).")
old_classes[cName] = new_classes[cName]
return old_classes
def _update_functions(new_functions, old_functions):
"""
Compare the old functions against the new ones. This function is essential
due to the fact that a jupyter-notebook user may rewrite a function and
the latest version is the one that needs to be kept.
:param new_functions: dictionary containing all functions (last version)
:param old_functions: dictionary containing the existing functions.
:return: dictionary with the merging result (keeping all functions and
updating the old ones).
"""
if len(old_functions) == 0:
return new_functions
else:
for fName in list(new_functions.keys()):
if DEBUG and fName in old_functions and\
(not new_functions[fName] == old_functions[fName]):
print("WARNING! Function " + fName +
" has been redefined (the previous will be deprecated).")
old_functions[fName] = new_functions[fName]
return old_functions
def _update_tasks(new_tasks, old_tasks):
"""
Compare the old tasks against the new ones. This function is essential due
to the fact that a jupyter-notebook user may rewrite a task and the latest
version is the one that needs to be kept.
:param new_tasks: new tasks code
:param old_tasks: existing tasks
:return: dictionary with the merging result.
"""
if not new_tasks:
# When new_tasks is empty, the update was triggered by a class
# task. There is no need to update the tasks since the class has
# already been updated
pass
else:
task_name = list(new_tasks.keys())[0]
if DEBUG and task_name in old_tasks and\
(not new_tasks[task_name] == old_tasks[task_name]):
print("WARNING! Task " + task_name +
" has been redefined (the previous will be deprecated).")
old_tasks[task_name] = new_tasks[task_name]
return old_tasks
# #######################
# FILE UPDATE FUNCTIONS #
# #######################
def _update_code_file(new_imports, new_globals, new_classes, new_functions,
new_tasks, file_path):
"""
Writes the results to the code file used by the workers.
:param new_imports: new imports
:param new_globals: new global variables
:param new_classes: new classes
:param new_functions: new functions
:param new_tasks: new tasks
:param file_path: File to update.
:return: None
"""
code_file = open(file_path, 'w')
# Write imports
for i in new_imports:
code_file.write(i)
code_file.write('\n')
# Write globals separator
code_file.write(SEPARATORS['globals_separator'] + '\n')
# Write globals
if len(new_globals) == 0:
code_file.write('\n')
else:
for k, v in list(new_globals.items()):
for line in v:
code_file.write(line)
code_file.write('\n')
code_file.write('\n')
# Write classes separator
code_file.write(SEPARATORS['classes_separator'] + '\n')
# Write classes
if len(new_classes) == 0:
code_file.write('\n')
else:
for k, v in list(new_classes.items()):
for line in v:
code_file.write(line)
code_file.write('\n')
code_file.write('\n')
# Write functions separator
code_file.write(SEPARATORS['functions_separator'] + '\n')
# Write functions
if len(new_functions) == 0:
code_file.write('\n')
else:
for k, v in list(new_functions.items()):
for line in v:
code_file.write(line)
code_file.write('\n')
code_file.write('\n')
# Write tasks separator
code_file.write(SEPARATORS['tasks_separator'] + '\n')
# Write tasks
if len(new_tasks) == 0:
code_file.write('\n')
else:
for k, v in list(new_tasks.items()):
for line in v:
code_file.write(line)
code_file.write('\n')
code_file.write('\n')
code_file.flush()
code_file.close()
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import win32api
import struct
import pywintypes
TEST=0
LOAD_LIBRARY_AS_DATAFILE = 2
RT_VERSION = 16
def getRaw(o):
return str(buffer(o))
def decode(pathnm):
h = win32api.LoadLibraryEx(pathnm, 0, LOAD_LIBRARY_AS_DATAFILE)
nm = win32api.EnumResourceNames(h, RT_VERSION)[0]
data = win32api.LoadResource(h, RT_VERSION, nm)
vs = VSVersionInfo()
j = vs.fromRaw(data)
if TEST:
print vs
if data[:j] != vs.toRaw():
print "AAAAAGGHHHH"
glbls = {
'VSVersionInfo': VSVersionInfo,
'FixedFileInfo': FixedFileInfo,
'StringFileInfo': StringFileInfo,
'StringTable': StringTable,
'StringStruct': StringStruct,
'VarFileInfo': VarFileInfo,
'VarStruct': VarStruct,
}
vs2 = eval(repr(vs), glbls)
if vs.toRaw() != vs2.toRaw():
print
print 'reconstruction not the same!'
print vs2
win32api.FreeLibrary(h)
return vs
class VSVersionInfo:
"""
WORD wLength; // length of the VS_VERSION_INFO structure
WORD wValueLength; // length of the Value member
WORD wType; // 1 means text, 0 means binary
WCHAR szKey[]; // Contains the Unicode string "VS_VERSION_INFO".
WORD Padding1[];
VS_FIXEDFILEINFO Value;
WORD Padding2[];
WORD Children[]; // zero or more StringFileInfo or VarFileInfo
// structures (or both) that are children of the
// current version structure.
"""
def __init__(self, ffi=None, kids=None):
self.ffi = ffi
self.kids = kids or []
def fromRaw(self, data):
i, (sublen, vallen, wType, nm) = parseCommon(data)
#vallen is length of the ffi, typ is 0, nm is 'VS_VERSION_INFO'
i = ((i + 3) / 4) * 4
# now a VS_FIXEDFILEINFO
self.ffi = FixedFileInfo()
j = self.ffi.fromRaw(data, i)
#print ffi
if TEST and data[i:j] != self.ffi.toRaw():
print "raw:", `data[i:j]`
print "ffi:", `self.ffi.toRaw()`
i = j
while i < sublen:
j = i
i, (csublen, cvallen, ctyp, nm) = parseCommon(data, i)
if str(nm).strip() == "StringFileInfo":
sfi = StringFileInfo()
k = sfi.fromRaw(csublen, cvallen, nm, data, i, j+csublen)
if TEST and data[j:k] != sfi.toRaw():
rd = data[j:k]
sd = sfi.toRaw()
for x in range(0, len(rd), 16):
rds = rd[x:x+16]
sds = sd[x:x+16]
if rds != sds:
print "rd[%s:%s+16]: %r" % (x, x, rds)
print "sd[%s:%s+16]: %r" % (x, x, sds)
print
print ("raw: len %d, wLength %d"
% (len(rd), struct.unpack('h', rd[:2])[0]))
print ("sfi: len %d, wLength %d"
% (len(sd), struct.unpack('h', sd[:2])[0]))
self.kids.append(sfi)
i = k
else:
vfi = VarFileInfo()
k = vfi.fromRaw(csublen, cvallen, nm, data, i, j+csublen)
self.kids.append(vfi)
if TEST and data[j:k] != vfi.toRaw():
print "raw:", `data[j:k]`
print "vfi:", `vfi.toRaw()`
i = k
i = j + csublen
i = ((i + 3) / 4) * 4
return i
def toRaw(self):
nm = pywintypes.Unicode('VS_VERSION_INFO')
rawffi = self.ffi.toRaw()
vallen = len(rawffi)
typ = 0
sublen = 6 + 2*len(nm) + 2
pad = ''
if sublen % 4:
pad = '\000\000'
sublen = sublen + len(pad) + vallen
pad2 = ''
if sublen % 4:
pad2 = '\000\000'
tmp = "".join([kid.toRaw() for kid in self.kids ])
sublen = sublen + len(pad2) + len(tmp)
return (struct.pack('hhh', sublen, vallen, typ)
+ getRaw(nm) + '\000\000' + pad + rawffi + pad2 + tmp)
def __repr__(self, indent=''):
indent = indent + ' '
tmp = [kid.__repr__(indent+' ')
for kid in self.kids]
tmp = ', \n'.join(tmp)
return ("VSVersionInfo(\n%sffi=%s,\n%skids=[\n%s\n%s]\n)"
% (indent, self.ffi.__repr__(indent), indent,
tmp, indent))
def parseCommon(data, start=0):
i = start + 6
(wLength, wValueLength, wType) = struct.unpack('3h', data[start:i])
#print "wLength, wValueLength, wType, i:", wLength, wValueLength, wType, i
i, szKey = parseUString(data, i, i+wLength)
#i = ((i + 3) / 4) * 4
#print `data[start+6:start+wLength]`
return i, (wLength, wValueLength, wType, szKey)
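# Editor's note: every node of the version resource starts with the same
# 6-byte header (wLength, wValueLength, wType packed as three WORDs),
# followed by a NUL-terminated UTF-16 key string, which parseUString below
# consumes; parseCommon returns the offset just past that key.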
def parseUString(data, start, limit):
i = start
while i < limit:
if data[i:i+2] == '\000\000':
break
i += 2
szKey = pywintypes.UnicodeFromRaw(data[start:i])
i += 2
#print "szKey:", repr(szKey), "(consumed", i-start, "bytes - to", i, ")"
return i, szKey
class FixedFileInfo:
"""
DWORD dwSignature; // Contains the value 0xFEEF04BD
DWORD dwStrucVersion; // binary version number of this structure.
// The high-order word of this member contains
// the major version number, and the low-order
// word contains the minor version number.
DWORD dwFileVersionMS; // most significant 32 bits of the file's binary
// version number
DWORD dwFileVersionLS; //
DWORD dwProductVersionMS; // most significant 32 bits of the binary version
// number of the product with which this file was
// distributed
DWORD dwProductVersionLS; //
DWORD dwFileFlagsMask; // bitmask that specifies the valid bits in
// dwFileFlags. A bit is valid only if it was
// defined when the file was created.
DWORD dwFileFlags; // VS_FF_DEBUG, VS_FF_PATCHED etc.
DWORD dwFileOS; // VOS_NT, VOS_WINDOWS32 etc.
DWORD dwFileType; // VFT_APP etc.
DWORD dwFileSubtype; // 0 unless VFT_DRV or VFT_FONT or VFT_VXD
DWORD dwFileDateMS;
DWORD dwFileDateLS;
"""
def __init__(self, filevers=(0, 0, 0, 0), prodvers=(0, 0, 0, 0),
mask=0x3f, flags=0x0, OS=0x40004, fileType=0x1,
subtype=0x0, date=(0, 0)):
self.sig = 0xfeef04bdL
self.strucVersion = 0x10000
self.fileVersionMS = (filevers[0] << 16) | (filevers[1] & 0xffff)
self.fileVersionLS = (filevers[2] << 16) | (filevers[3] & 0xffff)
self.productVersionMS = (prodvers[0] << 16) | (prodvers[1] & 0xffff)
self.productVersionLS = (prodvers[2] << 16) | (prodvers[3] & 0xffff)
self.fileFlagsMask = mask
self.fileFlags = flags
self.fileOS = OS
self.fileType = fileType
self.fileSubtype = subtype
self.fileDateMS = date[0]
self.fileDateLS = date[1]
def fromRaw(self, data, i):
(self.sig,
self.strucVersion,
self.fileVersionMS,
self.fileVersionLS,
self.productVersionMS,
self.productVersionLS,
self.fileFlagsMask,
self.fileFlags,
self.fileOS,
self.fileType,
self.fileSubtype,
self.fileDateMS,
self.fileDateLS) = struct.unpack('13l', data[i:i+52])
return i+52
def toRaw(self):
return struct.pack('L12l', self.sig,
self.strucVersion,
self.fileVersionMS,
self.fileVersionLS,
self.productVersionMS,
self.productVersionLS,
self.fileFlagsMask,
self.fileFlags,
self.fileOS,
self.fileType,
self.fileSubtype,
self.fileDateMS,
self.fileDateLS)
def __repr__(self, indent=''):
fv = (self.fileVersionMS >> 16, self.fileVersionMS & 0xffff,
self.fileVersionLS >> 16, self.fileVersionLS & 0xFFFF)
pv = (self.productVersionMS >> 16, self.productVersionMS & 0xffff,
self.productVersionLS >> 16, self.productVersionLS & 0xFFFF)
fd = (self.fileDateMS, self.fileDateLS)
tmp = ["FixedFileInfo(",
"filevers=%s," % repr(fv),
"prodvers=%s," % repr(pv),
"mask=%s," % hex(self.fileFlagsMask),
"flags=%s," % hex(self.fileFlags),
"OS=%s," % hex(self.fileOS),
"fileType=%s," % hex(self.fileType),
"subtype=%s," % hex(self.fileSubtype),
"date=%s" % repr(fd),
")"
]
return ('\n'+indent+' ').join(tmp)
##StringFileInfo {
##};
class StringFileInfo:
"""
WORD wLength; // length of the version resource
WORD wValueLength; // length of the Value member in the current
// VS_VERSION_INFO structure
WORD wType; // 1 means text, 0 means binary
WCHAR szKey[]; // Contains the Unicode string 'StringFileInfo'.
WORD Padding[];
StringTable Children[]; // list of zero or more String structures
"""
def __init__(self, kids=None):
self.name = "StringFileInfo"
self.kids = kids or []
def fromRaw(self, sublen, vallen, name, data, i, limit):
self.name = name
while i < limit:
st = StringTable()
j = st.fromRaw(data, i, limit)
if TEST and data[i:j] != st.toRaw():
rd = data[i:j]
sd = st.toRaw()
for x in range(0, len(rd), 16):
rds = rd[x:x+16]
sds = sd[x:x+16]
if rds != sds:
print "rd[%s:%s+16]: %r" % (x, x, rds)
print "sd[%s:%s+16]: %r" % (x, x, sds)
print
print ("raw: len %d, wLength %d"
% (len(rd), struct.unpack('h', rd[:2])[0]))
print (" st: len %d, wLength %d"
% (len(sd), struct.unpack('h', sd[:2])[0]))
self.kids.append(st)
i = j
return i
def toRaw(self):
if type(self.name) is STRINGTYPE:
self.name = pywintypes.Unicode(self.name)
vallen = 0
typ = 1
sublen = 6 + 2*len(self.name) + 2
pad = ''
if sublen % 4:
pad = '\000\000'
tmp = ''.join([kid.toRaw() for kid in self.kids])
sublen = sublen + len(pad) + len(tmp)
if tmp[-2:] == '\000\000':
sublen = sublen - 2
return (struct.pack('hhh', sublen, vallen, typ)
+ getRaw(self.name) + '\000\000' + pad + tmp)
def __repr__(self, indent=''):
newindent = indent + ' '
tmp = [kid.__repr__(newindent)
for kid in self.kids]
tmp = ', \n'.join(tmp)
return ("%sStringFileInfo(\n%s[\n%s\n%s])"
% (indent, newindent, tmp, newindent))
class StringTable:
"""
WORD wLength;
WORD wValueLength;
WORD wType;
WCHAR szKey[];
String Children[]; // list of zero or more String structures.
"""
def __init__(self, name=None, kids=None):
self.name = name or ''
self.kids = kids or []
def fromRaw(self, data, i, limit):
#print "Parsing StringTable"
i, (cpsublen, cpwValueLength, cpwType, self.name) = parseCodePage(data, i, limit) # should be code page junk
#i = ((i + 3) / 4) * 4
while i < limit:
ss = StringStruct()
j = ss.fromRaw(data, i, limit)
if TEST and data[i:j] != ss.toRaw():
print "raw:", `data[i:j]`
print " ss:", `ss.toRaw()`
i = j
self.kids.append(ss)
i = ((i + 3) / 4) * 4
return i
def toRaw(self):
if type(self.name) is STRINGTYPE:
self.name = pywintypes.Unicode(self.name)
vallen = 0
typ = 1
sublen = 6 + 2*len(self.name) + 2
tmp = []
for kid in self.kids:
raw = kid.toRaw()
if len(raw) % 4:
raw = raw + '\000\000'
tmp.append(raw)
tmp = ''.join(tmp)
sublen += len(tmp)
if tmp[-2:] == '\000\000':
sublen -= 2
return (struct.pack('hhh', sublen, vallen, typ)
+ getRaw(self.name) + '\000\000' + tmp)
def __repr__(self, indent=''):
newindent = indent + ' '
tmp = map(repr, self.kids)
tmp = (',\n%s' % newindent).join(tmp)
return ("%sStringTable(\n%s%r,\n%s[%s])"
% (indent, newindent, self.name, newindent, tmp))
class StringStruct:
"""
WORD wLength;
WORD wValueLength;
WORD wType;
WCHAR szKey[];
WORD Padding[];
String Value[];
"""
def __init__(self, name=None, val=None):
self.name = name or ''
self.val = val or ''
def fromRaw(self, data, i, limit):
i, (sublen, vallen, typ, self.name) = parseCommon(data, i)
limit = i + sublen
i = ((i + 3) / 4) * 4
i, self.val = parseUString(data, i, limit)
return i
def toRaw(self):
if type(self.name) is STRINGTYPE:
self.name = pywintypes.Unicode(self.name)
if type(self.val) is STRINGTYPE:
self.val = pywintypes.Unicode(self.val)
vallen = len(self.val) + 1
typ = 1
sublen = 6 + 2*len(self.name) + 2
pad = ''
if sublen % 4:
pad = '\000\000'
sublen = sublen + len(pad) + 2*vallen
return (struct.pack('hhh', sublen, vallen, typ)
+ getRaw(self.name) + '\000\000' + pad
+ getRaw(self.val) + '\000\000')
def __repr__(self, indent=''):
return "StringStruct(%r, %r)" % (self.name, self.val)
def parseCodePage(data, i, limit):
#print "Parsing CodePage"
i, (sublen, wValueLength, wType, nm) = parseCommon(data, i)
#i = ((i + 3) / 4) * 4
return i, (sublen, wValueLength, wType, nm)
class VarFileInfo:
"""
WORD wLength; // length of the version resource
WORD wValueLength; // length of the Value member in the current
// VS_VERSION_INFO structure
WORD wType; // 1 means text, 0 means binary
WCHAR szKey[]; // Contains the Unicode string 'VarFileInfo'.
WORD Padding[];
Var Children[]; // list of zero or more Var structures
"""
def __init__(self, kids=None):
self.kids = kids or []
def fromRaw(self, sublen, vallen, name, data, i, limit):
self.sublen = sublen
self.vallen = vallen
self.name = name
i = ((i + 3) / 4) * 4
while i < limit:
vs = VarStruct()
j = vs.fromRaw(data, i, limit)
self.kids.append(vs)
if TEST and data[i:j] != vs.toRaw():
print "raw:", `data[i:j]`
print "cmp:", `vs.toRaw()`
i = j
return i
def toRaw(self):
self.vallen = 0
self.wType = 1
self.name = pywintypes.Unicode('VarFileInfo')
sublen = 6 + 2*len(self.name) + 2
pad = ''
if sublen % 4:
pad = '\000\000'
tmp = ''.join([kid.toRaw() for kid in self.kids])
self.sublen = sublen + len(pad) + len(tmp)
return (struct.pack('hhh', self.sublen, self.vallen, self.wType)
+ getRaw(self.name) + '\000\000' + pad + tmp)
def __repr__(self, indent=''):
tmp = map(repr, self.kids)
return "%sVarFileInfo([%s])" % (indent, ', '.join(tmp))
STRINGTYPE = type('')
class VarStruct:
"""
WORD wLength; // length of the version resource
WORD wValueLength; // length of the Value member in the current
// VS_VERSION_INFO structure
WORD wType; // 1 means text, 0 means binary
WCHAR szKey[]; // Contains the Unicode string 'Translation'
// or a user-defined key string value
WORD Padding[]; //
WORD Value[]; // list of one or more values that are language
// and code-page identifiers
"""
def __init__(self, name=None, kids=None):
self.name = name or ''
self.kids = kids or []
def fromRaw(self, data, i, limit):
i, (self.sublen, self.wValueLength, self.wType, self.name) = parseCommon(data, i)
i = ((i + 3) / 4) * 4
for j in range(self.wValueLength/2):
kid = struct.unpack('h', data[i:i+2])[0]
self.kids.append(kid)
i += 2
return i
def toRaw(self):
self.wValueLength = len(self.kids) * 2
self.wType = 0
if type(self.name) is STRINGTYPE:
self.name = pywintypes.Unicode(self.name)
sublen = 6 + 2*len(self.name) + 2
pad = ''
if sublen % 4:
pad = '\000\000'
self.sublen = sublen + len(pad) + self.wValueLength
tmp = ''.join([struct.pack('h', kid) for kid in self.kids])
return (struct.pack('hhh', self.sublen, self.wValueLength, self.wType)
+ getRaw(self.name) + '\000\000' + pad + tmp)
def __repr__(self, indent=''):
return "VarStruct(%r, %r)" % (self.name, self.kids)
def SetVersion(exenm, versionfile):
if isinstance(versionfile, VSVersionInfo):
vs = versionfile
else:
txt = open(versionfile, 'rU').read()
vs = eval(txt)
hdst = win32api.BeginUpdateResource(exenm, 0)
win32api.UpdateResource(hdst, RT_VERSION, 1, vs.toRaw())
win32api.EndUpdateResource (hdst, 0)
if __name__ == '__main__':
import sys
TEST = 1
if len(sys.argv) < 2:
decode('c:/Program Files/Netscape/Communicator/Program/netscape.exe')
else:
print "Examining", sys.argv[1]
decode(sys.argv[1])
|
|
from __future__ import unicode_literals
import re
import logging
import functools
from copy import deepcopy
import six
import pytz
import xmltodict
from lxml import etree
from dateutil import parser
from pycountry import languages
from nameparser import HumanName
from scrapi import requests
URL_REGEX = re.compile(r'(https?:\/\/\S*\.[^\s\[\]\<\>\}\{\^]*)')
DOI_REGEX = re.compile(r'(doi:10\.\S*)')
DOE_AFFILIATIONS_REGEX = re.compile(r'\s*\[(.*?)\]')
DOE_EMAIL_REGEX = re.compile(r'((?:,? (?:Email|email|E-mail|e-mail):\s*)?(\S*@\S*))')
DOE_ORCID_REGEX = re.compile(r'(\(ORCID:\s*(\S*)\))')
def CONSTANT(x):
''' Takes a value, returns a function that always returns that value
Useful inside schemas for defining constants
>>> CONSTANT(7)('my', 'name', verb='is')
7
>>> CONSTANT([123, 456])()
[123, 456]
'''
def inner(*y, **z):
return x
return inner
def build_properties(*args):
ret = []
for arg in args:
name, expr = arg[0], arg[1]
kwargs = arg[2] if len(arg) > 2 else {}
description, uri = kwargs.get('description'), kwargs.get('uri')
ret.append(build_property(name, expr, description=description, uri=uri))
return ret
def build_property(name, expr, description=None, uri=None):
property = {
'name': CONSTANT(name),
'properties': {
name: expr
},
}
if description:
property['description'] = CONSTANT(description)
if uri:
property['uri'] = CONSTANT(uri)
return property
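# Hedged usage sketch (values are illustrative):
#   >>> prop = build_property('title', CONSTANT('Hello'),
#   ...                       uri='http://example.com/title')
#   >>> prop['name'](), prop['uri']()
#   ('title', 'http://example.com/title')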
def single_result(l, default=''):
''' A function that will return the first element of a list if it exists
>>> print(single_result(['hello', None]))
hello
>>> print(single_result([], default='hello'))
hello
>>> print(single_result([]))
<BLANKLINE>
'''
return l[0] if l else default
def compose(*functions):
''' evaluates functions from right to left.
>>> add = lambda x, y: x + y
>>> add3 = lambda x: x + 3
>>> divide2 = lambda x: x/2
>>> subtract4 = lambda x: x - 4
>>> subtract1 = compose(add3, subtract4)
>>> subtract1(1)
0
>>> compose(subtract1, add3)(4)
6
>>> compose(int, add3, add3, divide2)(4)
8
>>> compose(int, divide2, add3, add3)(4)
5
>>> compose(int, divide2, compose(add3, add3), add)(7, 3)
8
'''
def inner(func1, func2):
return lambda *x, **y: func1(func2(*x, **y))
return functools.reduce(inner, functions)
element_to_dict = compose(xmltodict.parse, etree.tostring)
def non_string(item):
return not isinstance(item, str)
def updated_schema(old, new):
''' Creates a dictionary resulting from adding all keys/values of the second to the first
The second dictionary will overwrite the first.
>>> old, new = {'name': 'ric', 'job': None}, {'name': 'Rick'}
>>> updated = updated_schema(old, new)
>>> len(updated.keys())
2
>>> print(updated['name'])
Rick
>>> updated['job'] is None
True
'''
d = deepcopy(old)
for key, value in new.items():
if isinstance(value, dict) and old.get(key) and isinstance(old[key], dict):
d[key] = updated_schema(old[key], new[key])
else:
d[key] = value
return d
def default_name_parser(names):
''' Takes a list of names, and attempts to parse them
'''
return list(map(maybe_parse_name, names))
def maybe_parse_name(name):
''' Tries to parse a name. If the parsing fails, returns a dictionary
with just the unparsed name (as per the SHARE schema)
'''
return null_on_error(parse_name)(name) or {'name': name}
def parse_name(name):
''' Takes a human name, parses it into given/middle/last names
'''
person = HumanName(name)
return {
'name': name,
'givenName': person.first,
'additionalName': person.middle,
'familyName': person.last
}
def format_tags(all_tags, sep=','):
tags = []
if isinstance(all_tags, six.string_types):
tags = all_tags.split(sep)
elif isinstance(all_tags, list):
for tag in all_tags:
if sep in tag:
tags.extend(tag.split(sep))
else:
tags.append(tag)
return list(set([six.text_type(tag.lower().strip()) for tag in tags if tag.strip()]))
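# Illustration (inputs are made up): format_tags lowercases, strips, splits
# on the separator and de-duplicates:
#   >>> sorted(format_tags('Apple, banana ,APPLE'))
#   [u'apple', u'banana']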
def format_doi_as_url(doi):
if doi:
plain_doi = doi.replace('doi:', '').replace('DOI:', '').strip()
return 'http://dx.doi.org/{}'.format(plain_doi)
def gather_identifiers(args):
identifiers = []
for arg in args:
if isinstance(arg, list):
for identifier in arg:
identifiers.append(identifier)
elif arg:
identifiers.append(arg)
return identifiers
def maybe_group(match):
'''
Evaluates a regular expression match object; returns the group or None
'''
return match.group() if match else None
def gather_object_uris(identifiers):
'''
Gathers object URIs if there are any
>>> gathered = gather_object_uris(['nopenope', 'doi:10.10.gettables', 'http://dx.doi.org/yep'])
>>> print(gathered[0])
http://dx.doi.org/10.10.gettables
>>> print(gathered[1])
http://dx.doi.org/yep
'''
object_uris = []
for item in identifiers:
if 'doi' in item.lower():
url_doi, just_doi = URL_REGEX.search(item), DOI_REGEX.search(item)
url_doi = maybe_group(url_doi)
just_doi = maybe_group(just_doi)
if url_doi or just_doi:
object_uris.append(url_doi or format_doi_as_url(just_doi))
return object_uris
def seperate_provider_object_uris(identifiers):
object_uris = gather_object_uris(identifiers)
provider_uris = []
for item in identifiers:
found_url = maybe_group(URL_REGEX.search(item))
if found_url:
if 'viewcontent' in found_url:
object_uris.append(found_url)
else:
if 'dx.doi.org' not in found_url:
provider_uris.append(found_url)
return provider_uris, object_uris
def oai_process_uris(*args, **kwargs):
use_doi = kwargs.get('use_doi', False)
identifiers = gather_identifiers(args)
provider_uris, object_uris = seperate_provider_object_uris(identifiers)
potential_uris = (provider_uris + object_uris)
if use_doi:
for uri in object_uris:
if 'dx.doi.org' in uri:
potential_uris = [uri]
try:
canonical_uri = potential_uris[0]
except IndexError:
raise ValueError('No Canonical URI was returned for this record.')
return {
'canonicalUri': canonical_uri,
'objectUris': object_uris,
'providerUris': provider_uris
}
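# Hedged sketch (identifiers are made up): given a provider URL and a DOI,
#   oai_process_uris(['http://example.com/record/1', 'doi:10.123/abc'])
# returns
#   {'canonicalUri': 'http://example.com/record/1',
#    'objectUris': ['http://dx.doi.org/10.123/abc'],
#    'providerUris': ['http://example.com/record/1']}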
def oai_extract_dois(*args):
identifiers = gather_identifiers(args)
dois = []
for item in identifiers:
if 'doi' in item.lower():
doi = item.replace('doi:', '').replace('DOI:', '').strip()
if 'http://dx.doi.org/' in doi:
dois.append(doi)
else:
dois.append('http://dx.doi.org/{}'.format(doi))
return dois
def oai_process_contributors(*args):
names = gather_identifiers(args)
return default_name_parser(names)
def dif_process_contributors(first_names, last_names):
raw_names = zip(first_names, last_names)
return [{'name': ' '.join(map(str, name)),
'givenName': name[0],
'familyName': name[1]} for name in raw_names]
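# Illustration (names are made up):
#   dif_process_contributors(['Ada'], ['Lovelace'])
#   -> [{'name': 'Ada Lovelace', 'givenName': 'Ada', 'familyName': 'Lovelace'}]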
def pack(*args, **kwargs):
return args, kwargs
def language_codes(langs):
'''Given an array of language names, returns an array of ISO 639-3 codes
e.g. ['English', 'Russian'] -> ['eng', 'rus']
'''
return list(filter(lambda x: x, map(get_code, langs)))
def get_code(language):
try:
return languages.get(name=language).bibliographic
except KeyError:
return None
def oai_get_records_and_token(url, throttle, force, namespaces, verify):
""" Helper function to get the records and any resumptionToken
from an OAI request.
Takes a url and any request parameters and returns the records
along with the resumptionToken if there is one.
"""
data = requests.get(url, throttle=throttle, force=force, verify=verify)
doc = etree.XML(data.content)
records = doc.xpath(
'//ns0:record',
namespaces=namespaces
)
token = doc.xpath(
'//ns0:resumptionToken/node()',
namespaces=namespaces
)
return records, token
def extract_doi_from_text(identifiers):
identifiers = [identifiers] if not isinstance(identifiers, list) else identifiers
for item in identifiers:
try:
found_url = DOI_REGEX.search(item).group()
return 'http://dx.doi.org/{}'.format(found_url.replace('doi:', ''))
except AttributeError:
continue
def null_on_error(task, log=True):
'''Decorator that makes a function return None on exception'''
def inner(*args, **kwargs):
try:
return task(*args, **kwargs)
except Exception as e:
if log:
logger = logging.getLogger('scrapi.base.helpers.null_on_error')
logger.warn(e)
return None
return inner
def coerce_to_list(thing):
''' If a value is not already a list or tuple, puts that value in a length 1 list
>>> niceties = coerce_to_list('hello')
>>> len(niceties)
1
>>> print(niceties[0])
hello
>>> niceties2 = coerce_to_list(['hello'])
>>> niceties2 == niceties
True
>>> niceties3 = (coerce_to_list(('hello', 'goodbye')))
>>> len(niceties3)
2
>>> print(niceties3[0])
hello
>>> print(niceties3[1])
goodbye
'''
if not (isinstance(thing, list) or isinstance(thing, tuple)):
return [thing]
return list(thing)
def datetime_formatter(datetime_string):
'''Takes an arbitrary date/time string and parses it, adds time
zone information and returns a valid ISO-8601 datetime string
'''
date_time = parser.parse(datetime_string)
if not date_time.tzinfo:
date_time = date_time.replace(tzinfo=pytz.UTC)
return date_time.isoformat()
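# Example (input is illustrative): naive timestamps get UTC attached:
#   >>> datetime_formatter('2015-03-14 09:26')
#   '2015-03-14T09:26:00+00:00'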
def doe_name_parser(name):
if name.strip() == 'None':
return {'name': ''}
name, orcid = extract_and_replace_one(name, DOE_ORCID_REGEX)
name, email = extract_and_replace_one(name, DOE_EMAIL_REGEX)
name, affiliations = doe_extract_affiliations(name)
parsed_name = maybe_parse_name(name)
if affiliations:
parsed_name['affiliation'] = list(map(doe_parse_affiliation, affiliations))
if orcid:
parsed_name['sameAs'] = ['https://orcid.org/{}'.format(orcid)]
if email:
parsed_name['email'] = email
return parsed_name
def extract_and_replace_one(text, pattern):
''' Works with regexes that have two groups, where the text of the first
group is replaced and the text of the second group is returned
In the case where there is a match:
>>> text = 'I feelvery happy'
>>> pattern = re.compile(r'.*(very\s*(\S*)).*')
>>> modified_text, match = extract_and_replace_one(text, pattern)
>>> print(modified_text)
I feel
>>> print(match)
happy
In the case where there is not a match:
>>> text = 'I feel happy'
>>> modified_text, match = extract_and_replace_one(text, pattern)
>>> modified_text == text
True
>>> match is None
True
'''
matches = pattern.findall(text)
if matches and len(matches) == 1:
return text.replace(matches[0][0], ''), matches[0][1]
return text, None
def doe_extract_affiliations(name):
affiliations = DOE_AFFILIATIONS_REGEX.findall(name)
for affiliation in affiliations:
name = name.replace('[{}]'.format(affiliation), '')
return name, affiliations
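# Illustration (hedged): assuming DOE_AFFILIATIONS_REGEX captures bracketed
# affiliations, e.g. something like r'\[(.*?)\]', then
#   doe_extract_affiliations('Doe, Jane [Oak Ridge National Lab]')
# returns ('Doe, Jane ', ['Oak Ridge National Lab']).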
def doe_parse_affiliation(affiliation):
return {'name': affiliation} # TODO: Maybe parse out address?
def doe_process_contributors(names):
return list(map(doe_name_parser, names))
def xml_text_only_list(elems):
'''Return inner text of all elements in list'''
return [xml_text_only(elem) for elem in elems]
def xml_text_only(elem):
'''Return inner text of element with tags stripped'''
etree.strip_tags(elem, '*')
inner_text = elem.text
if inner_text:
return inner_text.strip()
return None
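# Usage sketch with lxml's etree (the etree used throughout this module):
#   elem = etree.XML('<title>Deep <b>Learning</b>, 2015</title>')
#   xml_text_only(elem)  # -> 'Deep Learning, 2015'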
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from collections import defaultdict
from datetime import datetime
import pymongo
from pylons import tmpl_context as c, app_globals as g
from pylons import request
from ming import schema as S
from ming.orm import state, session
from ming.orm import FieldProperty, ForeignIdProperty, RelationProperty
from ming.orm.declarative import MappedClass
from ming.utils import LazyProperty
from webhelpers import feedgenerator as FG
from allura.lib import helpers as h
from allura.lib import security
from allura.lib import utils
from allura.lib import plugin
from allura.lib import exceptions as forge_exc
from allura.lib.search import SearchIndexable
from .session import main_orm_session
from .session import project_orm_session
from .session import artifact_orm_session
from .index import ArtifactReference
from .types import ACL, MarkdownCache
from .project import AppConfig
from .notification import MailFooter
from filesystem import File
log = logging.getLogger(__name__)
class Artifact(MappedClass, SearchIndexable):
"""
Base class for anything you want to keep track of.
- Automatically indexed into Solr (see index() method)
- Has a discussion thread that can have files attached to it
:var mod_date: last-modified :class:`datetime`
:var acl: dict of permission name => [roles]
:var labels: list of plain old strings
"""
class __mongometa__:
session = artifact_orm_session
name = 'artifact'
indexes = [
('app_config_id', 'labels'),
]
def before_save(data):
_session = artifact_orm_session._get()
skip_mod_date = getattr(_session, 'skip_mod_date', False)
skip_last_updated = getattr(_session, 'skip_last_updated', False)
if not skip_mod_date:
data['mod_date'] = datetime.utcnow()
else:
log.debug('Not updating mod_date')
if c.project and not skip_last_updated:
c.project.last_updated = datetime.utcnow()
type_s = 'Generic Artifact'
# Artifact base schema
_id = FieldProperty(S.ObjectId)
mod_date = FieldProperty(datetime, if_missing=datetime.utcnow)
app_config_id = ForeignIdProperty(
'AppConfig', if_missing=lambda: c.app.config._id)
plugin_verson = FieldProperty(S.Deprecated)
tool_version = FieldProperty(S.Deprecated)
acl = FieldProperty(ACL)
tags = FieldProperty(S.Deprecated)
labels = FieldProperty([str])
references = FieldProperty(S.Deprecated)
backreferences = FieldProperty(S.Deprecated)
app_config = RelationProperty('AppConfig')
# Not null if artifact originated from external import. The import ID is
# implementation specific, but should probably be an object indicating
# the source, original ID, and any other info needed to identify where
# the artifact came from. But if you only have one source, a str might do.
import_id = FieldProperty(None, if_missing=None)
deleted = FieldProperty(bool, if_missing=False)
def __json__(self, posts_limit=None):
"""Return a JSON-encodable :class:`dict` representation of this
Artifact.
"""
return dict(
_id=str(self._id),
mod_date=self.mod_date,
labels=list(self.labels),
related_artifacts=[a.url() for a in self.related_artifacts()],
discussion_thread=self.discussion_thread.__json__(limit=posts_limit),
discussion_thread_url=h.absurl('/rest%s' %
self.discussion_thread.url()),
)
def parent_security_context(self):
"""Return the :class:`allura.model.project.AppConfig` instance for
this Artifact.
ACL processing for this Artifact continues at the AppConfig object.
This lets AppConfigs provide a 'default' ACL for all artifacts in the
tool.
"""
return self.app_config
@classmethod
def attachment_class(cls):
raise NotImplementedError, 'attachment_class'
@LazyProperty
def ref(self):
"""Return :class:`allura.model.index.ArtifactReference` for this
Artifact.
"""
return ArtifactReference.from_artifact(self)
@LazyProperty
def refs(self):
"""Artifacts referenced by this one.
:return: list of :class:`allura.model.index.ArtifactReference`
"""
return self.ref.references
@LazyProperty
def backrefs(self):
"""Artifacts that reference this one.
:return: list of :attr:`allura.model.index.ArtifactReference._id`'s
"""
q = ArtifactReference.query.find(dict(references=self.index_id()))
return [aref._id for aref in q]
def related_artifacts(self):
"""Return all Artifacts that are related to this one.
"""
related_artifacts = []
for ref_id in self.refs + self.backrefs:
ref = ArtifactReference.query.get(_id=ref_id)
if ref is None:
continue
artifact = ref.artifact
if artifact is None:
continue
artifact = artifact.primary()
if artifact is None:
continue
# don't link to artifacts in deleted tools
if hasattr(artifact, 'app_config') and artifact.app_config is None:
continue
# TODO: This should be refactored. We shouldn't be checking
# artifact type strings in platform code.
if artifact.type_s == 'Commit' and not artifact.repo:
ac = AppConfig.query.get(
_id=ref.artifact_reference['app_config_id'])
app = ac.project.app_instance(ac) if ac else None
if app:
artifact.set_context(app.repo)
            if artifact not in related_artifacts and not getattr(artifact, 'deleted', False):
related_artifacts.append(artifact)
return sorted(related_artifacts, key=lambda a: a.url())
def subscribe(self, user=None, topic=None, type='direct', n=1, unit='day'):
"""Subscribe ``user`` to the :class:`allura.model.notification.Mailbox`
for this Artifact.
:param user: :class:`allura.model.auth.User`
If ``user`` is None, ``c.user`` will be subscribed.
"""
from allura.model import Mailbox
if user is None:
user = c.user
Mailbox.subscribe(
user_id=user._id,
project_id=self.app_config.project_id,
app_config_id=self.app_config._id,
artifact=self, topic=topic,
type=type, n=n, unit=unit)
def unsubscribe(self, user=None):
"""Unsubscribe ``user`` from the
:class:`allura.model.notification.Mailbox` for this Artifact.
:param user: :class:`allura.model.auth.User`
If ``user`` is None, ``c.user`` will be unsubscribed.
"""
from allura.model import Mailbox
if user is None:
user = c.user
Mailbox.unsubscribe(
user_id=user._id,
project_id=self.app_config.project_id,
app_config_id=self.app_config._id,
artifact_index_id=self.index_id())
def primary(self):
"""If an artifact is a "secondary" artifact (discussion of a ticket, for
instance), return the artifact that is the "primary".
"""
return self
@classmethod
def artifacts_labeled_with(cls, label, app_config):
"""Return all artifacts of type ``cls`` that have the label ``label`` and
are in the tool denoted by ``app_config``.
:param label: str
:param app_config: :class:`allura.model.project.AppConfig` instance
"""
return cls.query.find({'labels': label, 'app_config_id': app_config._id})
def email_link(self, subject='artifact'):
"""Return a 'mailto' URL for this Artifact, with optional subject.
"""
if subject:
return 'mailto:%s?subject=[%s:%s:%s] Re: %s' % (
self.email_address,
self.app_config.project.shortname,
self.app_config.options.mount_point,
self.shorthand_id(),
subject)
else:
return 'mailto:%s' % self.email_address
@property
def email_domain(self):
"""Return domain part of email address for this Artifact"""
url = self.app.url[1:-1].split('/')
return '.'.join(reversed(url)).replace('_', '-')
@property
def project(self):
"""Return the :class:`allura.model.project.Project` instance to which
this Artifact belongs.
"""
return getattr(self.app_config, 'project', None)
@property
def project_id(self):
"""Return the ``_id`` of the :class:`allura.model.project.Project`
instance to which this Artifact belongs.
"""
return self.app_config.project_id
@LazyProperty
def app(self):
"""Return the :class:`allura.model.app.Application` instance to which
this Artifact belongs.
"""
if not self.app_config:
return None
if getattr(c, 'app', None) and c.app.config._id == self.app_config._id:
return c.app
else:
return self.app_config.load()(self.project, self.app_config)
def index(self):
project = self.project
return dict(
id=self.index_id(),
mod_date_dt=self.mod_date,
title='Artifact %s' % self._id,
project_id_s=str(project._id),
project_name_t=project.name,
project_shortname_t=project.shortname,
tool_name_s=self.app_config.tool_name,
mount_point_s=self.app_config.options.mount_point,
is_history_b=False,
url_s=self.url(),
type_s=self.type_s,
labels_t=' '.join(l for l in self.labels),
snippet_s='',
deleted_b=self.deleted)
def url(self):
"""Return the URL for this Artifact.
Subclasses must implement this.
"""
raise NotImplementedError, 'url' # pragma no cover
def shorthand_id(self):
"""How to refer to this artifact within the app instance context.
For a wiki page, it might be the title. For a ticket, it might be the
ticket number. For a discussion, it might be the message ID. Generally
this should have a strong correlation to the URL.
"""
return str(self._id) # pragma no cover
def link_text(self):
"""Return the link text to use when a shortlink to this artifact
is expanded into an <a></a> tag.
By default this method returns :attr:`type_s` + :meth:`shorthand_id`. Subclasses should
override this method to provide more descriptive link text.
"""
return self.shorthand_id()
def get_discussion_thread(self, data=None):
"""Return the discussion thread and parent_id for this artifact.
:return: (:class:`allura.model.discuss.Thread`, parent_thread_id (int))
"""
from .discuss import Thread
threads = Thread.query.find(dict(ref_id=self.index_id())).all()
if not threads:
idx = self.index()
t = Thread.new(
app_config_id=self.app_config_id,
discussion_id=self.app_config.discussion_id,
ref_id=idx['id'],
subject='%s discussion' % h.get_first(idx, 'title'))
elif len(threads) == 1:
t = threads[0]
else:
# there should not be multiple threads, we'll merge them
destination = threads.pop()
for thread in threads:
for post in thread.posts:
post.thread_id = destination._id
destination.num_replies += 1
destination.last_post_date = max(destination.last_post_date, post.mod_date)
session(post).flush(post)
session(post).expunge(post) # so thread.posts ref later in the code doesn't use stale posts
Thread.query.remove({'_id': thread._id}) # NOT thread.delete() since that would remove its posts too
thread.attachment_class().query.update({'thread_id': thread._id},
{'$set': {'thread_id': destination._id}},
multi=True)
t = destination
parent_id = None
if data:
in_reply_to = data.get('in_reply_to', [])
if in_reply_to:
parent_id = in_reply_to[0]
return t, parent_id
@LazyProperty
def discussion_thread(self):
"""Return the :class:`discussion thread <allura.model.discuss.Thread>`
for this Artifact.
"""
return self.get_discussion_thread()[0]
def add_multiple_attachments(self, file_info):
if not isinstance(file_info, list):
file_info = [file_info]
for attach in file_info:
if hasattr(attach, 'file'):
self.attach(attach.filename, attach.file,
content_type=attach.type)
def attach(self, filename, fp, **kw):
"""Attach a file to this Artifact.
:param filename: file name
:param fp: a file-like object (implements ``read()``)
:param \*\*kw: passed through to Attachment class constructor
"""
att = self.attachment_class().save_attachment(
filename=filename,
fp=fp, artifact_id=self._id, **kw)
return att
@LazyProperty
def attachments(self):
return self.attachment_class().query.find(dict(
app_config_id=self.app_config_id, artifact_id=self._id, type='attachment')).all()
def delete(self):
"""Delete this Artifact.
"""
ArtifactReference.query.remove(dict(_id=self.index_id()))
super(Artifact, self).delete()
def get_mail_footer(self, notification, toaddr):
allow_email_posting = self.app.config.options.get('AllowEmailPosting', True)
return MailFooter.standard(notification, allow_email_posting)
def message_id(self):
'''Persistent, email-friendly (Message-ID header) id of this artifact'''
return h.gen_message_id(self._id)
@classmethod
def is_limit_exceeded(cls, app_config):
"""
        Return True if any of the artifact creation rate limits are exceeded,
        False otherwise.
"""
pkg = cls.__module__.split('.', 1)[0]
opt = u'{}.rate_limits'.format(pkg)
count = cls.query.find(dict(app_config_id=app_config._id)).count()
provider = plugin.ProjectRegistrationProvider.get()
start = provider.registration_date(app_config.project)
        # have to call replace() because the generation_time is offset-aware
        # UTC while h.rate_limit expects offset-naive UTC dates
start = start.replace(tzinfo=None)
try:
h.rate_limit(opt, count, start)
except forge_exc.RatelimitError:
return True
return False
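# Hedged note on is_limit_exceeded(): h.rate_limit reads the '<pkg>.rate_limits'
# option as a mapping of seconds-since-project-registration to the maximum
# artifact count allowed in that window; an illustrative (not shipped) .ini
# entry would be:
#   forgetracker.rate_limits = {"3600": 20, "86400": 50}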
class Snapshot(Artifact):
"""A snapshot of an :class:`Artifact <allura.model.artifact.Artifact>`, used in :class:`VersionedArtifact <allura.model.artifact.VersionedArtifact>`"""
class __mongometa__:
session = artifact_orm_session
name = 'artifact_snapshot'
unique_indexes = [('artifact_class', 'artifact_id', 'version')]
indexes = [('artifact_id', 'version')]
_id = FieldProperty(S.ObjectId)
artifact_id = FieldProperty(S.ObjectId)
artifact_class = FieldProperty(str)
version = FieldProperty(S.Int, if_missing=0)
author = FieldProperty(dict(
id=S.ObjectId,
username=str,
display_name=str,
logged_ip=str))
timestamp = FieldProperty(datetime)
data = FieldProperty(None)
def index(self):
result = Artifact.index(self)
original = self.original()
if original:
original_index = original.index()
result.update(original_index)
result['title'] = '%s (version %d)' % (
h.get_first(original_index, 'title'), self.version)
result.update(
id=self.index_id(),
version_i=self.version,
author_username_t=self.author.username,
author_display_name_t=self.author.display_name,
timestamp_dt=self.timestamp,
is_history_b=True)
return result
def original(self):
        raise NotImplementedError, 'original'  # pragma no cover
def shorthand_id(self):
return '%s#%s' % (self.original().shorthand_id(), self.version)
@property
def attachments(self):
orig = self.original()
if not orig:
return None
return orig.attachments
def __getattr__(self, name):
return getattr(self.data, name)
class VersionedArtifact(Artifact):
"""
An :class:`Artifact <allura.model.artifact.Artifact>` that has versions.
Associated data like attachments and discussion thread are not versioned.
"""
class __mongometa__:
session = artifact_orm_session
name = 'versioned_artifact'
history_class = Snapshot
version = FieldProperty(S.Int, if_missing=0)
def commit(self, update_stats=True):
'''Save off a snapshot of the artifact and increment the version #'''
try:
ip_address = utils.ip_address(request)
        except Exception:
ip_address = '0.0.0.0'
data = dict(
artifact_id=self._id,
artifact_class='%s.%s' % (
self.__class__.__module__,
self.__class__.__name__),
author=dict(
id=c.user._id,
username=c.user.username,
display_name=c.user.get_pref('display_name'),
logged_ip=ip_address),
data=state(self).clone())
while True:
self.version += 1
data['version'] = self.version
data['timestamp'] = datetime.utcnow()
ss = self.__mongometa__.history_class(**data)
try:
session(ss).insert_now(ss, state(ss))
except pymongo.errors.DuplicateKeyError:
log.warning('Trying to create duplicate version %s of %s',
self.version, self.__class__)
session(ss).expunge(ss)
continue
else:
break
log.debug('Snapshot version %s of %s',
self.version, self.__class__)
if update_stats:
if self.version > 1:
g.statsUpdater.modifiedArtifact(
self.type_s, self.mod_date, self.project, c.user)
else:
g.statsUpdater.newArtifact(
self.type_s, self.mod_date, self.project, c.user)
return ss
def get_version(self, n):
if n < 0:
n = self.version + n + 1
ss = self.__mongometa__.history_class.query.get(
artifact_id=self._id,
artifact_class='%s.%s' % (
self.__class__.__module__,
self.__class__.__name__),
version=n)
if ss is None:
raise IndexError, n
return ss
def revert(self, version):
ss = self.get_version(version)
old_version = self.version
for k, v in ss.data.iteritems():
setattr(self, k, v)
self.version = old_version
def history(self):
HC = self.__mongometa__.history_class
q = HC.query.find(dict(artifact_id=self._id)).sort(
'version', pymongo.DESCENDING)
return q
@property
def last_updated(self):
history = self.history()
if history.count():
return self.history().first().timestamp
else:
return self.mod_date
def delete(self):
# remove history so that the snapshots aren't left orphaned
super(VersionedArtifact, self).delete()
HC = self.__mongometa__.history_class
HC.query.remove(dict(artifact_id=self._id))
class Message(Artifact):
"""
A message
:var _id: an email friendly (e.g. message-id) string id
    :var slug: slash-delimited random identifier. Slashes useful for threaded searching and ordering
    :var full_slug: string of slash-delimited "timestamp:slug" components. Useful for sorting by timestamp
"""
class __mongometa__:
session = artifact_orm_session
name = 'message'
type_s = 'Generic Message'
_id = FieldProperty(str, if_missing=h.gen_message_id)
slug = FieldProperty(str, if_missing=h.nonce)
full_slug = FieldProperty(str, if_missing=None)
parent_id = FieldProperty(str)
app_id = FieldProperty(S.ObjectId, if_missing=lambda: c.app.config._id)
timestamp = FieldProperty(datetime, if_missing=datetime.utcnow)
author_id = FieldProperty(S.ObjectId, if_missing=lambda: c.user._id)
text = FieldProperty(str, if_missing='')
@classmethod
def make_slugs(cls, parent=None, timestamp=None):
part = h.nonce()
if timestamp is None:
timestamp = datetime.utcnow()
dt = timestamp.strftime('%Y%m%d%H%M%S')
slug = part
full_slug = dt + ':' + part
if parent:
return (parent.slug + '/' + slug,
parent.full_slug + '/' + full_slug)
else:
return slug, full_slug
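    # Illustration (hedged values): with no parent, make_slugs() returns
    # something like ('1a2b3c4d', '20130704120000:1a2b3c4d'); for a reply both
    # parts nest under the parent's, e.g.
    # ('1a2b3c4d/5e6f7a8b', '20130704120000:1a2b3c4d/20130704120500:5e6f7a8b').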
def author(self):
from .auth import User
return User.query.get(_id=self.author_id) or User.anonymous()
def index(self):
result = Artifact.index(self)
author = self.author()
result.update(
author_user_name_t=author.username,
author_display_name_t=author.get_pref('display_name'),
timestamp_dt=self.timestamp,
text=self.text)
return result
def shorthand_id(self):
return self.slug
class AwardFile(File):
class __mongometa__:
session = main_orm_session
name = 'award_file'
award_id = FieldProperty(S.ObjectId)
class Award(Artifact):
class __mongometa__:
session = main_orm_session
name = 'award'
indexes = ['short']
type_s = 'Generic Award'
from .project import Neighborhood
_id = FieldProperty(S.ObjectId)
created_by_neighborhood_id = ForeignIdProperty(
Neighborhood, if_missing=None)
created_by_neighborhood = RelationProperty(
Neighborhood, via='created_by_neighborhood_id')
short = FieldProperty(str, if_missing=h.nonce)
timestamp = FieldProperty(datetime, if_missing=datetime.utcnow)
full = FieldProperty(str, if_missing='')
def index(self):
result = Artifact.index(self)
result.update(
_id_s=self._id,
short_s=self.short,
timestamp_dt=self.timestamp,
full_s=self.full)
if self.created_by:
result['created_by_s'] = self.created_by.name
return result
@property
def icon(self):
return AwardFile.query.get(award_id=self._id)
def url(self):
return str(self._id)
def longurl(self):
return self.created_by_neighborhood.url_prefix + "_admin/awards/" + self.url()
def shorthand_id(self):
return self.short
class AwardGrant(Artifact):
"An :class:`Award <allura.model.artifact.Award>` can be bestowed upon a project by a neighborhood"
class __mongometa__:
session = main_orm_session
name = 'grant'
indexes = ['short']
type_s = 'Generic Award Grant'
_id = FieldProperty(S.ObjectId)
award_id = ForeignIdProperty(Award, if_missing=None)
award = RelationProperty(Award, via='award_id')
granted_by_neighborhood_id = ForeignIdProperty(
'Neighborhood', if_missing=None)
granted_by_neighborhood = RelationProperty(
'Neighborhood', via='granted_by_neighborhood_id')
granted_to_project_id = ForeignIdProperty('Project', if_missing=None)
granted_to_project = RelationProperty(
'Project', via='granted_to_project_id')
award_url = FieldProperty(str, if_missing='')
comment = FieldProperty(str, if_missing='')
timestamp = FieldProperty(datetime, if_missing=datetime.utcnow)
    def index(self):
        result = Artifact.index(self)
        result.update(
            _id_s=self._id,
            timestamp_dt=self.timestamp)
        # AwardGrant has no `short` or `full` fields of its own (those live on
        # Award), so index the granted award's short name rather than raising
        # AttributeError here.
        if self.award:
            result['award_s'] = self.award.short
            result['short_s'] = self.award.short
        return result
@property
def icon(self):
return AwardFile.query.get(award_id=self.award_id)
def url(self):
slug = str(self.granted_to_project.shortname).replace('/', '_')
return h.urlquote(slug)
def longurl(self):
slug = str(self.granted_to_project.shortname).replace('/', '_')
slug = self.award.longurl() + '/' + slug
return h.urlquote(slug)
def shorthand_id(self):
if self.award:
return self.award.short
else:
return None
class RssFeed(FG.Rss201rev2Feed):
def rss_attributes(self):
attrs = super(RssFeed, self).rss_attributes()
attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'
return attrs
def add_root_elements(self, handler):
super(RssFeed, self).add_root_elements(handler)
if self.feed['feed_url'] is not None:
handler.addQuickElement('atom:link', '', {
'rel': 'self',
'href': self.feed['feed_url'],
'type': 'application/rss+xml',
})
class Feed(MappedClass):
"""
Used to generate rss/atom feeds. This does not need to be extended;
all feed items go into the same collection
"""
class __mongometa__:
session = project_orm_session
name = 'artifact_feed'
indexes = [
'pubdate',
('artifact_ref.project_id', 'artifact_ref.mount_point'),
(('ref_id', pymongo.ASCENDING),
('pubdate', pymongo.DESCENDING)),
(('project_id', pymongo.ASCENDING),
('app_config_id', pymongo.ASCENDING),
('pubdate', pymongo.DESCENDING)),
# used in ext/user_profile/user_main.py for user feeds
'author_link',
# used in project feed
(('project_id', pymongo.ASCENDING),
('pubdate', pymongo.DESCENDING)),
]
_id = FieldProperty(S.ObjectId)
ref_id = ForeignIdProperty('ArtifactReference')
neighborhood_id = ForeignIdProperty('Neighborhood')
project_id = ForeignIdProperty('Project')
app_config_id = ForeignIdProperty('AppConfig')
tool_name = FieldProperty(str)
title = FieldProperty(str)
link = FieldProperty(str)
pubdate = FieldProperty(datetime, if_missing=datetime.utcnow)
description = FieldProperty(str)
description_cache = FieldProperty(MarkdownCache)
unique_id = FieldProperty(str, if_missing=lambda: h.nonce(40))
author_name = FieldProperty(str, if_missing=lambda: c.user.get_pref(
'display_name') if hasattr(c, 'user') else None)
author_link = FieldProperty(
str, if_missing=lambda: c.user.url() if hasattr(c, 'user') else None)
artifact_reference = FieldProperty(S.Deprecated)
@classmethod
def post(cls, artifact, title=None, description=None, author=None, author_link=None, author_name=None, pubdate=None, link=None, **kw):
"""
        Create a Feed item and return it.
        If anonymous users lack read access, nothing is created and None is returned.
"""
# TODO: fix security system so we can do this correctly and fast
from allura import model as M
anon = M.User.anonymous()
if not security.has_access(artifact, 'read', user=anon):
return
if not security.has_access(c.project, 'read', user=anon):
return
idx = artifact.index()
if author is None:
author = c.user
if author_name is None:
author_name = author.get_pref('display_name')
if title is None:
title = '%s modified by %s' % (
h.get_first(idx, 'title'), author_name)
if description is None:
description = title
if pubdate is None:
pubdate = datetime.utcnow()
if link is None:
link = artifact.url()
item = cls(
ref_id=artifact.index_id(),
neighborhood_id=artifact.app_config.project.neighborhood_id,
project_id=artifact.app_config.project_id,
app_config_id=artifact.app_config_id,
tool_name=artifact.app_config.tool_name,
title=title,
description=g.markdown.convert(description),
link=link,
pubdate=pubdate,
author_name=author_name,
author_link=author_link or author.url())
unique_id = kw.pop('unique_id', None)
if unique_id:
item.unique_id = unique_id
return item
@classmethod
def feed(cls, q, feed_type, title, link, description,
since=None, until=None, page=None, limit=None):
"Produces webhelper.feedgenerator Feed"
d = dict(title=title, link=h.absurl(link),
description=description, language=u'en',
feed_url=request.url)
if feed_type == 'atom':
feed = FG.Atom1Feed(**d)
elif feed_type == 'rss':
feed = RssFeed(**d)
limit, page = h.paging_sanitizer(limit or 10, page)
query = defaultdict(dict)
query.update(q)
if since is not None:
query['pubdate']['$gte'] = since
if until is not None:
query['pubdate']['$lte'] = until
cur = cls.query.find(query)
cur = cur.sort('pubdate', pymongo.DESCENDING)
cur = cur.limit(limit)
cur = cur.skip(limit * page)
for r in cur:
feed.add_item(title=r.title,
link=h.absurl(r.link.encode('utf-8')),
pubdate=r.pubdate,
description=r.description,
unique_id=h.absurl(r.unique_id),
author_name=r.author_name,
author_link=h.absurl(r.author_link))
return feed
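# Hedged usage sketch (names are illustrative; a real caller runs inside a web
# request so that `request.url` and the pylons globals are populated):
#   feed = Feed.feed(
#       {'project_id': project._id},
#       feed_type='rss',
#       title='%s feed' % project.name,
#       link=project.url(),
#       description='Recent changes',
#       limit=25, page=0)
#   body = feed.writeString('utf-8')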
class VotableArtifact(MappedClass):
"""Voting support for the Artifact. Use as a mixin."""
class __mongometa__:
session = main_orm_session
name = 'vote'
votes = FieldProperty(int, if_missing=0)
votes_up = FieldProperty(int, if_missing=0)
votes_down = FieldProperty(int, if_missing=0)
votes_up_users = FieldProperty([str], if_missing=list())
votes_down_users = FieldProperty([str], if_missing=list())
def vote_up(self, user):
voted = self.user_voted(user)
if voted == 1:
# Already voted up - unvote
self.votes_up_users.remove(user.username)
self.votes_up -= 1
elif voted == -1:
# Change vote to negative
self.votes_down_users.remove(user.username)
self.votes_down -= 1
self.votes_up_users.append(user.username)
self.votes_up += 1
else:
self.votes_up_users.append(user.username)
self.votes_up += 1
self.votes = self.votes_up - self.votes_down
def vote_down(self, user):
voted = self.user_voted(user)
if voted == -1:
# Already voted down - unvote
self.votes_down_users.remove(user.username)
self.votes_down -= 1
elif voted == 1:
# Change vote to positive
self.votes_up_users.remove(user.username)
self.votes_up -= 1
self.votes_down_users.append(user.username)
self.votes_down += 1
else:
self.votes_down_users.append(user.username)
self.votes_down += 1
self.votes = self.votes_up - self.votes_down
def user_voted(self, user):
"""Check that user voted for this artifact.
Return:
1 if user voted up
-1 if user voted down
0 if user doesn't vote
"""
if user.username in self.votes_up_users:
return 1
if user.username in self.votes_down_users:
return -1
return 0
@property
def votes_up_percent(self):
votes_count = self.votes_up + self.votes_down
if votes_count == 0:
return 0
return int(float(self.votes_up) / votes_count * 100)
def __json__(self):
return {
'votes_up': self.votes_up,
'votes_down': self.votes_down,
}
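# Hedged walk-through of the bookkeeping above: starting from zero votes,
# vote_up(user) yields votes_up=1, votes=1; a second vote_up from the same
# user undoes it (votes=0); calling vote_down after a vote_up moves the user
# between the two lists in one call, ending at votes_up=0, votes_down=1,
# votes=-1.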
class MovedArtifact(Artifact):
class __mongometa__:
session = artifact_orm_session
name = 'moved_artifact'
_id = FieldProperty(S.ObjectId)
app_config_id = ForeignIdProperty(
'AppConfig', if_missing=lambda: c.app.config._id)
app_config = RelationProperty('AppConfig')
moved_to_url = FieldProperty(str, required=True, allow_none=False)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from datetime import datetime
from unittest import mock
from airflow.exceptions import AirflowException
from airflow.models import DAG
from airflow.providers.databricks.hooks.databricks import RunState
from airflow.providers.databricks.operators import databricks as databricks_operator
from airflow.providers.databricks.operators.databricks import (
DatabricksRunNowOperator,
DatabricksSubmitRunOperator,
)
DATE = '2017-04-20'
TASK_ID = 'databricks-operator'
DEFAULT_CONN_ID = 'databricks_default'
NOTEBOOK_TASK = {'notebook_path': '/test'}
TEMPLATED_NOTEBOOK_TASK = {'notebook_path': '/test-{{ ds }}'}
RENDERED_TEMPLATED_NOTEBOOK_TASK = {'notebook_path': f'/test-{DATE}'}
SPARK_JAR_TASK = {'main_class_name': 'com.databricks.Test'}
SPARK_PYTHON_TASK = {'python_file': 'test.py', 'parameters': ['--param', '123']}
SPARK_SUBMIT_TASK = {
"parameters": ["--class", "org.apache.spark.examples.SparkPi", "dbfs:/path/to/examples.jar", "10"]
}
NEW_CLUSTER = {'spark_version': '2.0.x-scala2.10', 'node_type_id': 'development-node', 'num_workers': 1}
EXISTING_CLUSTER_ID = 'existing-cluster-id'
RUN_NAME = 'run-name'
RUN_ID = 1
JOB_ID = 42
NOTEBOOK_PARAMS = {"dry-run": "true", "oldest-time-to-consider": "1457570074236"}
JAR_PARAMS = ["param1", "param2"]
TEMPLATED_JAR_PARAMS = ['/test-{{ ds }}']
RENDERED_TEMPLATED_JAR_PARAMS = [f'/test-{DATE}']
PYTHON_PARAMS = ["john doe", "35"]
SPARK_SUBMIT_PARAMS = ["--class", "org.apache.spark.examples.SparkPi"]
class TestDatabricksOperatorSharedFunctions(unittest.TestCase):
def test_deep_string_coerce(self):
test_json = {
'test_int': 1,
'test_float': 1.0,
'test_dict': {'key': 'value'},
'test_list': [1, 1.0, 'a', 'b'],
'test_tuple': (1, 1.0, 'a', 'b'),
}
expected = {
'test_int': '1',
'test_float': '1.0',
'test_dict': {'key': 'value'},
'test_list': ['1', '1.0', 'a', 'b'],
'test_tuple': ['1', '1.0', 'a', 'b'],
}
self.assertDictEqual(databricks_operator._deep_string_coerce(test_json), expected)
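# Hedged note: the operators run their `json` argument through
# _deep_string_coerce because templated fields must be all-string (numeric
# values would break template rendering), so e.g. {'timeout_seconds': 3600}
# is coerced to {'timeout_seconds': '3600'} before submission.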
class TestDatabricksSubmitRunOperator(unittest.TestCase):
def test_init_with_notebook_task_named_parameters(self):
"""
Test the initializer with the named parameters.
"""
op = DatabricksSubmitRunOperator(
task_id=TASK_ID, new_cluster=NEW_CLUSTER, notebook_task=NOTEBOOK_TASK
)
expected = databricks_operator._deep_string_coerce(
{'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID}
)
self.assertDictEqual(expected, op.json)
def test_init_with_spark_python_task_named_parameters(self):
"""
Test the initializer with the named parameters.
"""
op = DatabricksSubmitRunOperator(
task_id=TASK_ID, new_cluster=NEW_CLUSTER, spark_python_task=SPARK_PYTHON_TASK
)
expected = databricks_operator._deep_string_coerce(
{'new_cluster': NEW_CLUSTER, 'spark_python_task': SPARK_PYTHON_TASK, 'run_name': TASK_ID}
)
self.assertDictEqual(expected, op.json)
def test_init_with_spark_submit_task_named_parameters(self):
"""
Test the initializer with the named parameters.
"""
op = DatabricksSubmitRunOperator(
task_id=TASK_ID, new_cluster=NEW_CLUSTER, spark_submit_task=SPARK_SUBMIT_TASK
)
expected = databricks_operator._deep_string_coerce(
{'new_cluster': NEW_CLUSTER, 'spark_submit_task': SPARK_SUBMIT_TASK, 'run_name': TASK_ID}
)
self.assertDictEqual(expected, op.json)
def test_init_with_json(self):
"""
Test the initializer with json data.
"""
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce(
{'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID}
)
self.assertDictEqual(expected, op.json)
def test_init_with_specified_run_name(self):
"""
Test the initializer with a specified run_name.
"""
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': RUN_NAME}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce(
{'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': RUN_NAME}
)
self.assertDictEqual(expected, op.json)
def test_init_with_merging(self):
"""
Test the initializer when json and other named parameters are both
provided. The named parameters should override top level keys in the
json dict.
"""
override_new_cluster = {'workers': 999}
json = {
'new_cluster': NEW_CLUSTER,
'notebook_task': NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json, new_cluster=override_new_cluster)
expected = databricks_operator._deep_string_coerce(
{
'new_cluster': override_new_cluster,
'notebook_task': NOTEBOOK_TASK,
'run_name': TASK_ID,
}
)
self.assertDictEqual(expected, op.json)
def test_init_with_templating(self):
json = {
'new_cluster': NEW_CLUSTER,
'notebook_task': TEMPLATED_NOTEBOOK_TASK,
}
dag = DAG('test', start_date=datetime.now())
op = DatabricksSubmitRunOperator(dag=dag, task_id=TASK_ID, json=json)
op.render_template_fields(context={'ds': DATE})
expected = databricks_operator._deep_string_coerce(
{
'new_cluster': NEW_CLUSTER,
'notebook_task': RENDERED_TEMPLATED_NOTEBOOK_TASK,
'run_name': TASK_ID,
}
)
self.assertDictEqual(expected, op.json)
def test_init_with_bad_type(self):
json = {'test': datetime.now()}
# Looks a bit weird since we have to escape regex reserved symbols.
exception_message = (
r'Type \<(type|class) \'datetime.datetime\'\> used '
+ r'for parameter json\[test\] is not a number or a string'
)
with self.assertRaisesRegex(AirflowException, exception_message):
DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
@mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_success(self, db_mock_class):
"""
Test the execute function in case where the run is successful.
"""
run = {
'new_cluster': NEW_CLUSTER,
'notebook_task': NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '')
op.execute(None)
expected = databricks_operator._deep_string_coerce(
{'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay
)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
self.assertEqual(RUN_ID, op.run_id)
@mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_failure(self, db_mock_class):
"""
Test the execute function in case where the run failed.
"""
run = {
'new_cluster': NEW_CLUSTER,
'notebook_task': NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'FAILED', '')
with self.assertRaises(AirflowException):
op.execute(None)
expected = databricks_operator._deep_string_coerce(
{
'new_cluster': NEW_CLUSTER,
'notebook_task': NOTEBOOK_TASK,
'run_name': TASK_ID,
}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay
)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
self.assertEqual(RUN_ID, op.run_id)
@mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_on_kill(self, db_mock_class):
run = {
'new_cluster': NEW_CLUSTER,
'notebook_task': NOTEBOOK_TASK,
}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
op.run_id = RUN_ID
op.on_kill()
db_mock.cancel_run.assert_called_once_with(RUN_ID)
class TestDatabricksRunNowOperator(unittest.TestCase):
def test_init_with_named_parameters(self):
"""
Test the initializer with the named parameters.
"""
op = DatabricksRunNowOperator(job_id=JOB_ID, task_id=TASK_ID)
        expected = databricks_operator._deep_string_coerce({'job_id': JOB_ID})
self.assertDictEqual(expected, op.json)
def test_init_with_json(self):
"""
Test the initializer with json data.
"""
json = {
'notebook_params': NOTEBOOK_PARAMS,
'jar_params': JAR_PARAMS,
'python_params': PYTHON_PARAMS,
'spark_submit_params': SPARK_SUBMIT_PARAMS,
'job_id': JOB_ID,
}
op = DatabricksRunNowOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce(
{
'notebook_params': NOTEBOOK_PARAMS,
'jar_params': JAR_PARAMS,
'python_params': PYTHON_PARAMS,
'spark_submit_params': SPARK_SUBMIT_PARAMS,
'job_id': JOB_ID,
}
)
self.assertDictEqual(expected, op.json)
def test_init_with_merging(self):
"""
Test the initializer when json and other named parameters are both
provided. The named parameters should override top level keys in the
json dict.
"""
override_notebook_params = {'workers': 999}
json = {'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(
task_id=TASK_ID,
json=json,
job_id=JOB_ID,
notebook_params=override_notebook_params,
python_params=PYTHON_PARAMS,
spark_submit_params=SPARK_SUBMIT_PARAMS,
)
expected = databricks_operator._deep_string_coerce(
{
'notebook_params': override_notebook_params,
'jar_params': JAR_PARAMS,
'python_params': PYTHON_PARAMS,
'spark_submit_params': SPARK_SUBMIT_PARAMS,
'job_id': JOB_ID,
}
)
self.assertDictEqual(expected, op.json)
def test_init_with_templating(self):
json = {'notebook_params': NOTEBOOK_PARAMS, 'jar_params': TEMPLATED_JAR_PARAMS}
dag = DAG('test', start_date=datetime.now())
op = DatabricksRunNowOperator(dag=dag, task_id=TASK_ID, job_id=JOB_ID, json=json)
op.render_template_fields(context={'ds': DATE})
expected = databricks_operator._deep_string_coerce(
{
'notebook_params': NOTEBOOK_PARAMS,
'jar_params': RENDERED_TEMPLATED_JAR_PARAMS,
'job_id': JOB_ID,
}
)
self.assertDictEqual(expected, op.json)
def test_init_with_bad_type(self):
json = {'test': datetime.now()}
# Looks a bit weird since we have to escape regex reserved symbols.
exception_message = (
r'Type \<(type|class) \'datetime.datetime\'\> used '
+ r'for parameter json\[test\] is not a number or a string'
)
with self.assertRaisesRegex(AirflowException, exception_message):
DatabricksRunNowOperator(task_id=TASK_ID, job_id=JOB_ID, json=json)
@mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_success(self, db_mock_class):
"""
Test the execute function in case where the run is successful.
"""
run = {'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, job_id=JOB_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.run_now.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '')
op.execute(None)
expected = databricks_operator._deep_string_coerce(
{
'notebook_params': NOTEBOOK_PARAMS,
'notebook_task': NOTEBOOK_TASK,
'jar_params': JAR_PARAMS,
'job_id': JOB_ID,
}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay
)
db_mock.run_now.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
self.assertEqual(RUN_ID, op.run_id)
@mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_failure(self, db_mock_class):
"""
Test the execute function in case where the run failed.
"""
run = {'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, job_id=JOB_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.run_now.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'FAILED', '')
with self.assertRaises(AirflowException):
op.execute(None)
expected = databricks_operator._deep_string_coerce(
{
'notebook_params': NOTEBOOK_PARAMS,
'notebook_task': NOTEBOOK_TASK,
'jar_params': JAR_PARAMS,
'job_id': JOB_ID,
}
)
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay
)
db_mock.run_now.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
self.assertEqual(RUN_ID, op.run_id)
@mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_on_kill(self, db_mock_class):
run = {'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, job_id=JOB_ID, json=run)
db_mock = db_mock_class.return_value
op.run_id = RUN_ID
op.on_kill()
db_mock.cancel_run.assert_called_once_with(RUN_ID)
|
|
#!/usr/bin/env python
#
# Unit tests for the multiprocessing package
#
import unittest
import Queue
import time
import sys
import os
import gc
import signal
import array
import socket
import random
import logging
from test import test_support
from StringIO import StringIO
_multiprocessing = test_support.import_module('_multiprocessing')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
# Work around broken sem_open implementations
test_support.import_module('multiprocessing.synchronize')
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
#
#
#
latin = str
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False     # setting this True makes the tests take a lot
                          # longer and can sometimes cause non-serious
                          # failures because some calls block a bit
                          # longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
try:
from ctypes import Value
except ImportError:
Value = None
try:
from ctypes import copy as ctypes_copy
except ImportError:
ctypes_copy = None
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
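#
# Usage sketch (mirrors how the tests below use it): wrap a blocking call,
# then assert on how long it actually blocked, e.g.
#   get = TimingWrapper(queue.get)
#   self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
#   self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
#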
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
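#
# Note (hedged): get_value() above is a module-level helper, not a method; the
# `self` parameter is just the semaphore-like object passed in. It tries the
# multiprocessing API (get_value()), then the Py2 threading._Semaphore
# name-mangled attribute (_Semaphore__value), then a plain _value, and raises
# NotImplementedError so callers like assertReturnsIfImplemented can skip the
# check.
#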
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
return
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def _test(self, q, *args, **kwds):
current = self.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if self.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
            self.assertEqual(p.authkey, current.authkey)
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.daemon, True)
        self.assertNotIn(p, self.active_children())
        self.assertTrue(type(self.active_children()) is list)
        self.assertEqual(p.exitcode, None)
        p.start()
        self.assertEqual(p.exitcode, None)
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        self.assertEqual(q.get(), args[1:])
        self.assertEqual(q.get(), kwargs)
        self.assertEqual(q.get(), p.name)
        if self.TYPE != 'threads':
            self.assertEqual(q.get(), current.authkey)
            self.assertEqual(q.get(), p.pid)
        p.join()
        self.assertEqual(p.exitcode, 0)
        self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
def _test_terminate(self):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
def _test_recursion(self, wconn, id):
from multiprocessing import forking
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = self.Process(
target=self._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
def _test_put(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
def _test_get(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
def _test_fork(self, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
def _test_task_done(self, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
self.skipTest("requires 'queue.task_done()' method")
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in xrange(4)]
for p in workers:
p.start()
for i in xrange(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
def f(self, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
def _test_event(self, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily due to API shear: this does not
# work with threading._Event objects (is_set == isSet).
self.assertEqual(event.is_set(), False)
# Removed: threading.Event.wait() returns the value of __flag
# instead of None; API shear with the semaphore-backed mp.Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
# self.assertEqual(event.is_set(), False)
self.Process(target=self._test_event, args=(event,)).start()
self.assertEqual(wait(), True)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def _test(self, values):
for sv, cv in zip(values, self.codes_values):
sv.value = cv[2]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawvalue(self):
self.test_value(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def f(self, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', range(10))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', range(10), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', range(10), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], range(10))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = range(65, 70)
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_make_pool(self):
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
if self.TYPE == 'manager':
# On Unix a forked process increfs each shared object to
# which its parent process held a reference. If the
# forked process gets terminated then there is likely to
# be a reference leak. So to prevent
# _TestZZZNumberOfObjects from failing we skip this test
# when using a manager.
return
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertTrue(join.elapsed < 0.2)
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
countdown = 5
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
debug_info = self.manager._debug_info()
if refs != EXPECTED_NUMBER:
print debug_info
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = Queue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def _putter(self, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize the tuple as a list, not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
def _putter(self, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER)
addr = manager.get_server().address
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.start()
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _echo(self, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort, e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
return
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
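# (Sketch of the semantics exercised above: send_bytes(buf, offset, size)
# transmits buf[offset:offset + size]; an offset or size falling outside
# the buffer raises ValueError.)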
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _test(self, address):
conn = self.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
#
# Test of sending connection and socket objects between processes
#
"""
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
"""
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
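# (Sketch of the invariant checked above: with every free and occupied
# block sorted by (arena, start), each consecutive pair must either abut
# exactly, stop == nstart, or the second block must open a new arena at
# offset 0, so the heap contains no overlaps or unaccounted gaps.)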
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _double(self, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
@unittest.skipIf(Value is None, "requires ctypes.Value")
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', range(10), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = 'hello'
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
@unittest.skipIf(Value is None, "requires ctypes.Value")
def test_synchronize(self):
self.test_sharedctypes(lock=True)
@unittest.skipIf(ctypes_copy is None, "requires ctypes.copy")
def test_copy(self):
foo = _Foo(2, 5.0)
bar = ctypes_copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _test_finalize(self, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function, then exit the process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
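# (Sketch of the expected ordering: util._exit_function() runs finalizers
# with a non-None exitpriority, highest priority first; ties fire in
# reverse creation order, hence d10, then d03/d02/d01, then e. 'c' never
# fires because its exitpriority is None and the locals are not garbage
# collected before os._exit(0).)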
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = [
'multiprocessing', 'multiprocessing.connection',
'multiprocessing.heap', 'multiprocessing.managers',
'multiprocessing.pool', 'multiprocessing.process',
'multiprocessing.reduction',
'multiprocessing.synchronize', 'multiprocessing.util'
]
if c_int is not None:
# This module requires _ctypes
modules.append('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
def _test_level(self, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Test to verify invalid handle detection, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = _multiprocessing.Connection(44977608)
self.assertRaises(IOError, conn.poll)
self.assertRaises(IOError, _multiprocessing.Connection, -1)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
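# (Sketch: get_attributes(multiprocessing, ('Lock',)) would return
# {'Lock': staticmethod(multiprocessing.Lock)}; wrapping plain functions
# in staticmethod() lets them sit in a mixin class body without becoming
# bound methods.)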
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type.capitalize()
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
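# (Sketch: with Mixin=ProcessesMixin and type='processes', a base class
# named _TestLock whose ALLOWED_TYPES includes 'processes' yields a
# generated case named 'WithProcessesTestLock' that inherits from
# (base, unittest.TestCase, Mixin).)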
#
# Create test cases
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
locals().update(get_attributes(multiprocessing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
manager = object.__new__(multiprocessing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
locals().update(get_attributes(multiprocessing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
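# (Sketch of the protocol under test: deliver_challenge() sends CHALLENGE
# plus a random message and expects back an HMAC digest of that message
# keyed with the authkey; answer_challenge() performs the mirror image.
# A bogus reply at either step, as the fakes above provide, raises
# AuthenticationError.)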
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno()) vs. sys.stdin.close() behavior
#
def _ThisSubProcess(q):
try:
item = q.get(block=False)
except Queue.Empty:
pass
def _TestProcess(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,))
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# No race conditions here: fork() keeps only the calling thread alive in the child
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
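# (Sketch of the behaviour relied on below: _file_like buffers writes per
# process; after a fork the child observes a different os.getpid(), so
# .cache lazily resets to a fresh list instead of flushing data inherited
# from the parent.)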
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_TestProcess, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
self.assertEqual(sio.getvalue(), 'foo')
testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
TestStdinBadfiledescriptor]
#
#
#
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError is raised on RLock creation; see issue 3111!")
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
# (ncoghlan): Whether or not sys.exc_clear is executed by the threading
# module during these tests is at least platform dependent and possibly
# non-deterministic on any given platform. So we don't mind if the listed
# warnings aren't actually raised.
with test_support.check_py3k_warnings(
(".+__(get|set)slice__ has been removed", DeprecationWarning),
(r"sys.exc_clear\(\) not supported", DeprecationWarning),
quiet=True):
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
|
|
__author__ = "Jens Thomas & Felix Simkovic"
__date__ = "10 June 2019"
__version__ = "1.0"
import argparse
import os
from ample.modelling.multimer_definitions import MULTIMER_MODES
class BoolAction(argparse.Action):
"""Class to set a boolean value either form a string or just from the use of the command-line flag"""
def __call__(self, parser, namespace, values, option_string=None):
if values is None:
# values = self.default
values = True # supplying the bare flag with no value sets the variable to True
if values in ['0', 'f', 'F', 'false', 'False', False]:
values = False
elif values in ['1', 't', 'T', 'true', 'True', True]:
values = True
else:
raise argparse.ArgumentError(self, 'Unrecognised True/False value: {0}'.format(values))
setattr(namespace, self.dest, values)
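# Usage sketch (illustrative, not executed; the '-debug' flag name is an
# arbitrary example):
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('-debug', action=BoolAction, nargs='?')
#     parser.parse_args(['-debug'])           # Namespace(debug=True)
#     parser.parse_args(['-debug', 'false'])  # Namespace(debug=False)
#     parser.parse_args(['-debug', 'bogus'])  # argparse reports an error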
class FilePathAction(argparse.Action):
"""Class to handle paths to files or directories.
AMPLE changes directory into a work directory so relative paths to files don't work.
We convert to absolute paths here.
"""
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, str):
values = os.path.abspath(values)
setattr(namespace, self.dest, values)
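# Usage sketch (illustrative, not executed; the path is an arbitrary example):
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('-mtz', action=FilePathAction)
#     parser.parse_args(['-mtz', 'data/input.mtz'])
#     # -> Namespace(mtz=os.path.abspath('data/input.mtz')), so a later
#     #    os.chdir() inside AMPLE cannot invalidate the reference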
def add_core_options(parser=None):
"""Function to add any arguments required by all runtypes"""
if parser is None:
parser = argparse.ArgumentParser()
parser.add_argument('-config_file', action=FilePathAction, help="user configuration file")
parser.add_argument('-debug', action=BoolAction, nargs='?', metavar='True/False', help=argparse.SUPPRESS)
parser.add_argument(
'-nproc',
type=int,
help="Number of processors [1]. For local, serial runs the jobs will be split across nproc processors. For cluster submission, this should be the number of processors on a node.",
)
parser.add_argument(
'-work_dir',
action=FilePathAction,
help='Path to the directory where the job will run (will be created if it doesn\'t exist)',
)
return parser
def add_cluster_submit_options(parser=None):
"""Add the options for submission to a cluster queuing system"""
if parser is None:
parser = argparse.ArgumentParser()
submit_group = parser.add_argument_group('Cluster queue submission options')
submit_group.add_argument(
'-submit_array', action=BoolAction, nargs='?', metavar='True/False', help='Submit SGE jobs as array jobs'
)
submit_group.add_argument(
'-submit_cluster',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Submit jobs to a cluster - need to set -submit_qtype flag to specify the batch queue system.',
)
submit_group.add_argument(
'-submit_max_array',
type=int,
help='The maximum number of jobs to run concurrently with SGE array job submission',
)
submit_group.add_argument(
'-submit_num_array_jobs', type=int, help='The number of jobs to run concurrently with SGE array job submission'
)
submit_group.add_argument(
'-submit_pe_lsf', help='Cluster submission: string to set number of processors for LSF queueing system'
)
submit_group.add_argument(
'-submit_pe_sge', help='Cluster submission: string to set number of processors for SGE queueing system'
)
submit_group.add_argument('-submit_queue', help='The queue to submit to on the cluster.')
submit_group.add_argument('-submit_qtype', help='Cluster submission queue type - currently support SGE and LSF')
return parser
def add_general_options(parser=None):
from ample.util import version
if parser is None:
parser = argparse.ArgumentParser()
add_core_options(parser)
parser.add_argument(
'-alignment_file',
action=FilePathAction,
help='Alignment file in fasta format. For homologues the first line of each sequence must be the pdb file name',
)
parser.add_argument(
'-allow_his_tag',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Allow HIS tags in the input sequence',
)
parser.add_argument(
'-blast_dir',
action=FilePathAction,
help='Directory where ncbi blast is installed (binaries in expected in bin subdirectory)',
)
parser.add_argument(
'-classic_mode',
metavar='True/False',
help='Preset options to run the original AMPLE clustering/truncation options (1 cluster, 3 subclustering radii, 3 sidechains)',
)
parser.add_argument(
'-ccp4i2_xml',
action=FilePathAction,
help='Path to CCP4I2 XML file - if not None indicates we are running under CCP4I2',
)
parser.add_argument(
'-coiled_coil',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Turn on Coiled-Coil mode for solving Coiled-Coil structures',
)
parser.add_argument(
'-devel_mode', metavar='devel_mode', help='Preset options to run in development mode - takes longer'
)
parser.add_argument('-dry_run', metavar='True/False', help='Check if input files and supplied options are valid.')
parser.add_argument(
'-early_terminate',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Stop the run as soon as a success has been found.',
)
parser.add_argument('-ensembles', help='Path to directory containing existing ensembles')
parser.add_argument('-fasta', action=FilePathAction, help='protein fasta file. (required)')
parser.add_argument('-fast_protein_cluster_exe', help='path to fast_protein_cluster executable')
parser.add_argument('-F', metavar='flag for F', help='Flag for F column in the MTZ file')
parser.add_argument('-FREE', metavar='flag for FREE', help='Flag for FREE column in the MTZ file')
parser.add_argument(
'-ideal_helices',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Use ideal polyalanine helices to solve structure (8 helices: from 5-40 residues)',
)
parser.add_argument(
'-improve_template', metavar='improve_template', help='Path to a template to improve - NMR, homolog'
)
parser.add_argument('-LGA', metavar='path_to_LGA dir', help=argparse.SUPPRESS)
parser.add_argument(
'-make_models',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Run Rosetta modelling; set to False to import pre-made models (required if making models locally) [default: True]',
)
parser.add_argument('-max_array_jobs', help='Maximum number of array jobs to run')
parser.add_argument(
'-models',
metavar='models',
help='Path to a folder of PDB decoys, or a tarred and gzipped/bziped, or zipped collection of decoys',
)
parser.add_argument(
'-mr_sequence',
action=FilePathAction,
help="sequence file for crystal content (if different from what's given by -fasta)",
)
parser.add_argument('-mtz', action=FilePathAction, metavar='MTZ in', help='The MTZ file with the reflection data.')
parser.add_argument('-name', metavar='job_name', help='4-letter identifier for job [ampl]')
parser.add_argument(
'-native_pdb',
action=FilePathAction,
metavar='native_pdb',
help='Path to the crystal structure PDB for benchmarking.',
)
parser.add_argument(
'-native_mtz',
action=FilePathAction,
metavar='native_mtz',
help='Path to the native MTZ containing FC and PHIC calculated phases for benchmarking.',
)
parser.add_argument('-nmr_model_in', action=FilePathAction, metavar='nmr_model_in', help='PDB with NMR models')
parser.add_argument('-nmr_process', type=int, help='number of times to process the NMR models')
parser.add_argument(
'-nmr_remodel', action=BoolAction, nargs='?', metavar='True/False', help='Remodel the NMR structures'
)
parser.add_argument(
'-nmr_remodel_fasta',
action=FilePathAction,
help='The FASTA sequence to be used for remodelling the NMR ensemble if different from the default FASTA sequence',
)
parser.add_argument(
'-purge',
metavar='purge_level',
type=int,
choices=[0, 1, 2],
help='Delete intermediate files and failed MRBUMP results: 0 - None, 1 - Some, 2 - All possible',
)
parser.add_argument('-psipred_ss2', metavar='PSIPRED_FILE', help='Psipred secondary structure prediction file')
parser.add_argument(
'-quick_mode',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Preset options to run quickly, but less thoroughly',
)
parser.add_argument('-restart_pkl', help='Rerun a job using the pickled ample dictionary')
parser.add_argument(
'-run_dir',
action=FilePathAction,
metavar='run_directory',
help='Directory where the AMPLE work directory will be created [current dir]',
default=os.getcwd(),
)
parser.add_argument(
'-rvapi_document', action=FilePathAction, help='Path to an existing rvapi document (for running under jscofe)'
)
parser.add_argument('-scwrl_exe', metavar='path to scwrl', help='Path to Scwrl4 executable')
parser.add_argument(
'-show_gui', action=BoolAction, nargs='?', metavar='True/False', help='Pop up and display a stand-alone GUI'
)
parser.add_argument('-single_model', help='Single structure model to be used to create ensembles')
parser.add_argument(
'-sf_cif', action=FilePathAction, help='Path to a structure factor CIF file (instead of MTZ file)'
)
parser.add_argument('-SIGF', help='Flag for SIGF column in the MTZ file')
parser.add_argument('-top_model_only', metavar='True/False', help='Only process the top model in each ensemble')
parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(version.__version__))
parser.add_argument(
'-webserver_uri', help='URI of the webserver directory - also indicates we are running as a webserver'
)
return parser
def add_contact_options(parser=None):
"""Contact prediction related options"""
if parser is None:
parser = argparse.ArgumentParser()
contact_group = parser.add_argument_group("Contact Restraints Options")
contact_group.add_argument(
'-bbcontacts_file', action=FilePathAction, help='Additional bbcontacts file. Requires normal contactfile'
)
contact_group.add_argument(
'-bbcontacts_format', help='Residue contact file format. For available formats refer to the AMPLE documentation'
)
contact_group.add_argument('-contact_file', action=FilePathAction, help='Residue contact file')
contact_group.add_argument(
'-contact_format', help='Residue contact file format. For available formats refer to the AMPLE documentation'
)
contact_group.add_argument(
'-disulfide_constraints_file',
action=FilePathAction,
help='Disulfide residue constraints for ab initio modelling',
)
contact_group.add_argument(
'-distance_to_neighbour', type=int, help="Min. distance between residue pairs for contact (default=5)"
)
contact_group.add_argument(
'-energy_function', help='Rosetta energy function for contact restraint conversion (default=FADE)'
)
contact_group.add_argument(
'-native_cutoff', type=float, help='Distance cutoff for reference contacts in native structure (default=8A)'
)
contact_group.add_argument(
'--no-contact-prediction', action=BoolAction, default=False, help="Do not predict contacts"
)
contact_group.add_argument(
'-restraints_factor',
type=float,
help='Factor (* Sequence length) determining number of contact restraints to use (default=1.0)',
)
contact_group.add_argument(
'-restraints_file', action=FilePathAction, help='Residue restraints for ab initio modelling'
)
contact_group.add_argument(
'-restraints_weight', type=float, help="Additional energy weighting of restraints in Rosetta"
)
contact_group.add_argument(
'-subselect_mode',
help="Long-range decoy satisfaction subselection mode - one of [{0}]".format(
" | ".join(["linear", "scaled", "cutoff"])
),
)
return parser
def add_mr_options(parser=None):
if parser is None:
parser = argparse.ArgumentParser()
mr_group = parser.add_argument_group('MRBUMP/Molecular Replacement Options')
mr_group.add_argument('-arpwarp_cycles', type=int, help='The number of ArpWarp cycles to run')
mr_group.add_argument('-buccaneer_cycles', type=int, help='The number of Buccaneer rebuilding cycles to run')
mr_group.add_argument(
'-do_mr', action=BoolAction, nargs='?', metavar='True/False', help='Run or skip the Molecular Replacement step'
)
mr_group.add_argument('-domain_termini_distance', help='distance between termini for insert domains')
mr_group.add_argument('-existing_mr_solution', action=FilePathAction, help='Existing MR solution to give to MRBUMP')
mr_group.add_argument(
'-early_terminate_SHELXE_CC', type=float, help='SHELXE_CC criterion for when a job has succeeded'
)
mr_group.add_argument(
'-early_terminate_SHELXE_ACL', type=int, help='SHELXE_ACL criterion for when a job has succeeded'
)
mr_group.add_argument(
'-molrep_only',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Only use Molrep for Molecular Replacement step in MRBUMP',
)
mr_group.add_argument(
'-mrbump_dir', action=FilePathAction, help='Path to a directory of MRBUMP jobs (see restart_pkl)'
)
mr_group.add_argument(
'-mr_keys',
nargs='+',
action='append',
help='Additional keywords for MRBUMP - are passed through without editing',
)
mr_group.add_argument(
'-mr_sg_all',
metavar='True/False',
help='Try all possible space groups in PHASER Molecular Replacement step in MRBUMP',
)
mr_group.add_argument(
'-nmasu',
type=int,
help='Manually specify the number of molecules in the asymmetric unit - sets the NMASu MRBUMP flag',
)
mr_group.add_argument(
'-phaser_kill',
metavar='phaser_kill',
type=int,
help='Time in minutes after which phaser will be killed (0 to leave running)',
)
mr_group.add_argument(
'-phaser_only',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Only use Phaser for Molecular Replacement step in MRBUMP',
)
mr_group.add_argument('-phaser_rms', metavar='phaser_rms', help='RMS value for phaser')
mr_group.add_argument(
'-refine_rebuild_arpwarp',
metavar='True/False',
help='True to use ARPWARP to rebuild the REFMAC-refined MR result.',
)
mr_group.add_argument(
'-refine_rebuild_buccaneer',
metavar='True/False',
help='True to use Buccaneer to rebuild the REFMAC-refined MR result.',
)
mr_group.add_argument('-shelx_cycles', help='The number of SHELXE cycles to run when rebuilding.')
mr_group.add_argument('-shelxe_exe', metavar='path to shelxe executable', help='Path to the SHELXE executable')
mr_group.add_argument('-shelxe_max_resolution', help='Maximum permitted resolution for rebuilding with SHELXE')
mr_group.add_argument(
'-shelxe_rebuild',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Rebuild SHELXE traced pdb with buccaneer and arpwarp',
)
mr_group.add_argument(
'-shelxe_rebuild_arpwarp',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Rebuild SHELXE traced pdb with arpwarp',
)
mr_group.add_argument(
'-shelxe_rebuild_buccaneer',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Rebuild SHELXE traced pdb with buccaneer',
)
mr_group.add_argument(
'-use_scwrl',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Remodel sidechains of the decoy models using Scwrl4',
)
mr_group.add_argument('-use_shelxe', action=BoolAction, nargs='?', metavar='True/False', help='True to use SHELXE')
return parser
def add_rosetta_options(parser=None):
if parser is None:
parser = argparse.ArgumentParser()
rosetta_group = parser.add_argument_group('ROSETTA Modelling Options')
rosetta_group.add_argument(
'-all_atom',
action=BoolAction,
nargs='?',
metavar='True/False',
help="Do all-atom Rosetta modelling (adds \"-return_full_atom true\" to rosetta arguments",
)
rosetta_group.add_argument(
'-frags_3mers', action=FilePathAction, help='Path to file with pre-existing Rosetta 3mer fragments'
)
rosetta_group.add_argument(
'-frags_9mers', action=FilePathAction, help='Path to file with pre-existing Rosetta 9mer fragments'
)
rosetta_group.add_argument(
'-make_frags',
action=BoolAction,
nargs='?',
metavar='True/False',
help='set True to generate Rosetta 3mers and 9mers locally, False to import fragments',
)
rosetta_group.add_argument(
'-multimer_modelling', help='Generate multimeric models. Accepted values: {}'.format(MULTIMER_MODES)
)
rosetta_group.add_argument(
'-nmodels', metavar='number of models', type=int, help='number of models to make (default: 1000)'
)
rosetta_group.add_argument('-nr', metavar='nr', help='Path to the NR non-redundant sequence database')
rosetta_group.add_argument(
'-rg_reweight',
metavar='radius of gyration reweight',
type=float,
help='Set the Rosetta -rg_reweight flag to specify the radius of gyration reweight.',
)
rosetta_group.add_argument(
'-rosetta_executable', action=FilePathAction, help='Path to ROSETTA executable for modelling'
)
rosetta_group.add_argument('-rosetta_db', action=FilePathAction, help='Path to the Rosetta database directory')
rosetta_group.add_argument('-rosetta_dir', action=FilePathAction, help='The Rosetta install directory')
rosetta_group.add_argument(
'-rosetta_fragments_exe', action=FilePathAction, help='Location of the Rosetta make_fragments.pl script'
)
rosetta_group.add_argument(
'-rosetta_flagsfile', action=FilePathAction, help='Location of file with Rosetta modelling commands'
)
rosetta_group.add_argument('-rosetta_version', type=float, help='The version number of Rosetta')
rosetta_group.add_argument(
'-transmembrane',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Do Rosetta modelling for transmembrane proteins (Ovchinnikov protocol)',
)
rosetta_group.add_argument(
'-transmembrane_old',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Do Rosetta modelling for transmembrane proteins (Yarov-Yarovoy protocol)',
)
rosetta_group.add_argument(
'-transmembrane_octopusfile', action=FilePathAction, help='Octopus transmembrane topology prediction file'
)
rosetta_group.add_argument(
'-transmembrane_spanfile', action=FilePathAction, help='Span file for modelling transmembrane proteins'
)
rosetta_group.add_argument(
'-transmembrane_lipofile', action=FilePathAction, help='Lips4 file for modelling transmembrane proteins'
)
rosetta_group.add_argument(
'-use_homs',
action=BoolAction,
nargs='?',
metavar='True/False',
help="Select ROSETTA fragments from homologous models",
)
return parser
def add_ensembler_options(parser=None):
# --------------------------------------------------------------------------------------------- #
# sphinx-argparse ignores Mock imports and thus cannot find iotbx.pdb when generating the docs. #
try:
from ample.ensembler.constants import ALLOWED_SIDE_CHAIN_TREATMENTS, SPICKER_RMSD, SPICKER_TM
from ample.ensembler.truncation_util import TRUNCATION_METHODS
except ImportError:
allowed_side_chain_treatments = ['polyala', 'reliable', 'allatom', 'unmod']
truncation_methods = ['percent']
SPICKER_RMSD = 'spicker'
SPICKER_TM = 'spicker_tm'
else:
allowed_side_chain_treatments = ALLOWED_SIDE_CHAIN_TREATMENTS[:]
truncation_methods = [t.value for t in TRUNCATION_METHODS]
if parser is None:
parser = argparse.ArgumentParser()
ensembler_group = parser.add_argument_group('Ensemble Options')
ensembler_group.add_argument(
'-cluster_dir', action=FilePathAction, help='Path to directory of pre-clustered models to import'
)
ensembler_group.add_argument(
'-cluster_method',
help='How to cluster the models for ensembling. Options: ' + '|'.join([SPICKER_RMSD, SPICKER_TM]),
)
ensembler_group.add_argument('-ensembler_timeout', type=int, help='Time in seconds before timing out ensembling')
ensembler_group.add_argument(
'-gesamt_exe', action=FilePathAction, metavar='gesamt_exe', help='Path to the gesamt executable'
)
ensembler_group.add_argument(
'-homologs',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Generate ensembles from homologs models (requires -alignment_file)',
)
ensembler_group.add_argument(
'-homolog_aligner',
metavar='homolog_aligner',
help='Program to use for structural alignment of homologs (gesamt|mustang)',
)
ensembler_group.add_argument('-ensemble_max_models', help='Maximum number of models permitted in an ensemble')
ensembler_group.add_argument(
'-mustang_exe', action=FilePathAction, metavar='mustang_exe', help='Path to the mustang executable'
)
ensembler_group.add_argument(
'-num_clusters', type=int, help='The number of Spicker clusters of the original decoys that will be sampled [1]'
)
ensembler_group.add_argument('-percent', metavar='percent_truncation', help='percent interval for truncation')
ensembler_group.add_argument(
'-percent_fixed_intervals', nargs='+', type=int, help='list of integer percentage intervals for truncation'
)
ensembler_group.add_argument('-score_matrix', action=FilePathAction, help='Path to score matrix for spicker')
ensembler_group.add_argument(
'-score_matrix_file_list',
action=FilePathAction,
help='File with list of ordered model names for the score_matrix',
)
ensembler_group.add_argument(
'-side_chain_treatments',
type=str,
nargs='+',
help='The side chain treatments to use. Options: ' + '|'.join(allowed_side_chain_treatments),
)
ensembler_group.add_argument('-spicker_exe', action=FilePathAction, help='Path to spicker executable')
ensembler_group.add_argument(
'-subcluster_radius_thresholds',
type=float,
nargs='+',
help='The radii to use for subclustering the truncated ensembles',
)
ensembler_group.add_argument('-subcluster_program', help='Program for subclustering models [gesamt]')
ensembler_group.add_argument(
'-theseus_exe', action=FilePathAction, metavar='Theseus exe', help='Path to theseus executable'
)
ensembler_group.add_argument(
'-thin_clusters',
action=BoolAction,
nargs='?',
metavar='True/False',
help='Create ensembles from 10 clusters with 1 + 3A subclustering and polyAlanine sidechains',
)
ensembler_group.add_argument(
'-truncation_method', help='How to truncate the models for ensembling: ' + '|'.join(truncation_methods)
)
ensembler_group.add_argument('-truncation_pruning', help='Whether to remove isolated residues (single)')
ensembler_group.add_argument(
'-truncation_scorefile',
action=FilePathAction,
help="CSV file containing per residue scores - COLUMN ONE MUST BE RESIDUE INDEX STARTING FROM 1",
)
ensembler_group.add_argument(
'-truncation_scorefile_header', nargs='+', help="column headers to be used to create ensembles"
)
return parser
def process_command_line(args=None, contacts=True, modelling=True, mol_rep=True):
"""Process the command-line for the main AMPLE program.
:param args: optional list of command-line arguments, used when calling
from within Python for testing
"""
parser = argparse.ArgumentParser(
description="AMPLE: Ab initio Modelling of Proteins for moLEcular replacement", prefix_chars="-"
)
add_general_options(parser)
add_cluster_submit_options(parser)
add_ensembler_options(parser)
if contacts:
add_contact_options(parser)
if mol_rep:
add_mr_options(parser)
if modelling:
add_rosetta_options(parser)
return vars(parser.parse_args(args))
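# Usage sketch (illustrative, not executed; the file names are arbitrary):
#
#     options = process_command_line(['-fasta', 'input.fasta',
#                                     '-mtz', 'input.mtz'])
#     options['fasta']  # absolute path, courtesy of FilePathAction
#     options['nproc']  # None unless -nproc was supplied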
|
|
# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad
# Complutense de Madrid (dsa-research.org)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenNebula.org test suite.
"""
__docformat__ = 'epytext'
import unittest
import sys
from libcloud.utils.py3 import httplib
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeState
from libcloud.compute.drivers.opennebula import *
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.common.types import InvalidCredsError
from libcloud.test import MockResponse, MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.secrets import OPENNEBULA_PARAMS
class OpenNebulaCaseMixin(TestCaseMixin):
def test_reboot_node_response(self):
pass
class OpenNebula_ResponseTests(unittest.TestCase):
XML = """<?xml version="1.0" encoding="UTF-8"?><root/>"""
def test_unauthorized_response(self):
http_response = MockResponse(httplib.UNAUTHORIZED,
OpenNebula_ResponseTests.XML,
headers={'content-type':
'application/xml'})
        # parse_body() must raise InvalidCredsError for a 401 response;
        # assertRaises also fails the test if no exception is raised at all
        self.assertRaises(InvalidCredsError,
                          OpenNebulaResponse(http_response, None).parse_body)
class OpenNebula_1_4_Tests(unittest.TestCase, OpenNebulaCaseMixin):
"""
OpenNebula.org test suite for OpenNebula v1.4.
"""
def setUp(self):
"""
Setup test environment.
"""
OpenNebulaNodeDriver.connectionCls.conn_classes = (
OpenNebula_1_4_MockHttp, OpenNebula_1_4_MockHttp)
self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('1.4',))
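        # the trailing '1.4' is the API version; OpenNebulaNodeDriver
        # dispatches on it to instantiate the matching driver subclass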
def test_create_node(self):
"""
Test create_node functionality.
"""
image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
size = NodeSize(id=1, name='small', ram=None, disk=None,
bandwidth=None, price=None, driver=self.driver)
networks = list()
networks.append(OpenNebulaNetwork(id=5, name='Network 5',
address='192.168.0.0', size=256, driver=self.driver))
networks.append(OpenNebulaNetwork(id=15, name='Network 15',
address='192.168.1.0', size=256, driver=self.driver))
node = self.driver.create_node(name='Compute 5', image=image,
size=size, networks=networks)
self.assertEqual(node.id, '5')
self.assertEqual(node.name, 'Compute 5')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].name, None)
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].address, '192.168.0.1')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[1].name, None)
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].address, '192.168.1.1')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.private_ips, [])
self.assertEqual(node.image.id, '5')
self.assertEqual(node.image.extra['dev'], 'sda1')
def test_destroy_node(self):
"""
Test destroy_node functionality.
"""
node = Node(5, None, None, None, None, self.driver)
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_list_nodes(self):
"""
Test list_nodes functionality.
"""
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 3)
node = nodes[0]
self.assertEqual(node.id, '5')
self.assertEqual(node.name, 'Compute 5')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, None)
self.assertEqual(node.public_ips[0].address, '192.168.0.1')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, None)
self.assertEqual(node.public_ips[1].address, '192.168.1.1')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.private_ips, [])
self.assertEqual(node.image.id, '5')
self.assertEqual(node.image.extra['dev'], 'sda1')
node = nodes[1]
self.assertEqual(node.id, '15')
self.assertEqual(node.name, 'Compute 15')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, None)
self.assertEqual(node.public_ips[0].address, '192.168.0.2')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, None)
self.assertEqual(node.public_ips[1].address, '192.168.1.2')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.private_ips, [])
self.assertEqual(node.image.id, '15')
self.assertEqual(node.image.extra['dev'], 'sda1')
node = nodes[2]
self.assertEqual(node.id, '25')
self.assertEqual(node.name, 'Compute 25')
self.assertEqual(node.state,
NodeState.UNKNOWN)
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, None)
self.assertEqual(node.public_ips[0].address, '192.168.0.3')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, None)
self.assertEqual(node.public_ips[1].address, '192.168.1.3')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.private_ips, [])
self.assertEqual(node.image, None)
def test_list_images(self):
"""
Test list_images functionality.
"""
images = self.driver.list_images()
self.assertEqual(len(images), 2)
image = images[0]
self.assertEqual(image.id, '5')
self.assertEqual(image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(image.extra['size'], '2048')
self.assertEqual(image.extra['url'],
'file:///images/ubuntu/jaunty.img')
image = images[1]
self.assertEqual(image.id, '15')
self.assertEqual(image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(image.extra['size'], '2048')
self.assertEqual(image.extra['url'],
'file:///images/ubuntu/jaunty.img')
def test_list_sizes(self):
"""
Test list_sizes functionality.
"""
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 3)
size = sizes[0]
self.assertEqual(size.id, '1')
self.assertEqual(size.name, 'small')
self.assertEqual(size.ram, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[1]
self.assertEqual(size.id, '2')
self.assertEqual(size.name, 'medium')
self.assertEqual(size.ram, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[2]
self.assertEqual(size.id, '3')
self.assertEqual(size.name, 'large')
self.assertEqual(size.ram, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
def test_list_locations(self):
"""
Test list_locations functionality.
"""
locations = self.driver.list_locations()
self.assertEqual(len(locations), 1)
location = locations[0]
self.assertEqual(location.id, '0')
self.assertEqual(location.name, '')
self.assertEqual(location.country, '')
def test_ex_list_networks(self):
"""
Test ex_list_networks functionality.
"""
networks = self.driver.ex_list_networks()
self.assertEqual(len(networks), 2)
network = networks[0]
self.assertEqual(network.id, '5')
self.assertEqual(network.name, 'Network 5')
self.assertEqual(network.address, '192.168.0.0')
self.assertEqual(network.size, '256')
network = networks[1]
self.assertEqual(network.id, '15')
self.assertEqual(network.name, 'Network 15')
self.assertEqual(network.address, '192.168.1.0')
self.assertEqual(network.size, '256')
def test_ex_node_action(self):
"""
Test ex_node_action functionality.
"""
node = Node(5, None, None, None, None, self.driver)
ret = self.driver.ex_node_action(node, ACTION.STOP)
self.assertTrue(ret)
class OpenNebula_2_0_Tests(unittest.TestCase, OpenNebulaCaseMixin):
"""
OpenNebula.org test suite for OpenNebula v2.0 through v2.2.
"""
def setUp(self):
"""
Setup test environment.
"""
OpenNebulaNodeDriver.connectionCls.conn_classes = (
OpenNebula_2_0_MockHttp, OpenNebula_2_0_MockHttp)
self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('2.0',))
def test_create_node(self):
"""
Test create_node functionality.
"""
image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
size = OpenNebulaNodeSize(id=1, name='small', ram=1024, cpu=1,
disk=None, bandwidth=None, price=None,
driver=self.driver)
networks = list()
networks.append(OpenNebulaNetwork(id=5, name='Network 5',
address='192.168.0.0', size=256, driver=self.driver))
networks.append(OpenNebulaNetwork(id=15, name='Network 15',
address='192.168.1.0', size=256, driver=self.driver))
context = {'hostname': 'compute-5'}
node = self.driver.create_node(name='Compute 5', image=image,
size=size, networks=networks,
context=context)
self.assertEqual(node.id, '5')
self.assertEqual(node.name, 'Compute 5')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, 'Network 5')
self.assertEqual(node.public_ips[0].address, '192.168.0.1')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:01')
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, 'Network 15')
self.assertEqual(node.public_ips[1].address, '192.168.1.1')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:01')
self.assertEqual(node.private_ips, [])
self.assertTrue(len([size for size in self.driver.list_sizes() \
if size.id == node.size.id]) == 1)
self.assertEqual(node.image.id, '5')
self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(node.image.extra['type'], 'DISK')
self.assertEqual(node.image.extra['target'], 'hda')
context = node.extra['context']
self.assertEqual(context['hostname'], 'compute-5')
def test_destroy_node(self):
"""
Test destroy_node functionality.
"""
node = Node(5, None, None, None, None, self.driver)
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_list_nodes(self):
"""
Test list_nodes functionality.
"""
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 3)
node = nodes[0]
self.assertEqual(node.id, '5')
self.assertEqual(node.name, 'Compute 5')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, 'Network 5')
self.assertEqual(node.public_ips[0].address, '192.168.0.1')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:01')
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, 'Network 15')
self.assertEqual(node.public_ips[1].address, '192.168.1.1')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:01')
self.assertEqual(node.private_ips, [])
self.assertTrue(len([size for size in self.driver.list_sizes() \
if size.id == node.size.id]) == 1)
self.assertEqual(node.size.id, '1')
self.assertEqual(node.size.name, 'small')
self.assertEqual(node.size.ram, 1024)
self.assertTrue(node.size.cpu is None or isinstance(node.size.cpu,
int))
self.assertTrue(node.size.vcpu is None or isinstance(node.size.vcpu,
int))
self.assertEqual(node.size.cpu, 1)
self.assertEqual(node.size.vcpu, None)
self.assertEqual(node.size.disk, None)
self.assertEqual(node.size.bandwidth, None)
self.assertEqual(node.size.price, None)
self.assertTrue(len([image for image in self.driver.list_images() \
if image.id == node.image.id]) == 1)
self.assertEqual(node.image.id, '5')
self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(node.image.extra['type'], 'DISK')
self.assertEqual(node.image.extra['target'], 'hda')
context = node.extra['context']
self.assertEqual(context['hostname'], 'compute-5')
node = nodes[1]
self.assertEqual(node.id, '15')
self.assertEqual(node.name, 'Compute 15')
self.assertEqual(node.state,
OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE'])
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, 'Network 5')
self.assertEqual(node.public_ips[0].address, '192.168.0.2')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:02')
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, 'Network 15')
self.assertEqual(node.public_ips[1].address, '192.168.1.2')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:02')
self.assertEqual(node.private_ips, [])
self.assertTrue(len([size for size in self.driver.list_sizes() \
if size.id == node.size.id]) == 1)
self.assertEqual(node.size.id, '1')
self.assertEqual(node.size.name, 'small')
self.assertEqual(node.size.ram, 1024)
self.assertTrue(node.size.cpu is None or isinstance(node.size.cpu,
int))
self.assertTrue(node.size.vcpu is None or isinstance(node.size.vcpu,
int))
self.assertEqual(node.size.cpu, 1)
self.assertEqual(node.size.vcpu, None)
self.assertEqual(node.size.disk, None)
self.assertEqual(node.size.bandwidth, None)
self.assertEqual(node.size.price, None)
self.assertTrue(len([image for image in self.driver.list_images() \
if image.id == node.image.id]) == 1)
self.assertEqual(node.image.id, '15')
self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(node.image.extra['type'], 'DISK')
self.assertEqual(node.image.extra['target'], 'hda')
context = node.extra['context']
self.assertEqual(context['hostname'], 'compute-15')
node = nodes[2]
self.assertEqual(node.id, '25')
self.assertEqual(node.name, 'Compute 25')
self.assertEqual(node.state,
NodeState.UNKNOWN)
self.assertEqual(node.public_ips[0].id, '5')
self.assertEqual(node.public_ips[0].name, 'Network 5')
self.assertEqual(node.public_ips[0].address, '192.168.0.3')
self.assertEqual(node.public_ips[0].size, 1)
self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:03')
self.assertEqual(node.public_ips[1].id, '15')
self.assertEqual(node.public_ips[1].name, 'Network 15')
self.assertEqual(node.public_ips[1].address, '192.168.1.3')
self.assertEqual(node.public_ips[1].size, 1)
self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:03')
self.assertEqual(node.private_ips, [])
self.assertEqual(node.size, None)
self.assertEqual(node.image, None)
context = node.extra['context']
self.assertEqual(context, {})
def test_list_images(self):
"""
Test list_images functionality.
"""
images = self.driver.list_images()
self.assertEqual(len(images), 2)
image = images[0]
self.assertEqual(image.id, '5')
self.assertEqual(image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(image.extra['description'],
'Ubuntu 9.04 LAMP Description')
self.assertEqual(image.extra['type'], 'OS')
self.assertEqual(image.extra['size'], '2048')
image = images[1]
self.assertEqual(image.id, '15')
self.assertEqual(image.name, 'Ubuntu 9.04 LAMP')
self.assertEqual(image.extra['description'],
'Ubuntu 9.04 LAMP Description')
self.assertEqual(image.extra['type'], 'OS')
self.assertEqual(image.extra['size'], '2048')
def test_list_sizes(self):
"""
Test list_sizes functionality.
"""
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 4)
size = sizes[0]
self.assertEqual(size.id, '1')
self.assertEqual(size.name, 'small')
self.assertEqual(size.ram, 1024)
self.assertTrue(size.cpu is None or isinstance(size.cpu, int))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 1)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[1]
self.assertEqual(size.id, '2')
self.assertEqual(size.name, 'medium')
self.assertEqual(size.ram, 4096)
self.assertTrue(size.cpu is None or isinstance(size.cpu, int))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 4)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[2]
self.assertEqual(size.id, '3')
self.assertEqual(size.name, 'large')
self.assertEqual(size.ram, 8192)
self.assertTrue(size.cpu is None or isinstance(size.cpu, int))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 8)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[3]
self.assertEqual(size.id, '4')
self.assertEqual(size.name, 'custom')
self.assertEqual(size.ram, 0)
self.assertEqual(size.cpu, 0)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
def test_list_locations(self):
"""
Test list_locations functionality.
"""
locations = self.driver.list_locations()
self.assertEqual(len(locations), 1)
location = locations[0]
self.assertEqual(location.id, '0')
self.assertEqual(location.name, '')
self.assertEqual(location.country, '')
def test_ex_list_networks(self):
"""
Test ex_list_networks functionality.
"""
networks = self.driver.ex_list_networks()
self.assertEqual(len(networks), 2)
network = networks[0]
self.assertEqual(network.id, '5')
self.assertEqual(network.name, 'Network 5')
self.assertEqual(network.address, '192.168.0.0')
self.assertEqual(network.size, '256')
network = networks[1]
self.assertEqual(network.id, '15')
self.assertEqual(network.name, 'Network 15')
self.assertEqual(network.address, '192.168.1.0')
self.assertEqual(network.size, '256')
class OpenNebula_3_0_Tests(unittest.TestCase, OpenNebulaCaseMixin):
"""
OpenNebula.org test suite for OpenNebula v3.0.
"""
def setUp(self):
"""
Setup test environment.
"""
OpenNebulaNodeDriver.connectionCls.conn_classes = (
OpenNebula_3_0_MockHttp, OpenNebula_3_0_MockHttp)
self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.0',))
def test_ex_list_networks(self):
"""
Test ex_list_networks functionality.
"""
networks = self.driver.ex_list_networks()
self.assertEqual(len(networks), 2)
network = networks[0]
self.assertEqual(network.id, '5')
self.assertEqual(network.name, 'Network 5')
self.assertEqual(network.address, '192.168.0.0')
self.assertEqual(network.size, '256')
self.assertEqual(network.extra['public'], 'YES')
network = networks[1]
self.assertEqual(network.id, '15')
self.assertEqual(network.name, 'Network 15')
self.assertEqual(network.address, '192.168.1.0')
self.assertEqual(network.size, '256')
self.assertEqual(network.extra['public'], 'NO')
def test_ex_node_set_save_name(self):
"""
        Test ex_node_set_save_name functionality.
"""
image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
node = Node(5, None, None, None, None, self.driver, image=image)
ret = self.driver.ex_node_set_save_name(node, 'test')
self.assertTrue(ret)
class OpenNebula_3_2_Tests(unittest.TestCase, OpenNebulaCaseMixin):
"""
OpenNebula.org test suite for OpenNebula v3.2.
"""
def setUp(self):
"""
Setup test environment.
"""
OpenNebulaNodeDriver.connectionCls.conn_classes = (
OpenNebula_3_2_MockHttp, OpenNebula_3_2_MockHttp)
self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.2',))
def test_reboot_node(self):
"""
Test reboot_node functionality.
"""
image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver)
node = Node(5, None, None, None, None, self.driver, image=image)
ret = self.driver.reboot_node(node)
self.assertTrue(ret)
def test_list_sizes(self):
"""
        Test list_sizes functionality.
"""
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 3)
size = sizes[0]
self.assertEqual(size.id, '1')
self.assertEqual(size.name, 'small')
self.assertEqual(size.ram, 1024)
self.assertTrue(size.cpu is None or isinstance(size.cpu, float))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 1)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[1]
self.assertEqual(size.id, '2')
self.assertEqual(size.name, 'medium')
self.assertEqual(size.ram, 4096)
self.assertTrue(size.cpu is None or isinstance(size.cpu, float))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 4)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[2]
self.assertEqual(size.id, '3')
self.assertEqual(size.name, 'large')
self.assertEqual(size.ram, 8192)
self.assertTrue(size.cpu is None or isinstance(size.cpu, float))
self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int))
self.assertEqual(size.cpu, 8)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
class OpenNebula_3_6_Tests(unittest.TestCase, OpenNebulaCaseMixin):
"""
OpenNebula.org test suite for OpenNebula v3.6.
"""
def setUp(self):
"""
Setup test environment.
"""
OpenNebulaNodeDriver.connectionCls.conn_classes = (
OpenNebula_3_6_MockHttp, OpenNebula_3_6_MockHttp)
self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.6',))
def test_create_volume(self):
new_volume = self.driver.create_volume(1000, 'test-volume')
        self.assertEqual(new_volume.id, '5')
        self.assertEqual(new_volume.size, 1000)
        self.assertEqual(new_volume.name, 'test-volume')
def test_destroy_volume(self):
images = self.driver.list_images()
self.assertEqual(len(images), 2)
image = images[0]
ret = self.driver.destroy_volume(image)
self.assertTrue(ret)
def test_attach_volume(self):
nodes = self.driver.list_nodes()
node = nodes[0]
images = self.driver.list_images()
image = images[0]
ret = self.driver.attach_volume(node, image, 'sda')
self.assertTrue(ret)
def test_detach_volume(self):
images = self.driver.list_images()
image = images[1]
ret = self.driver.detach_volume(image)
self.assertTrue(ret)
nodes = self.driver.list_nodes()
# node with only a single associated image
node = nodes[1]
ret = self.driver.detach_volume(node.image)
self.assertFalse(ret)
def test_list_volumes(self):
volumes = self.driver.list_volumes()
self.assertEqual(len(volumes), 2)
volume = volumes[0]
self.assertEqual(volume.id, '5')
self.assertEqual(volume.size, 2048)
self.assertEqual(volume.name, 'Ubuntu 9.04 LAMP')
volume = volumes[1]
self.assertEqual(volume.id, '15')
self.assertEqual(volume.size, 1024)
self.assertEqual(volume.name, 'Debian Sid')
class OpenNebula_3_8_Tests(unittest.TestCase, OpenNebulaCaseMixin):
"""
OpenNebula.org test suite for OpenNebula v3.8.
"""
def setUp(self):
"""
Setup test environment.
"""
OpenNebulaNodeDriver.connectionCls.conn_classes = (
OpenNebula_3_8_MockHttp, OpenNebula_3_8_MockHttp)
self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.8',))
def test_list_sizes(self):
"""
        Test list_sizes functionality.
"""
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 3)
size = sizes[0]
self.assertEqual(size.id, '1')
self.assertEqual(size.name, 'small')
self.assertEqual(size.ram, 1024)
self.assertEqual(size.cpu, 1)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[1]
self.assertEqual(size.id, '2')
self.assertEqual(size.name, 'medium')
self.assertEqual(size.ram, 4096)
self.assertEqual(size.cpu, 4)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
size = sizes[2]
self.assertEqual(size.id, '3')
self.assertEqual(size.name, 'large')
self.assertEqual(size.ram, 8192)
self.assertEqual(size.cpu, 8)
self.assertEqual(size.vcpu, None)
self.assertEqual(size.disk, None)
self.assertEqual(size.bandwidth, None)
self.assertEqual(size.price, None)
class OpenNebula_1_4_MockHttp(MockHttp):
"""
Mock HTTP server for testing v1.4 of the OpenNebula.org compute driver.
"""
fixtures = ComputeFileFixtures('opennebula_1_4')
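    # MockHttp routes each request by URL path: '/compute' is handled by
    # _compute(), '/compute/5' by _compute_5(), and so on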
def _compute(self, method, url, body, headers):
"""
Compute pool resources.
"""
if method == 'GET':
body = self.fixtures.load('computes.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('compute_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _storage(self, method, url, body, headers):
"""
Storage pool resources.
"""
if method == 'GET':
body = self.fixtures.load('storage.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('disk_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _network(self, method, url, body, headers):
"""
Network pool resources.
"""
if method == 'GET':
body = self.fixtures.load('networks.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('network_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _compute_5(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _compute_15(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _compute_25(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_25.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _storage_5(self, method, url, body, headers):
"""
Storage entry resource.
"""
if method == 'GET':
body = self.fixtures.load('disk_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _storage_15(self, method, url, body, headers):
"""
Storage entry resource.
"""
if method == 'GET':
body = self.fixtures.load('disk_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _network_5(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures.load('network_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
def _network_15(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures.load('network_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.OK, body, {},
httplib.responses[httplib.OK])
class OpenNebula_2_0_MockHttp(MockHttp):
"""
Mock HTTP server for testing v2.0 through v3.2 of the OpenNebula.org
compute driver.
"""
fixtures = ComputeFileFixtures('opennebula_2_0')
def _compute(self, method, url, body, headers):
"""
Compute pool resources.
"""
if method == 'GET':
body = self.fixtures.load('compute_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('compute_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _storage(self, method, url, body, headers):
"""
Storage pool resources.
"""
if method == 'GET':
body = self.fixtures.load('storage_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('storage_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _network(self, method, url, body, headers):
"""
Network pool resources.
"""
if method == 'GET':
body = self.fixtures.load('network_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('network_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _compute_5(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _compute_15(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _compute_25(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_25.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _storage_5(self, method, url, body, headers):
"""
Storage entry resource.
"""
if method == 'GET':
body = self.fixtures.load('storage_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _storage_15(self, method, url, body, headers):
"""
Storage entry resource.
"""
if method == 'GET':
body = self.fixtures.load('storage_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _network_5(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures.load('network_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _network_15(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures.load('network_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
class OpenNebula_3_0_MockHttp(OpenNebula_2_0_MockHttp):
"""
Mock HTTP server for testing v3.0 of the OpenNebula.org compute driver.
"""
fixtures_3_0 = ComputeFileFixtures('opennebula_3_0')
def _network(self, method, url, body, headers):
"""
Network pool resources.
"""
if method == 'GET':
body = self.fixtures_3_0.load('network_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures.load('network_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _network_5(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures_3_0.load('network_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _network_15(self, method, url, body, headers):
"""
Network entry resource.
"""
if method == 'GET':
body = self.fixtures_3_0.load('network_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
class OpenNebula_3_2_MockHttp(OpenNebula_3_0_MockHttp):
"""
Mock HTTP server for testing v3.2 of the OpenNebula.org compute driver.
"""
fixtures_3_2 = ComputeFileFixtures('opennebula_3_2')
def _compute_5(self, method, url, body, headers):
"""
Compute entry resource.
"""
if method == 'GET':
body = self.fixtures.load('compute_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _instance_type(self, method, url, body, headers):
"""
Instance type pool.
"""
if method == 'GET':
body = self.fixtures_3_2.load('instance_type_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
class OpenNebula_3_6_MockHttp(OpenNebula_3_2_MockHttp):
"""
Mock HTTP server for testing v3.6 of the OpenNebula.org compute driver.
"""
fixtures_3_6 = ComputeFileFixtures('opennebula_3_6')
def _storage(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('storage_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'POST':
body = self.fixtures_3_6.load('storage_5.xml')
return (httplib.CREATED, body, {},
httplib.responses[httplib.CREATED])
def _compute_5(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures_3_6.load('compute_5.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _compute_5_action(self, method, url, body, headers):
body = self.fixtures_3_6.load('compute_5.xml')
if method == 'POST':
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'GET':
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _compute_15(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures_3_6.load('compute_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if method == 'PUT':
body = ""
return (httplib.ACCEPTED, body, {},
httplib.responses[httplib.ACCEPTED])
if method == 'DELETE':
body = ""
return (httplib.NO_CONTENT, body, {},
httplib.responses[httplib.NO_CONTENT])
def _storage_10(self, method, url, body, headers):
"""
Storage entry resource.
"""
if method == 'GET':
body = self.fixtures_3_6.load('disk_10.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _storage_15(self, method, url, body, headers):
"""
Storage entry resource.
"""
if method == 'GET':
body = self.fixtures_3_6.load('disk_15.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
class OpenNebula_3_8_MockHttp(OpenNebula_3_2_MockHttp):
"""
Mock HTTP server for testing v3.8 of the OpenNebula.org compute driver.
"""
fixtures_3_8 = ComputeFileFixtures('opennebula_3_8')
def _instance_type(self, method, url, body, headers):
"""
Instance type pool.
"""
if method == 'GET':
body = self.fixtures_3_8.load('instance_type_collection.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _instance_type_small(self, method, url, body, headers):
"""
Small instance type.
"""
if method == 'GET':
body = self.fixtures_3_8.load('instance_type_small.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _instance_type_medium(self, method, url, body, headers):
"""
        Medium instance type.
"""
if method == 'GET':
body = self.fixtures_3_8.load('instance_type_medium.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _instance_type_large(self, method, url, body, headers):
"""
        Large instance type.
"""
if method == 'GET':
body = self.fixtures_3_8.load('instance_type_large.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from apache_beam.io.aws.clients.s3 import messages
from apache_beam.options import pipeline_options
try:
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=ungrouped-imports
import boto3
except ImportError:
boto3 = None
class Client(object):
"""
Wrapper for boto3 library
"""
def __init__(self, options):
assert boto3 is not None, 'Missing boto3 requirement'
if isinstance(options, pipeline_options.PipelineOptions):
s3_options = options.view_as(pipeline_options.S3Options)
access_key_id = s3_options.s3_access_key_id
secret_access_key = s3_options.s3_secret_access_key
session_token = s3_options.s3_session_token
endpoint_url = s3_options.s3_endpoint_url
use_ssl = not s3_options.s3_disable_ssl
region_name = s3_options.s3_region_name
api_version = s3_options.s3_api_version
verify = s3_options.s3_verify
else:
access_key_id = options.get('s3_access_key_id')
secret_access_key = options.get('s3_secret_access_key')
session_token = options.get('s3_session_token')
endpoint_url = options.get('s3_endpoint_url')
use_ssl = not options.get('s3_disable_ssl', False)
region_name = options.get('s3_region_name')
api_version = options.get('s3_api_version')
verify = options.get('s3_verify')
session = boto3.session.Session()
self.client = session.client(
service_name='s3',
region_name=region_name,
api_version=api_version,
use_ssl=use_ssl,
verify=verify,
endpoint_url=endpoint_url,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
aws_session_token=session_token)
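  # Usage sketch (illustrative values): the wrapper accepts either a
  # PipelineOptions instance or a plain dict with the same s3_* keys, e.g.
  #   client = Client({'s3_region_name': 'us-east-1', 's3_verify': None})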
def get_object_metadata(self, request):
r"""Retrieves an object's metadata.
Args:
request: (GetRequest) input message
Returns:
(Object) The response message.
"""
kwargs = {'Bucket': request.bucket, 'Key': request.object}
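    # boto3 signals failures with botocore.exceptions.ClientError, whose
    # .response dict carries the error message and HTTP status code that
    # we re-raise as an S3ClientError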
try:
boto_response = self.client.head_object(**kwargs)
except Exception as e:
message = e.response['Error']['Message']
code = e.response['ResponseMetadata']['HTTPStatusCode']
raise messages.S3ClientError(message, code)
item = messages.Item(
boto_response['ETag'],
request.object,
boto_response['LastModified'],
boto_response['ContentLength'],
boto_response['ContentType'])
return item
def get_range(self, request, start, end):
r"""Retrieves an object's contents.
Args:
request: (GetRequest) request
Returns:
(bytes) The response message.
"""
try:
boto_response = self.client.get_object(
Bucket=request.bucket,
Key=request.object,
Range='bytes={}-{}'.format(start, end - 1))
except Exception as e:
message = e.response['Error']['Message']
code = e.response['ResponseMetadata']['HTTPStatusCode']
raise messages.S3ClientError(message, code)
return boto_response['Body'].read() # A bytes object
def list(self, request):
r"""Retrieves a list of objects matching the criteria.
Args:
request: (ListRequest) input message
Returns:
(ListResponse) The response message.
"""
kwargs = {'Bucket': request.bucket, 'Prefix': request.prefix}
if request.continuation_token is not None:
kwargs['ContinuationToken'] = request.continuation_token
try:
boto_response = self.client.list_objects_v2(**kwargs)
except Exception as e:
message = e.response['Error']['Message']
code = e.response['ResponseMetadata']['HTTPStatusCode']
raise messages.S3ClientError(message, code)
if boto_response['KeyCount'] == 0:
message = 'Tried to list nonexistent S3 path: s3://%s/%s' % (
request.bucket, request.prefix)
raise messages.S3ClientError(message, 404)
items = [
messages.Item(
etag=content['ETag'],
key=content['Key'],
last_modified=content['LastModified'],
size=content['Size']) for content in boto_response['Contents']
]
try:
next_token = boto_response['NextContinuationToken']
except KeyError:
next_token = None
response = messages.ListResponse(items, next_token)
return response
def create_multipart_upload(self, request):
r"""Initates a multipart upload to S3 for a given object
Args:
request: (UploadRequest) input message
Returns:
(UploadResponse) The response message.
"""
try:
boto_response = self.client.create_multipart_upload(
Bucket=request.bucket,
Key=request.object,
ContentType=request.mime_type)
response = messages.UploadResponse(boto_response['UploadId'])
except Exception as e:
message = e.response['Error']['Message']
code = e.response['ResponseMetadata']['HTTPStatusCode']
raise messages.S3ClientError(message, code)
return response
def upload_part(self, request):
r"""Uploads part of a file to S3 during a multipart upload
Args:
request: (UploadPartRequest) input message
Returns:
(UploadPartResponse) The response message.
"""
try:
boto_response = self.client.upload_part(
Body=request.bytes,
Bucket=request.bucket,
Key=request.object,
PartNumber=request.part_number,
UploadId=request.upload_id)
response = messages.UploadPartResponse(
boto_response['ETag'], request.part_number)
return response
except Exception as e:
message = e.response['Error']['Message']
code = e.response['ResponseMetadata']['HTTPStatusCode']
raise messages.S3ClientError(message, code)
def complete_multipart_upload(self, request):
r"""Completes a multipart upload to S3
Args:
      request: (CompleteMultipartUploadRequest) input message
Returns:
(Void) The response message.
"""
parts = {'Parts': request.parts}
try:
self.client.complete_multipart_upload(
Bucket=request.bucket,
Key=request.object,
UploadId=request.upload_id,
MultipartUpload=parts)
except Exception as e:
message = e.response['Error']['Message']
code = e.response['ResponseMetadata']['HTTPStatusCode']
raise messages.S3ClientError(message, code)
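  # The multipart flow across the three methods above, as a sketch:
  #   1. create_multipart_upload() starts the upload and yields an UploadId
  #   2. upload_part() is called once per chunk and yields that part's ETag
  #   3. complete_multipart_upload() stitches the parts together, with
  #      request.parts holding [{'ETag': ..., 'PartNumber': ...}, ...]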
def delete(self, request):
r"""Deletes given object from bucket
Args:
request: (DeleteRequest) input message
Returns:
(void) Void, otherwise will raise if an error occurs
"""
try:
self.client.delete_object(Bucket=request.bucket, Key=request.object)
except Exception as e:
message = e.response['Error']['Message']
code = e.response['ResponseMetadata']['HTTPStatusCode']
raise messages.S3ClientError(message, code)
def delete_batch(self, request):
    aws_request = {
        'Bucket': request.bucket,
        'Delete': {
            'Objects': [{
                'Key': key
            } for key in request.objects]
        }
    }
try:
aws_response = self.client.delete_objects(**aws_request)
except Exception as e:
message = e.response['Error']['Message']
code = int(e.response['ResponseMetadata']['HTTPStatusCode'])
raise messages.S3ClientError(message, code)
deleted = [obj['Key'] for obj in aws_response.get('Deleted', [])]
failed = [obj['Key'] for obj in aws_response.get('Errors', [])]
errors = [
messages.S3ClientError(obj['Message'], obj['Code'])
for obj in aws_response.get('Errors', [])
]
return messages.DeleteBatchResponse(deleted, failed, errors)
def copy(self, request):
try:
copy_src = {'Bucket': request.src_bucket, 'Key': request.src_key}
self.client.copy(copy_src, request.dest_bucket, request.dest_key)
except Exception as e:
message = e.response['Error']['Message']
code = e.response['ResponseMetadata']['HTTPStatusCode']
raise messages.S3ClientError(message, code)
|
|
# file eulfedora/xml.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from eulxml import xmlmap
from eulxml.xmlmap.fields import Field, SingleNodeManager, NodeMapper, \
DateTimeField
from eulfedora.util import datetime_to_fedoratime, fedoratime_to_datetime
class FedoraDateMapper(xmlmap.fields.DateTimeMapper):
def to_python(self, node):
rep = self.XPATH(node)
return fedoratime_to_datetime(rep)
def to_xml(self, dt):
return datetime_to_fedoratime(dt)
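    # e.g. to_python turns the Fedora string '2010-05-20T18:42:52.766Z'
    # into a timezone-aware datetime.datetime; to_xml is the inverse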
class FedoraDateField(xmlmap.fields.Field):
"""Map an XPath expression to a single Python `datetime.datetime`.
Assumes date-time format in use by Fedora, e.g. 2010-05-20T18:42:52.766Z
"""
def __init__(self, xpath):
        super(FedoraDateField, self).__init__(xpath,
            manager=xmlmap.fields.SingleNodeManager(),
            mapper=FedoraDateMapper())
class FedoraDateListField(xmlmap.fields.Field):
"""Map an XPath expression to a list of Python `datetime.datetime`.
Assumes date-time format in use by Fedora, e.g. 2010-05-20T18:42:52.766Z.
If the XPath expression evaluates to an empty NodeList, evaluates to
an empty list."""
def __init__(self, xpath):
        super(FedoraDateListField, self).__init__(xpath,
            manager=xmlmap.fields.NodeListManager(),
            mapper=FedoraDateMapper())
# xml objects to wrap around xml returns from fedora
FEDORA_MANAGE_NS = 'http://www.fedora.info/definitions/1/0/management/'
FEDORA_ACCESS_NS = 'http://www.fedora.info/definitions/1/0/access/'
FEDORA_DATASTREAM_NS = 'info:fedora/fedora-system:def/dsCompositeModel#'
FEDORA_TYPES_NS = 'http://www.fedora.info/definitions/1/0/types/'
FEDORA_AUDIT_NS = 'info:fedora/fedora-system:def/audit#'
class _FedoraBase(xmlmap.XmlObject):
'''Common Fedora REST API namespace declarations.'''
ROOT_NAMESPACES = {
'm' : FEDORA_MANAGE_NS,
'a' : FEDORA_ACCESS_NS,
'ds': FEDORA_DATASTREAM_NS,
't': FEDORA_TYPES_NS,
'audit': FEDORA_AUDIT_NS
}
class ObjectDatastream(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for a single datastream as returned
by :meth:`REST_API.listDatastreams` """
ROOT_NAME = 'datastream'
dsid = xmlmap.StringField('@dsid')
"datastream id - `@dsid`"
label = xmlmap.StringField('@label')
"datastream label - `@label`"
mimeType = xmlmap.StringField('@mimeType')
"datastream mime type - `@mimeType`"
class ObjectDatastreams(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for the list of a single object's
datastreams, as returned by :meth:`REST_API.listDatastreams`"""
# listDatastreams result default namespace is fedora access
ROOT_NAME = 'objectDatastreams'
pid = xmlmap.StringField('@pid')
"object pid - `@pid`"
datastreams = xmlmap.NodeListField('a:datastream', ObjectDatastream)
"list of :class:`ObjectDatastream`"
class ObjectProfile(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for object profile information
returned by :meth:`REST_API.getObjectProfile`."""
# objectProfile result default namespace is fedora access
ROOT_NAME = 'objectProfile'
label = xmlmap.StringField('a:objLabel')
"object label"
owner = xmlmap.StringField('a:objOwnerId')
"object owner"
created = FedoraDateField('a:objCreateDate')
"date the object was created"
modified = FedoraDateField('a:objLastModDate')
"date the object was last modified"
# do we care about these? probably not useful in this context...
# - disseminator index view url
# - object item index view url
state = xmlmap.StringField('a:objState')
"object state (A/I/D - Active, Inactive, Deleted)"
class ObjectHistory(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for object history information
returned by :meth:`REST_API.getObjectHistory`."""
# objectHistory result default namespace is fedora access
ROOT_NAME = 'fedoraObjectHistory'
pid = xmlmap.StringField('@pid')
changed = FedoraDateListField('a:objectChangeDate')
class ObjectMethodService(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for object method services; included
in :class:`ObjectMethods` for data returned by :meth:`REST_API.listMethods`."""
# default namespace is fedora access
ROOT_NAME = 'sDef'
pid = xmlmap.StringField('@pid')
methods = xmlmap.StringListField('a:method/@name')
class ObjectMethods(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for object method information
returned by :meth:`REST_API.listMethods`."""
# default namespace is fedora access
ROOT_NAME = 'objectMethods'
service_definitions = xmlmap.NodeListField('a:sDef', ObjectMethodService)
class DatastreamProfile(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for datastream profile information
returned by :meth:`REST_API.getDatastream`."""
# default namespace is fedora manage
ROOT_NAME = 'datastreamProfile'
label = xmlmap.StringField('m:dsLabel')
"datastream label"
version_id = xmlmap.StringField('m:dsVersionID')
"current datastream version id"
created = FedoraDateField('m:dsCreateDate')
"date the datastream was created"
state = xmlmap.StringField('m:dsState')
"datastream state (A/I/D - Active, Inactive, Deleted)"
mimetype = xmlmap.StringField('m:dsMIME')
"datastream mimetype"
format = xmlmap.StringField('m:dsFormatURI')
"format URI for the datastream, if any"
control_group = xmlmap.StringField('m:dsControlGroup')
"datastream control group (inline XML, Managed, etc)"
size = xmlmap.IntegerField('m:dsSize') # not reliable for managed datastreams as of Fedora 3.3
"integer; size of the datastream content"
versionable = xmlmap.SimpleBooleanField('m:dsVersionable', 'true', 'false')
"boolean; indicates whether or not the datastream is currently being versioned"
# infoType ?
# location ?
checksum = xmlmap.StringField('m:dsChecksum')
"checksum for current datastream contents"
checksum_type = xmlmap.StringField('m:dsChecksumType')
"type of checksum"
checksum_valid = xmlmap.SimpleBooleanField('m:dsChecksumValid', 'true', 'false')
'''Boolean flag indicating if the current checksum is valid. Only
present when profile is accessed via :meth:`REST_API.compareDatastreamChecksum`'''
class NewPids(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for a list of pids as returned by
:meth:`REST_API.getNextPID`."""
    # NOTE: the default namespace here should be the manage namespace, but
    # the namespace was missing until Fedora 3.5. Match with or without a
    # namespace, to support Fedora 3.5 as well as older versions.
pids = xmlmap.StringListField('pid|m:pid')
class RepositoryDescriptionPid(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for PID section of :class:`RepositoryDescription`"""
# default namespace is fedora access
namespace = xmlmap.StringField('a:PID-namespaceIdentifier')
"PID namespace"
delimiter = xmlmap.StringField('a:PID-delimiter')
"PID delimiter"
sample = xmlmap.StringField('a:PID-sample')
"sample PID"
retain_pids = xmlmap.StringField('a:retainPID')
"list of pid namespaces configured to be retained"
class RepositoryDescriptionOAI(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for OAI section of :class:`RepositoryDescription`"""
# default namespace is fedora access
namespace = xmlmap.StringField('a:OAI-namespaceIdentifier')
"OAI namespace"
delimiter = xmlmap.StringField('a:OAI-delimiter')
"OAI delimiter"
sample = xmlmap.StringField('a:OAI-sample')
"sample OAI id"
class RepositoryDescription(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for a repository description as returned
by :meth:`API_A_LITE.describeRepository` """
# default namespace is fedora access
name = xmlmap.StringField('a:repositoryName')
"repository name"
base_url = xmlmap.StringField('a:repositoryBaseURL')
"base url"
version = xmlmap.StringField('a:repositoryVersion')
"version of Fedora being run"
pid_info = xmlmap.NodeField('a:repositoryPID', RepositoryDescriptionPid)
":class:`RepositoryDescriptionPid` - configuration info for pids"
    oai_info = xmlmap.NodeField('a:repositoryOAI-identifier', RepositoryDescriptionOAI)
":class:`RepositoryDescriptionOAI` - configuration info for OAI"
search_url = xmlmap.StringField('a:sampleSearch-URL')
"sample search url"
access_url = xmlmap.StringField('a:sampleAccess-URL')
"sample access url"
oai_url = xmlmap.StringField('a:sampleOAI-URL')
"sample OAI url"
admin_email = xmlmap.StringListField("a:adminEmail")
"administrator emails"
class SearchResult(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for a single entry in the results
returned by :meth:`REST_API.findObjects`"""
# default namespace is fedora types
ROOT_NAME = 'objectFields'
pid = xmlmap.StringField('t:pid')
"pid"
class SearchResults(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for the results returned by
:meth:`REST_API.findObjects`"""
# default namespace is fedora types
ROOT_NAME = 'result'
session_token = xmlmap.StringField('t:listSession/t:token')
"session token"
cursor = xmlmap.IntegerField('t:listSession/t:cursor')
"session cursor"
expiration_date = DateTimeField('t:listSession/t:expirationDate')
"session experation date"
results = xmlmap.NodeListField('t:resultList/t:objectFields', SearchResult)
"search results - list of :class:`SearchResult`"
class DatastreamHistory(_FedoraBase):
""":class:`~eulxml.xmlmap.XmlObject` for datastream history
information returned by :meth:`REST_API.getDatastreamHistory`."""
# default namespace is fedora manage
ROOT_NAME = 'datastreamHistory'
pid = xmlmap.StringField('@pid')
"pid"
dsid = xmlmap.StringField('@dsID')
"datastream id"
versions = xmlmap.NodeListField('m:datastreamProfile', DatastreamProfile)
'list of :class:`DatastreamProfile` objects for each version'
DS_NAMESPACES = {'ds': FEDORA_DATASTREAM_NS }
class DsTypeModel(xmlmap.XmlObject):
ROOT_NAMESPACES = DS_NAMESPACES
id = xmlmap.StringField('@ID')
mimetype = xmlmap.StringField('ds:form/@MIME')
format_uri = xmlmap.StringField('ds:form/@FORMAT_URI')
class DsCompositeModel(xmlmap.XmlObject):
""":class:`~eulxml.xmlmap.XmlObject` for a
:class:`~eulfedora.models.ContentModel`'s DS-COMPOSITE-MODEL
datastream"""
ROOT_NAME = 'dsCompositeModel'
ROOT_NS = FEDORA_DATASTREAM_NS
ROOT_NAMESPACES = DS_NAMESPACES
# TODO: this feels like it could be generalized into a dict-like field
# class.
TYPE_MODEL_XPATH = 'ds:dsTypeModel[@ID=$dsid]'
def get_type_model(self, dsid, create=False):
field = Field(self.TYPE_MODEL_XPATH,
manager=SingleNodeManager(instantiate_on_get=create),
mapper=NodeMapper(DsTypeModel))
context = { 'namespaces': DS_NAMESPACES,
'dsid': dsid }
return field.get_for_node(self.node, context)
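    # e.g. get_type_model('DC') returns the DsTypeModel whose @ID is "DC",
    # or None if absent; with create=True the node is created on access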
class AuditTrailRecord(_FedoraBase):
''':class:`~eulxml.xmlmap.XmlObject` for a single audit entry in
an :class:`AuditTrail`.
'''
ROOT_NAME = 'record'
ROOT_NS = FEDORA_AUDIT_NS
id = xmlmap.StringField('@ID')
'id for this audit trail record'
process_type = xmlmap.StringField('audit:process/@type')
'type of modification, e.g. `Fedora API-M`'
action = xmlmap.StringField('audit:action')
'the particular action taken, e.g. `addDatastream`'
component = xmlmap.StringField('audit:componentID')
'the component that was modified, e.g. a datastream ID such as `DC` or `RELS-EXT`'
user = xmlmap.StringField('audit:responsibility')
'the user or account responsible for the change (e.g., `fedoraAdmin`)'
date = FedoraDateField('audit:date')
'date the change was made, as :class:`datetime.datetime`'
message = xmlmap.StringField('audit:justification')
'justification for the change, if any (i.e., log message passed to save method)'
class AuditTrail(_FedoraBase):
''':class:`~eulxml.xmlmap.XmlObject` for the Fedora built-in audit trail
that is automatically populated from any modifications made to an object.
'''
records = xmlmap.NodeListField('audit:record', AuditTrailRecord)
'list of :class:`AuditTrailRecord` entries'
class FoxmlContentDigest(_FedoraBase):
'Content digest, as stored in full foxml (e.g. object export)'
#: digest type, e.g. MD5
type = xmlmap.StringField('@TYPE')
#: digest value
digest = xmlmap.StringField('@DIGEST')
class FoxmlDatastreamVersion(_FedoraBase):
'Foxml datastream version in full foxml, e.g. object export'
#: datastream version id
id = xmlmap.StringField('@ID')
#: mimetype
mimetype = xmlmap.StringField('@MIMETYPE')
#: content digest
content_digest = xmlmap.NodeListField('foxml:contentDigest',
FoxmlContentDigest)
class FoxmlDatastream(_FedoraBase):
'Foxml datastream in full foxml, e.g. object export'
#: datastream id
id = xmlmap.StringField('@ID')
#: list of versions
versions = xmlmap.NodeListField('foxml:datastreamVersion',
FoxmlDatastreamVersion)
class FoxmlDigitalObject(_FedoraBase):
'''Minimal :class:`~eulxml.xmlmap.XmlObject` for Foxml
DigitalObject as returned by :meth:`REST_API.getObjectXML`, to
provide access to the Fedora audit trail.
'''
audit_trail = xmlmap.NodeField('foxml:datastream[@ID="AUDIT"]/foxml:datastreamVersion/foxml:xmlContent/audit:auditTrail', AuditTrail)
'Fedora audit trail, as instance of :class:`AuditTrail`'
datastreams = xmlmap.NodeListField('foxml:datastream', FoxmlDatastream)
|
|
import os
import six
import shutil
import tempfile
import contextlib
try:
from unittest import mock
except ImportError:
import mock
from twisted.trial import unittest
from twisted.protocols.policies import WrappingFactory
from twisted.python.filepath import FilePath
from twisted.internet import reactor, defer, error
from twisted.web import server, static, util, resource
from twisted.web._newclient import ResponseFailed
from twisted.web.http import _DataLoss
from twisted.web.test.test_webclient import ForeverTakingResource, \
NoLengthResource, HostHeaderResource, \
PayloadResource
from twisted.cred import portal, checkers, credentials
from w3lib.url import path_to_file_uri
from scrapy.core.downloader.handlers import DownloadHandlers
from scrapy.core.downloader.handlers.datauri import DataURIDownloadHandler
from scrapy.core.downloader.handlers.file import FileDownloadHandler
from scrapy.core.downloader.handlers.http import HTTPDownloadHandler, HttpDownloadHandler
from scrapy.core.downloader.handlers.http10 import HTTP10DownloadHandler
from scrapy.core.downloader.handlers.http11 import HTTP11DownloadHandler
from scrapy.core.downloader.handlers.s3 import S3DownloadHandler
from scrapy.spiders import Spider
from scrapy.http import Headers, Request
from scrapy.http.response.text import TextResponse
from scrapy.responsetypes import responsetypes
from scrapy.settings import Settings
from scrapy.utils.test import get_crawler, skip_if_no_boto
from scrapy.utils.python import to_bytes
from scrapy.exceptions import NotConfigured
from tests.mockserver import MockServer, ssl_context_factory, Echo
from tests.spiders import SingleRequestSpider
class DummyDH(object):
lazy = False
def __init__(self, crawler):
pass
class DummyLazyDH(object):
# Default is lazy for backward compatibility
def __init__(self, crawler):
pass
class OffDH(object):
lazy = False
def __init__(self, crawler):
raise NotConfigured
class LoadTestCase(unittest.TestCase):
def test_enabled_handler(self):
handlers = {'scheme': 'tests.test_downloader_handlers.DummyDH'}
crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
dh = DownloadHandlers(crawler)
self.assertIn('scheme', dh._schemes)
self.assertIn('scheme', dh._handlers)
self.assertNotIn('scheme', dh._notconfigured)
def test_not_configured_handler(self):
handlers = {'scheme': 'tests.test_downloader_handlers.OffDH'}
crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
dh = DownloadHandlers(crawler)
self.assertIn('scheme', dh._schemes)
self.assertNotIn('scheme', dh._handlers)
self.assertIn('scheme', dh._notconfigured)
def test_disabled_handler(self):
handlers = {'scheme': None}
crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
dh = DownloadHandlers(crawler)
self.assertNotIn('scheme', dh._schemes)
for scheme in handlers: # force load handlers
dh._get_handler(scheme)
self.assertNotIn('scheme', dh._handlers)
self.assertIn('scheme', dh._notconfigured)
def test_lazy_handlers(self):
handlers = {'scheme': 'tests.test_downloader_handlers.DummyLazyDH'}
crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
dh = DownloadHandlers(crawler)
self.assertIn('scheme', dh._schemes)
self.assertNotIn('scheme', dh._handlers)
for scheme in handlers: # force load lazy handler
dh._get_handler(scheme)
self.assertIn('scheme', dh._handlers)
self.assertNotIn('scheme', dh._notconfigured)
class FileTestCase(unittest.TestCase):
def setUp(self):
self.tmpname = self.mktemp()
with open(self.tmpname + '^', 'w') as f:
f.write('0123456789')
self.download_request = FileDownloadHandler(Settings()).download_request
def tearDown(self):
os.unlink(self.tmpname + '^')
def test_download(self):
def _test(response):
self.assertEqual(response.url, request.url)
self.assertEqual(response.status, 200)
self.assertEqual(response.body, b'0123456789')
request = Request(path_to_file_uri(self.tmpname + '^'))
assert request.url.upper().endswith('%5E')
return self.download_request(request, Spider('foo')).addCallback(_test)
def test_non_existent(self):
request = Request('file://%s' % self.mktemp())
d = self.download_request(request, Spider('foo'))
return self.assertFailure(d, IOError)
class ContentLengthHeaderResource(resource.Resource):
"""
A testing resource which renders itself as the value of the Content-Length
header from the request.
"""
def render(self, request):
return request.requestHeaders.getRawHeaders(b"content-length")[0]
class ChunkedResource(resource.Resource):
def render(self, request):
def response():
request.write(b"chunked ")
request.write(b"content\n")
request.finish()
reactor.callLater(0, response)
return server.NOT_DONE_YET
class BrokenChunkedResource(resource.Resource):
def render(self, request):
def response():
request.write(b"chunked ")
request.write(b"content\n")
# Disable terminating chunk on finish.
request.chunked = False
closeConnection(request)
reactor.callLater(0, response)
return server.NOT_DONE_YET
class BrokenDownloadResource(resource.Resource):
def render(self, request):
def response():
request.setHeader(b"Content-Length", b"20")
request.write(b"partial")
closeConnection(request)
reactor.callLater(0, response)
return server.NOT_DONE_YET
def closeConnection(request):
# We have to force a disconnection for HTTP/1.1 clients. Otherwise
# client keeps the connection open waiting for more data.
if hasattr(request.channel, 'loseConnection'): # twisted >=16.3.0
request.channel.loseConnection()
else:
request.channel.transport.loseConnection()
request.finish()
class EmptyContentTypeHeaderResource(resource.Resource):
"""
    A testing resource which renders the request body back, without a
    Content-Type header in the response.
"""
def render(self, request):
request.setHeader("content-type", "")
return request.content.read()
class LargeChunkedFileResource(resource.Resource):
def render(self, request):
def response():
for i in range(1024):
request.write(b"x" * 1024)
request.finish()
reactor.callLater(0, response)
return server.NOT_DONE_YET
class HttpTestCase(unittest.TestCase):
scheme = 'http'
download_handler_cls = HTTPDownloadHandler
# only used for HTTPS tests
keyfile = 'keys/localhost.key'
certfile = 'keys/localhost.crt'
def setUp(self):
self.tmpname = self.mktemp()
os.mkdir(self.tmpname)
FilePath(self.tmpname).child("file").setContent(b"0123456789")
r = static.File(self.tmpname)
r.putChild(b"redirect", util.Redirect(b"/file"))
r.putChild(b"wait", ForeverTakingResource())
r.putChild(b"hang-after-headers", ForeverTakingResource(write=True))
r.putChild(b"nolength", NoLengthResource())
r.putChild(b"host", HostHeaderResource())
r.putChild(b"payload", PayloadResource())
r.putChild(b"broken", BrokenDownloadResource())
r.putChild(b"chunked", ChunkedResource())
r.putChild(b"broken-chunked", BrokenChunkedResource())
r.putChild(b"contentlength", ContentLengthHeaderResource())
r.putChild(b"nocontenttype", EmptyContentTypeHeaderResource())
r.putChild(b"largechunkedfile", LargeChunkedFileResource())
r.putChild(b"echo", Echo())
self.site = server.Site(r, timeout=None)
self.wrapper = WrappingFactory(self.site)
self.host = 'localhost'
if self.scheme == 'https':
self.port = reactor.listenSSL(
0, self.wrapper, ssl_context_factory(self.keyfile, self.certfile),
interface=self.host)
else:
self.port = reactor.listenTCP(0, self.wrapper, interface=self.host)
self.portno = self.port.getHost().port
self.download_handler = self.download_handler_cls(Settings())
self.download_request = self.download_handler.download_request
@defer.inlineCallbacks
def tearDown(self):
yield self.port.stopListening()
if hasattr(self.download_handler, 'close'):
yield self.download_handler.close()
shutil.rmtree(self.tmpname)
def getURL(self, path):
return "%s://%s:%d/%s" % (self.scheme, self.host, self.portno, path)
def test_download(self):
request = Request(self.getURL('file'))
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEqual, b"0123456789")
return d
def test_download_head(self):
request = Request(self.getURL('file'), method='HEAD')
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEqual, b'')
return d
def test_redirect_status(self):
request = Request(self.getURL('redirect'))
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.status)
d.addCallback(self.assertEqual, 302)
return d
def test_redirect_status_head(self):
request = Request(self.getURL('redirect'), method='HEAD')
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.status)
d.addCallback(self.assertEqual, 302)
return d
@defer.inlineCallbacks
def test_timeout_download_from_spider_nodata_rcvd(self):
# client connects but no data is received
spider = Spider('foo')
meta = {'download_timeout': 0.2}
request = Request(self.getURL('wait'), meta=meta)
d = self.download_request(request, spider)
yield self.assertFailure(d, defer.TimeoutError, error.TimeoutError)
@defer.inlineCallbacks
def test_timeout_download_from_spider_server_hangs(self):
# client connects, server send headers and some body bytes but hangs
spider = Spider('foo')
meta = {'download_timeout': 0.2}
request = Request(self.getURL('hang-after-headers'), meta=meta)
d = self.download_request(request, spider)
yield self.assertFailure(d, defer.TimeoutError, error.TimeoutError)
def test_host_header_not_in_request_headers(self):
def _test(response):
self.assertEqual(
response.body, to_bytes('%s:%d' % (self.host, self.portno)))
self.assertEqual(request.headers, {})
request = Request(self.getURL('host'))
return self.download_request(request, Spider('foo')).addCallback(_test)
    def test_host_header_set_in_request_headers(self):
def _test(response):
self.assertEqual(response.body, b'example.com')
self.assertEqual(request.headers.get('Host'), b'example.com')
request = Request(self.getURL('host'), headers={'Host': 'example.com'})
return self.download_request(request, Spider('foo')).addCallback(_test)
def test_content_length_zero_bodyless_post_request_headers(self):
"""Tests if "Content-Length: 0" is sent for bodyless POST requests.
This is not strictly required by HTTP RFCs but can cause trouble
for some web servers.
See:
https://github.com/scrapy/scrapy/issues/823
https://issues.apache.org/jira/browse/TS-2902
https://github.com/kennethreitz/requests/issues/405
https://bugs.python.org/issue14721
"""
def _test(response):
self.assertEqual(response.body, b'0')
request = Request(self.getURL('contentlength'), method='POST', headers={'Host': 'example.com'})
return self.download_request(request, Spider('foo')).addCallback(_test)
def test_content_length_zero_bodyless_post_only_one(self):
def _test(response):
import json
headers = Headers(json.loads(response.text)['headers'])
contentlengths = headers.getlist('Content-Length')
self.assertEqual(len(contentlengths), 1)
self.assertEqual(contentlengths, [b"0"])
request = Request(self.getURL('echo'), method='POST')
return self.download_request(request, Spider('foo')).addCallback(_test)
def test_payload(self):
body = b'1'*100 # PayloadResource requires body length to be 100
request = Request(self.getURL('payload'), method='POST', body=body)
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEqual, body)
return d
class DeprecatedHttpTestCase(HttpTestCase):
"""HTTP 1.0 test case"""
download_handler_cls = HttpDownloadHandler
class Http10TestCase(HttpTestCase):
"""HTTP 1.0 test case"""
download_handler_cls = HTTP10DownloadHandler
class Https10TestCase(Http10TestCase):
scheme = 'https'
class Http11TestCase(HttpTestCase):
"""HTTP 1.1 test case"""
download_handler_cls = HTTP11DownloadHandler
def test_download_without_maxsize_limit(self):
request = Request(self.getURL('file'))
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEqual, b"0123456789")
return d
def test_response_class_choosing_request(self):
"""Tests choosing of correct response type
in case of Content-Type is empty but body contains text.
"""
body = b'Some plain text\ndata with tabs\t and null bytes\0'
def _test_type(response):
self.assertEqual(type(response), TextResponse)
request = Request(self.getURL('nocontenttype'), body=body)
d = self.download_request(request, Spider('foo'))
d.addCallback(_test_type)
return d
@defer.inlineCallbacks
def test_download_with_maxsize(self):
request = Request(self.getURL('file'))
        # 10 is the minimal size for this request; the limit is only counted
        # against the response body (regardless of headers).
d = self.download_request(request, Spider('foo', download_maxsize=10))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEqual, b"0123456789")
yield d
d = self.download_request(request, Spider('foo', download_maxsize=9))
yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)
@defer.inlineCallbacks
def test_download_with_maxsize_very_large_file(self):
with mock.patch('scrapy.core.downloader.handlers.http11.logger') as logger:
request = Request(self.getURL('largechunkedfile'))
def check(logger):
logger.error.assert_called_once_with(mock.ANY, mock.ANY)
d = self.download_request(request, Spider('foo', download_maxsize=1500))
yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)
# As the error message is logged in the dataReceived callback, we
# have to give a bit of time to the reactor to process the queue
# after closing the connection.
d = defer.Deferred()
d.addCallback(check)
reactor.callLater(.1, d.callback, logger)
yield d
@defer.inlineCallbacks
def test_download_with_maxsize_per_req(self):
meta = {'download_maxsize': 2}
request = Request(self.getURL('file'), meta=meta)
d = self.download_request(request, Spider('foo'))
yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)
@defer.inlineCallbacks
def test_download_with_small_maxsize_per_spider(self):
request = Request(self.getURL('file'))
d = self.download_request(request, Spider('foo', download_maxsize=2))
yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)
def test_download_with_large_maxsize_per_spider(self):
request = Request(self.getURL('file'))
d = self.download_request(request, Spider('foo', download_maxsize=100))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEqual, b"0123456789")
return d
def test_download_chunked_content(self):
request = Request(self.getURL('chunked'))
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.body)
d.addCallback(self.assertEqual, b"chunked content\n")
return d
def test_download_broken_content_cause_data_loss(self, url='broken'):
request = Request(self.getURL(url))
d = self.download_request(request, Spider('foo'))
def checkDataLoss(failure):
if failure.check(ResponseFailed):
if any(r.check(_DataLoss) for r in failure.value.reasons):
return None
return failure
d.addCallback(lambda _: self.fail("No DataLoss exception"))
d.addErrback(checkDataLoss)
return d
def test_download_broken_chunked_content_cause_data_loss(self):
return self.test_download_broken_content_cause_data_loss('broken-chunked')
def test_download_broken_content_allow_data_loss(self, url='broken'):
request = Request(self.getURL(url), meta={'download_fail_on_dataloss': False})
d = self.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.flags)
d.addCallback(self.assertEqual, ['dataloss'])
return d
def test_download_broken_chunked_content_allow_data_loss(self):
return self.test_download_broken_content_allow_data_loss('broken-chunked')
def test_download_broken_content_allow_data_loss_via_setting(self, url='broken'):
download_handler = self.download_handler_cls(Settings({
'DOWNLOAD_FAIL_ON_DATALOSS': False,
}))
request = Request(self.getURL(url))
d = download_handler.download_request(request, Spider('foo'))
d.addCallback(lambda r: r.flags)
d.addCallback(self.assertEqual, ['dataloss'])
return d
def test_download_broken_chunked_content_allow_data_loss_via_setting(self):
return self.test_download_broken_content_allow_data_loss_via_setting('broken-chunked')
class Https11TestCase(Http11TestCase):
scheme = 'https'
class Https11WrongHostnameTestCase(Http11TestCase):
scheme = 'https'
    # The above tests use a server certificate for "localhost" and the client
    # connects to "localhost" as well. Here we check that the tests still
    # pass even when the server certificate is issued for another domain
    # ("www.example.com" in this case).
keyfile = 'keys/example-com.key.pem'
certfile = 'keys/example-com.cert.pem'
class Https11InvalidDNSId(Https11TestCase):
"""Connect to HTTPS hosts with IP while certificate uses domain names IDs."""
def setUp(self):
super(Https11InvalidDNSId, self).setUp()
self.host = '127.0.0.1'
class Https11InvalidDNSPattern(Https11TestCase):
"""Connect to HTTPS hosts where the certificate are issued to an ip instead of a domain."""
keyfile = 'keys/localhost.ip.key'
certfile = 'keys/localhost.ip.crt'
def setUp(self):
try:
from service_identity.exceptions import CertificateError
except ImportError:
raise unittest.SkipTest("cryptography lib is too old")
super(Https11InvalidDNSPattern, self).setUp()
class Http11MockServerTestCase(unittest.TestCase):
"""HTTP 1.1 test case with MockServer"""
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
def tearDown(self):
self.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def test_download_with_content_length(self):
crawler = get_crawler(SingleRequestSpider)
        # http://localhost:8998/partial sets Content-Length to 1024; use
        # download_maxsize=1000 so the download is aborted before completing
yield crawler.crawl(seed=Request(url=self.mockserver.url('/partial'), meta={'download_maxsize': 1000}))
failure = crawler.spider.meta['failure']
self.assertIsInstance(failure.value, defer.CancelledError)
@defer.inlineCallbacks
def test_download(self):
crawler = get_crawler(SingleRequestSpider)
yield crawler.crawl(seed=Request(url=self.mockserver.url('')))
failure = crawler.spider.meta.get('failure')
        self.assertIsNone(failure)
        reason = crawler.spider.meta['close_reason']
        self.assertEqual(reason, 'finished')
@defer.inlineCallbacks
def test_download_gzip_response(self):
crawler = get_crawler(SingleRequestSpider)
body = b'1' * 100 # PayloadResource requires body length to be 100
request = Request(self.mockserver.url('/payload'), method='POST',
body=body, meta={'download_maxsize': 50})
yield crawler.crawl(seed=request)
failure = crawler.spider.meta['failure']
# download_maxsize < 100, hence the CancelledError
self.assertIsInstance(failure.value, defer.CancelledError)
if six.PY2:
request.headers.setdefault(b'Accept-Encoding', b'gzip,deflate')
request = request.replace(url=self.mockserver.url('/xpayload'))
yield crawler.crawl(seed=request)
# download_maxsize = 50 is enough for the gzipped response
failure = crawler.spider.meta.get('failure')
            self.assertIsNone(failure)
            reason = crawler.spider.meta['close_reason']
            self.assertEqual(reason, 'finished')
else:
# See issue https://twistedmatrix.com/trac/ticket/8175
raise unittest.SkipTest("xpayload only enabled for PY2")
class UriResource(resource.Resource):
"""Return the full uri that was requested"""
def getChild(self, path, request):
return self
def render(self, request):
        # Note: this is an ugly hack for the CONNECT request timeout test.
        # Returning data here would fail the SSL/TLS handshake.
        # TODO: implement proper HTTPS proxy tests instead of faking them.
if request.method != b'CONNECT':
return request.uri
else:
return b''
class HttpProxyTestCase(unittest.TestCase):
download_handler_cls = HTTPDownloadHandler
def setUp(self):
site = server.Site(UriResource(), timeout=None)
wrapper = WrappingFactory(site)
self.port = reactor.listenTCP(0, wrapper, interface='127.0.0.1')
self.portno = self.port.getHost().port
self.download_handler = self.download_handler_cls(Settings())
self.download_request = self.download_handler.download_request
@defer.inlineCallbacks
def tearDown(self):
yield self.port.stopListening()
if hasattr(self.download_handler, 'close'):
yield self.download_handler.close()
def getURL(self, path):
return "http://127.0.0.1:%d/%s" % (self.portno, path)
def test_download_with_proxy(self):
def _test(response):
self.assertEqual(response.status, 200)
self.assertEqual(response.url, request.url)
self.assertEqual(response.body, b'http://example.com')
http_proxy = self.getURL('')
request = Request('http://example.com', meta={'proxy': http_proxy})
return self.download_request(request, Spider('foo')).addCallback(_test)
def test_download_with_proxy_https_noconnect(self):
def _test(response):
self.assertEqual(response.status, 200)
self.assertEqual(response.url, request.url)
self.assertEqual(response.body, b'https://example.com')
http_proxy = '%s?noconnect' % self.getURL('')
request = Request('https://example.com', meta={'proxy': http_proxy})
return self.download_request(request, Spider('foo')).addCallback(_test)
def test_download_without_proxy(self):
def _test(response):
self.assertEqual(response.status, 200)
self.assertEqual(response.url, request.url)
self.assertEqual(response.body, b'/path/to/resource')
request = Request(self.getURL('path/to/resource'))
return self.download_request(request, Spider('foo')).addCallback(_test)
class DeprecatedHttpProxyTestCase(unittest.TestCase):
"""Old deprecated reference to http10 downloader handler"""
download_handler_cls = HttpDownloadHandler
class Http10ProxyTestCase(HttpProxyTestCase):
download_handler_cls = HTTP10DownloadHandler
class Http11ProxyTestCase(HttpProxyTestCase):
download_handler_cls = HTTP11DownloadHandler
@defer.inlineCallbacks
def test_download_with_proxy_https_timeout(self):
""" Test TunnelingTCP4ClientEndpoint """
http_proxy = self.getURL('')
domain = 'https://no-such-domain.nosuch'
request = Request(
domain, meta={'proxy': http_proxy, 'download_timeout': 0.2})
d = self.download_request(request, Spider('foo'))
timeout = yield self.assertFailure(d, error.TimeoutError)
self.assertIn(domain, timeout.osError)
class HttpDownloadHandlerMock(object):
def __init__(self, settings):
pass
def download_request(self, request, spider):
return request
class S3AnonTestCase(unittest.TestCase):
def setUp(self):
skip_if_no_boto()
self.s3reqh = S3DownloadHandler(Settings(),
httpdownloadhandler=HttpDownloadHandlerMock,
#anon=True, # is implicit
)
self.download_request = self.s3reqh.download_request
self.spider = Spider('foo')
def test_anon_request(self):
req = Request('s3://aws-publicdatasets/')
httpreq = self.download_request(req, self.spider)
self.assertEqual(hasattr(self.s3reqh, 'anon'), True)
self.assertEqual(self.s3reqh.anon, True)
self.assertEqual(
httpreq.url, 'http://aws-publicdatasets.s3.amazonaws.com/')
class S3TestCase(unittest.TestCase):
download_handler_cls = S3DownloadHandler
    # These tests use the same example keys as the Amazon developer guide:
    # http://s3.amazonaws.com/awsdocs/S3/20060301/s3-dg-20060301.pdf
    # The tests below are the examples from that manual.
AWS_ACCESS_KEY_ID = '0PN5J17HBGZHT7JJ3X82'
AWS_SECRET_ACCESS_KEY = 'uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o'
def setUp(self):
skip_if_no_boto()
s3reqh = S3DownloadHandler(Settings(), self.AWS_ACCESS_KEY_ID,
self.AWS_SECRET_ACCESS_KEY,
httpdownloadhandler=HttpDownloadHandlerMock)
self.download_request = s3reqh.download_request
self.spider = Spider('foo')
@contextlib.contextmanager
def _mocked_date(self, date):
try:
import botocore.auth
except ImportError:
yield
else:
# We need to mock botocore.auth.formatdate, because otherwise
# botocore overrides Date header with current date and time
# and Authorization header is different each time
with mock.patch('botocore.auth.formatdate') as mock_formatdate:
mock_formatdate.return_value = date
yield
def test_extra_kw(self):
try:
S3DownloadHandler(Settings(), extra_kw=True)
except Exception as e:
self.assertIsInstance(e, (TypeError, NotConfigured))
else:
assert False
def test_request_signing1(self):
# gets an object from the johnsmith bucket.
        date = 'Tue, 27 Mar 2007 19:36:42 +0000'
req = Request('s3://johnsmith/photos/puppy.jpg', headers={'Date': date})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
b'AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=')
def test_request_signing2(self):
# puts an object into the johnsmith bucket.
date = 'Tue, 27 Mar 2007 21:15:45 +0000'
req = Request('s3://johnsmith/photos/puppy.jpg', method='PUT', headers={
'Content-Type': 'image/jpeg',
'Date': date,
'Content-Length': '94328',
})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
b'AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=')
def test_request_signing3(self):
# lists the content of the johnsmith bucket.
date = 'Tue, 27 Mar 2007 19:42:41 +0000'
req = Request('s3://johnsmith/?prefix=photos&max-keys=50&marker=puppy', \
method='GET', headers={
'User-Agent': 'Mozilla/5.0',
'Date': date,
})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
b'AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=')
def test_request_signing4(self):
# fetches the access control policy sub-resource for the 'johnsmith' bucket.
date = 'Tue, 27 Mar 2007 19:44:46 +0000'
req = Request('s3://johnsmith/?acl',
method='GET', headers={'Date': date})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
b'AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=')
def test_request_signing5(self):
        try:
            import botocore  # noqa: F401
        except ImportError:
            pass
else:
raise unittest.SkipTest(
'botocore does not support overriding date with x-amz-date')
# deletes an object from the 'johnsmith' bucket using the
# path-style and Date alternative.
date = 'Tue, 27 Mar 2007 21:20:27 +0000'
req = Request('s3://johnsmith/photos/puppy.jpg', \
method='DELETE', headers={
'Date': date,
'x-amz-date': 'Tue, 27 Mar 2007 21:20:26 +0000',
})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
# botocore does not override Date with x-amz-date
self.assertEqual(httpreq.headers['Authorization'],
b'AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk=')
def test_request_signing6(self):
# uploads an object to a CNAME style virtual hosted bucket with metadata.
date = 'Tue, 27 Mar 2007 21:06:08 +0000'
req = Request('s3://static.johnsmith.net:8080/db-backup.dat.gz', \
method='PUT', headers={
'User-Agent': 'curl/7.15.5',
'Host': 'static.johnsmith.net:8080',
'Date': date,
'x-amz-acl': 'public-read',
'content-type': 'application/x-download',
'Content-MD5': '4gJE4saaMU4BqNR0kLY+lw==',
'X-Amz-Meta-ReviewedBy': 'joe@johnsmith.net,jane@johnsmith.net',
'X-Amz-Meta-FileChecksum': '0x02661779',
'X-Amz-Meta-ChecksumAlgorithm': 'crc32',
'Content-Disposition': 'attachment; filename=database.dat',
'Content-Encoding': 'gzip',
'Content-Length': '5913339',
})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
b'AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=')
def test_request_signing7(self):
# ensure that spaces are quoted properly before signing
date = 'Tue, 27 Mar 2007 19:42:41 +0000'
req = Request(
("s3://johnsmith/photos/my puppy.jpg"
"?response-content-disposition=my puppy.jpg"),
method='GET',
headers={'Date': date},
)
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
self.assertEqual(
httpreq.headers['Authorization'],
b'AWS 0PN5J17HBGZHT7JJ3X82:+CfvG8EZ3YccOrRVMXNaK2eKZmM=')
class BaseFTPTestCase(unittest.TestCase):
username = "scrapy"
password = "passwd"
req_meta = {"ftp_user": username, "ftp_password": password}
def setUp(self):
from twisted.protocols.ftp import FTPRealm, FTPFactory
from scrapy.core.downloader.handlers.ftp import FTPDownloadHandler
# setup dirs and test file
self.directory = self.mktemp()
os.mkdir(self.directory)
userdir = os.path.join(self.directory, self.username)
os.mkdir(userdir)
fp = FilePath(userdir)
fp.child('file.txt').setContent(b"I have the power!")
fp.child('file with spaces.txt').setContent(b"Moooooooooo power!")
# setup server
realm = FTPRealm(anonymousRoot=self.directory, userHome=self.directory)
p = portal.Portal(realm)
users_checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
users_checker.addUser(self.username, self.password)
p.registerChecker(users_checker, credentials.IUsernamePassword)
self.factory = FTPFactory(portal=p)
self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
self.portNum = self.port.getHost().port
self.download_handler = FTPDownloadHandler(Settings())
self.addCleanup(self.port.stopListening)
def tearDown(self):
shutil.rmtree(self.directory)
def _add_test_callbacks(self, deferred, callback=None, errback=None):
def _clean(data):
self.download_handler.client.transport.loseConnection()
return data
deferred.addCallback(_clean)
if callback:
deferred.addCallback(callback)
if errback:
deferred.addErrback(errback)
return deferred
def test_ftp_download_success(self):
request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
meta=self.req_meta)
d = self.download_handler.download_request(request, None)
def _test(r):
self.assertEqual(r.status, 200)
self.assertEqual(r.body, b'I have the power!')
self.assertEqual(r.headers, {b'Local Filename': [b''], b'Size': [b'17']})
return self._add_test_callbacks(d, _test)
def test_ftp_download_path_with_spaces(self):
request = Request(
url="ftp://127.0.0.1:%s/file with spaces.txt" % self.portNum,
meta=self.req_meta
)
d = self.download_handler.download_request(request, None)
def _test(r):
self.assertEqual(r.status, 200)
self.assertEqual(r.body, b'Moooooooooo power!')
self.assertEqual(r.headers, {b'Local Filename': [b''], b'Size': [b'18']})
return self._add_test_callbacks(d, _test)
def test_ftp_download_notexist(self):
request = Request(url="ftp://127.0.0.1:%s/notexist.txt" % self.portNum,
meta=self.req_meta)
d = self.download_handler.download_request(request, None)
def _test(r):
self.assertEqual(r.status, 404)
return self._add_test_callbacks(d, _test)
def test_ftp_local_filename(self):
f, local_fname = tempfile.mkstemp()
local_fname = to_bytes(local_fname)
os.close(f)
meta = {"ftp_local_filename": local_fname}
meta.update(self.req_meta)
request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
meta=meta)
d = self.download_handler.download_request(request, None)
def _test(r):
self.assertEqual(r.body, local_fname)
self.assertEqual(r.headers, {b'Local Filename': [local_fname],
b'Size': [b'17']})
self.assertTrue(os.path.exists(local_fname))
with open(local_fname, "rb") as f:
self.assertEqual(f.read(), b"I have the power!")
os.remove(local_fname)
return self._add_test_callbacks(d, _test)
class FTPTestCase(BaseFTPTestCase):
def test_invalid_credentials(self):
from twisted.protocols.ftp import ConnectionLost
meta = dict(self.req_meta)
meta.update({"ftp_password": 'invalid'})
request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
meta=meta)
d = self.download_handler.download_request(request, None)
def _test(r):
self.assertEqual(r.type, ConnectionLost)
return self._add_test_callbacks(d, errback=_test)
class AnonymousFTPTestCase(BaseFTPTestCase):
username = "anonymous"
req_meta = {}
def setUp(self):
from twisted.protocols.ftp import FTPRealm, FTPFactory
from scrapy.core.downloader.handlers.ftp import FTPDownloadHandler
# setup dir and test file
self.directory = self.mktemp()
os.mkdir(self.directory)
fp = FilePath(self.directory)
fp.child('file.txt').setContent(b"I have the power!")
fp.child('file with spaces.txt').setContent(b"Moooooooooo power!")
# setup server for anonymous access
realm = FTPRealm(anonymousRoot=self.directory)
p = portal.Portal(realm)
p.registerChecker(checkers.AllowAnonymousAccess(),
credentials.IAnonymous)
self.factory = FTPFactory(portal=p,
userAnonymous=self.username)
self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
self.portNum = self.port.getHost().port
self.download_handler = FTPDownloadHandler(Settings())
self.addCleanup(self.port.stopListening)
def tearDown(self):
shutil.rmtree(self.directory)
class DataURITestCase(unittest.TestCase):
def setUp(self):
self.download_handler = DataURIDownloadHandler(Settings())
self.download_request = self.download_handler.download_request
self.spider = Spider('foo')
def test_response_attrs(self):
uri = "data:,A%20brief%20note"
def _test(response):
self.assertEqual(response.url, uri)
self.assertFalse(response.headers)
request = Request(uri)
return self.download_request(request, self.spider).addCallback(_test)
def test_default_mediatype_encoding(self):
def _test(response):
self.assertEqual(response.text, 'A brief note')
self.assertEqual(type(response),
responsetypes.from_mimetype("text/plain"))
self.assertEqual(response.encoding, "US-ASCII")
request = Request("data:,A%20brief%20note")
return self.download_request(request, self.spider).addCallback(_test)
def test_default_mediatype(self):
def _test(response):
self.assertEqual(response.text, u'\u038e\u03a3\u038e')
self.assertEqual(type(response),
responsetypes.from_mimetype("text/plain"))
self.assertEqual(response.encoding, "iso-8859-7")
request = Request("data:;charset=iso-8859-7,%be%d3%be")
return self.download_request(request, self.spider).addCallback(_test)
def test_text_charset(self):
def _test(response):
self.assertEqual(response.text, u'\u038e\u03a3\u038e')
self.assertEqual(response.body, b'\xbe\xd3\xbe')
self.assertEqual(response.encoding, "iso-8859-7")
request = Request("data:text/plain;charset=iso-8859-7,%be%d3%be")
return self.download_request(request, self.spider).addCallback(_test)
def test_mediatype_parameters(self):
def _test(response):
self.assertEqual(response.text, u'\u038e\u03a3\u038e')
self.assertEqual(type(response),
responsetypes.from_mimetype("text/plain"))
self.assertEqual(response.encoding, "utf-8")
request = Request('data:text/plain;foo=%22foo;bar%5C%22%22;'
'charset=utf-8;bar=%22foo;%5C%22 foo ;/,%22'
',%CE%8E%CE%A3%CE%8E')
return self.download_request(request, self.spider).addCallback(_test)
def test_base64(self):
def _test(response):
self.assertEqual(response.text, 'Hello, world.')
request = Request('data:text/plain;base64,SGVsbG8sIHdvcmxkLg%3D%3D')
return self.download_request(request, self.spider).addCallback(_test)
|
|
# -*- coding: utf-8 -*-
import argparse
import os.path
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import traceback
import yaml
from . import utils
"""
Main code, runner
"""
class Runner(object):
BASEDIR_TEMP = 'TEMP'
signals = [
signal.SIGINT,
signal.SIGQUIT,
signal.SIGTERM,
]
def __init__(self):
        self.args = ()      # command line args
self.config = {} # parsed config
self.environ = {} # generated vars
self.servers = {} # server instances
self.basedir = None # dir with temporary env
self.confdir = None # dir with config
self.pid = os.getpid()
self.exit_code = 1 # error by default
self.orig_stderr = sys.stderr
self.orig_stdout = sys.stdout
def parse_params(self):
        parser = argparse.ArgumentParser(description='Run a command inside a prepared test environment.')
parser.add_argument('--config', dest='config', type=str, help='testenv config (.yml)', required=True)
parser.add_argument('command', nargs=argparse.REMAINDER)
args = parser.parse_args()
args.config = os.path.abspath(args.config)
if not os.path.isfile(args.config):
raise Exception("not a file: " + args.config)
self.confdir = os.path.dirname(args.config)
self.args = args
def read_config(self):
        with open(self.args.config, "r") as fh:
            self.config = yaml.safe_load(fh)
self.config.setdefault('basedir', 'tenv')
self.config.setdefault('basedir_cleanup', False)
self.config.setdefault('servers', {})
self.config.setdefault('log', None)
        assert isinstance(self.config['servers'], dict), "servers section should be a dict"
for name, sconf in self.config['servers'].iteritems():
assert 'type' in sconf, name + " should have type attribute"
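    # A minimal config accepted by read_config might look like this
    # (hypothetical example; a relative 'basedir' is resolved against the
    # config directory, and 'TEMP' requests a temporary directory):
    #
    #   basedir: TEMP
    #   basedir_cleanup: true
    #   log: testenv.log
    #   servers:
    #     db:
    #       type: postgres              # short names load testenv.contrib.postgres
    #     web:
    #       type: myproject.servers.Web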
def parametrize_config(self):
environ = self.environ
environ['confdir'] = self.confdir
environ['basedir'] = self.basedir
environ['testenv'] = '1'
def handle(s, trail):
def one(match):
groups = match.groups()
name = groups[0]
if name in environ:
return environ[name]
if len(groups) == 1:
return name
sname = groups[1]
kind = groups[2]
if kind in ('addr', 'ip', 'port'):
ip = utils.free_ip()
port = utils.free_port(ip)
addr = '{0}:{1}'.format(ip, port)
environ.setdefault(sname + '_ip', ip)
environ.setdefault(sname + '_port', port)
environ.setdefault(sname + '_addr', addr)
elif kind == 'dir':
environ[name] = os.path.join(self.basedir, name)
os.makedirs(environ[name])
elif kind == 'sock':
environ[name] = os.path.join(self.basedir, name + '.sock')
elif kind == 'env':
environ[name] = os.environ.get(sname, '')
else:
raise ValueError("unexpected pattern {0} in {1}".format(match.group(0), '/'.join(trail)))
return environ[name]
s = re.sub(r'\$((\w+)_(\w+))\$', one, s)
s = re.sub(r'\$(\w+)\$', one, s)
return s
self.config = utils.walk(self.config, handle)
self.environ.update(self.config.get('extra', {}))
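    # Substitution examples (hypothetical values): for a server named "db",
    # "$db_addr$" expands to a free "ip:port" pair (also populating db_ip and
    # db_port), "$db_dir$" becomes a fresh directory under basedir,
    # "$db_sock$" a socket path, and "$HOME_env$" pulls HOME from os.environ.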
    def stop_by_signal(self, signum, frame):
        raise Exception("signaled with " + str(signum))
def setup_signals(self):
for s in self.signals:
signal.signal(s, self.stop_by_signal)
def reset_signals(self):
for s in self.signals:
signal.signal(s, signal.SIG_DFL)
def confpath(self, path):
if os.path.isabs(path):
return path
else:
return os.path.join(self.confdir, path)
def basepath(self, path):
if os.path.isabs(path):
return path
else:
return os.path.join(self.basedir, path)
def open_log(self):
self.orig_stderr = os.fdopen(os.dup(sys.stderr.fileno()), 'w')
self.orig_stdout = os.fdopen(os.dup(sys.stdout.fileno()), 'w')
if self.config['log'] is not None:
log = open(self.basepath(self.config['log']), 'w', buffering=1)
os.dup2(log.fileno(), sys.stderr.fileno())
os.dup2(log.fileno(), sys.stdout.fileno())
def create_basedir(self):
basedir = self.config['basedir']
if basedir == self.BASEDIR_TEMP:
self.basedir = tempfile.mkdtemp()
else:
self.basedir = os.path.join(self.confdir, basedir)
if os.path.exists(self.basedir):
shutil.rmtree(self.basedir)
os.makedirs(self.basedir)
def create_servers(self):
for name, sconf in self.config['servers'].iteritems():
stype = sconf['type']
if '.' not in stype:
stype = 'testenv.contrib.' + stype
sclass = utils.load_class(stype)
self.servers[name] = sclass(self, name, sconf)
def order_servers(self):
ordered = []
stack = []
def add(server):
if server in stack:
bad = ', '.join(s.name for s in stack)
raise Exception("dependency cycle with servers: " + bad)
stack.append(server)
for s in server.after:
if s not in self.servers:
raise Exception("wrong dependency {0}: no such server".format(s))
add(self.servers[s])
if server not in ordered:
ordered.append(server)
stack.pop()
for s in self.servers.values():
add(s)
return ordered
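    # Example (hypothetical): with servers a (after: [b]) and b (after: []),
    # order_servers returns [b, a]; a cycle such as a -> b -> a raises instead.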
def start_servers(self):
servers = self.order_servers()
for s in servers:
sys.stderr.write("Starting {0}\n".format(s.name))
s.prepare()
s.start()
s.wait_ready()
s.fill()
def run_command(self):
if len(self.args.command) > 0:
cmd = self.args.command
else:
cmd = ['env']
environ = {}
environ.update(os.environ)
environ.update(self.environ)
environ = { k: str(v) for k, v in environ.items() }
try:
p = subprocess.Popen(cmd, stdout=self.orig_stdout, stderr=self.orig_stderr, env=environ)
except Exception as e:
raise Exception("can't start {0}: {1}".format(' '.join(cmd), str(e)))
p.wait()
self.exit_code = p.returncode
def stop_servers(self):
servers = self.order_servers()
for s in reversed(servers):
if s.is_running():
sys.stderr.write("Stoping {0}\n".format(s.name))
s.stop()
def cleanup(self):
if self.config['basedir_cleanup'] or self.config['basedir'] == self.BASEDIR_TEMP:
shutil.rmtree(self.basedir)
def run(self):
        assert os.name == 'posix', "testenv currently supports only unix"
self.parse_params()
sys.path.append(self.confdir)
self.read_config()
self.setup_signals()
try:
self.create_basedir()
self.open_log()
self.parametrize_config()
self.create_servers()
self.start_servers()
self.run_command()
except Exception:
traceback.print_exc(limit=100, file=sys.stderr)
finally:
if os.getpid() == self.pid:
self.stop_servers()
self.cleanup()
sys.exit(self.exit_code)
|
|
#!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2015, Jesse Keating <jlk@derpops.bike>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_server_action
short_description: Perform actions on Compute Instances from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Jesse Keating (@j2sol)"
description:
- Perform server actions on an existing compute instance from OpenStack.
This module does not return any data other than changed true/false.
     When I(action) is 'rebuild', the I(image) parameter is required.
options:
server:
description:
- Name or ID of the instance
required: true
wait:
description:
- If the module should wait for the instance action to be performed.
required: false
default: 'yes'
timeout:
description:
- The amount of time the module should wait for the instance to perform
the requested action.
required: false
default: 180
action:
description:
- Perform the given action. The lock and unlock actions always return
changed as the servers API does not provide lock status.
choices: [stop, start, pause, unpause, lock, unlock, suspend, resume,
rebuild]
     required: true
image:
description:
- Image the server should be rebuilt with
default: null
version_added: "2.3"
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Pauses a compute instance
- os_server_action:
action: pause
auth:
auth_url: https://mycloud.openstack.blueboxgrid.com:5001/v2.0
username: admin
password: admin
project_name: admin
server: vm1
timeout: 200
'''
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs
_action_map = {'stop': 'SHUTOFF',
'start': 'ACTIVE',
'pause': 'PAUSED',
'unpause': 'ACTIVE',
'lock': 'ACTIVE', # API doesn't show lock/unlock status
'unlock': 'ACTIVE',
'suspend': 'SUSPENDED',
'resume': 'ACTIVE',
'rebuild': 'ACTIVE'}
_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock']
def _wait(timeout, cloud, server, action, module):
"""Wait for the server to reach the desired state for the given action."""
for count in shade._utils._iterate_timeout(
timeout,
"Timeout waiting for server to complete %s" % action):
try:
server = cloud.get_server(server.id)
except Exception:
continue
if server.status == _action_map[action]:
return
if server.status == 'ERROR':
module.fail_json(msg="Server reached ERROR state while attempting to %s" % action)
def _system_state_change(action, status):
"""Check if system state would change."""
if status == _action_map[action]:
return False
return True
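# Illustration (hypothetical statuses): check mode uses _system_state_change
# to report whether the action would do anything, e.g.
#   _system_state_change('pause', 'PAUSED') -> False (already paused)
#   _system_state_change('pause', 'ACTIVE') -> True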
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
action=dict(required=True, choices=['stop', 'start', 'pause', 'unpause',
'lock', 'unlock', 'suspend', 'resume',
'rebuild']),
image=dict(required=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, supports_check_mode=True,
required_if=[('action', 'rebuild', ['image'])],
**module_kwargs)
if module._name == 'os_server_actions':
module.deprecate("The 'os_server_actions' module is being renamed 'os_server_action'", version=2.8)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
action = module.params['action']
wait = module.params['wait']
timeout = module.params['timeout']
image = module.params['image']
try:
if action in _admin_actions:
cloud = shade.operator_cloud(**module.params)
else:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
if not server:
            module.fail_json(msg='Could not find server %s' % module.params['server'])
status = server.status
if module.check_mode:
module.exit_json(changed=_system_state_change(action, status))
if action == 'stop':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.stop(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
        elif action == 'start':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.start(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
        elif action == 'pause':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.pause(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'unpause':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.unpause(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'lock':
# lock doesn't set a state, just do it
cloud.nova_client.servers.lock(server=server.id)
module.exit_json(changed=True)
elif action == 'unlock':
# unlock doesn't set a state, just do it
cloud.nova_client.servers.unlock(server=server.id)
module.exit_json(changed=True)
elif action == 'suspend':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.suspend(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'resume':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.resume(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'rebuild':
image = cloud.get_image(image)
if image is None:
module.fail_json(msg="Image does not exist")
# rebuild doesn't set a state, just do it
cloud.nova_client.servers.rebuild(server=server.id, image=image.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
if __name__ == '__main__':
main()
|
|
# Copyright 2019 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from lib.utils import file_size
from lib.view import terminal
from .const import DynamicFieldOrder, FieldType, SheetStyle
from .source import source_lookup, source_root
class Sheet(object):
def __init__(
self,
fields,
from_source=None,
for_each=None,
where=None,
group_by=None,
order_by=None,
default_style=SheetStyle.columns,
title_fill="~",
subtitle_fill="~",
subtitle_empty_line="",
vertical_separator="|",
horizontal_seperator="-",
no_entry="--",
error_entry="~~",
):
"""Instantiates a sheet definition.
Arguments:
fields -- Sequence of fields to present.
Keyword Arguments:
        from_source -- (Required) A source name, or sequence of source names,
            required for rendering (used for sanity checks only).
        for_each -- A field whose data-source contains multiple sets of
            values.
        where -- Function to determine if a record should be shown.
        group_by -- Field or sequence of fields to group records by.
        order_by -- Field or sequence of fields to sort the records by.
        default_style -- Default SheetStyle used to render the sheet.
        title_fill -- Fill string for padding the title line.
        subtitle_fill -- Fill string for padding subtitle lines.
        subtitle_empty_line -- String used for an empty subtitle line.
        vertical_separator -- String used to separate fields within a row.
        horizontal_seperator -- String used to separate rows.
        no_entry -- String used when a field is present but a particular
            key is missing.
        error_entry -- String used when a field's data-source is not a dict
            or sequence.
"""
self.fields = fields
# XXX - Support TitleFields in SubGroups?
self.title_field_keys = set(
field.key for field in fields if isinstance(field, TitleField)
)
self.from_sources = self._arg_as_tuple(from_source)
self.where = where
self.for_each = self._arg_as_tuple(for_each)
self.group_bys = self._arg_as_tuple(group_by)
self.order_bys = self._arg_as_tuple(order_by)
self.default_style = default_style
self.vertical_separator = vertical_separator
self.horizontal_seperator = horizontal_seperator
self.formatted_vertical_separator = (
terminal.dim() + vertical_separator + terminal.undim()
)
self.formatted_horizontal_seperator = (
terminal.dim() + horizontal_seperator + terminal.undim()
)
self.title_fill = title_fill
self.subtitle_fill = subtitle_fill
self.subtitle_empty_line = subtitle_empty_line
self.no_entry = no_entry
self.error_entry = error_entry
self.has_aggregates = any(f.has_aggregate for f in fields)
self._init_sanity_check()
def _arg_as_tuple(self, arg):
if arg is None:
return tuple()
elif isinstance(arg, tuple):
return arg
elif isinstance(arg, list):
return tuple(arg)
elif isinstance(arg, str):
return (arg,)
raise ValueError(
"Expected tuple, list or string - instead {}".format(type(arg))
)
def _init_sanity_check(self):
# Ensure 'group_bys' and 'sort_bys' are in 'fields'.
# NOTE - currently cannot group_by/sort_by a member of a Subgroup.
static_fields = [
field for field in self.fields if not isinstance(field, DynamicFields)
]
field_set = set(field.key for field in static_fields)
if len(field_set) != len(static_fields):
field_keys = ",".join(
(
"{} appears {} times".format(key, count)
                    for key, count in Counter(
field.key for field in static_fields
).items()
if count > 1
)
)
assert False, "Field keys are not unique: {}".format(field_keys)
if self.group_bys:
group_by_set = set(self.group_bys)
assert len(group_by_set) == len(self.group_bys)
else:
group_by_set = set()
if self.order_bys:
order_by_set = set(self.order_bys)
assert len(order_by_set) == len(self.order_bys)
else:
order_by_set = set()
error_groups = group_by_set - field_set
error_orders = order_by_set - field_set
assert not error_groups, error_groups
assert not error_orders, error_orders
assert self.from_sources, "require a list of expected field sources"
sources_set = set(source_root(s) for s in self.from_sources)
assert len(sources_set) == len(self.from_sources)
seen_sources_set = set()
def populate_seen_sources(fields):
for field in fields:
                if isinstance(field, Subgroup):
                    # Recurse into the subgroup, then keep scanning the
                    # remaining fields at this level.
                    populate_seen_sources(field.fields)
                    continue
assert not isinstance(
field, DynamicFields
), "DynamicFields cannot be members of Subgroups."
try:
sources = field.projector.sources
except AttributeError:
sources = set([field.projector.source])
assert sources - sources_set == set(), "{} not subset of {}".format(
sources, sources_set
)
seen_sources_set.update(sources)
populate_seen_sources(static_fields)
for field in self.fields:
if isinstance(field, DynamicFields):
seen_sources_set.add(source_root(field.source))
assert len(seen_sources_set) == len(sources_set)
if self.for_each:
for_each_set = set(self.for_each)
assert len(for_each_set) == len(self.for_each)
else:
for_each_set = set()
assert for_each_set - sources_set == set()
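# Illustrative sketch (not part of the original module): a minimal Sheet
# declaration. The source and key names are hypothetical; Field and
# Projectors are defined later in this module, which is fine because the
# body is only evaluated if the function is called.
def _example_sheet_decl():
    return Sheet(
        (
            Field('Node', Projectors.String('node_stats', 'node_name')),
            Field('Uptime', Projectors.String('node_stats', 'uptime')),
        ),
        from_source='node_stats',
        group_by='Node',
        order_by='Uptime',
    )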
class Subgroup(object):
def __init__(self, title, fields, key=None):
"""
Arguments:
title -- Name in this Field's heading
fields -- Sequence of sub-fields.
Keyword Arguments:
key -- Alternative key to access this key from the parent sheet.
"""
self.title = title
self.fields = fields
self.key = title if key is None else key
self.has_aggregate = any(f.has_aggregate for f in fields)
class Field(object):
def __init__(
self,
title,
projector,
converter=None,
formatters=None,
aggregator=None,
align=None,
key=None,
hidden=None,
dynamic_field_decl=None,
allow_diff=False,
):
"""
Arguments:
title -- Name in this Field's heading
projector -- How to retrieve a field entry from the fields data_source.
Keyword Arguments:
converter -- Typically used to convert numbers to SI formats.
formatters -- List of cell formatters functions, evaluated in order,
first format to not return None is applied - rest will be
ignored. These may *not* change the length of the
rendered value.
aggregator -- Function that generates an aggregate value to be
displayed at the end of a group.
align -- None : Allow sheet to choose alignment.
'left' : Always align left.
'right' : Always align right.
'center': Always align center.
key -- Alternative key to access this key from the parent sheet.
hidden -- None : Visible if there are any entries.
True : Always visible.
False: Always hidden.
dynamic_field_decl -- None if not from DynamicFields.
Otherwise DynamicFields instance.
        allow_diff -- A non-dynamic field by default does not allow diff and is
                always displayed. This is mutually exclusive with dynamic_field_decl.
True: Allow diff.
False: Don't diff.
Default: False
"""
self.title = title
self.projector = projector
self.converter = Converters.standard if converter is None else converter
self.formatters = [] if formatters is None else formatters
self.aggregator = aggregator
self.align = align
self.key = title if key is None else key
self.hidden = hidden
self.dynamic_field_decl = dynamic_field_decl
self.allow_diff = allow_diff
self.has_aggregate = self.aggregator is not None
# Pre-compute commonly accessed data.
self.title_words = tuple(title.split(" "))
self.min_title_width = max(map(len, self.title_words))
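# Illustrative sketch (not part of the original module): a Field carrying an
# aggregator, so each group renders an aggregate entry. The source and key
# names are hypothetical; Projectors and Aggregators appear later in the
# module and are resolved only when the function is called.
def _example_field_with_aggregate():
    return Field(
        'Objects',
        Projectors.Identity('ns_stats', 'objects'),
        aggregator=Aggregators.sum(),
        align='right',
    )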
class TitleField(Field):
def __init__(
self,
title,
projector,
converter=None,
formatters=None,
aggregator=None,
align=None,
key=None,
):
if formatters is None:
formatters = (Formatters.bold(lambda _: True),)
super(TitleField, self).__init__(
title,
projector,
converter=converter,
formatters=formatters,
aggregator=aggregator,
align=align,
key=key,
)
class DynamicFields(object):
def __init__(
self,
source,
infer_projectors=True,
required=False,
aggregator_selector=None,
projector_selector=None,
converter_selector=None,
order=DynamicFieldOrder.ascending,
):
"""
Arguments:
source -- Data source to project fields from.
Keyword Arguments:
infer_projectors -- True : If true, will try to infer a projector for a
field.
aggregator_selector -- None: Function used to select aggregator. Function
will take the form (key, is_numeric) -> Aggregator.
Key is the row or column header. is_numeric is passed
in by the rendering function and allows the developer
                to know whether the projected value is numeric,
i.e. you can use arithmetic aggregators. If none,
no aggregation is used.
projector_selector -- None: Function used to select a projector. Function
will take the form (key) -> Projector. If none and
infer_projector is false String projection is used.
        Note: If a preceding non-dynamic Field uses the same key, it will not be
        rendered by a subsequent DynamicField.
"""
self.source = source
self.infer_projectors = infer_projectors
self.required = required
self.projector_selector = projector_selector
self.aggregator_selector = aggregator_selector
self.converter_selector = converter_selector
self.order = order
self.has_aggregate = False # XXX - hack
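# Illustrative sketch (not part of the original module): an
# aggregator_selector, as described in the docstring above, maps
# (key, is_numeric) to an Aggregator or None.
def _example_aggregator_selector(key, is_numeric):
    if is_numeric:
        return Aggregators.sum()
    return None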
class Aggregator(object):
def __init__(self, aggregate_func, converter=None):
"""
Arguments:
aggregate_func -- aggregate function. Type determined by subclass
Keyword Arguments:
converter -- None : Use the field's converter (if defined).
-- Function: Use this function to convert the result.
"""
self.func = aggregate_func
self.converter = converter
def compute(self, values):
raise NotImplementedError("override compute")
class ReduceAggregator(Aggregator):
def __init__(self, aggregate_func, initializer=None, converter=None):
"""
Arguments:
aggregate_func -- function accepts 2 arguments and returns a value.
"""
self.initializer = initializer
super().__init__(aggregate_func, converter=converter)
def compute(self, values):
return self.reduce(values)
def reduce(self, edatas):
initialized = False
result = None
for edata in edatas:
edata = edata.value
            if edata is None:
                # A missing entry leaves the aggregate undefined for this group.
                return None
if not initialized:
initialized = True
                if self.initializer is None:
                    # No explicit initializer: seed the fold with the first
                    # value and keep accumulating.
                    result = edata
                    continue
result = self.initializer
result = self.func(result, edata)
return result
class ComplexAggregator(Aggregator):
def __init__(self, aggregate_func, converter=None):
"""
        An aggregator that takes all the entries in a group for a more complex calculation.
Arguments:
aggregate_func -- function accepts 1 argument of type list(EntryData) and
returns a value.
"""
super().__init__(aggregate_func, converter=converter)
def compute(self, edatas):
return self.func(edatas)
class Aggregators(object):
@staticmethod
def sum(initializer=0, converter=None):
return ReduceAggregator(
lambda acc, value: acc + value,
initializer=initializer,
converter=converter,
)
@staticmethod
def count(initializer=0, converter=None):
return ReduceAggregator(
lambda acc, value: acc + 1,
initializer=initializer,
converter=converter,
)
@staticmethod
def min(initializer=None, converter=None):
return ReduceAggregator(
lambda acc, value: acc if acc <= value else value,
initializer=initializer,
converter=converter,
)
@staticmethod
def max(initializer=None, converter=None):
return ReduceAggregator(
lambda acc, value: acc if acc >= value else value,
initializer=initializer,
converter=converter,
)
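# Illustrative sketch: aggregators fold the '.value' of each EntryData,
# so computing a column total looks like this (the values are made up).
def _example_column_total():
    edatas = [EntryData(value=v) for v in (10, 32, 8)]
    return Aggregators.sum().compute(edatas)  # -> 50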
class BaseProjector(object):
field_type = None # required override
source = None # optional override
keys = None # optional override
def __init__(self, source, *keys, **kwargs):
"""
Arguments:
source -- Name of the source to project from.
*keys -- Sequence : Key aliases to project in order of preference
(typically newest term to oldest term).
[None] : Use entire value of the source (typically used
when source contains individual value instead of
dict of values).
[number]: Use number as index into source (used when source
is a sequence of values).
Keyword Arguments:
for_each_key -- If True, return the key to the source when iterated by
'for_each', else use the value. NOTE - if not None and
source is not used in a 'for_each' then the sheet will
assert during render.
"""
self.source = source
self.keys = None if keys[0] is None else tuple(keys)
self.for_each_key = kwargs.get("for_each_key", None)
def __call__(self, sheet, sources):
try:
result = self.do_project(sheet, sources)
except (NoEntryException, ErrorEntryException):
raise
except Exception as e:
            # XXX - A debug log may be useful here, e.g.:
            # print('debug -', e, self.source, self.keys)
raise ErrorEntryException("unexpected error occurred: {}".format(e))
if result is None:
raise NoEntryException("No entry found for source {}".format(self.source))
return result
def do_project(self, sheet, sources):
raise NotImplementedError("override do_project")
def project_raw(self, sheet, sources, ignore_exception=False):
row = source_lookup(sources, self.source)
if self.source in sheet.for_each:
if self.for_each_key:
row = row[0]
else:
row = row[-1]
else:
assert (
self.for_each_key is None
), 'for_each_key set where "for_each" is not applied to the source'
if row is None:
raise NoEntryException("No entry for this row")
if not ignore_exception and isinstance(row, Exception):
raise ErrorEntryException(row, "Error occurred fetching row")
if self.keys is None:
# Setting 'self.keys' to None indicates that the field needs the
# entire value contained in the source.
return row
elif isinstance(self.keys[0], int):
            # Setting 'self.keys' to an integer indicates that the field
            # needs to access the contents of the row by index.
return row[self.keys[0]]
else:
# Setting 'self.keys' to one or more strings indicates that the
# field needs to be accessed by key.
try:
return next(row[k] for k in self.keys if k in row)
except (KeyError, StopIteration):
raise NoEntryException(
"{} does not contain any key in {}".format(self.source, self.keys)
)
class Projectors(object):
class Identity(BaseProjector):
field_type = FieldType.undefined
def do_project(self, sheet, sources):
try:
row = self.project_raw(sheet, sources)
except ErrorEntryException as e:
row = e.exc
return row
class String(BaseProjector):
field_type = FieldType.string
def do_project(self, sheet, sources):
"""
Arguments:
source -- A set of sources to project a string from.
"""
return str(self.project_raw(sheet, sources))
class Boolean(String):
field_type = FieldType.boolean
def do_project(self, sheet, sources):
"""
Arguments:
source -- A set of sources to project a boolean from.
"""
value = super().do_project(sheet, sources)
if isinstance(value, str):
return value.lower().strip() != "false"
return True if value else False
class Float(String):
field_type = FieldType.number
def do_project(self, sheet, sources):
"""
Arguments:
source -- A set of sources to project a float from.
"""
value = super().do_project(sheet, sources)
try:
return float(value)
except ValueError:
return value
class Number(String):
field_type = FieldType.number
def do_project(self, sheet, sources):
"""
Arguments:
source -- A set of sources to project a number from.
"""
value = super().do_project(sheet, sources)
try:
return int(value)
except ValueError:
try:
return int(float(value))
except ValueError:
pass
return value
class Percent(Number):
field_type = FieldType.number
def __init__(self, source, *keys, **kwargs):
"""
Arguments:
See 'BaseProjector'
Keyword Arguments:
invert -- False by default, if True will return 100 - value.
"""
super().__init__(source, *keys, **kwargs)
self.invert = kwargs.get("invert", False)
def do_project(self, sheet, sources):
"""
Arguments:
sheet -- The decleration.Sheet this field belongs to, needed for
determining if this field's source was iterated by
'for_each'.
source -- A set of sources to project a number from.
"""
value = super().do_project(sheet, sources)
return value if not self.invert else 100 - value
class Sum(BaseProjector):
field_type = FieldType.number
def __init__(self, *field_projectors):
"""
Arguments:
field_projectors -- Projectors to be summed.
"""
self.field_projectors = field_projectors
self.sources = set((field_fn.source for field_fn in field_projectors))
def do_project(self, sheet, sources):
"""
Arguments:
source -- A set of sources to project a sum of fields.
"""
result = 0
for field_projector in self.field_projectors:
result += field_projector(sheet, sources)
return result
class Div(BaseProjector):
field_type = FieldType.number
def __init__(self, numerator_projector, denominator_projector):
"""
            Arguments:
                numerator_projector -- Projector for the numerator.
                denominator_projector -- Projector for the denominator.
"""
self.numerator_projector = numerator_projector
self.denominator_projector = denominator_projector
self.sources = set(
(
field_fn.source
for field_fn in [numerator_projector, denominator_projector]
)
)
def do_project(self, sheet, sources):
"""
Arguments:
                source -- A set of sources to project a quotient of fields
                          from.
"""
result = self.numerator_projector(
sheet, sources
) / self.denominator_projector(sheet, sources)
return result
class Any(BaseProjector):
def __init__(self, field_type, *field_projectors):
"""
Arguments:
field_type -- The 'FieldType' for this field.
                field_projectors -- Projectors to try in order; the first
                                    one to succeed is returned, which is
                                    useful because missing keys make a
                                    projector fail.
"""
self.field_type = field_type
self.sources = set()
for field_fn in field_projectors:
if field_fn.source is None:
source = field_fn.sources
else:
source = set([field_fn.source])
self.sources = self.sources.union(source)
self.field_projectors = field_projectors
def do_project(self, sheet, sources):
"""
Arguments:
                source -- A set of sources to project the result of a
                          projector from.
"""
for field_projector in self.field_projectors:
try:
return field_projector(sheet, sources)
except NoEntryException:
pass
class Func(BaseProjector):
def __init__(self, field_type, func, *field_projectors):
"""
Arguments:
field_type -- The 'FieldType' for this field.
func -- A function to evaluate the projected fields.
field_projectors -- Projectors values will be used as the arguments
to func.
"""
self.field_type = field_type
self.sources = set((field_fn.source for field_fn in field_projectors))
self.func = func
self.field_projectors = field_projectors
def do_project(self, sheet, sources):
"""
Arguments:
                source -- A set of sources to project the result of a
                          function from.
"""
values = []
for field_projector in self.field_projectors:
try:
values.append(field_projector(sheet, sources))
except KeyError:
values.append(None)
return self.func(*values)
class Exception(BaseProjector):
def __init__(self, source, *keys, **kwargs):
"""
Arguments:
See 'BaseProjector'
Keyword Arguments:
filter_exc -- List of exception types to convert to strings.
"""
super().__init__(source, *keys, **kwargs)
self.filter_exc = kwargs.get("filter_exc", [])
def do_project(self, sheet, sources):
row = self.project_raw(sheet, sources, ignore_exception=True)
for exc_type in self.filter_exc:
if isinstance(row, exc_type):
return str(row)
if isinstance(row, Exception):
raise ErrorEntryException(row, "Error occurred fetching row")
return row
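# Illustrative sketch: projectors compose. The 'mem' source and its
# keys are hypothetical; the combinators are the ones defined above.
def _example_memory_used_fraction():
    used = Projectors.Number("mem", "used")
    total = Projectors.Number("mem", "total")
    # Each Number projector handles its own key lookup and coercion;
    # Div just divides their projected results.
    return Projectors.Div(used, total)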
class EntryData(object):
def __init__(self, **kwargs):
"""
Keyword Arguments:
value -- Unconverted entry of a field.
values -- Sequence of unconverted values for the current group or a
field.
record -- Cross-section of all fields at this entry's position.
common -- A dictionary of common data supplied to all fields.
"""
self.__dict__.update(kwargs)
class Converters(object):
@staticmethod
def _file_size(value, unit):
try:
return file_size.size(value, unit)
except Exception:
return value
@staticmethod
def byte(edata):
"""
Arguments:
edata -- Take an 'EntryData' and returns the value as byte units.
"""
return Converters._file_size(edata.value, file_size.byte)
@staticmethod
def scientific_units(edata):
"""
Arguments:
            edata -- Take an 'EntryData' and returns the value in floating
                     point International System (SI) units.
"""
return Converters._file_size(edata.value, file_size.si_float)
@staticmethod
def time_seconds(edata):
"""
Arguments:
edata -- Take an 'EntryData' and returns the value as time with format
HH:MM:SS.
"""
time_stamp = int(edata.value)
hours = time_stamp // 3600
minutes = (time_stamp % 3600) // 60
seconds = time_stamp % 60
return "{:02}:{:02}:{:02}".format(hours, minutes, seconds)
@staticmethod
def time_milliseconds(edata):
"""
Arguments:
            edata -- Take an 'EntryData' holding milliseconds and returns
                     the value as time with format HH:MM:SS.
"""
edata.value = int(edata.value) / 1000
return Converters.time_seconds(edata)
@staticmethod
def standard(edata):
"""
Arguments:
edata -- Take an 'EntryData' and returns the value as a string.
"""
return str(edata.value)
@staticmethod
def _list_to_str(edata, separator):
return separator.join(edata)
@staticmethod
def list_to_comma_sep_str(edata):
        if edata.value:
return Converters._list_to_str(edata.value, ", ")
return "--"
@staticmethod
def round(decimal):
def fun(edata):
return round(float(edata.value), decimal)
return fun
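# Illustrative sketch: a converter receives an EntryData and returns a
# display value, e.g.
def _example_converters():
    assert Converters.time_seconds(EntryData(value=3725)) == "01:02:05"
    assert Converters.round(1)(EntryData(value="3.14159")) == 3.1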
class Formatters(object):
@staticmethod
def _apply_style(style, not_style):
return lambda unformatted: style() + unformatted + not_style()
@staticmethod
def _should_apply(predicate_fn, style, not_style):
def _should_apply_helper(edata):
if edata.value is None:
return None
try:
if predicate_fn(edata):
return Formatters._apply_style(style, not_style)
else:
return None
except Exception:
return None
return _should_apply_helper
@staticmethod
def red_alert(predicate_fn):
"""
Arguments:
predicate_fn -- A function that accepts an 'EntryData' and if true sets
the foreground color to red for the entry.
Return:
A tuple containing the string form of the alert and the function to
apply to formatting to a cell.
"""
return (
"red-alert",
Formatters._should_apply(
predicate_fn, terminal.fg_red, terminal.fg_not_red
),
)
@staticmethod
def yellow_alert(predicate_fn):
"""Similar to red_alert but yellow instead of red."""
return (
"yellow-alert",
Formatters._should_apply(
predicate_fn, terminal.fg_yellow, terminal.fg_not_yellow
),
)
@staticmethod
def green_alert(predicate_fn):
"""Similar to red_alert but green instead of red."""
return (
"green-alert",
Formatters._should_apply(
predicate_fn, terminal.fg_green, terminal.fg_not_green
),
)
@staticmethod
def bold(predicate_fn):
"""Applies bold formatting if predicate evaluates to True."""
return (
"bold",
Formatters._should_apply(predicate_fn, terminal.bold, terminal.unbold),
)
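# Illustrative sketch: a formatter pairs a name with a predicate-driven
# styling function; the threshold here is made up.
def _example_error_rate_alert():
    # Paint entries red when the projected value exceeds 5 (percent).
    return Formatters.red_alert(lambda edata: edata.value > 5)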
class NoEntryException(Exception):
pass
class ErrorEntryException(Exception):
def __init__(self, error, *args):
self.exc = error
super().__init__(*args)
|
|
#
# coding: utf-8
# Copyright (c) 2018 DATADVANCE
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Logical message types definitions, encapsulating both field access and
wire serialization format.
Note:
Message classes are rather similar, so they could have been
generated using some factory. However, generating 'first-class' classes
without self-introspection would require really ugly hacks with exec
(e.g. see collections.namedtuple).
As a bonus, manual classes are IDE-friendly.
Note:
namedtuple itself isn't right as there is no sane way to 'bind' the
first tuple argument to be constant (MessageType).
"""
import collections
import msgpack
from .constants import MessageType
_message_by_type = {}
def message_class(cls):
"""Decorator to register message class for deserialization.
Explicit mapping looks cleaner than __subclasses__ hacks and
adds some bonus validation.
Args:
cls: Class with (not yet registered) class-level MESSAGE_TYPE variable.
Return:
Cls itself without any changes.
"""
global _message_by_type
assert cls.MESSAGE_TYPE in MessageType
assert cls.MESSAGE_TYPE not in _message_by_type
_message_by_type[cls.MESSAGE_TYPE] = cls
return cls
def pack(data):
"""Convert 'simple' (~POD) python object to a compact byte representation.
Args:
data: Object to pack.
Return:
Byte representation of the object.
"""
# kwargs to ensure proper encoding of unicode string
# (so it's distinct from byte strings)
return msgpack.packb(data, use_bin_type=True, encoding="utf-8")
def unpack(dgram):
"""Unpack byte representation of an object.
Args:
dgram: Serialized object bytes.
Return:
Unpacked object.
"""
# kwargs to ensure proper decode of unicode strings
return msgpack.unpackb(dgram, encoding="utf-8")
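# Illustrative sketch: pack/unpack round-trip plain data, keeping
# unicode strings distinct from byte strings (the values are made up).
def _example_roundtrip():
    data = {"id": 1, "method": "ping", "payload": b"\x00\x01"}
    assert unpack(pack(data)) == data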
class ProtocolMessage(object):
"""Base class for protocol messages."""
__slots__ = ("id",)
def __init__(self):
raise NotImplementedError()
@classmethod
    def from_bytes(cls, dgram):
        """
        Decode message from wire format.
        Args:
            dgram: Message bytes.
        Return:
            Decoded message.
        """
        message_tuple = unpack(dgram)
        message_type = message_tuple[0]
        message_cls = _message_by_type[message_type]
        return message_cls(*message_tuple[1:])
def to_bytes(self):
"""
Encode message to wire format.
Return:
Serialized message bytes.
"""
# Theoretically, tuple is redundant there
# as the message content can be deserialized
# based solely on MessageType implicitly
# encoding number of elements.
#
# However, overhead of array based serialization is just
# 1 byte due to msgpack's fixarray format
# (applies for arrays with <= 15 elements).
#
# And it makes serialization/deserialization code really simple.
#
# Worth it )
return pack(self.to_tuple())
def to_tuple(self):
"""
Convert message to a tuple (with fixed field order).
"""
raise NotImplementedError()
#: Handshake data. Very basic for now.
#:
HandshakeData = collections.namedtuple(
"HandshakeData", ("protocol_version", "id", "user_data")
)
#: Serialized exception object.
#:
ErrorDescription = collections.namedtuple(
"ErrorDescription", ("message", "cause_type", "cause_message", "trace")
)
@message_class
class RequestHandshake(ProtocolMessage):
"""Handshake message sent by 'connecting' side.
Contains 1 payload field:
* args: handshake 'arguments' (must be HandshakeData tuple)
"""
__slots__ = ("args",)
MESSAGE_TYPE = MessageType.REQUEST_HANDSHAKE
def __init__(self, id, args):
self.id = id
self.args = HandshakeData(*args)
def to_tuple(self):
return (self.MESSAGE_TYPE, self.id, self.args)
@message_class
class RequestCallStart(ProtocolMessage):
"""Message requesting a new call.
Contains 4 payload fields:
    * call_type: enum value describing call type
* method: method name
* args: positional argument list
* kwargs: keyword argument dict
"""
__slots__ = ("call_type", "method", "args", "kwargs")
MESSAGE_TYPE = MessageType.REQUEST_CALL_START
def __init__(self, id, call_type, method, args, kwargs):
self.id = id
self.call_type = call_type
self.method = method
self.args = args
self.kwargs = kwargs
def to_tuple(self):
return (self.MESSAGE_TYPE, self.id, self.call_type,
self.method, self.args, self.kwargs)
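# Illustrative sketch of a full wire round-trip, assuming MessageType
# values are int-like so msgpack can encode them; the id and call
# arguments here are made up.
def _example_call_roundtrip():
    msg = RequestCallStart(42, call_type=0, method="ping", args=[], kwargs={})
    decoded = ProtocolMessage.from_bytes(msg.to_bytes())
    assert isinstance(decoded, RequestCallStart)
    assert (decoded.id, decoded.method) == (42, "ping")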
@message_class
class RequestCallCancel(ProtocolMessage):
"""Request to close remote stream on the callee side.
Has no payload.
"""
__slots__ = ()
MESSAGE_TYPE = MessageType.REQUEST_CALL_CANCEL
def __init__(self, id):
self.id = id
def to_tuple(self):
return (self.MESSAGE_TYPE, self.id)
@message_class
class RequestStreamMessage(ProtocolMessage):
"""Data message containing stream message from caller.
Only 1 payload field:
* data: unserialized data (will be packed with the message itself)
"""
__slots__ = ("data",)
MESSAGE_TYPE = MessageType.REQUEST_STREAM_MESSAGE
def __init__(self, id, data):
self.id = id
self.data = data
def to_tuple(self):
return (self.MESSAGE_TYPE, self.id, self.data)
@message_class
class RequestStreamClose(ProtocolMessage):
"""Request to close remote stream on the callee side.
Has no payload.
"""
__slots__ = ()
MESSAGE_TYPE = MessageType.REQUEST_STREAM_CLOSE
def __init__(self, id):
self.id = id
def to_tuple(self):
return (self.MESSAGE_TYPE, self.id)
@message_class
class ResponseHandshake(ProtocolMessage):
"""Handshake call response.
Contains 3 payload fields:
* result: call return value (must be a HandshakeData tuple)
* status: call status
* error: error as ErrorDescription tuple
"""
__slots__ = ("result", "status", "error")
MESSAGE_TYPE = MessageType.RESPONSE_HANDSHAKE
def __init__(self, id, result, status, error):
self.id = id
self.result = HandshakeData(*result)
self.status = status
# There is a trade-off between 'expanded' errors and nested error tuple.
# As most calls are expected to finish successfully, nested approach
# looks better (so we have just 1 byte overhead on success and don't
# really care about redundant unpacking if exception actually happened).
self.error = ErrorDescription(*error) if error is not None else None
def to_tuple(self):
return (
self.MESSAGE_TYPE, self.id, self.result, self.status, self.error
)
@message_class
class ResponseResult(ProtocolMessage):
"""Regular call final response.
    Has similar structure to the handshake response,
but the result isn't limited to HandshakeData.
Contains 3 payload fields:
* result: call return value
* status: call status
* error: error as ErrorDescription tuple
"""
__slots__ = ("result", "status", "error")
MESSAGE_TYPE = MessageType.RESPONSE_RESULT
def __init__(self, id, result, status, error):
self.id = id
self.result = result
self.status = status
self.error = ErrorDescription(*error) if error is not None else None
def to_tuple(self):
return (
self.MESSAGE_TYPE, self.id, self.result, self.status, self.error
)
@message_class
class ResponseStreamMessage(RequestStreamMessage):
"""Data message containing stream message from callee.
Only 1 payload field:
* data: unserialized data (will be packed with the message itself)
"""
__slots__ = ("data",)
MESSAGE_TYPE = MessageType.RESPONSE_STREAM_MESSAGE
@message_class
class ResponseStreamClose(RequestStreamClose):
"""Request to close stream on the calling side.
Has no payload.
"""
__slots__ = ()
MESSAGE_TYPE = MessageType.RESPONSE_STREAM_CLOSE
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import subprocess
import time
import unittest
from silk.config import wpan_constants as wpan
from silk.node.wpan_node import WpanCredentials
from silk.tools import wpan_table_parser
from silk.tools.wpan_util import verify, verify_within
from silk.utils import process_cleanup
import silk.hw.hw_resource as hwr
import silk.node.fifteen_four_dev_board as ffdb
import silk.tests.testcase as testcase
CHILD_TIMEOUT = 6
CHILD_SUPERVISION_CHECK_TIMEOUT = 12
PARENT_SUPERVISION_INTERVAL = 10
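# Timing note (inferred from the tests below): the child's supervision
# check timeout (12s) is chosen to exceed the parent's supervision
# interval (10s), so a supervised child receives a supervision message
# before its check timer expires.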
hwr.global_instance()
class TestChildSupervision(testcase.TestCase):
poll_interval = 500
@classmethod
def hardware_select(cls):
cls.router = ffdb.ThreadDevBoard()
cls.sed = ffdb.ThreadDevBoard()
@classmethod
@testcase.setup_class_decorator
def setUpClass(cls):
# Check and clean up wpantund process if any left over
process_cleanup.ps_cleanup()
cls.hardware_select()
cls.router.set_logger(cls.logger)
cls.sed.set_logger(cls.logger)
cls.add_test_device(cls.router)
cls.add_test_device(cls.sed)
cls.router.set_up()
cls.sed.set_up()
cls.network_data = WpanCredentials(network_name="SILK-{0:04X}".format(random.randint(0, 0xffff)),
psk="00112233445566778899aabbccdd{0:04x}".format(random.randint(0, 0xffff)),
channel=random.randint(11, 25),
fabric_id="{0:06x}dead".format(random.randint(0, 0xffffff)))
cls.thread_sniffer_init(cls.network_data.channel)
@classmethod
@testcase.teardown_class_decorator
def tearDownClass(cls):
for device in cls.device_list:
device.tear_down()
@testcase.setup_decorator
def setUp(self):
pass
@testcase.teardown_decorator
def tearDown(self):
pass
@testcase.test_method_decorator
def test01_Pairing(self):
self.router.form(self.network_data, "router")
self.router.permit_join(3600)
self.wait_for_completion(self.device_list)
self.logger.info(self.router.ip6_lla)
self.logger.info(self.router.ip6_thread_ula)
self.network_data.xpanid = self.router.xpanid
self.network_data.panid = self.router.panid
self.sed.join(self.network_data, "sleepy-end-device")
self.wait_for_completion(self.device_list)
self.sed.set_sleep_poll_interval(self.poll_interval)
self.sed.setprop(wpan.WPAN_THREAD_CHILD_TIMEOUT, str(CHILD_TIMEOUT))
@testcase.test_method_decorator
def test02_GetWpanStatus(self):
for _ in range(1):
ret = self.router.wpanctl("get", "status", 2)
print(ret)
ret = self.router.wpanctl("get", "get Thread:NeighborTable", 2)
print(ret)
ret = self.router.wpanctl("get", "get Thread:ChildTable", 2)
print(ret)
ret = self.sed.wpanctl("get", "status", 2)
print(ret)
ret = self.sed.wpanctl("get", "get NCP:ExtendedAddress", 2)
print("Extended Address:{}".format(ret))
ret = self.sed.wpanctl("get", "get NCP:HardwareAddress", 2)
print("SED Hardware Address:{}".format(ret))
ret = self.sed.wpanctl("get", "get NCP:MACAddress", 2)
print("SED MAC Address:{}".format(ret))
time.sleep(5)
@testcase.test_method_decorator
def test03_PingRouterLLA(self):
self.ping6(self.sed, self.router.ip6_lla, num_pings=10, allowed_errors=5, ping_size=200)
@testcase.test_method_decorator
def test04_settings(self):
self.sed.setprop(wpan.WPAN_POLL_INTERVAL, str(self.poll_interval))
interval = self.sed.getprop(wpan.WPAN_POLL_INTERVAL)
print(interval)
self.assertEqual(int(interval), self.poll_interval)
self.sed.setprop(wpan.WPAN_THREAD_CHILD_TIMEOUT, str(CHILD_TIMEOUT))
timeout = self.sed.getprop(wpan.WPAN_THREAD_CHILD_TIMEOUT)
print(timeout)
self.assertEqual(int(timeout), CHILD_TIMEOUT)
self.sed.setprop(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, "0")
child_supervision_timeout = self.sed.getprop(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT)
print(child_supervision_timeout)
self.assertEqual(int(child_supervision_timeout, 16), 0)
self.router.setprop(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, "0")
child_supervision_interval = self.router.getprop(wpan.WPAN_CHILD_SUPERVISION_INTERVAL)
print(child_supervision_interval)
self.assertEqual(int(child_supervision_interval, 16), 0)
@testcase.test_method_decorator
def test05_verify_childTable(self):
child_table = self.router.wpanctl("get", "get Thread:ChildTable", 2)
child_table = wpan_table_parser.parse_child_table_result(child_table)
sed_ext_address = self.sed.wpanctl("get", "get NCP:ExtendedAddress", 2).split("=")[-1].strip()[1:-1]
for e in child_table:
if e.ext_address == sed_ext_address:
break
        else:
            msg = "Failed to find a child entry for extended address {} in table".format(sed_ext_address)
            self.fail(msg)
self.assertEqual(int(e.rloc16, 16),
int(self.sed.wpanctl("get", "get " + wpan.WPAN_THREAD_RLOC16, 2).split("=")[-1].strip(), 16))
self.assertEqual(int(e.timeout, 0), CHILD_TIMEOUT)
self.assertEqual(e.is_rx_on_when_idle(), False)
self.assertEqual(e.is_ftd(), False)
@testcase.test_method_decorator
def test06_enable_allowlist(self):
self.router.setprop(wpan.WPAN_MAC_ALLOWLIST_ENABLED, "1")
print(self.router.getprop(wpan.WPAN_MAC_ALLOWLIST_ENABLED))
self.assertEqual(self.router.getprop(wpan.WPAN_MAC_ALLOWLIST_ENABLED), "true")
time.sleep(CHILD_TIMEOUT + 3)
child_table = self.router.wpanctl("get", "get Thread:ChildTable", 2)
print("Child Table:")
print(child_table)
child_table = wpan_table_parser.parse_child_table_result(child_table)
sed_ext_address = self.sed.wpanctl("get", "get NCP:ExtendedAddress", 2).split("=")[-1].strip()[1:-1]
print(child_table)
for e in child_table:
            self.assertNotEqual(e.ext_address, sed_ext_address,
                                "SED MAC {} still in Router ChildTable".format(e.ext_address))
# verify the sed is still associated since data polls are acked by radio
# driver and supervision check is disabled on the child
self.assertTrue(wpan_table_parser.is_associated(self.sed), "SED is not associated !!!")
@testcase.test_method_decorator
def test07_enable_supervision_on_child(self):
# Enable supervision check on child and expect the child to
# become detached after the check timeout
self.sed.setprop(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, str(CHILD_SUPERVISION_CHECK_TIMEOUT))
self.assertEqual(int(self.sed.getprop(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT), 16),
int(CHILD_SUPERVISION_CHECK_TIMEOUT))
time.sleep(CHILD_SUPERVISION_CHECK_TIMEOUT * 3 + 1)
self.assertTrue(wpan_table_parser.check_child_is_detached(self.sed), "SED is still associated!!!")
@testcase.test_method_decorator
def test08_enable_supervision_on_parent(self):
# Enable child supervision on parent and disable allowlisting
self.router.setprop(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
self.router.setprop(wpan.WPAN_MAC_ALLOWLIST_ENABLED, "0")
# Wait for the child to attach back
time.sleep(CHILD_SUPERVISION_CHECK_TIMEOUT * 2)
self.assertTrue(wpan_table_parser.is_associated(self.sed), "SED is still not associated!!!")
# MAC counters are used to verify the child supervision behavior.
parent_unicast_tx_count = int(self.router.getprop("NCP:Counter:TX_PKT_UNICAST"), 0)
time.sleep(PARENT_SUPERVISION_INTERVAL * 1.2)
# To verify that the parent is indeed sending empty "supervision"
# messages to its child, MAC counter for number of unicast tx is
# used.
print(parent_unicast_tx_count)
print(self.router.getprop("NCP:Counter:TX_PKT_UNICAST"))
self.assertGreaterEqual(int(self.router.getprop("NCP:Counter:TX_PKT_UNICAST"), 0), parent_unicast_tx_count + 1)
# Disable child supervision on parent
self.router.setprop(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, "0")
time.sleep(CHILD_SUPERVISION_CHECK_TIMEOUT * 3)
self.assertTrue(wpan_table_parser.is_associated(self.sed), "SED is still not associated!!!")
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
from rally.benchmark.scenarios import base
from rally.benchmark import types as types
from rally.benchmark import utils as bench_utils
from rally.benchmark import validation
from rally.common import log as logging
from rally import consts
from rally import exceptions as rally_exceptions
from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils
from rally.plugins.openstack.scenarios.nova import utils
from rally.plugins.openstack.wrappers import network as network_wrapper
LOG = logging.getLogger(__name__)
class NovaServers(utils.NovaScenario,
cinder_utils.CinderScenario):
"""Benchmark scenarios for Nova servers."""
RESOURCE_NAME_PREFIX = "rally_novaserver_"
RESOURCE_NAME_LENGTH = 16
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def boot_and_list_server(self, image, flavor,
detailed=True, **kwargs):
"""Boot a server from an image and then list all servers.
Measure the "nova list" command performance.
If you have only 1 user in your context, you will
add 1 server on every iteration. So you will have more
and more servers and will be able to measure the
performance of the "nova list" command depending on
the number of servers owned by users.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param detailed: True if the server listing should contain
detailed information about all of them
:param kwargs: Optional additional arguments for server creation
"""
self._boot_server(image, flavor, **kwargs)
self._list_servers(detailed)
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def list_servers(self, detailed=True):
"""List all servers.
        This simple scenario tests the "nova list" command by listing
        all the servers.
:param detailed: True if detailed information about servers
should be listed
"""
self._list_servers(detailed)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def boot_and_delete_server(self, image, flavor,
min_sleep=0, max_sleep=0,
force_delete=False, **kwargs):
"""Boot and delete a server.
        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between server creation and deletion
        (of random duration from [min_sleep, max_sleep]).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@base.scenario(context={"cleanup": ["nova"]})
def boot_and_delete_multiple_servers(self, image, flavor, count=2,
min_sleep=0, max_sleep=0,
force_delete=False, **kwargs):
"""Boot multiple servers in a single request and delete them.
Deletion is done in parallel with one request per server, not
with a single request for all servers.
:param image: The image to boot from
:param flavor: Flavor used to boot instance
:param count: Number of instances to boot
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for instance creation
"""
servers = self._boot_servers(image, flavor, 1, instances_amount=count,
**kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_servers(servers, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova", "cinder"]})
def boot_server_from_volume_and_delete(self, image, flavor,
volume_size,
min_sleep=0, max_sleep=0,
force_delete=False, **kwargs):
"""Boot a server from volume and then delete it.
The scenario first creates a volume and then a server.
        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between server creation and deletion
        (of random duration from [min_sleep, max_sleep]).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param volume_size: volume size (in GB)
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
volume = self._create_volume(volume_size, imageRef=image)
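        # Legacy Nova block_device_mapping string format is
        # "<id>:<type>:<size>:<delete_on_terminate>"; "%s:::1" boots from
        # the volume and deletes it together with the server.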
block_device_mapping = {"vda": "%s:::1" % volume.id}
server = self._boot_server(image, flavor,
block_device_mapping=block_device_mapping,
**kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def boot_and_bounce_server(self, image, flavor,
force_delete=False, actions=None, **kwargs):
"""Boot a server and run specified actions against it.
Actions should be passed into the actions parameter. Available actions
are 'hard_reboot', 'soft_reboot', 'stop_start' and 'rescue_unrescue'.
Delete server after all actions were completed.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param actions: list of action dictionaries, where each action
                        dictionary specifies an action to be performed
in the following format:
{"action_name": <no_of_iterations>}
:param kwargs: Optional additional arguments for server creation
"""
action_builder = self._bind_actions()
actions = actions or []
try:
action_builder.validate(actions)
except jsonschema.exceptions.ValidationError as error:
raise rally_exceptions.InvalidConfigException(
"Invalid server actions configuration \'%(actions)s\' due to: "
"%(error)s" % {"actions": str(actions), "error": str(error)})
server = self._boot_server(image, flavor, **kwargs)
for action in action_builder.build_actions(actions, server):
action()
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def boot_lock_unlock_and_delete(self, image, flavor,
min_sleep=0, max_sleep=0,
force_delete=False,
**kwargs):
"""Boot a server, lock it, then unlock and delete it.
Optional 'min_sleep' and 'max_sleep' parameters allow the
scenario to simulate a pause between locking and unlocking the
server (of random duration from min_sleep to max_sleep).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param min_sleep: Minimum sleep time between locking and unlocking
in seconds
:param max_sleep: Maximum sleep time between locking and unlocking
in seconds
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._lock_server(server)
self.sleep_between(min_sleep, max_sleep)
self._unlock_server(server)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.GLANCE)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova", "glance"]})
def snapshot_server(self, image, flavor,
force_delete=False, **kwargs):
"""Boot a server, make its snapshot and delete both.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
image = self._create_image(server)
self._delete_server(server, force=force_delete)
server = self._boot_server(image.id, flavor, **kwargs)
self._delete_server(server, force=force_delete)
self._delete_image(image)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def boot_server(self, image, flavor, auto_assign_nic=False, **kwargs):
"""Boot a server.
Assumes that cleanup is done elsewhere.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param auto_assign_nic: True if NICs should be assigned
:param kwargs: Optional additional arguments for server creation
"""
self._boot_server(image, flavor,
auto_assign_nic=auto_assign_nic, **kwargs)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova", "cinder"]})
def boot_server_from_volume(self, image, flavor, volume_size,
auto_assign_nic=False, **kwargs):
"""Boot a server from volume.
The scenario first creates a volume and then a server.
Assumes that cleanup is done elsewhere.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param volume_size: volume size (in GB)
:param auto_assign_nic: True if NICs should be assigned
:param kwargs: Optional additional arguments for server creation
"""
volume = self._create_volume(volume_size, imageRef=image)
block_device_mapping = {"vda": "%s:::1" % volume.id}
self._boot_server(image, flavor, auto_assign_nic=auto_assign_nic,
block_device_mapping=block_device_mapping,
**kwargs)
def _bind_actions(self):
actions = ["hard_reboot", "soft_reboot", "stop_start",
"rescue_unrescue"]
action_builder = bench_utils.ActionBuilder(actions)
action_builder.bind_action("hard_reboot", self._reboot_server)
action_builder.bind_action("soft_reboot", self._soft_reboot_server)
action_builder.bind_action("stop_start",
self._stop_and_start_server)
action_builder.bind_action("rescue_unrescue",
self._rescue_and_unrescue_server)
return action_builder
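    def _example_actions(self):
        """Illustrative sketch (not used by the scenarios): an 'actions'
        configuration accepted by boot_and_bounce_server, pairing each
        action name registered in _bind_actions with an iteration count.
        """
        return [{"hard_reboot": 2}, {"stop_start": 1}]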
def _stop_and_start_server(self, server):
"""Stop and then start the given server.
        A stop will be issued on the given server, after which this
        method will wait for the server to become 'SHUTOFF'.
Once the server is SHUTOFF a start will be issued and this
method will wait for the server to become 'ACTIVE' again.
:param server: The server to stop and then start.
"""
self._stop_server(server)
self._start_server(server)
def _rescue_and_unrescue_server(self, server):
"""Rescue and then unrescue the given server.
        A rescue will be issued on the given server, after which this
        method will wait for the server to become 'RESCUE'.
        Once the server is RESCUE an unrescue will be issued and
        this method will wait for the server to become 'ACTIVE'
        again.
:param server: The server to rescue and then unrescue.
"""
self._rescue_server(server)
self._unrescue_server(server)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType,
to_flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def resize_server(self, image, flavor, to_flavor,
force_delete=False, **kwargs):
"""Boot a server, then resize and delete it.
This test will confirm the resize by default,
or revert the resize if confirm is set to false.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param to_flavor: flavor to be used to resize the booted instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._resize(server, to_flavor)
# by default we confirm
confirm = kwargs.get("confirm", True)
if confirm:
self._resize_confirm(server)
else:
self._resize_revert(server)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def suspend_and_resume_server(self, image, flavor,
force_delete=False, **kwargs):
"""Create a server, suspend, resume and then delete it
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._suspend_server(server)
self._resume_server(server)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def pause_and_unpause_server(self, image, flavor,
force_delete=False, **kwargs):
"""Create a server, pause, unpause and then delete it
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._pause_server(server)
self._unpause_server(server)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def shelve_and_unshelve_server(self, image, flavor,
force_delete=False, **kwargs):
"""Create a server, shelve, unshelve and then delete it
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._shelve_server(server)
self._unshelve_server(server)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@base.scenario(context={"cleanup": ["nova"]})
def boot_and_live_migrate_server(self, image,
flavor, block_migration=False,
disk_over_commit=False, min_sleep=0,
max_sleep=0, **kwargs):
"""Live Migrate a server.
This scenario launches a VM on a compute node available in
the availability zone and then migrates the VM to another
compute node on the same availability zone.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between VM booting and running live migration
(of random duration from range [min_sleep, max_sleep]).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param block_migration: Specifies the migration type
:param disk_over_commit: Specifies whether to allow overcommit
on migrated instance or not
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self.sleep_between(min_sleep, max_sleep)
new_host = self._find_host_to_migrate(server)
self._live_migrate(server, new_host,
block_migration, disk_over_commit)
self._delete_server(server)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(admin=True, users=True)
@base.scenario(context={"cleanup": ["nova", "cinder"]})
def boot_server_from_volume_and_live_migrate(self, image, flavor,
volume_size,
block_migration=False,
disk_over_commit=False,
force_delete=False,
min_sleep=0, max_sleep=0,
**kwargs):
"""Boot a server from volume and then migrate it.
The scenario first creates a volume and a server booted from
the volume on a compute node available in the availability zone and
then migrates the VM to another compute node on the same availability
zone.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between VM booting and running live migration
(of random duration from range [min_sleep, max_sleep]).
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param volume_size: volume size (in GB)
:param block_migration: Specifies the migration type
:param disk_over_commit: Specifies whether to allow overcommit
on migrated instance or not
:param force_delete: True if force_delete should be used
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
:param kwargs: Optional additional arguments for server creation
"""
volume = self._create_volume(volume_size, imageRef=image)
block_device_mapping = {"vda": "%s:::1" % volume.id}
server = self._boot_server(image, flavor,
block_device_mapping=block_device_mapping,
**kwargs)
self.sleep_between(min_sleep, max_sleep)
new_host = self._find_host_to_migrate(server)
self._live_migrate(server, new_host,
block_migration, disk_over_commit)
self._delete_server(server, force=force_delete)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(admin=True, users=True)
@base.scenario(context={"cleanup": ["cinder", "nova"]})
def boot_server_attach_created_volume_and_live_migrate(
self,
image,
flavor,
size,
block_migration=False,
disk_over_commit=False,
boot_server_kwargs=None,
create_volume_kwargs=None,
min_sleep=0,
max_sleep=0):
"""Create a VM, attach a volume to it and live migrate.
Simple test to create a VM and attach a volume, then migrate the VM,
detach the volume and delete volume/VM.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between attaching a volume and running live
migration (of random duration from range [min_sleep, max_sleep]).
:param image: Glance image name to use for the VM
:param flavor: VM flavor name
:param size: volume size (in GB)
:param block_migration: Specifies the migration type
:param disk_over_commit: Specifies whether to allow overcommit
on migrated instance or not
:param boot_server_kwargs: optional arguments for VM creation
:param create_volume_kwargs: optional arguments for volume creation
:param min_sleep: Minimum sleep time in seconds (non-negative)
:param max_sleep: Maximum sleep time in seconds (non-negative)
"""
if boot_server_kwargs is None:
boot_server_kwargs = {}
if create_volume_kwargs is None:
create_volume_kwargs = {}
server = self._boot_server(image, flavor, **boot_server_kwargs)
volume = self._create_volume(size, **create_volume_kwargs)
self._attach_volume(server, volume)
self.sleep_between(min_sleep, max_sleep)
new_host = self._find_host_to_migrate(server)
self._live_migrate(server, new_host,
block_migration, disk_over_commit)
self._detach_volume(server, volume)
self._delete_volume(volume)
self._delete_server(server)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@base.scenario(context={"cleanup": ["nova"]})
def boot_and_migrate_server(self, image, flavor, **kwargs):
"""Migrate a server.
This scenario launches a VM on a compute node available in
the availability zone and stops the VM, and then migrates the VM
to another compute node on the same availability zone.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
self._stop_server(server)
self._migrate(server)
# NOTE(wtakase): This is required because cold migration and resize
# share same code path.
confirm = kwargs.get("confirm", True)
if confirm:
self._resize_confirm(server, status="SHUTOFF")
else:
self._resize_revert(server, status="SHUTOFF")
self._delete_server(server)
@types.set(from_image=types.ImageResourceType,
to_image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "from_image")
@validation.image_valid_on_flavor("flavor", "to_image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@base.scenario(context={"cleanup": ["nova"]})
def boot_and_rebuild_server(self, from_image, to_image, flavor, **kwargs):
"""Rebuild a server.
This scenario launches a VM, then rebuilds that VM with a
different image.
:param from_image: image to be used to boot an instance
:param to_image: image to be used to rebuild the instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(from_image, flavor, **kwargs)
self._rebuild_server(server, to_image)
self._delete_server(server)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@validation.required_contexts("network")
@base.scenario(context={"cleanup": ["nova"]})
def boot_and_associate_floating_ip(self, image, flavor, **kwargs):
"""Boot a server and associate a floating IP to it.
:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param kwargs: Optional additional arguments for server creation
"""
server = self._boot_server(image, flavor, **kwargs)
address = network_wrapper.wrap(self.clients).create_floating_ip(
tenant_id=server.tenant_id)
self._associate_floating_ip(server, address["ip"])
|
|
# Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for PCI request."""
import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from nova import context
from nova import exception
from nova.network import model
from nova import objects
from nova.objects import fields
from nova.pci import request
from nova import test
from nova.tests.unit.api.openstack import fakes
_fake_alias1 = jsonutils.dumps({
"name": "QuickAssist",
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"device_type": "type-PCI",
"numa_policy": "legacy",
})
_fake_alias2 = jsonutils.dumps({
"name": "IntelNIC",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "type-PF",
})
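# For orientation: a request string pairs alias names with counts; the
# tests below feed strings like this to _translate_alias_to_requests,
# which expands them into InstancePCIRequest specs.
_EXAMPLE_ALIAS_REQUEST = "QuickAssist : 3, IntelNIC: 1"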
class PciRequestTestCase(test.NoDBTestCase):
@staticmethod
def _create_fake_inst_with_pci_devs(pci_req_list, pci_dev_list):
"""Create a fake Instance object with the provided InstancePciRequests
and PciDevices.
:param pci_req_list: a list of InstancePCIRequest objects.
        :param pci_dev_list: a list of PciDevice objects, each element
            associated (via request_id attribute) with a corresponding
            element from pci_req_list.
:return: A fake Instance object associated with the provided
PciRequests and PciDevices.
"""
inst = objects.Instance()
inst.uuid = uuidsentinel.instance1
inst.pci_requests = objects.InstancePCIRequests(
requests=pci_req_list)
inst.pci_devices = objects.PciDeviceList(objects=pci_dev_list)
inst.host = 'fake-host'
inst.node = 'fake-node'
return inst
def setUp(self):
super(PciRequestTestCase, self).setUp()
self.context = context.RequestContext(fakes.FAKE_USER_ID,
fakes.FAKE_PROJECT_ID)
def test_get_alias_from_config_valid(self):
self.flags(alias=[_fake_alias1], group='pci')
result = request._get_alias_from_config()
expected_result = (
'legacy',
[{
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"dev_type": "type-PCI",
}])
self.assertEqual(expected_result, result['QuickAssist'])
def test_get_alias_from_config_valid_multispec(self):
_fake_alias = jsonutils.dumps({
"name": "QuickAssist",
"capability_type": "pci",
"product_id": "4444",
"vendor_id": "8086",
"device_type": "type-PCI",
})
self.flags(alias=[_fake_alias1, _fake_alias], group='pci')
result = request._get_alias_from_config()
expected_result = (
'legacy',
[{
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"dev_type": "type-PCI"
}, {
"capability_type": "pci",
"product_id": "4444",
"vendor_id": "8086",
"dev_type": "type-PCI"
}])
self.assertEqual(expected_result, result['QuickAssist'])
def _test_get_alias_from_config_invalid(self, alias):
self.flags(alias=[alias], group='pci')
self.assertRaises(
exception.PciInvalidAlias,
request._get_alias_from_config)
def test_get_alias_from_config_invalid_device_type(self):
fake_alias = jsonutils.dumps({
"name": "xxx",
"device_type": "N",
})
self._test_get_alias_from_config_invalid(fake_alias)
def test_get_alias_from_config_device_type_vdpa(self):
fake_alias = jsonutils.dumps({
"name": "xxx",
"device_type": "vdpa",
})
self._test_get_alias_from_config_invalid(fake_alias)
def test_get_alias_from_config_invalid_product_id(self):
fake_alias = jsonutils.dumps({
"name": "xxx",
"product_id": "g111",
})
self._test_get_alias_from_config_invalid(fake_alias)
def test_get_alias_from_config_invalid_vendor_id(self):
fake_alias = jsonutils.dumps({
"name": "xxx",
"vendor_id": "0xg111",
})
self._test_get_alias_from_config_invalid(fake_alias)
def test_get_alias_from_config_invalid_capability_type(self):
fake_alias = jsonutils.dumps({
"name": "xxx",
"capability_type": "usb",
})
self._test_get_alias_from_config_invalid(fake_alias)
def test_get_alias_from_config_invalid_numa_policy(self):
fake_alias = jsonutils.dumps({
"name": "xxx",
"numa_policy": "derp",
})
self._test_get_alias_from_config_invalid(fake_alias)
def test_get_alias_from_config_invalid_arbitrary_field(self):
fake_alias = jsonutils.dumps({
"name": "xxx",
"foo": "bar",
})
self._test_get_alias_from_config_invalid(fake_alias)
def test_get_alias_from_config_valid_numa_policy(self):
for policy in fields.PCINUMAAffinityPolicy.ALL:
fake_alias = jsonutils.dumps({
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "type-PCI",
"numa_policy": policy,
})
self.flags(alias=[fake_alias], group='pci')
aliases = request._get_alias_from_config()
self.assertIsNotNone(aliases)
self.assertIn("xxx", aliases)
self.assertEqual(policy, aliases["xxx"][0])
def test_get_alias_from_config_conflicting_device_type(self):
"""Check behavior when device_type conflicts occur."""
fake_alias_a = jsonutils.dumps({
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "type-PF"
})
fake_alias_b = jsonutils.dumps({
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "type-PCI"
})
self.flags(alias=[fake_alias_a, fake_alias_b], group='pci')
self.assertRaises(
exception.PciInvalidAlias,
request._get_alias_from_config)
def test_get_alias_from_config_conflicting_numa_policy(self):
"""Check behavior when numa_policy conflicts occur."""
fake_alias_a = jsonutils.dumps({
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"numa_policy": "required",
})
fake_alias_b = jsonutils.dumps({
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"numa_policy": "legacy",
})
self.flags(alias=[fake_alias_a, fake_alias_b], group='pci')
self.assertRaises(
exception.PciInvalidAlias,
request._get_alias_from_config)
def _verify_result(self, expected, real):
exp_real = zip(expected, real)
for exp, real in exp_real:
self.assertEqual(exp['count'], real.count)
self.assertEqual(exp['alias_name'], real.alias_name)
self.assertEqual(exp['spec'], real.spec)
def test_translate_alias_to_requests(self):
self.flags(alias=[_fake_alias1, _fake_alias2], group='pci')
expect_request = [
{'count': 3,
'requester_id': None,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'dev_type': 'type-PCI',
'capability_type': 'pci'}],
'alias_name': 'QuickAssist'},
{'count': 1,
'requester_id': None,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'dev_type': "type-PF",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
requests = request._translate_alias_to_requests(
"QuickAssist : 3, IntelNIC: 1")
self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
self._verify_result(expect_request, requests)
def test_translate_alias_to_requests_invalid(self):
self.flags(alias=[_fake_alias1, _fake_alias2], group='pci')
self.assertRaises(exception.PciRequestAliasNotDefined,
request._translate_alias_to_requests,
"QuickAssistX : 3")
def test_translate_alias_to_requests_affinity_policy(self):
# _fake_alias1 requests the legacy policy and _fake_alias2
# has no numa_policy set so it will default to legacy.
self.flags(alias=[_fake_alias1, _fake_alias2], group='pci')
        # To test that the flavor/image policy takes precedence,
        # use the preferred policy here.
policy = fields.PCINUMAAffinityPolicy.PREFERRED
expect_request = [
{'count': 3,
'requester_id': None,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'dev_type': 'type-PCI',
'capability_type': 'pci'}],
'alias_name': 'QuickAssist',
'numa_policy': policy
},
{'count': 1,
'requester_id': None,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'dev_type': "type-PF",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC',
'numa_policy': policy
}, ]
requests = request._translate_alias_to_requests(
"QuickAssist : 3, IntelNIC: 1", affinity_policy=policy)
self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
self._verify_result(expect_request, requests)
@mock.patch.object(objects.compute_node.ComputeNode,
'get_by_host_and_nodename')
def test_get_instance_pci_request_from_vif_invalid(
self,
cn_get_by_host_and_node):
        # Make sure we raise an exception if an instance has an allocated
        # PCI device without its corresponding PCIRequest object in
        # instance.pci_requests.
mock_inst_cn = mock.Mock()
mock_inst_cn.id = 1
cn_get_by_host_and_node.return_value = mock_inst_cn
# Create a fake instance with PCI request and allocated PCI devices
pci_dev1 = objects.PciDevice(request_id=uuidsentinel.pci_req_id1,
address='0000:04:00.0',
compute_node_id=1)
pci_req2 = objects.InstancePCIRequest(
request_id=uuidsentinel.pci_req_id2)
pci_dev2 = objects.PciDevice(request_id=uuidsentinel.pci_req_id2,
address='0000:05:00.0',
compute_node_id=1)
pci_request_list = [pci_req2]
pci_device_list = [pci_dev1, pci_dev2]
inst = PciRequestTestCase._create_fake_inst_with_pci_devs(
pci_request_list,
pci_device_list)
# Create a VIF with pci_dev1 that has no corresponding PCI request
pci_vif = model.VIF(vnic_type=model.VNIC_TYPE_DIRECT,
profile={'pci_slot': '0000:04:00.0'})
self.assertRaises(exception.PciRequestFromVIFNotFound,
request.get_instance_pci_request_from_vif,
self.context,
inst,
pci_vif)
@mock.patch.object(objects.compute_node.ComputeNode,
'get_by_host_and_nodename')
def test_get_instance_pci_request_from_vif(self, cn_get_by_host_and_node):
mock_inst_cn = mock.Mock()
mock_inst_cn.id = 1
cn_get_by_host_and_node.return_value = mock_inst_cn
# Create a fake instance with PCI request and allocated PCI devices
pci_req1 = objects.InstancePCIRequest(
request_id=uuidsentinel.pci_req_id1)
pci_dev1 = objects.PciDevice(request_id=uuidsentinel.pci_req_id1,
address='0000:04:00.0',
                                     compute_node_id=1)
pci_req2 = objects.InstancePCIRequest(
request_id=uuidsentinel.pci_req_id2)
pci_dev2 = objects.PciDevice(request_id=uuidsentinel.pci_req_id2,
address='0000:05:00.0',
compute_node_id=1)
pci_request_list = [pci_req1, pci_req2]
pci_device_list = [pci_dev1, pci_dev2]
inst = PciRequestTestCase._create_fake_inst_with_pci_devs(
pci_request_list,
pci_device_list)
# Create a vif with normal port and make sure no PCI request returned
normal_vif = model.VIF(vnic_type=model.VNIC_TYPE_NORMAL)
self.assertIsNone(request.get_instance_pci_request_from_vif(
self.context,
inst,
normal_vif))
# Create a vif with PCI address under profile, make sure the correct
# PCI request is returned
pci_vif = model.VIF(vnic_type=model.VNIC_TYPE_DIRECT,
profile={'pci_slot': '0000:05:00.0'})
self.assertEqual(uuidsentinel.pci_req_id2,
request.get_instance_pci_request_from_vif(
self.context,
inst,
pci_vif).request_id)
# Create a vif with PCI under profile which is not claimed
# for the instance, i.e no matching pci device in instance.pci_devices
nonclaimed_pci_vif = model.VIF(vnic_type=model.VNIC_TYPE_DIRECT,
profile={'pci_slot': '0000:08:00.0'})
self.assertIsNone(request.get_instance_pci_request_from_vif(
self.context,
inst,
nonclaimed_pci_vif))
# "Move" the instance to another compute node, make sure that no
# matching PCI request against the new compute.
mock_inst_cn.id = 2
self.assertIsNone(request.get_instance_pci_request_from_vif(
self.context,
inst,
pci_vif))
def test_get_pci_requests_from_flavor(self):
self.flags(alias=[_fake_alias1], group='pci')
expect_request = [
{
'count': 3,
'spec': [
{
'vendor_id': '8086',
'product_id': '4443',
'dev_type': "type-PCI",
'capability_type': 'pci',
}
],
'alias_name': 'QuickAssist'
},
]
flavor = {'extra_specs': {'pci_passthrough:alias': 'QuickAssist:3'}}
requests = request.get_pci_requests_from_flavor(flavor)
self.assertEqual(1, len(requests.requests))
self.assertEqual({3, }, {p.count for p in requests.requests})
self._verify_result(expect_request, requests.requests)
def test_get_pci_requests_from_flavor_multiple(self):
self.flags(alias=[_fake_alias1, _fake_alias2], group='pci')
expect_request = [
{'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'dev_type': "type-PCI",
'capability_type': 'pci'}],
'alias_name': 'QuickAssist'},
{'count': 1,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'dev_type': "type-PF",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
flavor = {'extra_specs': {"pci_passthrough:alias":
"QuickAssist:3, IntelNIC: 1"}}
requests = request.get_pci_requests_from_flavor(flavor)
self.assertEqual(2, len(requests.requests))
self.assertEqual({3, 1}, {p.count for p in requests.requests})
self._verify_result(expect_request, requests.requests)
def test_get_pci_requests_from_flavor_including_space(self):
_fake_alias4 = jsonutils.dumps({
"name": " Cirrus Logic ",
"capability_type": "pci",
"product_id": "0ff2",
"vendor_id": "10de",
"device_type": "type-PCI",
})
self.flags(alias=[_fake_alias2, _fake_alias4], group='pci')
expect_request = [
{'count': 4,
'spec': [{'vendor_id': '10de', 'product_id': '0ff2',
'dev_type': "type-PCI",
'capability_type': 'pci'}],
'alias_name': 'Cirrus Logic'},
{'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'dev_type': "type-PF",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
flavor = {'extra_specs': {"pci_passthrough:alias":
" Cirrus Logic : 4, IntelNIC: 3"}}
requests = request.get_pci_requests_from_flavor(flavor)
self.assertEqual(2, len(requests.requests))
self.assertEqual({3, 4}, {p.count for p in requests.requests})
self._verify_result(expect_request, requests.requests)
def test_get_pci_requests_from_flavor_no_extra_spec(self):
self.flags(alias=[_fake_alias1, _fake_alias2], group='pci')
flavor = {}
requests = request.get_pci_requests_from_flavor(flavor)
self.assertEqual([], requests.requests)
@mock.patch.object(
request, "_translate_alias_to_requests", return_value=[])
def test_get_pci_requests_from_flavor_affinity_policy(
self, mock_translate):
self.flags(alias=[_fake_alias1, _fake_alias2], group='pci')
flavor = {'extra_specs': {"pci_passthrough:alias":
"QuickAssist:3, IntelNIC: 1"}}
policy = fields.PCINUMAAffinityPolicy.PREFERRED
request.get_pci_requests_from_flavor(flavor, affinity_policy=policy)
mock_translate.assert_called_with(mock.ANY, affinity_policy=policy)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Functions Hook.
"""
import time
from typing import Any, Dict, List, Optional
import requests
from googleapiclient.discovery import build
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.base import CloudBaseHook
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 1
# noinspection PyAbstractClass
class CloudFunctionsHook(CloudBaseHook):
"""
Hook for the Google Cloud Functions APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
_conn = None # type: Optional[Any]
def __init__(
self,
api_version: str,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None
) -> None:
super().__init__(gcp_conn_id, delegate_to)
self.api_version = api_version
@staticmethod
def _full_location(project_id: str, location: str) -> str:
"""
Retrieve full location of the function in the form of
``projects/<GCP_PROJECT_ID>/locations/<GCP_LOCATION>``
:param project_id: The Google Cloud Project project_id where the function belongs.
:type project_id: str
:param location: The location where the function is created.
:type location: str
        :return: The full location string, in the format above.
"""
return 'projects/{}/locations/{}'.format(project_id, location)
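    # For example (hypothetical values; a quick sketch of the helper above):
    #   CloudFunctionsHook._full_location('my-project', 'us-central1')
    #   returns 'projects/my-project/locations/us-central1'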
def get_conn(self):
"""
Retrieves the connection to Cloud Functions.
:return: Google Cloud Function services object.
:rtype: dict
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build('cloudfunctions', self.api_version,
http=http_authorized, cache_discovery=False)
return self._conn
def get_function(self, name: str) -> Dict:
"""
Returns the Cloud Function with the given name.
:param name: Name of the function.
:type name: str
:return: A Cloud Functions object representing the function.
:rtype: dict
"""
return self.get_conn().projects().locations().functions().get( # pylint: disable=no-member
name=name).execute(num_retries=self.num_retries)
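    # Example usage (a sketch; the connection defaults and the function path
    # below are hypothetical, following the
    # ``projects/<id>/locations/<loc>/functions/<name>`` format):
    #   hook = CloudFunctionsHook(api_version='v1')
    #   fn = hook.get_function(
    #       'projects/my-project/locations/us-central1/functions/my-fn')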
@CloudBaseHook.fallback_to_default_project_id
def create_new_function(self, location: str, body: Dict, project_id: Optional[str] = None) -> None:
"""
Creates a new function in Cloud Function in the location specified in the body.
:param location: The location of the function.
:type location: str
:param body: The body required by the Cloud Functions insert API.
:type body: dict
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
if not project_id:
raise ValueError("The project_id should be set")
response = self.get_conn().projects().locations().functions().create( # pylint: disable=no-member
location=self._full_location(project_id, location),
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name)
def update_function(self, name: str, body: Dict, update_mask: List[str]) -> None:
"""
Updates Cloud Functions according to the specified update mask.
:param name: The name of the function.
:type name: str
:param body: The body required by the cloud function patch API.
:type body: dict
:param update_mask: The update mask - array of fields that should be patched.
:type update_mask: [str]
:return: None
"""
response = self.get_conn().projects().locations().functions().patch( # pylint: disable=no-member
updateMask=",".join(update_mask),
name=name,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name)
@CloudBaseHook.fallback_to_default_project_id
def upload_function_zip(self, location: str, zip_path: str, project_id: Optional[str] = None) -> str:
"""
Uploads zip file with sources.
:param location: The location where the function is created.
:type location: str
:param zip_path: The path of the valid .zip file to upload.
:type zip_path: str
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: The upload URL that was returned by generateUploadUrl method.
:rtype: str
"""
if not project_id:
raise ValueError("The project_id should be set")
response = \
self.get_conn().projects().locations().functions().generateUploadUrl( # pylint: disable=no-member # noqa
parent=self._full_location(project_id, location)
).execute(num_retries=self.num_retries)
upload_url = response.get('uploadUrl')
with open(zip_path, 'rb') as file:
requests.put(
url=upload_url,
data=file,
                # These two headers need to be specified according to:
# https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions/generateUploadUrl
# nopep8
headers={
'Content-type': 'application/zip',
'x-goog-content-length-range': '0,104857600',
}
)
return upload_url
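    # A minimal usage sketch (hypothetical values; given a hook instance,
    # the returned URL is typically referenced as ``sourceUploadUrl`` in the
    # body passed to create_new_function/update_function):
    #   url = hook.upload_function_zip(location='us-central1',
    #                                  zip_path='/tmp/fn.zip',
    #                                  project_id='my-project')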
def delete_function(self, name: str) -> None:
"""
Deletes the specified Cloud Function.
:param name: The name of the function.
:type name: str
:return: None
"""
response = self.get_conn().projects().locations().functions().delete( # pylint: disable=no-member
name=name).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name)
@CloudBaseHook.fallback_to_default_project_id
def call_function(
self,
function_id: str,
input_data: Dict,
location: str,
project_id: Optional[str] = None
) -> Dict:
"""
        Synchronously invokes a deployed Cloud Function. This should only be
        used for testing purposes, as very limited traffic is allowed.
:param function_id: ID of the function to be called
:type function_id: str
:param input_data: Input to be passed to the function
:type input_data: Dict
:param location: The location where the function is located.
:type location: str
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
        :return: The response returned by the function invocation.
"""
name = "projects/{project_id}/locations/{location}/functions/{function_id}".format(
project_id=project_id,
location=location,
function_id=function_id
)
response = self.get_conn().projects().locations().functions().call( # pylint: disable=no-member
name=name,
body=input_data
).execute(num_retries=self.num_retries)
if 'error' in response:
raise AirflowException(response['error'])
return response
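    # Example invocation (a sketch with hypothetical values; input_data must
    # match the API's call request body, e.g. {'data': 'ping'}):
    #   result = hook.call_function(function_id='my-fn',
    #                               input_data={'data': 'ping'},
    #                               location='us-central1',
    #                               project_id='my-project')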
def _wait_for_operation_to_complete(self, operation_name: str) -> Dict:
"""
Waits for the named operation to complete - checks status of the
asynchronous call.
:param operation_name: The name of the operation.
:type operation_name: str
:return: The response returned by the operation.
:rtype: dict
:exception: AirflowException in case error is returned.
"""
service = self.get_conn()
while True:
operation_response = service.operations().get( # pylint: disable=no-member
name=operation_name,
).execute(num_retries=self.num_retries)
if operation_response.get("done"):
response = operation_response.get("response")
error = operation_response.get("error")
                # Note: according to the documentation, either response or
                # error is always set when "done" == True.
if error:
raise AirflowException(str(error))
return response
time.sleep(TIME_TO_SLEEP_IN_SECONDS)
|
|
from datetime import datetime as py_datetime
import decimal
import random
import string
from faker.generators.address import *
from faker.generators.company import *
from faker.generators.date import datetime as f_datetime
from faker.generators.internet import *
from faker.generators.lorem import *
from faker.generators.name import *
from faker.generators.phone_number import *
from faker.generators.francais import *
from faker.generators.utils import bothify
from anonymizer import replacers
randrange = random.SystemRandom().randrange
# All ASCII letters and digits, used as the alphabet for random strings.
alphanumeric = string.ascii_letters + string.digits
general_chars = alphanumeric + " _-"
class DjangoFaker(object):
"""
Class that provides fake data, using Django specific knowledge to ensure
acceptable data for Django models.
"""
#faker = Faker()
def __init__(self):
self.init_values = {}
def _prep_init(self, field):
if field in self.init_values:
return
field_vals = set(x[0] for x in field.model._default_manager.values_list(field.name))
self.init_values[field] = field_vals
def get_allowed_value(self, source, field, option=None):
retval = source(option) if option else source()
if field is None:
return retval
        # Enforce uniqueness: make sure we don't return a value that matches
        # any existing value or any new value we have already handed out.
unique = getattr(field, 'unique', None)
if unique:
self._prep_init(field)
used = self.init_values[field]
for i in xrange(0, 10):
if retval in used:
retval = source()
else:
break
if retval in used:
raise Exception("Cannot generate unique data for field %s. Last value tried %s" % (field, retval))
used.add(retval)
# Enforce max_length
max_length = getattr(field, 'max_length', None)
if max_length is not None:
retval = retval[:max_length]
return retval
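    # A sketch of the contract above: ``source`` is a callable producing
    # candidate values (called as source(option) when an option is given);
    # uniqueness and max_length are then enforced against ``field``.
    # For example:
    #   DjangoFaker().get_allowed_value(lambda: 'anon', field=None) == 'anon'
    # because a None field skips both checks.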
    ### Public interface ###
def varchar(self, field=None):
"""
Returns a chunk of text, of maximum length 'max_length'
"""
assert field is not None, "The field parameter must be passed to the 'varchar' method."
max_length = field.max_length
def source():
length = random.choice(range(0, max_length + 1))
return "".join(random.choice(general_chars) for i in xrange(length))
return self.get_allowed_value(source, field)
def simple_pattern(self, pattern, field=None):
"""
Use a simple pattern to make the field - # is replaced with a random number,
? with a random letter.
"""
source = lambda: bothify(pattern)
return self.get_allowed_value(source, field)
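    # For example (output is random; a sketch of the pattern syntax above):
    #   DjangoFaker().simple_pattern('##-??')  ->  e.g. '42-xq'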
def bool(self, field=None):
"""
Returns a random boolean
"""
source = lambda: bool(randrange(0, 2))
return self.get_allowed_value(source, field)
def integer(self, field=None):
source = lambda: random.randint(-1000000, 1000000)
return self.get_allowed_value(source, field)
def positive_integer(self, field=None):
source = lambda: random.randint(0, 1000000)
return self.get_allowed_value(source, field)
def small_integer(self, field=None):
source = lambda: random.randint(-32768, +32767)
return self.get_allowed_value(source, field)
def positive_small_integer(self, field=None):
source = lambda: random.randint(0, 32767)
return self.get_allowed_value(source, field)
def datetime(self, field=None, val=None):
"""
Returns a random datetime. If 'val' is passed, a datetime within two
years of that date will be returned.
"""
if val is None:
source = lambda: f_datetime(py_datetime.strptime("01/01/1900", "%d/%m/%Y"))
else:
source = lambda: f_datetime(py_datetime.strptime("01/01/1900", "%d/%m/%Y"),
val.strftime("%d/%m/%Y"))
return self.get_allowed_value(source, field)
def date(self, field=None, val=None):
"""
Like datetime, but truncated to be a date only
"""
d = self.datetime(field=field, val=val)
return d.date()
def decimal(self, field=None, val=None):
source = lambda: decimal.Decimal(random.randrange(0, 100000))/(10**field.decimal_places)
return self.get_allowed_value(source, field)
def lorem(self, field=None, val=None):
"""
Returns lorem ipsum text. If val is provided, the lorem ipsum text will
be the same length as the original text, and with the same pattern of
line breaks.
"""
if val is not None:
def generate(length):
# Get lorem ipsum of a specific length.
collect = ""
while len(collect) < length:
collect += paragraphs()
collect = collect[:length]
return collect
# We want to match the pattern of the text - linebreaks
# in the same places.
def source():
parts = val.split("\n")
for i, p in enumerate(parts):
# Replace each bit with lorem ipsum of the same length
parts[i] = generate(len(p))
return "\n".join(parts)
else:
source = paragraphs
return self.get_allowed_value(source, field)
def choice(self, field=None):
assert field is not None, "The field parameter must be passed to the 'choice' method."
choices = [c[0] for c in field.choices]
source = lambda: random.choice(choices)
return self.get_allowed_value(source, field)
## Other attributes provided by 'Faker':
# username
# first_name
# last_name
# name
# email
# full_address
# phone_number
# street_address
# city
# state
# zip_code
# company
def __getattr__(self, name):
# we delegate most calls to faker, but add checks
#source = getattr(self.faker, name)
def func(*args, **kwargs):
field = kwargs.get('field', None)
parametre = kwargs.get('parametre', None)
return self.get_allowed_value(eval(name), field, parametre)
return func
class Anonymizer(object):
"""
Base class for all anonymizers. When executed with the ``run()`` method,
it will anonymize the data for a specific model.
"""
model = None
# attributes is a dictionary of {attribute_name: replacer}, where replacer is
# a callable that takes as arguments this Anonymizer instance, the object to
# be altered, the field to be altered, and the current field value, and
# returns a replacement value.
# This signature is designed to be useful for making lambdas that call the
# 'faker' instance provided on this class, but it can be used with any
# function.
attributes = None
# To impose an order on Anonymizers within a module, this can be set - lower
# values are done first.
order = 0
faker = DjangoFaker()
def get_query_set(self):
"""
Returns the QuerySet to be manipulated
"""
if self.model is None:
raise Exception("'model' attribute must be set")
qs = self.model._default_manager.get_query_set()
if len([f for f in self.model._meta.fields if f.name == 'id']) == 1:
qs = qs.order_by('id')
return qs
def get_attributes(self):
if self.attributes is None:
raise Exception("'attributes' attribute must be set")
return self.attributes
def alter_object(self, obj):
"""
Alters all the attributes in an individual object.
If it returns False, the object will not be saved
"""
attributes = self.get_attributes()
for attname, replacer, option in attributes:
if replacer == "SKIP":
continue
self.alter_object_attribute(obj, attname, replacer, option)
def alter_object_attribute(self, obj, attname, replacer, option):
"""
Alters a single attribute in an object.
"""
currentval = getattr(obj, attname)
field = obj._meta.get_field_by_name(attname)[0]
if isinstance(replacer, str):
# 'email' is shortcut for: replacers.email
replacer = getattr(replacers, replacer)
elif not callable(replacer):
raise Exception("Expected callable or string to be passed, got %r." % replacer)
if option:
            # The option is one of:
            # - a field name -> parametre = the value of that field
            # - a format string -> parametre = the format string
            # - a format string,field name -> parametre = the format string
            #   merged with the field value
parametre = ''
formatage = ''
for opt in option.split(','):
try:
                    # Check whether the option is a field of the table.
parametre = getattr(obj, opt)
except AttributeError:
                    # Double transformation:
                    # - insert the field value into the format string
parametre = opt.format(parametre) if parametre else opt
            # - replace each '!' with the character at the same index
            #   of currentval
i = 0
while i < len(parametre):
if parametre[i] == '!':
if currentval and i < len(currentval):
formatage += currentval[i]
else:
formatage += parametre[i]
i += 1
parametre = formatage
replacement = replacer(self, obj, field, currentval, parametre)
else:
replacement = replacer(self, obj, field, currentval)
setattr(obj, attname, replacement)
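    # A worked sketch of the ``option`` handling above (hypothetical values):
    # with option='ref,FR-{}!!!' where obj.ref == 'X', the field lookup sets
    # parametre to 'X', the next part formats it into 'FR-X!!!', and each '!'
    # is then replaced by the character at the same index of currentval.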
def run(self):
self.validate()
for obj in self.get_query_set().iterator():
retval = self.alter_object(obj)
if retval is not False:
obj.save()
def validate(self):
attributes = self.get_attributes()
model_attrs = set(f.attname for f in self.model._meta.fields)
given_attrs = set(name for name,replacer,lien in attributes)
if model_attrs != given_attrs:
msg = ""
missing_attrs = model_attrs - given_attrs
if missing_attrs:
msg += "The following fields are missing: %s. " % ", ".join(missing_attrs)
msg += "Add the replacer \"SKIP\" to skip these fields."
extra_attrs = given_attrs - model_attrs
if extra_attrs:
msg += "The following non-existent fields were supplied: %s." % ", ".join(extra_attrs)
raise ValueError("The attributes list for %s does not match the complete list of fields for that model. %s" % (self.model.__name__, msg))
|
|
#!/usr/bin/env python
from generator.actions import Actions
import random
import struct
def random_bytes(cnt):
return ''.join([chr(random.randint(0,255)) for x in xrange(cnt)])
def generate_bmp(data):
width = random.randint(1, 30)
height = (len(data) / 3 / width) + 1
nbytes = width * height * 3
padding = random_bytes(nbytes - len(data))
comment = random_bytes(32)
hdr = struct.pack('<HHHH', 0xA9C0, 0, width, height)
return hdr + comment + data + padding
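# Layout sketch of the container built above (little-endian):
#   <HHHH: magic 0xA9C0, reserved 0, width, height>
#   32 bytes of comment, then width*height*3 bytes of payload
#   followed by random padding.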
def generate_exe(data):
xork = random.randint(1, 255)
xor = lambda s, n: ''.join([chr(ord(x) ^ n) for x in s])
exe = Executable()
exe.rw_data = xor(data, xork)
exe.pusha()
reg = Register(random.randint(0, 3))
exe.move_abs(reg, 0)
for x in xrange(random.randint(0, 10)):
exe.nop()
exe.loop_op(('xor', xork), reg, len(data))
for x in xrange(random.randint(0, 10)):
exe.nop()
exe.popa()
return exe.as_string()
class Register(object):
def __init__(self, num):
self.num = num
self.value = None
class Executable(object):
def __init__(self):
self.base_addr = random.randint(0x10000, 0x38000000)
self.ep = self.base_addr
self.ro_data = ''
self.rw_data = ''
self.zero_size = random.randint(0x1000, 0x100000)
self.rw_fixups = []
@property
def ro_addr(self):
return self.base_addr
@property
def rw_addr(self):
return self.ro_addr + len(self.ro_data)
@property
def zero_addr(self):
return self.rw_addr + len(self.rw_data)
@property
def ip(self):
return self.ro_addr + len(self.ro_data)
@property
def header(self):
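        # Header layout (eight little-endian 32-bit words, as packed below):
        # magic 0x4E269472, entry point, ro addr, ro size,
        # rw addr, rw size, zero addr, zero size.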
return struct.pack('<IIIIIIII', 0x4E269472, self.ep, self.ro_addr, len(self.ro_data),
self.rw_addr, len(self.rw_data), self.zero_addr, self.zero_size)
def as_string(self):
for addr, func in self.rw_fixups:
addr -= self.ro_addr
x = struct.unpack('<I', self.ro_data[addr:addr+4])[0]
x = func(x, self.rw_addr)
self.ro_data = self.ro_data[:addr] + struct.pack('<I', x & 0xffffffff) + self.ro_data[addr+4:]
return self.header + self.ro_data + self.rw_data
def nop(self):
self.ro_data += random.choice([
'\x90',
'\x40\x48',
'\x41\x49',
'\x42\x4a',
'\x43\x4b',
'\x44\x4c',
'\x45\x4d',
'\x46\x4e',
'\x47\x4f',
])
def pusha(self):
self.ro_data += '\x60'
def popa(self):
self.ro_data += '\x61'
def pop(self, reg):
self.ro_data += chr(0x58 | reg.num)
def move_ip(self, reg):
if random.choice([True, False]):
# use CALL instruction
padding = random_bytes(random.randint(0, 6))
result = self.ip + 5
self.ro_data += struct.pack('<BI', 0xE8, len(padding)) + padding
self.pop(reg)
return result
else:
# use absolute address
result = self.ip
self.ro_data += struct.pack('<BBI', 0xC7, 0xC0 | reg.num, self.ip)
return result
def move_abs(self, reg, value):
rval = self.move_ip(reg)
if random.choice([True, False]):
self.do_op(('sub', -(value - rval)), ('reg', reg))
self.rw_fixups += [(self.ip - 4, lambda a, x: a - x)]
else:
self.do_op(('add', +(value - rval)), ('reg', reg))
self.rw_fixups += [(self.ip - 4, lambda a, x: a + x)]
def loop_op(self, op, reg, count):
# choose to inc reg, inc counter, or unroll
R = random.randint(0, 2)
if R <= 1:
cmpreg = random.choice(list(set(range(4)) - {reg.num}))
cmpreg = Register(cmpreg)
            tmpreg = random.choice(list(set(range(4)) - {cmpreg.num, reg.num}))
tmpreg = Register(tmpreg)
if R == 0:
self.do_op(('mov', reg), ('reg', cmpreg))
self.do_op(('add', count), ('reg', cmpreg))
self.do_op(('mov', reg), ('reg', tmpreg))
top = len(self.ro_data)
self.do_op(('cmp', cmpreg), ('reg', tmpreg))
self.ro_data += chr(0x74) + chr(13)
self.do_op(op, ('mem_disp', tmpreg, 0))
self.ro_data += chr(0x40 | tmpreg.num)
self.ro_data += chr(0xEB) + chr((top - (len(self.ro_data) + 2)) & 0xff)
else:
self.do_op(('mov', count), ('reg', cmpreg))
self.do_op(('mov', 0), ('reg', tmpreg))
top = len(self.ro_data)
self.do_op(('cmp', cmpreg), ('reg', tmpreg))
self.ro_data += chr(0x74) + chr(10)
self.do_op(op, ('mem_rr', reg, tmpreg))
self.ro_data += chr(0x40 | tmpreg.num)
self.ro_data += chr(0xEB) + chr((top - (len(self.ro_data) + 2)) & 0xff)
else:
# unroll the loop
for x in xrange(count):
self.do_op(op, ('mem_disp', reg, x))
def modrm(self, r1, r2):
if not isinstance(r2, int):
raise Exception('bad r2')
mtype = r1[0]
if mtype == 'mem_disp':
reg, disp = r1[1], r1[2]
self.ro_data += chr(0x80 | reg.num | (r2 << 3)) + struct.pack('<I', disp & 0xffffffff)
elif mtype == 'reg':
reg = r1[1]
self.ro_data += chr(0xC0 | reg.num | (r2 << 3))
elif mtype == 'mem_rr':
base, index = r1[1], r1[2]
self.ro_data += chr(0x00 | 4 | (r2 << 3))
self.ro_data += chr(base.num | (index.num << 3))
else:
raise Exception('unknown mtype: ' + mtype)
def do_op(self, op, target):
opcodes = {
'add': 0x01,
'sub': 0x29,
'cmp': 0x39,
'mov': 0x89
}
subopcodes = {
'add': 0,
'or': 1,
'adc': 2,
'sbb': 3,
'and': 4,
'sub': 5,
'xor': 6,
'cmp': 7
}
op, src = op
if isinstance(src, int):
# imm32
if op == 'mov':
self.ro_data += '\xc7'
self.modrm(target, 0)
else:
self.ro_data += '\x81'
self.modrm(target, subopcodes[op])
self.ro_data += struct.pack('<I', src & 0xffffffff)
elif isinstance(src, Register):
self.ro_data += chr(opcodes[op])
self.modrm(target, src.num)
else:
raise Exception('bad src argument')
class TemplateGenerator(Actions):
QUIT = 0x7160f126
UPDATE = 0x30a7db1a
CHECK = 0x367bae4d
WHITELIST = 0xfa5059f
OK = 0x6fc5499a
ERROR = 0x42083b85
def start(self):
#self.delay(100)
self.state['rules'] = [self.magic_page[:128]]
self.state['whitelist'] = []
def cmd(self, c, d=''):
self.write(struct.pack('<II', 4 + len(d), c) + d)
def response(self, r):
self.read(length=4, expect=struct.pack('<I', r))
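    # Framing sketch (from cmd()/response() above): each request is
    # <I length = 4 + len(data)><I command id><data>, little-endian,
    # and each response is a single 32-bit status word (OK or ERROR).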
def update_str(self, rules):
s = ''
for r in rules:
s += struct.pack('<BI', int(r[0]), len(r[1])) + r[1]
cksum = sum([ord(x) for x in s]) + 0x12345678
return struct.pack('<I', -cksum & 0xffffffff) + s
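    # Wire-format sketch of the UPDATE payload built above: a 32-bit
    # little-endian checksum, -(byte sum + 0x12345678) mod 2**32, followed
    # by <B flag><I length><data> records, where flag 0 adds a rule and
    # flag 1 removes one (see update() below).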
def menu(self):
pass
def query(self):
result = self.OK
r = random.randint(0, 10)
if r < 5:
self.comment('CHECK match rule')
result = self.ERROR
rule = random.choice(self.state['rules'])
s = random_bytes(random.randint(0, 10)) + rule + random_bytes(random.randint(0, 10))
else:
rule = None
s = random_bytes(random.randint(1, 40))
r = random.randint(0, 10)
if r < 1:
# encode as bmp
bmp = generate_bmp(s)
if self.chance(0.5):
# bad bmp
self.comment('CHECK bad bmp')
bmp += random_bytes(random.randint(1, 20))
result = self.ERROR
s = bmp
elif r < 3:
# encode as exe
self.comment('CHECK exe wrapper')
exe = generate_exe(s)
s = exe
else:
# don't wrap
pass
if result == self.OK:
self.comment('CHECK ok')
elif self.chance(0.1) and len(self.state['whitelist']) > 0:
self.comment('whitelist')
s += random.choice(self.state['whitelist'])
result = self.OK
self.cmd(self.CHECK, s)
self.response(result)
def update(self):
self.comment('UPDATE')
r = []
for x in xrange(random.randint(5, 10)):
if self.chance(0.8) or len(self.state['rules']) <= 1:
# add some rules
self.comment('update add')
s = random_bytes(random.randint(5, 30))
self.state['rules'].append(s)
r.append((0, s))
elif len(self.state['rules']) > 1:
# remove some rules
self.comment('update remove')
s = random.choice(self.state['rules'])
self.state['rules'].remove(s)
r.append((1, s))
self.cmd(self.UPDATE, self.update_str(r))
self.response(self.OK)
def whitelist(self):
self.comment('WHITELIST')
s = random_bytes(random.randint(10, 30))
self.state['whitelist'].append(s)
self.cmd(self.WHITELIST, s)
self.response(self.OK)
def quit(self):
self.cmd(self.QUIT)
self.response(self.OK)
|