#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for artifacts."""
import os
import subprocess
import time
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# Pull in some extra artifacts used for testing.
from grr.lib import artifact_lib_test
# pylint: enable=unused-import,g-bad-import-order
from grr.client import client_utils_linux
from grr.client import client_utils_osx
from grr.client import vfs
from grr.client.client_actions import standard
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_lib
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import parsers
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
# pylint: mode=test
WMI_SAMPLE = [
rdfvalue.Dict({u"Version": u"65.61.49216", u"InstallDate2": u"",
u"Name": u"Google Chrome", u"Vendor": u"Google, Inc.",
u"Description": u"Google Chrome", u"IdentifyingNumber":
u"{35790B21-ACFE-33F5-B320-9DA320D96682}",
u"InstallDate": u"20130710"}),
rdfvalue.Dict({u"Version": u"7.0.1", u"InstallDate2": u"",
u"Name": u"Parity Agent", u"Vendor": u"Bit9, Inc.",
u"Description": u"Parity Agent", u"IdentifyingNumber":
u"{ADC7EB41-4CC2-4FBA-8FBE-9338A9FB7666}",
u"InstallDate": u"20130710"}),
rdfvalue.Dict({u"Version": u"8.0.61000", u"InstallDate2": u"",
u"Name": u"Microsoft Visual C++ 2005 Redistributable (x64)",
u"Vendor": u"Microsoft Corporation", u"Description":
u"Microsoft Visual C++ 2005 Redistributable (x64)",
u"IdentifyingNumber":
u"{ad8a2fa1-06e7-4b0d-927d-6e54b3d3102}",
u"InstallDate": u"20130710"})]
class TestCmdProcessor(parsers.CommandParser):
output_types = ["SoftwarePackage"]
supported_artifacts = ["TestCmdArtifact"]
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
knowledge_base):
_ = cmd, args, stdout, stderr, return_val, time_taken, knowledge_base
installed = rdfvalue.SoftwarePackage.InstallState.INSTALLED
soft = rdfvalue.SoftwarePackage(name="Package1", description="Desc1",
version="1", architecture="amd64",
install_state=installed)
yield soft
soft = rdfvalue.SoftwarePackage(name="Package2", description="Desc2",
version="1", architecture="i386",
install_state=installed)
yield soft
# Also yield something random so we can test return type filtering.
yield rdfvalue.StatEntry()
# Also yield an anomaly to test that.
yield rdfvalue.Anomaly(type="PARSER_ANOMALY",
symptom="could not parse gremlins.")
class MultiProvideParser(parsers.RegistryValueParser):
output_types = ["Dict"]
supported_artifacts = ["DepsProvidesMultiple"]
def Parse(self, stat, knowledge_base):
_ = stat, knowledge_base
test_dict = {"environ_temp": rdfvalue.RDFString("tempvalue"),
"environ_path": rdfvalue.RDFString("pathvalue")}
yield rdfvalue.Dict(test_dict)
class RekallMock(action_mocks.MemoryClientMock):
def __init__(self, client_id, result_filename):
self.result_filename = result_filename
self.client_id = client_id
def RekallAction(self, _):
# Generate this file with:
# rekall -r data -f win7_trial_64bit.raw pslist > rekall_pslist_result.dat
ps_list_file = os.path.join(config_lib.CONFIG["Test.data_dir"],
self.result_filename)
result = rdfvalue.RekallResponse(
json_messages=open(ps_list_file).read(10000000),
plugin="pslist",
client_urn=self.client_id)
return [result, rdfvalue.Iterator(state="FINISHED")]
class ArtifactTest(test_lib.GRRBaseTest):
"""Helper class for tests using artifacts."""
def setUp(self):
super(ArtifactTest, self).setUp()
self.client_id = self.SetupClients(1)[0]
@classmethod
def LoadTestArtifacts(cls):
test_artifacts_file = os.path.join(
config_lib.CONFIG["Test.data_dir"], "test_artifacts.json")
artifact_lib.LoadArtifactsFromFiles([test_artifacts_file])
class MockClient(action_mocks.MemoryClientMock):
def WmiQuery(self, _):
return WMI_SAMPLE
def SetWindowsClient(self):
fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
fd.Set(fd.Schema.SYSTEM("Windows"))
fd.Set(fd.Schema.OS_VERSION("6.2"))
fd.Set(fd.Schema.ARCH("AMD64"))
fd.Flush()
def UpdateCoreKBAttributes(self):
fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
kb = fd.Get(fd.Schema.KNOWLEDGE_BASE)
artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
fd.Set(fd.Schema.KNOWLEDGE_BASE, kb)
fd.Flush()
def SetLinuxClient(self):
fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
fd.Set(fd.Schema.SYSTEM("Linux"))
fd.Set(fd.Schema.OS_VERSION("12.04"))
fd.Flush()
def SetDarwinClient(self):
fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
fd.Set(fd.Schema.SYSTEM("Darwin"))
fd.Set(fd.Schema.OS_VERSION("10.9"))
fd.Flush()
def MockClientMountPointsWithImage(self, image_path, fs_type="ext2"):
"""Mock the client to run off a test image.
Args:
image_path: The path to the image file.
fs_type: The filesystem in the image.
Returns:
A context manager which ensures that client actions are served off the
test image.
"""
def MockGetMountpoints():
return {"/": (image_path, fs_type)}
return utils.MultiStubber(
(client_utils_linux, "GetMountpoints", MockGetMountpoints),
(client_utils_osx, "GetMountpoints", MockGetMountpoints),
(standard, "HASH_CACHE", utils.FastStore(100)))
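# Illustrative usage (an assumption, not taken from the original tests): the
# MultiStubber returned above is meant to be entered around client actions so
# that "/" resolves to the supplied test image, e.g.:
#
#   image = os.path.join(self.base_path, "test_img.dd")  # hypothetical image
#   with self.MockClientMountPointsWithImage(image):
#     self.RunCollectorAndGetCollection(["TestFilesArtifact"],
#                                       client_mock=client_mock)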
def RunCollectorAndGetCollection(self, artifact_list, client_mock=None,
**kw):
"""Helper to handle running the collector flow."""
if client_mock is None:
client_mock = self.MockClient(client_id=self.client_id)
output_name = "/analysis/output/%s" % int(time.time())
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock=client_mock, output=output_name,
client_id=self.client_id, artifact_list=artifact_list,
token=self.token, **kw):
pass
output_urn = self.client_id.Add(output_name)
return aff4.FACTORY.Open(output_urn, aff4_type="RDFValueCollection",
token=self.token)
class GRRArtifactTest(ArtifactTest):
def testRDFMaps(self):
"""Validate the RDFMaps."""
for rdf_name, dat in artifact.GRRArtifactMappings.rdf_map.items():
# "info/software", "InstalledSoftwarePackages", "INSTALLED_PACKAGES",
# "Append"
_, aff4_type, aff4_attribute, operator = dat
if operator not in ["Set", "Append"]:
raise artifact_lib.ArtifactDefinitionError(
"Bad RDFMapping, unknown operator %s in %s" %
(operator, rdf_name))
if aff4_type not in aff4.AFF4Object.classes:
raise artifact_lib.ArtifactDefinitionError(
"Bad RDFMapping, invalid AFF4 Object %s in %s" %
(aff4_type, rdf_name))
attr = getattr(aff4.AFF4Object.classes[aff4_type].SchemaCls,
aff4_attribute)()
if not isinstance(attr, rdfvalue.RDFValue):
raise artifact_lib.ArtifactDefinitionError(
"Bad RDFMapping, bad attribute %s for %s" %
(aff4_attribute, rdf_name))
class ArtifactFlowTest(ArtifactTest):
def setUp(self):
"""Make sure things are initialized."""
super(ArtifactFlowTest, self).setUp()
fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
fd.Set(fd.Schema.SYSTEM("Linux"))
kb = fd.Schema.KNOWLEDGE_BASE()
artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
kb.MergeOrAddUser(rdfvalue.KnowledgeBaseUser(username="gogol"))
kb.MergeOrAddUser(rdfvalue.KnowledgeBaseUser(username="gevulot"))
kb.MergeOrAddUser(rdfvalue.KnowledgeBaseUser(username="exomemory"))
fd.Set(kb)
fd.Flush()
self.LoadTestArtifacts()
def testCmdArtifact(self):
"""Check we can run command based artifacts and get anomalies."""
class Popen(object):
"""A mock object for subprocess.Popen."""
def __init__(self, run, stdout, stderr, stdin):
Popen.running_args = run
Popen.stdout = stdout
Popen.stderr = stderr
Popen.stdin = stdin
Popen.returncode = 0
def communicate(self): # pylint: disable=g-bad-name
return "stdout here", "stderr here"
client_mock = self.MockClient("ExecuteCommand", client_id=self.client_id)
with utils.Stubber(subprocess, "Popen", Popen):
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock, client_id=self.client_id,
store_results_in_aff4=True, use_tsk=False,
artifact_list=["TestCmdArtifact"], token=self.token):
pass
urn = self.client_id.Add("info/software")
fd = aff4.FACTORY.Open(urn, token=self.token)
packages = fd.Get(fd.Schema.INSTALLED_PACKAGES)
self.assertEqual(len(packages), 2)
self.assertEqual(packages[0].__class__.__name__, "SoftwarePackage")
with aff4.FACTORY.Open(self.client_id.Add("anomalies"),
token=self.token) as anomaly_coll:
self.assertEqual(len(anomaly_coll), 1)
self.assertTrue("gremlin" in anomaly_coll[0].symptom)
def testWMIQueryArtifact(self):
"""Check we can run WMI based artifacts."""
self.SetWindowsClient()
self.UpdateCoreKBAttributes()
self.RunCollectorAndGetCollection(["WMIInstalledSoftware"],
store_results_in_aff4=True)
urn = self.client_id.Add("info/software")
fd = aff4.FACTORY.Open(urn, token=self.token)
packages = fd.Get(fd.Schema.INSTALLED_PACKAGES)
self.assertEqual(len(packages), 3)
self.assertEqual(packages[0].description, "Google Chrome")
def testRekallPsListArtifact(self):
"""Check we can run Rekall based artifacts."""
self.SetWindowsClient()
self.CreateSignedDriver()
fd = self.RunCollectorAndGetCollection(
["RekallPsList"], RekallMock(
self.client_id, "rekall_pslist_result.dat"))
self.assertEqual(len(fd), 36)
self.assertEqual(fd[0].exe, "System")
self.assertEqual(fd[0].pid, 4)
self.assertIn("DumpIt.exe", [x.exe for x in fd])
def testRekallVadArtifact(self):
"""Check we can run Rekall based artifacts."""
# The client should now be populated with the data we care about.
with aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token) as fd:
fd.Set(fd.Schema.KNOWLEDGE_BASE(
os="Windows",
environ_systemdrive=r"c:"))
self.SetWindowsClient()
self.CreateSignedDriver()
fd = self.RunCollectorAndGetCollection(
["FullVADBinaryList"], RekallMock(
self.client_id, "rekall_vad_result.dat"))
self.assertEqual(len(fd), 1986)
self.assertEqual(fd[0].path, u"c:\\Windows\\System32\\ntdll.dll")
for x in fd:
self.assertEqual(x.pathtype, "OS")
extension = x.path.lower().split(".")[-1]
self.assertIn(extension, ["exe", "dll", "pyd", "drv", "mui", "cpl"])
def testFilesArtifact(self):
"""Check GetFiles artifacts."""
# Update the artifact path to point to the test directory.
art_reg = artifact_lib.ArtifactRegistry.artifacts
orig_path = art_reg["TestFilesArtifact"].collectors[0].args["path_list"]
art_reg["TestFilesArtifact"].collectors[0].args["path_list"] = (
[os.path.join(self.base_path, "auth.log")])
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
self.RunCollectorAndGetCollection(["TestFilesArtifact"],
client_mock=client_mock)
urn = self.client_id.Add("fs/os/").Add(self.base_path).Add("auth.log")
aff4.FACTORY.Open(urn, aff4_type="VFSBlobImage", token=self.token)
art_reg["TestFilesArtifact"].collectors[0].args["path_list"] = orig_path
def testLinuxPasswdHomedirsArtifact(self):
"""Check LinuxPasswdHomedirs artifacts."""
# Update the artifact path to point to the test directory.
art_reg = artifact_lib.ArtifactRegistry.artifacts
orig_path = art_reg["LinuxPasswdHomedirs"].collectors[0].args["path_list"]
art_reg["LinuxPasswdHomedirs"].collectors[0].args["path_list"] = [
os.path.join(self.base_path, "passwd")]
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile", "Grep")
fd = self.RunCollectorAndGetCollection(["LinuxPasswdHomedirs"],
client_mock=client_mock)
self.assertEqual(len(fd), 3)
self.assertItemsEqual([x.username for x in fd], [u"exomemory", u"gevulot",
u"gogol"])
for user in fd:
if user.username == u"exomemory":
self.assertEqual(user.full_name, u"Never Forget (admin)")
self.assertEqual(user.gid, 47)
self.assertEqual(user.homedir, u"/var/lib/exomemory")
self.assertEqual(user.shell, u"/bin/sh")
self.assertEqual(user.uid, 46)
art_reg["LinuxPasswdHomedirs"].collectors[0].args["path_list"] = orig_path
def testArtifactOutput(self):
"""Check we can run command based artifacts."""
self.SetLinuxClient()
# Update the artifact path to point to the test directory.
art_reg = artifact_lib.ArtifactRegistry.artifacts
art_reg["TestFilesArtifact"].collectors[0].args["path_list"] = ([
os.path.join(self.base_path, "auth.log")])
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile",
"FingerprintFile", "HashBuffer",
"ListDirectory", "Find")
# Will raise if something goes wrong.
self.RunCollectorAndGetCollection(["TestFilesArtifact"],
client_mock=client_mock)
# Will raise if something goes wrong.
self.RunCollectorAndGetCollection(["TestFilesArtifact"],
client_mock=client_mock,
split_output_by_artifact=True)
# Test the on_no_results_error option.
with self.assertRaises(RuntimeError) as context:
self.RunCollectorAndGetCollection(
["NullArtifact"], client_mock=client_mock,
split_output_by_artifact=True, on_no_results_error=True)
if "collector returned 0 responses" not in str(context.exception):
raise RuntimeError("0 responses should have been returned")
class GrrKbTest(ArtifactTest):
def SetupWindowsMocks(self):
test_lib.ClientFixture(self.client_id, token=self.token)
self.SetWindowsClient()
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler
def testKnowledgeBaseRetrievalWindows(self):
"""Check we can retrieve a knowledge base from a client."""
self.SetupWindowsMocks()
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
# The client should now be populated with the data we care about.
client = aff4.FACTORY.Open(self.client_id, token=self.token)
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.environ_systemroot, "C:\\Windows")
self.assertEqual(kb.time_zone, "US/Alaska")
self.assertEqual(kb.code_page, "cp_1252")
self.assertEqual(kb.environ_windir, "C:\\Windows")
self.assertEqual(kb.environ_allusersprofile, "C:\\Users\\All Users")
self.assertEqual(kb.environ_allusersappdata, "C:\\ProgramData")
self.assertEqual(kb.environ_temp, "C:\\Windows\\TEMP")
self.assertEqual(kb.environ_systemdrive, "C:")
self.assertItemsEqual([x.username for x in kb.users],
["jim", "kovacs"])
user = kb.GetUser(username="jim")
self.assertEqual(user.username, "jim")
self.assertEqual(user.sid, "S-1-5-21-702227068-2140022151-3110739409-1000")
def testKnowledgeBaseMultiProvides(self):
"""Check we can handle multi-provides."""
self.SetupWindowsMocks()
# Replace some artifacts with a test one that will run the MultiProvideParser.
self.LoadTestArtifacts()
artifacts = config_lib.CONFIG["Artifacts.knowledge_base"]
artifacts.append("DepsProvidesMultiple") # Our test artifact.
artifacts.remove("WinPathEnvironmentVariable")
artifacts.remove("TempEnvironmentVariable")
config_lib.CONFIG.Set("Artifacts.knowledge_base", artifacts)
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
# The client should now be populated with the data we care about.
client = aff4.FACTORY.Open(self.client_id, token=self.token)
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.environ_temp, "tempvalue")
self.assertEqual(kb.environ_path, "pathvalue")
def testKnowledgeBaseRetrievalFailures(self):
"""Test kb retrieval failure modes."""
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
self.assertRaises(artifact_lib.KnowledgeBaseUninitializedError,
artifact.GetArtifactKnowledgeBase, client)
kb = rdfvalue.KnowledgeBase()
kb.hostname = "test"
client.Set(client.Schema.KNOWLEDGE_BASE(kb))
client.Flush(sync=True)
self.assertRaises(artifact_lib.KnowledgeBaseAttributesMissingError,
artifact.GetArtifactKnowledgeBase, client)
def testKnowledgeBaseRetrievalDarwin(self):
"""Check we can retrieve a Darwin kb."""
test_lib.ClientFixture(self.client_id, token=self.token)
self.SetDarwinClient()
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.os_major_version, 10)
self.assertEqual(kb.os_minor_version, 9)
# scalzi from /Users dir listing.
# Bert and Ernie not present (Users fixture overridden by kb).
self.assertItemsEqual([x.username for x in kb.users], ["scalzi"])
user = kb.GetUser(username="scalzi")
self.assertEqual(user.homedir, "/Users/scalzi")
def testKnowledgeBaseRetrievalLinux(self):
"""Check we can retrieve a Linux kb."""
test_lib.ClientFixture(self.client_id, token=self.token)
self.SetLinuxClient()
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["LinuxWtmp",
"NetgroupConfiguration",
"LinuxPasswdHomedirs",
"LinuxRelease"])
config_lib.CONFIG.Set("Artifacts.netgroup_filter_regexes", ["^login$"])
config_lib.CONFIG.Set("Artifacts.netgroup_user_blacklist", ["isaac"])
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeTestDataVFSHandler
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile", "Grep")
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.os_major_version, 14)
self.assertEqual(kb.os_minor_version, 4)
# user 1,2,3 from wtmp. yagharek from netgroup.
# Bert and Ernie not present (Users fixture overridden by kb).
self.assertItemsEqual([x.username for x in kb.users], ["user1", "user2",
"user3", "yagharek"])
user = kb.GetUser(username="user1")
self.assertEqual(user.last_logon.AsSecondsFromEpoch(), 1296552099)
self.assertEqual(user.homedir, "/home/user1")
def testKnowledgeBaseRetrievalLinuxPasswd(self):
"""Check we can retrieve a Linux kb."""
test_lib.ClientFixture(self.client_id, token=self.token)
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeTestDataVFSHandler
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile", "Grep")
self.SetLinuxClient()
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["LinuxWtmp",
"LinuxPasswdHomedirs",
"LinuxRelease"])
config_lib.CONFIG.Set("Artifacts.knowledge_base_additions", [])
config_lib.CONFIG.Set("Artifacts.knowledge_base_skip", [])
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.os_major_version, 14)
self.assertEqual(kb.os_minor_version, 4)
# user 1,2,3 from wtmp.
# Bert and Ernie not present (Users fixture overridden by kb).
self.assertItemsEqual([x.username for x in kb.users], ["user1", "user2",
"user3"])
user = kb.GetUser(username="user1")
self.assertEqual(user.last_logon.AsSecondsFromEpoch(), 1296552099)
self.assertEqual(user.homedir, "/home/user1")
user = kb.GetUser(username="user2")
self.assertEqual(user.last_logon.AsSecondsFromEpoch(), 1296552102)
self.assertEqual(user.homedir, "/home/user2")
self.assertFalse(kb.GetUser(username="buguser3"))
def testKnowledgeBaseRetrievalLinuxNoUsers(self):
"""Cause a users.username dependency failure."""
test_lib.ClientFixture(self.client_id, token=self.token)
self.SetLinuxClient()
config_lib.CONFIG.Set("Artifacts.knowledge_base",
["NetgroupConfiguration",
"NssCacheLinuxPasswdHomedirs",
"LinuxRelease"])
config_lib.CONFIG.Set("Artifacts.netgroup_filter_regexes",
["^doesntexist$"])
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeTestDataVFSHandler
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
require_complete=False,
client_id=self.client_id, token=self.token):
pass
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.os_major_version, 14)
self.assertEqual(kb.os_minor_version, 4)
self.assertItemsEqual([x.username for x in kb.users], [])
def testKnowledgeBaseNoOS(self):
"""Check unset OS dies."""
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
self.assertRaises(flow.FlowError, list, test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token))
def testGlobRegistry(self):
"""Test that glob works on registry."""
self.SetupWindowsMocks()
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory")
paths = ["HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT"
"\\CurrentVersion\\ProfileList\\ProfilesDirectory",
"HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT"
"\\CurrentVersion\\ProfileList\\AllUsersProfile"]
for _ in test_lib.TestFlowHelper(
"Glob", client_mock, paths=paths,
pathtype=rdfvalue.PathSpec.PathType.REGISTRY,
client_id=self.client_id, token=self.token):
pass
path = paths[0].replace("\\", "/")
fd = aff4.FACTORY.Open(self.client_id.Add("registry").Add(path),
token=self.token)
self.assertEqual(fd.__class__.__name__, "VFSFile")
self.assertEqual(fd.Get(fd.Schema.STAT).registry_data.GetValue(),
"%SystemDrive%\\Users")
def testGetDependencies(self):
"""Test that dependencies are calculated correctly."""
self.SetupWindowsMocks()
with utils.Stubber(artifact_lib.ArtifactRegistry, "artifacts", {}):
test_artifacts_file = os.path.join(
config_lib.CONFIG["Test.data_dir"], "test_artifacts.json")
artifact_lib.LoadArtifactsFromFiles([test_artifacts_file])
# No dependencies
args = artifact.CollectArtifactDependenciesArgs(
artifact_list=["DepsHomedir2"])
collect_obj = artifact.CollectArtifactDependencies(None, token=self.token)
collect_obj.args = args
collect_obj.knowledge_base = None
collect_obj.state.Register("all_deps", set())
collect_obj.state.Register("awaiting_deps_artifacts", [])
collect_obj.state.Register("knowledge_base",
rdfvalue.KnowledgeBase(os="Windows"))
no_deps = collect_obj.GetFirstFlowsForCollection()
self.assertItemsEqual(no_deps, [])
self.assertItemsEqual(collect_obj.state.all_deps, [])
self.assertItemsEqual(collect_obj.state.awaiting_deps_artifacts, [])
# Dependency tree with a single starting point
args = artifact.CollectArtifactDependenciesArgs(
artifact_list=["DepsHomedir"])
collect_obj.args = args
no_deps = collect_obj.GetFirstFlowsForCollection()
self.assertItemsEqual(no_deps, ["DepsControlSet"])
self.assertItemsEqual(collect_obj.state.all_deps, ["environ_windir",
"users.username",
"current_control_set"])
self.assertItemsEqual(collect_obj.state.awaiting_deps_artifacts,
["DepsWindir", "DepsWindirRegex"])
def testGetKBDependencies(self):
"""Test that KB dependencies are calculated correctly."""
self.SetupWindowsMocks()
with utils.Stubber(artifact_lib.ArtifactRegistry, "artifacts", {}):
test_artifacts_file = os.path.join(
config_lib.CONFIG["Test.data_dir"], "test_artifacts.json")
artifact_lib.LoadArtifactsFromFiles([test_artifacts_file])
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["DepsParent",
"DepsDesktop",
"DepsHomedir",
"DepsWindir",
"DepsWindirRegex",
"DepsControlSet",
"FakeArtifact"])
config_lib.CONFIG.Set("Artifacts.knowledge_base_additions",
["DepsHomedir2"])
config_lib.CONFIG.Set("Artifacts.knowledge_base_skip", ["DepsWindir"])
config_lib.CONFIG.Set("Artifacts.knowledge_base_heavyweight",
["FakeArtifact"])
args = rdfvalue.KnowledgeBaseInitializationArgs(lightweight=True)
kb_init = artifact.KnowledgeBaseInitializationFlow(None, token=self.token)
kb_init.args = args
kb_init.state.Register("all_deps", set())
kb_init.state.Register("awaiting_deps_artifacts", [])
kb_init.state.Register("knowledge_base",
rdfvalue.KnowledgeBase(os="Windows"))
no_deps = kb_init.GetFirstFlowsForCollection()
self.assertItemsEqual(no_deps, ["DepsControlSet", "DepsHomedir2"])
self.assertItemsEqual(kb_init.state.all_deps, ["users.homedir",
"users.desktop",
"users.username",
"environ_windir",
"current_control_set"])
self.assertItemsEqual(kb_init.state.awaiting_deps_artifacts,
["DepsParent", "DepsDesktop", "DepsHomedir",
"DepsWindirRegex"])
class FlowTestLoader(test_lib.GRRTestLoader):
base_class = ArtifactTest
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv, testLoader=FlowTestLoader())
if __name__ == "__main__":
flags.StartMain(main)
|
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import click
from polyaxon import settings
from polyaxon.cli.admin import admin
from polyaxon.cli.artifacts import artifacts
from polyaxon.cli.auth import login, logout, whoami
from polyaxon.cli.check import check
from polyaxon.cli.completion import completion
from polyaxon.cli.components import components
from polyaxon.cli.config import config
from polyaxon.cli.dashboard import dashboard
from polyaxon.cli.init import init
from polyaxon.cli.models import models
from polyaxon.cli.operations import ops
from polyaxon.cli.port_forward import port_forward
from polyaxon.cli.projects import project
from polyaxon.cli.run import run
from polyaxon.cli.session import set_versions_config
from polyaxon.cli.version import check_cli_version, upgrade, version
from polyaxon.logger import clean_outputs, configure_logger
from polyaxon.utils.bool_utils import to_bool
from polyaxon.utils.formatting import Printer
DOCS_GEN = to_bool(os.environ.get("POLYAXON_DOCS_GEN", False))
@click.group()
@click.option(
"-v", "--verbose", is_flag=True, default=False, help="Turn on debug logging"
)
@click.option(
"--offline",
is_flag=True,
default=False,
help="Run command in offline mode if supported. "
"Currently used for run command in --local mode.",
)
@click.pass_context
@clean_outputs
def cli(context, verbose, offline):
"""Polyaxon - Cloud Native Machine Learning Automation & Experimentation tool.
This CLI provides tools to:
- Parse, Validate, and Check Polyaxonfiles.
- Interact with Polyaxon server.
- Run and Monitor experiments and jobs.
This CLI tool comes with a caching mechanism:
- You can initialize a project with: polyaxon init -p [project name]
- Otherwise Polyaxon will use the default global path for the cache.
You can check the version of your CLI by running:
- polyaxon version
You can check the version of the CLI and the server, and the compatibility matrix with:
- polyaxon version --check
To enable the debug mode, you can use the `-v` flag:
- polyaxon -v ...
To configure your host:
- polyaxon config set --host=...
To check your current config:
- polyaxon config show
Common commands:
- polyaxon project get
- polyaxon run [-f] [-l]
- polyaxon ops ls
- polyaxon ops logs
- polyaxon ops get
- polyaxon config set ...
Admin deployment commands:
- polyaxon admin deploy [-f] [--check]
- polyaxon admin upgrade [-f] [--check]
- polyaxon admin teardown [-f]
For more information, please visit https://polyaxon.com/docs/core/cli/
Check the help available for each command listed below by appending `-h`.
"""
settings.set_cli_config()
configure_logger(verbose)
if settings.CLIENT_CONFIG.no_op:
Printer.print_warning(
"POLYAXON_NO_OP is set to `true`, some commands will not function correctly."
)
context.obj = context.obj or {}
if not settings.CLIENT_CONFIG.client_header:
settings.CLIENT_CONFIG.set_cli_header()
context.obj["offline"] = offline
if offline:
os.environ["POLYAXON_IS_OFFLINE"] = "true"
settings.CLIENT_CONFIG.is_offline = True
non_check_cmds = [
"completion",
"config",
"version",
"login",
"logout",
"deploy",
"admin",
"teardown",
"docker",
"initializer",
"sidecar",
"proxy",
"notify",
"upgrade",
"port-forward",
]
if not settings.CLI_CONFIG.installation:
pass
if (
not (
context.invoked_subcommand in non_check_cmds
or offline
or settings.CLIENT_CONFIG.no_api
or settings.CLIENT_CONFIG.is_ops
or DOCS_GEN
)
and not settings.CLI_CONFIG.installation
):
cli_config = set_versions_config(is_cli=False)
settings.CLI_CONFIG = cli_config
check_cli_version(cli_config, is_cli=False)
cli.add_command(login)
cli.add_command(logout)
cli.add_command(whoami)
cli.add_command(upgrade)
cli.add_command(version)
cli.add_command(config)
cli.add_command(check)
cli.add_command(init)
cli.add_command(project)
cli.add_command(ops)
cli.add_command(artifacts)
cli.add_command(components)
cli.add_command(models)
cli.add_command(run)
cli.add_command(dashboard)
cli.add_command(admin)
cli.add_command(port_forward)
cli.add_command(completion)
if settings.CLIENT_CONFIG.is_ops:
from polyaxon.cli.services.agent import agent
from polyaxon.cli.services.clean_artifacts import clean_artifacts
from polyaxon.cli.services.clean_ops import clean_ops
from polyaxon.cli.services.docker import docker
from polyaxon.cli.services.initializer import initializer
from polyaxon.cli.services.notifier import notify
from polyaxon.cli.services.proxies import proxy
from polyaxon.cli.services.sidecar import sidecar
from polyaxon.cli.services.tuner import tuner
from polyaxon.cli.services.wait import wait
cli.add_command(agent)
cli.add_command(clean_artifacts)
cli.add_command(clean_ops)
cli.add_command(docker)
cli.add_command(initializer)
cli.add_command(notify)
cli.add_command(proxy)
cli.add_command(sidecar)
cli.add_command(tuner)
cli.add_command(wait)
def main():
cli(auto_envvar_prefix="POLYAXON_CLI")
|
|
#!/usr/bin/python
from logging import DEBUG, NOTSET
from cloud_utils.log_utils.eulogger import Eulogger
from cloud_utils.log_utils import markup, get_traceback
from cloud_utils.file_utils.eucarc import Eucarc
from nephoria import CleanTestResourcesException
from urlparse import urlparse
AWSRegionData = {
'us-east-1': 'us-east-1.amazonaws.com',
'us-west-1': 'us-west-1.amazonaws.com',
'us-west-2': 'us-west-2.amazonaws.com',
'eu-west-1': 'eu-west-1.amazonaws.com',
'ap-northeast-1': 'ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'ap-southeast-1.amazonaws.com'}
class BaseOps(object):
# The key name this ops class uses to look up its service URL value (e.g. EC2_URL, S3_URL, etc.)
EUCARC_URL_NAME = None
# The service prefix used with the region (e.g. ec2, iam, s3, etc.)
SERVICE_PREFIX = None
# The underlying class used to connect to the cloud (e.g. boto.VPCConnection)
CONNECTION_CLASS = None
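# A hedged sketch (not part of this module) of how a concrete ops class is
# expected to fill in the three hooks above; the EC2 names are illustrative
# assumptions only:
#
#   class EC2ops(BaseOps):
#       EUCARC_URL_NAME = 'ec2_url'
#       SERVICE_PREFIX = 'ec2'
#       CONNECTION_CLASS = boto.vpc.VPCConnection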
def __init__(self, eucarc=None, credpath=None, service_url=None, aws_access_key_id=None,
aws_secret_access_key=None, is_secure=False, port=None, host=None,
region=None, connection_debug=0, path=None, validate_certs=True,
test_resources=None, logger=None, log_level=None, user_context=None,
session=None, api_version=None, verbose_requests=None):
if self.EUCARC_URL_NAME is None:
raise NotImplementedError('EUCARC_URL_NAME not set for this class:"{0}"'
.format(self.__class__.__name__))
if self.SERVICE_PREFIX is None:
raise NotImplementedError('Service Prefix has not been defined for this class:"{0}"'
.format(self.__class__.__name__))
init_kwargs = locals()
init_kwargs.__delitem__('self')
self._session = session
self.connection = None
self.service_host = None
self.service_port = None
self.service_path = None
# Store info about created resources and how to clean/delete them for this ops connection
self.test_resources_clean_methods = {}
self.test_resources = test_resources or {}
# Store the user context for this connection if provided
self._user_context = user_context
if not region and self._user_context:
region = self._user_context.region
self.service_region = region
# Create the logger for this ops connection
if log_level is None:
log_level = DEBUG
if not logger:
context = ""
if user_context:
try:
context = "({0}:{1})".format(user_context.account_name, user_context.user_name)
except:
pass
logger = Eulogger("{0}{1}".format(self.__class__.__name__, context),
stdout_level=log_level)
self.log = logger
self.log.set_stdout_loglevel(log_level)
# Store the runtime configuration for this ops connection
if not eucarc:
if credpath:
eucarc = Eucarc(filepath=credpath)
else:
eucarc = Eucarc()
self.eucarc = eucarc
# Set the connection params...
self._try_verbose = verbose_requests
self._is_secure = is_secure
if aws_secret_access_key:
self.eucarc.aws_secret_key = aws_secret_access_key
if aws_access_key_id:
self.eucarc.aws_access_key = aws_access_key_id
self._service_url = service_url
if not (host or port or path):
if self.service_url:
urlp = urlparse(self.service_url)
host = host or urlp.hostname
port = port or urlp.port or 8773
path = path or urlp.path
self.service_host = host
self.service_port = port
self.service_path = path
self.service_region = region
# Build out kwargs used to create service connection/client
# Pass all the args/kwargs provided at init to the create_connection_kwargs method for the
# ops class to use to build its kwargs as needed.
self._connection_kwargs = self.create_connection_kwargs(**init_kwargs)
self.connect(verbose=connection_debug)
# Remaining setup...
self.setup()
def __repr__(self):
return "{0}:{1}:{2}".format(self.__class__.__name__, self.service_region,
self.SERVICE_PREFIX)
def connect(self, verbose=False, connection_kwargs=None):
"""
Verify the required params have been set, and connect the underlying connection class.
:param verbose: Dump debug output about the connection
:param connection_kwargs: options dict containing kwargs used when creating the
underlying connection
"""
if self.CONNECTION_CLASS is None:
raise NotImplementedError('Connection Class has not been defined for this class:"{0}"'
.format(self.__class__.__name__))
if connection_kwargs:
self._connection_kwargs = connection_kwargs
# Remove any kwargs which are not part of the service connection class creation method
self._clean_connection_kwargs()
required = []
for key, value in self._connection_kwargs.iteritems():
if value is None:
required.append(key)
if required:
self.show_connection_kwargs(connection_kwargs=connection_kwargs)
raise ValueError('{0}: Required Connection parameters were None: "{1}"'
.format(self.__class__.__name__, ", ".join(required)))
#### Init connection...
if verbose:
self.show_connection_kwargs()
try:
# Remove any kwargs that are not applicable to this connection class
# For example 'region' may not be applicable to services such as 'IAM'
connection_keys = self._connection_kwargs.keys()
for ckey in connection_keys:
if ckey not in self.CONNECTION_CLASS.__init__.__func__.__code__.co_varnames:
self.log.debug('Arg "{0}" not found in "{1}.__init__()", removing it from the '
'connection kwargs.'.format(ckey, self.CONNECTION_CLASS.__name__))
self._connection_kwargs.__delitem__(ckey)
self.connection = self.CONNECTION_CLASS(**self._connection_kwargs)
except:
self.show_connection_kwargs()
raise
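# Illustrative re-connect (assumed instance `ops` of a BaseOps subclass; not
# from the original source): override a single connection kwarg and rebuild
# the underlying connection.
#
#   kwargs = dict(ops._connection_kwargs, host='10.0.0.5')
#   ops.connect(verbose=True, connection_kwargs=kwargs)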
def create_connection_kwargs(self, **kwargs):
self._connection_kwargs = kwargs
return self._connection_kwargs
def setup(self):
self.setup_resource_trackers()
@property
def service_name(self):
return self.SERVICE_PREFIX
@property
def service_url(self):
if not self._service_url:
url = getattr(self.eucarc, self.EUCARC_URL_NAME, None)
if url:
self._service_url = url
else:
if self._is_secure:
prefix = 'https'
elif self.service_host:
prefix = 'http'
url = "{0}://{1}:{2}{3}".format(prefix,
self.service_host,
self.service_port or "",
self.service_path or "")
return url
return self._service_url
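# Worked example of the fallback above (values are assumptions): with
# is_secure=False, service_host='10.111.5.100', service_port=8773 and
# service_path='/services/Eucalyptus', the constructed URL is
# "http://10.111.5.100:8773/services/Eucalyptus".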
@property
def session(self):
return self._session
@session.setter
def session(self, value):
self._session = value
def enable_connection_debug(self, level=DEBUG, format_string=None):
pass
def disable_connection_debug(self, level=NOTSET):
pass
@property
def _use_verbose_requests(self):
if self._try_verbose is None:
self._try_verbose = False
if self.eucarc:
account = getattr(self.eucarc, 'aws_account_name', None)
user = getattr(self.eucarc, 'aws_user_name', None)
if account == 'eucalyptus' and user == 'sys_admin':
self._try_verbose = True
return self._try_verbose
@_use_verbose_requests.setter
def _use_verbose_requests(self, value):
if value is None or isinstance(value, bool):
self._try_verbose = value
return
raise ValueError('Only bool or None type supported for "_use_verbose_requests". '
'Got: "{0}/{1}"'.format(value, type(value)))
def _clean_connection_kwargs(self, connection_kwargs=None, connection_method=None):
# Remove any kwargs from self_connection_kwargs that are not applicable
# to self.CONNECTION_CLASS
if connection_kwargs is None:
connection_kwargs = self._connection_kwargs or {}
if connection_method is None:
connection_method = self.CONNECTION_CLASS.__init__
varnames = connection_method.__func__.func_code.co_varnames
keys = connection_kwargs.keys()
for key in keys:
if key not in varnames:
del connection_kwargs[key]
return connection_kwargs
def show_connection_kwargs(self, connection_kwargs=None):
if connection_kwargs is None:
connection_kwargs = self._connection_kwargs
print connection_kwargs
debug_buf = 'Current "{0}" connection kwargs for\n'.format(self.__class__.__name__)
for key, value in connection_kwargs.iteritems():
debug_buf += "{0}{1}{2}\n".format(str(key).ljust(30), " -> ", value)
self.log.debug(debug_buf)
def setup_resource_trackers(self):
"""
Allows each ops class to track the resources it creates, as well as the
method(s) to use per resource type to clean/remove them.
For example, an ec2_ops class may create 'instances' and register a 'terminate()'
method to delete them upon exit.
"""
raise NotImplementedError('ERROR: {0} has not implemented resource tracking method. '
'"test_resources" and "test_resources_clean_methods" should be '
'setup here.'
.format(self.__class__.__name__))
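# Hedged sketch of a subclass implementation (names are illustrative
# assumptions): register an empty tracker plus the method used to clean that
# resource type, which clean_all_test_resources() will call with the
# accumulated resource list.
#
#   def setup_resource_trackers(self):
#       self.test_resources['instances'] = []
#       self.test_resources_clean_methods['instances'] = self.terminate_instances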
def clean_all_test_resources(self):
fault_buf = ""
for resource_name, resource_list in self.test_resources.iteritems():
clean_method = self.test_resources_clean_methods.get(resource_name, None)
if clean_method:
try:
try:
clean_method_name = clean_method.__func__.__name__
except:
clean_method_name = str(clean_method)
self.log.debug('Attempting to clean test resources of type:"{0}", '
'method:"{1}", artifacts:"{2}"'
.format(resource_name, clean_method_name, resource_list))
clean_method(resource_list)
except Exception as E:
fault_buf += "{0}\n{1}\n".format(get_traceback(),
markup('Error while attempting to remove '
'test resource type:"{0}", '
'error:"{1}"'
.format(resource_name, E)))
if fault_buf:
raise CleanTestResourcesException(fault_buf)
|
|
from django.http import HttpResponse,Http404
from django.shortcuts import render,redirect
from django.views.decorators.csrf import csrf_exempt
import flockapplib.jwtauth as jwt
import flockapplib.actions as action
import flockapplib.exports as export
import json
from django.http import HttpResponseRedirect
#importing models
from .models import Currency,User,Track,Expense,Chat,ChatExpense,Chattrack
@csrf_exempt
def events(request):
if(jwt.verify(request.META['HTTP_X_FLOCK_EVENT_TOKEN'])):
pjson = json.loads(request.body)
if(pjson['name']=='app.install'):
if(action.appinstall(pjson)):
return HttpResponse("OK")
else:
raise Http404("testing")
# elif(pjson['name']=='chat.receiveMessage'):
# if(action.receiveMessage(pjson)):
# return HttpResponse("OK")
elif(pjson['name']=='client.messageAction'):
print(pjson)
user = User.objects.get(userId=str(pjson['userId']))
message_uni = pjson['messageUids']
message = []
message.append(str(message_uni[0]))
print(str(pjson['chat']))
print(user)
print(message)
action.fetchMessage(str(pjson['chat']),user,message)
return HttpResponse("""{"text": "Saved the bill"}""")
else:
print(request.body)
raise Http404("testing")
#return HttpResponse("connected to xpense-server")
def installed(request):
context = {
}
return render(request, 'flockapp/installed.html', context)
def widget(request):
if(jwt.verify(request.GET['flockEventToken'])):
currency_list = Currency.objects.all()
context = {
'currency_list' : currency_list,
}
#Getting group information
flockEvent = request.GET['flockEvent']
pjson = json.loads(flockEvent)
cmd_text = pjson['text']
chat_id = str(pjson['chat'])
chat_name = pjson['chatName']
username = pjson['userName']
userId = pjson['userId']
cmd_text = pjson['text']
cmd_text=cmd_text.lower()
if(cmd_text=="list"):
################################################################### DONE
user = User.objects.get(userId = userId)
current_chat = Chat.objects.filter(chatId = chat_id)
current_track = Chattrack.objects.filter(user = current_chat,active=True)
if(len(current_track)==0):
return HttpResponse("You aren't tracking yet. Try /Xpense to start tracking.")
else:
message = 'The list of '+current_track[0].name+' expenses are,\n'
ch_list = ChatExpense.objects.filter(track=current_track)
for expense in ch_list:
message = message+str(expense.currency.abbr)+' '+str(expense.amount)+' by '+str(expense.paidbywhom)+' for '+str(expense.purpose)+'\n'
context['chatexpense_list'] = ch_list
context['current_track'] = current_track[0]
context['message'] = message
context['userId'] = str(userId)
context['chatId'] = str(chat_id)
return render(request, 'flockapp/listexpense.html', context)
elif(cmd_text=='close'):
################################################################# done
user = User.objects.get(userId = userId)
current_chat = Chat.objects.filter(chatId = chat_id)
current_track = Chattrack.objects.filter(user = current_chat,active=True)
if(len(current_track)==0):
return HttpResponse("You aren't tracking yet. Try /Xpense to start tracking.")
else:
# Cache the instance: indexing the unevaluated queryset repeatedly would
# re-query the DB, and the modified object would never be saved.
track = current_track[0]
track.active = False
track.save()
action.sendGroupMessage(str(chat_id),user,'Xpense tracking of '+track.name+' closed.')
context['userId'] = str(userId)
context['track'] = track
context['chatId'] = str(chat_id)
return render(request, 'flockapp/closetrack.html', context)
elif(cmd_text=='report'):
################################################################# Done
context['userId'] = str(userId)
context['chatId'] = str(chat_id)
chat = Chat.objects.get(chatId = str(chat_id))
track_list = Chattrack.objects.filter(user=chat)
context['track_list'] = track_list
return render(request, 'flockapp/report.html', context)
elif(cmd_text=='delete'):
context['userId'] = str(userId)
context['chatId'] = str(chat_id)
chat = Chat.objects.get(chatId = str(chat_id))
track_list = Chattrack.objects.filter(user=chat)
context['track_list'] = track_list
return render(request, 'flockapp/deletetrack.html', context)
elif(cmd_text=='help'):
context={}
return render(request, 'flockapp/help.html', context)
else:
current_chat = Chat.objects.filter(chatId = chat_id)
if(len(current_chat)==0):
Chat(name=str(chat_name),chatId=str(chat_id)).save()
current_track = Chattrack.objects.filter(user = current_chat,active=True)
if(len(current_track)==0):
context['id'] = chat_id
context['userId'] = str(userId)
return render(request, 'flockapp/starttrack.html', context)
else:
user = User.objects.get(userId = userId)
context['chatId'] = current_track[0].user.chatId
context['userId'] = str(userId)
if(str(chat_id)[0]=='g'):
#group
group_members = action.getMembers(chat_id,user)
context['group_members'] = group_members
else:
context['username'] = username
return render(request, 'flockapp/widget.html', context)
else:
raise Http404("wth you doing bro?")
def chattab(request):
if(jwt.verify(request.GET['flockEventToken'])):
currency_list = Currency.objects.all()
context = {
'currency_list' : currency_list,
}
#Getting group information
flockEvent = request.GET['flockEvent']
pjson = json.loads(flockEvent)
chat_id = str(pjson['chat'])
chat_name = pjson['chatName']
username = pjson['userName']
userId = pjson['userId']
current_chat = Chat.objects.filter(chatId = chat_id)
if(len(current_chat)==0):
Chat(name=str(chat_name),chatId=str(chat_id)).save()
current_track = Chattrack.objects.filter(user = current_chat,active=True)
if(len(current_track)==0):
context['id'] = chat_id
context['userId'] = str(userId)
return render(request, 'flockapp/starttrack2.html', context)
else:
user = User.objects.get(userId = userId)
context['chatId'] = current_track[0].user.chatId
context['userId'] = str(userId)
if(str(chat_id)[0]=='g'):
#group
group_members = action.getMembers(chat_id,user)
context['group_members'] = group_members
else:
context['username'] = username
return render(request, 'flockapp/widget2.html', context)
return HttpResponse("ok")
def chattablist(request):
context = {}
userId = request.POST['userId']
chatId = request.POST['chatId']
current_chat = Chat.objects.filter(chatId = chatId)
current_track = Chattrack.objects.filter(user = current_chat,active=True)
message = 'The list of '+current_track[0].name+' expenses are,\n'
ch_list = ChatExpense.objects.filter(track=current_track)
for expense in ch_list:
message = message+str(expense.currency.abbr)+' '+str(expense.amount)+' by '+str(expense.paidbywhom)+' for '+str(expense.purpose)+'\n'
context['chatexpense_list'] = ch_list
context['current_track'] = current_track[0]
context['message'] = message
context['userId'] = str(userId)
context['chatId'] = str(chatId)
return render(request, 'flockapp/listexpense.html', context)
def chattabclose(request):
context = {}
userId = request.POST['userId']
chatId = request.POST['chatId']
user = User.objects.get(userId = userId)
current_chat = Chat.objects.filter(chatId = chatId)
current_track = Chattrack.objects.filter(user = current_chat,active=True)
if(len(current_track)==0):
return HttpResponse("You aren't tracking yet. Try /Xpense to start tracking.")
else:
# Cache the instance so the flag change is actually persisted (indexing the
# queryset again would fetch a fresh, unmodified object).
track = current_track[0]
track.active = False
track.save()
action.sendGroupMessage(str(chatId),user,'Xpense tracking of '+track.name+' closed.')
context['userId'] = str(userId)
context['track'] = track
context['chatId'] = str(chatId)
return render(request, 'flockapp/closetrack.html', context)
def chattabreport(request):
context = {}
userId = request.POST['userId']
chatId = request.POST['chatId']
context['userId'] = str(userId)
context['chatId'] = str(chatId)
chat = Chat.objects.get(chatId = str(chatId))
track_list = Chattrack.objects.filter(user=chat)
context['track_list'] = track_list
return render(request, 'flockapp/report.html', context)
def closewidget(request):
context = {}
return render(request, 'flockapp/close.html', context)
@csrf_exempt
def starttrack(request):
chatId = request.POST['chatId']
userId = request.POST['userId']
trackname = request.POST['trackname']
purpose = request.POST['purpose']
current_chat = Chat.objects.get(chatId=str(chatId))
Chattrack(name=str(trackname),user=current_chat,purpose=purpose).save()
user = User.objects.get(userId=str(userId))
action.sendGroupMessage(chatId,user,'I have added Xpense tracker '+str(trackname)+' for '+str(purpose))
return HttpResponse('ok')
@csrf_exempt
def deletetrack(request):
chatId = request.POST['chatId']
userId = request.POST['userId']
trackId = request.POST['trackId']
chat = Chat.objects.get(chatId=str(chatId))
chattrack = Chattrack.objects.get(id=trackId,user=chat)
track_name = chattrack.name
chattrack.delete()
user = User.objects.get(userId=str(userId))
action.sendGroupMessage(chatId,user,'Track '+track_name+' deleted.')
return HttpResponse('ok')
@csrf_exempt
def generatereport(request):
chatId = request.POST['chatId']
userId = request.POST['userId']
trackId = request.POST['trackId']
link_to_report = export.generate_report_2(trackId,chatId,userId)
user = User.objects.get(userId=str(userId))
current_chat = Chat.objects.get(chatId=str(chatId))
current_track = Chattrack.objects.get(user = current_chat,id= str(trackId))
#action.sendGroupMessage(chatId,user,'<flockml><a href="'+link_to_report+'">'+current_track.name+' pdf</a></flockml>')
action.sendAttachment(chatId,user,link_to_report,current_track.name)
return HttpResponse('ok')
@csrf_exempt
def sendmessage(request):
chatId = request.POST['chatId']
userId = request.POST['userId']
message = request.POST['message']
user = User.objects.get(userId=str(userId))
action.sendGroupMessage(chatId,user,message)
return HttpResponse('ok')
@csrf_exempt
def addexpense(request):
chatId = request.POST['chatId']
currency = request.POST['currency']
amount = request.POST['amount']
paidby = request.POST['paidby']
purpose = request.POST['purpose']
userId = request.POST['userId']
current_chat = Chat.objects.get(chatId=str(chatId))
current_track = Chattrack.objects.get(user = current_chat,active=True)
currency = Currency.objects.get(id=str(currency))
ChatExpense(track=current_track,amount=str(amount),currency=currency,paidbywhom=str(paidby),purpose=str(purpose),equallyshared=True).save()
user = User.objects.get(userId=str(userId))
action.sendGroupMessage(chatId,user,"Xpense Tracker: "+str(currency.abbr)+" "+str(amount)+" "+" paid by "+str(paidby)+" for "+str(purpose))
return HttpResponse('ok')
|
|
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import functools
import io
import jmespath
import json
import logging
import os
import re
import shutil
import tempfile
import unittest
import pytest
import mock
import six
import yaml
from c7n import policy
from c7n.loader import PolicyLoader
from c7n.ctx import ExecutionContext
from c7n.utils import reset_session_cache
from c7n.config import Bag, Config
C7N_VALIDATE = bool(os.environ.get("C7N_VALIDATE", ""))
skip_if_not_validating = unittest.skipIf(
not C7N_VALIDATE, reason="We are not validating schemas.")
functional = pytest.mark.functional
class CustodianTestCore(object):
custodian_schema = None
# thread local? tests are single threaded, multiprocess execution
policy_loader = PolicyLoader(Config.empty())
policy_loader.default_policy_validate = C7N_VALIDATE
def addCleanup(self, func, *args, **kw):
raise NotImplementedError("subclass required")
def write_policy_file(self, policy, format="yaml"):
""" Write a policy file to disk in the specified format.
Input a dictionary and a format. Valid formats are `yaml` and `json`
Returns the file path.
"""
fh = tempfile.NamedTemporaryFile(mode="w+b", suffix="." + format, delete=False)
if format == "json":
fh.write(json.dumps(policy).encode("utf8"))
else:
fh.write(yaml.dump(policy, encoding="utf8", Dumper=yaml.SafeDumper))
fh.flush()
self.addCleanup(os.unlink, fh.name)
self.addCleanup(fh.close)
return fh.name
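# Example usage (a minimal sketch; the policy dict is an assumption):
#
#   path = self.write_policy_file(
#       {"policies": [{"name": "ec2-check", "resource": "ec2"}]},
#       format="json")
#   # `path` is a temp file that addCleanup() unlinks after the test.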
def get_temp_dir(self):
""" Return a temporary directory that will get cleaned up. """
temp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, temp_dir)
return temp_dir
def get_context(self, config=None, session_factory=None, policy=None):
if config is None:
self.context_output_dir = self.get_temp_dir()
config = Config.empty(output_dir=self.context_output_dir)
ctx = ExecutionContext(
session_factory, policy or Bag({
"name": "test-policy", "provider_name": "aws"}), config)
return ctx
def load_policy(
self,
data,
config=None,
session_factory=None,
validate=C7N_VALIDATE,
output_dir=None,
cache=False,
):
pdata = {'policies': [data]}
if not (config and isinstance(config, Config)):
config = self._get_policy_config(
output_dir=output_dir, cache=cache, **(config or {}))
collection = self.policy_loader.load_data(
pdata, validate=validate,
file_uri="memory://test",
session_factory=session_factory,
config=config)
# policy non schema validation is also lazy initialization
[p.validate() for p in collection]
return list(collection)[0]
def _get_policy_config(self, **kw):
config = kw
config["output_dir"] = temp_dir = self.get_temp_dir()
if config.get('cache'):
config["cache"] = os.path.join(temp_dir, "c7n.cache")
config["cache_period"] = 300
return Config.empty(**config)
def load_policy_set(self, data, config=None):
filename = self.write_policy_file(data, format="json")
if config:
e = Config.empty(**config)
else:
e = Config.empty()
return policy.load(e, filename)
def patch(self, obj, attr, new):
old = getattr(obj, attr, None)
setattr(obj, attr, new)
self.addCleanup(setattr, obj, attr, old)
def change_cwd(self, work_dir=None):
if work_dir is None:
work_dir = self.get_temp_dir()
cur_dir = os.path.abspath(os.getcwd())
def restore():
os.chdir(cur_dir)
self.addCleanup(restore)
os.chdir(work_dir)
return work_dir
def change_environment(self, **kwargs):
"""Change the environment to the given set of variables.
To clear an environment variable set it to None.
Existing environment restored after test.
"""
# preserve key elements needed for testing
for env in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_DEFAULT_REGION"]:
if env not in kwargs:
kwargs[env] = os.environ.get(env, "")
original_environ = dict(os.environ)
@self.addCleanup
def cleanup_env():
os.environ.clear()
os.environ.update(original_environ)
os.environ.clear()
for key, value in list(kwargs.items()):
if value is None:
del (kwargs[key])
os.environ.update(kwargs)
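# Example usage (illustrative values): swap the region and leave a proxy
# variable unset for the duration of a test; the cleanup registered above
# restores the original environment afterwards.
#
#   self.change_environment(AWS_DEFAULT_REGION="us-west-2", HTTPS_PROXY=None)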
def capture_logging(
self, name=None, level=logging.INFO, formatter=None, log_file=None
):
if log_file is None:
log_file = TextTestIO()
log_handler = logging.StreamHandler(log_file)
if formatter:
log_handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.addHandler(log_handler)
old_logger_level = logger.level
logger.setLevel(level)
@self.addCleanup
def reset_logging():
logger.removeHandler(log_handler)
logger.setLevel(old_logger_level)
return log_file
# Backport from stdlib for 2.7 compat, drop when 2.7 support is dropped.
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, six.string_types):
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
standardMsg = "Regex didn't match: %r not found in %r" % (
expected_regex.pattern, text)
# _formatMessage ensures the longMessage option is respected
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertJmes(self, expr, instance, expected):
value = jmespath.search(expr, instance)
self.assertEqual(value, expected)
class _TestUtils(unittest.TestCase):
# used to expose unittest feature set as a pytest fixture
def test_utils(self):
"""dummy method for py2.7 unittest"""
class PyTestUtils(CustodianTestCore):
"""Pytest compatible testing utils intended for use as a fixture."""
def __init__(self, request):
self.request = request
# Copy over asserts from unit test
t = _TestUtils('test_utils')
for n in dir(t):
if n.startswith('assert'):
setattr(self, n, getattr(t, n))
def addCleanup(self, func, *args, **kw):
self.request.addfinalizer(functools.partial(func, *args, **kw))
class TestUtils(unittest.TestCase, CustodianTestCore):
def tearDown(self):
self.cleanUp()
def cleanUp(self):
# Clear out thread local session cache
reset_session_cache()
class TextTestIO(io.StringIO):
def write(self, b):
# print handles both str/bytes and unicode/str, but io.{String,Bytes}IO
# requires us to choose. We don't have control over all of the places
# we want to print from (think: traceback.print_exc) so we can't
# standardize the arg type up at the call sites. Hack it here.
if not isinstance(b, six.text_type):
b = b.decode("utf8")
return super(TextTestIO, self).write(b)
# Per http://blog.xelnor.net/python-mocking-datetime/
# naive implementation has issues with pypy
real_datetime_class = datetime.datetime
def mock_datetime_now(tgt, dt):
class DatetimeSubclassMeta(type):
@classmethod
def __instancecheck__(mcs, obj):
return isinstance(obj, real_datetime_class)
class BaseMockedDatetime(real_datetime_class):
target = tgt
@classmethod
def now(cls, tz=None):
return cls.target.replace(tzinfo=tz)
@classmethod
def utcnow(cls):
return cls.target
# Python2 & Python3 compatible metaclass
MockedDatetime = DatetimeSubclassMeta(
b"datetime" if str is bytes else "datetime", # hack Python2/3 port
(BaseMockedDatetime,),
{},
)
return mock.patch.object(dt, "datetime", MockedDatetime)
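# Illustrative sketch (not part of the original module): how mock_datetime_now
# might be used in a test. The alias ``dt_module`` below is just a local name
# for whichever module holds the ``datetime`` attribute to patch.
def _example_mock_datetime_now_usage():
    import datetime as dt_module
    frozen = real_datetime_class(2020, 1, 1, 12, 0, 0)
    with mock_datetime_now(frozen, dt_module):
        # Inside the context both now() and utcnow() return the frozen value,
        # and isinstance checks against the real datetime class still pass.
        assert dt_module.datetime.utcnow() == frozen
        assert isinstance(dt_module.datetime.now(), real_datetime_class)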
|
|
import requests
from bs4 import BeautifulSoup
from pymongo import Connection
"""
Author : Kshitij Burman <kburman6@gmail.com>
"""
class IMDBParser:
def __init__(self):
pass
def parseMovie(self,id):
item = {"_id":id}
persons = {}
url = 'http://www.imdb.com/title/'+id+'/'
r = requests.get(url)
if r.ok :
soup = BeautifulSoup(r.text,'html5lib')
self.soup = soup
# overview table
ov = soup.find(attrs={"id":"title-overview-widget-layout"})
ov = ov.tbody
self.ov =ov
# check if cover image exists or not
img_primary = ov.find('td',attrs={"id":"img_primary"})
self.ip = img_primary
if img_primary.div != None:
item['poster_src'] = img_primary.find('img',attrs={"itemprop":"image"})['src']
del img_primary
# overview top
ovt = ov.find('td',attrs={"id":"overview-top"})
# get header
header = ovt.find(attrs={"class":"header"})
# get name
item['name'] = header.find(attrs={"class":"itemprop","itemprop":"name"}).text
item['name'] = self.shave(item['name'])
# get year
            if header.find('a') != None:
                item['release_year'] = header.find('a').text
            elif header.find('span',attrs={"class":"nobr"}) != None:
                item['release_year'] = header.find('span',attrs={"class":"nobr"}).text
del header
# get info bar
infobar = ovt.find(attrs={"class":"infobar"})
# get content rating
content_rating = infobar.find(attrs={"itemprop":"contentrating"})
if content_rating != None:
item['content_rating'] = content_rating['content']
del content_rating
# get time
            time = infobar.find(attrs={"itemprop":"duration"})
if time != None:
item['time'] = self.shave(time.text)
del infobar
# get rating
rating = ovt.find(attrs={"class":["star-box","giga-star"]})
if rating != None:
rating = rating.find(attrs={"class":"star-box-details","itemprop":"aggregateRating"})
                # the rating details may be missing,
                # so check for them before use
rate = rating.find(attrs={"itemprop":"ratingValue"})
if rate != None:
item['ratings'] = self.shave(rate.text)
del rate
ratecount = rating.find(attrs={"itemprop":"ratingCount"})
if ratecount != None:
item['rating_users'] = self.shave(ratecount.text)
del rating
# get summary of the movie
desc = ovt.find(attrs={"itemprop":"description"})
if desc != None:
item['desc'] = self.shave(desc.text)
del desc
humans = ovt.find(attrs={"itemprop":"creator"})
if humans != None:
human_list = []
if 'writers' in item:
                    human_list = item['writers']
for i in humans.findChildren('a',attrs={"itemprop":"url"}):
uid,name = self.createPerson(i)
persons[uid]={"name":name}
if uid not in human_list:
human_list.append(uid)
item['writers'] = human_list
del human_list
del humans
del i
humans = ovt.find(attrs={"itemprop":"director"})
if humans != None:
human_list = []
if 'director' in item:
                    human_list = item['director']
for i in humans.findChildren('a',attrs={"itemprop":"url"}):
uid,name = self.createPerson(i)
persons[uid]={"name":name}
if uid not in human_list:
human_list.append(uid)
item['director'] = human_list
del human_list
del humans
del i
del ovt
del ov
# now get cast people
cast_list = soup.find('table',attrs={"class":"cast_list"})
if cast_list != None:
cast_list = cast_list.tbody
cast_people = {}
for a in cast_list.findChildren('tr'):
if 'class' in a.attrs:
profile = {}
# thumb pic
img = a.find(attrs={"class":"primary_photo"}).a.img
src = img['src']
if 'loadlate' in img.attrs:
src = img['loadlate']
del img
if '/nopicture/' not in src:
profile['picture_small'] = src
# get name
profile['name'] = self.shave(a.find(attrs={"itemprop":"name"}).text)
# get IMDB ID
uid = a.find('a',attrs={"itemprop":"url"})['href']
uid = self.getID(uid)
# get char name
chname = a.find(attrs={"class":"character"}).text
chname = self.shave(chname)
indx = chname.find('/')
chname = chname[:indx]
                        # add it to the cast list
                        if uid not in cast_people:
cast_people[uid] = [chname,uid]
# add it to person list
if uid not in persons:
persons[uid] = profile
del profile
del uid
del chname
del indx
item['casts'] = cast_people
del cast_list
del cast_people
# get story line
sl = soup.find('div',attrs={"id":"titleStoryLine"})
if sl != None:
if sl.find(attrs={"itemprop":"description"}) != None:
item['desc'] = self.shave(sl.find(attrs={"itemprop":"description"}).text)
# now get genre
g = sl.find(attrs={"itemprop":"genre"})
if g != None:
genre = []
for i in g.findAll('a'):
genre.append(i.text)
item['genre'] = genre
del g
del i
del genre
del sl
# get details
det = soup.find('div',attrs={"id":"titleDetails"})
if det != None:
for row in det.findAll('div',attrs={"class":"txt-block"}):
if row.h4 != None:
txt = row.h4.text
if txt == "Release Date:":
item['release_date'] = self.shave(row.contents[2])
elif txt == "Budget:":
item['budget'] = self.shave(row.contents[2])
elif txt == "Gross:":
item['gross'] = self.shave(row.contents[2])
del txt
del det
# get facts
dun = soup.find('div',attrs={"id":"titleDidYouKnow"})
if dun != None:
for row in dun.findAll('div',attrs={"class":"txt-block"}):
if row.h4 != None:
txt = row.h4.text
if txt == "Trivia":
item['trivia'] = self.shave(row.contents[2])
elif txt == "Goofs":
item['goofs'] = self.shave(row.contents[2])
elif txt == "Quotes":
item['quotes'] = self.shave(row.contents[2])
del txt
del dun
imdb_ids = []
for i in soup.findAll('div',attrs={"class":"rec_item"}):
url = i.a['href']
imdb_ids.append(self.getID(url))
return item,persons,imdb_ids
else:
return None,None,None
def createPerson(self,soup):
url = soup['href']
url = self.getID(url)
name = soup.find(attrs={"itemprop":"name"}).text
name = self.shave(name)
return url,name
def getID(self,url):
        url = url[1:]
indx = url.find('/')
indx = indx + 1
url = url[indx:]
indx = url.find('/')
url = url[:indx]
return url
def shave(self,txt):
txt = txt.replace('\n','')
txt = txt.replace('\r','')
txt = txt.strip()
return txt
i = IMDBParser()
mov, human, related_ids = i.parseMovie('tt22679938')
print mov
print human
|
|
import os
import re
from pip.backwardcompat import urlparse
from pip.index import Link
from pip.util import rmtree, display_path, call_subprocess
from pip.log import logger
from pip.vcs import vcs, VersionControl
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile(r'committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
class Subversion(VersionControl):
name = 'svn'
dirname = '.svn'
repo_name = 'checkout'
schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
bundle_file = 'svn-checkout.txt'
guide = ('# This was an svn checkout; to make it a checkout again run:\n'
'svn checkout --force -r %(rev)s %(url)s .\n')
def get_info(self, location):
"""Returns (url, revision), where both are strings"""
assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
output = call_subprocess(
[self.cmd, 'info', location], show_stdout=False, extra_environ={'LANG': 'C'})
match = _svn_url_re.search(output)
if not match:
logger.warn('Cannot determine URL of svn checkout %s' % display_path(location))
logger.info('Output that cannot be parsed: \n%s' % output)
return None, None
url = match.group(1).strip()
match = _svn_revision_re.search(output)
if not match:
logger.warn('Cannot determine revision of svn checkout %s' % display_path(location))
logger.info('Output that cannot be parsed: \n%s' % output)
return url, None
return url, match.group(1)
def parse_vcs_bundle_file(self, content):
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
match = re.search(r'^-r\s*([^ ])?', line)
if not match:
return None, None
rev = match.group(1)
rest = line[match.end():].strip().split(None, 1)[0]
return rest, rev
return None, None
def export(self, location):
"""Export the svn repository at the url to the destination location"""
url, rev = self.get_url_rev()
rev_options = get_rev_options(url, rev)
logger.notify('Exporting svn repository %s to %s' % (url, location))
logger.indent += 2
try:
if os.path.exists(location):
# Subversion doesn't like to check out over an existing directory
# --force fixes this, but was only added in svn 1.5
rmtree(location)
call_subprocess(
[self.cmd, 'export'] + rev_options + [url, location],
filter_stdout=self._filter, show_stdout=False)
finally:
logger.indent -= 2
def switch(self, dest, url, rev_options):
call_subprocess(
[self.cmd, 'switch'] + rev_options + [url, dest])
def update(self, dest, rev_options):
call_subprocess(
[self.cmd, 'update'] + rev_options + [dest])
def obtain(self, dest):
url, rev = self.get_url_rev()
rev_options = get_rev_options(url, rev)
if rev:
rev_display = ' (to revision %s)' % rev
else:
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Checking out %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess(
[self.cmd, 'checkout', '-q'] + rev_options + [url, dest])
def get_location(self, dist, dependency_links):
for url in dependency_links:
egg_fragment = Link(url).egg_fragment
if not egg_fragment:
continue
if '-' in egg_fragment:
## FIXME: will this work when a package has - in the name?
key = '-'.join(egg_fragment.split('-')[:-1]).lower()
else:
key = egg_fragment
if key == dist.key:
return url.split('#', 1)[0]
return None
def get_revision(self, location):
"""
Return the maximum revision for all files under a given location
"""
# Note: taken from setuptools.command.egg_info
revision = 0
for base, dirs, files in os.walk(location):
if self.dirname not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove(self.dirname)
entries_fn = os.path.join(base, self.dirname, 'entries')
if not os.path.exists(entries_fn):
## FIXME: should we warn?
continue
dirurl, localrev = self._get_svn_url_rev(base)
if base == location:
base_url = dirurl + '/' # save the root url
elif not dirurl or not dirurl.startswith(base_url):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return revision
def get_url_rev(self):
        # Hotfix the URL scheme: the base class strips 'svn+' from 'svn+ssh://', so re-add it
url, rev = super(Subversion, self).get_url_rev()
if url.startswith('ssh://'):
url = 'svn+' + url
return url, rev
def get_url(self, location):
        # In cases where the source is in a subdirectory and setup.py is not
        # alongside it, we have to walk up from the location until we find a
        # real setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without finding setup.py
logger.warn("Could not find setup.py for directory %s (tried all parent directories)"
% orig_location)
return None
return self._get_svn_url_rev(location)[0]
def _get_svn_url_rev(self, location):
from pip.exceptions import InstallationError
f = open(os.path.join(location, self.dirname, 'entries'))
data = f.read()
f.close()
if data.startswith('8') or data.startswith('9') or data.startswith('10'):
data = list(map(str.splitlines, data.split('\n\x0c\n')))
del data[0][0] # get rid of the '8'
url = data[0][3]
revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
elif data.startswith('<?xml'):
match = _svn_xml_url_re.search(data)
if not match:
raise ValueError('Badly formatted data: %r' % data)
url = match.group(1) # get repository URL
revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
else:
try:
# subversion >= 1.7
xml = call_subprocess([self.cmd, 'info', '--xml', location], show_stdout=False)
url = _svn_info_xml_url_re.search(xml).group(1)
revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)]
except InstallationError:
url, revs = None, []
if revs:
rev = max(revs)
else:
rev = 0
return url, rev
def get_tag_revs(self, svn_tag_url):
stdout = call_subprocess(
[self.cmd, 'ls', '-v', svn_tag_url], show_stdout=False)
results = []
for line in stdout.splitlines():
parts = line.split()
rev = int(parts[0])
tag = parts[-1].strip('/')
results.append((tag, rev))
return results
def find_tag_match(self, rev, tag_revs):
best_match_rev = None
best_tag = None
for tag, tag_rev in tag_revs:
if (tag_rev > rev and
(best_match_rev is None or best_match_rev > tag_rev)):
# FIXME: Is best_match > tag_rev really possible?
# or is it a sign something is wacky?
best_match_rev = tag_rev
best_tag = tag
return best_tag
def get_src_requirement(self, dist, location, find_tags=False):
repo = self.get_url(location)
if repo is None:
return None
parts = repo.split('/')
## FIXME: why not project name?
egg_project_name = dist.egg_name().split('-', 1)[0]
rev = self.get_revision(location)
if parts[-2] in ('tags', 'tag'):
# It's a tag, perfect!
full_egg_name = '%s-%s' % (egg_project_name, parts[-1])
elif parts[-2] in ('branches', 'branch'):
# It's a branch :(
full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev)
elif parts[-1] == 'trunk':
# Trunk :-/
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev)
if find_tags:
tag_url = '/'.join(parts[:-1]) + '/tags'
tag_revs = self.get_tag_revs(tag_url)
match = self.find_tag_match(rev, tag_revs)
if match:
logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match)
repo = '%s/%s' % (tag_url, match)
full_egg_name = '%s-%s' % (egg_project_name, match)
else:
# Don't know what it is
logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo)
full_egg_name = '%s-dev_r%s' % (egg_project_name, rev)
return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name)
def get_rev_options(url, rev):
if rev:
rev_options = ['-r', rev]
else:
rev_options = []
r = urlparse.urlsplit(url)
if hasattr(r, 'username'):
# >= Python-2.5
username, password = r.username, r.password
else:
netloc = r[1]
if '@' in netloc:
auth = netloc.split('@')[0]
if ':' in auth:
username, password = auth.split(':', 1)
else:
username, password = auth, None
else:
username, password = None, None
if username:
rev_options += ['--username', username]
if password:
rev_options += ['--password', password]
return rev_options
vcs.register(Subversion)
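# Illustrative sketch (not part of pip): how get_rev_options handles a URL that
# embeds credentials. The URL below is a made-up example; this helper is never
# called at import time.
def _example_get_rev_options():
    options = get_rev_options('svn+http://user:secret@example.org/svn/proj', '42')
    # Revision flags come first, followed by the credentials pulled out of the
    # URL's netloc: ['-r', '42', '--username', 'user', '--password', 'secret']
    return options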
|
|
mHelpText = '''
# ----------------------------------------------
# Name: DoM
# Description: Expand and execute command from menu
## D20H-53 Expand and execute command
#
# Author: Philip S. Rist
# Date: 10/26/2010
# Copyright 2010 by St. Thomas Software
# ----------------------------------------------
# This program is freeware. It may be used
# for any moral purpose. You use it at your
# own risk. St. Thomas Software makes no
# guarantees to its fitness to do anything.
#
# If you feel you must pay for it. Please
# send one million dollars to
#
# The International Rescue Committee
# 122 East 42nd Street
# New York, N.Y. 10168-1289
#
# Ok, we are in a recession. So, make it a half
# million.
# DoM.py -
# The specified command in the specified section in the specified menu file will
# be expanded with Do.py and executed.
# Usage examples:
#
#[HKEY_CLASSES_ROOT\*\Shell\Rundom]
#@="Run"
#"EditFlags"=hex:01,00,00,00
#[HKEY_CLASSES_ROOT\*\Shell\Rundom\Command]
#@="c:\\sys\\python25\\pythonw.exe c:\\bin\\dom.py -m c:\bin\menus.ini \"%1\" project open "
#
# Syntax:
# dom.py [ options ] <file path> <section> <command key>
#
# Options:
# -d <path> - set current working directory
# -e <name>=<value> - set environment variable
# -h - display this text, everything else is ignored
# -l - list available commands, everything else is ignored
# -m <menu path> - set path to menu file
# default: '{o}\menus.ini;{i}\menus.ini' <- can be changed
# -v - run in verbose mode
# Sample menu file
[DOS]
Open,c:\source\TextFiles\qeditor.exe "{a}"
; Open file
Print,c:\windows\notepad.exe /P "{a}"
; Print file
Edit,c:\windows\system32\wscript.exe c:\bin\editit.vbs "{a}"
; Edit file with Notetab
Save,C:\Windows\system32\wscript.exe c:\bin\Util.vbs /S "{a}"
Has Been Saved,C:\Windows\system32\wscript.exe c:\bin\Util.vbs /K "{a}"
UnTabify,c:\sys\python25\python.exe c:\sys\python25\tools\scripts\untabify.py "{a}"
U2M,c:\bin\u2m.bat "{a}"
Echo,c:\bin\messagebox.exe "{a}"
Dir,c:\windows\system32\cmd.exe /C dir "{p}\*.{e}"
'''
import sys
import os
import getopt
import Do # Recipe: 577439
import Do2 # Recipe: 577440
import subprocess
def FindCommand(pKey, pFilePath, pSearchPath, pSection, pList=False, pVerbose=False):
'''
Find command keyword and extract containing line
pKey -- String identifying line to use as command template
pFilePath -- File to use in macro expansion
pSearchPath -- File to scan for command
pSection -- Section containing command
    pList -- True to list the available commands
    pVerbose -- True to print extra diagnostic output
'''
if pVerbose:
print 'DoM.py FindCommand:', pKey, pFilePath, pSearchPath, pSection
if not os.path.exists(pSearchPath):
lCommand = ''
print 'DoM.py Could not find menu file', pSearchPath
else:
lSection = pSection.lower()
lKey = pKey.lower()
# ---- Load menu file
lFile = open(pSearchPath, 'r')
lText = lFile.readlines()
lFile.close()
if len(lText) < 1:
print 'DoM.py Menu', pSearchPath, 'read failed'
lFound = False
lCommand = ''
lCount = 0
if pList:
print 'DoM.py Available commands in', pSearchPath
# ---- Scan menu file
for lLine in lText:
lLine = lLine.lstrip()
if len(lLine) < 1:
continue
# ---- Start of section
if lLine[0] == '[':
lCount = 0
lPos = lLine.find(']')
if lPos > 0:
lFoundSection = lLine[1:lPos].lower()
else:
lFoundSection = ''
# ---- Check for conditions, conditions are ignored
if lFoundSection[0] == '/':
lPos2 = lFoundSection.rfind('/')
if lPos2 >= 0:
lFoundSection = lFoundSection[lPos2+1:]
elif lFoundSection[0] == '!':
lPos2 = lFoundSection.rfind('!')
if lPos2 >= 0:
lFoundSection = lFoundSection[lPos2+1:]
#if lSection != lFoundSection and lSection != '*':
if not lFoundSection.startswith(lSection) and lSection != '*':
if pVerbose:
print 'DoM.py not found', lSection, 'in', lFoundSection
if lFound == True:
break
continue
elif lSection != '*' and lFound == True:
break
else:
if pVerbose:
print 'DoM.py found', lSection, 'in', lFoundSection
if pList:
print 'DoM.py Section:', lFoundSection
lFound = True
# ---- Comments
elif lLine[0] == ';':
if lFound and pList:
print 'DoM.py ', lLine,
# ---- Command lines and label lines
else:
if not lFound:
continue
if lLine[0] == '-':
continue
lPos = lLine.find(',')
if lPos > 0:
lMatch = lLine[0:lPos].lower()
else:
continue
# ---- Check for conditions, conditions are ignored
if lMatch[0] == '/':
lPos2 = lMatch.rfind('/')
if lPos2 >= 0:
lMatch = lMatch[lPos2+1:]
elif lMatch[0] == '!':
lPos2 = lMatch.rfind('!')
if lPos2 >= 0:
lMatch = lMatch[lPos2+1:]
lCount += 1
if pList:
if lPos > 0:
lLineText = lLine[lPos+1:]
print "DoM.py %5d: %-20s| %s" % (lCount, lMatch, lLineText),
# ---- Check for matching command
#if lKey == lMatch: # must match command key
if lMatch.startswith(lKey): # command key starts with key
lCommand = lLine[lPos+1:]
if pVerbose:
print 'DoM.py found command', lKey, 'in', lMatch, 'for', lCommand
break
else:
print 'DoM.py no command found in', pSearchPath, pSection
return lCommand[0:-1]
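# Illustrative example (not part of the original script): with the sample menu
# shown in mHelpText above, FindCommand('open', r'c:\work\notes.txt',
# r'c:\bin\menus.ini', 'DOS') would return the template
# 'c:\source\TextFiles\qeditor.exe "{a}"'. Matching is case-insensitive, and a
# menu entry is selected when its key starts with the given key.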
def Expand(pArgs, pFilePath, pSearchPath, pSection, pSep='!!', pList=False, pVerbose=False):
'''
Extract command from file and replace all macros
    pArgs -- Args passed to the program except file path and section name;
             pArgs[0] is the command key
    pFilePath -- File to use in macro expansion
    pSearchPath -- File to scan for command
    pSection -- Section containing command
    pSep -- String used to identify end of command
    pList -- True to list the available commands
    pVerbose -- True to print extra diagnostic output
'''
# ---- Find command
lCommand = FindCommand(pArgs[0], pFilePath, pSearchPath, pSection, pList=pList, pVerbose=pVerbose)
# ---- Expand and insert/append any passed arguments
# Arguments on original pb.py command line will replace {} from left to right
# otherwise they will be appended to the end of the command
lStart = 1
if len(lCommand) > 0:
if len(pArgs) > lStart:
for lArg in pArgs[lStart:]:
if lArg.find('{') >= 0:
lArg = Do2.ExpandArg(lArg, pFilePath, '')
if len(lArg) > 0:
try:
lTest = os.path.abspath(lArg)
if os.path.exists(lTest):
if lTest.find(" ") > 0:
lTest = '"' + lTest + '"'
lArg = lTest
except:
pass
lPos = lCommand.find('{}')
if lPos >= 0:
lCommand = lCommand[0:lPos] + lArg + lCommand[lPos+2:]
else:
lCommand += ' ' + lArg
# ---- Prevent unwanted arguments appended to command
lPos = lCommand.rfind(pSep)
if lPos > 0:
lCommand = lCommand[0:lPos]
# ---- Expand all remaining macros
if lCommand.find('{') >= 0:
lCommand = Do.Expand(lCommand, pFilePath)
return lCommand
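# Illustrative example (not part of the original script): if the selected menu
# entry were 'copy "{a}" "{}"' and an extra argument such as d:\backup were
# passed in pArgs, Expand() would substitute the argument for the first '{}'
# placeholder; arguments that find no '{}' left are appended to the end of the
# command, and any remaining macros are expanded via Do.Expand().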
def submitnow(pArgs, pFilePath, pSearchPath, pSection, pVerbose, pList=False):
'Expand and submit command'
if pVerbose:
print 'DoM.py File path:', pFilePath
print 'DoM.py Menu path:', pSearchPath
print 'DoM.py Section: ', pSection
print 'DoM.py Arguments:', pArgs
lCommand = Expand(pArgs, pFilePath, pSearchPath, pSection, pList=pList, pVerbose=pVerbose)
# ---- Any macro not expanded will be assumed to be an environment variable
# If %...% had been used it would have been replaced when pb.py was run
lCommand = lCommand.replace('{}',' ') #<-- may want to do something else
lCommand = lCommand.replace('{', '%') # try to replace with environment variable
lCommand = lCommand.replace('}', '%')
if len(lCommand) == 0:
print 'DoM.py Expansion failed'
else:
lCommand = '"' + lCommand + '"'
if pVerbose:
print 'DoM.py Submitting: ', lCommand
subprocess.Popen(lCommand, shell=True)
def setenviron(pValue, pFileName):
'Set environment variable'
lParts = pValue.split('=')
if len(lParts) > 1:
lKey = lParts[0]
lValue = lParts[1]
if lValue.find('{') >= 0:
lValue = Do2.ExpandArg(lValue, pFileName, '')
os.environ[lKey] = lValue
else:
os.environ[pValue] = ''
if __name__ == '__main__':
(mOptions, mArgs) = getopt.getopt(sys.argv[1:], 'd:e:hlm:v')
mVerbose = False
mHelp = False
mList = False
mSearchPath = '{o}\menus.ini;{i}\menus.ini'
for (mKey, mValue) in mOptions:
if mKey == '-d': # Set current directory
if mValue.find('{') >= 0:
if len(mArgs) > 2:
                    mFilePath = os.path.abspath(mArgs[0])
                    mValue = Do.ExpandArg(mValue, mFilePath)
                    os.chdir(mValue)
else:
print 'DoM.py No primary file, could not set directory'
break
else:
os.chdir(mValue)
elif mKey == '-e': # Set environment variable
setenviron(mValue, mFilePath)
elif mKey == '-h':
print mHelpText
mHelp = True
elif mKey == '-l':
mList = True
elif mKey == '-m': #
mSearchPath = mValue
elif mKey == '-v':
mVerbose = True
if len(mArgs) > 2:
mFilePath = os.path.abspath(mArgs[0])
mSection = mArgs[1]
        if mSection.find('{') >= 0:
mSection = Do.Expand(mSection, mFilePath)
mKey = mArgs[2]
        if mKey.find('{') >= 0:
mArgs[2] = Do.Expand(mKey, mFilePath)
mArgs[2] = mArgs[2].replace('_', ' ')
if mSearchPath.find('{') >= 0:
mSearchPath = Do2.ExpandArg(mSearchPath, mFilePath, '')
if mSearchPath[0] == '"':
mSearchPath = mSearchPath[1:-1]
mSearchPath = os.path.abspath(mSearchPath)
if mHelp:
print 'DoM.py Default menu: ', mSearchPath
print 'DoM.py Default section:', mSection
elif mList:
            Expand(['???'], mFilePath, mSearchPath, mSection, pList=mList, pVerbose=mVerbose)
else:
submitnow(mArgs[2:], mFilePath, mSearchPath, mSection, mVerbose)
elif mHelp == False:
        print 'DoM.py Command and/or file path missing'
|
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from inspect import currentframe, getframeinfo
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
from caffe2.python.task import Node, Task
class TestScopes(test_util.TestCase):
def testBlobReferenceIsIndependentFromNameScope(self):
blob_v = core.BlobReference("v")
with core.NameScope("foo"):
blob_w = core.BlobReference("w")
with core.NameScope("bar"):
blob_x = core.BlobReference("x")
self.assertEqual(str(blob_v), "v")
self.assertEqual(str(blob_w), "w")
self.assertEqual(str(blob_x), "x")
def testNameScopeWithOp(self):
global_x = core.BlobReference("x")
global_y = core.BlobReference("y")
with core.NameScope("foo"):
# Raw strings should have namescope prepended.
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/y")
# BlobReferences should not.
op = core.CreateOperator("Relu", global_x, global_y)
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "y")
def testNameScopeWithReset(self):
with core.NameScope("foo"):
# foo/
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/y")
with core.NameScope("bar"):
# foo/bar/
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/bar/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/bar/y")
# Back to foo/
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/y")
with core.NameScope("bar", reset=True):
# bar/
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "bar/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "bar/y")
# Back to foo/
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/y")
def testDeviceScope(self):
# No device
op = core.CreateOperator("Relu", "x", "y")
self.assertFalse(op.HasField('device_option'))
# explicitly setting a device
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
op = core.CreateOperator("Relu", "x", "y", device_option=device_option)
self.assertTrue(op.HasField('device_option'))
self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
with core.DeviceScope(device_option):
# from device scope
op = core.CreateOperator("Relu", "x", "y")
self.assertTrue(op.HasField('device_option'))
self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
# from an overridden device option
override_device = caffe2_pb2.DeviceOption()
override_device.device_type = caffe2_pb2.CPU
op = core.CreateOperator(
"Relu", "x", "y", device_option=override_device)
self.assertTrue(op.HasField('device_option'))
self.assertEqual(op.device_option.device_type, caffe2_pb2.CPU)
# back from normal: no device
op = core.CreateOperator("Relu", "x", "y")
self.assertFalse(op.HasField('device_option'))
device_option = caffe2_pb2.DeviceOption()
def testNameAndDeviceScopeTogether(self):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
with core.DeviceScope(device_option):
with core.NameScope("foo"):
op = core.CreateOperator("Relu", "x", "y")
self.assertTrue(op.HasField('device_option'))
self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/y")
class TestCloneNet(test_util.TestCase):
def testPartialClone(self):
params = core.Net('params')
p1 = params.ConstantFill([], ['p1'])
workspace.CreateNet(params)
workspace.RunNetOnce(params)
n = core.Net('original')
a1 = n.AddExternalInput('a1')
a2 = n.AddExternalInput('a2')
b1, b2 = n.Concat([a1, a2], ['b1', 'b2'], axis=0)
c1 = n.Sum([b1, p1], ['c1'])
c2 = n.Sum([b2], ['c2'])
d = n.Sum([c1, c2], ['d'])
# test that gradient ops are ignored when partial-cloning
n.AddGradientOperators([d])
# test some in-place ops
k = n.Sum([p1], ['k'])
e = n.Sum([d], ['e'])
e = n.Sum([e, k], [e])
e = n.Sum([e], [e])
f = n.Sum(e, ['f'])
def net_assert(net, num_ops, inputs, outputs, internals):
self.assertEqual(len(net.Proto().op), num_ops)
self.assertEqual(set(net.Proto().external_input), inputs)
self.assertEqual(set(net.Proto().external_output), outputs)
all_blobs = set(net.Proto().external_input)
all_blobs |= set(net.Proto().external_output)
for op in net.Proto().op:
all_blobs |= set(op.input) | set(op.output)
self.assertEqual(all_blobs, inputs | outputs | internals)
            # create the net to make sure it's valid
for input in inputs:
workspace.FeedBlob(input, np.array([]))
workspace.CreateNet(net)
n2, (d22, ) = n.ClonePartial('f1', {a1: 'a11', a2: 'a22'}, [d])
net_assert(
n2, 4, {'p1', 'a11', 'a22'}, {'f1/d'},
{'f1/b1', 'f1/b2', 'f1/c1', 'f1/c2', 'p1'})
self.assertTrue(isinstance(d22, core.BlobReference))
self.assertEqual(d22.Net(), n2)
self.assertEqual(str(d22), 'f1/d')
n3, (d22, ) = n.ClonePartial('f2', [b1, b2], [d])
net_assert(
n3, 3, {'p1', 'b1', 'b2'}, {'f2/d'}, {'f2/c1', 'f2/c2', 'p1'})
self.assertEqual(str(d22), 'f2/d')
n4, (c22, ) = n.ClonePartial('f3', [b1], [c1])
net_assert(n4, 1, {'p1', 'b1'}, {'f3/c1'}, {'p1'})
self.assertEqual(str(c22), 'f3/c1')
n5, (c11, c22) = n.ClonePartial('f4', [b1, b2], [c1, c2])
net_assert(n5, 2, {'p1', 'b1', 'b2'}, {'f4/c1', 'f4/c2'}, {'p1'})
self.assertEqual(str(c11), 'f4/c1')
self.assertEqual(str(c22), 'f4/c2')
with self.assertRaises(AssertionError):
n.ClonePartial('f4', [a1, a2, c2], [d])
n6, (e22, ) = n.ClonePartial('f5', [d], [e])
net_assert(n6, 4, {'p1', 'd'}, {'f5/e'}, {'f5/k', 'p1'})
self.assertEqual(str(e22), 'f5/e')
n8, (e22, f22) = n.ClonePartial('f7', [d], [e, f])
net_assert(n8, 5, {'p1', 'd'}, {'f7/e', 'f7/f'}, {'p1', 'f7/k'})
self.assertEqual(str(e22), 'f7/e')
self.assertEqual(str(f22), 'f7/f')
params._CheckLookupTables()
n._CheckLookupTables()
class TestCreateOperator(test_util.TestCase):
def testCreate(self):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
op = core.CreateOperator(
"Ludicrous", "x", "y", name="ludicrous",
control_input="z", device_option=device_option,
engine="WARP", arg1=1, arg2="2", arg3=[1, 2, 3])
self.assertEqual(op.type, "Ludicrous")
self.assertEqual(op.name, "ludicrous")
self.assertEqual(op.engine, "WARP")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "y")
self.assertEqual(len(op.control_input), 1)
self.assertEqual(op.control_input[0], "z")
self.assertTrue(op.HasField('device_option'))
self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
        self.assertEqual(len(op.arg), 3)
# can't guarantee ordering of kwargs, so generate a set of args
# to test with
arg_map = {}
for arg in op.arg:
arg_map[arg.name] = arg
# Check all elements exist that should
self.assertEqual("arg1" in arg_map, True)
self.assertEqual("arg2" in arg_map, True)
self.assertEqual("arg3" in arg_map, True)
# Now test that all args were initialized correctly
self.assertEqual(arg_map["arg1"].i, 1)
self.assertEqual(arg_map["arg2"].s, b"2")
self.assertEqual(list(arg_map["arg3"].ints), [1, 2, 3])
def testCreateWithNoneKwarg(self):
with self.assertRaises(ValueError):
core.CreateOperator("Ludicrous", "x", "y", arg1=None)
class TestAutoNaming(test_util.TestCase):
def assertOperatorListEqual(self, operatorDefList1, operatorDefList2):
for op in operatorDefList1:
op.debug_info = ""
for op in operatorDefList2:
op.debug_info = ""
self.assertEqual(operatorDefList1, operatorDefList2)
"""
Test that operators are named with different names, and that automatically
named blob names don't clash intra or inter networks.
"""
def test_next_blob(self):
def create_net():
net = core.Net('net')
with core.NameScope('foo'):
net.Add(['a', 'b'], net.NextScopedBlob('ab'))
net.Add(['c', 'd'], net.NextBlob('cd'))
return net
net_a = create_net()
net_b = create_net()
        # created net proto is predictable.
self.assertOperatorListEqual(net_a.Proto().op,
net_b.Proto().op)
self.assertEqual(net_a.Proto().op[0].output[0], 'foo/ab')
self.assertEqual(net_a.Proto().op[1].output[0], 'cd')
net_c = core.Net('net')
# different calls return different blob names
self.assertNotEqual(str(net_c.NextBlob('b')), str(net_c.NextBlob('b')))
def test_auto_naming(self):
a = core.Net('net')
b = core.Net('net')
self.assertNotEqual(a.Proto().name, b.Proto().name)
a_in1 = a.AddExternalInput('a')
b_in1 = b.AddExternalInput('b')
all_outputs_single = []
all_outputs_list = []
def add_ops():
all_outputs_single.append(a.Sum([a_in1, a_in1]))
all_outputs_single.append(a.Sum([a_in1, a_in1]))
all_outputs_single.append(b.Sum([b_in1, b_in1]))
all_outputs_single.append(b.Sum([b_in1, b_in1]))
all_outputs_list.append(a.Sum([a_in1, a_in1], outputs=2))
all_outputs_list.append(a.Sum([a_in1, a_in1], outputs=2))
all_outputs_list.append(b.Sum([b_in1, b_in1], outputs=2))
all_outputs_list.append(b.Sum([b_in1, b_in1], outputs=2))
add_ops()
with core.NameScope('n1'):
add_ops()
# Force reset of lookup tables
a.Proto().name
with core.NameScope('n2'):
add_ops()
all_outputs = []
for s in all_outputs_single:
all_outputs.append(str(s))
for l in all_outputs_list:
for o in l:
all_outputs.append(str(o))
for i, o1 in enumerate(all_outputs):
for j, o2 in enumerate(all_outputs):
if i != j:
self.assertNotEqual(str(o1), str(o2))
a._CheckLookupTables()
b._CheckLookupTables()
class TestAppendNet(test_util.TestCase):
def test_external_inputs_merged_correctly(self):
netA = core.Net("A")
netA.Sum(["in1", "in2"], ["sum1"])
self.assertTrue("in1" in netA.external_inputs)
netB = core.Net("B")
netB.Sum(["in3", "in4"], ["in1"])
netB.AppendNet(netA)
self.assertFalse("in1" in netB.external_inputs)
def test_external_inputs_merged_correctlyB(self):
netA = core.Net("A")
netA.Sum(["in1", "in2"], ["sum1"])
self.assertTrue("in1" in netA.external_inputs)
netB = core.Net("B")
netB.Sum(["in3", "in4"], ["in1"])
netA.AppendNet(netB) # note different order than in prev test
self.assertTrue("in1" in netA.external_inputs)
class TestExtractPredictorNet(test_util.TestCase):
def test_extract_simple(self):
from caffe2.python import brew
from caffe2.python.model_helper import ModelHelper, ExtractPredictorNet
model = ModelHelper(name="test", arg_scope={'order': 'NCHW'})
[data, label] = brew.image_input(
model,
"reader", ["xx/data", "label"],
is_test=1,
)
cnv = brew.conv(model, data, 'cnv', 32, 32, 4)
a = brew.fc(model, cnv, 'a', 100, 200)
pred = brew.fc(model, a, 'pred', 200, 5)
brew.softmax(model, [pred, label], "softmax")
(predict_net, export_blobs) = ExtractPredictorNet(
net_proto=model.net.Proto(),
input_blobs=["xx/data"],
output_blobs=["pred"],
renames={"xx/data": "image"},
)
export_blobs = set(export_blobs)
ops = list(predict_net.Proto().op)
for op in ops:
self.assertFalse(op.type == "Softmax")
self.assertFalse("xx/data" in op.input)
# Note: image input should not be included
self.assertEquals(ops[0].type, "Conv")
self.assertEquals(ops[1].type, "FC")
self.assertEquals(ops[2].type, "FC")
self.assertEquals(len(ops), 3)
# test rename happened
self.assertEquals(ops[0].input[0], "image")
# Check export blobs
self.assertTrue("image" not in export_blobs)
self.assertTrue("xx/data" not in export_blobs)
self.assertEqual(set([str(p) for p in model.params]), export_blobs)
# Check external inputs/outputs
self.assertTrue("image" in predict_net.Proto().external_input)
self.assertEquals(set(["pred"]), set(predict_net.Proto().external_output))
self.assertEqual(
set(predict_net.Proto().external_input) -
set([str(p) for p in model.params]), set(["image"])
)
class TestOperatorTraceback(test_util.TestCase):
def op_name_check(self, net, cf, line, func):
net.PopulateProtoWithFileName()
filename = getframeinfo(cf).filename
self.assertEqual(net.Proto().op[0].name, '{}:{}:{}'.format(
filename, line, func))
def test_operator_constructor_traceback(self):
net = core.Net("test")
a, b = net.AddExternalInput("a", "b")
net.Mul([a, b], "c"); cf = currentframe(); line = cf.f_lineno
func = cf.f_code.co_name
with self.assertRaises(Exception):
workspace.RunNetOnce(net)
with self.assertRaises(Exception):
workspace.CreateNet(net)
self.op_name_check(net, cf, line, func)
def test_operator_runtime_traceback(self):
net = core.Net("test")
a = net.AddExternalInput("a")
workspace.blobs[a] = np.array([1, 2, 3], dtype=np.float32)
net.Split(a, ["b", "c"], axis=0); cf = currentframe(); line = cf.f_lineno
func = cf.f_code.co_name
with self.assertRaises(Exception):
workspace.RunNetOnce(net)
workspace.CreateNet(net)
with self.assertRaises(Exception):
workspace.RunNet(net)
self.op_name_check(net, cf, line, func)
def test_c_workspace_constructor(self):
net = core.Net("test")
a, b = net.AddExternalInput("a", "b")
net.Mul([a, b], "c"); cf = currentframe(); line = cf.f_lineno
func = cf.f_code.co_name
ws = workspace.C.Workspace()
with self.assertRaises(Exception):
ws.run(net)
with self.assertRaises(Exception):
ws.create_net(net)
self.op_name_check(net, cf, line, func)
def test_c_workspace_runtime(self):
net = core.Net("test")
a = net.AddExternalInput("a")
net.Split(a, ["b", "c"], axis=0); cf = currentframe(); line = cf.f_lineno
func = cf.f_code.co_name
ws = workspace.C.Workspace()
ws.create_blob(str(a)).feed(np.array([1, 2, 3], dtype=np.float32))
ws.create_net(net)
with self.assertRaises(Exception):
ws.run(net)
self.op_name_check(net, cf, line, func)
def test_async_exception_handling(self):
net = core.Net("test")
net.Proto().type = 'dag' # this runs operators on background threads
a = net.AddExternalInput("a")
net.Split(a, ["b", "c"], axis=0); cf = currentframe(); line = cf.f_lineno
func = cf.f_code.co_name
workspace.FeedBlob(a, np.array([1, 2, 3], dtype=np.float32))
with self.assertRaises(Exception) as enforceNotMet:
workspace.RunNetOnce(net)
self.assertIn('enforce fail', str(enforceNotMet.exception))
self.op_name_check(net, cf, line, func)
class TestCreatePlan(test_util.TestCase):
def test_create_plan_from_proto_correctly(self):
from caffe2.python.net_builder import ops
with Node('trainer'), Task(name='my_task', num_instances=2) as task:
with ops.task_init():
globl = ops.Const(0)
with ops.task_instance_init():
local = ops.Const(0)
with ops.loop(100):
ops.Copy(globl, local)
with ops.task_instance_exit():
ops.Add([globl, local], [globl])
with ops.task_exit():
ops.Mul([globl, globl], [globl])
plan = core.Plan(task.get_step())
test_plan = core.Plan.create_from_proto(plan.Proto())
self.assertEqual(len(plan.Steps()), 1)
self.assertEqual(len(test_plan.Steps()), 1)
self.assertEqual(plan.Steps()[0].Name(), test_plan.Steps()[0].Name())
self.assertEqual(len(plan.Nets()), len(test_plan.Nets()))
for idx in range(0, len(plan.Nets())):
# When we create Net for test_plan, we will end up with new Net
# name with postfix.
net_1 = plan.Nets()[idx]
net_2 = test_plan.Nets()[idx]
trim_size = len(net_1.Name())
self.assertEqual(net_1.Name(), net_2.Name()[:trim_size])
class TestOpRegistryKey(test_util.TestCase):
def test_is_operator(self):
self.assertTrue(core.IsOperator('Relu'))
self.assertFalse(core.IsOperator('NOEXIST'))
def test_is_operator_with_engine(self):
self.assertTrue(core.IsOperatorWithEngine('Relu', 'DEFAULT'))
self.assertFalse(core.IsOperatorWithEngine('Relu', 'NOEXIST'))
class TestDeviceOption(test_util.TestCase):
def test_check_equal_node_name(self):
opt1 = core.DeviceOption(0)
opt2 = core.DeviceOption(0)
self.assertTrue(core.device_option_equal(opt1, opt2))
opt2.node_name = 'test'
self.assertTrue(core.device_option_equal(opt1, opt2))
self.assertFalse(core.device_option_equal(opt1, opt2, ignore_node_name=False))
opt1.node_name = 'test'
self.assertTrue(core.device_option_equal(opt1, opt2, ignore_node_name=False))
def test_check_equal_default_value(self):
opt1 = caffe2_pb2.DeviceOption()
opt2 = caffe2_pb2.DeviceOption()
opt1.device_type = 0
self.assertTrue(core.device_option_equal(opt1, opt2))
opt1.cuda_gpu_id = 5
# opt1 still is on CPU, so the options should be equal
self.assertTrue(core.device_option_equal(opt1, opt2))
opt2.device_type = 0
self.assertTrue(core.device_option_equal(opt1, opt2))
opt1.device_type = 1
self.assertFalse(core.device_option_equal(opt1, opt2))
@unittest.skipIf(not workspace.has_gpu_support, 'No GPU support')
class TestInferDevice(test_util.TestCase):
def setUp(self):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
self.cuda_option = device_option
self.cpu_option = caffe2_pb2.DeviceOption()
def _test_op(
self,
op_name,
in_option,
out_option,
op_option=None,
inputs=None,
outputs=None
):
op_option = self.cuda_option if not op_option else op_option
inputs = ["blob_1"] if not inputs else inputs
outputs = ["blob_2"] if not outputs else outputs
with core.DeviceScope(op_option):
op = core.CreateOperator(op_name, inputs, outputs)
input_dev, output_dev = core.InferOpBlobDevices(op)
for in_dev in input_dev:
self.assertEqual(in_dev, in_option)
for out_dev in output_dev:
self.assertEqual(out_dev, out_option)
def test_infer_device(self):
self._test_op(
"FC",
self.cuda_option,
self.cuda_option,
op_option=self.cuda_option,
inputs=["data", "fc_w", "fc_b"],
outputs=["fc_1"]
)
def test_infer_device_cross_device(self):
self._test_op("CopyGPUToCPU", self.cuda_option, self.cpu_option)
self._test_op("CopyCPUToGPU", self.cpu_option, self.cuda_option)
self._test_op("EnsureCPUOutput", self.cuda_option, self.cpu_option)
self._test_op("CopyFromCPUInput", self.cpu_option, self.cuda_option)
self._test_op(
"EnsureCPUOutput",
self.cpu_option,
self.cpu_option,
op_option=self.cpu_option
)
self._test_op(
"CopyFromCPUInput",
self.cpu_option,
self.cpu_option,
op_option=self.cpu_option
)
def test_inject_copy(self):
net = core.Net("test")
init_net = core.Net("init")
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
weight = init_net.XavierFill([], 'fc_w', shape=[10, 100])
bias = init_net.ConstantFill([], 'fc_b', shape=[10, ])
with core.DeviceScope(device_option):
net.FC(["data", weight, bias], "fc1")
_, blob_to_device = core.InjectCrossDeviceCopies(init_net)
new_net, blob_to_device = core.InjectCrossDeviceCopies(
net, blob_to_device
)
op = new_net._net.op[-1]
self.assertEqual(op.type, "FC")
self.assertEqual(op.input[0], "data_cuda_1")
self.assertEqual(op.input[1], "fc_w_cuda_1")
self.assertEqual(op.input[2], "fc_b_cuda_1")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
self.assertEqual(new_net._net.op[-2].type, "CopyCPUToGPU")
self.assertEqual(new_net._net.op[0].type, "CopyCPUToGPU")
self.assertNotEqual(blob_to_device["fc_w"], device_option)
def test_cross_nets(self):
net = core.Net("test")
init_net = core.Net("init")
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
weight = init_net.XavierFill([], 'fc_w', shape=[10, 100])
bias = init_net.ConstantFill([], 'fc_b', shape=[10, ])
const = init_net.ConstantFill([], 'const', shape=[], value=1.)
with core.DeviceScope(device_option):
const = init_net.Add([const, const], [const])
fc_out = net.FC(["data", weight, bias], "fc1")
net.Add([fc_out, const], [fc_out])
data_remap = {'data': device_option}
nets, _ = core.InjectDeviceCopiesAmongNets(
[init_net, net], blob_to_device_init=data_remap
)
op = nets[1]._net.op[0]
self.assertEqual(op.type, "CopyCPUToGPU")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
self.assertEqual(op.output[0], "fc_w_cuda_1")
op = nets[1]._net.op[1]
self.assertEqual(op.type, "CopyCPUToGPU")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
self.assertEqual(op.output[0], "fc_b_cuda_1")
op = nets[1]._net.op[2]
self.assertEqual(op.type, "FC")
self.assertEqual(op.input[0], "data")
self.assertEqual(op.input[1], "fc_w_cuda_1")
self.assertEqual(op.input[2], "fc_b_cuda_1")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
op = nets[1]._net.op[3]
self.assertEqual(op.type, "Add")
self.assertEqual(op.input[0], "fc1")
self.assertEqual(op.input[1], "const_cuda_1")
# check that moved blob is in input to the new net
for c in ["data", "fc_w", "fc_b", "const_cuda_1"]:
self.assertTrue(c in nets[1]._net.external_input)
"""
For reference, net.Proto() should be like:
name: ""
op {
input: "fc_w"
output: "fc_w_cuda_1"
name: ""
type: "CopyCPUToGPU"
device_option {
device_type: 1
cuda_gpu_id: 1
}
}
op {
input: "fc_b"
output: "fc_b_cuda_1"
name: ""
type: "CopyCPUToGPU"
device_option {
device_type: 1
cuda_gpu_id: 1
}
}
op {
input: "data"
input: "fc_w_cuda_1"
input: "fc_b_cuda_1"
output: "fc1"
name: ""
type: "FC"
device_option {
device_type: 1
cuda_gpu_id: 1
}
}
op {
input: "fc1"
input: "const_cuda_1"
output: "fc1"
name: ""
type: "Add"
device_option {
device_type: 1
cuda_gpu_id: 1
}
}
external_input: "data"
external_input: "fc_w"
external_input: "fc_b"
external_input: "const"
external_input: "const_cuda_1"
"""
def test_cross_nets_no_change(self):
net = core.Net("test")
init_net = core.Net("init")
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
with core.DeviceScope(device_option):
weight = init_net.XavierFill([], 'fc_w', shape=[10, 100])
bias = init_net.ConstantFill([], 'fc_b', shape=[10, ])
net.FC(["data", weight, bias], "fc1")
data_remap = {'data': device_option}
nets = core.InjectDeviceCopiesAmongNetsWithoutB2D(
[init_net, net], blob_to_device_init=data_remap
)
op = nets[1]._net.op[0]
self.assertEqual(op.type, "FC")
self.assertEqual(op.input[0], "data")
self.assertEqual(op.input[1], "fc_w")
self.assertEqual(op.input[2], "fc_b")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
"""
For reference, net.Proto() should be like:
name: ""
op {
input: "data"
input: "fc_w"
input: "fc_b"
output: "fc1"
name: ""
type: "FC"
device_option {
device_type: 1
cuda_gpu_id: 1
}
}
external_input: "data"
external_input: "fc_w"
external_input: "fc_b"
"""
def test_inject_copy_multi_use(self):
net = core.Net("test")
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
with core.DeviceScope(device_option):
net.Relu("data", "relu1")
net.Relu("data", "relu2")
with core.DeviceScope(device_option):
net.Relu("data", "relu3")
net.Relu("data", "relu4")
device_option.cuda_gpu_id = 0
with core.DeviceScope(device_option):
net.Relu("data", "relu5")
device_option.cuda_gpu_id = 1
with core.DeviceScope(device_option):
net.Relu("data", "relu6")
new_net, _ = core.InjectCrossDeviceCopies(net)
op = new_net._net.op[0]
self.assertEqual(op.type, "CopyCPUToGPU")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
self.assertEqual(op.output[0], "data_cuda_1")
op = new_net._net.op[1]
self.assertEqual(op.type, "Relu")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
self.assertEqual(op.output[0], "relu1")
op = new_net._net.op[2]
self.assertEqual(op.type, "Relu")
self.assertEqual(op.device_option.device_type, 0)
self.assertEqual(op.output[0], "relu2")
op = new_net._net.op[3]
self.assertEqual(op.type, "Relu")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
self.assertEqual(op.input[0], "data_cuda_1")
self.assertEqual(op.output[0], "relu3")
op = new_net._net.op[4]
self.assertEqual(op.type, "Relu")
self.assertEqual(op.device_option.device_type, 0)
self.assertEqual(op.output[0], "relu4")
op = new_net._net.op[5]
self.assertEqual(op.type, "CopyCPUToGPU")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 0)
self.assertEqual(op.output[0], "data_cuda_0")
op = new_net._net.op[6]
self.assertEqual(op.type, "Relu")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 0)
self.assertEqual(op.input[0], "data_cuda_0")
self.assertEqual(op.output[0], "relu5")
op = new_net._net.op[7]
self.assertEqual(op.type, "Relu")
self.assertEqual(op.device_option.device_type, 1)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
self.assertEqual(op.input[0], "data_cuda_1")
self.assertEqual(op.output[0], "relu6")
"""
For reference, net.Proto() should be like:
name: ""
op {
input: "data"
output: "data_cuda_1"
name: ""
type: "CopyCPUToGPU"
device_option {
device_type: 1
cuda_gpu_id: 1
}
}
op {
input: "data_cuda_1"
output: "relu1"
name: ""
type: "Relu"
device_option {
device_type: 1
cuda_gpu_id: 1
}
}
op {
input: "data"
output: "relu2"
name: ""
type: "Relu"
}
op {
input: "data_cuda_1"
output: "relu3"
name: ""
type: "Relu"
device_option {
device_type: 1
cuda_gpu_id: 1
}
}
op {
input: "data"
output: "relu4"
name: ""
type: "Relu"
}
op {
input: "data"
output: "data_cuda_0"
name: ""
type: "CopyCPUToGPU"
device_option {
device_type: 1
cuda_gpu_id: 0
}
}
op {
input: "data_cuda_0"
output: "relu5"
name: ""
type: "Relu"
device_option {
device_type: 1
cuda_gpu_id: 0
}
}
op {
input: "data_cuda_1"
output: "relu6"
name: ""
type: "Relu"
device_option {
device_type: 1
cuda_gpu_id: 1
}
}
external_input: "data"
"""
def test_blob_inplace(self):
net = core.Net("test")
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
net.Adagrad(['param', 'moment', 'grad', 'lr'], ['param', 'moment'])
with core.DeviceScope(device_option):
net.Relu("param", "param_relu_no_sense")
net, _ = core.InjectCrossDeviceCopies(net)
op = net._net.op[1]
self.assertEqual(op.type, 'CopyCPUToGPU')
self.assertEqual(op.input[0], 'param')
self.assertEqual(op.output[0], 'param_cuda_1')
op = net._net.op[2]
self.assertEqual(op.input[0], 'param_cuda_1')
net.Relu('nonsense_input', 'moment')
# should not raise inplace error
core.InjectCrossDeviceCopies(net)
with core.DeviceScope(device_option):
net.Relu('nonsense_input_gpu', 'moment')
with self.assertRaises(RuntimeError):
core.InjectCrossDeviceCopies(net)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.platform.power_monitor import cros_power_monitor
class CrosPowerMonitorMonitorTest(unittest.TestCase):
initial_power = ('''line_power_connected 0
battery_present 1
battery_percent 70.20
battery_charge 2.83
battery_charge_full 4.03
battery_charge_full_design 4.03
battery_current 1.08
battery_energy 31.83
battery_energy_rate 12.78
battery_voltage 11.82
battery_discharging 1''')
final_power = ('''line_power_connected 0
battery_present 1
battery_percent 70.20
battery_charge 2.83
battery_charge_full 4.03
battery_charge_full_design 4.03
battery_current 1.08
battery_energy 31.83
battery_energy_rate 12.80
battery_voltage 12.24
battery_discharging 1''')
incomplete_final_power = ('''line_power_connected 0
battery_present 1
battery_percent 70.20
battery_charge 2.83
battery_charge_full 4.03
battery_charge_full_design 4.03
battery_energy_rate 12.80
battery_discharging 1''')
expected_power = {
'energy_consumption_mwh': 2558.0,
'power_samples_mw': [12780.0, 12800.0],
'component_utilization': {
'battery': {
'charge_full': 4.03,
'charge_full_design': 4.03,
'charge_now': 2.83,
'current_now': 1.08,
'energy': 31.83,
'energy_rate': 12.80,
'voltage_now': 12.24
}
}
}
expected_incomplete_power = {
'energy_consumption_mwh': 2558.0,
'power_samples_mw': [12780.0, 12800.0],
'component_utilization': {
'battery': {
'charge_full': 4.03,
'charge_full_design': 4.03,
'charge_now': 2.83,
'energy_rate': 12.80,
}
}
}
expected_cpu = {
'whole_package': {
'frequency_percent': {
1700000000: 3.29254111574526,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.15926805099535601,
1300000000: 0.47124116307273645,
1200000000: 0.818756100807525,
1100000000: 1.099381692400982,
1000000000: 2.5942528544384302,
900000000: 5.68661122326737,
800000000: 3.850545467654628,
700000000: 2.409691872245393,
600000000: 1.4693702487650486,
500000000: 2.4623575553879373,
400000000: 2.672038150383057,
300000000: 3.415770495015825,
200000000: 69.59817400982045
},
'cstate_residency_percent': {
'C0': 83.67623835616438535,
'C1': 0.2698609589041096,
'C2': 0.2780191780821918,
'C3': 15.77588150684931505
}
},
'cpu0': {
'frequency_percent': {
1700000000: 4.113700564971752,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.1765536723163842,
1300000000: 0.4943502824858757,
1200000000: 0.7944915254237288,
1100000000: 1.2226341807909604,
1000000000: 3.0632062146892656,
900000000: 5.680614406779661,
800000000: 3.6679025423728815,
700000000: 2.379060734463277,
600000000: 1.4124293785310735,
500000000: 2.599752824858757,
400000000: 3.0102401129943503,
300000000: 3.650247175141243,
200000000: 67.73481638418079
},
'cstate_residency_percent': {
'C0': 76.76226164383562,
'C1': 0.3189164383561644,
'C2': 0.4544301369863014,
'C3': 22.4643917808219178
}
},
'cpu1': {
'frequency_percent': {
1700000000: 2.4713816665187682,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.1419824296743278,
1300000000: 0.44813204365959713,
1200000000: 0.8430206761913214,
1100000000: 0.9761292040110037,
1000000000: 2.1252994941875945,
900000000: 5.69260803975508,
800000000: 4.033188392936374,
700000000: 2.4403230100275093,
600000000: 1.526311118999024,
500000000: 2.3249622859171177,
400000000: 2.3338361877717633,
300000000: 3.1812938148904073,
200000000: 71.46153163546012
},
'cstate_residency_percent': {
'C0': 90.5902150684931507,
'C1': 0.2208054794520548,
'C2': 0.1016082191780822,
'C3': 9.0873712328767123
}
}
}
def _assertPowerEqual(self, results, expected):
battery = results['component_utilization']['battery']
expected_battery = expected['component_utilization']['battery']
self.assertItemsEqual(battery.keys(), expected_battery.keys())
for value in battery:
self.assertAlmostEqual(battery[value], expected_battery[value])
self.assertAlmostEqual(results['energy_consumption_mwh'],
expected['energy_consumption_mwh'])
self.assertAlmostEqual(results['power_samples_mw'][0],
expected['power_samples_mw'][0])
self.assertAlmostEqual(results['power_samples_mw'][1],
expected['power_samples_mw'][1])
def testParsePower(self):
results = cros_power_monitor.CrosPowerMonitor.ParsePower(
self.initial_power, self.final_power, 0.2)
self._assertPowerEqual(results, self.expected_power)
def testParseIncompletePowerState(self):
"""Test the case where dump_power_status only outputs partial fields.
CrosPowerMonitor hard-coded expected fields from dump_power_status,
this test ensures it parses successfully when expected fields does not
exist. It's mainly for backward compatibility.
"""
results = cros_power_monitor.CrosPowerMonitor.ParsePower(
self.initial_power, self.incomplete_final_power, 0.2)
self._assertPowerEqual(results, self.expected_incomplete_power)
def testSplitSample(self):
sample = self.initial_power + '\n1408739546\n'
power, time = cros_power_monitor.CrosPowerMonitor.SplitSample(sample)
self.assertEqual(power, self.initial_power)
self.assertEqual(time, 1408739546)
def testCombineResults(self):
result = cros_power_monitor.CrosPowerMonitor.CombineResults(
self.expected_cpu, self.expected_power)
comp_util = result['component_utilization']
# Test power values.
self.assertEqual(result['energy_consumption_mwh'],
self.expected_power['energy_consumption_mwh'])
self.assertEqual(result['power_samples_mw'],
self.expected_power['power_samples_mw'])
self.assertEqual(comp_util['battery'],
self.expected_power['component_utilization']['battery'])
# Test frequency values.
self.assertDictEqual(
comp_util['whole_package']['frequency_percent'],
self.expected_cpu['whole_package']['frequency_percent'])
self.assertDictEqual(
comp_util['cpu0']['frequency_percent'],
self.expected_cpu['cpu0']['frequency_percent'])
self.assertDictEqual(
comp_util['cpu1']['frequency_percent'],
self.expected_cpu['cpu1']['frequency_percent'])
# Test c-state residency values.
self.assertDictEqual(
comp_util['whole_package']['cstate_residency_percent'],
self.expected_cpu['whole_package']['cstate_residency_percent'])
self.assertDictEqual(
comp_util['cpu0']['cstate_residency_percent'],
self.expected_cpu['cpu0']['cstate_residency_percent'])
self.assertDictEqual(
comp_util['cpu1']['cstate_residency_percent'],
self.expected_cpu['cpu1']['cstate_residency_percent'])
def testCanMonitorPower(self):
# TODO(tmandel): Add a test here where the device cannot monitor power.
initial_status = cros_power_monitor.CrosPowerMonitor.ParsePowerStatus(
self.initial_power)
final_status = cros_power_monitor.CrosPowerMonitor.ParsePowerStatus(
self.final_power)
self.assertTrue(cros_power_monitor.CrosPowerMonitor.IsOnBatteryPower(
initial_status, 'peppy'))
self.assertTrue(cros_power_monitor.CrosPowerMonitor.IsOnBatteryPower(
final_status, 'butterfly'))
|
|
import json
import uuid
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import connection
from django.db.models import Count, Q, Sum
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.http import require_POST
import commonware.log
from babel import numbers
from elasticsearch_dsl import Q as ES_Q, query
from slumber.exceptions import HttpClientError, HttpServerError
from tower import ugettext as _
import mkt
import mkt.constants.lookup as lkp
from lib.pay_server import client
from mkt.access import acl
from mkt.access.models import Group
from mkt.account.utils import purchase_list
from mkt.api.permissions import GroupPermission
from mkt.constants.payments import (COMPLETED, FAILED, PENDING, PROVIDER_BANGO,
PROVIDER_LOOKUP, SOLITUDE_REFUND_STATUSES)
from mkt.developers.models import ActivityLog, WebappPaymentAccount
from mkt.developers.providers import get_provider
from mkt.developers.utils import prioritize_app
from mkt.developers.views_payments import _redirect_to_bango_portal
from mkt.lookup.forms import (APIFileStatusForm, APIGroupMembershipFormSet,
APIStatusForm, DeleteUserForm,
TransactionRefundForm, TransactionSearchForm,
PromoImgForm)
from mkt.lookup.serializers import AppLookupSerializer, WebsiteLookupSerializer
from mkt.prices.models import WebappPaymentData, Refund
from mkt.purchase.models import Contribution
from mkt.reviewers.models import QUEUE_TARAKO
from mkt.search.filters import SearchQueryFilter
from mkt.search.views import SearchView
from mkt.site.decorators import json_view, permission_required
from mkt.site.utils import paginate
from mkt.tags.models import attach_tags
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
from mkt.websites.models import Website
from mkt.websites.forms import WebsiteForm
from mkt.websites.views import WebsiteSearchView
log = commonware.log.getLogger('z.lookup')
@permission_required([('Lookup', 'View')])
def home(request):
tx_form = TransactionSearchForm()
return render(request, 'lookup/home.html', {'tx_form': tx_form})
@permission_required([('AccountLookup', 'View')])
def user_summary(request, user_id):
user = get_object_or_404(UserProfile, pk=user_id)
is_admin = acl.action_allowed(request, 'Users', 'Edit')
app_summary = _app_summary(user.pk)
# All refunds that this user has requested (probably as a consumer).
req = Refund.objects.filter(contribution__user=user)
# All instantly-approved refunds that this user has requested.
appr = req.filter(status=mkt.REFUND_APPROVED_INSTANT)
refund_summary = {'approved': appr.count(),
'requested': req.count()}
user_webapps = user.webapps.order_by('-created')
user_webapps = paginate(request, user_webapps, per_page=15)
payment_data = (WebappPaymentData.objects.filter(webapp__authors=user)
.values(*WebappPaymentData.address_fields())
.distinct())
# If the user is deleted, get the log detailing the delete.
try:
delete_log = ActivityLog.objects.for_user(user).filter(
action=mkt.LOG.DELETE_USER_LOOKUP.id)[0]
except IndexError:
delete_log = None
group_membership_formset = APIGroupMembershipFormSet()
provider_portals = get_payment_provider_portals(user=user)
return render(request, 'lookup/user_summary.html',
{'account': user, 'app_summary': app_summary,
'delete_form': DeleteUserForm(), 'delete_log': delete_log,
'is_admin': is_admin, 'refund_summary': refund_summary,
'user_webapps': user_webapps, 'payment_data': payment_data,
'provider_portals': provider_portals,
'group_membership_formset': group_membership_formset})
@permission_required([('AccountLookup', 'View')])
def user_delete(request, user_id):
delete_form = DeleteUserForm(request.POST)
if not delete_form.is_valid():
messages.error(request, delete_form.errors)
return HttpResponseRedirect(reverse('lookup.user_summary',
args=[user_id]))
user = get_object_or_404(UserProfile, pk=user_id)
user.deleted = True
user.save() # Must call the save function to delete user.
mkt.log(mkt.LOG.DELETE_USER_LOOKUP, user,
details={'reason': delete_form.cleaned_data['delete_reason']},
user=request.user)
return HttpResponseRedirect(reverse('lookup.user_summary', args=[user_id]))
@permission_required([('Transaction', 'View')])
def transaction_summary(request, tx_uuid):
tx_data = _transaction_summary(tx_uuid)
if not tx_data:
raise Http404
tx_form = TransactionSearchForm()
tx_refund_form = TransactionRefundForm()
return render(request, 'lookup/transaction_summary.html',
dict({'uuid': tx_uuid, 'tx_form': tx_form,
'tx_refund_form': tx_refund_form}.items() +
tx_data.items()))
def _transaction_summary(tx_uuid):
"""Get transaction details from Solitude API."""
contrib = get_object_or_404(Contribution, uuid=tx_uuid)
contrib_id = contrib.transaction_id
refund_contribs = contrib.get_refund_contribs()
refund_contrib = refund_contribs[0] if refund_contribs.exists() else None
lookup = {'status': True, 'transaction': True}
pay = {}
try:
pay = client.api.generic.transaction.get_object_or_404(uuid=contrib_id)
except ObjectDoesNotExist:
log.warning('Transaction not found in solitude: {0}'.format(tx_uuid))
lookup['transaction'] = False
if pay.get('provider') == PROVIDER_BANGO:
# If we are processing a Bango refund, then support would also like to
# know the package id.
try:
pay['package_id'] = (client.api.by_url(pay['seller'])
.get_object_or_404()['bango']['package_id'])
except (KeyError, ObjectDoesNotExist):
log.warning('Failed to find Bango package_id: {0}'.format(tx_uuid))
# Get refund status.
refund_status = None
if refund_contrib and refund_contrib.refund.status == mkt.REFUND_PENDING:
try:
status = client.api.bango.refund.get_object_or_404(
data={'uuid': refund_contrib.transaction_id})
refund_status = SOLITUDE_REFUND_STATUSES[status['status']]
except (KeyError, HttpServerError):
lookup['status'] = False
log.warning('Refund lookup failed: {0}'.format(tx_uuid))
return {
# Solitude data.
'lookup': lookup,
'amount': pay.get('amount'),
'currency': pay.get('currency'),
'package_id': pay.get('package_id'),
'provider': PROVIDER_LOOKUP.get(pay.get('provider')),
'refund_status': refund_status,
'support': pay.get('uid_support'),
'timestamp': pay.get('created'),
# Zamboni data.
'app': contrib.webapp,
'contrib': contrib,
'related': contrib.related,
'type': mkt.CONTRIB_TYPES.get(contrib.type, _('Incomplete')),
# Filter what is refundable.
'is_refundable': ((contrib.type == mkt.CONTRIB_PURCHASE) and
not refund_contrib),
}
@require_POST
@permission_required([('Transaction', 'Refund')])
def transaction_refund(request, tx_uuid):
contrib = get_object_or_404(Contribution, uuid=tx_uuid,
type=mkt.CONTRIB_PURCHASE)
refund_contribs = contrib.get_refund_contribs()
refund_contrib = refund_contribs[0] if refund_contribs.exists() else None
if refund_contrib:
messages.error(request, _('A refund has already been processed.'))
return redirect(reverse('lookup.transaction_summary', args=[tx_uuid]))
form = TransactionRefundForm(request.POST)
if not form.is_valid():
return render(request, 'lookup/transaction_summary.html',
dict({'uuid': tx_uuid, 'tx_refund_form': form,
'tx_form': TransactionSearchForm()}.items() +
_transaction_summary(tx_uuid).items()))
data = {'uuid': contrib.transaction_id,
'manual': form.cleaned_data['manual']}
if settings.BANGO_FAKE_REFUNDS:
data['fake_response_status'] = {'responseCode':
form.cleaned_data['fake']}
try:
res = client.api.bango.refund.post(data)
except (HttpClientError, HttpServerError):
        # Either the request was not allowed or Solitude had an issue.
log.exception('Refund error: %s' % tx_uuid)
messages.error(
request,
_('You cannot make a refund request for this transaction.'))
return redirect(reverse('lookup.transaction_summary', args=[tx_uuid]))
if res['status'] in [PENDING, COMPLETED]:
# Create refund Contribution by cloning the payment Contribution.
refund_contrib = Contribution.objects.get(id=contrib.id)
refund_contrib.id = None
refund_contrib.save()
log.info('Creating refund transaction from: {0} '
'with transaction_id of: {1}'
.format(contrib.id, res['uuid']))
refund_contrib.update(
type=mkt.CONTRIB_REFUND, related=contrib,
uuid=str(uuid.uuid4()),
amount=-refund_contrib.amount if refund_contrib.amount else None,
transaction_id=res['uuid'])
if res['status'] == PENDING:
# Create pending Refund.
refund_contrib.enqueue_refund(
mkt.REFUND_PENDING, request.user,
refund_reason=form.cleaned_data['refund_reason'])
log.info('Refund pending: %s' % tx_uuid)
messages.success(
request, _('Refund for this transaction now pending.'))
elif res['status'] == COMPLETED:
# Create approved Refund.
refund_contrib.enqueue_refund(
mkt.REFUND_APPROVED, request.user,
refund_reason=form.cleaned_data['refund_reason'])
log.info('Refund approved: %s' % tx_uuid)
messages.success(
request, _('Refund for this transaction successfully approved.'))
elif res['status'] == FAILED:
        # Bango rejected the refund.
log.error('Refund failed: %s' % tx_uuid)
messages.error(
request, _('Refund request for this transaction failed.'))
return redirect(reverse('lookup.transaction_summary', args=[tx_uuid]))
@permission_required([('AppLookup', 'View')])
def app_summary(request, webapp_id):
if unicode(webapp_id).isdigit():
query = {'pk': webapp_id}
else:
query = {'app_slug': webapp_id}
app = get_object_or_404(Webapp.with_deleted, **query)
if request.FILES:
promo_img_form = PromoImgForm(request.POST, request.FILES)
else:
promo_img_form = PromoImgForm()
if 'promo_img' in request.FILES and promo_img_form.is_valid():
promo_img_form.save(app)
messages.success(
request, 'Promo image successfully uploaded.'
' You may have to refresh the page again to see it below.')
return redirect(reverse('lookup.app_summary', args=[app.pk]))
if 'prioritize' in request.POST and not app.priority_review:
prioritize_app(app, request.user)
authors = (app.authors.filter(webappuser__role__in=(mkt.AUTHOR_ROLE_DEV,
mkt.AUTHOR_ROLE_OWNER))
.order_by('display_name'))
if app.premium and app.premium.price:
price = app.premium.price
else:
price = None
purchases, refunds = _app_purchases_and_refunds(app)
provider_portals = get_payment_provider_portals(app=app)
versions = None
status_form = APIStatusForm(initial={
'status': mkt.STATUS_CHOICES_API[app.status]
})
version_status_forms = {}
if app.is_packaged:
versions = app.versions.all().order_by('-created')
for v in versions:
version_status_forms[v.pk] = APIFileStatusForm(initial={
'status': mkt.STATUS_CHOICES_API[v.all_files[0].status]
})
permissions = {}
if app.latest_version:
permissions = app.latest_version.manifest.get('permissions', {})
return render(request, 'lookup/app_summary.html', {
'abuse_reports': app.abuse_reports.count(), 'app': app,
'authors': authors, 'purchases': purchases, 'refunds': refunds,
'price': price, 'provider_portals': provider_portals,
'status_form': status_form, 'versions': versions,
'is_tarako': app.tags.filter(tag_text=QUEUE_TARAKO).exists(),
'tarako_review':
app.additionalreview_set.latest_for_queue(QUEUE_TARAKO),
'version_status_forms': version_status_forms,
'permissions': permissions,
'promo_img_form': promo_img_form,
})
@permission_required([('WebsiteLookup', 'View')])
def website_summary(request, webapp_id):
website = get_object_or_404(Website, pk=webapp_id)
if request.FILES:
promo_img_form = PromoImgForm(request.POST, request.FILES)
else:
promo_img_form = PromoImgForm()
if 'promo_img' in request.FILES and promo_img_form.is_valid():
promo_img_form.save(website)
messages.success(request, 'Promo image successfully uploaded.')
return redirect(reverse('lookup.website_summary', args=[website.pk]))
if not hasattr(website, 'keywords_list'):
attach_tags([website])
return render(request, 'lookup/website_summary.html', {
'website': website,
'promo_img_form': promo_img_form,
})
@permission_required([('WebsiteLookup', 'View')])
def website_edit(request, webapp_id):
website = get_object_or_404(Website, pk=webapp_id)
form = WebsiteForm(request.POST or None, request=request, instance=website)
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Website saved.'))
return redirect(
reverse('lookup.website_summary', args=[website.pk]))
return render(request, 'lookup/website_edit.html', {
'website': website,
'form': form,
})
@permission_required([('AccountLookup', 'View')])
def app_activity(request, webapp_id):
"""Shows the app activity age for single app."""
app = get_object_or_404(Webapp.with_deleted, pk=webapp_id)
user_items = ActivityLog.objects.for_apps([app]).exclude(
action__in=mkt.LOG_HIDE_DEVELOPER)
admin_items = ActivityLog.objects.for_apps([app]).filter(
action__in=mkt.LOG_HIDE_DEVELOPER)
user_items = paginate(request, user_items, per_page=20)
admin_items = paginate(request, admin_items, per_page=20)
return render(request, 'lookup/app_activity.html', {
'admin_items': admin_items, 'app': app, 'user_items': user_items})
@permission_required([('BangoPortal', 'Redirect')])
def bango_portal_from_package(request, package_id):
response = _redirect_to_bango_portal(package_id,
'package_id: %s' % package_id)
if 'Location' in response:
return HttpResponseRedirect(response['Location'])
else:
message = (json.loads(response.content)
.get('__all__', response.content)[0])
messages.error(request, message)
return HttpResponseRedirect(reverse('lookup.home'))
@permission_required([('AccountLookup', 'View')])
def user_purchases(request, user_id):
"""Shows the purchase page for another user."""
user = get_object_or_404(UserProfile, pk=user_id)
is_admin = acl.action_allowed(request, 'Users', 'Edit')
products = purchase_list(request, user)
return render(request, 'lookup/user_purchases.html',
{'pager': products, 'account': user, 'is_admin': is_admin,
'single': bool(None), 'show_link': False})
@permission_required([('AccountLookup', 'View')])
def user_activity(request, user_id):
"""Shows the user activity page for another user."""
user = get_object_or_404(UserProfile, pk=user_id)
products = purchase_list(request, user)
is_admin = acl.action_allowed(request, 'Users', 'Edit')
user_items = ActivityLog.objects.for_user(user).exclude(
action__in=mkt.LOG_HIDE_DEVELOPER)
admin_items = ActivityLog.objects.for_user(user).filter(
action__in=mkt.LOG_HIDE_DEVELOPER)
mkt.log(mkt.LOG.ADMIN_VIEWED_LOG, request.user, user=user)
return render(request, 'lookup/user_activity.html',
{'pager': products, 'account': user, 'is_admin': is_admin,
'single': bool(None),
'user_items': user_items, 'admin_items': admin_items,
'show_link': False})
def _expand_query(q, fields):
should = []
for field in fields:
should.append(ES_Q('term', **{field: {'value': q, 'boost': 10}}))
should.append(ES_Q('match', **{field: {'query': q, 'boost': 4,
'type': 'phrase'}}))
should.append(ES_Q('match', **{field: {'query': q, 'boost': 3}}))
should.append(ES_Q('fuzzy', **{field: {'value': q, 'boost': 2,
'prefix_length': 4}}))
should.append(ES_Q('prefix', **{field: {'value': q, 'boost': 1.5}}))
return query.Bool(should=should)
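# A minimal sketch of how _expand_query might be wired into an
# elasticsearch_dsl search. Illustrative only: the index name and the
# _example_user_search helper are assumptions, not part of this module.
def _example_user_search(q):
    from elasticsearch_dsl import Search
    # Per field this ORs a term, phrase match, plain match, fuzzy and prefix
    # clause with descending boosts (10, 4, 3, 2, 1.5), so exact matches
    # rank above fuzzy and prefix hits.
    return Search(index='webapp').query(
        _expand_query(q, ['name', 'app_slug']))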
@permission_required([('AccountLookup', 'View')])
@json_view
def user_search(request):
results = []
q = request.GET.get('q', u'').lower().strip()
search_fields = ('fxa_uid', 'display_name', 'email')
fields = ('id',) + search_fields
if q.isnumeric():
        # id is added implicitly by the ES filter. Add it explicitly:
qs = UserProfile.objects.filter(pk=q).values(*fields)
else:
qs = UserProfile.objects.all()
filters = Q()
for field in search_fields:
filters = filters | Q(**{'%s__icontains' % field: q})
qs = qs.filter(filters)
qs = qs.values(*fields)
qs = _slice_results(request, qs)
for user in qs:
user['url'] = reverse('lookup.user_summary', args=[user['id']])
results.append(user)
return {'objects': results}
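# Illustrative shape of the JSON payload returned above (field values are
# made up; the reversed URL depends on the project's URL configuration):
#
#   {'objects': [{'id': 1, 'fxa_uid': 'abc123', 'display_name': 'Jane',
#                 'email': 'jane@example.com',
#                 'url': '/lookup/user/1/'}]}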
@permission_required([('Transaction', 'View')])
def transaction_search(request):
tx_form = TransactionSearchForm(request.GET)
if tx_form.is_valid():
return redirect(reverse('lookup.transaction_summary',
args=[tx_form.cleaned_data['q']]))
else:
return render(request, 'lookup/home.html', {'tx_form': tx_form})
class AppLookupSearchView(SearchView):
permission_classes = [GroupPermission('AppLookup', 'View')]
filter_backends = [SearchQueryFilter]
serializer_class = AppLookupSerializer
paginate_by = lkp.SEARCH_LIMIT
max_paginate_by = lkp.MAX_RESULTS
def get_paginate_by(self, *args, **kwargs):
if self.request.GET.get(self.paginate_by_param) == 'max':
return self.max_paginate_by
else:
return super(AppLookupSearchView, self).get_paginate_by(*args,
**kwargs)
class WebsiteLookupSearchView(WebsiteSearchView):
permission_classes = [GroupPermission('WebsiteLookup', 'View')]
filter_backends = [SearchQueryFilter]
serializer_class = WebsiteLookupSerializer
paginate_by = lkp.SEARCH_LIMIT
max_paginate_by = lkp.MAX_RESULTS
def get_paginate_by(self, *args, **kwargs):
if self.request.GET.get(self.paginate_by_param) == 'max':
return self.max_paginate_by
else:
return super(WebsiteLookupSearchView,
self).get_paginate_by(*args, **kwargs)
def _app_summary(user_id):
sql = """
select currency,
sum(case when type=%(purchase)s then 1 else 0 end)
as app_total,
sum(case when type=%(purchase)s then amount else 0.0 end)
as app_amount
from stats_contributions
where user_id=%(user_id)s
group by currency
"""
cursor = connection.cursor()
cursor.execute(sql, {'user_id': user_id,
'purchase': mkt.CONTRIB_PURCHASE})
summary = {'app_total': 0,
'app_amount': {}}
cols = [cd[0] for cd in cursor.description]
while 1:
row = cursor.fetchone()
if not row:
break
row = dict(zip(cols, row))
for cn in cols:
if cn.endswith('total'):
summary[cn] += row[cn]
elif cn.endswith('amount'):
summary[cn][row['currency']] = row[cn]
return summary
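# Illustrative shape of the summary returned above (figures are made up):
#
#   {'app_total': 3,
#    'app_amount': {'USD': Decimal('2.97'), 'EUR': Decimal('0.99')}}
#
# The SQL groups contributions by currency; columns ending in 'total' are
# summed across all currencies, while columns ending in 'amount' are keyed
# by currency.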
def _app_purchases_and_refunds(webapp):
purchases = {}
now = datetime.now()
base_qs = (Contribution.objects.values('currency')
.annotate(total=Count('id'),
amount=Sum('amount'))
.filter(webapp=webapp)
.exclude(type__in=[mkt.CONTRIB_REFUND,
mkt.CONTRIB_CHARGEBACK,
mkt.CONTRIB_PENDING]))
for typ, start_date in (('last_24_hours', now - timedelta(hours=24)),
('last_7_days', now - timedelta(days=7)),
('alltime', None),):
qs = base_qs.all()
if start_date:
qs = qs.filter(created__gte=start_date)
sums = list(qs)
purchases[typ] = {'total': sum(s['total'] for s in sums),
'amounts': [numbers.format_currency(s['amount'],
s['currency'])
for s in sums if s['currency']]}
refunds = {}
rejected_q = Q(status=mkt.REFUND_DECLINED) | Q(status=mkt.REFUND_FAILED)
qs = Refund.objects.filter(contribution__webapp=webapp)
refunds['requested'] = qs.exclude(rejected_q).count()
percent = 0.0
total = purchases['alltime']['total']
if total:
percent = (refunds['requested'] / float(total)) * 100.0
refunds['percent_of_purchases'] = '%.1f%%' % percent
refunds['auto-approved'] = (qs.filter(status=mkt.REFUND_APPROVED_INSTANT)
.count())
refunds['approved'] = qs.filter(status=mkt.REFUND_APPROVED).count()
refunds['rejected'] = qs.filter(rejected_q).count()
return purchases, refunds
def _slice_results(request, qs):
if request.GET.get('limit') == 'max':
return qs[:lkp.MAX_RESULTS]
else:
return qs[:lkp.SEARCH_LIMIT]
def get_payment_provider_portals(app=None, user=None):
"""
Get a list of dicts describing the payment portals for this app or user.
Either app or user is required.
"""
provider_portals = []
if app:
q = dict(webapp=app)
elif user:
q = dict(payment_account__user=user)
else:
raise ValueError('user or app is required')
for acct in (WebappPaymentAccount.objects.filter(**q)
.select_related('payment_account')):
provider = get_provider(id=acct.payment_account.provider)
portal_url = provider.get_portal_url(acct.webapp.app_slug)
if portal_url:
provider_portals.append({
'provider': provider,
'app': acct.webapp,
'portal_url': portal_url,
'payment_account': acct.payment_account
})
return provider_portals
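# Each entry in the returned list has roughly this shape (illustrative only):
#
#   {'provider': <payment provider instance>,
#    'app': <Webapp>,
#    'portal_url': '<provider-specific portal URL>',
#    'payment_account': <PaymentAccount>}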
@permission_required([('AccountLookup', 'View')])
def group_summary(request, group_id):
group = get_object_or_404(Group, pk=group_id)
return render(request, 'lookup/group_summary.html',
{'group': group})
@permission_required([('AccountLookup', 'View')])
@json_view
def group_search(request):
results = []
q = request.GET.get('q', u'').lower().strip()
search_fields = ('name', 'rules')
fields = ('id',) + search_fields
if q.isnumeric():
        # id is added implicitly by the ES filter. Add it explicitly:
qs = Group.objects.filter(pk=q).values(*fields)
else:
qs = Group.objects.all()
filters = Q()
for field in search_fields:
filters = filters | Q(**{'%s__icontains' % field: q})
qs = qs.filter(filters)
qs = qs.values(*fields)
qs = _slice_results(request, qs)
for user in qs:
user['url'] = reverse('lookup.group_summary', args=[user['id']])
results.append(user)
return {'objects': results}
|
|
"""Tests for light platform."""
from datetime import timedelta
import logging
from typing import Callable, NamedTuple
from pyHS100 import SmartDeviceException
import pytest
from homeassistant.components import tplink
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.components.tplink.common import (
CONF_DIMMER,
CONF_DISCOVERY,
CONF_LIGHT,
)
from homeassistant.components.tplink.light import SLEEP_TIME
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.async_mock import Mock, PropertyMock, patch
from tests.common import async_fire_time_changed
class LightMockData(NamedTuple):
"""Mock light data."""
sys_info: dict
light_state: dict
set_light_state: Callable[[dict], None]
set_light_state_mock: Mock
get_light_state_mock: Mock
current_consumption_mock: Mock
get_sysinfo_mock: Mock
get_emeter_daily_mock: Mock
get_emeter_monthly_mock: Mock
class SmartSwitchMockData(NamedTuple):
"""Mock smart switch data."""
sys_info: dict
state_mock: Mock
brightness_mock: Mock
get_sysinfo_mock: Mock
@pytest.fixture(name="light_mock_data")
def light_mock_data_fixture() -> None:
"""Create light mock data."""
sys_info = {
"sw_ver": "1.2.3",
"hw_ver": "2.3.4",
"mac": "aa:bb:cc:dd:ee:ff",
"mic_mac": "00:11:22:33:44",
"type": "light",
"hwId": "1234",
"fwId": "4567",
"oemId": "891011",
"dev_name": "light1",
"rssi": 11,
"latitude": "0",
"longitude": "0",
"is_color": True,
"is_dimmable": True,
"is_variable_color_temp": True,
"model": "LB120",
"alias": "light1",
}
light_state = {
"on_off": True,
"dft_on_state": {
"brightness": 12,
"color_temp": 3200,
"hue": 110,
"saturation": 90,
},
"brightness": 13,
"color_temp": 3300,
"hue": 110,
"saturation": 90,
}
def set_light_state(state) -> None:
nonlocal light_state
drt_on_state = light_state["dft_on_state"]
drt_on_state.update(state.get("dft_on_state", {}))
light_state.update(state)
light_state["dft_on_state"] = drt_on_state
return light_state
set_light_state_patch = patch(
"homeassistant.components.tplink.common.SmartBulb.set_light_state",
side_effect=set_light_state,
)
get_light_state_patch = patch(
"homeassistant.components.tplink.common.SmartBulb.get_light_state",
return_value=light_state,
)
current_consumption_patch = patch(
"homeassistant.components.tplink.common.SmartDevice.current_consumption",
return_value=3.23,
)
get_sysinfo_patch = patch(
"homeassistant.components.tplink.common.SmartDevice.get_sysinfo",
return_value=sys_info,
)
get_emeter_daily_patch = patch(
"homeassistant.components.tplink.common.SmartDevice.get_emeter_daily",
return_value={
1: 1.01,
2: 1.02,
3: 1.03,
4: 1.04,
5: 1.05,
6: 1.06,
7: 1.07,
8: 1.08,
9: 1.09,
10: 1.10,
11: 1.11,
12: 1.12,
},
)
get_emeter_monthly_patch = patch(
"homeassistant.components.tplink.common.SmartDevice.get_emeter_monthly",
return_value={
1: 2.01,
2: 2.02,
3: 2.03,
4: 2.04,
5: 2.05,
6: 2.06,
7: 2.07,
8: 2.08,
9: 2.09,
10: 2.10,
11: 2.11,
12: 2.12,
},
)
with set_light_state_patch as set_light_state_mock, get_light_state_patch as get_light_state_mock, current_consumption_patch as current_consumption_mock, get_sysinfo_patch as get_sysinfo_mock, get_emeter_daily_patch as get_emeter_daily_mock, get_emeter_monthly_patch as get_emeter_monthly_mock:
yield LightMockData(
sys_info=sys_info,
light_state=light_state,
set_light_state=set_light_state,
set_light_state_mock=set_light_state_mock,
get_light_state_mock=get_light_state_mock,
current_consumption_mock=current_consumption_mock,
get_sysinfo_mock=get_sysinfo_mock,
get_emeter_daily_mock=get_emeter_daily_mock,
get_emeter_monthly_mock=get_emeter_monthly_mock,
)
@pytest.fixture(name="dimmer_switch_mock_data")
def dimmer_switch_mock_data_fixture() -> None:
"""Create dimmer switch mock data."""
sys_info = {
"sw_ver": "1.2.3",
"hw_ver": "2.3.4",
"mac": "aa:bb:cc:dd:ee:ff",
"mic_mac": "00:11:22:33:44",
"type": "switch",
"hwId": "1234",
"fwId": "4567",
"oemId": "891011",
"dev_name": "dimmer1",
"rssi": 11,
"latitude": "0",
"longitude": "0",
"is_color": False,
"is_dimmable": True,
"is_variable_color_temp": False,
"model": "HS220",
"alias": "dimmer1",
"feature": ":",
"relay_state": 1,
"brightness": 13,
}
def state(*args, **kwargs):
nonlocal sys_info
if len(args) == 0:
return sys_info["relay_state"]
if args[0] == "ON":
sys_info["relay_state"] = 1
else:
sys_info["relay_state"] = 0
def brightness(*args, **kwargs):
nonlocal sys_info
if len(args) == 0:
return sys_info["brightness"]
if sys_info["brightness"] == 0:
sys_info["relay_state"] = 0
else:
sys_info["relay_state"] = 1
sys_info["brightness"] = args[0]
get_sysinfo_patch = patch(
"homeassistant.components.tplink.common.SmartDevice.get_sysinfo",
return_value=sys_info,
)
state_patch = patch(
"homeassistant.components.tplink.common.SmartPlug.state",
new_callable=PropertyMock,
side_effect=state,
)
brightness_patch = patch(
"homeassistant.components.tplink.common.SmartPlug.brightness",
new_callable=PropertyMock,
side_effect=brightness,
)
with brightness_patch as brightness_mock, state_patch as state_mock, get_sysinfo_patch as get_sysinfo_mock:
yield SmartSwitchMockData(
sys_info=sys_info,
brightness_mock=brightness_mock,
state_mock=state_mock,
get_sysinfo_mock=get_sysinfo_mock,
)
async def update_entity(hass: HomeAssistant, entity_id: str) -> None:
"""Run an update action for an entity."""
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
await hass.async_block_till_done()
async def test_smartswitch(
hass: HomeAssistant, dimmer_switch_mock_data: SmartSwitchMockData
) -> None:
"""Test function."""
sys_info = dimmer_switch_mock_data.sys_info
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
await async_setup_component(
hass,
tplink.DOMAIN,
{
tplink.DOMAIN: {
CONF_DISCOVERY: False,
CONF_DIMMER: [{CONF_HOST: "123.123.123.123"}],
}
},
)
await hass.async_block_till_done()
assert hass.states.get("light.dimmer1")
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.dimmer1"},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.dimmer1")
assert hass.states.get("light.dimmer1").state == "off"
assert sys_info["relay_state"] == 0
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.dimmer1", ATTR_BRIGHTNESS: 50},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.dimmer1")
state = hass.states.get("light.dimmer1")
assert state.state == "on"
assert state.attributes["brightness"] == 51
assert sys_info["relay_state"] == 1
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.dimmer1", ATTR_BRIGHTNESS: 55},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.dimmer1")
state = hass.states.get("light.dimmer1")
assert state.state == "on"
assert state.attributes["brightness"] == 56
assert sys_info["brightness"] == 22
sys_info["relay_state"] = 0
sys_info["brightness"] = 66
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.dimmer1"},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.dimmer1")
state = hass.states.get("light.dimmer1")
assert state.state == "off"
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.dimmer1"},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.dimmer1")
state = hass.states.get("light.dimmer1")
assert state.state == "on"
assert state.attributes["brightness"] == 168
assert sys_info["brightness"] == 66
async def test_light(hass: HomeAssistant, light_mock_data: LightMockData) -> None:
"""Test function."""
light_state = light_mock_data.light_state
set_light_state = light_mock_data.set_light_state
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
await async_setup_component(
hass,
tplink.DOMAIN,
{
tplink.DOMAIN: {
CONF_DISCOVERY: False,
CONF_LIGHT: [{CONF_HOST: "123.123.123.123"}],
}
},
)
await hass.async_block_till_done()
assert hass.states.get("light.light1")
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.light1"},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.light1")
assert hass.states.get("light.light1").state == "off"
assert light_state["on_off"] == 0
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.light1", ATTR_COLOR_TEMP: 222, ATTR_BRIGHTNESS: 50},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.light1")
state = hass.states.get("light.light1")
assert state.state == "on"
assert state.attributes["brightness"] == 51
assert state.attributes["hs_color"] == (110, 90)
assert state.attributes["color_temp"] == 222
assert light_state["on_off"] == 1
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.light1", ATTR_BRIGHTNESS: 55, ATTR_HS_COLOR: (23, 27)},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.light1")
state = hass.states.get("light.light1")
assert state.state == "on"
assert state.attributes["brightness"] == 56
assert state.attributes["hs_color"] == (23, 27)
assert light_state["brightness"] == 22
assert light_state["hue"] == 23
assert light_state["saturation"] == 27
light_state["on_off"] = 0
light_state["dft_on_state"]["on_off"] = 0
light_state["brightness"] = 66
light_state["dft_on_state"]["brightness"] = 66
light_state["color_temp"] = 6400
light_state["dft_on_state"]["color_temp"] = 123
light_state["hue"] = 77
light_state["dft_on_state"]["hue"] = 77
light_state["saturation"] = 78
light_state["dft_on_state"]["saturation"] = 78
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.light1"},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.light1")
state = hass.states.get("light.light1")
assert state.state == "off"
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.light1"},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.light1")
state = hass.states.get("light.light1")
assert state.state == "on"
assert state.attributes["brightness"] == 168
assert state.attributes["hs_color"] == (77, 78)
assert state.attributes["color_temp"] == 156
assert light_state["brightness"] == 66
assert light_state["hue"] == 77
assert light_state["saturation"] == 78
set_light_state({"brightness": 91, "dft_on_state": {"brightness": 91}})
await update_entity(hass, "light.light1")
state = hass.states.get("light.light1")
assert state.attributes["brightness"] == 232
async def test_get_light_state_retry(
hass: HomeAssistant, light_mock_data: LightMockData
) -> None:
"""Test function."""
# Setup test for retries for sysinfo.
get_sysinfo_call_count = 0
def get_sysinfo_side_effect():
nonlocal get_sysinfo_call_count
get_sysinfo_call_count += 1
# Need to fail on the 2nd call because the first call is used to
# determine if the device is online during the light platform's
# setup hook.
if get_sysinfo_call_count == 2:
raise SmartDeviceException()
return light_mock_data.sys_info
light_mock_data.get_sysinfo_mock.side_effect = get_sysinfo_side_effect
# Setup test for retries of setting state information.
set_state_call_count = 0
def set_light_state_side_effect(state_data: dict):
nonlocal set_state_call_count, light_mock_data
set_state_call_count += 1
if set_state_call_count == 1:
raise SmartDeviceException()
return light_mock_data.set_light_state(state_data)
light_mock_data.set_light_state_mock.side_effect = set_light_state_side_effect
# Setup component.
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
await async_setup_component(
hass,
tplink.DOMAIN,
{
tplink.DOMAIN: {
CONF_DISCOVERY: False,
CONF_LIGHT: [{CONF_HOST: "123.123.123.123"}],
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.light1"},
blocking=True,
)
await hass.async_block_till_done()
await update_entity(hass, "light.light1")
assert light_mock_data.get_sysinfo_mock.call_count > 1
assert light_mock_data.get_light_state_mock.call_count > 1
assert light_mock_data.set_light_state_mock.call_count > 1
assert light_mock_data.get_sysinfo_mock.call_count < 40
assert light_mock_data.get_light_state_mock.call_count < 40
assert light_mock_data.set_light_state_mock.call_count < 10
async def test_update_failure(
hass: HomeAssistant, light_mock_data: LightMockData, caplog
):
"""Test that update failures are logged."""
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
await async_setup_component(
hass,
tplink.DOMAIN,
{
tplink.DOMAIN: {
CONF_DISCOVERY: False,
CONF_LIGHT: [{CONF_HOST: "123.123.123.123"}],
}
},
)
await hass.async_block_till_done()
caplog.clear()
caplog.set_level(logging.WARNING)
await hass.helpers.entity_component.async_update_entity("light.light1")
assert caplog.text == ""
with patch("homeassistant.components.tplink.light.MAX_ATTEMPTS", 0):
caplog.clear()
caplog.set_level(logging.WARNING)
await hass.helpers.entity_component.async_update_entity("light.light1")
assert "Could not read state for 123.123.123.123|light1" in caplog.text
get_state_call_count = 0
def get_light_state_side_effect():
nonlocal get_state_call_count
get_state_call_count += 1
if get_state_call_count == 1:
raise SmartDeviceException()
return light_mock_data.light_state
light_mock_data.get_light_state_mock.side_effect = get_light_state_side_effect
with patch("homeassistant.components.tplink.light", MAX_ATTEMPTS=2, SLEEP_TIME=0):
caplog.clear()
caplog.set_level(logging.DEBUG)
await update_entity(hass, "light.light1")
assert (
f"Retrying in {SLEEP_TIME} seconds for 123.123.123.123|light1"
in caplog.text
)
assert "Device 123.123.123.123|light1 responded after " in caplog.text
async def test_async_setup_entry_unavailable(
hass: HomeAssistant, light_mock_data: LightMockData, caplog
):
"""Test unavailable devices trigger a later retry."""
caplog.clear()
caplog.set_level(logging.WARNING)
with patch(
"homeassistant.components.tplink.common.SmartDevice.get_sysinfo",
side_effect=SmartDeviceException,
):
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
await async_setup_component(
hass,
tplink.DOMAIN,
{
tplink.DOMAIN: {
CONF_DISCOVERY: False,
CONF_LIGHT: [{CONF_HOST: "123.123.123.123"}],
}
},
)
await hass.async_block_till_done()
assert not hass.states.get("light.light1")
future = utcnow() + timedelta(seconds=30)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get("light.light1")
|
|
# encoding: utf-8
from django import forms
from django.contrib import admin, messages
from django.utils.html import format_html
from endorsements.models import Category, Tag
from wikipedia.models import BulkImport, ImportedEndorsement, \
ImportedEndorser, ImportedNewspaper, \
ImportedResult, ImportedRepresentative, \
ElectoralVotes
@admin.register(BulkImport)
class BulkImportAdmin(admin.ModelAdmin):
list_display = ('slug', 'created_at')
list_filter = ('slug',)
class ConfirmedEndorserFilter(admin.SimpleListFilter):
title = 'Confirmed endorser'
parameter_name = 'is_confirmed'
def lookups(self, request, model_admin):
return (
('yes', 'Yes'),
('no', 'No'),
)
def queryset(self, request, queryset):
if self.value() == 'yes':
return queryset.filter(confirmed_endorser__isnull=False)
if self.value() == 'no':
return queryset.filter(confirmed_endorser__isnull=True)
class NeedsFilter(admin.SimpleListFilter):
title = 'needs'
parameter_name = 'needs'
def lookups(self, request, model_admin):
return (
('tags', 'Tags'),
('org_type', 'Org type'),
('gender', 'Gender'),
('race', 'Race'),
('occupation', 'Occupation'),
('location', 'Location'),
('party', 'Party'),
)
def queryset(self, request, queryset):
value = self.value()
queryset = queryset.filter(confirmed_endorser__isnull=False)
if value == 'tags':
return queryset.filter(confirmed_endorser__tags=None)
elif value == 'org_type':
return queryset.filter(
confirmed_endorser__is_personal=False
).exclude(
confirmed_endorser__tags__category=7
)
elif value == 'gender':
return queryset.filter(
confirmed_endorser__is_personal=True
).exclude(
confirmed_endorser__tags__category=1
)
elif value == 'race':
return queryset.filter(
confirmed_endorser__is_personal=True
).exclude(
confirmed_endorser__tags__category=4
)
elif value == 'occupation':
return queryset.filter(
confirmed_endorser__is_personal=True
).exclude(
confirmed_endorser__tags__category=3
)
elif value == 'location':
return queryset.filter(
confirmed_endorser__tags=Tag.objects.get(name='Politician')
).exclude(
confirmed_endorser__tags__category=8
)
elif value == 'party':
return queryset.filter(
confirmed_endorser__tags=Tag.objects.get(name='Politician')
).exclude(
confirmed_endorser__tags__category=2
)
def confirm_endorsers(modeladmin, request, queryset):
num_confirmed = 0
for endorsement in queryset:
if endorsement.confirmed_endorser is not None:
continue
endorser = endorsement.get_likely_endorser()
endorsement.confirmed_endorser = endorser
endorsement.save()
if endorser is not None:
num_confirmed += 1
modeladmin.message_user(
request,
'Confirmed endorsers for {n} endorsements'.format(n=num_confirmed),
messages.SUCCESS
)
def make_personal(modeladmin, request, queryset):
for instance in queryset:
endorser = instance.confirmed_endorser
if endorser:
endorser.is_personal = True
endorser.save()
def make_org(modeladmin, request, queryset):
for instance in queryset:
endorser = instance.confirmed_endorser
if endorser:
endorser.is_personal = False
endorser.save()
def remove_tag(modeladmin, request, queryset):
tag_pk = request.POST['tag']
try:
tag = Tag.objects.get(pk=tag_pk)
except Tag.DoesNotExist:
modeladmin.message_user(
request,
"Could not find tag with pk {tag_pk}".format(
tag_pk=tag_pk
),
messages.ERROR,
)
return
for instance in queryset:
endorser = instance.confirmed_endorser
if endorser:
endorser.tags.remove(tag)
modeladmin.message_user(
request,
"Removed tag {tag} for {n} endorsers".format(
tag=tag.name,
n=queryset.count(),
),
messages.SUCCESS,
)
def add_tag(modeladmin, request, queryset):
tag_pk = request.POST['tag']
try:
tag = Tag.objects.get(pk=tag_pk)
except Tag.DoesNotExist:
modeladmin.message_user(
request,
"Could not find tag with pk {tag_pk}".format(
tag_pk=tag_pk
),
messages.ERROR,
)
return
failures = []
for instance in queryset:
endorser = instance.confirmed_endorser
if endorser:
            name = endorser.name
if endorser.is_personal:
if tag.category.allow_personal:
endorser.tags.add(tag)
continue
else:
if tag.category.allow_org:
endorser.tags.add(tag)
continue
else:
            name = u'%s' % instance
failures.append(name)
modeladmin.message_user(
request,
"Added tag {tag} for {n} endorsers (failures: {failures})".format(
tag=tag.name,
n=queryset.count(),
failures=u', '.join(failures),
),
messages.SUCCESS,
)
class EndorserActionForm(admin.helpers.ActionForm):
tag = forms.ModelChoiceField(Tag.objects.all())
class ExcludedCategoriesFilter(admin.SimpleListFilter):
title = 'excluded categories'
parameter_name = 'excludedcategories'
def lookups(self, request, model_admin):
return [
(category.pk, category.name)
for category in Category.objects.all()
]
def queryset(self, request, queryset):
if self.value():
tag_pks = [
tag.pk
for tag in Tag.objects.filter(category=self.value())
]
return queryset.exclude(confirmed_endorser__tags__in=tag_pks)
else:
return queryset
@admin.register(ImportedEndorsement)
class ImportedEndorsementAdmin(admin.ModelAdmin):
list_display = ('get_image', 'get_display', 'is_confirmed', 'sections',
'get_import_date', 'show_raw_text')
list_filter = (ConfirmedEndorserFilter, NeedsFilter,
ExcludedCategoriesFilter,
'bulk_import__slug', 'sections')
action_form = EndorserActionForm
actions = [add_tag, remove_tag, confirm_endorsers, make_personal, make_org]
def show_raw_text(self, obj):
return obj.raw_text[:50]
def get_import_date(self, obj):
return obj.bulk_import.created_at
def get_image(self, obj):
if obj.confirmed_endorser:
return obj.confirmed_endorser.get_image()
get_image.allow_tags = True
def get_display(self, obj):
if obj.confirmed_endorser:
return self.show_endorser(obj)
else:
return self.get_parsed_display(obj)
def get_parsed_display(self, obj):
parsed_attributes = obj.parse_text()
return format_html(
u'<h3>Name: {endorser_name}</h3>'
u'<p>Source: <a href="{citation_url}">{citation_url}</a> '
'on {citation_date} ({citation_name})</p>'
u'<p>Details: {endorser_details}</p>'
''.format(**parsed_attributes)
)
def is_confirmed(self, obj):
return obj.confirmed_endorser is not None
is_confirmed.boolean = True
def get_endorser(self, obj):
return obj.confirmed_endorser or obj.get_likely_endorser()
def show_endorser(self, obj):
endorser = self.get_endorser(obj)
if endorser:
return format_html(
u'<h3><a href="{url}">{name}</a> ({pk})</h3>'
u'<p>{description} - {type}</p>'
u'<p>{tags}</p>'.format(
url=endorser.get_absolute_url(),
name=endorser.name,
pk=endorser.pk,
description=endorser.description,
type='personal' if endorser.is_personal else 'org',
tags=' / '.join(tag.name for tag in endorser.tags.all()),
)
)
@admin.register(ImportedResult)
class ImportedResultAdmin(admin.ModelAdmin):
list_display = ('tag', 'candidate', 'count')
@admin.register(ImportedEndorser)
class ImportedEndorserAdmin(admin.ModelAdmin):
pass
@admin.register(ImportedNewspaper)
class ImportedNewspaperAdmin(admin.ModelAdmin):
list_display = ('name', 'show_endorser', 'get_section_display',
'city', 'state')
actions = [confirm_endorsers]
list_filter = (ConfirmedEndorserFilter, 'section')
def get_endorser(self, obj):
return obj.confirmed_endorser or obj.get_likely_endorser()
def show_endorser(self, obj):
endorser = self.get_endorser(obj)
if endorser:
return format_html(
u'<h3><a href="{url}">{name}</a></h3><p>{description}'.format(
name=endorser.name,
url=endorser.get_absolute_url(),
description=endorser.description
)
)
class HasEndorsementsFilter(admin.SimpleListFilter):
title = 'Has endorsements'
parameter_name = 'has_endorsements'
def lookups(self, request, model_admin):
return (
('yes', 'Yes'),
('no', 'No'),
)
def queryset(self, request, queryset):
if self.value() == 'yes':
return queryset.filter(confirmed_endorser__current_position__isnull=False)
if self.value() == 'no':
return queryset.filter(confirmed_endorser__current_position=None)
@admin.register(ImportedRepresentative)
class ImportedRepresentativeAdmin(admin.ModelAdmin):
list_display = ('get_display', 'get_image', 'show_endorser', 'is_confirmed')
list_filter = [HasEndorsementsFilter, 'party', 'state']
action_form = EndorserActionForm
actions = [add_tag, remove_tag, confirm_endorsers, make_personal, make_org]
def is_confirmed(self, obj):
return obj.confirmed_endorser is not None
is_confirmed.boolean = True
def get_display(self, obj):
return format_html(
u'<h3>{name}</h3><p>{party} - {state}'.format(
name=obj.name,
party=obj.party,
state=obj.state
)
)
def get_image(self, obj):
endorser = obj.confirmed_endorser or obj.get_likely_endorser()
if endorser:
return endorser.get_image()
get_image.allow_tags = True
def show_endorser(self, obj):
endorser = obj.confirmed_endorser or obj.get_likely_endorser()
if endorser:
return format_html(
u'<h3><a href="{url}">{name}</a> ({pk})</h3>'
u'<p>{description} - {type}</p>'
u'<p>{tags}</p>'.format(
url=endorser.get_absolute_url(),
name=endorser.name,
pk=endorser.pk,
description=endorser.description,
type='personal' if endorser.is_personal else 'org',
tags=' / '.join(tag.name for tag in endorser.tags.all()),
)
)
@admin.register(ElectoralVotes)
class ElectoralVotesAdmin(admin.ModelAdmin):
list_display = ('state', 'count')
|
|
import collections
import hashlib
import os
import re
import time
import uuid
from datetime import datetime
from django.conf import settings
from django.core.cache import cache
from django.db import connection, models, transaction
import caching.base as caching
import amo
import amo.models
import sharing.utils as sharing
from access import acl
from addons.models import Addon, AddonRecommendation
from amo.helpers import absolutify, user_media_path, user_media_url
from amo.urlresolvers import reverse
from amo.utils import sorted_groupby
from stats.models import CollectionShareCountTotal
from translations.fields import (LinkifiedField, save_signal,
NoLinksNoMarkupField, TranslatedField)
from users.models import UserProfile
from versions import compare
SPECIAL_SLUGS = amo.COLLECTION_SPECIAL_SLUGS
class TopTags(object):
"""Descriptor to manage a collection's top tags in cache."""
def key(self, obj):
return '%s:top-tags:%s' % (settings.CACHE_PREFIX, obj.id)
def __get__(self, obj, type=None):
if obj is None:
return self
return cache.get(self.key(obj), [])
def __set__(self, obj, value):
two_days = 60 * 60 * 24 * 2
cache.set(self.key(obj), value, two_days)
class CollectionQuerySet(caching.CachingQuerySet):
def with_has_addon(self, addon_id):
"""Add a `has_addon` property to each collection.
`has_addon` will be `True` if `addon_id` exists in that
particular collection.
"""
has_addon = """
select 1 from addons_collections as ac
where ac.addon_id = %s and ac.collection_id = collections.id
limit 1"""
return self.extra(
select={'has_addon': has_addon},
select_params=(addon_id,))
class CollectionManager(amo.models.ManagerBase):
def get_query_set(self):
qs = super(CollectionManager, self).get_query_set()
qs = qs._clone(klass=CollectionQuerySet)
return qs.transform(Collection.transformer)
def manual(self):
"""Only hand-crafted, favorites, and featured collections should appear
in this filter."""
types = (amo.COLLECTION_NORMAL, amo.COLLECTION_FAVORITES,
amo.COLLECTION_FEATURED, )
return self.filter(type__in=types)
def listed(self):
"""Return public collections only."""
return self.filter(listed=True)
def publishable_by(self, user):
"""Collections that are publishable by a user."""
owned_by = models.Q(author=user.id)
publishable_by = models.Q(users=user.id)
collections = self.filter(owned_by | publishable_by)
return collections.distinct().order_by('name__localized_string')
class CollectionBase:
"""A mixin with methods common to Collection and SyncedCollection."""
@classmethod
def make_index(cls, addon_ids):
ids = ':'.join(map(str, sorted(addon_ids)))
return hashlib.md5(ids).hexdigest()
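    # A minimal sketch of what make_index produces (the ids are made up):
    #
    #   CollectionBase.make_index([3, 1, 2])
    #   # == hashlib.md5('1:2:3').hexdigest()
    #
    # Sorting the ids first makes the index independent of add-on ordering.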
def get_recs(self, app, version):
addons = list(self.addons.values_list('id', flat=True))
return self.get_recs_from_ids(addons, app, version)
@classmethod
def get_recs_from_ids(cls, addons, app, version, compat_mode='strict'):
vint = compare.version_int(version)
recs = RecommendedCollection.build_recs(addons)
qs = (Addon.objects.public()
.filter(id__in=recs, appsupport__app=app.id,
appsupport__min__lte=vint))
if compat_mode == 'strict':
qs = qs.filter(appsupport__max__gte=vint)
return recs, qs
class Collection(CollectionBase, amo.models.ModelBase):
TYPE_CHOICES = amo.COLLECTION_CHOICES.items()
uuid = models.CharField(max_length=36, blank=True, unique=True)
name = TranslatedField(require_locale=False)
# nickname is deprecated. Use slug.
nickname = models.CharField(max_length=30, blank=True, unique=True,
null=True)
slug = models.CharField(max_length=30, blank=True, null=True)
description = NoLinksNoMarkupField(require_locale=False)
default_locale = models.CharField(max_length=10, default='en-US',
db_column='defaultlocale')
type = models.PositiveIntegerField(db_column='collection_type',
choices=TYPE_CHOICES, default=0)
icontype = models.CharField(max_length=25, blank=True)
listed = models.BooleanField(
default=True, help_text='Collections are either listed or private.')
subscribers = models.PositiveIntegerField(default=0)
downloads = models.PositiveIntegerField(default=0)
weekly_subscribers = models.PositiveIntegerField(default=0)
monthly_subscribers = models.PositiveIntegerField(default=0)
application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='application_id',
null=True)
addon_count = models.PositiveIntegerField(default=0,
db_column='addonCount')
upvotes = models.PositiveIntegerField(default=0)
downvotes = models.PositiveIntegerField(default=0)
rating = models.FloatField(default=0)
all_personas = models.BooleanField(
default=False,
help_text='Does this collection only contain Themes?')
addons = models.ManyToManyField(Addon, through='CollectionAddon',
related_name='collections')
author = models.ForeignKey(UserProfile, null=True,
related_name='collections')
users = models.ManyToManyField(UserProfile, through='CollectionUser',
related_name='collections_publishable')
addon_index = models.CharField(
max_length=40, null=True, db_index=True,
help_text='Custom index for the add-ons in this collection')
# This gets overwritten in the transformer.
share_counts = collections.defaultdict(int)
objects = CollectionManager()
top_tags = TopTags()
class Meta(amo.models.ModelBase.Meta):
db_table = 'collections'
unique_together = (('author', 'slug'),)
def __unicode__(self):
return u'%s (%s)' % (self.name, self.addon_count)
def flush_urls(self):
urls = ['*%s' % self.get_url_path(),
self.icon_url]
return urls
def save(self, **kw):
if not self.uuid:
self.uuid = unicode(uuid.uuid4())
if not self.slug:
self.slug = self.uuid[:30]
self.clean_slug()
# Maintain our index of add-on ids.
if self.id:
ids = self.addons.values_list('id', flat=True)
self.addon_index = self.make_index(ids)
super(Collection, self).save(**kw)
def clean_slug(self):
if self.type in SPECIAL_SLUGS:
self.slug = SPECIAL_SLUGS[self.type]
return
if self.slug in SPECIAL_SLUGS.values():
self.slug += '~'
if not self.author:
return
qs = self.author.collections.using('default')
slugs = dict((slug, id) for slug, id in qs.values_list('slug', 'id'))
if self.slug in slugs and slugs[self.slug] != self.id:
for idx in range(len(slugs)):
new = '%s-%s' % (self.slug, idx + 1)
if new not in slugs:
self.slug = new
return
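    # Sketch of the slug de-duplication above (assumed data): if this author
    # already owns collections slugged 'stuff' and 'stuff-1', saving another
    # collection whose slug is 'stuff' rewrites it to 'stuff-2'.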
def get_url_path(self):
return reverse('collections.detail',
args=[self.author_username, self.slug])
def get_abs_url(self):
return absolutify(self.get_url_path())
def get_img_dir(self):
return os.path.join(user_media_path('collection_icons'),
str(self.id / 1000))
def upvote_url(self):
return reverse('collections.vote',
args=[self.author_username, self.slug, 'up'])
def downvote_url(self):
return reverse('collections.vote',
args=[self.author_username, self.slug, 'down'])
def edit_url(self):
return reverse('collections.edit',
args=[self.author_username, self.slug])
def watch_url(self):
return reverse('collections.watch',
args=[self.author_username, self.slug])
def delete_url(self):
return reverse('collections.delete',
args=[self.author_username, self.slug])
def delete_icon_url(self):
return reverse('collections.delete_icon',
args=[self.author_username, self.slug])
def share_url(self):
return reverse('collections.share',
args=[self.author_username, self.slug])
def feed_url(self):
return reverse('collections.detail.rss',
args=[self.author_username, self.slug])
def stats_url(self):
return reverse('collections.stats',
args=[self.author_username, self.slug])
@property
def author_username(self):
return self.author.username if self.author else 'anonymous'
@classmethod
def get_fallback(cls):
return cls._meta.get_field('default_locale')
@property
def url_slug(self):
"""uuid or nickname if chosen"""
return self.nickname or self.uuid
@property
def icon_url(self):
modified = int(time.mktime(self.modified.timetuple()))
if self.icontype:
# [1] is the whole ID, [2] is the directory
split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id))
path = "/".join([
split_id.group(2) or '0',
"%s.png?m=%s" % (self.id, modified)
])
return user_media_url('collection_icons') + path
elif self.type == amo.COLLECTION_FAVORITES:
return settings.STATIC_URL + 'img/icons/heart.png'
else:
return settings.STATIC_URL + 'img/icons/collection.png'
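    # Illustration of the directory split above (assumed id values): for
    # id=12345 the regex yields group(2)='12' and group(1)='12345', so the
    # icon path becomes '12/12345.png?m=<modified>'. Ids below 1000 leave
    # group(2) empty and fall back to the '0/' directory.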
def set_addons(self, addon_ids, comments={}):
"""Replace the current add-ons with a new list of add-on ids."""
order = dict((a, idx) for idx, a in enumerate(addon_ids))
# Partition addon_ids into add/update/remove buckets.
existing = set(self.addons.using('default')
.values_list('id', flat=True))
add, update = [], []
for addon in addon_ids:
bucket = update if addon in existing else add
bucket.append((addon, order[addon]))
remove = existing.difference(addon_ids)
cursor = connection.cursor()
now = datetime.now()
if remove:
cursor.execute("DELETE FROM addons_collections "
"WHERE collection_id=%s AND addon_id IN (%s)" %
(self.id, ','.join(map(str, remove))))
if self.listed:
for addon in remove:
amo.log(amo.LOG.REMOVE_FROM_COLLECTION,
(Addon, addon), self)
if add:
insert = '(%s, %s, %s, NOW(), NOW(), 0)'
values = [insert % (a, self.id, idx) for a, idx in add]
cursor.execute("""
INSERT INTO addons_collections
(addon_id, collection_id, ordering, created,
modified, downloads)
VALUES %s""" % ','.join(values))
if self.listed:
for addon_id, idx in add:
amo.log(amo.LOG.ADD_TO_COLLECTION,
(Addon, addon_id), self)
for addon, ordering in update:
(CollectionAddon.objects.filter(collection=self.id, addon=addon)
.update(ordering=ordering, modified=now))
for addon, comment in comments.iteritems():
try:
c = (CollectionAddon.objects.using('default')
.get(collection=self.id, addon=addon))
except CollectionAddon.DoesNotExist:
pass
else:
c.comments = comment
c.save(force_update=True)
self.save()
def is_subscribed(self, user):
"""Determines if the user is subscribed to this collection."""
return self.following.filter(user=user).exists()
def add_addon(self, addon):
"Adds an addon to the collection."
CollectionAddon.objects.get_or_create(addon=addon, collection=self)
if self.listed:
amo.log(amo.LOG.ADD_TO_COLLECTION, addon, self)
self.save() # To invalidate Collection.
def remove_addon(self, addon):
CollectionAddon.objects.filter(addon=addon, collection=self).delete()
if self.listed:
amo.log(amo.LOG.REMOVE_FROM_COLLECTION, addon, self)
self.save() # To invalidate Collection.
def owned_by(self, user):
return (user.id == self.author_id)
def can_view_stats(self, request):
if request and request.amo_user:
return (self.publishable_by(request.amo_user) or
acl.action_allowed(request, 'CollectionStats', 'View'))
return False
@caching.cached_method
def publishable_by(self, user):
return bool(self.owned_by(user) or self.users.filter(pk=user.id))
@staticmethod
def transformer(collections):
if not collections:
return
author_ids = set(c.author_id for c in collections)
authors = dict((u.id, u) for u in
UserProfile.objects.filter(id__in=author_ids))
for c in collections:
c.author = authors.get(c.author_id)
c_dict = dict((c.pk, c) for c in collections)
sharing.attach_share_counts(CollectionShareCountTotal, 'collection',
c_dict)
@staticmethod
def post_save(sender, instance, **kwargs):
from . import tasks
if kwargs.get('raw'):
return
tasks.collection_meta.delay(instance.id, using='default')
tasks.index_collections.delay([instance.id])
@staticmethod
def post_delete(sender, instance, **kwargs):
from . import tasks
if kwargs.get('raw'):
return
tasks.unindex_collections.delay([instance.id])
def check_ownership(self, request, require_owner, require_author,
ignore_disabled, admin):
"""
Used by acl.check_ownership to see if request.user has permissions for
the collection.
"""
from access import acl
return acl.check_collection_ownership(request, self, require_owner)
models.signals.post_save.connect(Collection.post_save, sender=Collection,
dispatch_uid='coll.post_save')
models.signals.pre_save.connect(save_signal, sender=Collection,
dispatch_uid='coll_translations')
models.signals.post_delete.connect(Collection.post_delete, sender=Collection,
dispatch_uid='coll.post_delete')
class CollectionAddon(amo.models.ModelBase):
addon = models.ForeignKey(Addon)
collection = models.ForeignKey(Collection)
# category (deprecated: for "Fashion Your Firefox")
comments = LinkifiedField(null=True)
downloads = models.PositiveIntegerField(default=0)
user = models.ForeignKey(UserProfile, null=True)
ordering = models.PositiveIntegerField(
default=0,
help_text='Add-ons are displayed in ascending order '
'based on this field.')
class Meta(amo.models.ModelBase.Meta):
db_table = 'addons_collections'
unique_together = (('addon', 'collection'),)
@staticmethod
def post_save_or_delete(sender, instance, **kwargs):
"""Update Collection.addon_count."""
from . import tasks
tasks.collection_meta.delay(instance.collection_id, using='default')
models.signals.pre_save.connect(save_signal, sender=CollectionAddon,
dispatch_uid='coll_addon_translations')
# Update Collection.addon_count.
models.signals.post_save.connect(CollectionAddon.post_save_or_delete,
sender=CollectionAddon,
dispatch_uid='coll.post_save')
models.signals.post_delete.connect(CollectionAddon.post_save_or_delete,
sender=CollectionAddon,
dispatch_uid='coll.post_save')
class CollectionFeature(amo.models.ModelBase):
title = TranslatedField()
tagline = TranslatedField()
class Meta(amo.models.ModelBase.Meta):
db_table = 'collection_features'
models.signals.pre_save.connect(save_signal, sender=CollectionFeature,
dispatch_uid='collectionfeature_translations')
class CollectionPromo(amo.models.ModelBase):
collection = models.ForeignKey(Collection, null=True)
locale = models.CharField(max_length=10, null=True)
collection_feature = models.ForeignKey(CollectionFeature)
class Meta(amo.models.ModelBase.Meta):
db_table = 'collection_promos'
unique_together = ('collection', 'locale', 'collection_feature')
@staticmethod
def transformer(promos):
if not promos:
return
promo_dict = dict((p.id, p) for p in promos)
q = (Collection.objects.no_cache()
.filter(collectionpromo__in=promos)
.extra(select={'promo_id': 'collection_promos.id'}))
for promo_id, collection in (sorted_groupby(q, 'promo_id')):
promo_dict[promo_id].collection = collection.next()
class CollectionWatcher(amo.models.ModelBase):
collection = models.ForeignKey(Collection, related_name='following')
user = models.ForeignKey(UserProfile)
class Meta(amo.models.ModelBase.Meta):
db_table = 'collection_subscriptions'
def flush_urls(self):
urls = ['*/user/%d/' % self.user_id]
return urls
@staticmethod
def post_save_or_delete(sender, instance, **kw):
from . import tasks
tasks.collection_watchers(instance.collection_id, using='default')
models.signals.post_save.connect(CollectionWatcher.post_save_or_delete,
sender=CollectionWatcher)
models.signals.post_delete.connect(CollectionWatcher.post_save_or_delete,
sender=CollectionWatcher)
class CollectionUser(models.Model):
collection = models.ForeignKey(Collection)
user = models.ForeignKey(UserProfile)
role = models.SmallIntegerField(
default=1,
choices=amo.COLLECTION_AUTHOR_CHOICES.items())
class Meta:
db_table = 'collections_users'
class CollectionVote(models.Model):
collection = models.ForeignKey(Collection, related_name='votes')
user = models.ForeignKey(UserProfile, related_name='votes')
vote = models.SmallIntegerField(default=0)
created = models.DateTimeField(null=True, auto_now_add=True)
class Meta:
db_table = 'collections_votes'
def flush_urls(self):
urls = ['*%s' % self.collection.get_url_path()]
return urls
@staticmethod
def post_save_or_delete(sender, instance, **kwargs):
# There are some issues with cascade deletes, where the
# collection disappears before the votes. Make sure the
# collection exists before trying to update it in the task.
if Collection.objects.filter(id=instance.collection_id).exists():
from . import tasks
tasks.collection_votes(instance.collection_id, using='default')
models.signals.post_save.connect(CollectionVote.post_save_or_delete,
sender=CollectionVote)
models.signals.post_delete.connect(CollectionVote.post_save_or_delete,
sender=CollectionVote)
class SyncedCollection(CollectionBase, amo.models.ModelBase):
"""
We remember what add-ons a user has installed with this table.
The addon guids come in from the discovery pane and we translate those to
addon ids. If those addons match an addon_index of an existing
SyncedCollection its count is incremented; otherwise a new collection is
created for that bag of addons.
This uses separate tables because we don't want the high volume of data to
crush performance on normal collection tables. SyncedCollections are used
to generate recommendations and may be used for other data mining in the
future.
"""
addon_index = models.CharField(
max_length=40, null=True,
db_index=True, unique=True,
help_text='md5 of addon ids in this collection for fast comparisons')
addons = models.ManyToManyField(Addon, through='SyncedCollectionAddon',
related_name='synced_collections')
count = models.IntegerField("Number of users with this collection.",
default=0)
class Meta:
db_table = 'synced_collections'
def save(self, **kw):
return super(SyncedCollection, self).save(**kw)
def set_addons(self, addon_ids):
# SyncedCollections are only written once so we don't need to deal with
# updates or deletes.
relations = [
SyncedCollectionAddon(addon_id=addon_id, collection_id=self.pk)
for addon_id in addon_ids]
SyncedCollectionAddon.objects.bulk_create(relations)
if not self.addon_index:
self.addon_index = self.make_index(addon_ids)
self.save()
transaction.commit_unless_managed()
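# Illustrative sketch only: `make_index` is defined elsewhere (presumably on
# CollectionBase, which is not shown in this file). Based on the `addon_index`
# help_text above ("md5 of addon ids"), a hypothetical implementation could
# hash the sorted ids so that identical bags of add-ons always map to the same
# SyncedCollection row, e.g.:
#
#     import hashlib
#
#     def make_index(addon_ids):
#         ids = ':'.join(map(str, sorted(addon_ids)))
#         return hashlib.md5(ids).hexdigest()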
class SyncedCollectionAddon(models.Model):
addon = models.ForeignKey(Addon)
collection = models.ForeignKey(SyncedCollection)
class Meta(amo.models.ModelBase.Meta):
db_table = 'synced_addons_collections'
unique_together = (('addon', 'collection'),)
class RecommendedCollection(Collection):
class Meta:
proxy = True
def save(self, **kw):
self.type = amo.COLLECTION_RECOMMENDED
return super(RecommendedCollection, self).save(**kw)
@classmethod
def build_recs(cls, addon_ids):
"""Get the top ranking add-ons according to recommendation scores."""
scores = AddonRecommendation.scores(addon_ids)
d = collections.defaultdict(int)
for others in scores.values():
for addon, score in others.items():
d[addon] += score
addons = sorted(d.items(), key=lambda x: x[1], reverse=True)
return [addon for addon, score in addons if addon not in addon_ids]
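    # Worked example (hypothetical numbers) of how build_recs aggregates scores:
    # given addon_ids = [1, 4] and scores = {1: {2: 3.0, 3: 1.0}, 4: {2: 2.0}},
    # the totals become d = {2: 5.0, 3: 1.0}, so the ranked recommendations are
    # [2, 3]; add-ons already present in addon_ids are filtered out of the result.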
class FeaturedCollection(amo.models.ModelBase):
application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='application_id')
collection = models.ForeignKey(Collection)
locale = models.CharField(max_length=10, null=True)
class Meta:
db_table = 'featured_collections'
def __unicode__(self):
return u'%s (%s: %s)' % (self.collection, self.application,
self.locale)
class MonthlyPick(amo.models.ModelBase):
addon = models.ForeignKey(Addon)
blurb = models.TextField()
image = models.URLField()
locale = models.CharField(max_length=10, unique=True, null=True,
blank=True)
class Meta:
db_table = 'monthly_pick'
|
|
"""Represent the AsusWrt router."""
from __future__ import annotations
from datetime import datetime, timedelta
import logging
from typing import Any
from aioasuswrt.asuswrt import AsusWrt
from homeassistant.components.device_tracker.const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
DOMAIN as TRACKER_DOMAIN,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_MODE,
CONF_PASSWORD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util import dt as dt_util
from .const import (
CONF_DNSMASQ,
CONF_INTERFACE,
CONF_REQUIRE_IP,
CONF_SSH_KEY,
CONF_TRACK_UNKNOWN,
DEFAULT_DNSMASQ,
DEFAULT_INTERFACE,
DEFAULT_TRACK_UNKNOWN,
DOMAIN,
PROTOCOL_TELNET,
SENSOR_CONNECTED_DEVICE,
SENSOR_RX_BYTES,
SENSOR_RX_RATES,
SENSOR_TX_BYTES,
SENSOR_TX_RATES,
)
CONF_REQ_RELOAD = [CONF_DNSMASQ, CONF_INTERFACE, CONF_REQUIRE_IP]
KEY_COORDINATOR = "coordinator"
KEY_SENSORS = "sensors"
SCAN_INTERVAL = timedelta(seconds=30)
SENSORS_TYPE_BYTES = "sensors_bytes"
SENSORS_TYPE_COUNT = "sensors_count"
SENSORS_TYPE_RATES = "sensors_rates"
_LOGGER = logging.getLogger(__name__)
class AsusWrtSensorDataHandler:
"""Data handler for AsusWrt sensor."""
def __init__(self, hass, api):
"""Initialize a AsusWrt sensor data handler."""
self._hass = hass
self._api = api
self._connected_devices = 0
async def _get_connected_devices(self):
"""Return number of connected devices."""
return {SENSOR_CONNECTED_DEVICE: self._connected_devices}
async def _get_bytes(self):
"""Fetch byte information from the router."""
ret_dict: dict[str, Any] = {}
try:
datas = await self._api.async_get_bytes_total()
except OSError as exc:
raise UpdateFailed from exc
ret_dict[SENSOR_RX_BYTES] = datas[0]
ret_dict[SENSOR_TX_BYTES] = datas[1]
return ret_dict
async def _get_rates(self):
"""Fetch rates information from the router."""
ret_dict: dict[str, Any] = {}
try:
rates = await self._api.async_get_current_transfer_rates()
except OSError as exc:
raise UpdateFailed from exc
ret_dict[SENSOR_RX_RATES] = rates[0]
ret_dict[SENSOR_TX_RATES] = rates[1]
return ret_dict
def update_device_count(self, conn_devices: int):
"""Update connected devices attribute."""
if self._connected_devices == conn_devices:
return False
self._connected_devices = conn_devices
return True
async def get_coordinator(self, sensor_type: str, should_poll=True):
"""Get the coordinator for a specific sensor type."""
if sensor_type == SENSORS_TYPE_COUNT:
method = self._get_connected_devices
elif sensor_type == SENSORS_TYPE_BYTES:
method = self._get_bytes
elif sensor_type == SENSORS_TYPE_RATES:
method = self._get_rates
else:
raise RuntimeError(f"Invalid sensor type: {sensor_type}")
coordinator = DataUpdateCoordinator(
self._hass,
_LOGGER,
name=sensor_type,
update_method=method,
# Polling interval. Will only be polled if there are subscribers.
update_interval=SCAN_INTERVAL if should_poll else None,
)
await coordinator.async_refresh()
return coordinator
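    # Usage sketch (simplified; the real sensor entities live in the platform
    # modules, not here): a consumer typically wraps the returned coordinator
    # in a CoordinatorEntity so it is refreshed only while subscribed, e.g.
    #
    #     from homeassistant.helpers.update_coordinator import CoordinatorEntity
    #
    #     class ConnectedDevicesSensor(CoordinatorEntity):
    #         @property
    #         def state(self):
    #             # `data` is the dict returned by the update method chosen above
    #             return self.coordinator.data[SENSOR_CONNECTED_DEVICE]
    #
    # so a coordinator created with should_poll=False is only refreshed when
    # update_device_count() reports a change and async_refresh() is called.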
class AsusWrtDevInfo:
"""Representation of a AsusWrt device info."""
def __init__(self, mac, name=None):
"""Initialize a AsusWrt device info."""
self._mac = mac
self._name = name
self._ip_address = None
self._last_activity = None
self._connected = False
def update(self, dev_info=None, consider_home=0):
"""Update AsusWrt device info."""
utc_point_in_time = dt_util.utcnow()
if dev_info:
if not self._name:
self._name = dev_info.name or self._mac.replace(":", "_")
self._ip_address = dev_info.ip
self._last_activity = utc_point_in_time
self._connected = True
elif self._connected:
self._connected = (
utc_point_in_time - self._last_activity
).total_seconds() < consider_home
self._ip_address = None
@property
def is_connected(self):
"""Return connected status."""
return self._connected
@property
def mac(self):
"""Return device mac address."""
return self._mac
@property
def name(self):
"""Return device name."""
return self._name
@property
def ip_address(self):
"""Return device ip address."""
return self._ip_address
@property
def last_activity(self):
"""Return device last activity."""
return self._last_activity
class AsusWrtRouter:
"""Representation of a AsusWrt router."""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Initialize a AsusWrt router."""
self.hass = hass
self._entry = entry
self._api: AsusWrt = None
self._protocol = entry.data[CONF_PROTOCOL]
self._host = entry.data[CONF_HOST]
self._model = "Asus Router"
self._sw_v = None
self._devices: dict[str, Any] = {}
self._connected_devices = 0
self._connect_error = False
self._sensors_data_handler: AsusWrtSensorDataHandler = None
self._sensors_coordinator: dict[str, Any] = {}
self._on_close = []
self._options = {
CONF_DNSMASQ: DEFAULT_DNSMASQ,
CONF_INTERFACE: DEFAULT_INTERFACE,
CONF_REQUIRE_IP: True,
}
self._options.update(entry.options)
async def setup(self) -> None:
"""Set up a AsusWrt router."""
self._api = get_api(self._entry.data, self._options)
try:
await self._api.connection.async_connect()
except OSError as exp:
raise ConfigEntryNotReady from exp
if not self._api.is_connected:
raise ConfigEntryNotReady
# System
model = await _get_nvram_info(self._api, "MODEL")
if model:
self._model = model["model"]
firmware = await _get_nvram_info(self._api, "FIRMWARE")
if firmware:
self._sw_v = f"{firmware['firmver']} (build {firmware['buildno']})"
# Load tracked entities from registry
entity_registry = await self.hass.helpers.entity_registry.async_get_registry()
track_entries = (
self.hass.helpers.entity_registry.async_entries_for_config_entry(
entity_registry, self._entry.entry_id
)
)
for entry in track_entries:
if entry.domain == TRACKER_DOMAIN:
self._devices[entry.unique_id] = AsusWrtDevInfo(
entry.unique_id, entry.original_name
)
# Update devices
await self.update_devices()
# Init Sensors
await self.init_sensors_coordinator()
self.async_on_close(
async_track_time_interval(self.hass, self.update_all, SCAN_INTERVAL)
)
async def update_all(self, now: datetime | None = None) -> None:
"""Update all AsusWrt platforms."""
await self.update_devices()
async def update_devices(self) -> None:
"""Update AsusWrt devices tracker."""
new_device = False
_LOGGER.debug("Checking devices for ASUS router %s", self._host)
try:
wrt_devices = await self._api.async_get_connected_devices()
except OSError as exc:
if not self._connect_error:
self._connect_error = True
_LOGGER.error(
"Error connecting to ASUS router %s for device update: %s",
self._host,
exc,
)
return
if self._connect_error:
self._connect_error = False
_LOGGER.info("Reconnected to ASUS router %s", self._host)
consider_home = self._options.get(
CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
)
track_unknown = self._options.get(CONF_TRACK_UNKNOWN, DEFAULT_TRACK_UNKNOWN)
for device_mac in self._devices:
dev_info = wrt_devices.get(device_mac)
self._devices[device_mac].update(dev_info, consider_home)
for device_mac, dev_info in wrt_devices.items():
if device_mac in self._devices:
continue
if not track_unknown and not dev_info.name:
continue
new_device = True
device = AsusWrtDevInfo(device_mac)
device.update(dev_info)
self._devices[device_mac] = device
async_dispatcher_send(self.hass, self.signal_device_update)
if new_device:
async_dispatcher_send(self.hass, self.signal_device_new)
self._connected_devices = len(wrt_devices)
await self._update_unpolled_sensors()
async def init_sensors_coordinator(self) -> None:
"""Init AsusWrt sensors coordinators."""
if self._sensors_data_handler:
return
self._sensors_data_handler = AsusWrtSensorDataHandler(self.hass, self._api)
self._sensors_data_handler.update_device_count(self._connected_devices)
conn_dev_coordinator = await self._sensors_data_handler.get_coordinator(
SENSORS_TYPE_COUNT, False
)
self._sensors_coordinator[SENSORS_TYPE_COUNT] = {
KEY_COORDINATOR: conn_dev_coordinator,
KEY_SENSORS: [SENSOR_CONNECTED_DEVICE],
}
bytes_coordinator = await self._sensors_data_handler.get_coordinator(
SENSORS_TYPE_BYTES
)
self._sensors_coordinator[SENSORS_TYPE_BYTES] = {
KEY_COORDINATOR: bytes_coordinator,
KEY_SENSORS: [SENSOR_RX_BYTES, SENSOR_TX_BYTES],
}
rates_coordinator = await self._sensors_data_handler.get_coordinator(
SENSORS_TYPE_RATES
)
self._sensors_coordinator[SENSORS_TYPE_RATES] = {
KEY_COORDINATOR: rates_coordinator,
KEY_SENSORS: [SENSOR_RX_RATES, SENSOR_TX_RATES],
}
async def _update_unpolled_sensors(self) -> None:
"""Request refresh for AsusWrt unpolled sensors."""
if not self._sensors_data_handler:
return
if SENSORS_TYPE_COUNT in self._sensors_coordinator:
coordinator = self._sensors_coordinator[SENSORS_TYPE_COUNT][KEY_COORDINATOR]
if self._sensors_data_handler.update_device_count(self._connected_devices):
await coordinator.async_refresh()
async def close(self) -> None:
"""Close the connection."""
if self._api is not None and self._protocol == PROTOCOL_TELNET:
self._api.connection.disconnect()
self._api = None
for func in self._on_close:
func()
self._on_close.clear()
@callback
def async_on_close(self, func: CALLBACK_TYPE) -> None:
"""Add a function to call when router is closed."""
self._on_close.append(func)
def update_options(self, new_options: dict) -> bool:
"""Update router options."""
req_reload = False
for name, new_opt in new_options.items():
            if name in CONF_REQ_RELOAD:
old_opt = self._options.get(name)
if not old_opt or old_opt != new_opt:
req_reload = True
break
self._options.update(new_options)
return req_reload
@property
def device_info(self) -> DeviceInfo:
"""Return the device information."""
return {
"identifiers": {(DOMAIN, "AsusWRT")},
"name": self._host,
"model": self._model,
"manufacturer": "Asus",
"sw_version": self._sw_v,
}
@property
def signal_device_new(self) -> str:
"""Event specific per AsusWrt entry to signal new device."""
return f"{DOMAIN}-device-new"
@property
def signal_device_update(self) -> str:
"""Event specific per AsusWrt entry to signal updates in devices."""
return f"{DOMAIN}-device-update"
@property
def host(self) -> str:
"""Return router hostname."""
return self._host
@property
def devices(self) -> dict[str, Any]:
"""Return devices."""
return self._devices
@property
def sensors_coordinator(self) -> dict[str, Any]:
"""Return sensors coordinators."""
return self._sensors_coordinator
@property
def api(self) -> AsusWrt:
"""Return router API."""
return self._api
async def _get_nvram_info(api: AsusWrt, info_type: str) -> dict[str, Any]:
"""Get AsusWrt router info from nvram."""
info = {}
try:
info = await api.async_get_nvram(info_type)
except OSError as exc:
_LOGGER.warning("Error calling method async_get_nvram(%s): %s", info_type, exc)
return info
def get_api(conf: dict, options: dict | None = None) -> AsusWrt:
"""Get the AsusWrt API."""
opt = options or {}
return AsusWrt(
conf[CONF_HOST],
conf[CONF_PORT],
conf[CONF_PROTOCOL] == PROTOCOL_TELNET,
conf[CONF_USERNAME],
conf.get(CONF_PASSWORD, ""),
conf.get(CONF_SSH_KEY, ""),
conf[CONF_MODE],
opt.get(CONF_REQUIRE_IP, True),
interface=opt.get(CONF_INTERFACE, DEFAULT_INTERFACE),
dnsmasq=opt.get(CONF_DNSMASQ, DEFAULT_DNSMASQ),
)
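# Illustrative example only (the host, credentials, protocol and mode values
# below are hypothetical): get_api is normally fed the data stored on the
# config entry, e.g.
#
#     api = get_api(
#         {
#             CONF_HOST: "192.168.1.1",
#             CONF_PORT: 22,
#             CONF_PROTOCOL: "ssh",
#             CONF_USERNAME: "admin",
#             CONF_PASSWORD: "router-password",
#             CONF_MODE: "router",
#         },
#         {CONF_REQUIRE_IP: True},
#     )
#
# AsusWrtRouter.setup() then calls api.connection.async_connect() on the result.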
|
|
import unittest
from conans.test.utils.tools import TestClient, TestServer
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.model.ref import ConanFileReference
from conans.util.files import save
import os
conanfile = """from conans import ConanFile
class MyPkg(ConanFile):
name = "Hello0"
version = "1.2.1"
exports_sources = "*"
def package(self):
self.copy("*")
"""
class UploadTest(unittest.TestCase):
def not_existing_error_test(self):
""" Trying to upload with pattern not matched must raise an Error
"""
client = TestClient()
error = client.run("upload some_nonsense", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: No packages found matching pattern 'some_nonsense'",
client.user_io.out)
def invalid_reference_error_test(self):
""" Trying to upload an invalid reference must raise an Error
"""
client = TestClient()
error = client.run("upload some_nonsense -p hash1", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: -p parameter only allowed with a valid recipe reference",
client.user_io.out)
def non_existing_recipe_error_test(self):
""" Trying to upload a non-existing recipe must raise an Error
"""
client = TestClient()
error = client.run("upload Pkg/0.1@user/channel", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: There is no local conanfile exported as Pkg/0.1@user/channel",
client.user_io.out)
def non_existing_package_error_test(self):
""" Trying to upload a non-existing package must raise an Error
"""
client = TestClient()
error = client.run("upload Pkg/0.1@user/channel -p hash1", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: There is no local conanfile exported as Pkg/0.1@user/channel",
client.user_io.out)
def _client(self):
if not hasattr(self, "_servers"):
servers = {}
test_server = TestServer([("*/*@*/*", "*")], [("*/*@*/*", "*")],
users={"lasote": "mypass"})
servers["default"] = test_server
self._servers = servers
client = TestClient(servers=self._servers, users={"default": [("lasote", "mypass")]})
return client
def pattern_upload_test(self):
client = self._client()
client.save({"conanfile.py": conanfile})
client.run("create . user/testing")
client.run("upload Hello0/*@user/testing --confirm --all")
self.assertIn("Uploading conanmanifest.txt", client.user_io.out)
self.assertIn("Uploading conan_package.tgz", client.user_io.out)
self.assertIn("Uploading conanfile.py", client.user_io.out)
def corrupt_upload_test(self):
client = self._client()
client.save({"conanfile.py": conanfile,
"include/hello.h": ""})
client.run("create . frodo/stable")
ref = ConanFileReference.loads("Hello0/1.2.1@frodo/stable")
packages_folder = client.client_cache.packages(ref)
pkg_id = os.listdir(packages_folder)[0]
package_folder = os.path.join(packages_folder, pkg_id)
save(os.path.join(package_folder, "added.txt"), "")
os.remove(os.path.join(package_folder, "include/hello.h"))
error = client.run("upload Hello0/1.2.1@frodo/stable --all --check", ignore_error=True)
self.assertTrue(error)
self.assertIn("WARN: Mismatched checksum 'added.txt'", client.user_io.out)
self.assertIn("WARN: Mismatched checksum 'include/hello.h'", client.user_io.out)
self.assertIn("ERROR: Cannot upload corrupted package", client.user_io.out)
def upload_modified_recipe_test(self):
client = self._client()
client.save({"conanfile.py": conanfile,
"hello.cpp": ""})
client.run("export . frodo/stable")
client.run("upload Hello0/1.2.1@frodo/stable")
self.assertIn("Uploading conanmanifest.txt", client.user_io.out)
self.assertIn("Uploaded conan recipe 'Hello0/1.2.1@frodo/stable' to 'default'",
client.out)
client2 = self._client()
client2.save({"conanfile.py": conanfile,
"hello.cpp": "//comamend"})
client2.run("export . frodo/stable")
ref = ConanFileReference.loads("Hello0/1.2.1@frodo/stable")
manifest = client2.client_cache.load_manifest(ref)
manifest.time += 10
save(client2.client_cache.digestfile_conanfile(ref), str(manifest))
client2.run("upload Hello0/1.2.1@frodo/stable")
self.assertIn("Uploading conanmanifest.txt", client2.user_io.out)
self.assertIn("Uploaded conan recipe 'Hello0/1.2.1@frodo/stable' to 'default'",
client2.out)
# first client tries to upload again
error = client.run("upload Hello0/1.2.1@frodo/stable", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: Remote recipe is newer than local recipe", client.user_io.out)
def upload_unmodified_recipe_test(self):
client = self._client()
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run("upload Hello0/1.2.1@frodo/stable")
self.assertIn("Uploading conanmanifest.txt", client.user_io.out)
self.assertIn("Uploaded conan recipe 'Hello0/1.2.1@frodo/stable' to 'default'",
client.out)
client2 = self._client()
client2.save(files)
client2.run("export . frodo/stable")
ref = ConanFileReference.loads("Hello0/1.2.1@frodo/stable")
manifest = client2.client_cache.load_manifest(ref)
manifest.time += 10
save(client2.client_cache.digestfile_conanfile(ref), str(manifest))
client2.run("upload Hello0/1.2.1@frodo/stable")
self.assertNotIn("Uploading conanmanifest.txt", client2.out)
self.assertNotIn("Uploaded conan recipe 'Hello0/1.2.1@frodo/stable' to 'default'",
client2.out)
self.assertIn("Recipe is up to date, upload skipped", client2.out)
# first client tries to upload again
client.run("upload Hello0/1.2.1@frodo/stable")
self.assertNotIn("Uploading conanmanifest.txt", client.out)
self.assertNotIn("Uploaded conan recipe 'Hello0/1.2.1@frodo/stable' to 'default'",
client.out)
self.assertIn("Recipe is up to date, upload skipped", client.out)
def upload_unmodified_package_test(self):
client = self._client()
client.save({"conanfile.py": conanfile,
"hello.cpp": ""})
client.run("create . frodo/stable")
client.run("upload Hello0/1.2.1@frodo/stable --all")
client2 = self._client()
client2.save({"conanfile.py": conanfile,
"hello.cpp": ""})
client2.run("create . frodo/stable")
client2.run("upload Hello0/1.2.1@frodo/stable --all")
self.assertIn("Recipe is up to date, upload skipped", client2.out)
self.assertNotIn("Uploading conanfile.py", client2.out)
self.assertNotIn("Uploading conan_sources.tgz", client2.out)
self.assertNotIn("Uploaded conan recipe 'Hello0/1.2.1@frodo/stable' to 'default'",
client2.out)
self.assertNotIn("Uploading conaninfo.txt", client2.out) # conaninfo NOT changed
self.assertNotIn("Uploading conan_package.tgz", client2.out)
self.assertIn("Package is up to date, upload skipped", client2.out)
# first client tries to upload again
client.run("upload Hello0/1.2.1@frodo/stable --all")
self.assertIn("Recipe is up to date, upload skipped", client.out)
self.assertNotIn("Uploading conanfile.py", client.out)
self.assertNotIn("Uploading conan_sources.tgz", client.out)
self.assertNotIn("Uploaded conan recipe 'Hello0/1.2.1@frodo/stable' to 'default'",
client.out)
self.assertNotIn("Uploading conaninfo.txt", client.out) # conaninfo NOT changed
self.assertNotIn("Uploading conan_package.tgz", client2.out)
self.assertIn("Package is up to date, upload skipped", client2.out)
def skip_upload_test(self):
""" Check that the option --dry does not upload anything
"""
client = self._client()
files = cpp_hello_conan_files("Hello0", "1.2.1", build=False)
client.save(files)
client.run("export . frodo/stable")
client.run("install Hello0/1.2.1@frodo/stable --build=missing")
client.run("upload Hello0/1.2.1@frodo/stable -r default --all --skip-upload")
# dry run should not upload
self.assertNotIn("Uploading conan_package.tgz", client.user_io.out)
# but dry run should compress
self.assertIn("Compressing recipe...", client.user_io.out)
self.assertIn("Compressing package...", client.user_io.out)
client.run("search -r default")
# after dry run nothing should be on the server ...
self.assertNotIn("Hello0/1.2.1@frodo/stable", client.user_io.out)
# now upload, the stuff should NOT be recompressed
client.run("upload Hello0/1.2.1@frodo/stable -r default --all")
# check for upload message
self.assertIn("Uploading conan_package.tgz", client.user_io.out)
# check if compressed files are re-used
self.assertNotIn("Compressing recipe...", client.user_io.out)
self.assertNotIn("Compressing package...", client.user_io.out)
# now it should be on the server
client.run("search -r default")
self.assertIn("Hello0/1.2.1@frodo/stable", client.user_io.out)
def upload_without_sources_test(self):
client = self._client()
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
pass
"""
client.save({"conanfile.py": conanfile})
client.run("create . Pkg/0.1@user/testing")
client.run("upload * --all --confirm")
client2 = self._client()
client2.run("install Pkg/0.1@user/testing")
client2.run("remote remove default")
server2 = TestServer([("*/*@*/*", "*")], [("*/*@*/*", "*")],
users={"lasote": "mypass"})
client2.users = {"server2": [("lasote", "mypass")]}
client2.update_servers({"server2": server2})
client2.run("upload * --all --confirm -r=server2")
self.assertIn("Uploading conanfile.py", client2.out)
self.assertIn("Uploading conan_package.tgz", client2.out)
|
|
#
# Hubblemon - Yet another general purpose system monitor
#
# Copyright 2015 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, time, threading
import datetime
import common.settings
import binascii, socket
import data_loader.basic_loader
import data_loader.loader_factory
mod_cache = {}
mod_query_cache = {}
#
# default loader settings
#
def loader(entity_table, filter = None, title = ''):
results = []
info = _get_listener_info(entity_table)
    listener = info[0]
storage_manager = info[1]
handle = storage_manager.get_handle(entity_table)
try:
return data_loader.basic_loader.basic_loader(handle, filter, title)
    except Exception:
return data_loader.basic_loader.basic_loader(None, [])
#
# data, system handling
#
def _get_listener_info(entity_table):
#print(entity_table)
entity = entity_table.split('/')[0]
#ip = socket.gethostbyname(entity)
ret = binascii.crc32(bytes(entity, 'utf-8'))
n = len(common.settings.listener_list)
idx = ret % n
return common.settings.listener_list[idx]
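# Example of the sharding scheme above (hypothetical values): with three
# listeners configured, binascii.crc32(b"host01") % 3 yields the same index on
# every call, so every table under "host01/..." is always served by the same
# (listener, storage_manager) pair from common.settings.listener_list.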
# local or remote
def get_entity_list():
entity_map = {}
for item in common.settings.listener_list:
storage_manager = item[1]
entities = storage_manager.get_entity_list()
for entity in entities:
entity_map[entity] = 1
return list(entity_map.keys())
# local or remote
def get_table_list_of_entity(entity, prefix):
info = _get_listener_info(entity)
data_list = []
storage_manager = info[1]
data_list += storage_manager.get_table_list_of_entity(entity, prefix)
return data_list
# local or remote
def get_all_table_list(prefix):
table_list = []
for item in common.settings.listener_list:
storage_manager = item[1]
table_list += storage_manager.get_all_table_list(prefix)
return table_list
#
# plugins
#
def get_chart_list(param):
if 'type' not in param:
return ([], {})
type = param['type']
type = type.split('_')[0]
if type not in mod_cache:
pkg = __import__('%s_mon.%s_view' % (type, type))
mod = getattr(pkg, '%s_view' % type)
mod.init_plugin()
mod_cache[type] = mod
return mod_cache[type].get_chart_list(param)
def get_chart_data(param):
if 'type' not in param:
return None
type = param['type']
type = type.split('_')[0]
if type not in mod_cache:
pkg = __import__('%s_mon.%s_view' % (type, type))
mod = getattr(pkg, '%s_view' % type)
mod.init_plugin()
mod_cache[type] = mod
return mod_cache[type].get_chart_data(param)
def auth_fields(param):
if 'type' not in param:
return None
type = param['type']
type = type.split('_')[0]
if type not in mod_cache:
pkg = __import__('%s_mon.%s_view' % (type, type))
mod = getattr(pkg, '%s_view' % type)
mod.init_plugin()
mod_cache[type] = mod
if type not in mod_query_cache:
pkg = __import__('%s_mon.%s_query' % (type, type))
mod = getattr(pkg, '%s_query' % type)
mod_query_cache[type] = mod
return mod_query_cache[type].auth_fields(param)
def query(param, ip):
if 'type' not in param:
return None
type = param['type']
type = type.split('_')[0]
if type not in mod_cache:
pkg = __import__('%s_mon.%s_view' % (type, type))
mod = getattr(pkg, '%s_view' % type)
mod.init_plugin()
mod_cache[type] = mod
if type not in mod_query_cache:
pkg = __import__('%s_mon.%s_query' % (type, type))
mod = getattr(pkg, '%s_query' % type)
mod_query_cache[type] = mod
return mod_query_cache[type].query(param, ip)
def get_addon_page(param):
if 'type' not in param:
return ''
type = param['type']
type = type.split('_')[0]
if type not in mod_cache:
pkg = __import__('%s_mon.%s_view' % (type, type))
mod = getattr(pkg, '%s_view' % type)
mod.init_plugin()
mod_cache[type] = mod
return mod_cache[type].get_addon_page(param)
#
# functions that are used for expr
#
import psutil_mon.psutil_view
import arcus_mon.arcus_view
import redis_mon.redis_view
import fnmatch
def system_view(entity, item = 'brief', type='serial'):
if isinstance(entity, str):
if '*' in entity or '?' in entity: # wild card
entity_list = []
all_entities = get_entity_list()
for c in all_entities:
if fnmatch.fnmatch(c, entity):
entity_list.append(c)
else:
entity_list = [ entity ]
else: # list, tuple
entity_list = entity
ret = []
for entity in entity_list:
ret.append(psutil_mon.psutil_view.system_view(entity, item))
if type == 'merge':
return data_loader.loader_factory.merge_loader(ret)
return data_loader.loader_factory.serial_loader(ret)
def arcus_view(instance): # entity/arcus_port
if isinstance(instance, str):
instances = [ instance ]
else: # list, tuple
instances = instance
ret = []
for instance in instances:
ret.append(arcus_mon.arcus_view.arcus_view(instance))
return data_loader.loader_factory.serial_loader(ret)
def arcus_instance_list(name):
if isinstance(name, str):
names = [ name ]
else: # list, tuple
names = name
node_list = []
for name in names:
node_list += arcus_mon.arcus_view.arcus_cloud_map[name]
return node_list
def arcus_cloud_list(zk = None):
if zk == None:
return list(arcus_mon.arcus_view.arcus_cloud_map.keys())
if zk in arcus_mon.arcus_view.arcus_zk_map:
return arcus_mon.arcus_view.arcus_zk_map[zk]
return None
def for_each(name, filter, fun, start_ts=None, end_ts=None):
    # default to the last 30 minutes, evaluated at call time rather than at import time
    if start_ts is None:
        start_ts = int(time.time()) - 60 * 30
    if end_ts is None:
        end_ts = int(time.time())
    # change to timestamp
if isinstance(start_ts, str):
start_date = datetime.datetime.strptime(start_ts, '%Y-%m-%d %H:%M')
start_ts = int(start_date.timestamp())
elif isinstance(start_ts, datetime.datetime):
start_ts = int(start_ts.timestamp())
if isinstance(end_ts, str):
end_date = datetime.datetime.strptime(end_ts, '%Y-%m-%d %H:%M')
end_ts = int(end_date.timestamp())
elif isinstance(end_ts, datetime.datetime):
        end_ts = int(end_ts.timestamp())
if isinstance(name, str):
names = [ name ]
else: # list, tuple
names = name
selected = []
for name in names:
ldr = loader(name)
ldr.parse(start_ts, end_ts)
if filter(ldr):
selected.append(name)
print('# selected: ' + str(selected))
result = []
for i in selected:
ret = fun(i)
if isinstance(ret, list):
result += ret
else:
result.append(ret)
#print(result)
return result
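# Usage sketch (the table names and the `is_overloaded` predicate are
# hypothetical): `filter` receives a parsed loader and returns True/False,
# and `fun` maps each selected table name to a result, e.g.
#
#     for_each(['host01/psutil', 'host02/psutil'],
#              lambda ldr: is_overloaded(ldr),
#              lambda name: name)
#
# parses each table over the last 30 minutes, keeps the ones the filter
# accepts, and returns the list produced by applying `fun` to them.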
#
# functions that are used for query eval
#
def return_as_string(result, p = {}):
result = str(result)
result = result.replace('\n', '<br>')
    result = result.replace(' ', '&nbsp;')  # preserve spacing when rendered as HTML
p['result'] = result
return p['result']
def return_as_textarea(result, p = {}):
p['result'] = '<textarea>%s</textarea>' % result
return p['result']
def return_as_table(result, p = {}):
tr = ''
# cursor meta info
if hasattr(result, 'description'):
td = ''
for d in result.description:
td += '<td>%s</td>' % d[0]
tr += '<tr>%s</tr>' % td
# values
for row in result:
td = ''
for item in row:
td += '<td>%s</td>' % item
tr += '<tr>%s</tr>' % td
p['result'] = '<table border="1">%s</table>' % tr
return p['result']
# result zookeeper.load_all()
from arcus_mon.arcus_driver.arcus_util import zookeeper
def get_arcus_zk_load_all(addr):
zoo = zookeeper(addr)
zoo.load_all()
return zoo
def get_arcus_zk_node_cloud_map(addr):
arcus_node_cloud_map = {}
zoo = zookeeper(addr)
nodes = zoo.get_arcus_node_all()
for node in nodes:
arcus_node_cloud_map[node.ip + ":" + node.port] = node.code
return arcus_node_cloud_map
|
|
# (C) Copyright 2020 by Rocky Bernstein
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__docformat__ = "restructuredtext"
from collections import namedtuple
from xdis.codetype.base import *
from xdis.codetype.code13 import *
from xdis.codetype.code15 import *
from xdis.codetype.code20 import *
from xdis.codetype.code30 import *
from xdis.codetype.code38 import *
import types
from xdis.version_info import PYTHON_VERSION
def codeType2Portable(code, version=PYTHON_VERSION):
"""Converts a native types.CodeType code object into a the
corresponding more flexible xdis Code type,.
"""
if isinstance(code, CodeBase):
return code
if not (
isinstance(code, types.CodeType)
or isinstance(code, CodeTypeUnion)
):
raise TypeError(
"parameter expected to be a types.CodeType type; is %s instead" % type(code)
)
if isinstance(version, str):
version = float(version)
if version >= 3.0:
if version < 3.8:
return Code3(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
else:
return Code38(
code.co_argcount,
code.co_posonlyargcount, # Not in < 3.8
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
elif version > 2.0:
# 2.0 .. 2.7
return Code2(
code.co_argcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars, # not in 1.x
code.co_cellvars, # not in 1.x
)
else:
# 1.0 .. 1.5
if version < 1.5:
# 1.0 .. 1.3
return Code13(
code.co_argcount,
code.co_nlocals,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
)
else:
return Code15(
code.co_argcount,
code.co_nlocals,
code.co_stacksize, # not in 1.0..1.4
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno, # Not in 1.0..1.4
code.co_lnotab, # Not in 1.0..1.4
)
def portableCodeType(version=PYTHON_VERSION):
"""
Return the portable CodeType version for the supplied Python release version.
`version` is a floating-point number, like 2.7, or 3.9. If no version
number is supplied we'll use the current interpreter version.
"""
if version >= 3.0:
if version < 3.8:
# 3.0 .. 3.7
return Code3
else:
# 3.8 ..
return Code38
elif version > 2.0:
# 2.0 .. 2.7
return Code2
else:
# 1.0 .. 1.5
if version <= 1.3:
return Code13
else:
return Code15
raise RuntimeError("Implementation bug: can't handle version %s" % version)
# In contrast to Code3, Code2, etc. you can use CodeTypeUnion for building
# an incomplete code type, which might be converted to another code type
# later.
CodeTypeUnionFields = Code38FieldNames.split()
CodeTypeUnion = namedtuple("CodeTypeUnion", CodeTypeUnionFields)
# Note: default values of `None` indicate a required parameter.
# default values of -1, (None,) or "" indicate an unsupplied parameter.
def to_portable(
co_argcount,
co_posonlyargcount = -1, # 3.8+
co_kwonlyargcount = -1, # 3.0+
co_nlocals = None,
co_stacksize = -1, # 1.5+
co_flags = None,
co_code = None, # 3.0+ this type changes from <str> to <bytes>
co_consts = None,
co_names = None,
co_varnames = None,
co_filename = None,
co_name = None,
co_firstlineno = -1,
co_lnotab = "", # 1.5+; 3.0+ this type changes from <str> to <bytes>
co_freevars = (None,), # 2.0+
co_cellvars = (None,), # 2.0+
version = PYTHON_VERSION,
):
code = CodeTypeUnion(
co_argcount,
co_posonlyargcount,
co_kwonlyargcount,
co_nlocals,
co_stacksize,
co_flags,
co_code,
co_consts,
co_names,
co_varnames,
co_filename,
co_name,
co_firstlineno,
co_lnotab,
co_freevars,
co_cellvars,
)
return codeType2Portable(code, version)
if __name__ == "__main__":
x = codeType2Portable(to_portable.__code__)
print(x)
|
|
from __future__ import unicode_literals
from datetime import datetime, timedelta
import logging
import tweepy
import models
LOGGER = logging.getLogger(__name__)
SERVICES = [
'API', 'JS', 'DASH'
]
STATUSES = [
'', 'UP', 'DOWN', 'ISSUE'
]
class TwitterStatusProcessor(object):
SEPARATOR = ':'
def __init__(self, **auth):
if auth:
self.auth = tweepy.OAuthHandler(auth['consumer_key'],
auth['consumer_secret'])
self.auth.set_access_token(auth['token'], auth['token_secret'])
self.twitter = tweepy.API(
self.auth,
api_root='/1.1',
)
def _parse_tweet(self, message, created_at, tweet_id, tweet_id_str):
spec, _, message = message.partition(self.SEPARATOR)
if not spec:
return
spec = spec.split('-')
service = spec[0]
if len(spec) > 1:
state = spec[1].upper()
else:
state = ''
if service not in SERVICES or state not in STATUSES:
return
self._insert(service, created_at, message, state, tweet_id_str)
self._set_last_updated(service, tweet_id)
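    # Example of the expected tweet format (illustrative text only): a tweet
    # such as "API-DOWN: investigating elevated error rates" is partitioned on
    # ':' into the spec "API-DOWN" and the message, giving service "API" and
    # state "DOWN"; "DASH: scheduled maintenance tonight" yields service "DASH"
    # with an empty state. Anything whose service or state is not listed in
    # SERVICES/STATUSES is silently ignored.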
def _get_tw_key(self, service=None):
if not service:
service = '__general'
return service + '-timestamp'
def _set_last_updated(self, service=None, tweet_id=None):
key = self._get_tw_key(service)
kv = models.KV(k=key, value=str(tweet_id), key_name=key)
kv.put()
def _set_notified(self, tweet_id):
results = models.Tweet.all()
results.filter('tweet_id =', tweet_id)
for tweet in results:
if tweet:
tweet.set_notified()
def _get_last_updated(self, service=None):
key = self._get_tw_key(service)
return models.KV.get(key)
def _insert(self, service, created_at, message, status, tweet_id):
key = '{}-{}'.format(service, created_at)
tw = models.Tweet.all().filter('key_name=', key).fetch(1)
if not tw:
tw = models.Tweet(
service=service,
created_at=created_at,
message=message,
status=status,
key_name=key,
tweet_id=tweet_id,
notified=False
)
tw.put()
def _get_tweets(self, min_date):
filters = {}
if min_date:
filters['since_id'] = min_date.value
return self.twitter.user_timeline(**filters)
def run(self):
last_check = self._get_last_updated()
# get tweets where > last_updated
tweets = self._get_tweets(last_check)
for tweet in tweets:
self._parse_tweet(tweet.text,
tweet.created_at,
tweet.id,
tweet.id_str)
if tweets:
max_id = max(tweet.id for tweet in tweets)
self._set_last_updated(tweet_id=max_id)
return max_id == last_check
return False
def get(self, service=None, count=10):
tweets = models.Tweet.all().filter(
'service = ', service
).order('-created_at').fetch(limit=count)
return tweets
def get_by_date(self, service, date):
tweets = models.Tweet.all().filter(
'service = ', service
).filter(
'created_date =', date.strftime('%Y-%m-%d')
).order('-created_at')
return tweets
def get_by_dates(self, service, min_date, max_date):
tweets = models.Tweet.all().filter(
'service = ', service
).filter(
'created_at <=', max_date
).filter(
'created_at >', min_date
)
return tweets
def get_last_message(self, service):
tweets = models.Tweet.all().filter(
'service = ', service
).order('-created_at').fetch(1)
try:
return tweets[0]
except IndexError:
return None
def get_latest_state(self, service):
tweets = models.Tweet.all().filter(
'service = ', service
).order('-created_at')
for tweet in tweets:
if tweet.status:
return tweet.status
return None
def get_uptime_month(self, service):
"""Find the uptime for a given month by calculating the number of
seconds the service was down over the number of seconds in a month.
We make the assumption that 35 days ago, the service was up. We then
read all tweets since then, modifying the state and calculating
downtime as we go.
We start early in case the month starts with downtime. As long as all
downtime lasts less than five days, we can make an accurate calculation
for 30 days, which is all we care about.
"""
DAYS_PER_MONTH = 30
GRACE_PERIOD = timedelta(days=5) # in case month starts with downtime
end = datetime.now()
start = end - timedelta(days=DAYS_PER_MONTH)
tweets = self.get_by_dates(service, start - GRACE_PERIOD, end) \
.order('created_at')
# read tweets, calculate downtime
currently_up = True
last_update = start
total_downtime = timedelta()
for tweet in tweets:
if not tweet.status:
continue
now_up = tweet.status == 'UP'
if now_up == currently_up:
continue
if now_up:
                # only add the downtime if it happened after the start
if tweet.created_at > last_update:
total_downtime += (tweet.created_at - last_update)
else:
last_update = tweet.created_at
currently_up = now_up
if not currently_up:
total_downtime += (end - last_update)
seconds = lambda dt: float(dt.seconds + (dt.days * 24 * 60 * 60))
downtime = (seconds(total_downtime) / seconds(end - start))
return (1 - downtime) * 100
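    # Worked example: over a 30-day window (2,592,000 seconds), 36 minutes of
    # recorded downtime (2,160 seconds) gives downtime = 2160 / 2592000
    # ≈ 0.00083, so the method returns roughly 99.92 (percent uptime).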
|
|
"""
This is a subfile for IsyClass.py
These functions are accessible via the Isy class object
"""
# author : Peter Shipley <peter.shipley@gmail.com>
# copyright : Copyright (C) 2015 Peter Shipley
# license : BSD
from ISY.IsyExceptionClass import IsyInvalidCmdError, IsyResponseError, IsyValueError
from ISY.IsyProgramClass import IsyProgram
import xml.etree.ElementTree as ET
from warnings import warn
##
## ISY Programs Code
##
def load_prog(self, progid=None):
""" Load Program status and Info
args : none
internal function call
"""
if self.debug & 0x01:
print("load_prog")
if not hasattr(self, '_progdict') or not isinstance(self._progdict, dict):
self._progdict = dict()
if not hasattr(self, '_name2id') or not isinstance(self._name2id, dict):
self._name2id = dict()
if progid:
xurl = "/rest/programs/" + progid
else:
xurl = "/rest/programs?subfolders=true"
self.name2prog = dict()
prog_tree = self._getXMLetree(xurl, noquote=1)
for pg in prog_tree.iter("program"):
pdict = dict()
for k, v in pg.items():
pdict[k] = v
for pe in list(pg):
pdict[pe.tag] = pe.text
        # special case for root program node folder
if not "parentId" in pdict:
pdict["parentId"] = pdict["id"]
if "id" in pdict:
if str(pdict["id"]) in self._progdict:
self._progdict[str(pdict["id"])].update(pdict)
else:
self._progdict[str(pdict["id"])] = pdict
n = pdict["name"].upper()
# # name2id to replace name2prog as a global lookup table
# # but not sure if consolidating namespace between prog & nodes
# # is it a good idea
# if n in self._name2id:
# # print("Dup name2id : \"" + n + "\" ", pdict["id"])
# # print("name2id ", self._name2id[n])
# pass
# else:
# self._name2id[n] = ("program", pdict["id"])
if n in self.name2prog:
print("Dup name : \"" + n + "\" ", pdict["id"])
print("name2prog ", self.name2prog[n])
else:
self.name2prog[n] = pdict["id"]
#self._printdict(self._progdict)
#self._printdict(self.name2prog)
def get_prog(self, pname):
""" Get a Program object for given program name or ID
args:
            pname : program name or id
return:
An IsyProgram class object representing the requested program
"""
if self.debug & 0x01:
print("get_prog :" + pname)
if not self._progdict:
self.load_prog()
progid = self._prog_get_id(pname)
# print("\tprogid : " + progid)
if progid in self._progdict:
if not progid in self.progCdict:
# print("not progid in self.progCdict:")
# self._printdict(self._progdict[progid])
self.progCdict[progid] = IsyProgram(self, self._progdict[progid])
#self._printdict(self._progdict)
# print("return : ",)
#self._printdict(self.progCdict[progid])
return self.progCdict[progid]
else:
if self.debug & 0x01:
print("Isy get_prog no prog : \"%s\"" % progid)
raise LookupError("no prog : " + str(progid) )
def _prog_get_id(self, pname):
""" Lookup prog value by name or ID
returns ISY Id or None
"""
if isinstance(pname, IsyProgram):
return pname["id"]
if isinstance(pname, (int, long)):
p = "{0:04X}".format(pname)
else:
p = str(pname).strip()
if p.upper() in self._progdict:
# print("_prog_get_id : " + p + " progdict " + p.upper())
return p.upper()
if p in self.name2prog:
# print("_prog_get_id : " + p + " name2prog " + self.name2prog[p])
return self.name2prog[p]
# print("_prog_get_id : " + n + " None")
return None
def prog_get_path(self, pname):
" get path of parent names "
if not self._progdict:
self.load_prog()
prog_id = self._prog_get_id(pname)
if prog_id is None:
raise IsyValueError("prog_get_path: unknown program id : " + str(pname) )
return self._prog_get_path(prog_id)
def _prog_get_path(self, prog_id):
fpath = self._progdict[prog_id]['name']
pgm = self._progdict[ self._progdict[prog_id]['parentId'] ]
while pgm['id'] != '0001':
fpath = pgm['name'] + "/" + fpath
pgm = self._progdict[ pgm['parentId'] ]
fpath = "/" + fpath
return fpath
def prog_addrs(self):
"""
access method for prog address list
"""
if not self._progdict:
self.load_prog()
return self._progdict.viewkeys()
def prog_get_src(self, pname):
if not self._progdict:
self.load_prog()
prog_id = self._prog_get_id(pname)
if prog_id is None:
raise IsyValueError("prog_get_src: unknown program : " + str(prog_id) )
r = self.soapcomm("GetSysConf", name="/CONF/D2D/" + prog_id + ".PGM")
return r
def prog_iter(self):
""" Iterate though program objects
Returns an iterator over Program Objects types
"""
if not self._progdict:
self.load_prog()
k = sorted(self._progdict.keys())
for v in k:
yield self.get_prog(v)
prog_valid_comm = ['run', 'runThen', 'runElse',
'stop', 'enable', 'disable',
'enableRunAtStartup', 'disableRunAtStartup']
def prog_cmd_list(self):
""" get list of valid commands for prog_comm() """
return prog_valid_comm[:]
def prog_comm(self, paddr, cmd):
""" Send program command
args:
paddr = program name, address or program obj
cmd = name of command
raise:
            IsyValueError : if program name or Id is invalid
            IsyInvalidCmdError : if command is not valid
Valid Commands : 'run', 'runThen', 'runElse', 'stop', 'enable', 'disable', 'enableRunAtStartup', 'disableRunAtStartup'
calls /rest/programs/<pgm-id>/<pgm-cmd>
"""
prog_id = self._prog_get_id(paddr)
#print("self.controls :", self.controls)
#print("self.name2control :", self.name2control)
if not prog_id:
raise IsyValueError("prog_comm: unknown program id : " +
str(paddr) )
if not cmd in prog_valid_comm:
raise IsyInvalidCmdError("prog_comm: unknown command : " +
str(cmd) )
self._prog_comm(prog_id, cmd)
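# Usage sketch (the program id is hypothetical): given an Isy object `isy`,
#
#     isy.prog_comm("003A", "runThen")
#
# resolves the id via _prog_get_id() and issues GET /rest/programs/003A/runThen
# on the controller.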
def _prog_comm(self, prog_id, cmd):
""" called by prog_comm() after argument validation """
# /rest/programs/<pgm-id>/<pgm-cmd>
xurl = "/rest/programs/" + prog_id + "/" + cmd
if self.debug & 0x02:
print("xurl = " + xurl)
resp = self._getXMLetree(xurl)
#self._printXML(resp)
if resp.attrib["succeeded"] != 'true':
raise IsyResponseError("ISY command error : prog_id=" +
str(prog_id) + " cmd=" + str(cmd))
def prog_rename(self, prog=None, progname=None):
"""
Named args:
prog a prog id
progname New prog name
"""
if prog is None:
raise IsyValueError("prog_rename: program id is None")
    prog_id = self._prog_get_id(prog)
if prog_id is None:
raise IsyValueError("prog_rename: unknown program id : " + str(prog) )
if not isinstance(progname, str):
raise IsyValueError("new program name should be string")
r = self._prog_rename(progid=prog_id, progname=progname )
    if self._progdict is not None and prog_id in self._progdict:
        self._progdict[prog_id]['name'] = progname
        self.name2prog[progname] = prog_id
return r
def _prog_rename(self, progid=None, progname=None):
"""
Named args:
progid a prog id
progname New prog name
"""
if not isinstance(progid, str):
raise IsyValueError("program Id should be string")
prog_path="/CONF/D2D/{0}.PGM".format(progid)
result = self.soapcomm("GetSysConf", name=prog_path)
# with open("prog-orig.xml", 'w') as fi:
# fi.write(result)
if result is None:
raise IsyResponseError("Error loading Sys Conf file {0}".format(prog_path))
var_et = ET.fromstring(result)
p = var_et.find("trigger/name")
if not p is None:
p.text = progname
else:
errorstr = "Internal Error, \"name\" element missing from D2D code :\n{0}\n".format(result)
raise IsyRuntimeWarning(errorstr)
# use method='html' to generate expanded empty elements
new_prog_data = ET.tostring(var_et, method='html')
# with open("prog-changed.xml", 'w') as fi:
# fi.write(new_prog_data)
r = self._sendfile(data=new_prog_data, filename=prog_path, load="y")
return r
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
import __main__
print(__main__.__file__)
print("syntax ok")
exit(0)
|
|
# import os
# import re
import sys
# import pandas as pd
import numpy as np
# from tqdm import tqdm
from nlpia.loaders import get_data
if len(sys.argv) > 1:
lang = sys.argv[1][:3].lower()
else:
lang = 'spa'
source_lang = 'eng'
max_text_len = 32 # 32: 62,829 spa examples (2 min/epoch), 64: 100k spa examples (6 min/epoch)
df = get_data(lang)
if lang not in df.columns:
# print(df.columns)
print(f"changing language name {lang} to {list(df.columns)[-1]}")
lang = list(df.columns)[-1]
df.columns = [source_lang, lang]
df = df.dropna()
df[source_lang + '_len'] = df[source_lang].str.len()
df = df[df[source_lang + '_len'] < max_text_len]
# need to make sure all batches are composed of similar-length texts (see NLPIA ch 10 & ch 13)
input_texts, target_texts = [], [] # <1>
start_token, stop_token = '\t\n' # <3>
input_vocab = set() # <2>
output_vocab = set(start_token + stop_token)
n_samples = min(100000, len(df)) # <4>
df['target'] = start_token + df[lang] + stop_token
for statement in df[source_lang]:
input_vocab.update(set(statement))
for reply in df[lang]:
output_vocab.update(set(reply))
input_vocab = tuple(sorted(input_vocab))
output_vocab = tuple(sorted(output_vocab))
max_encoder_seq_len = df[source_lang].str.len().max()
# max_encoder_seq_len
# 100
max_decoder_seq_len = df.target.str.len().max()
# max_decoder_seq_len
# 102
""" Construct character sequence encoder-decoder training set
"""
import numpy as np # <1> # noqa
encoder_input_onehot = np.zeros(
(len(df), max_encoder_seq_len, len(input_vocab)),
dtype='float32') # <2>
decoder_input_onehot = np.zeros(
(len(df), max_decoder_seq_len, len(output_vocab)),
dtype='float32')
decoder_target_onehot = np.zeros(
(len(df), max_decoder_seq_len, len(output_vocab)),
dtype='float32')
for i, (input_text, target_text) in enumerate(
zip(df[source_lang], df.target)): # <3>
for t, c in enumerate(input_text): # <4>
k = input_vocab.index(c)
encoder_input_onehot[i, t, k] = 1. # <5>
k = np.array([output_vocab.index(c) for c in target_text])
decoder_input_onehot[i, np.arange(len(target_text)), k] = 1.
decoder_target_onehot[i, np.arange(len(target_text) - 1), k[1:]] = 1.
# <1> You use numpy for the matrix manipulations.
# <2> The training tensors are initialized as zero tensors with the shape of number of samples
# (this number should be equal for the input and target samples) times the maximum number of sequence tokens
# times the number of possible characters.
# <3> Loop over the training samples; input and target texts need to match.
# <4> Loop over each character of each sample.
# <5> Set the index for the character at each time step to one; all other indices remain at zero.
# This creates the one-hot encoded representation of the training samples.
# <6> For the training data for the decoder, you create the `decoder_input_data` and `decoder_target_data`
# (which is one time step behind the _decoder_input_data_).
"""Construct and train a character sequence encoder-decoder network
"""
from keras.models import Model # noqa
from keras.layers import Input, LSTM, Dense # noqa
batch_size = 64 # <1>
epochs = 15 # <2>
num_neurons = 256 # <3>
encoder_inputs = Input(shape=(None, len(input_vocab)))
encoder = LSTM(num_neurons, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
decoder_inputs = Input(shape=(None, len(output_vocab)))
decoder_lstm = LSTM(num_neurons, return_sequences=True,
return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
initial_state=encoder_states)
decoder_dense = Dense(len(output_vocab), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
metrics=['acc'])
model.fit([encoder_input_onehot, decoder_input_onehot],
decoder_target_onehot, batch_size=batch_size, epochs=epochs,
validation_split=0.10) # <4>
# 57915/57915 [==============================] - 296s 5ms/step - loss: 0.7575 - acc: 0.1210 - val_loss: 0.6521 - val_acc: 0.1517
# Epoch 2/100
# 57915/57915 [==============================] - 283s 5ms/step - loss: 0.5924 - acc: 0.1613 - val_loss: 0.5738 - val_acc: 0.1734
# ...
# 57915/57915 [==============================] - 276s 5ms/step - loss: 0.4235 - acc: 0.2075 - val_loss: 0.4688 - val_acc: 0.2034
# Epoch 21/100
# 57915/57915 [==============================] - 277s 5ms/step - loss: 0.4217 - acc: 0.2080 - val_loss: 0.4680 - val_acc: 0.2037
# Epoch 22/100
# 57915/57915 [==============================] - 278s 5ms/step - loss: 0.4198 - acc: 0.2084 - val_loss: 0.4686 - val_acc: 0.2035
# ...
# Epoch 69/100 [1480/1902]
# 57915/57915 [==============================] - 276s 5ms/step - loss: 0.3830 - acc: 0.2191 - val_loss: 0.4912 - val_acc: 0.2008
# Epoch 70/100
# 57915/57915 [==============================] - 277s 5ms/step - loss: 0.3826 - acc: 0.2193 - val_loss: 0.4902 - val_acc: 0.2007
# Epoch 71/100
# ...
# Epoch 99/100
# 57915/57915 [==============================] - 277s 5ms/step - loss: 0.3738 - acc: 0.2220 - val_loss: 0.5000 - val_acc: 0.1994
# Epoch 100/100
# 57915/57915 [==============================] - 278s 5ms/step - loss: 0.3736 - acc: 0.2220 - val_loss: 0.5017 - val_acc: 0.1992
""" .Construct response generator model
>>> encoder_model = Model(encoder_inputs, encoder_states)
>>> thought_input = [
... Input(shape=(num_neurons,)), Input(shape=(num_neurons,))]
>>> decoder_outputs, state_h, state_c = decoder_lstm(
... decoder_inputs, initial_state=thought_input)
>>> decoder_states = [state_h, state_c]
>>> decoder_outputs = decoder_dense(decoder_outputs)
>>> decoder_model = Model(
... inputs=[decoder_inputs] + thought_input,
... outputs=[decoder_outputs] + decoder_states)
"""
encoder_model = Model(encoder_inputs, encoder_states)
thought_input = [
Input(shape=(num_neurons,)), Input(shape=(num_neurons,))]
decoder_outputs, state_h, state_c = decoder_lstm(
decoder_inputs, initial_state=thought_input)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
inputs=[decoder_inputs] + thought_input,
    outputs=[decoder_outputs] + decoder_states)
r"""
>>> def decode_sequence(input_seq):
... thought = encoder_model.predict(input_seq) # <1>
... target_seq = np.zeros((1, 1, len(output_vocab))) # <2>
... target_seq[0, 0, target_token_index[stop_token]
... ] = 1. # <3>
... stop_condition = False
... generated_sequence = ''
... while not stop_condition:
... output_tokens, h, c = decoder_model.predict(
... [target_seq] + thought) # <4>
... generated_token_idx = np.argmax(output_tokens[0, -1, :])
... generated_char = reverse_target_char_index[generated_token_idx]
... generated_sequence += generated_char
... if (generated_char == stop_token or
... len(generated_sequence) > max_decoder_seq_len
... ): # <5>
... stop_condition = True
... target_seq = np.zeros((1, 1, len(output_vocab))) # <6>
... target_seq[0, 0, generated_token_idx] = 1.
... thought = [h, c] # <7>
... return generated_sequence
"""
def decode_sequence(input_seq):
thought = encoder_model.predict(input_seq) # <1>
target_seq = np.zeros((1, 1, len(output_vocab))) # <2>
target_seq[0, 0, output_vocab.index(stop_token)] = 1. # <3>
stop_condition = False
generated_sequence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict(
[target_seq] + thought) # <4>
# take only the most likely character (temperature = 0)
generated_token_idx = np.argmax(output_tokens[0, -1, :])
generated_char = output_vocab[generated_token_idx]
generated_sequence += generated_char
if (generated_char == stop_token or
len(generated_sequence) > max_decoder_seq_len
): # <5>
stop_condition = True
target_seq = np.zeros((1, 1, len(output_vocab))) # <6>
target_seq[0, 0, generated_token_idx] = 1.
thought = [h, c] # <7>
return generated_sequence
def respond(input_text, input_vocab=input_vocab):
input_text = input_text.lower()
input_text = ''.join(c if c in input_vocab else ' ' for c in input_text)
input_seq = np.zeros((1, max_encoder_seq_len, len(input_vocab)), dtype='float32')
for t, c in enumerate(input_text):
input_seq[0, t, input_vocab.index(c)] = 1.
decoded_sentence = decode_sequence(input_seq)
print('Human: {}'.format(input_text))
print('Bot:', decoded_sentence)
return decoded_sentence
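# Example usage once the model above has been trained (a sketch, not part of
# the original listing); respond() prints the Human/Bot lines and returns the
# generated reply string:
#
#   reply = respond("good morning")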
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Different gradient implementations for benchmark purposes
def SpecializedGrad(l, grad):
return gen_linalg_ops.cholesky_grad(l, grad)
def _GradWithInverseL(l, l_inverse, grad):
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
return grad_a * 0.5
def TriAngSolveCompositeGrad(l, grad):
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
# Compute ((l^{H} @ grad) * (tril(ones)-1/2*eye)) = middle
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
# Compute l^{-H} @ middle = z
l_inverse_middle = linalg_ops.matrix_triangular_solve(l, middle, adjoint=True)
# We need to compute z @ l^{-1}. With matrix_triangular_solve we
# actually compute l^{-H} @ z^{H} = grad. Since we later add grad^{H}
  # we can omit the conjugate transpose here.
z_h = math_ops.conj(array_ops.matrix_transpose(l_inverse_middle))
grad_a = linalg_ops.matrix_triangular_solve(l, z_h, adjoint=True)
grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
return grad_a * 0.5
def MatrixInverseCompositeGrad(l, grad):
l_inverse = linalg_ops.matrix_inverse(l)
return _GradWithInverseL(l, l_inverse, grad)
def TriAngInvCompositeGrad(l, grad):
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(
l, linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=l.dtype))
return _GradWithInverseL(l, l_inverse, grad)
class CholeskyOpTest(test.TestCase):
def _verifyCholeskyBase(self, sess, x, chol, verification):
chol_np, verification_np = sess.run([chol, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, chol)
# Check that the cholesky is lower triangular, and has positive diagonal
# elements.
if chol_np.shape[-1] > 0:
chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
chol_np.shape[-1]))
for chol_matrix in chol_reshaped:
self.assertAllClose(chol_matrix, np.tril(chol_matrix))
self.assertTrue((np.diag(chol_matrix) > 0.0).all())
def _verifyCholesky(self, x):
# Verify that LL^T == x.
with self.test_session(use_gpu=True) as sess:
chol = linalg_ops.cholesky(x)
verification = math_ops.matmul(chol, chol, adjoint_b=True)
self._verifyCholeskyBase(sess, x, chol, verification)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
for dtype in (np.float32, np.float64):
self._verifyCholesky(data.astype(dtype))
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyCholesky(complex_data)
def testBatch(self):
simple_array = np.array([[[1., 0.], [0., 5.]]]) # shape (1, 2, 2)
self._verifyCholesky(simple_array)
self._verifyCholesky(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
# Generate random positive-definite matrices.
matrices = np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T, matrices[i])
self._verifyCholesky(matrices)
# Generate random complex valued positive-definite matrices.
matrices = np.random.rand(10, 5, 5) + 1j * np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T.conj(), matrices[i])
self._verifyCholesky(matrices)
def testNonSquareMatrix(self):
with self.assertRaises(ValueError):
linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
with self.assertRaises(ValueError):
linalg_ops.cholesky(
np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
]))
def testWrongDimensions(self):
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
def testNotInvertibleCPU(self):
# The input should be invertible.
with self.test_session(use_gpu=False):
with self.assertRaisesOpError(
"Cholesky decomposition was not successful. The"
" input might not be valid."):
# All rows of the matrix below add to zero
self._verifyCholesky(
np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1., 1.]]))
def testEmpty(self):
self._verifyCholesky(np.empty([0, 2, 2]))
self._verifyCholesky(np.empty([2, 0, 0]))
class CholeskyGradTest(test.TestCase):
_backprop_block_size = 32
def getShapes(self, shapeList):
return ((elem, int(np.floor(1.2 * elem))) for elem in shapeList)
def testSmallMatrices(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32, dtypes_lib.float64))
def testSmallMatricesComplex(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64, dtypes_lib.complex128))
def testOneBlockMatrices(self):
np.random.seed(0)
shapes = self.getShapes([self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64),
scalarTest=True)
def testTwoBlockMatrixFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32,), scalarTest=True)
def testTwoBlockMatrixDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float64,), scalarTest=True)
def testTwoBlockMatrixComplexFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64,), scalarTest=True)
def testTwoBlockMatrixComplexDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex128,), scalarTest=True)
def testAgainstSpecialized(self):
np.random.seed(0)
data = np.random.randn(33, 33).astype(np.float32)
data = np.matmul(data, data.T)
grad_data = np.random.randn(*data.shape).astype(np.float32)
with ops.Graph().as_default(), self.test_session(use_gpu=False) as s:
x = constant_op.constant(data, dtypes_lib.float32)
chol = linalg_ops.cholesky(x)
composite_grad = gradients_impl.gradients(chol, x, grad_data)[0]
specialized_grad = SpecializedGrad(chol, grad_data)
reference, actual = s.run([specialized_grad, composite_grad])
self.assertAllClose(reference, actual)
def runFiniteDifferences(self,
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128),
scalarTest=False):
with self.test_session(use_gpu=True):
for shape in shapes:
for batch in False, True:
for dtype in dtypes:
if not scalarTest:
data = np.random.randn(shape[0], shape[1])
if dtype.is_complex:
data = data.astype(np.complex64)
data += 1j * np.random.randn(shape[0], shape[1])
x = constant_op.constant(data, dtype)
tensor = math_ops.matmul(
x, math_ops.conj(array_ops.transpose(x))) / shape[0]
else:
# This is designed to be a faster test for larger matrices.
data = np.random.randn()
if dtype.is_complex:
data = np.complex64(data)
data += 1j * np.random.randn()
x = constant_op.constant(data, dtype)
R = constant_op.constant(
np.random.randn(shape[0], shape[1]), dtype)
e = math_ops.multiply(R, x)
tensor = math_ops.matmul(
e, math_ops.conj(array_ops.transpose(e))) / shape[0]
# Inner-most matrices in tensor are positive definite.
if batch:
tensor = array_ops.tile(
array_ops.expand_dims(tensor, 0), [4, 1, 1])
y = linalg_ops.cholesky(tensor)
if scalarTest:
y = math_ops.reduce_mean(y)
error = gradient_checker.compute_gradient_error(
x, x._shape_as_list(), y, y._shape_as_list())
tf_logging.info("error = %f", error)
if dtype == dtypes_lib.float64:
self.assertLess(error, 1e-5)
elif dtype == dtypes_lib.complex128:
self.assertLess(error, 5e-5)
else:
self.assertLess(error, 5e-3)
class CholeskyBenchmark(test.Benchmark):
sizes = [
(4, 4), (16, 16), (256, 256), (1024, 1024), (2048, 2048),
(513, 2, 2), (513, 8, 8), (4, 513, 2, 2)
]
def _GenerateData(self, size):
batch_shape = size[:-2]
size = size[-2:]
assert size[0] == size[1]
n = size[0]
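    # The data below is an all-ones matrix scaled by 1/(2n) plus the identity;
    # it is symmetric and strictly diagonally dominant with a positive
    # diagonal, hence positive definite and a valid Cholesky input.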
data = np.ones(size).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
return np.tile(data, batch_shape + (1, 1))
def benchmarkCholeskyOp(self):
for size in self.sizes:
data = self._GenerateData(size)
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/cpu:0"):
l = linalg_ops.cholesky(data)
self.run_op_benchmark(
sess, control_flow_ops.group(l,),
min_iters=25,
name="cholesky_cpu_{size}".format(size=size))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device("/device:GPU:0"):
l = linalg_ops.cholesky(data)
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_gpu_{size}".format(size=size))
def benchmarkGradVariants(self):
def _BenchmarkGrad(grad_fn, name, device):
for size in self.sizes:
data = self._GenerateData(size)
l = np.linalg.cholesky(data)
grad_data = np.random.randn(*data.shape).astype(np.float32)
with ops.Graph().as_default(), \
session.Session() as sess, \
ops.device(device):
grad = grad_fn(l, grad_data)
self.run_op_benchmark(
sess, control_flow_ops.group(grad,),
min_iters=25,
name="{name}_{dev}_{size}".format(
name=name, dev=grad.device, size=size))
if test.is_gpu_available(True):
_BenchmarkGrad(
MatrixInverseCompositeGrad, "composite_matrix_inverse", "/device:GPU:0")
_BenchmarkGrad(
TriAngInvCompositeGrad, "composite_tri_ang_inverse", "/device:GPU:0")
_BenchmarkGrad(
TriAngSolveCompositeGrad, "composite_triangular_solve", "/device:GPU:0")
_BenchmarkGrad(
MatrixInverseCompositeGrad, "composite_matrix_inverse", "/cpu:0")
_BenchmarkGrad(
TriAngInvCompositeGrad, "composite_tri_ang_inverse", "/cpu:0")
_BenchmarkGrad(
TriAngSolveCompositeGrad, "composite_triangular_solve", "/cpu:0")
_BenchmarkGrad(SpecializedGrad, "specialized", "/cpu:0")
if __name__ == "__main__":
test.main()
|
|
import re
from django.views.generic import View
from django.shortcuts import get_object_or_404
from django.core.files.base import ContentFile
from django.utils import timezone
from .settings import MAX_BYTES
from .models import ChunkedUpload
from .response import Response
from .constants import http_status, COMPLETE
from .exceptions import ChunkedUploadError
class ChunkedUploadBaseView(View):
"""
Base view for the rest of chunked upload views.
"""
# Has to be a ChunkedUpload subclass
model = ChunkedUpload
def get_queryset(self, request):
"""
Get (and filter) ChunkedUpload queryset.
By default, users can only continue uploading their own uploads.
"""
queryset = self.model.objects.all()
if hasattr(request, 'user') and request.user.is_authenticated():
queryset = queryset.filter(user=request.user)
return queryset
def validate(self, request):
"""
Placeholder method to define extra validation.
Must raise ChunkedUploadError if validation fails.
"""
def get_response_data(self, chunked_upload, request):
"""
Data for the response. Should return a dictionary-like object.
Called *only* if POST is successful.
"""
return {}
def pre_save(self, chunked_upload, request, new=False):
"""
Placeholder method for calling before saving an object.
May be used to set attributes on the object that are implicit
in either the request, or the url.
"""
def save(self, chunked_upload, request, new=False):
"""
        Method that calls save(). Overriding may be useful if save() needs
        special args or kwargs.
"""
chunked_upload.save()
def post_save(self, chunked_upload, request, new=False):
"""
Placeholder method for calling after saving an object.
"""
def _save(self, chunked_upload):
"""
Wraps save() method.
"""
new = chunked_upload.id is None
self.pre_save(chunked_upload, self.request, new=new)
self.save(chunked_upload, self.request, new=new)
self.post_save(chunked_upload, self.request, new=new)
def check_permissions(self, request):
"""
Grants permission to start/continue an upload based on the request.
"""
if hasattr(request, 'user') and not request.user.is_authenticated():
raise ChunkedUploadError(
status=http_status.HTTP_403_FORBIDDEN,
detail='Authentication credentials were not provided'
)
def _post(self, request, *args, **kwargs):
raise NotImplementedError
def post(self, request, *args, **kwargs):
"""
Handle POST requests.
"""
try:
self.check_permissions(request)
return self._post(request, *args, **kwargs)
except ChunkedUploadError as error:
return Response(error.data, status=error.status_code)
class ChunkedUploadView(ChunkedUploadBaseView):
"""
Uploads large files in multiple chunks. Also, has the ability to resume
if the upload is interrupted.
"""
field_name = 'file'
content_range_header = 'HTTP_CONTENT_RANGE'
content_range_pattern = re.compile(
r'^bytes (?P<start>\d+)-(?P<end>\d+)/(?P<total>\d+)$'
)
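    # For illustration (an assumed example header): a Content-Range value of
    # "bytes 0-999/10000" matches the pattern above with start=0, end=999 and
    # total=10000.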
max_bytes = MAX_BYTES # Max amount of data that can be uploaded
# If `fail_if_no_header` is True, an exception will be raised if the
    # content-range header is not found. Default is False to match jQuery File
    # Upload behavior (it doesn't send the header if the file is smaller than the chunk)
fail_if_no_header = False
def get_extra_attrs(self, request):
"""
Extra attribute values to be passed to the new ChunkedUpload instance.
Should return a dictionary-like object.
"""
return {}
def get_max_bytes(self, request):
"""
Used to limit the max amount of data that can be uploaded. `None` means
no limit.
You can override this to have a custom `max_bytes`, e.g. based on
logged user.
"""
return self.max_bytes
def create_chunked_upload(self, save=False, **attrs):
"""
Creates new chunked upload instance. Called if no 'upload_id' is
found in the POST data.
"""
chunked_upload = self.model(**attrs)
# file starts empty
chunked_upload.file.save(name='', content=ContentFile(''), save=save)
return chunked_upload
def is_valid_chunked_upload(self, chunked_upload):
"""
Check if chunked upload has already expired or is already complete.
"""
if chunked_upload.expired:
raise ChunkedUploadError(status=http_status.HTTP_410_GONE,
detail='Upload has expired')
error_msg = 'Upload has already been marked as "%s"'
if chunked_upload.status == COMPLETE:
raise ChunkedUploadError(status=http_status.HTTP_400_BAD_REQUEST,
detail=error_msg % 'complete')
def get_response_data(self, chunked_upload, request):
"""
Data for the response. Should return a dictionary-like object.
"""
return {
'upload_id': chunked_upload.upload_id,
'offset': chunked_upload.offset,
'expires': chunked_upload.expires_on
}
def _post(self, request, *args, **kwargs):
chunk = request.FILES.get(self.field_name)
if chunk is None:
raise ChunkedUploadError(status=http_status.HTTP_400_BAD_REQUEST,
detail='No chunk file was submitted')
self.validate(request)
upload_id = request.POST.get('upload_id')
if upload_id:
chunked_upload = get_object_or_404(self.get_queryset(request),
upload_id=upload_id)
self.is_valid_chunked_upload(chunked_upload)
else:
attrs = {'filename': chunk.name}
if hasattr(request, 'user') and request.user.is_authenticated():
attrs['user'] = request.user
attrs.update(self.get_extra_attrs(request))
chunked_upload = self.create_chunked_upload(save=False, **attrs)
content_range = request.META.get(self.content_range_header, '')
match = self.content_range_pattern.match(content_range)
if match:
start = int(match.group('start'))
end = int(match.group('end'))
total = int(match.group('total'))
elif self.fail_if_no_header:
raise ChunkedUploadError(status=http_status.HTTP_400_BAD_REQUEST,
detail='Error in request headers')
else:
# Use the whole size when HTTP_CONTENT_RANGE is not provided
start = 0
end = chunk.size - 1
total = chunk.size
chunk_size = end - start + 1
max_bytes = self.get_max_bytes(request)
if max_bytes is not None and total > max_bytes:
raise ChunkedUploadError(
status=http_status.HTTP_400_BAD_REQUEST,
detail='Size of file exceeds the limit (%s bytes)' % max_bytes
)
if chunked_upload.offset != start:
raise ChunkedUploadError(status=http_status.HTTP_400_BAD_REQUEST,
detail='Offsets do not match',
offset=chunked_upload.offset)
if chunk.size != chunk_size:
raise ChunkedUploadError(status=http_status.HTTP_400_BAD_REQUEST,
detail="File size doesn't match headers")
chunked_upload.append_chunk(chunk, chunk_size=chunk_size, save=False)
self._save(chunked_upload)
return Response(self.get_response_data(chunked_upload, request),
status=http_status.HTTP_200_OK)
class ChunkedUploadCompleteView(ChunkedUploadBaseView):
"""
    Completes a chunked upload. Method `on_completion` is a placeholder to
define what to do when upload is complete.
"""
    # I wouldn't recommend turning off the md5 check unless it is really
    # impacting your performance. Proceed at your own risk.
do_md5_check = True
def on_completion(self, uploaded_file, request):
"""
Placeholder method to define what to do when upload is complete.
"""
def is_valid_chunked_upload(self, chunked_upload):
"""
Check if chunked upload is already complete.
"""
if chunked_upload.status == COMPLETE:
error_msg = "Upload has already been marked as complete"
            raise ChunkedUploadError(status=http_status.HTTP_400_BAD_REQUEST,
                                     detail=error_msg)
def md5_check(self, chunked_upload, md5):
"""
Verify if md5 checksum sent by client matches generated md5.
"""
if chunked_upload.md5 != md5:
raise ChunkedUploadError(status=http_status.HTTP_400_BAD_REQUEST,
detail='md5 checksum does not match')
def _post(self, request, *args, **kwargs):
upload_id = request.POST.get('upload_id')
md5 = request.POST.get('md5')
error_msg = None
if self.do_md5_check:
if not upload_id or not md5:
error_msg = "Both 'upload_id' and 'md5' are required"
elif not upload_id:
error_msg = "'upload_id' is required"
if error_msg:
raise ChunkedUploadError(status=http_status.HTTP_400_BAD_REQUEST,
detail=error_msg)
chunked_upload = get_object_or_404(self.get_queryset(request),
upload_id=upload_id)
self.validate(request)
self.is_valid_chunked_upload(chunked_upload)
if self.do_md5_check:
self.md5_check(chunked_upload, md5)
chunked_upload.status = COMPLETE
chunked_upload.completed_on = timezone.now()
self._save(chunked_upload)
self.on_completion(chunked_upload.get_uploaded_file(), request)
return Response(self.get_response_data(chunked_upload, request),
status=http_status.HTTP_200_OK)
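# A minimal usage sketch (assumed project code, not part of this module):
# subclass the views, point them at your ChunkedUpload model, and wire them
# into your URLconf.
#
#   class MyChunkedUploadView(ChunkedUploadView):
#       model = ChunkedUpload
#       field_name = 'the_file'
#
#   class MyChunkedUploadCompleteView(ChunkedUploadCompleteView):
#       model = ChunkedUpload
#
#       def on_completion(self, uploaded_file, request):
#           pass  # e.g. hand the uploaded file off for further processing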
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from config import KafkaConfig
from kafkatest.services.kafka import config_property
from kafkatest.services.kafka.version import TRUNK
from kafkatest.services.kafka.directory import kafka_dir, KAFKA_TRUNK
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.services.security.minikdc import MiniKdc
import json
import re
import signal
import subprocess
import time
import os.path
class KafkaService(JmxMixin, Service):
PERSISTENT_ROOT = "/mnt"
STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "kafka.log")
STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "kafka.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "kafka-log4j.properties")
# Logs such as controller.log, server.log, etc all go here
OPERATIONAL_LOG_DIR = os.path.join(PERSISTENT_ROOT, "kafka-operational-logs")
# Kafka log segments etc go here
DATA_LOG_DIR = os.path.join(PERSISTENT_ROOT, "kafka-data-logs")
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "kafka.properties")
logs = {
"kafka_operational_logs": {
"path": OPERATIONAL_LOG_DIR,
"collect_default": True},
"kafka_data": {
"path": DATA_LOG_DIR,
"collect_default": False}
}
def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,
sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, topics=None, version=TRUNK, quota_config=None, jmx_object_names=None, jmx_attributes=[]):
"""
:type context
:type zk: ZookeeperService
:type topics: dict
"""
Service.__init__(self, context, num_nodes)
JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes)
self.log_level = "DEBUG"
self.zk = zk
self.quota_config = quota_config
self.security_protocol = security_protocol
self.interbroker_security_protocol = interbroker_security_protocol
self.sasl_mechanism = sasl_mechanism
self.topics = topics
self.minikdc = None
for node in self.nodes:
node.version = version
node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})
@property
def security_config(self):
return SecurityConfig(self.security_protocol, self.interbroker_security_protocol, sasl_mechanism=self.sasl_mechanism)
def start(self):
if self.security_config.has_sasl_kerberos:
if self.minikdc is None:
self.minikdc = MiniKdc(self.context, self.nodes)
self.minikdc.start()
Service.start(self)
# Create topics if necessary
if self.topics is not None:
for topic, topic_cfg in self.topics.items():
if topic_cfg is None:
topic_cfg = {}
topic_cfg["topic"] = topic
self.create_topic(topic_cfg)
def prop_file(self, node):
cfg = KafkaConfig(**node.config)
cfg[config_property.ADVERTISED_HOSTNAME] = node.account.hostname
cfg[config_property.ZOOKEEPER_CONNECT] = self.zk.connect_setting()
# TODO - clean up duplicate configuration logic
prop_file = cfg.render()
prop_file += self.render('kafka.properties', node=node, broker_id=self.idx(node),
security_config=self.security_config,
interbroker_security_protocol=self.interbroker_security_protocol,
sasl_mechanism=self.sasl_mechanism)
return prop_file
def start_cmd(self, node):
cmd = "export JMX_PORT=%d; " % self.jmx_port
cmd += "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG
cmd += "export LOG_DIR=%s; " % KafkaService.OPERATIONAL_LOG_DIR
cmd += "export KAFKA_OPTS=%s; " % self.security_config.kafka_opts
cmd += "/opt/" + kafka_dir(node) + "/bin/kafka-server-start.sh %s 1>> %s 2>> %s &" % (KafkaService.CONFIG_FILE, KafkaService.STDOUT_CAPTURE, KafkaService.STDERR_CAPTURE)
return cmd
def start_node(self, node):
prop_file = self.prop_file(node)
self.logger.info("kafka.properties:")
self.logger.info(prop_file)
node.account.create_file(KafkaService.CONFIG_FILE, prop_file)
node.account.create_file(self.LOG4J_CONFIG, self.render('log4j.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR))
self.security_config.setup_node(node)
cmd = self.start_cmd(node)
self.logger.debug("Attempting to start KafkaService on %s with command: %s" % (str(node.account), cmd))
with node.account.monitor_log(KafkaService.STDOUT_CAPTURE) as monitor:
node.account.ssh(cmd)
monitor.wait_until("Kafka Server.*started", timeout_sec=30, err_msg="Kafka server didn't finish startup")
self.start_jmx_tool(self.idx(node), node)
if len(self.pids(node)) == 0:
raise Exception("No process ids recorded on node %s" % str(node))
def pids(self, node):
"""Return process ids associated with running processes on the given node."""
try:
cmd = "ps ax | grep -i kafka | grep java | grep -v grep | awk '{print $1}'"
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (subprocess.CalledProcessError, ValueError) as e:
return []
def signal_node(self, node, sig=signal.SIGTERM):
pids = self.pids(node)
for pid in pids:
node.account.signal(pid, sig)
def signal_leader(self, topic, partition=0, sig=signal.SIGTERM):
leader = self.leader(topic, partition)
self.signal_node(leader, sig)
def stop_node(self, node, clean_shutdown=True):
pids = self.pids(node)
sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
for pid in pids:
node.account.signal(pid, sig, allow_fail=False)
wait_until(lambda: len(self.pids(node)) == 0, timeout_sec=20, err_msg="Kafka node failed to stop")
def clean_node(self, node):
JmxMixin.clean_node(self, node)
self.security_config.clean_node(node)
node.account.kill_process("kafka", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf /mnt/*", allow_fail=False)
def create_topic(self, topic_cfg, node=None):
"""Run the admin tool create topic command.
        Specifying node is optional, and may be done if different kafka nodes have different versions,
        and we care where the command gets run.
If the node is not specified, run the command from self.nodes[0]
"""
if node is None:
node = self.nodes[0]
self.logger.info("Creating topic %s with settings %s", topic_cfg["topic"], topic_cfg)
cmd = "/opt/%s/bin/kafka-topics.sh " % kafka_dir(node)
cmd += "--zookeeper %(zk_connect)s --create --topic %(topic)s --partitions %(partitions)d --replication-factor %(replication)d" % {
'zk_connect': self.zk.connect_setting(),
'topic': topic_cfg.get("topic"),
'partitions': topic_cfg.get('partitions', 1),
'replication': topic_cfg.get('replication-factor', 1)
}
if "configs" in topic_cfg.keys() and topic_cfg["configs"] is not None:
for config_name, config_value in topic_cfg["configs"].items():
cmd += " --config %s=%s" % (config_name, str(config_value))
self.logger.info("Running topic creation command...\n%s" % cmd)
node.account.ssh(cmd)
time.sleep(1)
self.logger.info("Checking to see if topic was properly created...\n%s" % cmd)
for line in self.describe_topic(topic_cfg["topic"]).split("\n"):
self.logger.info(line)
def describe_topic(self, topic, node=None):
if node is None:
node = self.nodes[0]
cmd = "/opt/%s/bin/kafka-topics.sh --zookeeper %s --topic %s --describe" % \
(kafka_dir(node), self.zk.connect_setting(), topic)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
return output
def verify_reassign_partitions(self, reassignment, node=None):
"""Run the reassign partitions admin tool in "verify" mode
"""
if node is None:
node = self.nodes[0]
json_file = "/tmp/%s_reassign.json" % str(time.time())
# reassignment to json
json_str = json.dumps(reassignment)
json_str = json.dumps(json_str)
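        # The second dumps() call is presumably there so the JSON payload is
        # quoted and escaped as a single shell argument for the `echo` below.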
# create command
cmd = "echo %s > %s && " % (json_str, json_file)
cmd += "/opt/%s/bin/kafka-reassign-partitions.sh " % kafka_dir(node)
cmd += "--zookeeper %s " % self.zk.connect_setting()
cmd += "--reassignment-json-file %s " % json_file
cmd += "--verify "
cmd += "&& sleep 1 && rm -f %s" % json_file
# send command
self.logger.info("Verifying parition reassignment...")
self.logger.debug(cmd)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
self.logger.debug(output)
if re.match(".*is in progress.*", output) is not None:
return False
return True
def execute_reassign_partitions(self, reassignment, node=None):
"""Run the reassign partitions admin tool in "verify" mode
"""
if node is None:
node = self.nodes[0]
json_file = "/tmp/%s_reassign.json" % str(time.time())
# reassignment to json
json_str = json.dumps(reassignment)
json_str = json.dumps(json_str)
# create command
cmd = "echo %s > %s && " % (json_str, json_file)
cmd += "/opt/%s/bin/kafka-reassign-partitions.sh " % kafka_dir(node)
cmd += "--zookeeper %s " % self.zk.connect_setting()
cmd += "--reassignment-json-file %s " % json_file
cmd += "--execute"
cmd += " && sleep 1 && rm -f %s" % json_file
# send command
self.logger.info("Executing parition reassignment...")
self.logger.debug(cmd)
output = ""
for line in node.account.ssh_capture(cmd):
output += line
self.logger.debug("Verify partition reassignment:")
self.logger.debug(output)
def restart_node(self, node, clean_shutdown=True):
"""Restart the given node."""
self.stop_node(node, clean_shutdown)
self.start_node(node)
def leader(self, topic, partition=0):
""" Get the leader replica for the given topic and partition.
"""
kafka_dir = KAFKA_TRUNK
cmd = "/opt/%s/bin/kafka-run-class.sh kafka.tools.ZooKeeperMainWrapper -server %s " %\
(kafka_dir, self.zk.connect_setting())
cmd += "get /brokers/topics/%s/partitions/%d/state" % (topic, partition)
self.logger.debug(cmd)
node = self.zk.nodes[0]
self.logger.debug("Querying zookeeper to find leader replica for topic %s: \n%s" % (cmd, topic))
partition_state = None
for line in node.account.ssh_capture(cmd):
# loop through all lines in the output, but only hold on to the first match
if partition_state is None:
match = re.match("^({.+})$", line)
if match is not None:
partition_state = match.groups()[0]
if partition_state is None:
raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
partition_state = json.loads(partition_state)
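        # For illustration, partition_state is typically a dict along the
        # lines of: {"leader": 2, "leader_epoch": 0, "isr": [2, 1, 3],
        # "controller_epoch": 1, "version": 1}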
self.logger.info(partition_state)
leader_idx = int(partition_state["leader"])
self.logger.info("Leader for topic %s and partition %d is now: %d" % (topic, partition, leader_idx))
return self.get_node(leader_idx)
def bootstrap_servers(self):
"""Return comma-delimited list of brokers in this cluster formatted as HOSTNAME1:PORT1,HOSTNAME:PORT2,...
This is the format expected by many config files.
"""
return ','.join([node.account.hostname + ":9092" for node in self.nodes])
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the static tf.data optimizations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
def _captured_refvar_test_combinations():
def make_map_dataset(var):
return dataset_ops.Dataset.from_tensors(0).map(lambda x: x + var)
def make_flat_map_dataset(var):
return dataset_ops.Dataset.from_tensors(
0).flat_map(lambda _: dataset_ops.Dataset.from_tensors(var))
def make_filter_dataset(var):
return dataset_ops.Dataset.from_tensors(0).filter(lambda x: x < var)
def make_map_and_batch_dataset(var):
def map_fn(x):
return x + var
return dataset_ops.Dataset.from_tensors(0).apply(
batching.map_and_batch(map_fn, 1))
def make_group_by_reducer_dataset(var):
reducer = grouping.Reducer(
init_func=lambda _: 0,
reduce_func=lambda x, y: x,
finalize_func=lambda _: var)
return dataset_ops.Dataset.range(5).apply(
grouping.group_by_reducer(lambda x: x % 2, reducer))
def make_group_by_window_dataset(var):
def reduce_fn(key, bucket):
del key, bucket
return dataset_ops.Dataset.from_tensors(var)
return dataset_ops.Dataset.from_tensors(0).repeat(10).apply(
grouping.group_by_window(lambda _: 0, reduce_fn, 10))
def make_scan_dataset(var):
return dataset_ops.Dataset.from_tensors(0).apply(
scan_ops.scan(
0, lambda old_state, elem: (old_state + 1, elem + old_state + var)))
cases = [
# Core datasets
("Map", make_map_dataset),
("FlatMap", make_flat_map_dataset),
("Filter", make_filter_dataset),
# Experimental datasets
("MapAndBatch", make_map_and_batch_dataset),
("GroupByReducer", make_group_by_reducer_dataset),
("GroupByWindow", make_group_by_window_dataset),
("Scan", make_scan_dataset)
]
def reduce_fn(x, y):
name, dataset_fn = y
return x + combinations.combine(
dataset_fn=combinations.NamedObject(name, dataset_fn))
return functools.reduce(reduce_fn, cases, [])
class OptimizationTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testOptimizationStatefulFunction(self):
dataset = dataset_ops.Dataset.range(
10).map(lambda _: random_ops.random_uniform([])).batch(10)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
self.evaluate(get_next())
# TODO(b/123354468)
@combinations.generate(test_base.graph_only_combinations())
def testOptimizationLargeInputFromTensor(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None))
dataset = dataset_ops.Dataset.from_tensors(input_t)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {input_t: np.ones([512, 1024, 1025], np.int32)})
self.evaluate(get_next)
# TODO(b/123354468)
@combinations.generate(test_base.graph_only_combinations())
def testOptimizationLargeInputFromTensorSlices(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None, None))
dataset = dataset_ops.Dataset.from_tensor_slices(input_t)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {input_t: np.ones([1, 512, 1024, 1025], np.int32)})
self.evaluate(get_next)
@combinations.generate(test_base.default_test_combinations())
def testOptimizationNestedDataset(self):
def flat_map_fn(_):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(testing.assert_next(["MemoryCacheImpl"]))
dataset = dataset.skip(0) # Should be removed by noop elimination
dataset = dataset.cache()
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.flat_map(flat_map_fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[0])
@combinations.generate(test_base.default_test_combinations())
def testOptimizationNestedDatasetWithModifiedRetval(self):
def flat_map_fn(_):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(testing.assert_next(["MapAndBatch"]))
# Should be fused by map and batch fusion
dataset = dataset.map(lambda x: x)
dataset = dataset.batch(1)
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.flat_map(flat_map_fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[[0]])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(autotune=False, autotune_buffers=False) +
combinations.combine(autotune=True, autotune_buffers=False) +
combinations.combine(autotune=True, autotune_buffers=True),
combinations.combine(set_env=[False, True])))
def testOptimizationEnableGradientDescent(self, autotune, autotune_buffers,
set_env):
if set_env:
os.environ["TF_DATA_EXPERIMENT_OPT_IN"] = "enable_gradient_descent"
os.environ["TF_JOB_NAME"] = "test_job"
dataset = dataset_ops.Dataset.range(5)
dataset = dataset.prefetch(buffer_size=-1)
dataset = dataset.map(lambda x: x + 1, num_parallel_calls=2)
dataset = dataset.map(lambda x: x + 1, num_parallel_calls=-1)
dataset = dataset.prefetch(buffer_size=3)
dataset = dataset.map(lambda x: x + 1, num_parallel_calls=-1)
dataset = dataset.prefetch(buffer_size=1)
options = dataset_ops.Options()
options.experimental_optimization.autotune = autotune
options.experimental_optimization.autotune_buffers = autotune_buffers
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=list(range(3, 8)))
if set_env:
del os.environ["TF_DATA_EXPERIMENT_OPT_IN"]
del os.environ["TF_JOB_NAME"]
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(autotune=[True, False, None]),
combinations.combine(map_parallelization=[True, False, None])))
def testOptimizationMapParallelization(self, autotune, map_parallelization):
dataset = dataset_ops.Dataset.range(5)
if autotune is not False and map_parallelization is not False: # pylint: disable=g-bool-id-comparison
dataset = dataset.apply(testing.assert_next(["ParallelMap"]))
else:
dataset = dataset.apply(testing.assert_next(["Map"]))
dataset = dataset.map(lambda x: x + 1)
options = dataset_ops.Options()
if autotune is not None:
options.experimental_optimization.autotune = autotune
if map_parallelization is not None:
options.experimental_optimization.map_parallelization = (
map_parallelization)
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=list(range(1, 6)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(autotune=False, autotune_buffers=False) +
combinations.combine(autotune=True, autotune_buffers=False) +
combinations.combine(autotune=True, autotune_buffers=True),
combinations.combine(first_buffer_sizes=[(1, -1, -1, 4),
(2, -1, 3, -1),
(2, 1, -1, -1)]),
combinations.combine(second_buffer_sizes=[(1, -1, -1, 4),
(2, -1, 3, -1),
(2, 1, -1, -1)]))
)
def testOptimizationAutotuneBuffers(self, autotune, autotune_buffers,
first_buffer_sizes, second_buffer_sizes):
dataset = dataset_ops.Dataset.range(10)
for buffer_size in first_buffer_sizes:
dataset = dataset.prefetch(buffer_size=buffer_size)
dataset = dataset.map(lambda x: x + 1)
for buffer_size in second_buffer_sizes:
dataset = dataset.prefetch(buffer_size=buffer_size)
options = dataset_ops.Options()
options.experimental_optimization.autotune = autotune
options.experimental_optimization.autotune_buffers = autotune_buffers
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=list(range(1, 11)))
# Reference variables are not supported in eager mode.
@combinations.generate(
combinations.times(test_base.graph_only_combinations(),
_captured_refvar_test_combinations()))
def testOptimizationWithCapturedRefVar(self, dataset_fn):
"""Tests that default optimizations are disabled with ref variables."""
variable = variable_scope.get_variable(
"v", initializer=0, use_resource=False)
assign_op = variable.assign_add(1)
unoptimized_dataset = dataset_fn(variable)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.map_and_batch_fusion = True
optimized_dataset = unoptimized_dataset.with_options(options)
optimized_it = dataset_ops.make_initializable_iterator(optimized_dataset)
# Check that outputs are the same in the optimized and unoptimized cases,
# when the variable value is changing.
unoptimized_it = dataset_ops.make_initializable_iterator(
unoptimized_dataset)
with ops.control_dependencies([assign_op]):
unoptimized_output = unoptimized_it.get_next()
optimized_output = optimized_it.get_next()
self.evaluate(variable.initializer)
self.evaluate((unoptimized_it.initializer, optimized_it.initializer))
while True:
try:
unoptimized, optimized = self.evaluate((unoptimized_output,
optimized_output))
self.assertEqual(unoptimized, optimized)
except errors.OutOfRangeError:
break
if __name__ == "__main__":
test.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Network definition of 3D ResNet for Action Recognition (CVPR 2018)
Reference : https://github.com/kenshohara/3D-ResNets-PyTorch
"""
# pylint: disable=unused-argument
from tvm import relay
from .init import create_workload
from . import layers
def residual_unit(
data,
num_filter,
stride,
dim_match,
name,
bottle_neck=True,
data_layout="NCDHW",
kernel_layout="OIDHW",
):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
stride : tuple
Stride used in convolution
dim_match : bool
True means channel number between input and output is the same,
otherwise means differ
name : str
Base name of the operators
"""
if bottle_neck:
bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, name=name + "_bn1")
act1 = relay.nn.relu(data=bn1)
conv1 = layers.conv3d(
data=act1,
channels=int(num_filter * 0.25),
kernel_size=(1, 1, 1),
strides=stride,
padding=(0, 0, 0),
name=name + "_conv1",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn2 = layers.batch_norm_infer(data=conv1, epsilon=2e-5, name=name + "_bn2")
act2 = relay.nn.relu(data=bn2)
conv2 = layers.conv3d(
data=act2,
channels=int(num_filter * 0.25),
kernel_size=(3, 3, 3),
strides=(1, 1, 1),
padding=(1, 1, 1),
name=name + "_conv2",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn3 = layers.batch_norm_infer(data=conv2, epsilon=2e-5, name=name + "_bn3")
act3 = relay.nn.relu(data=bn3)
conv3 = layers.conv3d(
data=act3,
channels=num_filter,
kernel_size=(1, 1, 1),
strides=(1, 1, 1),
padding=(0, 0, 0),
name=name + "_conv3",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
if dim_match:
shortcut = data
else:
shortcut = layers.conv3d(
data=act1,
channels=num_filter,
kernel_size=(1, 1, 1),
strides=stride,
name=name + "_sc",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
return relay.add(conv3, shortcut)
bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, name=name + "_bn1")
act1 = relay.nn.relu(data=bn1)
conv1 = layers.conv3d(
data=act1,
channels=num_filter,
kernel_size=(3, 3, 3),
strides=stride,
padding=(1, 1, 1),
name=name + "_conv1",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn2 = layers.batch_norm_infer(data=conv1, epsilon=2e-5, name=name + "_bn2")
act2 = relay.nn.relu(data=bn2)
conv2 = layers.conv3d(
data=act2,
channels=num_filter,
kernel_size=(3, 3, 3),
strides=(1, 1, 1),
padding=(1, 1, 1),
name=name + "_conv2",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
if dim_match:
shortcut = data
else:
shortcut = layers.conv3d(
data=act1,
channels=num_filter,
kernel_size=(1, 1, 1),
strides=stride,
name=name + "_sc",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
return relay.add(conv2, shortcut)
def resnet(
units,
num_stages,
filter_list,
num_classes,
data_shape,
bottle_neck=True,
layout="NCDHW",
dtype="float32",
):
"""Return ResNet Program.
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stages
filter_list : list
Channel size of each stage
num_classes : int
        Output size of symbol
data_shape : tuple of int.
The shape of input data.
bottle_neck : bool
Whether apply bottleneck transformation.
layout: str
The data layout for conv3d
dtype : str
The global data type.
"""
data_layout = layout
kernel_layout = "OIDHW" if layout == "NCDHW" else "DHWIO"
num_unit = len(units)
assert num_unit == num_stages
data = relay.var("data", shape=data_shape, dtype=dtype)
data = layers.batch_norm_infer(data=data, epsilon=2e-5, scale=False, name="bn_data")
if layout == "NCDHW":
(_, _, _, height, _) = data_shape
else:
(_, _, height, _, _) = data_shape
if height <= 32: # such as cifar10
body = layers.conv3d(
data=data,
channels=filter_list[0],
kernel_size=(3, 3, 3),
strides=(1, 1, 1),
padding=(1, 1, 1),
name="conv0",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
else: # often expected to be 224 such as imagenet
body = layers.conv3d(
data=data,
channels=filter_list[0],
kernel_size=(3, 7, 7),
strides=(1, 2, 2),
padding=(1, 3, 3),
name="conv0",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
body = layers.batch_norm_infer(data=body, epsilon=2e-5, name="bn0")
body = relay.nn.relu(data=body)
# body = relay.nn.max_pool3d(data=body, pool_size=(3, 3), strides=(2, 2), padding=(1, 1),
# layout=data_layout)
for i in range(num_stages):
body = residual_unit(
body,
filter_list[i + 1],
(1 if i == 0 else 2, 1 if i == 0 else 2, 1 if i == 0 else 2),
False,
name="stage%d_unit%d" % (i + 1, 1),
bottle_neck=bottle_neck,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
for j in range(units[i] - 1):
body = residual_unit(
body,
filter_list[i + 1],
(1, 1, 1),
True,
name="stage%d_unit%d" % (i + 1, j + 2),
bottle_neck=bottle_neck,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn1 = layers.batch_norm_infer(data=body, epsilon=2e-5, name="bn1")
relu1 = relay.nn.relu(data=bn1)
# Although kernel is not used here when global_pool=True, we should put one
pool1 = relay.nn.global_avg_pool3d(data=relu1, layout=data_layout)
flat = relay.nn.batch_flatten(data=pool1)
fc1 = layers.dense_add_bias(data=flat, units=num_classes, name="fc1")
net = relay.nn.softmax(data=fc1)
return relay.Function(relay.analysis.free_vars(net), net)
def get_net(
batch_size,
num_classes,
num_layers=50,
image_shape=(3, 16, 112, 112),
layout="NCDHW",
dtype="float32",
**kwargs,
):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
if layout == "NCDHW":
(_, _, height, _) = image_shape
else:
(_, height, _, _) = image_shape
data_shape = (batch_size,) + image_shape
if height <= 28:
num_stages = 3
if (num_layers - 2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers - 2) // 9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers - 2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers - 2) // 6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}".format(num_layers))
return resnet(
units=units,
num_stages=num_stages,
filter_list=filter_list,
num_classes=num_classes,
data_shape=data_shape,
bottle_neck=bottle_neck,
layout=layout,
dtype=dtype,
)
def get_workload(
batch_size=1,
num_classes=1000,
num_layers=18,
image_shape=(3, 16, 112, 112),
layout="NCDHW",
dtype="float32",
**kwargs,
):
"""Get benchmark workload for resnet
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
num_layers : int, optional
Number of layers
image_shape : tuple, optional
The input image shape
layout: str
The data layout for conv3d
dtype : str, optional
The data type
kwargs : dict
Extra arguments
Returns
-------
mod : tvm.IRModule
The relay module that contains a ResNet network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(
batch_size=batch_size,
num_classes=num_classes,
num_layers=num_layers,
image_shape=image_shape,
dtype=dtype,
layout=layout,
**kwargs,
)
return create_workload(net)
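# A minimal usage sketch (not part of the original module): build the Relay
# module and parameters for an assumed 18-layer 3D ResNet.
#
#   mod, params = get_workload(batch_size=1, num_classes=400, num_layers=18)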
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2011 Christopher D. Lasher
#
# This software is released under the MIT License. Please see
# LICENSE.txt for details.
"""Markov chains for BPN."""
import math
import random
import recorders
import states
import logging
logger = logging.getLogger('bpn.mcmcbpn.chains')
from defaults import (NUM_STEPS, BURN_IN, TRANSITION_TYPE_RATIO,
BROADCAST_PERCENT, SUPERDEBUG, SUPERDEBUG_MODE)
class MarkovChain(object):
pass
class PLNMarkovChain(MarkovChain):
"""A class representing the Markov chain for process linkage
networks.
"""
def __init__(
self,
state_recorder,
burn_in,
num_steps,
annotated_interactions,
active_gene_threshold,
transition_type_ratio=TRANSITION_TYPE_RATIO,
seed_links=None,
link_false_pos=None,
link_false_neg=None,
link_prior=None,
parameters_state_class=states.PLNParametersState
):
"""Create a new instance.
:Parameters:
- `state_recorder`: a `PLNStateRecorder` instance, used to
record the steps taken in the chain
        - `burn_in`: the number of steps to take before recording state
          information about the Markov chain (state records are
          discarded until the burn-in is complete)
- `num_steps`: the number of steps to take in the Markov chain
- `annotated_interactions`: an `AnnotatedInteractionsGraph`
instance
- `active_gene_threshold`: the threshold at or above which a
gene is considered "active"
- `transition_type_ratio`: a `float` indicating the ratio of link
transitions to parameter transitions
- `seed_links`: a user-defined seed of links to start as
selected
- `link_false_pos`: the false-positive rate for links, the
portion of gene-gene interactions which were included, but
shouldn't have been
- `link_false_neg`: the false-negative rate for links, the
portion of gene-gene interactions which weren't included, but
should have been
- `link_prior`: the assumed probability we would pick any one
link as being active; see `PLNParametersState` for more
information
        - `parameters_state_class`: the class of the parameters state to
          use [default: `states.PLNParametersState`]
"""
self.state_recorder = state_recorder
self.current_state = states.PLNOverallState(
annotated_interactions,
active_gene_threshold,
transition_type_ratio,
seed_links,
link_false_pos,
link_false_neg,
link_prior,
parameters_state_class
)
self.burn_in_steps = burn_in
self.num_steps = num_steps
self.burn_in_period = True
# This attribute will keep track of how we transition through
# the Markov chain by storing a tuple for the previous
# transition. The first item for the transition information is
# the type of transition performed, which is obtained from the
# PLNOverallState._delta attribute's key. The second item of the
# tuple is a floating point value representing the log of the
# transition ratio computed in calc_log_transition_ratio(). The
# third item in the tuple is either `True`, representing that
# the transition was rejected, or `False`, representing that the
# transition was accepted.
self.last_transition_info = None
self.current_step = None
def calc_log_transition_ratio(self, proposed_state):
"""Calculates the likelihoods of the current state, the proposed
state, and the transition ratio between them.
Returns the current state's log-likelihood, the proposed state's
log-likelihood, and the transition ratio between the current and
proposed states.
:Parameters:
- `proposed_state`: the proposed state of transition
"""
current_log_likelihood = (
self.current_state.calc_log_likelihood())
current_num_neighbors = (
self.current_state.calc_num_neighboring_states())
proposed_log_likelihood = (
proposed_state.calc_log_likelihood())
proposed_num_neighbors = (
proposed_state.calc_num_neighboring_states())
log_transition_ratio = (
(proposed_log_likelihood -
math.log10(current_num_neighbors)) -
(current_log_likelihood -
math.log10(proposed_num_neighbors))
)
return (current_log_likelihood, proposed_log_likelihood,
log_transition_ratio)
def next_state(self):
"""Move to the next state in the Markov chain.
This method creates a proposed state for a transition; it then
assesses the "fitness" of this new state by comparing the
likelihood of the proposed state to the likelihood of the
current state as a (log of the) ratio of the two likelihoods.
If this ratio is greater than 1 (i.e., the log of the ratio is
positive), we accept the proposed state and transition to it. If,
instead, the ratio is less than 1 (i.e., the log of the ratio is
negative), we flip a rejection coin: we draw a rejection
probability uniformly at random and, if the transition ratio
still exceeds it, we accept the less likely proposed state anyway
(a feature which allows us to escape local maxima); otherwise we
reject the proposed state and continue with the current state.
"""
proposed_state = self.current_state.create_new_state()
proposed_transition_type = proposed_state._delta[0]
(current_log_likelihood, proposed_log_likelihood,
log_transition_ratio) = self.calc_log_transition_ratio(
proposed_state)
logger.debug("Log of transition ratio: %s" % (
log_transition_ratio))
# Remember, a very favorable state has a likelihood close to 1
# and its log-likelihood close to 0 (i.e., very small negative
# value); a very unfavorable state has a likelihood close to 0,
# and a log-likelihood with a very negative value (large
# magnitude).
# If the proposed state is more likely than the current state,
# the proposed state's log-likelihood will be a smaller negative
# number than the current state's; thus log_transition_ratio
# will be a positive value. If the proposed state is less likely
# than the current state, the proposed state's log-likelihood
# will be a larger negative number than the current state's;
# thus the log_transition_ratio will be a negative value.
#
# If the proposed state is more likely, we want to go ahead and
# accept it.
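# A worked example of the sign logic (illustrative numbers only):
# when the current and proposed states have the same number of
# neighbors, the neighbor terms cancel, so a proposed
# log-likelihood of -10 against a current log-likelihood of -12
# gives log_transition_ratio = -10 - (-12) = 2, which is
# positive, and the proposed state is accepted outright.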
if log_transition_ratio > 0:
log_rejection_prob = None
self.current_state = proposed_state
logger.debug("Accepted proposed state.")
accepted = True
log_state_likelihood = proposed_log_likelihood
else:
# Otherwise, the proposed state is equally or less likely
# than the current state, however, we may still choose it,
# even though this may be unfavorable, to avoid local
# maxima. We will do this by drawing a number uniformly at
# random in the range [0, 1], as the probability we will
# reject the state. If the log-transition ratio is greater
# than the log of the rejection probability, we'll still
# accept the less favorable proposed state.
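# For instance (illustrative numbers only): drawing 0.5 gives
# log_rejection_prob = log10(0.5), roughly -0.301, so a proposed
# state with a log transition ratio of -0.2 would still be
# accepted, while one with -0.5 would be rejected.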
log_rejection_prob = math.log10(random.random())
logger.debug("Log rejection probability: %s" % log_rejection_prob)
if log_transition_ratio > log_rejection_prob:
self.current_state = proposed_state
logger.debug("Accepted proposed state.")
accepted = True
log_state_likelihood = proposed_log_likelihood
else:
logger.debug("Rejected proposed state.")
accepted = False
log_state_likelihood = current_log_likelihood
logger.debug("Log of state likelihood: %s" % (
log_state_likelihood))
# In a special case, if this is the very first step of the
# actual recorded steps (i.e., the first step taken after the
# burn in period), we want to make sure that it's marked as
# "accepted" because this is the first time in the records that
# we will have seen this state (even if it was the last state
# seen in the burn-in)
if self.current_step == 0:
accepted = True
# NOTE: if changing the order of any of these items, please make
# sure to update code in recorders.py!!!
# In the future, consider using a named-tuple instead of a
# normal tuple for this data structure.
self.last_transition_info = (
proposed_transition_type,
log_transition_ratio,
log_state_likelihood,
accepted,
log_rejection_prob
)
def run(self):
"""Step through the states of the Markov chain.
Runs for a total number of iterations equal to `burn_in` +
`num_steps`.
"""
logger.info("Beginning burn-in of %d steps." %
self.burn_in_steps)
broadcast_percent_complete = 0
for i in xrange(self.burn_in_steps):
logger.debug("Burn-in step %d of %d" % (i + 1,
self.burn_in_steps))
self.next_state()
percent_complete = int(100 * float(i + 1) /
self.burn_in_steps)
if percent_complete >= (broadcast_percent_complete +
BROADCAST_PERCENT):
broadcast_percent_complete = percent_complete
logger.info("%d%% of burn-in complete." % (
percent_complete))
logger.info("Beginning run of %d steps." % self.num_steps)
self.burn_in_period = False
broadcast_percent_complete = 0
for i in xrange(self.num_steps):
self.current_step = i
logger.debug("Step %d of %d" % (i + 1, self.num_steps))
self.next_state()
self.state_recorder.record_state(self)
percent_complete = int(100 * float(i + 1) / self.num_steps)
if percent_complete >= (broadcast_percent_complete +
BROADCAST_PERCENT):
broadcast_percent_complete = percent_complete
logger.info("%d%% of steps complete." % (
percent_complete))
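# A minimal usage sketch for this class (hypothetical objects and
# threshold; the recorder and annotated-interactions instances
# would come from the recorders module and the BPN data-loading
# code, respectively):
#
#     recorder = recorders.PLNStateRecorder(...)
#     chain = PLNMarkovChain(
#         recorder,
#         burn_in=BURN_IN,
#         num_steps=NUM_STEPS,
#         annotated_interactions=annotated_interactions,
#         active_gene_threshold=0.05,
#     )
#     chain.run()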
class ArrayMarkovChain(PLNMarkovChain):
"""Similar to `PLNMarkovChain`, but using `numpy` arrays to track
state information.
"""
def __init__(
self,
state_recorder,
burn_in,
num_steps,
annotated_interactions,
active_gene_threshold,
transition_type_ratio=TRANSITION_TYPE_RATIO,
seed_links_indices=None,
link_false_pos=None,
link_false_neg=None,
link_prior=None,
parameters_state_class=states.PLNParametersState,
links_state_class=states.ArrayLinksState
):
"""Create a new instance.
:Parameters:
- `state_recorder`: an `ArrayStateRecorder` instance, used to
record the steps taken in the chain
- `burn_in`: the number of steps to take before recording state
information about the Markov chain (state records are
discarded until complete)
- `num_steps`: the number of steps to take in the Markov chain
- `annotated_interactions`: an `AnnotatedInteractionsArray`
instance
- `active_gene_threshold`: the threshold at or above which a
gene is considered "active"
- `transition_type_ratio`: a `float` indicating the ratio of link
transitions to parameter transitions
- `seed_links_indices`: a user-defined seed of indices to
links to start as selected
- `link_false_pos`: the false-positive rate for links, the
portion of gene-gene interactions which were included, but
shouldn't have been
- `link_false_neg`: the false-negative rate for links, the
portion of gene-gene interactions which weren't included, but
should have been
- `link_prior`: the assumed probability we would pick any one
link as being active; see `PLNParametersState` for more
information
- `parameters_state_class`: the class of the parameters state to
use [default: `states.PLNParametersState`]
- `links_state_class`: the class of the links state to use
[default: `states.ArrayLinksState`]
"""
self.state_recorder = state_recorder
self.current_state = states.ArrayOverallState(
annotated_interactions,
active_gene_threshold,
transition_type_ratio,
seed_links_indices,
link_false_pos,
link_false_neg,
link_prior,
parameters_state_class,
links_state_class
)
self.burn_in_steps = burn_in
self.num_steps = num_steps
self.burn_in_period = True
self.last_transition_info = None
self.current_step = None
class TermsBasedMarkovChain(ArrayMarkovChain):
"""A Markov chain based on states that consider both term and link
selections.
"""
def __init__(
self,
state_recorder,
burn_in,
num_steps,
annotated_interactions,
active_gene_threshold,
transition_type_ratio=TRANSITION_TYPE_RATIO,
seed_terms_indices=None,
seed_links_indices=None,
link_false_pos=None,
link_false_neg=None,
link_prior=None,
term_prior=None,
parameters_state_class=states.TermPriorParametersState,
links_state_class=states.TermsAndLinksState
):
"""Create a new instance.
:Parameters:
- `state_recorder`: an `ArrayStateRecorder` instance, used to
record the steps taken in the chain
- `burn_in`: the number of steps to take before recording state
information about the Markov chain (state records are
discarded until complete)
- `num_steps`: the number of steps to take in the Markov chain
- `annotated_interactions`: an `AnnotatedInteractionsArray`
instance
- `active_gene_threshold`: the threshold at or above which a
gene is considered "active"
- `transition_type_ratio`: a `float` indicating the ratio of link
transitions to parameter transitions
- `seed_links_indices`: a user-defined seed of indices to
links to start as selected
- `link_false_pos`: the false-positive rate for links, the
portion of gene-gene interactions which were included, but
shouldn't have been
- `link_false_neg`: the false-negative rate for links, the
portion of gene-gene interactions which weren't included, but
should have been
- `link_prior`: the assumed probability we would pick any one
link as being active; see `PLNParametersState` for more
information
- `term_prior`: the assumed probability we would select any one
term; see `RandomTransitionParametersState` for more
information
- `parameters_state_class`: the class of the parameters state to
use [default: `states.TermPriorParametersState`]
- `links_state_class`: the class of the links state to use
[default: `states.TermsAndLinksState`]
"""
self.state_recorder = state_recorder
self.current_state = states.TermsBasedOverallState(
annotated_interactions,
active_gene_threshold,
transition_type_ratio,
seed_links_indices=seed_links_indices,
link_false_pos=link_false_pos,
link_false_neg=link_false_neg,
link_prior=link_prior,
term_prior=term_prior,
parameters_state_class=parameters_state_class,
links_state_class=links_state_class
)
self.burn_in_steps = burn_in
self.num_steps = num_steps
self.burn_in_period = True
self.last_transition_info = None
self.current_step = None
class IndependentTermsBasedMarkovChain(TermsBasedMarkovChain):
"""A Markov chain based on states that consider both term and link
selections.
"""
def __init__(
self,
state_recorder,
burn_in,
num_steps,
annotated_interactions,
active_gene_threshold,
transition_type_ratio=TRANSITION_TYPE_RATIO,
seed_terms_indices=None,
seed_links_indices=None,
link_false_pos=None,
link_false_neg=None,
link_prior=None,
term_prior=None,
parameters_state_class=states.TermPriorParametersState,
links_state_class=states.IndependentIntraTermsAndLinksState
):
"""Create a new instance.
:Parameters:
- `state_recorder`: a `TermsBasedStateRecorder` instance, used
to record the steps taken in the chain
- `burn_in`: the number of steps to take before recording state
information about the Markov chain (state records are
discarded until complete)
- `num_steps`: the number of steps to take in the Markov chain
- `annotated_interactions`: an `AnnotatedInteractionsArray`
instance
- `active_gene_threshold`: the threshold at or above which a
gene is considered "active"
- `transition_type_ratio`: a `float` indicating the ratio of link
transitions to parameter transitions
- `seed_terms_indices`: indices for the subset of terms being
considered as "selected" initially
- `seed_links_indices`: a user-defined seed of indices to
links to start as selected
- `link_false_pos`: the false-positive rate for links, the
portion of gene-gene interactions which were included, but
shouldn't have been
- `link_false_neg`: the false-negative rate for links, the
portion of gene-gene interactions which weren't included, but
should have been
- `link_prior`: the assumed probability we would pick any one
link as being active; see `PLNParametersState` for more
information
- `term_prior`: the assumed probability we would select any one
term; see `RandomTransitionParametersState` for more
information
- `parameters_state_class`: the class of the parameters state to
use [default: `states.TermPriorParametersState`]
- `links_state_class`: the class of the links state to use
[default: `states.IndependentIntraTermsAndLinksState`]
"""
self.state_recorder = state_recorder
self.current_state = states.IndependentTermsBasedOverallState(
annotated_interactions,
active_gene_threshold,
transition_type_ratio,
seed_terms_indices=seed_terms_indices,
seed_links_indices=seed_links_indices,
link_false_pos=link_false_pos,
link_false_neg=link_false_neg,
link_prior=link_prior,
term_prior=term_prior,
parameters_state_class=parameters_state_class,
links_state_class=links_state_class
)
self.burn_in_steps = burn_in
self.num_steps = num_steps
self.burn_in_period = True
self.last_transition_info = None
self.current_step = None
class GenesBasedMarkovChain(IndependentTermsBasedMarkovChain):
"""A Markov chain based on states that consider both term and link
selections, where overlap is based on genes.
"""
def __init__(
self,
state_recorder,
burn_in,
num_steps,
annotated_interactions,
active_gene_threshold,
transition_type_ratio=TRANSITION_TYPE_RATIO,
seed_terms_indices=None,
seed_links_indices=None,
link_false_pos=None,
link_false_neg=None,
link_prior=None,
term_false_pos=None,
term_false_neg=None,
term_prior=None,
parameters_state_class=states.TermsParametersState,
links_state_class=states.GenesBasedTermsAndLinksState
):
"""Create a new instance.
:Parameters:
- `state_recorder`: a `GenesBasedStateRecorder` instance, used
to record the steps taken in the chain
- `burn_in`: the number of steps to take before recording state
information about the Markov chain (state records are
discarded until complete)
- `num_steps`: the number of steps to take in the Markov chain
- `annotated_interactions`: an `AnnotatedInteractionsArray`
instance
- `active_gene_threshold`: the threshold at or above which a
gene is considered "active"
- `transition_type_ratio`: a `float` indicating the ratio of link
transitions to parameter transitions
- `seed_terms_indices`: indices for the subset of terms being
considered as "selected" initially
- `seed_links_indices`: a user-defined seed of indices to
links to start as selected
- `link_false_pos`: the false-positive rate for links, the
portion of gene-gene interactions which were included, but
shouldn't have been
- `link_false_neg`: the false-negative rate for links, the
portion of gene-gene interactions which weren't included, but
should have been
- `link_prior`: the assumed probability we would pick any one
link as being active; see `PLNParametersState` for more
information
- `term_false_pos`: the false-positive rate for terms, the
portion of genes which were included, but shouldn't have been
- `term_false_neg`: the false-negative rate for terms, the
portion of genes which weren't included, but should have been
- `term_prior`: the assumed probability we would select any one
term; see `RandomTransitionParametersState` for more
information
- `parameters_state_class`: the class of the parameters state to
use [default: `states.TermsParametersState`]
- `links_state_class`: the class of the links state to use
[default: `states.GenesBasedTermsAndLinksState`]
"""
self.state_recorder = state_recorder
self.current_state = states.GenesBasedOverallState(
annotated_interactions,
active_gene_threshold,
transition_type_ratio,
seed_terms_indices=seed_terms_indices,
seed_links_indices=seed_links_indices,
link_false_pos=link_false_pos,
link_false_neg=link_false_neg,
link_prior=link_prior,
term_false_pos=term_false_pos,
term_false_neg=term_false_neg,
term_prior=term_prior,
parameters_state_class=parameters_state_class,
links_state_class=links_state_class
)
self.burn_in_steps = burn_in
self.num_steps = num_steps
self.burn_in_period = True
self.last_transition_info = None
self.current_step = None
|
|
"""A contents manager that uses the local file system for storage."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os
import shutil
import mimetypes
import nbformat
from tornado import web
from .filecheckpoints import FileCheckpoints
from .fileio import FileManagerMixin
from .manager import ContentsManager
from ipython_genutils.importstring import import_item
from traitlets import Any, Unicode, Bool, TraitError
from ipython_genutils.py3compat import getcwd, string_types
from . import tz
from notebook.utils import (
is_hidden,
to_api_path,
)
_script_exporter = None
def _post_save_script(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_exporter
if _script_exporter is None:
_script_exporter = ScriptExporter(parent=contents_manager)
log = contents_manager.log
base, ext = os.path.splitext(os_path)
py_fname = base + '.py'
script, resources = _script_exporter.from_filename(os_path)
script_fname = base + resources.get('output_extension', '.txt')
log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
with io.open(script_fname, 'w', encoding='utf-8') as f:
f.write(script)
class FileContentsManager(FileManagerMixin, ContentsManager):
root_dir = Unicode(config=True)
def _root_dir_default(self):
try:
return self.parent.notebook_dir
except AttributeError:
return getcwd()
save_script = Bool(False, config=True, help='DEPRECATED, use post_save_hook')
def _save_script_changed(self):
self.log.warn("""
`--script` is deprecated. You can trigger nbconvert via pre- or post-save hooks:
ContentsManager.pre_save_hook
FileContentsManager.post_save_hook
A post-save hook has been registered that calls:
ipython nbconvert --to script [notebook]
which behaves similarly to `--script`.
""")
self.post_save_hook = _post_save_script
post_save_hook = Any(None, config=True,
help="""Python callable or importstring thereof
to be called on the path of a file just saved.
This can be used to process the file on disk,
such as converting the notebook to a script or HTML via nbconvert.
It will be called as (all arguments passed by keyword)::
hook(os_path=os_path, model=model, contents_manager=instance)
- os_path: the filesystem path to the file just written
- model: the model representing the file
- contents_manager: this ContentsManager instance
"""
)
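# For example, the script-export hook defined above could be wired
# up from a notebook config file (a sketch; `c` is the usual
# traitlets config object):
#
#     c.FileContentsManager.post_save_hook = _post_save_script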
def _post_save_hook_changed(self, name, old, new):
if new and isinstance(new, string_types):
self.post_save_hook = import_item(self.post_save_hook)
elif new:
if not callable(new):
raise TraitError("post_save_hook must be callable")
def run_post_save_hook(self, model, os_path):
"""Run the post-save hook if defined, and log errors"""
if self.post_save_hook:
try:
self.log.debug("Running post-save hook on %s", os_path)
self.post_save_hook(os_path=os_path, model=model, contents_manager=self)
except Exception:
self.log.error("Post-save hook failed on %s", os_path, exc_info=True)
def _root_dir_changed(self, name, old, new):
"""Do a bit of validation of the root_dir."""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
self.root_dir = os.path.abspath(new)
return
if not os.path.isdir(new):
raise TraitError("%r is not a directory" % new)
def _checkpoints_class_default(self):
return FileCheckpoints
def is_hidden(self, path):
"""Does the API style path correspond to a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
hidden : bool
Whether the path exists and is hidden.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return is_hidden(os_path, self.root_dir)
def file_exists(self, path):
"""Returns True if the file exists, else returns False.
API-style wrapper for os.path.isfile
Parameters
----------
path : string
The relative path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the file exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path)
return os.path.isfile(os_path)
def dir_exists(self, path):
"""Does the API-style path refer to an extant directory?
API-style wrapper for os.path.isdir
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.isdir(os_path)
def exists(self, path):
"""Returns True if the path exists, else returns False.
API-style wrapper for os.path.exists
Parameters
----------
path : string
The API path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the target exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.exists(os_path)
def _base_model(self, path):
"""Build the common base of a contents model"""
os_path = self._get_os_path(path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the base model.
model = {}
model['name'] = path.rsplit('/', 1)[-1]
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['content'] = None
model['format'] = None
model['mimetype'] = None
try:
model['writable'] = os.access(os_path, os.W_OK)
except OSError:
self.log.error("Failed to check write permissions on %s", os_path)
model['writable'] = False
return model
def _dir_model(self, path, content=True):
"""Build a model for a directory
if content is requested, will include a listing of the directory
"""
os_path = self._get_os_path(path)
four_o_four = u'directory does not exist: %r' % path
if not os.path.isdir(os_path):
raise web.HTTPError(404, four_o_four)
elif is_hidden(os_path, self.root_dir):
self.log.info("Refusing to serve hidden directory %r, via 404 Error",
os_path
)
raise web.HTTPError(404, four_o_four)
model = self._base_model(path)
model['type'] = 'directory'
if content:
model['content'] = contents = []
os_dir = self._get_os_path(path)
for name in os.listdir(os_dir):
os_path = os.path.join(os_dir, name)
# skip over broken symlinks in listing
if not os.path.exists(os_path):
self.log.warn("%s doesn't exist", os_path)
continue
elif not os.path.isfile(os_path) and not os.path.isdir(os_path):
self.log.debug("%s not a regular file", os_path)
continue
if self.should_list(name) and not is_hidden(os_path, self.root_dir):
contents.append(self.get(
path='%s/%s' % (path, name),
content=False)
)
model['format'] = 'json'
return model
def _file_model(self, path, content=True, format=None):
"""Build a model for a file
if content is requested, include the file contents.
format:
If 'text', the contents will be decoded as UTF-8.
If 'base64', the raw bytes contents will be encoded as base64.
If not specified, try to decode as UTF-8, and fall back to base64
"""
model = self._base_model(path)
model['type'] = 'file'
os_path = self._get_os_path(path)
if content:
content, format = self._read_file(os_path, format)
default_mime = {
'text': 'text/plain',
'base64': 'application/octet-stream'
}[format]
model.update(
content=content,
format=format,
mimetype=mimetypes.guess_type(os_path)[0] or default_mime,
)
return model
def _notebook_model(self, path, content=True):
"""Build a notebook model
if content is requested, the notebook content will be populated
as a JSON structure (not double-serialized)
"""
model = self._base_model(path)
model['type'] = 'notebook'
if content:
os_path = self._get_os_path(path)
nb = self._read_notebook(os_path, as_version=4)
self.mark_trusted_cells(nb, path)
model['content'] = nb
model['format'] = 'json'
self.validate_notebook_model(model)
return model
def get(self, path, content=True, type=None, format=None):
""" Takes a path for an entity and returns its model
Parameters
----------
path : str
the API path that describes the relative path for the target
content : bool
Whether to include the contents in the reply
type : str, optional
The requested type - 'file', 'notebook', or 'directory'.
Will raise HTTPError 400 if the content doesn't match.
format : str, optional
The requested format for file contents. 'text' or 'base64'.
Ignored if this returns a notebook or directory model.
Returns
-------
model : dict
the contents model. If content=True, returns the contents
of the file or directory as well.
"""
path = path.strip('/')
if not self.exists(path):
raise web.HTTPError(404, u'No such file or directory: %s' % path)
os_path = self._get_os_path(path)
if os.path.isdir(os_path):
if type not in (None, 'directory'):
raise web.HTTPError(400,
u'%s is a directory, not a %s' % (path, type), reason='bad type')
model = self._dir_model(path, content=content)
elif type == 'notebook' or (type is None and path.endswith('.ipynb')):
model = self._notebook_model(path, content=content)
else:
if type == 'directory':
raise web.HTTPError(400,
u'%s is not a directory' % path, reason='bad type')
model = self._file_model(path, content=content, format=format)
return model
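# A usage sketch (hypothetical path; `cm` is a configured
# FileContentsManager instance):
#
#     model = cm.get('notebooks/analysis.ipynb', content=False)
#     model['type']    # -> 'notebook'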
def _save_directory(self, os_path, model, path=''):
"""create a directory"""
if is_hidden(os_path, self.root_dir):
raise web.HTTPError(400, u'Cannot create hidden directory %r' % os_path)
if not os.path.exists(os_path):
with self.perm_to_403():
os.mkdir(os_path)
elif not os.path.isdir(os_path):
raise web.HTTPError(400, u'Not a directory: %s' % (os_path))
else:
self.log.debug("Directory %r already exists", os_path)
def save(self, model, path=''):
"""Save the file model and return the model with no content."""
path = path.strip('/')
if 'type' not in model:
raise web.HTTPError(400, u'No file type provided')
if 'content' not in model and model['type'] != 'directory':
raise web.HTTPError(400, u'No file content provided')
os_path = self._get_os_path(path)
self.log.debug("Saving %s", os_path)
self.run_pre_save_hook(model=model, path=path)
try:
if model['type'] == 'notebook':
nb = nbformat.from_dict(model['content'])
self.check_and_sign(nb, path)
self._save_notebook(os_path, nb)
# One checkpoint should always exist for notebooks.
if not self.checkpoints.list_checkpoints(path):
self.create_checkpoint(path)
elif model['type'] == 'file':
# Missing format will be handled internally by _save_file.
self._save_file(os_path, model['content'], model.get('format'))
elif model['type'] == 'directory':
self._save_directory(os_path, model, path)
else:
raise web.HTTPError(400, "Unhandled contents type: %s" % model['type'])
except web.HTTPError:
raise
except Exception as e:
self.log.error(u'Error while saving file: %s %s', path, e, exc_info=True)
raise web.HTTPError(500, u'Unexpected error while saving file: %s %s' % (path, e))
validation_message = None
if model['type'] == 'notebook':
self.validate_notebook_model(model)
validation_message = model.get('message', None)
model = self.get(path, content=False)
if validation_message:
model['message'] = validation_message
self.run_post_save_hook(model=model, os_path=os_path)
return model
def delete_file(self, path):
"""Delete file at path."""
path = path.strip('/')
os_path = self._get_os_path(path)
rm = os.unlink
if os.path.isdir(os_path):
listing = os.listdir(os_path)
# Don't delete non-empty directories.
# A directory containing only leftover checkpoints is
# considered empty.
cp_dir = getattr(self.checkpoints, 'checkpoint_dir', None)
for entry in listing:
if entry != cp_dir:
raise web.HTTPError(400, u'Directory %s not empty' % os_path)
elif not os.path.isfile(os_path):
raise web.HTTPError(404, u'File does not exist: %s' % os_path)
if os.path.isdir(os_path):
self.log.debug("Removing directory %s", os_path)
with self.perm_to_403():
shutil.rmtree(os_path)
else:
self.log.debug("Unlinking file %s", os_path)
with self.perm_to_403():
rm(os_path)
def rename_file(self, old_path, new_path):
"""Rename a file."""
old_path = old_path.strip('/')
new_path = new_path.strip('/')
if new_path == old_path:
return
new_os_path = self._get_os_path(new_path)
old_os_path = self._get_os_path(old_path)
# Should we proceed with the move?
if os.path.exists(new_os_path):
raise web.HTTPError(409, u'File already exists: %s' % new_path)
# Move the file
try:
with self.perm_to_403():
shutil.move(old_os_path, new_os_path)
except web.HTTPError:
raise
except Exception as e:
raise web.HTTPError(500, u'Unknown error renaming file: %s %s' % (old_path, e))
def info_string(self):
return "Serving notebooks from local directory: %s" % self.root_dir
def get_kernel_path(self, path, model=None):
"""Return the initial API path of a kernel associated with a given notebook"""
if '/' in path:
parent_dir = path.rsplit('/', 1)[0]
else:
parent_dir = ''
return parent_dir
|
|
import socket
import struct
import threading
import sys
import time
import os
from lib import state
from lib.terminal_colors import Fore, Back, Style
class Client(threading.Thread):
def __init__(self, verbose, test, time, target):
threading.Thread.__init__(self)
self.test = test
self.verbose = verbose
self.results = None
# If Time is not provided, set it to the default 30 seconds
if time is None:
self.time = 30
# Time should not be negative or greater than 1 hour
elif time < 0 or time > 3600:
print("The Time provided can not be negative or greater than 1 hour")
sys.exit(1)
else:
self.time = time
# Store target host
self.target = target
# Test Request Data
# 3-Byte message sent from Client to Server to initiate test
self.tests = {
"TCP_STREAM": {
"data": struct.pack("!3b", 0x01, 0x01, 0x01),
"type": "Bandwidth"
},
"TCP_RR": {
"data": struct.pack("!3b", 0x01, 0x01, 0x02),
"type": "Latency"
},
"UDP_STREAM": {
"data": struct.pack("!3b", 0x01, 0x01, 0x03),
"type": "Bandwidth"
},
"UDP_RR": {
"data": struct.pack("!3b", 0x01, 0x01, 0x04),
"type": "Latency"
}
}
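# For example, the TCP_STREAM request packs to b'\x01\x01\x01':
# byte 0 is the protocol version (1), byte 1 is the message type
# (1 = test request), and byte 2 is the test type (1 = TCP_STREAM),
# matching the fields printed in verbose mode below.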
# Get data for test type
if self.test in self.tests:
self.controlData = self.tests[self.test]["data"]
else:
print("Test not supported")
sys.exit(1)
def run(self):
self.controlSocket()
def controlSocket(self):
# Create socket
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.time + 10)
except socket.error as msg:
print("Error: Could not create TCP Control Socket")
print("Description: {}".format(msg))
return
# Create connection to host
try:
s.connect((self.target, 5678))
print("{}-----------------------------------------------------------{}".format(Style.BRIGHT, Style.RESET))
print("{}[MODE]:{}\t\t\t\t{}CLIENT{}".format((Style.BRIGHT + Back.BLUE), Style.RESET, Style.BRIGHT, Style.RESET))
print("{}[SERVER]:{}\t\t\t{}{}:5678{}".format((Style.BRIGHT + Back.BLUE), Style.RESET, Style.BRIGHT, self.target, Style.RESET))
print("{}[TEST]:{}\t\t\t\t{}\"{}\" {} Test{}".format((Style.BRIGHT + Back.BLUE), Style.RESET, Style.BRIGHT, self.test, self.tests[self.test]["type"], Style.RESET))
print("{}-----------------------------------------------------------{}".format(Style.BRIGHT, Style.RESET))
print("{}[CONNECTED TO SERVER]{}".format((Style.BRIGHT + Back.CYAN), Style.RESET))
except socket.error as msg:
print("Error: Could not connect to target host")
print("Description: {}".format(msg))
s.close()
return
# Send Test Request
try:
s.sendall(self.controlData)
except socket.timeout as msg:
print("Error: Control Socket has timed out")
print("Description: {}".format(msg))
s.close()
return
except socket.error as msg:
print("Error: Could not send Control Data")
print("Description: {}".format(msg))
s.close()
return
if self.verbose:
print("{}[SENT TEST REQUEST]:{}\t\t{}{}{}".format((Style.BRIGHT + Back.GREEN), Style.RESET, Style.BRIGHT, self.controlData, Style.RESET))
print("{}PROTOCOL VERSION:{}\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, self.controlData[0], Style.RESET))
print("{}MESSAGE TYPE:{}\t\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, self.controlData[1], Style.RESET))
print("{}TEST TYPE:{}\t\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, self.controlData[2], Style.RESET))
# Receive Test Reply
try:
data = s.recv(16)
except socket.timeout as msg:
print("Error: Control Socket has timed out")
print("Description: {}".format(msg))
s.close()
return
except socket.error as msg:
print("Error: Could not receive the reply")
print("Description: {}".format(msg))
s.close()
return
# Unpack Test Reply
try:
protocol, msg, port = struct.unpack("!2b1H", data)
except struct.error as msg:
print("Error: Could not unpack struct")
print("Description: {}".format(msg))
s.close()
return
if self.verbose:
print("{}[RECEIVED TEST REPLY]:{}\t\t{}{}{}".format((Style.BRIGHT + Back.GREEN), Style.RESET, Style.BRIGHT, data, Style.RESET))
print("{}PROTOCOL VERSION:{}\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, protocol, Style.RESET))
print("{}MESSAGE TYPE:{}\t\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, msg, Style.RESET))
print("{}TEST PORT:{}\t\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, port, Style.RESET))
if msg == 2:
# Launch Data Socket
ds = threading.Thread(target=self.dataSocket, args=(port,))
ds.start()
ds.join()
if state.shutdown:
return
# Test Done
p = 0x01
m = 0x03
# Send Test Done to server
reply = struct.pack("!2b", p, m)
try:
s.sendall(reply)
if self.verbose:
print("{}[SENT TEST DONE]:{}\t\t{}{}{}".format((Style.BRIGHT + Back.GREEN), Style.RESET, Style.BRIGHT, reply, Style.RESET))
print("{}PROTOCOL VERSION:{}\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, p, Style.RESET))
print("{}MESSAGE TYPE:{}\t\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, m, Style.RESET))
except socket.timeout as msg:
print("Error: Control Socket has timed out")
print("Description: {}".format(msg))
s.close()
return
except socket.error as msg:
print("Error: Could not send the reply")
print("Description: {}".format(msg))
s.close()
return
# Receive Test Results
try:
data = s.recv(16)
except socket.timeout as msg:
print("Error: Control Socket has timed out")
print("Description: {}".format(msg))
s.close()
return
except socket.error as msg:
print("Error: Could not get reply")
print("Description: {}".format(msg))
s.close()
return
# Unpack Test Results
protocol = None
msg = None
packets = None
try:
if self.test == "UDP_STREAM" or self.test == "UDP_RR":
protocol, msg, packets = struct.unpack("!2b1d", data)
else:
protocol, msg = struct.unpack("!2b", data)
except struct.error as msg:
print("Error: Could not unpack struct")
print("Description: {}".format(msg))
s.close()
return
if msg == 4:
if self.verbose:
print("{}[RECEIVED TEST RESULTS]:{}\t{}{}{}".format((Style.BRIGHT + Back.GREEN), Style.RESET, Style.BRIGHT, data, Style.RESET))
if self.test == "UDP_STREAM" or self.test == "UDP_RR":
print("{}PROTOCOL VERSION:{}\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, protocol, Style.RESET))
print("{}MESSAGE TYPE:{}\t\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, msg, Style.RESET))
print("{}UDP PACKETS RECEIVED:{}\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, int(packets), Style.RESET))
else:
print("{}PROTOCOL VERSION:{}\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, protocol, Style.RESET))
print("{}MESSAGE TYPE:{}\t\t\t{}{}{}".format((Style.BRIGHT + Fore.GREEN), Style.RESET, Style.BRIGHT, msg, Style.RESET))
if self.test == "TCP_STREAM":
print("{}[MEGABITS PER SECOND]:{} \t\t{}{}".format((Style.BRIGHT + Back.GREEN), (Style.RESET + Style.BRIGHT), self.results, Style.RESET))
elif self.test == "TCP_RR":
print("{}[PACKETS PER SECOND]:{} \t\t{}{}".format((Style.BRIGHT + Back.GREEN), (Style.RESET + Style.BRIGHT), self.results, Style.RESET))
elif self.test == "UDP_STREAM":
# Convert bytes to bits
bits = (packets*1472) * 8
# Converts bits to megabits
mb = bits / 1000000
# Create megabits per second
mbps = int(mb/(self.results[0]))
print("{}[MEGABITS PER SECOND]:{} \t\t{}{}".format((Style.BRIGHT + Back.GREEN), (Style.RESET + Style.BRIGHT), mbps, Style.RESET))
# Packet loss: packets sent by the client (self.results[1]) minus
# packets the server reported receiving, as a percentage of
# packets sent.
percentage = int(100 * (self.results[1] - packets) / self.results[1])
print("{}[PACKET LOSS PERCENTAGE]:{} \t{}%{}".format((Style.BRIGHT + Back.GREEN), (Style.RESET + Style.BRIGHT), percentage, Style.RESET))
elif self.test == "UDP_RR":
print("{}[MEGABITS PER SECOND]:{} \t\t{}{}".format((Style.BRIGHT + Back.GREEN), (Style.RESET + Style.BRIGHT), self.results, Style.RESET))
percentage = int(self.results/int(packets))
print("{}[PACKET LOSS PERCENTAGE]:{} \t{}%{}".format((Style.BRIGHT + Back.GREEN), (Style.RESET + Style.BRIGHT), percentage, Style.RESET))
print("{}[DISCONNECTED FROM SERVER]{}".format((Style.BRIGHT + Back.CYAN), Style.RESET))
s.close()
else:
print("Error: Message not supported")
s.close()
return
else:
print("Error: Message not supported")
s.close()
return
def dataSocket(self, port):
print("{}[DATA THREAD OPENING]{}".format((Style.BRIGHT + Back.YELLOW), Style.RESET))
data = None
increment = None
bufferSize = None
packets = 0
count = 0
# Data Socket
time.sleep(1)
s = None
if self.test == "TCP_STREAM":
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error as msg:
print("Error: Could not create Data Socket")
print("Description: {}".format(msg))
return
data = bytes(64*1024)
increment = 64*1024
elif self.test == "TCP_RR":
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error as msg:
print("Error: Could not create Data Socket")
print("Description: {}".format(msg))
return
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
data = bytes(1)
increment = 1
bufferSize = 1
elif self.test == "UDP_STREAM":
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error as msg:
print("Error: Could not create Data Socket")
print("Description: {}".format(msg))
return
data = bytes(1472)
bufferSize = 1472
increment = 1
elif self.test == "UDP_RR":
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error as msg:
print("Error: Could not create Data Socket")
print("Description: {}".format(msg))
return
data = bytes(1472)
bufferSize = 1
increment = 1
s.settimeout(self.time + 10)
if self.test == "TCP_STREAM" or self.test == "TCP_RR":
try:
s.connect((self.target, port))
except socket.error as msg:
print("Error: Could not connect to Server Data Socket")
print("Description: {}".format(msg))
return
t0 = time.time()
tFinal = t0 + self.time
if self.test == "TCP_STREAM":
while time.time() < tFinal and not state.shutdown:
try:
s.sendall(data)
except socket.timeout as msg:
print("Error: Data Socket has timed out")
print("Description: {}".format(msg))
s.close()
state.shutdown = True
return
except socket.error as msg:
print("Error: Could not send data to Server Data Socket")
print("Description: {}".format(msg))
return
count+=increment
elif self.test == "TCP_RR":
s.settimeout(45)
while time.time() < tFinal and not state.shutdown:
try:
s.sendall(data)
except socket.timeout as msg:
print("Error: Data Socket has timed out")
print("Description: {}".format(msg))
s.close()
state.shutdown = True
return
except socket.error as msg:
print("Error: Could not send data to Server Data Socket")
print("Description: {}".format(msg))
s.close()
return
try:
tmp = s.recv(bufferSize)
except socket.timeout as msg:
print("Error: Data Socket has timed out")
print("Description: {}".format(msg))
s.close()
state.shutdown = True
return
except socket.error as msg:
print("Error: Could not receive data from Server Data Socket")
print("Description: {}".format(msg))
s.close()
return
count+=increment
elif self.test == "UDP_STREAM":
s.connect((self.target, port))
while time.time() < tFinal and not state.shutdown:
try:
s.send(data)
except socket.timeout as msg:
print("Error: Data Socket has timed out")
print("Description: {}".format(msg))
s.close()
state.shutdown = True
return
except socket.error as msg:
print("Error: Could not send data to Server Data Socket")
print("Description: {}".format(msg))
s.close()
return
packets+=increment
elif self.test == "UDP_RR":
s.connect((self.target, port))
while time.time() < tFinal and not state.shutdown:
try:
s.send(data)
except socket.timeout as msg:
print("Error: Data Socket has timed out")
print("Description: {}".format(msg))
s.close()
state.shutdown = True
return
except socket.error as msg:
print("Error: Could not send data to Server Data Socket")
print("Description: {}".format(msg))
s.close()
return
try:
tmp = s.recv(bufferSize)
except socket.timeout as msg:
print("Error: Data Socket has timed out")
print("Description: {}".format(msg))
s.close()
state.shutdown = True
return
except socket.error as msg:
print("Error: Could not send data to Server Data Socket")
print("Description: {}".format(msg))
s.close()
return
packets+=increment
t1 = time.time()
s.close()
if self.test == "TCP_STREAM":
# Convert bytes to bits
bits = count * 8
# Converts bits to megabits
mb = bits / 1000000
# Create megabits per second
mbps = int(mb/(t1-t0))
self.results = mbps
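# Illustrative arithmetic (not from a real run): sending
# 125,000,000 bytes over 10 seconds is 1,000,000,000 bits, i.e.
# 1000 megabits, giving roughly 100 Mbps.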
elif self.test == "TCP_RR":
pps = int(count/(t1-t0))
self.results = pps
elif self.test == "UDP_STREAM":
diff = t1-t0
self.results = (diff, packets)
elif self.test == "UDP_RR":
pps = int(packets/(t1-t0))
self.results = pps
print("{}[DATA THREAD CLOSING]{}".format((Style.BRIGHT + Back.YELLOW), Style.RESET))
|
|
import ctypes
from ctypes import CFUNCTYPE, c_int
from ctypes.util import find_library
import gc
import locale
import os
import platform
import re
import subprocess
import sys
import unittest
from contextlib import contextmanager
from tempfile import mkstemp
from llvmlite import ir
from llvmlite import binding as llvm
from llvmlite.binding import ffi
from llvmlite.tests import TestCase
# armv7l needs extra ABI symbols to link successfully
if platform.machine() == 'armv7l':
llvm.load_library_permanently('libgcc_s.so.1')
def no_de_locale():
cur = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, 'de_DE')
except locale.Error:
return True
else:
return False
finally:
locale.setlocale(locale.LC_ALL, cur)
asm_sum = r"""
; ModuleID = '<string>'
source_filename = "asm_sum.c"
target triple = "{triple}"
%struct.glob_type = type {{ i64, [2 x i64]}}
@glob = global i32 0
@glob_b = global i8 0
@glob_f = global float 1.5
@glob_struct = global %struct.glob_type {{i64 0, [2 x i64] [i64 0, i64 0]}}
define i32 @sum(i32 %.1, i32 %.2) {{
%.3 = add i32 %.1, %.2
%.4 = add i32 0, %.3
ret i32 %.4
}}
"""
asm_sum2 = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define i32 @sum(i32 %.1, i32 %.2) {{
%.3 = add i32 %.1, %.2
ret i32 %.3
}}
"""
asm_mul = r"""
; ModuleID = '<string>'
target triple = "{triple}"
@mul_glob = global i32 0
define i32 @mul(i32 %.1, i32 %.2) {{
%.3 = mul i32 %.1, %.2
ret i32 %.3
}}
"""
# `fadd` used on integer inputs
asm_parse_error = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define i32 @sum(i32 %.1, i32 %.2) {{
%.3 = fadd i32 %.1, %.2
ret i32 %.3
}}
"""
# "%.bug" definition references itself
asm_verification_fail = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define void @sum() {{
%.bug = add i32 1, %.bug
ret void
}}
"""
asm_sum_declare = r"""
; ModuleID = '<string>'
target triple = "{triple}"
declare i32 @sum(i32 %.1, i32 %.2)
"""
asm_double_locale = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define void @foo() {{
%const = fadd double 0.0, 3.14
ret void
}}
"""
asm_inlineasm = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define void @foo() {{
call void asm sideeffect "nop", ""()
ret void
}}
"""
asm_global_ctors = r"""
; ModuleID = "<string>"
target triple = "{triple}"
@A = global i32 undef
define void @ctor_A()
{{
store i32 10, i32* @A
ret void
}}
define void @dtor_A()
{{
store i32 20, i32* @A
ret void
}}
define i32 @foo()
{{
%.2 = load i32, i32* @A
%.3 = add i32 %.2, 2
ret i32 %.3
}}
@llvm.global_ctors = appending global [1 x {{i32, void ()*, i8*}}] [{{i32, void ()*, i8*}} {{i32 0, void ()* @ctor_A, i8* null}}]
@llvm.global_dtors = appending global [1 x {{i32, void ()*, i8*}}] [{{i32, void ()*, i8*}} {{i32 0, void ()* @dtor_A, i8* null}}]
""" # noqa E501
asm_nonalphanum_blocklabel = """; ModuleID = ""
target triple = "unknown-unknown-unknown"
target datalayout = ""
define i32 @"foo"()
{
"<>!*''#":
ret i32 12345
}
""" # noqa W291 # trailing space needed for match later
riscv_asm_ilp32 = [
'addi\tsp, sp, -16',
'sw\ta1, 8(sp)',
'sw\ta2, 12(sp)',
'fld\tft0, 8(sp)',
'fmv.w.x\tft1, a0',
'fcvt.d.s\tft1, ft1',
'fadd.d\tft0, ft1, ft0',
'fsd\tft0, 8(sp)',
'lw\ta0, 8(sp)',
'lw\ta1, 12(sp)',
'addi\tsp, sp, 16',
'ret'
]
riscv_asm_ilp32f = [
'addi\tsp, sp, -16',
'sw\ta0, 8(sp)',
'sw\ta1, 12(sp)',
'fld\tft0, 8(sp)',
'fcvt.d.s\tft1, fa0',
'fadd.d\tft0, ft1, ft0',
'fsd\tft0, 8(sp)',
'lw\ta0, 8(sp)',
'lw\ta1, 12(sp)',
'addi\tsp, sp, 16',
'ret'
]
riscv_asm_ilp32d = [
'fcvt.d.s\tft0, fa0',
'fadd.d\tfa0, ft0, fa1',
'ret'
]
asm_attributes = r"""
declare void @a_readonly_func(i8 *) readonly
declare i8* @a_arg0_return_func(i8* returned, i32*)
"""
# This produces the following output from objdump:
#
# $ objdump -D 632.elf
#
# 632.elf: file format elf64-x86-64
#
#
# Disassembly of section .text:
#
# 0000000000000000 <__arybo>:
# 0: 48 c1 e2 20 shl $0x20,%rdx
# 4: 48 09 c2 or %rax,%rdx
# 7: 48 89 d0 mov %rdx,%rax
# a: 48 c1 c0 3d rol $0x3d,%rax
# e: 48 31 d0 xor %rdx,%rax
# 11: 48 b9 01 20 00 04 80 movabs $0x7010008004002001,%rcx
# 18: 00 10 70
# 1b: 48 0f af c8 imul %rax,%rcx
issue_632_elf = \
"7f454c4602010100000000000000000001003e00010000000000000000000000000000" \
"0000000000e0000000000000000000000040000000000040000500010048c1e2204809" \
"c24889d048c1c03d4831d048b90120000480001070480fafc800000000000000000000" \
"0000000000000000000000000000002f0000000400f1ff000000000000000000000000" \
"00000000070000001200020000000000000000001f00000000000000002e7465787400" \
"5f5f617279626f002e6e6f74652e474e552d737461636b002e737472746162002e7379" \
"6d746162003c737472696e673e00000000000000000000000000000000000000000000" \
"0000000000000000000000000000000000000000000000000000000000000000000000" \
"00000000000000001f0000000300000000000000000000000000000000000000a80000" \
"0000000000380000000000000000000000000000000100000000000000000000000000" \
"000001000000010000000600000000000000000000000000000040000000000000001f" \
"000000000000000000000000000000100000000000000000000000000000000f000000" \
"01000000000000000000000000000000000000005f0000000000000000000000000000" \
"0000000000000000000100000000000000000000000000000027000000020000000000" \
"0000000000000000000000000000600000000000000048000000000000000100000002" \
"00000008000000000000001800000000000000"
issue_632_text = \
"48c1e2204809c24889d048c1c03d4831d048b90120000480001070480fafc8"
class BaseTest(TestCase):
def setUp(self):
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
gc.collect()
self.old_garbage = gc.garbage[:]
gc.garbage[:] = []
def tearDown(self):
# Test that no uncollectable objects were created
# (llvmlite objects have a __del__ so a reference cycle could
# create some).
gc.collect()
self.assertEqual(gc.garbage, [])
# This will probably put any existing garbage in gc.garbage again
del self.old_garbage
def module(self, asm=asm_sum, context=None):
asm = asm.format(triple=llvm.get_default_triple())
mod = llvm.parse_assembly(asm, context)
return mod
def glob(self, name='glob', mod=None):
if mod is None:
mod = self.module()
return mod.get_global_variable(name)
def target_machine(self, *, jit):
target = llvm.Target.from_default_triple()
return target.create_target_machine(jit=jit)
class TestDependencies(BaseTest):
"""
Test DLL dependencies are within a certain expected set.
"""
@unittest.skipUnless(sys.platform.startswith('linux'),
"Linux-specific test")
@unittest.skipUnless(os.environ.get('LLVMLITE_DIST_TEST'),
"Distribution-specific test")
def test_linux(self):
lib_path = ffi.lib._name
env = os.environ.copy()
env['LANG'] = 'C'
p = subprocess.Popen(["objdump", "-p", lib_path],
stdout=subprocess.PIPE, env=env)
out, _ = p.communicate()
self.assertEqual(0, p.returncode)
# Parse library dependencies
lib_pat = re.compile(r'^([-_a-zA-Z0-9]+)\.so(?:\.\d+){0,3}$')
deps = set()
for line in out.decode().splitlines():
parts = line.split()
if parts and parts[0] == 'NEEDED':
dep = parts[1]
m = lib_pat.match(dep)
if len(parts) != 2 or not m:
self.fail("invalid NEEDED line: %r" % (line,))
deps.add(m.group(1))
# Sanity check that our dependencies were parsed ok
if 'libc' not in deps or 'libpthread' not in deps:
self.fail("failed parsing dependencies? got %r" % (deps,))
# Ensure all dependencies are expected
allowed = set(['librt', 'libdl', 'libpthread', 'libz', 'libm',
'libgcc_s', 'libc', 'ld-linux', 'ld64'])
if platform.python_implementation() == 'PyPy':
allowed.add('libtinfo')
for dep in deps:
if not dep.startswith('ld-linux-') and dep not in allowed:
self.fail("unexpected dependency %r in %r" % (dep, deps))
class TestRISCVABI(BaseTest):
"""
Test calling convention of floating point arguments of RISC-V
using different ABI.
"""
triple = "riscv32-unknown-linux"
def setUp(self):
super().setUp()
llvm.initialize_all_targets()
llvm.initialize_all_asmprinters()
def check_riscv_target(self):
try:
llvm.Target.from_triple(self.triple)
except RuntimeError as e:
if "No available targets are compatible with triple" in str(e):
self.skipTest("RISCV target unsupported by linked LLVM.")
else:
raise e
def riscv_target_machine(self, **kwarg):
lltarget = llvm.Target.from_triple(self.triple)
return lltarget.create_target_machine(**kwarg)
def fpadd_ll_module(self):
f64 = ir.DoubleType()
f32 = ir.FloatType()
fnty = ir.FunctionType(f64, (f32, f64))
module = ir.Module()
func = ir.Function(module, fnty, name="fpadd")
block = func.append_basic_block()
builder = ir.IRBuilder(block)
a, b = func.args
arg0 = builder.fpext(a, f64)
result = builder.fadd(arg0, b)
builder.ret(result)
llmod = llvm.parse_assembly(str(module))
llmod.verify()
return llmod
def break_up_asm(self, asm):
asm_list = []
for line in asm.splitlines():
s_line = line.strip()
if not (s_line.startswith(".") or s_line.startswith("fpadd")
or s_line == ""):
asm_list.append(s_line)
return asm_list
def test_rv32d_ilp32(self):
self.check_riscv_target()
llmod = self.fpadd_ll_module()
target = self.riscv_target_machine(features="+f,+d")
self.assertEqual(self.break_up_asm(target.emit_assembly(llmod)),
riscv_asm_ilp32)
def test_rv32d_ilp32f(self):
self.check_riscv_target()
llmod = self.fpadd_ll_module()
target = self.riscv_target_machine(features="+f,+d", abiname="ilp32f")
self.assertEqual(self.break_up_asm(target.emit_assembly(llmod)),
riscv_asm_ilp32f)
def test_rv32d_ilp32d(self):
self.check_riscv_target()
llmod = self.fpadd_ll_module()
target = self.riscv_target_machine(features="+f,+d", abiname="ilp32d")
self.assertEqual(self.break_up_asm(target.emit_assembly(llmod)),
riscv_asm_ilp32d)
class TestMisc(BaseTest):
"""
Test miscellaneous functions in llvm.binding.
"""
def test_parse_assembly(self):
self.module(asm_sum)
def test_parse_assembly_error(self):
with self.assertRaises(RuntimeError) as cm:
self.module(asm_parse_error)
s = str(cm.exception)
self.assertIn("parsing error", s)
self.assertIn("invalid operand type", s)
def test_nonalphanum_block_name(self):
mod = ir.Module()
ft = ir.FunctionType(ir.IntType(32), [])
fn = ir.Function(mod, ft, "foo")
bd = ir.IRBuilder(fn.append_basic_block(name="<>!*''#"))
bd.ret(ir.Constant(ir.IntType(32), 12345))
asm = str(mod)
self.assertEqual(asm, asm_nonalphanum_blocklabel)
def test_global_context(self):
gcontext1 = llvm.context.get_global_context()
gcontext2 = llvm.context.get_global_context()
assert gcontext1 == gcontext2
def test_dylib_symbols(self):
llvm.add_symbol("__xyzzy", 1234)
llvm.add_symbol("__xyzzy", 5678)
addr = llvm.address_of_symbol("__xyzzy")
self.assertEqual(addr, 5678)
addr = llvm.address_of_symbol("__foobar")
self.assertIs(addr, None)
def test_get_default_triple(self):
triple = llvm.get_default_triple()
self.assertIsInstance(triple, str)
self.assertTrue(triple)
def test_get_process_triple(self):
triple = llvm.get_process_triple()
default = llvm.get_default_triple()
self.assertIsInstance(triple, str)
self.assertTrue(triple)
default_parts = default.split('-')
triple_parts = triple.split('-')
# Arch must be equal
self.assertEqual(default_parts[0], triple_parts[0])
def test_get_host_cpu_features(self):
features = llvm.get_host_cpu_features()
# Check the content of `features`
self.assertIsInstance(features, dict)
self.assertIsInstance(features, llvm.FeatureMap)
for k, v in features.items():
self.assertIsInstance(k, str)
self.assertTrue(k) # single feature string cannot be empty
self.assertIsInstance(v, bool)
self.assertIsInstance(features.flatten(), str)
re_term = r"[+\-][a-zA-Z0-9\._-]+"
regex = r"^({0}|{0}(,{0})*)?$".format(re_term)
# quick check for our regex
self.assertIsNotNone(re.match(regex, ""))
self.assertIsNotNone(re.match(regex, "+aa"))
self.assertIsNotNone(re.match(regex, "+a,-bb"))
# check CpuFeature.flatten()
if len(features) == 0:
self.assertEqual(features.flatten(), "")
else:
self.assertIsNotNone(re.match(regex, features.flatten()))
def test_get_host_cpu_name(self):
cpu = llvm.get_host_cpu_name()
self.assertIsInstance(cpu, str)
self.assertTrue(cpu)
def test_initfini(self):
code = """if 1:
from llvmlite import binding as llvm
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
llvm.initialize_all_targets()
llvm.initialize_all_asmprinters()
llvm.shutdown()
"""
subprocess.check_call([sys.executable, "-c", code])
def test_set_option(self):
# We cannot set an option multiple times (LLVM would exit() the
# process), so run the code in a subprocess.
code = """if 1:
from llvmlite import binding as llvm
llvm.set_option("progname", "-debug-pass=Disabled")
"""
subprocess.check_call([sys.executable, "-c", code])
def test_version(self):
major, minor, patch = llvm.llvm_version_info
# one of these can be valid
valid = [(11,)]
self.assertIn((major,), valid)
self.assertIn(patch, range(10))
def test_check_jit_execution(self):
llvm.check_jit_execution()
@unittest.skipIf(no_de_locale(), "Locale not available")
def test_print_double_locale(self):
m = self.module(asm_double_locale)
expect = str(m)
# Change the locale so that comma is used as decimal-point
# to trigger the LLVM bug (llvmlite issue #80)
locale.setlocale(locale.LC_ALL, 'de_DE')
# The LLVM bug is triggered by printing the module with a double constant
got = str(m)
# Changing the locale should not affect the LLVM IR
self.assertEqual(expect, got)
class TestModuleRef(BaseTest):
def test_str(self):
mod = self.module()
s = str(mod).strip()
self.assertTrue(s.startswith('; ModuleID ='), s)
def test_close(self):
mod = self.module()
str(mod)
mod.close()
with self.assertRaises(ctypes.ArgumentError):
str(mod)
mod.close()
def test_with(self):
mod = self.module()
str(mod)
with mod:
str(mod)
with self.assertRaises(ctypes.ArgumentError):
str(mod)
with self.assertRaises(RuntimeError):
with mod:
pass
def test_name(self):
mod = self.module()
mod.name = "foo"
self.assertEqual(mod.name, "foo")
mod.name = "bar"
self.assertEqual(mod.name, "bar")
def test_source_file(self):
mod = self.module()
self.assertEqual(mod.source_file, "asm_sum.c")
def test_data_layout(self):
mod = self.module()
s = mod.data_layout
self.assertIsInstance(s, str)
mod.data_layout = s
self.assertEqual(s, mod.data_layout)
def test_triple(self):
mod = self.module()
s = mod.triple
self.assertEqual(s, llvm.get_default_triple())
mod.triple = ''
self.assertEqual(mod.triple, '')
def test_verify(self):
# Verify successful
mod = self.module()
self.assertIs(mod.verify(), None)
# Verify failed
mod = self.module(asm_verification_fail)
with self.assertRaises(RuntimeError) as cm:
mod.verify()
s = str(cm.exception)
self.assertIn("%.bug = add i32 1, %.bug", s)
def test_get_function(self):
mod = self.module()
fn = mod.get_function("sum")
self.assertIsInstance(fn, llvm.ValueRef)
self.assertEqual(fn.name, "sum")
with self.assertRaises(NameError):
mod.get_function("foo")
# Check that fn keeps the module instance alive
del mod
str(fn.module)
def test_get_struct_type(self):
mod = self.module()
st_ty = mod.get_struct_type("struct.glob_type")
self.assertEqual(st_ty.name, "struct.glob_type")
# also match struct names of form "%struct.glob_type.{some_index}"
self.assertIsNotNone(re.match(
r'%struct\.glob_type(\.[\d]+)? = type { i64, \[2 x i64\] }',
str(st_ty)))
with self.assertRaises(NameError):
mod.get_struct_type("struct.doesnt_exist")
def test_get_global_variable(self):
mod = self.module()
gv = mod.get_global_variable("glob")
self.assertIsInstance(gv, llvm.ValueRef)
self.assertEqual(gv.name, "glob")
with self.assertRaises(NameError):
mod.get_global_variable("bar")
# Check that gv keeps the module instance alive
del mod
str(gv.module)
def test_global_variables(self):
mod = self.module()
it = mod.global_variables
del mod
globs = sorted(it, key=lambda value: value.name)
self.assertEqual(len(globs), 4)
self.assertEqual([g.name for g in globs],
["glob", "glob_b", "glob_f", "glob_struct"])
def test_functions(self):
mod = self.module()
it = mod.functions
del mod
funcs = list(it)
self.assertEqual(len(funcs), 1)
self.assertEqual(funcs[0].name, "sum")
def test_structs(self):
mod = self.module()
it = mod.struct_types
del mod
structs = list(it)
self.assertEqual(len(structs), 1)
self.assertIsNotNone(re.match(r'struct\.glob_type(\.[\d]+)?',
structs[0].name))
self.assertIsNotNone(re.match(
r'%struct\.glob_type(\.[\d]+)? = type { i64, \[2 x i64\] }',
str(structs[0])))
def test_link_in(self):
dest = self.module()
src = self.module(asm_mul)
dest.link_in(src)
self.assertEqual(
sorted(f.name for f in dest.functions), ["mul", "sum"])
dest.get_function("mul")
dest.close()
with self.assertRaises(ctypes.ArgumentError):
src.get_function("mul")
def test_link_in_preserve(self):
dest = self.module()
src2 = self.module(asm_mul)
dest.link_in(src2, preserve=True)
self.assertEqual(
sorted(f.name for f in dest.functions), ["mul", "sum"])
dest.close()
self.assertEqual(sorted(f.name for f in src2.functions), ["mul"])
src2.get_function("mul")
def test_link_in_error(self):
# Raise an error by trying to link two modules with the same global
# definition "sum".
dest = self.module()
src = self.module(asm_sum2)
with self.assertRaises(RuntimeError) as cm:
dest.link_in(src)
self.assertIn("symbol multiply defined", str(cm.exception))
def test_as_bitcode(self):
mod = self.module()
bc = mod.as_bitcode()
# Refer to http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00064 # noqa E501
# and http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00092 # noqa E501
bitcode_wrapper_magic = b'\xde\xc0\x17\x0b'
bitcode_magic = b'BC'
self.assertTrue(bc.startswith(bitcode_magic) or
bc.startswith(bitcode_wrapper_magic))
def test_parse_bitcode_error(self):
with self.assertRaises(RuntimeError) as cm:
llvm.parse_bitcode(b"")
self.assertIn("LLVM bitcode parsing error", str(cm.exception))
# for llvm < 9
if llvm.llvm_version_info[0] < 9:
self.assertIn("Invalid bitcode signature", str(cm.exception))
else:
self.assertIn(
"file too small to contain bitcode header", str(cm.exception),
)
def test_bitcode_roundtrip(self):
# create a new context to avoid struct renaming
context1 = llvm.create_context()
bc = self.module(context=context1).as_bitcode()
context2 = llvm.create_context()
mod = llvm.parse_bitcode(bc, context2)
self.assertEqual(mod.as_bitcode(), bc)
mod.get_function("sum")
mod.get_global_variable("glob")
def test_cloning(self):
m = self.module()
cloned = m.clone()
self.assertIsNot(cloned, m)
self.assertEqual(cloned.as_bitcode(), m.as_bitcode())
class JITTestMixin(object):
"""
Mixin for ExecutionEngine tests.
"""
def get_sum(self, ee, func_name="sum"):
ee.finalize_object()
cfptr = ee.get_function_address(func_name)
self.assertTrue(cfptr)
return CFUNCTYPE(c_int, c_int, c_int)(cfptr)
def test_run_code(self):
mod = self.module()
with self.jit(mod) as ee:
cfunc = self.get_sum(ee)
res = cfunc(2, -5)
self.assertEqual(-3, res)
def test_close(self):
ee = self.jit(self.module())
ee.close()
ee.close()
with self.assertRaises(ctypes.ArgumentError):
ee.finalize_object()
def test_with(self):
ee = self.jit(self.module())
with ee:
pass
with self.assertRaises(RuntimeError):
with ee:
pass
with self.assertRaises(ctypes.ArgumentError):
ee.finalize_object()
def test_module_lifetime(self):
mod = self.module()
ee = self.jit(mod)
ee.close()
mod.close()
def test_module_lifetime2(self):
mod = self.module()
ee = self.jit(mod)
mod.close()
ee.close()
def test_add_module(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
with self.assertRaises(KeyError):
ee.add_module(mod)
self.assertFalse(mod.closed)
ee.close()
self.assertTrue(mod.closed)
def test_add_module_lifetime(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
mod.close()
ee.close()
def test_add_module_lifetime2(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
ee.close()
mod.close()
def test_remove_module(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
ee.remove_module(mod)
with self.assertRaises(KeyError):
ee.remove_module(mod)
self.assertFalse(mod.closed)
ee.close()
self.assertFalse(mod.closed)
def test_target_data(self):
mod = self.module()
ee = self.jit(mod)
td = ee.target_data
# A singleton is returned
self.assertIs(ee.target_data, td)
str(td)
del mod, ee
str(td)
def test_target_data_abi_enquiries(self):
mod = self.module()
ee = self.jit(mod)
td = ee.target_data
gv_i32 = mod.get_global_variable("glob")
gv_i8 = mod.get_global_variable("glob_b")
gv_struct = mod.get_global_variable("glob_struct")
# A global is a pointer, it has the ABI size of a pointer
pointer_size = 4 if sys.maxsize < 2 ** 32 else 8
for g in (gv_i32, gv_i8, gv_struct):
self.assertEqual(td.get_abi_size(g.type), pointer_size)
self.assertEqual(td.get_pointee_abi_size(gv_i32.type), 4)
self.assertEqual(td.get_pointee_abi_alignment(gv_i32.type), 4)
self.assertEqual(td.get_pointee_abi_size(gv_i8.type), 1)
self.assertIn(td.get_pointee_abi_alignment(gv_i8.type), (1, 2, 4))
self.assertEqual(td.get_pointee_abi_size(gv_struct.type), 24)
self.assertIn(td.get_pointee_abi_alignment(gv_struct.type), (4, 8))
def test_object_cache_notify(self):
notifies = []
def notify(mod, buf):
notifies.append((mod, buf))
mod = self.module()
ee = self.jit(mod)
ee.set_object_cache(notify)
self.assertEqual(len(notifies), 0)
cfunc = self.get_sum(ee)
cfunc(2, -5)
self.assertEqual(len(notifies), 1)
# The right module object was found
self.assertIs(notifies[0][0], mod)
self.assertIsInstance(notifies[0][1], bytes)
notifies[:] = []
mod2 = self.module(asm_mul)
ee.add_module(mod2)
cfunc = self.get_sum(ee, "mul")
self.assertEqual(len(notifies), 1)
# The right module object was found
self.assertIs(notifies[0][0], mod2)
self.assertIsInstance(notifies[0][1], bytes)
def test_object_cache_getbuffer(self):
notifies = []
getbuffers = []
def notify(mod, buf):
notifies.append((mod, buf))
def getbuffer(mod):
getbuffers.append(mod)
mod = self.module()
ee = self.jit(mod)
ee.set_object_cache(notify, getbuffer)
# First return None from getbuffer(): the object is compiled normally
self.assertEqual(len(notifies), 0)
self.assertEqual(len(getbuffers), 0)
cfunc = self.get_sum(ee)
self.assertEqual(len(notifies), 1)
self.assertEqual(len(getbuffers), 1)
self.assertIs(getbuffers[0], mod)
sum_buffer = notifies[0][1]
# Recreate a new EE, and use getbuffer() to return the previously
# compiled object.
def getbuffer_successful(mod):
getbuffers.append(mod)
return sum_buffer
notifies[:] = []
getbuffers[:] = []
# Use another source module to make sure it is ignored
mod = self.module(asm_mul)
ee = self.jit(mod)
ee.set_object_cache(notify, getbuffer_successful)
self.assertEqual(len(notifies), 0)
self.assertEqual(len(getbuffers), 0)
cfunc = self.get_sum(ee)
self.assertEqual(cfunc(2, -5), -3)
self.assertEqual(len(notifies), 0)
self.assertEqual(len(getbuffers), 1)
class JITWithTMTestMixin(JITTestMixin):
def test_emit_assembly(self):
"""Test TargetMachineRef.emit_assembly()"""
target_machine = self.target_machine(jit=True)
mod = self.module()
ee = self.jit(mod, target_machine) # noqa F841 # Keeps pointers alive
raw_asm = target_machine.emit_assembly(mod)
self.assertIn("sum", raw_asm)
target_machine.set_asm_verbosity(True)
raw_asm_verbose = target_machine.emit_assembly(mod)
self.assertIn("sum", raw_asm)
self.assertNotEqual(raw_asm, raw_asm_verbose)
def test_emit_object(self):
"""Test TargetMachineRef.emit_object()"""
target_machine = self.target_machine(jit=True)
mod = self.module()
ee = self.jit(mod, target_machine) # noqa F841 # Keeps pointers alive
code_object = target_machine.emit_object(mod)
self.assertIsInstance(code_object, bytes)
if sys.platform.startswith('linux'):
# Sanity check
self.assertIn(b"ELF", code_object[:10])
class TestMCJit(BaseTest, JITWithTMTestMixin):
"""
Test JIT engines created with create_mcjit_compiler().
"""
def jit(self, mod, target_machine=None):
if target_machine is None:
target_machine = self.target_machine(jit=True)
return llvm.create_mcjit_compiler(mod, target_machine)
class TestValueRef(BaseTest):
def test_str(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertEqual(str(glob), "@glob = global i32 0")
def test_name(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertEqual(glob.name, "glob")
glob.name = "foobar"
self.assertEqual(glob.name, "foobar")
def test_linkage(self):
mod = self.module()
glob = mod.get_global_variable("glob")
linkage = glob.linkage
self.assertIsInstance(glob.linkage, llvm.Linkage)
glob.linkage = linkage
self.assertEqual(glob.linkage, linkage)
for linkage in ("internal", "external"):
glob.linkage = linkage
self.assertIsInstance(glob.linkage, llvm.Linkage)
self.assertEqual(glob.linkage.name, linkage)
def test_visibility(self):
mod = self.module()
glob = mod.get_global_variable("glob")
visibility = glob.visibility
self.assertIsInstance(glob.visibility, llvm.Visibility)
glob.visibility = visibility
self.assertEqual(glob.visibility, visibility)
for visibility in ("hidden", "protected", "default"):
glob.visibility = visibility
self.assertIsInstance(glob.visibility, llvm.Visibility)
self.assertEqual(glob.visibility.name, visibility)
def test_storage_class(self):
mod = self.module()
glob = mod.get_global_variable("glob")
storage_class = glob.storage_class
self.assertIsInstance(glob.storage_class, llvm.StorageClass)
glob.storage_class = storage_class
self.assertEqual(glob.storage_class, storage_class)
for storage_class in ("dllimport", "dllexport", "default"):
glob.storage_class = storage_class
self.assertIsInstance(glob.storage_class, llvm.StorageClass)
self.assertEqual(glob.storage_class.name, storage_class)
def test_add_function_attribute(self):
mod = self.module()
fn = mod.get_function("sum")
fn.add_function_attribute("nocapture")
with self.assertRaises(ValueError) as raises:
fn.add_function_attribute("zext")
self.assertEqual(str(raises.exception), "no such attribute 'zext'")
def test_module(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertIs(glob.module, mod)
def test_type(self):
mod = self.module()
glob = mod.get_global_variable("glob")
tp = glob.type
self.assertIsInstance(tp, llvm.TypeRef)
def test_type_name(self):
mod = self.module()
glob = mod.get_global_variable("glob")
tp = glob.type
self.assertEqual(tp.name, "")
st = mod.get_global_variable("glob_struct")
self.assertIsNotNone(re.match(r"struct\.glob_type(\.[\d]+)?",
st.type.element_type.name))
def test_type_printing_variable(self):
mod = self.module()
glob = mod.get_global_variable("glob")
tp = glob.type
self.assertEqual(str(tp), 'i32*')
def test_type_printing_function(self):
mod = self.module()
fn = mod.get_function("sum")
self.assertEqual(str(fn.type), "i32 (i32, i32)*")
def test_type_printing_struct(self):
mod = self.module()
st = mod.get_global_variable("glob_struct")
self.assertTrue(st.type.is_pointer)
self.assertIsNotNone(re.match(r'%struct\.glob_type(\.[\d]+)?\*',
str(st.type)))
self.assertIsNotNone(re.match(
r"%struct\.glob_type(\.[\d]+)? = type { i64, \[2 x i64\] }",
str(st.type.element_type)))
def test_close(self):
glob = self.glob()
glob.close()
glob.close()
def test_is_declaration(self):
defined = self.module().get_function('sum')
declared = self.module(asm_sum_declare).get_function('sum')
self.assertFalse(defined.is_declaration)
self.assertTrue(declared.is_declaration)
def test_module_global_variables(self):
mod = self.module(asm_sum)
gvars = list(mod.global_variables)
self.assertEqual(len(gvars), 4)
for v in gvars:
self.assertTrue(v.is_global)
def test_module_functions(self):
mod = self.module()
funcs = list(mod.functions)
self.assertEqual(len(funcs), 1)
func = funcs[0]
self.assertTrue(func.is_function)
self.assertEqual(func.name, 'sum')
with self.assertRaises(ValueError):
func.instructions
with self.assertRaises(ValueError):
func.operands
with self.assertRaises(ValueError):
func.opcode
def test_function_arguments(self):
mod = self.module()
func = mod.get_function('sum')
self.assertTrue(func.is_function)
args = list(func.arguments)
self.assertEqual(len(args), 2)
self.assertTrue(args[0].is_argument)
self.assertTrue(args[1].is_argument)
self.assertEqual(args[0].name, '.1')
self.assertEqual(str(args[0].type), 'i32')
self.assertEqual(args[1].name, '.2')
self.assertEqual(str(args[1].type), 'i32')
with self.assertRaises(ValueError):
args[0].blocks
with self.assertRaises(ValueError):
args[0].arguments
def test_function_blocks(self):
func = self.module().get_function('sum')
blocks = list(func.blocks)
self.assertEqual(len(blocks), 1)
block = blocks[0]
self.assertTrue(block.is_block)
def test_block_instructions(self):
func = self.module().get_function('sum')
insts = list(list(func.blocks)[0].instructions)
self.assertEqual(len(insts), 3)
self.assertTrue(insts[0].is_instruction)
self.assertTrue(insts[1].is_instruction)
self.assertTrue(insts[2].is_instruction)
self.assertEqual(insts[0].opcode, 'add')
self.assertEqual(insts[1].opcode, 'add')
self.assertEqual(insts[2].opcode, 'ret')
def test_instruction_operands(self):
func = self.module().get_function('sum')
add = list(list(func.blocks)[0].instructions)[0]
self.assertEqual(add.opcode, 'add')
operands = list(add.operands)
self.assertEqual(len(operands), 2)
self.assertTrue(operands[0].is_operand)
self.assertTrue(operands[1].is_operand)
self.assertEqual(operands[0].name, '.1')
self.assertEqual(str(operands[0].type), 'i32')
self.assertEqual(operands[1].name, '.2')
self.assertEqual(str(operands[1].type), 'i32')
def test_function_attributes(self):
mod = self.module(asm_attributes)
for func in mod.functions:
attrs = list(func.attributes)
if func.name == 'a_readonly_func':
self.assertEqual(attrs, [b'readonly'])
elif func.name == 'a_arg0_return_func':
self.assertEqual(attrs, [])
args = list(func.arguments)
self.assertEqual(list(args[0].attributes), [b'returned'])
self.assertEqual(list(args[1].attributes), [])
class TestTarget(BaseTest):
def test_from_triple(self):
f = llvm.Target.from_triple
with self.assertRaises(RuntimeError) as cm:
f("foobar")
self.assertIn("No available targets are compatible with",
str(cm.exception))
triple = llvm.get_default_triple()
target = f(triple)
self.assertEqual(target.triple, triple)
target.close()
def test_create_target_machine(self):
target = llvm.Target.from_triple(llvm.get_default_triple())
# With the default settings
target.create_target_machine('', '', 1, 'default', 'default')
# With the host's CPU
cpu = llvm.get_host_cpu_name()
target.create_target_machine(cpu, '', 1, 'default', 'default')
def test_name(self):
t = llvm.Target.from_triple(llvm.get_default_triple())
u = llvm.Target.from_default_triple()
self.assertIsInstance(t.name, str)
self.assertEqual(t.name, u.name)
def test_description(self):
t = llvm.Target.from_triple(llvm.get_default_triple())
u = llvm.Target.from_default_triple()
self.assertIsInstance(t.description, str)
self.assertEqual(t.description, u.description)
def test_str(self):
target = llvm.Target.from_triple(llvm.get_default_triple())
s = str(target)
self.assertIn(target.name, s)
self.assertIn(target.description, s)
class TestTargetData(BaseTest):
def target_data(self):
return llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
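# Rough reading of the data layout string above (standard LLVM datalayout
# syntax): 'e' little-endian, 'm:e' ELF name mangling, 'i64:64' 64-bit ABI
# alignment for i64, 'f80:128' x86 long double aligned to 128 bits,
# 'n8:16:32:64' native integer widths, 'S128' 128-bit natural stack alignment.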
def test_get_abi_size(self):
td = self.target_data()
glob = self.glob()
self.assertEqual(td.get_abi_size(glob.type), 8)
def test_get_pointee_abi_size(self):
td = self.target_data()
glob = self.glob()
self.assertEqual(td.get_pointee_abi_size(glob.type), 4)
glob = self.glob("glob_struct")
self.assertEqual(td.get_pointee_abi_size(glob.type), 24)
def test_get_struct_element_offset(self):
td = self.target_data()
glob = self.glob("glob_struct")
with self.assertRaises(ValueError):
td.get_element_offset(glob.type, 0)
struct_type = glob.type.element_type
self.assertEqual(td.get_element_offset(struct_type, 0), 0)
self.assertEqual(td.get_element_offset(struct_type, 1), 8)
class TestTargetMachine(BaseTest):
def test_add_analysis_passes(self):
tm = self.target_machine(jit=False)
pm = llvm.create_module_pass_manager()
tm.add_analysis_passes(pm)
def test_target_data_from_tm(self):
tm = self.target_machine(jit=False)
td = tm.target_data
mod = self.module()
gv_i32 = mod.get_global_variable("glob")
# A global is a pointer, it has the ABI size of a pointer
pointer_size = 4 if sys.maxsize < 2 ** 32 else 8
self.assertEqual(td.get_abi_size(gv_i32.type), pointer_size)
class TestPassManagerBuilder(BaseTest):
def pmb(self):
return llvm.PassManagerBuilder()
def test_old_api(self):
# Test the create_pass_manager_builder() factory function
pmb = llvm.create_pass_manager_builder()
pmb.inlining_threshold = 2
pmb.opt_level = 3
def test_close(self):
pmb = self.pmb()
pmb.close()
pmb.close()
def test_opt_level(self):
pmb = self.pmb()
self.assertIsInstance(pmb.opt_level, int)
for i in range(4):
pmb.opt_level = i
self.assertEqual(pmb.opt_level, i)
def test_size_level(self):
pmb = self.pmb()
self.assertIsInstance(pmb.size_level, int)
for i in range(4):
pmb.size_level = i
self.assertEqual(pmb.size_level, i)
def test_inlining_threshold(self):
pmb = self.pmb()
with self.assertRaises(NotImplementedError):
pmb.inlining_threshold
for i in (25, 80, 350):
pmb.inlining_threshold = i
def test_disable_unroll_loops(self):
pmb = self.pmb()
self.assertIsInstance(pmb.disable_unroll_loops, bool)
for b in (True, False):
pmb.disable_unroll_loops = b
self.assertEqual(pmb.disable_unroll_loops, b)
def test_loop_vectorize(self):
pmb = self.pmb()
self.assertIsInstance(pmb.loop_vectorize, bool)
for b in (True, False):
pmb.loop_vectorize = b
self.assertEqual(pmb.loop_vectorize, b)
def test_slp_vectorize(self):
pmb = self.pmb()
self.assertIsInstance(pmb.slp_vectorize, bool)
for b in (True, False):
pmb.slp_vectorize = b
self.assertEqual(pmb.slp_vectorize, b)
def test_populate_module_pass_manager(self):
pmb = self.pmb()
pm = llvm.create_module_pass_manager()
pmb.populate(pm)
pmb.close()
pm.close()
def test_populate_function_pass_manager(self):
mod = self.module()
pmb = self.pmb()
pm = llvm.create_function_pass_manager(mod)
pmb.populate(pm)
pmb.close()
pm.close()
class PassManagerTestMixin(object):
def pmb(self):
pmb = llvm.create_pass_manager_builder()
pmb.opt_level = 2
return pmb
def test_close(self):
pm = self.pm()
pm.close()
pm.close()
class TestModulePassManager(BaseTest, PassManagerTestMixin):
def pm(self):
return llvm.create_module_pass_manager()
def test_run(self):
pm = self.pm()
self.pmb().populate(pm)
mod = self.module()
orig_asm = str(mod)
pm.run(mod)
opt_asm = str(mod)
# Quick check that optimizations were run, should get:
# define i32 @sum(i32 %.1, i32 %.2) local_unnamed_addr #0 {
# %.X = add i32 %.2, %.1
# ret i32 %.X
# }
# where X in %.X is 3 or 4
opt_asm_split = opt_asm.splitlines()
for idx, l in enumerate(opt_asm_split):
if l.strip().startswith('ret i32'):
toks = {'%.3', '%.4'}
for t in toks:
if t in l:
break
else:
raise RuntimeError("expected tokens not found")
othertoken = (toks ^ {t}).pop()
self.assertIn("%.3", orig_asm)
self.assertNotIn(othertoken, opt_asm)
break
else:
raise RuntimeError("expected IR not found")
class TestFunctionPassManager(BaseTest, PassManagerTestMixin):
def pm(self, mod=None):
mod = mod or self.module()
return llvm.create_function_pass_manager(mod)
def test_initfini(self):
pm = self.pm()
pm.initialize()
pm.finalize()
def test_run(self):
mod = self.module()
fn = mod.get_function("sum")
pm = self.pm(mod)
self.pmb().populate(pm)
mod.close()
orig_asm = str(fn)
pm.initialize()
pm.run(fn)
pm.finalize()
opt_asm = str(fn)
# Quick check that optimizations were run
self.assertIn("%.4", orig_asm)
self.assertNotIn("%.4", opt_asm)
class TestPasses(BaseTest, PassManagerTestMixin):
def pm(self):
return llvm.create_module_pass_manager()
def test_populate(self):
pm = self.pm()
pm.add_constant_merge_pass()
pm.add_dead_arg_elimination_pass()
pm.add_function_attrs_pass()
pm.add_function_inlining_pass(225)
pm.add_global_dce_pass()
pm.add_global_optimizer_pass()
pm.add_ipsccp_pass()
pm.add_dead_code_elimination_pass()
pm.add_cfg_simplification_pass()
pm.add_gvn_pass()
pm.add_instruction_combining_pass()
pm.add_licm_pass()
pm.add_sccp_pass()
pm.add_sroa_pass()
pm.add_type_based_alias_analysis_pass()
pm.add_basic_alias_analysis_pass()
pm.add_loop_rotate_pass()
class TestDylib(BaseTest):
def test_bad_library(self):
with self.assertRaises(RuntimeError):
llvm.load_library_permanently("zzzasdkf;jasd;l")
@unittest.skipUnless(platform.system() in ["Linux", "Darwin"],
"test only works on Linux and Darwin")
def test_libm(self):
system = platform.system()
if system == "Linux":
libm = find_library("m")
elif system == "Darwin":
libm = find_library("libm")
llvm.load_library_permanently(libm)
class TestAnalysis(BaseTest):
def build_ir_module(self):
m = ir.Module()
ft = ir.FunctionType(ir.IntType(32), [ir.IntType(32), ir.IntType(32)])
fn = ir.Function(m, ft, "foo")
bd = ir.IRBuilder(fn.append_basic_block())
x, y = fn.args
z = bd.add(x, y)
bd.ret(z)
return m
def test_get_function_cfg_on_ir(self):
mod = self.build_ir_module()
foo = mod.get_global('foo')
dot_showing_inst = llvm.get_function_cfg(foo)
dot_without_inst = llvm.get_function_cfg(foo, show_inst=False)
inst = "%.5 = add i32 %.1, %.2"
self.assertIn(inst, dot_showing_inst)
self.assertNotIn(inst, dot_without_inst)
def test_function_cfg_on_llvm_value(self):
defined = self.module().get_function('sum')
dot_showing_inst = llvm.get_function_cfg(defined, show_inst=True)
dot_without_inst = llvm.get_function_cfg(defined, show_inst=False)
# Check "digraph"
prefix = 'digraph'
self.assertIn(prefix, dot_showing_inst)
self.assertIn(prefix, dot_without_inst)
# Check function name
fname = "CFG for 'sum' function"
self.assertIn(fname, dot_showing_inst)
self.assertIn(fname, dot_without_inst)
# Check instruction
inst = "%.3 = add i32 %.1, %.2"
self.assertIn(inst, dot_showing_inst)
self.assertNotIn(inst, dot_without_inst)
class TestTypeParsing(BaseTest):
@contextmanager
def check_parsing(self):
mod = ir.Module()
# Yield to caller and provide the module for adding
# new GV.
yield mod
# Caller yield back and continue with testing
asm = str(mod)
llvm.parse_assembly(asm)
def test_literal_struct(self):
# Natural layout
with self.check_parsing() as mod:
typ = ir.LiteralStructType([ir.IntType(32)])
gv = ir.GlobalVariable(mod, typ, "foo")
# Also test constant text repr
gv.initializer = ir.Constant(typ, [1])
# Packed layout
with self.check_parsing() as mod:
typ = ir.LiteralStructType([ir.IntType(32)],
packed=True)
gv = ir.GlobalVariable(mod, typ, "foo")
# Also test constant text repr
gv.initializer = ir.Constant(typ, [1])
class TestGlobalConstructors(TestMCJit):
def test_global_ctors_dtors(self):
# test issue #303
# (https://github.com/numba/llvmlite/issues/303)
mod = self.module(asm_global_ctors)
ee = self.jit(mod)
ee.finalize_object()
ee.run_static_constructors()
# global variable should have been initialized
ptr_addr = ee.get_global_value_address("A")
ptr_t = ctypes.POINTER(ctypes.c_int32)
ptr = ctypes.cast(ptr_addr, ptr_t)
self.assertEqual(ptr.contents.value, 10)
foo_addr = ee.get_function_address("foo")
foo = ctypes.CFUNCTYPE(ctypes.c_int32)(foo_addr)
self.assertEqual(foo(), 12)
ee.run_static_destructors()
# destructor should have run
self.assertEqual(ptr.contents.value, 20)
class TestGlobalVariables(BaseTest):
def check_global_variable_linkage(self, linkage, has_undef=True):
# This tests default initializers on global variables with different
# linkages. Some linkages require an initializer to be present, while for
# others it is optional or disallowed. The test uses ``parse_assembly()``
# to verify that an `undef` initializer is added automatically when the user
# didn't specify one for the linkages that need it; declaration-style linkages
# such as "external" and "extern_weak" are emitted without an initializer.
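# As a rough illustration (hypothetical global named "foo"), the emitted IR is
# expected to look something like:
#   internal linkage:  @foo = internal global i32 undef   (undef filled in for us)
#   external linkage:  @foo = external global i32         (no initializer emitted)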
mod = ir.Module()
typ = ir.IntType(32)
gv = ir.GlobalVariable(mod, typ, "foo")
gv.linkage = linkage
asm = str(mod)
# check if 'undef' is present
if has_undef:
self.assertIn('undef', asm)
else:
self.assertNotIn('undef', asm)
# parse assembly to ensure correctness
self.module(asm)
def test_internal_linkage(self):
self.check_global_variable_linkage('internal')
def test_common_linkage(self):
self.check_global_variable_linkage('common')
def test_external_linkage(self):
self.check_global_variable_linkage('external', has_undef=False)
def test_available_externally_linkage(self):
self.check_global_variable_linkage('available_externally')
def test_private_linkage(self):
self.check_global_variable_linkage('private')
def test_linkonce_linkage(self):
self.check_global_variable_linkage('linkonce')
def test_weak_linkage(self):
self.check_global_variable_linkage('weak')
def test_appending_linkage(self):
self.check_global_variable_linkage('appending')
def test_extern_weak_linkage(self):
self.check_global_variable_linkage('extern_weak', has_undef=False)
def test_linkonce_odr_linkage(self):
self.check_global_variable_linkage('linkonce_odr')
def test_weak_odr_linkage(self):
self.check_global_variable_linkage('weak_odr')
@unittest.skipUnless(platform.machine().startswith('x86'), "only on x86")
class TestInlineAsm(BaseTest):
def test_inlineasm(self):
llvm.initialize_native_asmparser()
m = self.module(asm=asm_inlineasm)
tm = self.target_machine(jit=False)
asm = tm.emit_assembly(m)
self.assertIn('nop', asm)
class TestObjectFile(BaseTest):
mod_asm = """
;ModuleID = <string>
target triple = "{triple}"
declare i32 @sum(i32 %.1, i32 %.2)
define i32 @sum_twice(i32 %.1, i32 %.2) {{
%.3 = call i32 @sum(i32 %.1, i32 %.2)
%.4 = call i32 @sum(i32 %.3, i32 %.3)
ret i32 %.4
}}
"""
def test_object_file(self):
target_machine = self.target_machine(jit=False)
mod = self.module()
obj_bin = target_machine.emit_object(mod)
obj = llvm.ObjectFileRef.from_data(obj_bin)
# Check that we have a text section, and that it has a name and data
has_text = False
last_address = -1
for s in obj.sections():
if s.is_text():
has_text = True
self.assertIsNotNone(s.name())
self.assertTrue(s.size() > 0)
self.assertTrue(len(s.data()) > 0)
self.assertIsNotNone(s.address())
self.assertTrue(last_address < s.address())
last_address = s.address()
break
self.assertTrue(has_text)
def test_add_object_file(self):
target_machine = self.target_machine(jit=False)
mod = self.module()
obj_bin = target_machine.emit_object(mod)
obj = llvm.ObjectFileRef.from_data(obj_bin)
jit = llvm.create_mcjit_compiler(self.module(self.mod_asm),
target_machine)
jit.add_object_file(obj)
sum_twice = CFUNCTYPE(c_int, c_int, c_int)(
jit.get_function_address("sum_twice"))
self.assertEqual(sum_twice(2, 3), 10)
def test_add_object_file_from_filesystem(self):
target_machine = self.target_machine(jit=False)
mod = self.module()
obj_bin = target_machine.emit_object(mod)
temp_desc, temp_path = mkstemp()
try:
try:
f = os.fdopen(temp_desc, "wb")
f.write(obj_bin)
f.flush()
finally:
f.close()
jit = llvm.create_mcjit_compiler(self.module(self.mod_asm),
target_machine)
jit.add_object_file(temp_path)
finally:
os.unlink(temp_path)
sum_twice = CFUNCTYPE(c_int, c_int, c_int)(
jit.get_function_address("sum_twice"))
self.assertEqual(sum_twice(2, 3), 10)
def test_get_section_content(self):
# See Issue #632 - section contents were getting truncated at null
# bytes.
elf = bytes.fromhex(issue_632_elf)
obj = llvm.ObjectFileRef.from_data(elf)
for s in obj.sections():
if s.is_text():
self.assertEqual(len(s.data()), 31)
self.assertEqual(s.data().hex(), issue_632_text)
class TestTimePasses(BaseTest):
def test_reporting(self):
mp = llvm.create_module_pass_manager()
pmb = llvm.create_pass_manager_builder()
pmb.opt_level = 3
pmb.populate(mp)
try:
llvm.set_time_passes(True)
mp.run(self.module())
mp.run(self.module())
mp.run(self.module())
finally:
report = llvm.report_and_reset_timings()
llvm.set_time_passes(False)
self.assertIsInstance(report, str)
self.assertEqual(report.count("Pass execution timing report"), 1)
def test_empty_report(self):
# Returns empty str if no data is collected
self.assertFalse(llvm.report_and_reset_timings())
class TestLLVMLockCallbacks(BaseTest):
def test_lock_callbacks(self):
events = []
def acq():
events.append('acq')
def rel():
events.append('rel')
# register callback
llvm.ffi.register_lock_callback(acq, rel)
# Check: events are initially empty
self.assertFalse(events)
# Call LLVM functions
llvm.create_module_pass_manager()
# Check: there must be at least one acq and one rel
self.assertIn("acq", events)
self.assertIn("rel", events)
# unregister callback
llvm.ffi.unregister_lock_callback(acq, rel)
# Check: removing non-existent callbacks will trigger a ValueError
with self.assertRaises(ValueError):
llvm.ffi.unregister_lock_callback(acq, rel)
if __name__ == "__main__":
unittest.main()
|
|
import json
from unittest.mock import patch
from django.dispatch import Signal
from django.test import TestCase
from django.test.client import Client
from django.urls import reverse
import stripe
from ..models import Event, EventProcessingException
from ..webhooks import (
AccountExternalAccountCreatedWebhook,
AccountUpdatedWebhook,
Webhook,
registry
)
class NewAccountUpdatedWebhook(AccountUpdatedWebhook):
pass
class WebhookRegistryTest(TestCase):
def test_get_signal(self):
signal = registry.get_signal("account.updated")
self.assertTrue(isinstance(signal, Signal))
def test_get_signal_keyerror(self):
self.assertIsNone(registry.get_signal("not a webhook"))
def test_inherited_hook(self):
webhook = registry.get("account.updated")
self.assertIs(webhook, NewAccountUpdatedWebhook)
class WebhookTests(TestCase):
event_data = {
"api_version": "2017-06-05",
"created": 1348360173,
"data": {
"object": {
"amount": 455,
"currency": "usd",
"date": 1348876800,
"description": None,
"id": "ach_XXXXXXXXXXXX",
"object": "transfer",
"other_transfers": [],
"status": "pending",
"livemode": True,
"reversed": False,
"summary": {
"adjustment_count": 0,
"adjustment_fee_details": [],
"adjustment_fees": 0,
"adjustment_gross": 0,
"charge_count": 1,
"charge_fee_details": [{
"amount": 45,
"application": None,
"currency": "usd",
"description": None,
"type": "stripe_fee"
}],
"charge_fees": 45,
"charge_gross": 500,
"collected_fee_count": 0,
"collected_fee_gross": 0,
"currency": "usd",
"net": 455,
"refund_count": 0,
"refund_fees": 0,
"refund_gross": 0,
"validation_count": 0,
"validation_fees": 0
}
}
},
"id": "evt_XXXXXXXXXXXXx",
"livemode": True,
"object": "event",
"pending_webhooks": 1,
"type": "transfer.created"
}
def test_webhook_init(self):
event = Event(kind=None)
webhook = Webhook(event)
self.assertIsNone(webhook.name)
@patch("stripe.Webhook.construct_event")
@patch("stripe.Event.retrieve")
@patch("stripe.Transfer.retrieve")
def test_webhook_with_transfer_event(self, TransferMock, StripeEventMock, MockEvent):
MockEvent.return_value.to_dict_recursive.return_value = self.event_data.copy()
StripeEventMock.return_value.to_dict.return_value = self.event_data
TransferMock.return_value = self.event_data["data"]["object"]
msg = json.dumps(self.event_data)
resp = Client().post(
reverse("pinax_stripe_webhook"),
msg,
content_type="application/json",
HTTP_STRIPE_SIGNATURE="foo"
)
self.assertEqual(resp.status_code, 200)
self.assertTrue(Event.objects.filter(kind="transfer.created").exists())
@patch("stripe.Webhook.construct_event")
@patch("stripe.Event.retrieve")
def test_webhook_associated_with_stripe_account(self, StripeEventMock, MockEvent):
connect_event_data = self.event_data.copy()
connect_event_data["account"] = "acc_XXX"
MockEvent.return_value.to_dict_recursive.return_value = connect_event_data
StripeEventMock.return_value.to_dict.return_value = connect_event_data
msg = json.dumps(connect_event_data)
resp = Client().post(
reverse("pinax_stripe_webhook"),
msg,
content_type="application/json",
HTTP_STRIPE_SIGNATURE="foo"
)
self.assertEqual(resp.status_code, 200)
self.assertTrue(Event.objects.filter(kind="transfer.created").exists())
self.assertEqual(
Event.objects.filter(kind="transfer.created").first().account_id,
"acc_XXX"
)
@patch("stripe.Webhook.construct_event")
def test_webhook_duplicate_event(self, MockEvent):
MockEvent.return_value.to_dict_recursive.return_value = self.event_data.copy()
data = {"id": 123}
Event.objects.create(stripe_id=123, livemode=True, message={})
msg = json.dumps(data)
resp = Client().post(
reverse("pinax_stripe_webhook"),
msg,
content_type="application/json",
HTTP_STRIPE_SIGNATURE="foo"
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(Event.objects.filter(stripe_id="123").count(), 1)
def test_webhook_event_mismatch(self):
event = Event(kind="account.updated")
WH = registry.get("account.application.deauthorized")
with self.assertRaises(Exception):
WH(event)
def test_registry_unregister(self):
registry.unregister("account.updated")
self.assertFalse("account.updated" in registry._registry)
@patch("django.dispatch.Signal.send")
def test_send_signal(self, SignalSendMock):
event = Event(kind="account.application.deauthorized")
WH = registry.get("account.application.deauthorized")
WH(event).send_signal()
self.assertTrue(SignalSendMock.called)
def test_send_signal_not_sent(self):
event = Event(kind="account.application.deauthorized")
WH = registry.get("account.application.deauthorized")
def signal_handler(sender, *args, **kwargs):
self.fail("Should not have been called.")
registry.get_signal("account.application.deauthorized").connect(signal_handler)
webhook = WH(event)
webhook.name = "mismatch name" # Not sure how this ever happens due to the registry
webhook.send_signal()
@patch("pinax.stripe.webhooks.Webhook.process_webhook")
def test_process_exception_is_logged(self, ProcessWebhookMock):
# note: we choose an event type for which we do no processing
event = Event.objects.create(kind="account.external_account.created", message={}, processed=False)
ProcessWebhookMock.side_effect = stripe.error.StripeError("Message", "error")
with self.assertRaises(stripe.error.StripeError):
AccountExternalAccountCreatedWebhook(event).process()
self.assertTrue(EventProcessingException.objects.filter(event=event).exists())
@patch("pinax.stripe.webhooks.Webhook.process_webhook")
def test_process_already_processed(self, ProcessWebhookMock):
event = Event.objects.create(kind="account.external_account.created", message={}, processed=True)
hook = registry.get(event.kind)
hook(event).process()
self.assertFalse(ProcessWebhookMock.called)
@patch("pinax.stripe.webhooks.Webhook.process_webhook")
def test_process_exception_is_logged_non_stripeerror(self, ProcessWebhookMock):
# note: we choose an event type for which we do no processing
event = Event.objects.create(kind="account.external_account.created", message={}, processed=False)
ProcessWebhookMock.side_effect = Exception("generic exception")
with self.assertRaises(Exception):
AccountExternalAccountCreatedWebhook(event).process()
self.assertTrue(EventProcessingException.objects.filter(event=event).exists())
def test_process_return_none(self):
# note: we choose an event type for which we do no processing
event = Event.objects.create(kind="account.external_account.created", message={}, processed=False)
self.assertIsNone(AccountExternalAccountCreatedWebhook(event).process())
|
|
# requirements:
# pip install -e git://github.com/jekbradbury/chainer.git@raw-kernel
from chainer import cuda, Function, Variable, Chain
import chainer.links as L
import chainer.functions as F
import numpy as np
THREADS_PER_BLOCK = 32
class STRNNFunction(Function):
def forward_cpu(self, inputs):
f, z, hinit = inputs
b, t, c = f.shape
self.h = np.zeros((b, t + 1, c), dtype=np.float32)
self.h[:, 0, :] = hinit
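# Sequential scan over time: h_t = f_t * h_{t-1} + z_t (elementwise). The
# (1 - f_t) factor on the candidate is expected to be folded into z by the
# caller (see QRNNLayer.__call__ below), making this the QRNN pooling recurrence.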
t_size = f.shape[1]
for i in range(t_size):
self.h[:, i+1, :] = self.h[:, i, :] * f[:, i, :] + z[:, i, :]
return self.h[:, 1:, :],
def forward_gpu(self, inputs):
f, z, hinit = inputs
b, t, c = f.shape
assert c % THREADS_PER_BLOCK == 0
self.h = cuda.cupy.zeros((b, t + 1, c), dtype=np.float32)
self.h[:, 0, :] = hinit
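# The raw CUDA kernel below computes the same recurrence as forward_cpu: each
# thread owns one (batch, channel) pair (grid of (b, c / THREADS_PER_BLOCK)
# blocks, THREADS_PER_BLOCK threads each) and walks over time sequentially,
# carrying the previous hidden value in a register.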
cuda.raw('''
#define THREADS_PER_BLOCK 32
extern "C" __global__ void strnn_fwd(
const CArray<float, 3> f, const CArray<float, 3> z,
CArray<float, 3> h) {
int index[3];
const int t_size = f.shape()[1];
index[0] = blockIdx.x;
index[1] = 0;
index[2] = blockIdx.y * THREADS_PER_BLOCK + threadIdx.x;
float prev_h = h[index];
for (int i = 0; i < t_size; i++){
index[1] = i;
const float ft = f[index];
const float zt = z[index];
index[1] = i + 1;
float &ht = h[index];
prev_h = prev_h * ft + zt;
ht = prev_h;
}
}''', 'strnn_fwd')(
(b, c // THREADS_PER_BLOCK), (THREADS_PER_BLOCK,),
(f, z, self.h))
return self.h[:, 1:, :],
def backward_cpu(self, inputs, grads):
f, z = inputs[:2]
gh, = grads
b, t, c = f.shape
gz = np.zeros_like(gh)
t_size = f.shape[1]
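# The gradient runs the scan in reverse: gz_{t-1} = f_t * gz_t + gh_{t-1}.
# Then gf_t = h_{t-1} * gz_t (self.h still holds the pre-update states) and the
# gradient with respect to the initial state is f_0 * gz_0.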
gz[:, -1, :] = gh[:, -1, :]
for i in range(t_size - 1, 0, -1):
gz[:, i-1, :] = gz[:, i, :] * f[:, i, :] + gh[:, i-1, :]
gf = self.h[:, :-1, :] * gz
ghinit = f[:, 0, :] * gz[:, 0, :]
return gf, gz, ghinit
def backward_gpu(self, inputs, grads):
f, z = inputs[:2]
gh, = grads
b, t, c = f.shape
gz = cuda.cupy.zeros_like(gh)
cuda.raw('''
#define THREADS_PER_BLOCK 32
extern "C" __global__ void strnn_back(
const CArray<float, 3> f, const CArray<float, 3> gh,
CArray<float, 3> gz) {
int index[3];
const int t_size = f.shape()[1];
index[0] = blockIdx.x;
index[2] = blockIdx.y * THREADS_PER_BLOCK + threadIdx.x;
index[1] = t_size - 1;
float &gz_last = gz[index];
gz_last = gh[index];
float prev_gz = gz_last;
for (int i = t_size - 1; i > 0; i--){
index[1] = i;
const float ft = f[index];
index[1] = i - 1;
const float ght = gh[index];
float &gzt = gz[index];
prev_gz = prev_gz * ft + ght;
gzt = prev_gz;
}
}''', 'strnn_back')(
(b, c // THREADS_PER_BLOCK), (THREADS_PER_BLOCK,),
(f, gh, gz))
gf = self.h[:, :-1, :] * gz
ghinit = f[:, 0, :] * gz[:, 0, :]
return gf, gz, ghinit
def strnn(f, z, h0):
return STRNNFunction()(f, z, h0)
def attention_sum(encoding, query):
alpha = F.softmax(F.batch_matmul(encoding, query, transb=True))
alpha, encoding = F.broadcast(alpha[:, :, :, None],
encoding[:, :, None, :])
return F.sum(alpha * encoding, axis=1)
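# Shape sketch (assumed sizes: batch b, source length s, query length t, dim d):
# batch_matmul(encoding (b, s, d), query (b, t, d), transb=True) gives scores
# (b, s, t); softmax normalizes over the source axis; broadcasting alpha
# (b, s, t, 1) against encoding (b, s, 1, d) and summing over axis 1 yields a
# context of shape (b, t, d).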
class Linear(L.Linear):
def __call__(self, x):
shape = x.shape
if len(shape) == 3:
x = F.reshape(x, (-1, shape[2]))
y = super(Linear, self).__call__(x)
if len(shape) == 3:
y = F.reshape(y, (shape[0], shape[1], -1))
return y
class QRNNLayer(Chain):
def __init__(self, in_size, out_size, kernel_size=2, attention=False,
decoder=False):
if kernel_size == 1:
super(QRNNLayer, self).__init__(W=Linear(in_size, 3 * out_size))
elif kernel_size == 2:
super(QRNNLayer, self).__init__(W=Linear(in_size, 3 * out_size, nobias=True),
V=Linear(in_size, 3 * out_size))
else:
super(QRNNLayer, self).__init__(
conv=L.ConvolutionND(1, in_size, 3 * out_size, kernel_size,
stride=1, pad=kernel_size - 1))
if attention:
self.add_link('U', Linear(out_size, 3 * in_size))
self.add_link('o', Linear(2 * out_size, out_size))
self.in_size, self.size, self.attention = in_size, out_size, attention
self.kernel_size = kernel_size
def pre(self, x):
dims = len(x.shape) - 1
if self.kernel_size == 1:
ret = self.W(x)
elif self.kernel_size == 2:
if dims == 2:
xprev = Variable(
self.xp.zeros((self.batch_size, 1, self.in_size),
dtype=np.float32), volatile='AUTO')
xtminus1 = F.concat((xprev, x[:, :-1, :]), axis=1)
else:
xtminus1 = self.x
ret = self.W(x) + self.V(xtminus1)
else:
ret = F.swapaxes(self.conv(
F.swapaxes(x, 1, 2))[:, :, :x.shape[2]], 1, 2)
if not self.attention:
return ret
if dims == 1:
enc = self.encoding[:, -1, :]
else:
enc = self.encoding[:, -1:, :]
return sum(F.broadcast(self.U(enc), ret))
def init(self, encoder_c=None, encoder_h=None):
self.encoding = encoder_c
self.c, self.x = None, None
if self.encoding is not None:
self.batch_size = self.encoding.shape[0]
if not self.attention:
self.c = self.encoding[:, -1, :]
if self.c is None or self.c.shape[0] < self.batch_size:
self.c = Variable(self.xp.zeros((self.batch_size, self.size),
dtype=np.float32), volatile='AUTO')
if self.x is None or self.x.shape[0] < self.batch_size:
self.x = Variable(self.xp.zeros((self.batch_size, self.in_size),
dtype=np.float32), volatile='AUTO')
def __call__(self, x):
if not hasattr(self, 'encoding') or self.encoding is None:
self.batch_size = x.shape[0]
self.init()
dims = len(x.shape) - 1
f, z, o = F.split_axis(self.pre(x), 3, axis=dims)
f = F.sigmoid(f)
z = (1 - f) * F.tanh(z)
o = F.sigmoid(o)
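# QRNN-style gating: f acts as the forget gate, z as the candidate values
# (already scaled by (1 - f) above, so the pooling below is a convex mix of the
# old state and the candidate), and o as the output gate on the pooled state.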
if dims == 2:
self.c = strnn(f, z, self.c[:self.batch_size])
else:
self.c = f * self.c + z
if self.attention:
context = attention_sum(self.encoding, self.c)
self.h = o * self.o(F.concat((self.c, context), axis=dims))
else:
self.h = self.c * o
self.x = x
return self.h
def reset_state(self):
self.encoding = None
def get_state(self):
return F.concat((self.x, self.c, self.h), axis=1)
def set_state(self, state):
self.x, self.c, self.h = F.split_axis(
state, (self.in_size, self.in_size + self.size), axis=1)
state = property(get_state, set_state)
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestPolicy(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.iam import Policy
return Policy
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_defaults(self):
empty = frozenset()
policy = self._make_one()
self.assertIsNone(policy.etag)
self.assertIsNone(policy.version)
self.assertEqual(policy.owners, empty)
self.assertEqual(policy.editors, empty)
self.assertEqual(policy.viewers, empty)
self.assertEqual(len(policy), 0)
self.assertEqual(dict(policy), {})
def test_ctor_explicit(self):
VERSION = 17
ETAG = 'ETAG'
empty = frozenset()
policy = self._make_one(ETAG, VERSION)
self.assertEqual(policy.etag, ETAG)
self.assertEqual(policy.version, VERSION)
self.assertEqual(policy.owners, empty)
self.assertEqual(policy.editors, empty)
self.assertEqual(policy.viewers, empty)
self.assertEqual(len(policy), 0)
self.assertEqual(dict(policy), {})
def test___getitem___miss(self):
policy = self._make_one()
self.assertEqual(policy['nonesuch'], set())
def test___setitem__(self):
USER = 'user:phred@example.com'
PRINCIPALS = set([USER])
policy = self._make_one()
policy['rolename'] = [USER]
self.assertEqual(policy['rolename'], PRINCIPALS)
self.assertEqual(len(policy), 1)
self.assertEqual(dict(policy), {'rolename': PRINCIPALS})
def test___delitem___hit(self):
policy = self._make_one()
policy._bindings['rolename'] = ['phred@example.com']
del policy['rolename']
self.assertEqual(len(policy), 0)
self.assertEqual(dict(policy), {})
def test___delitem___miss(self):
policy = self._make_one()
with self.assertRaises(KeyError):
del policy['nonesuch']
def test_owners_getter(self):
from google.cloud.iam import OWNER_ROLE
MEMBER = 'user:phred@example.com'
expected = frozenset([MEMBER])
policy = self._make_one()
policy[OWNER_ROLE] = [MEMBER]
self.assertEqual(policy.owners, expected)
def test_owners_setter(self):
import warnings
from google.cloud.iam import OWNER_ROLE
MEMBER = 'user:phred@example.com'
expected = set([MEMBER])
policy = self._make_one()
with warnings.catch_warnings():
warnings.simplefilter('always')
policy.owners = [MEMBER]
self.assertEqual(policy[OWNER_ROLE], expected)
def test_editors_getter(self):
from google.cloud.iam import EDITOR_ROLE
MEMBER = 'user:phred@example.com'
expected = frozenset([MEMBER])
policy = self._make_one()
policy[EDITOR_ROLE] = [MEMBER]
self.assertEqual(policy.editors, expected)
def test_editors_setter(self):
import warnings
from google.cloud.iam import EDITOR_ROLE
MEMBER = 'user:phred@example.com'
expected = set([MEMBER])
policy = self._make_one()
with warnings.catch_warnings():
warnings.simplefilter('always')
policy.editors = [MEMBER]
self.assertEqual(policy[EDITOR_ROLE], expected)
def test_viewers_getter(self):
from google.cloud.iam import VIEWER_ROLE
MEMBER = 'user:phred@example.com'
expected = frozenset([MEMBER])
policy = self._make_one()
policy[VIEWER_ROLE] = [MEMBER]
self.assertEqual(policy.viewers, expected)
def test_viewers_setter(self):
import warnings
from google.cloud.iam import VIEWER_ROLE
MEMBER = 'user:phred@example.com'
expected = set([MEMBER])
policy = self._make_one()
with warnings.catch_warnings():
warnings.simplefilter('always')
policy.viewers = [MEMBER]
self.assertEqual(policy[VIEWER_ROLE], expected)
def test_user(self):
EMAIL = 'phred@example.com'
MEMBER = 'user:%s' % (EMAIL,)
policy = self._make_one()
self.assertEqual(policy.user(EMAIL), MEMBER)
def test_service_account(self):
EMAIL = 'phred@example.com'
MEMBER = 'serviceAccount:%s' % (EMAIL,)
policy = self._make_one()
self.assertEqual(policy.service_account(EMAIL), MEMBER)
def test_group(self):
EMAIL = 'phred@example.com'
MEMBER = 'group:%s' % (EMAIL,)
policy = self._make_one()
self.assertEqual(policy.group(EMAIL), MEMBER)
def test_domain(self):
DOMAIN = 'example.com'
MEMBER = 'domain:%s' % (DOMAIN,)
policy = self._make_one()
self.assertEqual(policy.domain(DOMAIN), MEMBER)
def test_all_users(self):
policy = self._make_one()
self.assertEqual(policy.all_users(), 'allUsers')
def test_authenticated_users(self):
policy = self._make_one()
self.assertEqual(policy.authenticated_users(), 'allAuthenticatedUsers')
def test_from_api_repr_only_etag(self):
empty = frozenset()
RESOURCE = {
'etag': 'ACAB',
}
klass = self._get_target_class()
policy = klass.from_api_repr(RESOURCE)
self.assertEqual(policy.etag, 'ACAB')
self.assertIsNone(policy.version)
self.assertEqual(policy.owners, empty)
self.assertEqual(policy.editors, empty)
self.assertEqual(policy.viewers, empty)
self.assertEqual(dict(policy), {})
def test_from_api_repr_complete(self):
from google.cloud.iam import (
OWNER_ROLE,
EDITOR_ROLE,
VIEWER_ROLE,
)
OWNER1 = 'group:cloud-logs@google.com'
OWNER2 = 'user:phred@example.com'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:phred@example.com'
VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
VIEWER2 = 'user:phred@example.com'
RESOURCE = {
'etag': 'DEADBEEF',
'version': 17,
'bindings': [
{'role': OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
],
}
klass = self._get_target_class()
policy = klass.from_api_repr(RESOURCE)
self.assertEqual(policy.etag, 'DEADBEEF')
self.assertEqual(policy.version, 17)
self.assertEqual(policy.owners, frozenset([OWNER1, OWNER2]))
self.assertEqual(policy.editors, frozenset([EDITOR1, EDITOR2]))
self.assertEqual(policy.viewers, frozenset([VIEWER1, VIEWER2]))
self.assertEqual(
dict(policy), {
OWNER_ROLE: set([OWNER1, OWNER2]),
EDITOR_ROLE: set([EDITOR1, EDITOR2]),
VIEWER_ROLE: set([VIEWER1, VIEWER2]),
})
def test_from_api_repr_unknown_role(self):
USER = 'user:phred@example.com'
GROUP = 'group:cloud-logs@google.com'
RESOURCE = {
'etag': 'DEADBEEF',
'version': 17,
'bindings': [
{'role': 'unknown', 'members': [USER, GROUP]},
],
}
klass = self._get_target_class()
policy = klass.from_api_repr(RESOURCE)
self.assertEqual(policy.etag, 'DEADBEEF')
self.assertEqual(policy.version, 17)
self.assertEqual(dict(policy), {'unknown': set([GROUP, USER])})
def test_to_api_repr_defaults(self):
policy = self._make_one()
self.assertEqual(policy.to_api_repr(), {})
def test_to_api_repr_only_etag(self):
policy = self._make_one('DEADBEEF')
self.assertEqual(policy.to_api_repr(), {'etag': 'DEADBEEF'})
def test_to_api_repr_binding_wo_members(self):
policy = self._make_one()
policy['empty'] = []
self.assertEqual(policy.to_api_repr(), {})
def test_to_api_repr_binding_w_duplicates(self):
from google.cloud.iam import OWNER_ROLE
OWNER = 'group:cloud-logs@google.com'
policy = self._make_one()
policy.owners = [OWNER, OWNER]
self.assertEqual(
policy.to_api_repr(), {
'bindings': [{'role': OWNER_ROLE, 'members': [OWNER]}],
})
def test_to_api_repr_full(self):
import operator
from google.cloud.iam import (
OWNER_ROLE,
EDITOR_ROLE,
VIEWER_ROLE,
)
OWNER1 = 'group:cloud-logs@google.com'
OWNER2 = 'user:phred@example.com'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:phred@example.com'
VIEWER1 = 'serviceAccount:1234-abcdef@service.example.com'
VIEWER2 = 'user:phred@example.com'
BINDINGS = [
{'role': OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
]
policy = self._make_one('DEADBEEF', 17)
policy.owners = [OWNER1, OWNER2]
policy.editors = [EDITOR1, EDITOR2]
policy.viewers = [VIEWER1, VIEWER2]
resource = policy.to_api_repr()
self.assertEqual(resource['etag'], 'DEADBEEF')
self.assertEqual(resource['version'], 17)
key = operator.itemgetter('role')
self.assertEqual(
sorted(resource['bindings'], key=key), sorted(BINDINGS, key=key))
|
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import logging
import re
from c7n.filters import Filter, FilterValidationError, ValueFilter
from c7n.filters.related import RelatedResourceFilter
from c7n.query import sources
from c7n.resources import load_resources
from c7n.utils import local_session, type_schema
from c7n_azure.actions.base import AzureBaseAction
from c7n_azure.constants import GRAPH_AUTH_ENDPOINT
from c7n_azure.provider import Azure, resources
from c7n_azure.query import DescribeSource, QueryResourceManager
from c7n_azure.utils import GraphHelper
log = logging.getLogger('custodian.azure.access_control')
@resources.register('roleassignment')
class RoleAssignment(QueryResourceManager):
"""Role assignments map role definitions to principals. The Azure
object only contains the unique ID of the principal, however we
attempt to augment the object with the principal name, display name
and type from AAD.
Augmenting with data from AAD requires the executing account to have
permissions to read from the Microsoft AAD Graph. For Service Principal
Authorization the Service Principal must have the permissions to
`read all users' full profiles`. Azure CLI authentication will
provide the necessary permissions to run the policy locally.
:example:
Return role assignments with the `Owner role`.
.. code-block:: yaml
policies:
- name: role-assignment-owner
resource: azure.roleassignment
filters:
- type: role
key: properties.roleName
op: eq
value: Owner
:example:
Return assignments with the principal name custodian@example.com
.. code-block:: yaml
policies:
- name: assignment-by-principal-name
resource: azure.roleassignment
filters:
- type: value
key: principalName
op: eq
value: custodian@example.com
:example:
Delete the assignment with principal name custodian@example.com.
**Note: Running the delete action requires delete
permissions on Microsoft.Authorization.
The built-in role with the necessary permissions is Owner.**
.. code-block:: yaml
policies:
- name: delete-assignment-by-principal-name
resource: azure.roleassignment
filters:
- type: value
key: principalName
op: eq
value: custodian@example.com
actions:
- type: delete
"""
class resource_type(QueryResourceManager.resource_type):
doc_groups = ['Active Directory']
service = 'azure.mgmt.authorization'
client = 'AuthorizationManagementClient'
enum_spec = ('role_assignments', 'list', None)
get_spec = ('role_assignments', 'get_by_id', None)
id = 'id'
default_report_fields = (
'principalName',
'displayName',
'aadType',
'name',
'type',
'properties.scope',
'properties.roleDefinitionId'
)
def augment(self, resources):
s = self.get_session().get_session_for_resource(GRAPH_AUTH_ENDPOINT)
graph_client = s.client('azure.graphrbac.GraphRbacManagementClient')
object_ids = list(set(
resource['properties']['principalId'] for resource in resources
if resource['properties']['principalId']))
principal_dics = GraphHelper.get_principal_dictionary(graph_client, object_ids)
for resource in resources:
if resource['properties']['principalId'] in principal_dics.keys():
graph_resource = principal_dics[resource['properties']['principalId']]
if graph_resource.object_id:
resource['principalName'] = GraphHelper.get_principal_name(graph_resource)
resource['displayName'] = graph_resource.display_name
resource['aadType'] = graph_resource.object_type
return resources
@resources.register('roledefinition')
class RoleDefinition(QueryResourceManager):
"""Role definitions define sets of permissions that can be assigned
to an identity.
:example:
Return role definitions that explicitly have the permission to read authorization objects (role
assignments, role definitions, etc.). If a role definition only inherits these permissions
(e.g. through a wildcard `*` permission), it is not returned by this filter.
.. code-block:: yaml
policies:
- name: role-definition-permissions
resource: azure.roledefinition
filters:
- type: value
key: properties.permissions[0].actions
value: Microsoft.Authorization/*/read
op: contains
"""
class resource_type(QueryResourceManager.resource_type):
doc_groups = ['Active Directory']
service = 'azure.mgmt.authorization'
client = 'AuthorizationManagementClient'
get_spec = ('role_definitions', 'get_by_id', None)
type = 'roleDefinition'
id = 'id'
default_report_fields = (
'properties.roleName',
'properties.description',
'id',
'name',
'type',
'properties.type',
'properties.permissions'
)
@property
def source_type(self):
return self.data.get('source', 'describe-azure-roledefinition')
@sources.register('describe-azure-roledefinition')
class DescribeSource(DescribeSource):
def get_resources(self, query):
s = local_session(self.manager.session_factory)
client = s.client('azure.mgmt.authorization.AuthorizationManagementClient')
scope = '/subscriptions/%s' % (s.subscription_id)
resources = client.role_definitions.list(scope)
return [r.serialize(True) for r in resources]
@RoleAssignment.filter_registry.register('role')
class RoleFilter(RelatedResourceFilter):
"""Filters role assignments based on role definitions
:example:
Return role assignments with the `Owner role`.
.. code-block:: yaml
policies:
- name: assignments-by-role-definition
resource: azure.roleassignment
filters:
- type: role
key: properties.roleName
op: in
value: Owner
:example:
Return all assignments with the `Owner role` that have access to virtual machines. For the
resource-access filter, the related resource can be any custodian supported azure
resource other than `azure.roleassignments` or `azure.roledefinitions`.
.. code-block:: yaml
policies:
- name: assignment-by-role-and-resource
resource: azure.roleassignment
filters:
- type: role
key: properties.roleName
op: eq
value: Owner
- type: resource-access
relatedResource: azure.vm
:example:
Return all assignments with the `Owner role` that have access to virtual machines in `westus2`:
.. code-block:: yaml
policies:
- name: assignment-by-role-and-resource-access
resource: azure.roleassignment
filters:
- type: role
key: properties.roleName
op: eq
value: Owner
- type: resource-access
relatedResource: azure.vm
key: location
op: eq
value: westus2
"""
schema = type_schema('role', rinherit=ValueFilter.schema)
RelatedResource = "c7n_azure.resources.access_control.RoleDefinition"
RelatedIdsExpression = "properties.roleDefinitionId"
@RoleAssignment.filter_registry.register('resource-access')
class ResourceAccessFilter(RelatedResourceFilter):
"""Filters role assignments that have access to a certain
type of azure resource.
:example:
.. code-block:: yaml
policies:
- name: assignments-by-azure-resource
resource: azure.roleassignment
filters:
- type: resource-access
relatedResource: azure.vm
"""
schema = type_schema(
'resource-access',
relatedResource={'type': 'string'},
rinherit=RelatedResourceFilter.schema,
required=['relatedResource']
)
def __init__(self, data, manager=None):
super(ResourceAccessFilter, self).__init__(data, manager)
resource_type = self.data['relatedResource']
load_resources((resource_type,))
self.factory = Azure.resources.get(
resource_type.rsplit('.', 1)[-1])
def get_related(self, resources):
related = self.manager.get_resource_manager(self.factory.type).resources()
if self.data.get('op'):
return [r['id'] for r in related if self.match(r)]
else:
return [r['id'] for r in related]
def process_resource(self, resource, related):
for r in related:
if resource['properties']['scope'] in r:
return True
return False
def validate(self):
if self.factory is None:
raise FilterValidationError(
"The related resource is not a custodian supported azure resource"
)
if (self.data['relatedResource'] == 'azure.roleassignment' or
self.data['relatedResource'] == 'azure.roledefinition'):
raise FilterValidationError(
"The related resource can not be role assignments or role definitions"
)
@RoleAssignment.filter_registry.register('scope')
class ScopeFilter(Filter):
"""
Filter role assignments by assignment scope.
:example:
Return all role assignments with the `Subscription` level scope access.
.. code-block:: yaml
policies:
- name: assignments-subscription-scope
resource: azure.roleassignment
filters:
- type: scope
value: subscription
:example:
Role assignments with scope other than `Subscription` or `Resource Group`.
.. code-block:: yaml
policies:
- name: assignments-other-level-scope
resource: azure.roleassignment
filters:
- not:
- type: scope
value: subscription
- not:
- type: scope
value: resource-group
:example:
Return all service principal role assignments with the `Subscription` level scope access.
.. code-block:: yaml
policies:
- name: service-principal-assignments-subscription-scope
resource: azure.roleassignment
filters:
- type: value
key: aadType
op: eq
value: ServicePrincipal
- type: scope
value: subscription
"""
SUBSCRIPTION_SCOPE = 'subscription'
RG_SCOPE = 'resource-group'
MG_SCOPE = 'management-group'
schema = type_schema(
'scope',
value={'type': 'string', 'enum': [SUBSCRIPTION_SCOPE, RG_SCOPE, MG_SCOPE]})
def process(self, data, event=None):
scope_value = self.data.get('value', '')
return [d for d in data if self.is_scope(d["properties"]["scope"], scope_value)]
def is_scope(self, scope, scope_type):
if not isinstance(scope, str):
return False
regex = ""
if scope_type == self.SUBSCRIPTION_SCOPE:
regex = r"^\/subscriptions\/[^\/]+$"
elif scope_type == self.RG_SCOPE:
regex = r"^\/subscriptions\/([^\/]+)\/resourceGroups\/[^\/]+$"
elif scope_type == self.MG_SCOPE:
regex = r"^\/providers\/Microsoft\.Management\/managementGroups/[^\/]+$"
else:
return False
match = re.match(regex, scope, flags=re.IGNORECASE)
return bool(match)
@RoleAssignment.action_registry.register('delete')
class DeleteAssignmentAction(AzureBaseAction):
schema = type_schema('delete')
def _prepare_processing(self,):
self.client = self.manager.get_client()
def _process_resource(self, resource):
self.client.role_assignments.delete(
resource['properties']['scope'], resource['name'])
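# Illustrative sketch (not part of c7n): how the ScopeFilter above classifies an
# assignment's scope string with the regexes in is_scope, and how the
# resource-access filter matches when that scope string is contained in a related
# resource's id. The sample scope strings below are hypothetical.
def _demo_scope_classification():
    import re
    subscription = r"^\/subscriptions\/[^\/]+$"
    resource_group = r"^\/subscriptions\/([^\/]+)\/resourceGroups\/[^\/]+$"
    samples = {
        "/subscriptions/1111": "subscription",
        "/subscriptions/1111/resourceGroups/rg1": "resource-group",
        "/subscriptions/1111/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachines/vm1": "resource",
    }
    for scope, expected in samples.items():
        if re.match(subscription, scope, flags=re.IGNORECASE):
            kind = "subscription"
        elif re.match(resource_group, scope, flags=re.IGNORECASE):
            kind = "resource-group"
        else:
            kind = "resource"
        assert kind == expected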
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AutoCastVariable."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.keras import combinations
from tensorflow.python.keras.mixed_precision.experimental import autocast_variable
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent as gradient_descent_v1
from tensorflow.python.training.tracking import util as trackable_utils
TESTCASES = ({
'testcase_name': 'base',
'distribute': False
}, {
'testcase_name': 'distribute',
'distribute': True
})
def get_distribute_scope(distribute):
class DummyContextManager(object):
def __enter__(self):
pass
def __exit__(self, *args):
pass
if distribute:
return mirrored_strategy.MirroredStrategy(['cpu:0']).scope()
else:
return DummyContextManager()
def get_var(val, dtype, name=None):
return variables.VariableV1(val, use_resource=True, dtype=dtype, name=name)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class AutoCastVariableTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(*TESTCASES)
def test_read(self, distribute):
with get_distribute_scope(distribute):
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
# outside of auto cast scope.
self.assertEqual(x.dtype, dtypes.float32)
self.assertEqual(x.value().dtype, dtypes.float32)
self.assertEqual(x.read_value().dtype, dtypes.float32)
self.assertEqual(array_ops.identity(x).dtype, dtypes.float32)
# within auto cast scope of different dtype
with ops.get_default_graph()._enable_auto_casting_variables(
dtypes.float16):
self.assertEqual(x.dtype, dtypes.float16)
self.assertEqual(x.value().dtype, dtypes.float16)
self.assertEqual(x.read_value().dtype, dtypes.float16)
self.assertEqual(array_ops.identity(x).dtype, dtypes.float16)
# within auto cast scope of same dtype
with ops.get_default_graph()._enable_auto_casting_variables(
dtypes.float32):
self.assertEqual(x.dtype, dtypes.float32)
self.assertEqual(x.value().dtype, dtypes.float32)
self.assertEqual(x.read_value().dtype, dtypes.float32)
self.assertEqual(array_ops.identity(x).dtype, dtypes.float32)
def test_sparse_reads(self):
x = get_var([1., 2], dtypes.float32)
# DistributedVariables do not support sparse_read or gather_nd, so we pass
# distribute=False
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
self.assertEqual(x.sparse_read([0]).dtype, dtypes.float32)
self.assertEqual(x.gather_nd([0]).dtype, dtypes.float32)
with ops.get_default_graph()._enable_auto_casting_variables(
dtypes.float16):
self.assertEqual(x.sparse_read([0]).dtype, dtypes.float16)
self.assertEqual(x.gather_nd([0]).dtype, dtypes.float16)
@parameterized.named_parameters(*TESTCASES)
def test_read_nested_scopes(self, distribute):
with get_distribute_scope(distribute):
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
with ops.get_default_graph()._enable_auto_casting_variables(
dtypes.float16):
self.assertEqual(x.dtype, dtypes.float16)
self.assertEqual(x.read_value().dtype, dtypes.float16)
with ops.get_default_graph()._enable_auto_casting_variables(
dtypes.float32):
self.assertEqual(x.dtype, dtypes.float32)
self.assertEqual(x.read_value().dtype, dtypes.float32)
self.assertEqual(x.dtype, dtypes.float16)
self.assertEqual(x.read_value().dtype, dtypes.float16)
@parameterized.named_parameters(*TESTCASES)
def test_dtype_is_not_string(self, distribute):
with get_distribute_scope(distribute):
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.assertEqual(x.dtype, dtypes.float32)
self.assertIsInstance(x.dtype, dtypes.DType)
self.assertEqual(x.true_dtype, dtypes.float32)
self.assertIsInstance(x.true_dtype, dtypes.DType)
with ops.get_default_graph()._enable_auto_casting_variables('float16'):
self.assertEqual(x.dtype, dtypes.float16)
self.assertIsInstance(x.dtype, dtypes.DType)
self.assertEqual(x.true_dtype, dtypes.float32)
self.assertIsInstance(x.true_dtype, dtypes.DType)
@parameterized.named_parameters(*TESTCASES)
def test_method_delegations(self, distribute):
# Test AutoCastVariable correctly delegates Variable methods to the
# underlying variable.
with self.test_session(), get_distribute_scope(distribute):
for read_dtype in (dtypes.float32, dtypes.float16):
if distribute:
# MirroredVariable.assign will (incorrectly) return a Mirrored value
# instead of a MirroredVariable. So we cannot properly wrap it in an
# AutoCastVariable.
evaluate = self.evaluate
else:
def evaluate(var):
self.assertIsInstance(var, autocast_variable.AutoCastVariable)
self.assertEqual(var.dtype, read_dtype)
return self.evaluate(var)
x = get_var(7., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
with ops.get_default_graph()._enable_auto_casting_variables(
read_dtype):
self.evaluate(x.initializer)
self.assertEqual(self.evaluate(x.value()), 7)
self.assertEqual(self.evaluate(x.read_value()), 7)
self.assertTrue(x.trainable)
self.assertEqual(x.synchronization, x._variable.synchronization)
self.assertEqual(x.aggregation, x._variable.aggregation)
self.assertEqual(self.evaluate(x.initialized_value()), 7)
if not context.executing_eagerly():
if not distribute:
# These functions are not supported for DistributedVariables
x.load(9)
self.assertEqual(x.eval(), 9)
self.assertEqual(self.evaluate(x.initial_value), 7)
self.assertEqual(x.op, x._variable.op)
self.assertEqual(x.graph, x._variable.graph)
if not distribute:
# These attributes are not supported for DistributedVariables
self.assertIsNone(x.constraint)
self.assertEqual(x.initializer, x._variable.initializer)
self.assertEqual(evaluate(x.assign(8)), 8)
self.assertEqual(evaluate(x.assign_add(2)), 10)
self.assertEqual(evaluate(x.assign_sub(3)), 7)
self.assertEqual(x.name, x._variable.name)
self.assertEqual(x.device, x._variable.device)
self.assertEqual(x.shape, ())
self.assertEqual(x.get_shape(), ())
if not distribute:
# Test scatter_* methods. These are not supported for
# DistributedVariables
x = get_var([7, 8], dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
with ops.get_default_graph()._enable_auto_casting_variables(
read_dtype):
self.evaluate(x.initializer)
self.assertAllEqual(self.evaluate(x.value()), [7, 8])
def slices(val, index):
return indexed_slices.IndexedSlices(
values=constant_op.constant(val, dtype=dtypes.float32),
indices=constant_op.constant(index, dtype=dtypes.int32),
dense_shape=constant_op.constant([2], dtype=dtypes.int32))
self.assertAllEqual(evaluate(x.scatter_sub(slices(1., 0))), [6, 8])
self.assertAllEqual(evaluate(x.scatter_add(slices(1., 0))), [7, 8])
self.assertAllEqual(evaluate(x.scatter_max(slices(9., 1))), [7, 9])
self.assertAllEqual(evaluate(x.scatter_min(slices(8., 1))), [7, 8])
self.assertAllEqual(evaluate(x.scatter_mul(slices(2., 1))), [7, 16])
self.assertAllEqual(evaluate(x.scatter_div(slices(2., 1))), [7, 8])
self.assertAllEqual(
evaluate(x.scatter_update(slices(4., 1))), [7, 4])
self.assertAllEqual(
evaluate(x.scatter_nd_sub([[0], [1]], [1., 2.])), [6, 2])
self.assertAllEqual(
evaluate(x.scatter_nd_add([[0], [1]], [1., 2.])), [7, 4])
self.assertAllEqual(
evaluate(x.scatter_nd_update([[0], [1]], [1., 2.])), [1, 2])
@parameterized.named_parameters(*TESTCASES)
def test_operator_overloads(self, distribute):
with get_distribute_scope(distribute):
for read_dtype in (dtypes.float32, dtypes.float16):
x = get_var(7., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
with ops.get_default_graph()._enable_auto_casting_variables(
read_dtype):
self.evaluate(x.initializer)
self.assertAlmostEqual(8, self.evaluate(x + 1))
self.assertAlmostEqual(10, self.evaluate(3 + x))
self.assertAlmostEqual(14, self.evaluate(x + x))
self.assertAlmostEqual(5, self.evaluate(x - 2))
self.assertAlmostEqual(6, self.evaluate(13 - x))
self.assertAlmostEqual(0, self.evaluate(x - x))
self.assertAlmostEqual(14, self.evaluate(x * 2))
self.assertAlmostEqual(21, self.evaluate(3 * x))
self.assertAlmostEqual(49, self.evaluate(x * x))
self.assertAlmostEqual(3.5, self.evaluate(x / 2))
self.assertAlmostEqual(1.5, self.evaluate(10.5 / x))
self.assertAlmostEqual(3, self.evaluate(x // 2))
self.assertAlmostEqual(2, self.evaluate(15 // x))
if read_dtype == dtypes.float32:
# The "mod" operator does not support float16
self.assertAlmostEqual(1, self.evaluate(x % 2))
self.assertAlmostEqual(2, self.evaluate(16 % x))
self.assertTrue(self.evaluate(x < 12))
self.assertTrue(self.evaluate(x <= 12))
self.assertFalse(self.evaluate(x > 12))
self.assertFalse(self.evaluate(x >= 12))
self.assertFalse(self.evaluate(12 < x))
self.assertFalse(self.evaluate(12 <= x))
self.assertTrue(self.evaluate(12 > x))
self.assertTrue(self.evaluate(12 >= x))
self.assertAlmostEqual(343, self.evaluate(pow(x, 3)), places=4)
self.assertAlmostEqual(128, self.evaluate(pow(2, x)), places=4)
self.assertAlmostEqual(-7, self.evaluate(-x))
self.assertAlmostEqual(7, self.evaluate(abs(x)))
x = get_var([7, 8, 9], dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
self.assertEqual(self.evaluate(x[1]), 8)
if tf2.enabled() and context.executing_eagerly():
self.assertAllEqual(x == [7., 8., 10.], [True, True, False])
self.assertAllEqual(x != [7., 8., 10.], [False, False, True])
@parameterized.named_parameters(*TESTCASES)
def test_assign(self, distribute):
with get_distribute_scope(distribute):
x = get_var(0., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
# outside of auto cast scope.
v1 = constant_op.constant(3.14, dtype=dtypes.float32)
v2 = constant_op.constant(3.14, dtype=dtypes.float16)
def run_and_check():
# Assign float32 values
self.assertAllClose(3.14, self.evaluate(x.assign(v1)))
self.assertAllClose(3.14 * 2, self.evaluate(x.assign_add(v1)))
self.assertAllClose(3.14, self.evaluate(x.assign_sub(v1)))
# Attempt to assign float16 values
with self.assertRaisesRegexp(
ValueError,
'conversion requested dtype float32 for Tensor with dtype float16'):
self.evaluate(x.assign(v2))
with self.assertRaisesRegexp(
ValueError,
'conversion requested dtype float32 for Tensor with dtype float16'):
self.evaluate(x.assign_add(v2))
with self.assertRaisesRegexp(
ValueError,
'conversion requested dtype float32 for Tensor with dtype float16'):
self.evaluate(x.assign_sub(v2))
# Assign Python floats
self.assertAllClose(0., self.evaluate(x.assign(0.)))
self.assertAllClose(3.14, self.evaluate(x.assign(3.14)))
self.assertAllClose(3.14 * 2, self.evaluate(x.assign_add(3.14)))
self.assertAllClose(3.14, self.evaluate(x.assign_sub(3.14)))
# Assign multiple times
assign = x.assign(1.)
self.assertAllClose(1., self.evaluate(assign))
self.assertAllClose(0., self.evaluate(assign.assign(0.)))
assign_add = x.assign_add(3.14)
self.assertAllClose(3.14, self.evaluate(assign_add))
self.assertAllClose(3.14 * 3,
self.evaluate(x.assign_add(3.14).assign_add(3.14)))
self.assertAllClose(3.14 * 3, x)
assign_sub = x.assign_sub(3.14)
self.assertAllClose(3.14 * 2, self.evaluate(assign_sub))
self.assertAllClose(0.,
self.evaluate(x.assign_sub(3.14).assign_sub(3.14)))
# Assign with read_value=False
self.assertIsNone(self.evaluate(x.assign(1., read_value=False)))
self.assertAllClose(1., self.evaluate(x))
self.assertIsNone(self.evaluate(x.assign_add(2., read_value=False)))
self.assertAllClose(3., self.evaluate(x))
self.assertIsNone(self.evaluate(x.assign_sub(3., read_value=False)))
self.assertAllClose(0., self.evaluate(x))
# Use the tf.assign functions instead of the var.assign methods.
self.assertAllClose(0., self.evaluate(state_ops.assign(x, 0.)))
self.assertAllClose(3.14, self.evaluate(state_ops.assign(x, 3.14)))
self.assertAllClose(3.14 * 2,
self.evaluate(state_ops.assign_add(x, 3.14)))
self.assertAllClose(3.14, self.evaluate(state_ops.assign_sub(x, 3.14)))
run_and_check()
# reset x
self.evaluate(x.assign(0.))
# within auto cast scope.
with ops.get_default_graph()._enable_auto_casting_variables(
dtypes.float16):
# assign still expects a float32 value even in a float16 scope
run_and_check()
@parameterized.named_parameters(*TESTCASES)
def test_assign_stays_in_true_dtype(self, distribute):
with get_distribute_scope(distribute):
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
# small_val is a value such that 1.0 + small_val == 1.0 in fp16, but not
# in fp32
small_val = np.finfo('float16').eps / 2
small_tensor = constant_op.constant(small_val, dtype=dtypes.float32)
with ops.get_default_graph()._enable_auto_casting_variables(
dtypes.float16):
# Variable should be increased, despite it appearing to be the same
# float16 value.
self.assertEqual(1. + small_val,
self.evaluate(x.assign(1. + small_tensor)))
self.assertEqual(1., self.evaluate(x.value()))
self.assertEqual(1. + small_val, self.evaluate(x.value()))
self.evaluate(x.assign(1.))
with ops.get_default_graph()._enable_auto_casting_variables(
dtypes.float16):
self.assertEqual(1. + small_val,
self.evaluate(x.assign_add(small_tensor)))
self.assertEqual(1., self.evaluate(x.value()))
self.assertEqual(1. + small_val, self.evaluate(x.value()))
@parameterized.named_parameters(*TESTCASES)
def test_checkpoint(self, distribute):
with self.test_session():
with get_distribute_scope(distribute):
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
self.evaluate(x.assign(123.))
checkpoint = trackable_utils.Checkpoint(x=x)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = checkpoint.save(prefix)
self.evaluate(x.assign(234.))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertEqual(self.evaluate(x), 123.)
@parameterized.named_parameters(*TESTCASES)
def test_invalid_wrapped_variable(self, distribute):
with get_distribute_scope(distribute):
# Wrap a non-variable
with self.assertRaisesRegexp(ValueError, 'variable must be of type'):
x = constant_op.constant([1.], dtype=dtypes.float32)
autocast_variable.create_autocast_variable(x)
# Wrap a non-floating point variable
with self.assertRaisesRegexp(ValueError,
'variable must be a floating point'):
x = get_var(1, dtypes.int32)
autocast_variable.create_autocast_variable(x)
def test_repr(self):
# We do not test with DistributionStrategy because we do not want to rely on
# the exact __repr__ output of a DistributedVariable.
x = get_var(1., dtypes.float32, name='x')
x = autocast_variable.create_autocast_variable(x)
if context.executing_eagerly():
self.assertStartsWith(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float32 true_dtype=float32, "
"numpy="
)
with ops.get_default_graph()._enable_auto_casting_variables(
dtypes.float16):
self.assertStartsWith(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float16 "
"true_dtype=float32, numpy="
)
else:
self.assertEqual(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float32 true_dtype=float32>"
)
with ops.get_default_graph()._enable_auto_casting_variables(
dtypes.float16):
self.assertEqual(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float16 true_dtype=float32>"
)
def test_repr_distributed(self):
with get_distribute_scope(distribute=True):
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
self.assertRegexpMatches(
repr(x).replace('\n', ' '),
'<AutoCastDistributedVariable dtype=float32 true_dtype=float32 '
'inner_variable=MirroredVariable.*>'
)
@parameterized.named_parameters(
('v1', gradient_descent_v1.GradientDescentOptimizer),
('v2', gradient_descent_v2.SGD))
def test_optimizer(self, optimizer_class):
x = get_var(1., dtypes.float32)
x = autocast_variable.create_autocast_variable(x)
opt = optimizer_class(1.)
@def_function.function
def f():
opt.minimize(lambda: x + 1., var_list=[x])
if context.executing_eagerly():
f()
else:
op = f() # pylint: disable=assignment-from-no-return
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
self.assertEqual(self.evaluate(x), 0)
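# Minimal sketch (not part of the test class above) of the float16 arithmetic that
# test_assign_stays_in_true_dtype relies on: half of the fp16 machine epsilon is
# lost when 1.0 + small_val is rounded to float16, but is preserved in float32,
# which is why assignments must happen against the variable's true float32 storage.
def _demo_float16_epsilon():
  small_val = np.finfo('float16').eps / 2
  assert np.float16(1.) + np.float16(small_val) == np.float16(1.)
  assert np.float32(1.) + np.float32(small_val) > np.float32(1.)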
if __name__ == '__main__':
test.main()
|
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exists and concerns SELF, just get the attribute with the mapped attribute name
# Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exists for the attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exists, try to get the attribute directly from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
ctx.logger.info('Attribute {0} not found, going up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
ctx.logger.info('Property {0} not found, going up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exists and concerns SELF, just get the attribute with the mapped attribute name
# Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
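# Quick illustration of the EXPECTED_OUTPUT_ convention parsed above, on a made-up
# script output: lines of the form EXPECTED_OUTPUT_<NAME>=<value> become named
# outputs, every other line is plain output and the last one wins as 'last_output'.
def _demo_parse_output():
    sample = 'starting\nEXPECTED_OUTPUT_PARTITION_NAME=/dev/vdb1\ndone'
    parsed = parse_output(sample)
    assert parsed['last_output'] == 'done'
    assert parsed['outputs'] == {'PARTITION_NAME': '/dev/vdb1'}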
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['FS_MOUNT_PATH'] = r'/var/cbs3'
new_script_process = {'env': env_map}
ctx.logger.info('Operation is executed with inputs {0}'.format(inputs))
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
new_script_process['env'].update(inputs['process']['env'])
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('artifacts/alien-extended-storage-types/scripts/unmount.sh'), new_script_process, operationOutputNames)
for k,v in parsed_output['outputs'].items():
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:stop:{0}'.format(k)] = v
ctx.instance.runtime_properties['partition_name'] = get_attribute(ctx, '_a4c_OO:tosca.interfaces.relationship.Configure:pre_configure_source:PARTITION_NAME')
ctx.instance.update()
|
|
"""Webhook handlers for mobile_app."""
import asyncio
from functools import wraps
import logging
import secrets
from aiohttp.web import HTTPBadRequest, Request, Response, json_response
from nacl.secret import SecretBox
import voluptuous as vol
from homeassistant.components import notify as hass_notify, tag
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES as BINARY_SENSOR_CLASSES,
)
from homeassistant.components.camera import SUPPORT_STREAM as CAMERA_SUPPORT_STREAM
from homeassistant.components.device_tracker import (
ATTR_BATTERY,
ATTR_GPS,
ATTR_GPS_ACCURACY,
ATTR_LOCATION_NAME,
)
from homeassistant.components.frontend import MANIFEST_JSON
from homeassistant.components.sensor import DEVICE_CLASSES as SENSOR_CLASSES
from homeassistant.components.zone.const import DOMAIN as ZONE_DOMAIN
from homeassistant.const import (
ATTR_DEVICE_ID,
ATTR_DOMAIN,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
ATTR_SUPPORTED_FEATURES,
CONF_WEBHOOK_ID,
HTTP_BAD_REQUEST,
HTTP_CREATED,
)
from homeassistant.core import EventOrigin
from homeassistant.exceptions import HomeAssistantError, ServiceNotFound
from homeassistant.helpers import (
config_validation as cv,
device_registry as dr,
entity_registry as er,
template,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.decorator import Registry
from .const import (
ATTR_ALTITUDE,
ATTR_APP_DATA,
ATTR_APP_VERSION,
ATTR_CAMERA_ENTITY_ID,
ATTR_COURSE,
ATTR_DEVICE_NAME,
ATTR_EVENT_DATA,
ATTR_EVENT_TYPE,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_OS_VERSION,
ATTR_SENSOR_ATTRIBUTES,
ATTR_SENSOR_DEVICE_CLASS,
ATTR_SENSOR_ICON,
ATTR_SENSOR_NAME,
ATTR_SENSOR_STATE,
ATTR_SENSOR_TYPE,
ATTR_SENSOR_TYPE_BINARY_SENSOR,
ATTR_SENSOR_TYPE_SENSOR,
ATTR_SENSOR_UNIQUE_ID,
ATTR_SENSOR_UOM,
ATTR_SPEED,
ATTR_SUPPORTS_ENCRYPTION,
ATTR_TEMPLATE,
ATTR_TEMPLATE_VARIABLES,
ATTR_VERTICAL_ACCURACY,
ATTR_WEBHOOK_DATA,
ATTR_WEBHOOK_ENCRYPTED,
ATTR_WEBHOOK_ENCRYPTED_DATA,
ATTR_WEBHOOK_TYPE,
CONF_CLOUDHOOK_URL,
CONF_REMOTE_UI_URL,
CONF_SECRET,
DATA_CONFIG_ENTRIES,
DATA_DELETED_IDS,
DOMAIN,
ERR_ENCRYPTION_ALREADY_ENABLED,
ERR_ENCRYPTION_NOT_AVAILABLE,
ERR_ENCRYPTION_REQUIRED,
ERR_INVALID_FORMAT,
ERR_SENSOR_NOT_REGISTERED,
SIGNAL_LOCATION_UPDATE,
SIGNAL_SENSOR_UPDATE,
)
from .helpers import (
_decrypt_payload,
empty_okay_response,
error_response,
registration_context,
safe_registration,
supports_encryption,
webhook_response,
)
_LOGGER = logging.getLogger(__name__)
DELAY_SAVE = 10
WEBHOOK_COMMANDS = Registry()
COMBINED_CLASSES = set(BINARY_SENSOR_CLASSES + SENSOR_CLASSES)
SENSOR_TYPES = [ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR]
WEBHOOK_PAYLOAD_SCHEMA = vol.Schema(
{
vol.Required(ATTR_WEBHOOK_TYPE): cv.string,
vol.Required(ATTR_WEBHOOK_DATA, default={}): vol.Any(dict, list),
vol.Optional(ATTR_WEBHOOK_ENCRYPTED, default=False): cv.boolean,
vol.Optional(ATTR_WEBHOOK_ENCRYPTED_DATA): cv.string,
}
)
def validate_schema(schema):
"""Decorate a webhook function with a schema."""
if isinstance(schema, dict):
schema = vol.Schema(schema)
def wrapper(func):
"""Wrap function so we validate schema."""
@wraps(func)
async def validate_and_run(hass, config_entry, data):
"""Validate input and call handler."""
try:
data = schema(data)
except vol.Invalid as ex:
err = vol.humanize.humanize_error(data, ex)
_LOGGER.error("Received invalid webhook payload: %s", err)
return empty_okay_response()
return await func(hass, config_entry, data)
return validate_and_run
return wrapper
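# Hypothetical usage sketch (demo only, not a real mobile_app webhook type): a
# handler decorated this way only ever sees data that passed the voluptuous schema;
# invalid payloads are logged and answered with an empty 200 response. Real handlers
# are additionally registered with @WEBHOOK_COMMANDS.register("<webhook_type>").
@validate_schema({vol.Required("message"): cv.string})
async def webhook_demo_echo(hass, config_entry, data):
    """Echo back the already-validated message (illustrative example)."""
    return webhook_response({"message": data["message"]}, registration=config_entry.data)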
async def handle_webhook(
hass: HomeAssistantType, webhook_id: str, request: Request
) -> Response:
"""Handle webhook callback."""
if webhook_id in hass.data[DOMAIN][DATA_DELETED_IDS]:
return Response(status=410)
config_entry = hass.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id]
device_name = config_entry.data[ATTR_DEVICE_NAME]
try:
req_data = await request.json()
except ValueError:
_LOGGER.warning("Received invalid JSON from mobile_app device: %s", device_name)
return empty_okay_response(status=HTTP_BAD_REQUEST)
if (
ATTR_WEBHOOK_ENCRYPTED not in req_data
and config_entry.data[ATTR_SUPPORTS_ENCRYPTION]
):
_LOGGER.warning(
"Refusing to accept unencrypted webhook from %s",
device_name,
)
return error_response(ERR_ENCRYPTION_REQUIRED, "Encryption required")
try:
req_data = WEBHOOK_PAYLOAD_SCHEMA(req_data)
except vol.Invalid as ex:
err = vol.humanize.humanize_error(req_data, ex)
_LOGGER.error(
"Received invalid webhook from %s with payload: %s", device_name, err
)
return empty_okay_response()
webhook_type = req_data[ATTR_WEBHOOK_TYPE]
webhook_payload = req_data.get(ATTR_WEBHOOK_DATA, {})
if req_data[ATTR_WEBHOOK_ENCRYPTED]:
enc_data = req_data[ATTR_WEBHOOK_ENCRYPTED_DATA]
webhook_payload = _decrypt_payload(config_entry.data[CONF_SECRET], enc_data)
if webhook_type not in WEBHOOK_COMMANDS:
_LOGGER.error(
"Received invalid webhook from %s of type: %s", device_name, webhook_type
)
return empty_okay_response()
_LOGGER.debug(
"Received webhook payload from %s for type %s: %s",
device_name,
webhook_type,
webhook_payload,
)
# Shield so we make sure we finish the webhook, even if sender hangs up.
return await asyncio.shield(
WEBHOOK_COMMANDS[webhook_type](hass, config_entry, webhook_payload)
)
@WEBHOOK_COMMANDS.register("call_service")
@validate_schema(
{
vol.Required(ATTR_DOMAIN): cv.string,
vol.Required(ATTR_SERVICE): cv.string,
vol.Optional(ATTR_SERVICE_DATA, default={}): dict,
}
)
async def webhook_call_service(hass, config_entry, data):
"""Handle a call service webhook."""
try:
await hass.services.async_call(
data[ATTR_DOMAIN],
data[ATTR_SERVICE],
data[ATTR_SERVICE_DATA],
blocking=True,
context=registration_context(config_entry.data),
)
except (vol.Invalid, ServiceNotFound, Exception) as ex:
_LOGGER.error(
"Error when calling service during mobile_app "
"webhook (device name: %s): %s",
config_entry.data[ATTR_DEVICE_NAME],
ex,
)
raise HTTPBadRequest() from ex
return empty_okay_response()
@WEBHOOK_COMMANDS.register("fire_event")
@validate_schema(
{
vol.Required(ATTR_EVENT_TYPE): cv.string,
vol.Optional(ATTR_EVENT_DATA, default={}): dict,
}
)
async def webhook_fire_event(hass, config_entry, data):
"""Handle a fire event webhook."""
event_type = data[ATTR_EVENT_TYPE]
hass.bus.async_fire(
event_type,
data[ATTR_EVENT_DATA],
EventOrigin.remote,
context=registration_context(config_entry.data),
)
return empty_okay_response()
@WEBHOOK_COMMANDS.register("stream_camera")
@validate_schema({vol.Required(ATTR_CAMERA_ENTITY_ID): cv.string})
async def webhook_stream_camera(hass, config_entry, data):
"""Handle a request to HLS-stream a camera."""
camera = hass.states.get(data[ATTR_CAMERA_ENTITY_ID])
if camera is None:
return webhook_response(
{"success": False},
registration=config_entry.data,
status=HTTP_BAD_REQUEST,
)
resp = {"mjpeg_path": "/api/camera_proxy_stream/%s" % (camera.entity_id)}
if camera.attributes[ATTR_SUPPORTED_FEATURES] & CAMERA_SUPPORT_STREAM:
try:
resp["hls_path"] = await hass.components.camera.async_request_stream(
camera.entity_id, "hls"
)
except HomeAssistantError:
resp["hls_path"] = None
else:
resp["hls_path"] = None
return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("render_template")
@validate_schema(
{
str: {
vol.Required(ATTR_TEMPLATE): cv.string,
vol.Optional(ATTR_TEMPLATE_VARIABLES, default={}): dict,
}
}
)
async def webhook_render_template(hass, config_entry, data):
"""Handle a render template webhook."""
resp = {}
for key, item in data.items():
try:
tpl = template.Template(item[ATTR_TEMPLATE], hass)
resp[key] = tpl.async_render(item.get(ATTR_TEMPLATE_VARIABLES))
except template.TemplateError as ex:
resp[key] = {"error": str(ex)}
return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("update_location")
@validate_schema(
{
vol.Optional(ATTR_LOCATION_NAME): cv.string,
vol.Required(ATTR_GPS): cv.gps,
vol.Required(ATTR_GPS_ACCURACY): cv.positive_int,
vol.Optional(ATTR_BATTERY): cv.positive_int,
vol.Optional(ATTR_SPEED): cv.positive_int,
vol.Optional(ATTR_ALTITUDE): vol.Coerce(float),
vol.Optional(ATTR_COURSE): cv.positive_int,
vol.Optional(ATTR_VERTICAL_ACCURACY): cv.positive_int,
}
)
async def webhook_update_location(hass, config_entry, data):
"""Handle an update location webhook."""
hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_LOCATION_UPDATE.format(config_entry.entry_id), data
)
return empty_okay_response()
@WEBHOOK_COMMANDS.register("update_registration")
@validate_schema(
{
vol.Optional(ATTR_APP_DATA, default={}): dict,
vol.Required(ATTR_APP_VERSION): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_MANUFACTURER): cv.string,
vol.Required(ATTR_MODEL): cv.string,
vol.Optional(ATTR_OS_VERSION): cv.string,
}
)
async def webhook_update_registration(hass, config_entry, data):
"""Handle an update registration webhook."""
new_registration = {**config_entry.data, **data}
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, config_entry.data[ATTR_DEVICE_ID])},
manufacturer=new_registration[ATTR_MANUFACTURER],
model=new_registration[ATTR_MODEL],
name=new_registration[ATTR_DEVICE_NAME],
sw_version=new_registration[ATTR_OS_VERSION],
)
hass.config_entries.async_update_entry(config_entry, data=new_registration)
await hass_notify.async_reload(hass, DOMAIN)
return webhook_response(
safe_registration(new_registration),
registration=new_registration,
)
@WEBHOOK_COMMANDS.register("enable_encryption")
async def webhook_enable_encryption(hass, config_entry, data):
"""Handle a encryption enable webhook."""
if config_entry.data[ATTR_SUPPORTS_ENCRYPTION]:
_LOGGER.warning(
"Refusing to enable encryption for %s because it is already enabled!",
config_entry.data[ATTR_DEVICE_NAME],
)
return error_response(
ERR_ENCRYPTION_ALREADY_ENABLED, "Encryption already enabled"
)
if not supports_encryption():
_LOGGER.warning(
"Unable to enable encryption for %s because libsodium is unavailable!",
config_entry.data[ATTR_DEVICE_NAME],
)
return error_response(ERR_ENCRYPTION_NOT_AVAILABLE, "Encryption is unavailable")
secret = secrets.token_hex(SecretBox.KEY_SIZE)
data = {**config_entry.data, ATTR_SUPPORTS_ENCRYPTION: True, CONF_SECRET: secret}
hass.config_entries.async_update_entry(config_entry, data=data)
return json_response({"secret": secret})
@WEBHOOK_COMMANDS.register("register_sensor")
@validate_schema(
{
vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
vol.Optional(ATTR_SENSOR_DEVICE_CLASS): vol.All(
vol.Lower, vol.In(COMBINED_CLASSES)
),
vol.Required(ATTR_SENSOR_NAME): cv.string,
vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
vol.Optional(ATTR_SENSOR_UOM): cv.string,
vol.Optional(ATTR_SENSOR_STATE, default=None): vol.Any(
None, bool, str, int, float
),
vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
}
)
async def webhook_register_sensor(hass, config_entry, data):
"""Handle a register sensor webhook."""
entity_type = data[ATTR_SENSOR_TYPE]
unique_id = data[ATTR_SENSOR_UNIQUE_ID]
device_name = config_entry.data[ATTR_DEVICE_NAME]
unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"
entity_registry = await er.async_get_registry(hass)
existing_sensor = entity_registry.async_get_entity_id(
entity_type, DOMAIN, unique_store_key
)
data[CONF_WEBHOOK_ID] = config_entry.data[CONF_WEBHOOK_ID]
# If sensor already is registered, update current state instead
if existing_sensor:
_LOGGER.debug(
"Re-register for %s of existing sensor %s", device_name, unique_id
)
async_dispatcher_send(hass, SIGNAL_SENSOR_UPDATE, data)
else:
register_signal = f"{DOMAIN}_{data[ATTR_SENSOR_TYPE]}_register"
async_dispatcher_send(hass, register_signal, data)
return webhook_response(
{"success": True},
registration=config_entry.data,
status=HTTP_CREATED,
)
@WEBHOOK_COMMANDS.register("update_sensor_states")
@validate_schema(
vol.All(
cv.ensure_list,
[
# Partial schema, enough to identify the sensor.
# We don't validate everything because otherwise 1 invalid sensor
# will invalidate all sensors.
vol.Schema(
{
vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
],
)
)
async def webhook_update_sensor_states(hass, config_entry, data):
"""Handle an update sensor states webhook."""
sensor_schema_full = vol.Schema(
{
vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
vol.Required(ATTR_SENSOR_STATE): vol.Any(None, bool, str, int, float),
vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
}
)
device_name = config_entry.data[ATTR_DEVICE_NAME]
resp = {}
for sensor in data:
entity_type = sensor[ATTR_SENSOR_TYPE]
unique_id = sensor[ATTR_SENSOR_UNIQUE_ID]
unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"
entity_registry = await er.async_get_registry(hass)
if not entity_registry.async_get_entity_id(
entity_type, DOMAIN, unique_store_key
):
_LOGGER.error(
"Refusing to update %s non-registered sensor: %s",
device_name,
unique_store_key,
)
err_msg = f"{entity_type} {unique_id} is not registered"
resp[unique_id] = {
"success": False,
"error": {"code": ERR_SENSOR_NOT_REGISTERED, "message": err_msg},
}
continue
entry = {CONF_WEBHOOK_ID: config_entry.data[CONF_WEBHOOK_ID]}
try:
sensor = sensor_schema_full(sensor)
except vol.Invalid as err:
err_msg = vol.humanize.humanize_error(sensor, err)
_LOGGER.error(
"Received invalid sensor payload from %s for %s: %s",
device_name,
unique_id,
err_msg,
)
resp[unique_id] = {
"success": False,
"error": {"code": ERR_INVALID_FORMAT, "message": err_msg},
}
continue
new_state = {**entry, **sensor}
async_dispatcher_send(hass, SIGNAL_SENSOR_UPDATE, new_state)
resp[unique_id] = {"success": True}
return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_zones")
async def webhook_get_zones(hass, config_entry, data):
"""Handle a get zones webhook."""
zones = [
hass.states.get(entity_id)
for entity_id in sorted(hass.states.async_entity_ids(ZONE_DOMAIN))
]
return webhook_response(zones, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_config")
async def webhook_get_config(hass, config_entry, data):
"""Handle a get config webhook."""
hass_config = hass.config.as_dict()
resp = {
"latitude": hass_config["latitude"],
"longitude": hass_config["longitude"],
"elevation": hass_config["elevation"],
"unit_system": hass_config["unit_system"],
"location_name": hass_config["location_name"],
"time_zone": hass_config["time_zone"],
"components": hass_config["components"],
"version": hass_config["version"],
"theme_color": MANIFEST_JSON["theme_color"],
}
if CONF_CLOUDHOOK_URL in config_entry.data:
resp[CONF_CLOUDHOOK_URL] = config_entry.data[CONF_CLOUDHOOK_URL]
try:
resp[CONF_REMOTE_UI_URL] = hass.components.cloud.async_remote_ui_url()
except hass.components.cloud.CloudNotAvailable:
pass
return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("scan_tag")
@validate_schema({vol.Required("tag_id"): cv.string})
async def webhook_scan_tag(hass, config_entry, data):
"""Handle a fire event webhook."""
await tag.async_scan_tag(
hass,
data["tag_id"],
config_entry.data[ATTR_DEVICE_ID],
registration_context(config_entry.data),
)
return empty_okay_response()
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Background workers to run tasks from Cloud Pub/Sub."""
import logging
from typing import Callable, Dict, Generic, Optional, Type, TypeVar
import attr
from proto_task_queue import task_pb2
from google.cloud.pubsub_v1.subscriber import client
from google.cloud.pubsub_v1.subscriber import futures as pubsub_futures
from google.cloud.pubsub_v1.subscriber import message as pubsub_message
from google.protobuf import message as proto_message
from google.protobuf import text_format
# pylint: disable=invalid-name
_TaskArgsType = TypeVar('_TaskArgsType', bound=proto_message.Message)
_TaskArgsClassType = Type[_TaskArgsType]
_TaskCallbackType = Callable[[_TaskArgsType], None]
# pylint: enable=invalid-name
@attr.s(auto_attribs=True)
class _Registration(Generic[_TaskArgsType]):
"""Data about a single registered task type.
Attributes:
task_args_class: Proto message class of the task's args.
callback: Implementation of the task.
"""
task_args_class: _TaskArgsClassType # pytype: disable=not-supported-yet
callback: _TaskCallbackType # pytype: disable=not-supported-yet
class Worker(object):
"""Background worker that runs tasks from Cloud Pub/Sub.
Typical usage example:
# Set up the worker.
my_worker = Worker()
my_worker.register(MyProtoClass, my_callback)
my_worker.register(OtherProtoClass, other_callback)
# Start subscribing.
subscribe_future = my_worker.subscribe(
'projects/my-project/subscriptions/my-subscription')
# Block the current thread on the subscriber thread.
subscribe_future.result()
Alternatively, the last two lines can be replaced with other logic if the
current thread should continue doing work. In that case, see
https://docs.python.org/3/library/concurrent.futures.html#future-objects for
subscribe_future's other methods.
"""
_message_type_registry: Dict[str, _Registration]
_subscriber: client.Client
_possibly_subscribing: bool
def __init__(
self,
pubsub_subscriber_client: Optional[client.Client] = None,
*,
task_to_string: Callable[[task_pb2.Task],
str] = text_format.MessageToString,
):
"""Constructor.
Args:
pubsub_subscriber_client: Cloud Pub/Sub subscriber client, or None to use
the default.
task_to_string: Function that converts a Task to a human-readable string
for logging.
"""
self._message_type_registry = {}
self._subscriber = pubsub_subscriber_client or client.Client()
self._possibly_subscribing = False
self._task_to_string = task_to_string
def register(self, task_args_class: _TaskArgsClassType,
callback: _TaskCallbackType) -> None:
"""Registers a new task.
Calling this method after calling subscribe() is not supported, because the
internal registry is not thread-safe if one thread writes to it while
another is reading.
Args:
task_args_class: Proto message class of the task's args.
callback: Implementation of the task. It takes an object of the type
task_args_class as an argument. This should be idempotent, see
https://cloud.google.com/pubsub/docs/subscriber
Raises:
RuntimeError: register() was called after a call to subscribe().
"""
if self._possibly_subscribing:
raise RuntimeError(
'Worker does not support registering a new task type after a '
'subscriber is started.')
full_name = task_args_class.DESCRIPTOR.full_name
self._message_type_registry[full_name] = _Registration(
task_args_class=task_args_class, callback=callback)
logging.info('Registered callback for %s', full_name)
def subscribe(self,
subscription_name: str) -> pubsub_futures.StreamingPullFuture:
"""Starts processing tasks from a subscription, in the background.
Args:
subscription_name: Relative resource name of the subscription, e.g.,
"projects/my-project/subscriptions/my-subscription".
Returns:
A Future object for the running subscriber.
"""
self._possibly_subscribing = True
return self._subscriber.subscribe(subscription_name, self._process_message)
def _process_message(self, message: pubsub_message.Message) -> None:
"""Processes a single message from Pub/Sub.
Args:
message: Message from Pub/Sub.
"""
# Extract the task proto from the message.
try:
task = task_pb2.Task.FromString(message.data)
except proto_message.DecodeError as e:
logging.error('Unable to deserialize Task proto: %s', e)
# If the message is gibberish, nacking keeps putting it back, wasting
# resources for no reason. If the message is fine but there's a parsing
# bug, nacking makes it possible to process the message normally after
# fixing the bug. If the expected format of the message ever changes in an
# incompatible way and a message with the new format is sent before the
# worker is updated, nacking makes it possible to process the message
# normally after updating the worker.
message.nack()
return
# Find the registration, based on the type of proto stored in task.args.
_, _, full_name = task.args.type_url.partition('/')
try:
registration = self._message_type_registry[full_name]
except KeyError:
logging.warning('Unknown type of task: %s', task.args.type_url)
# If the task has a bogus type, nacking keeps putting it back, wasting
# resources for no reason. If a new task type is added and those tasks are
# requested before the worker code is updated, nacking makes it possible
# to process the tasks after the worker code is updated. If an existing
# task type is removed from the running worker code before all tasks of
# that type have been processed, nacking keeps putting it back, wasting
# resources.
message.nack()
return
# Get the args proto.
args = registration.task_args_class()
task.args.Unpack(args)
# Convert the task to a loggable string.
try:
task_string = self._task_to_string(task)
except Exception: # pylint: disable=broad-except
logging.exception(
'Unable to convert task of type %s to a string for logging.',
full_name)
# If self._task_to_string() fails for a reason unrelated to the task
# itself, nacking makes it possible to process the task once
# self._task_to_string() is working again. If something about the task
# makes self._task_to_string() fail consistently, nacking makes it
# possible to process the task once the bug in self._task_to_string() is
# fixed. Additionally, users can catch and ignore exceptions in
# self._task_to_string() itself if they want to always process tasks
# regardless of whether it's possible to log the contents of the task.
message.nack()
return
# Call the registered callback.
logging.info('Processing task (message_id=%s):\n%s', message.message_id,
task_string)
try:
registration.callback(args)
except Exception: # pylint: disable=broad-except
logging.exception('Task failed (message_id=%s).', message.message_id)
# See the comment above about nacking on self._task_to_string() failures
# for the considerations here.
message.nack()
else:
logging.info('Finished task (message_id=%s).', message.message_id)
message.ack()
|
|
import tempfile, shutil
import os
import re
import subprocess
import time
import datetime
import csv
import json, yaml
import string
from bson.objectid import ObjectId
from bson import json_util
from dateutil.parser import parse
from django.conf import settings
from hashlib import md5
from crits.core.class_mapper import class_from_value
from crits.core.exceptions import ZipFileError
from crits.core.mongo_tools import get_file
def get_file_fs(sample_md5):
"""
Read a file from the filesystem. The path to the file is:
/data/files/<md5[:2]>/<md5[2:4]>/<md5>
:param sample_md5: The MD5 of the file to read off of disk.
:type sample_md5: str
:returns: str
"""
try:
fin = open('/data/files/%s/%s/%s' % (sample_md5[:2],
sample_md5[2:4],
sample_md5),
'rb')
data = fin.read()
fin.close()
except:
raise
return data
def put_file_fs(data):
"""
Write a file to the filesystem. The path to write the file to is:
/data/files/<md5[:2]>/<md5[2:4]>/<md5>
:param data: The data of the file to write.
:type data: str
:returns: str (the md5 of the file written)
"""
a = md5()
a.update(data)
sample_md5 = a.hexdigest()
try:
fout = open('/data/files/%s/%s/%s' % (sample_md5[:2],
sample_md5[2:4],
sample_md5),
'wb')
fout.write(data)
fout.close()
except:
raise
return sample_md5
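# Small sketch of the on-disk layout used by get_file_fs/put_file_fs above: files
# are fanned out into two directory levels taken from the first four hex characters
# of the MD5. The digest below (MD5 of the empty string) is just an example value.
def _demo_file_path(sample_md5='d41d8cd98f00b204e9800998ecf8427e'):
    # e.g. '/data/files/d4/1d/d41d8cd98f00b204e9800998ecf8427e'
    return '/data/files/%s/%s/%s' % (sample_md5[:2], sample_md5[2:4], sample_md5)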
def create_zip(files, pw_protect=True):
"""
Create a zip file. Creates a temporary directory to write files to on disk
using :class:`tempfile`. Uses /usr/bin/zip as the zipping mechanism
currently. Will password-protect the zip file by default. The password for
the zip file defaults to "infected", but it can be changed in the config
under zip7_password.
:param files: The files to add to the zip file.
:type files: list of files which are in the format of a list or tuple of
(<filename>, <data>).
:param pw_protect: To password protect the zip file or not.
:type pw_protect: boolean
:returns: :class:`crits.core.exceptions.ZipFileError`, str
"""
dumpdir = ""
try:
# Zip can take data from stdin to compress, but
# you can't define the filenames within the archive,
# they show up as "-". Therefore, we need to write
# out the file, compress it and return the zip.
# Save the sample as a file in a temp directory
# NOTE: the following line was causing a "permission denied" exception.
# Removed dir arg.
from crits.config.config import CRITsConfig
crits_config = CRITsConfig.objects().first()
if crits_config:
zip7_password = crits_config.zip7_password
else:
zip7_password = settings.ZIP7_PASSWORD
dumpdir = tempfile.mkdtemp() #dir=temproot
#write out binary files
for f in files:
filename = f[0]
file_data = f[1]
# make sure our desired path doesn't already exist (some files may
# have the same name but different data)
path = dumpdir + "/" + filename.encode("utf-8")
i = 1
tmp = path
while os.path.exists(tmp):
tmp = path+"("+str(i)+")"
i += 1
with open(tmp, "wb") as fh:
fh.write(file_data)
# Build the command line for zip
# NOTE: forking subprocess instead of using Python's ZipFile library
# because ZipFile does not allow us to create password-protected zip
# archives, only read them.
# -j don't include original filepath
zipname = "zip.zip" #The name we give it doesn't really matter
        args = ["/usr/bin/zip", "-r", "-j"]
        if pw_protect:
            args += ["-P", zip7_password]
        args += [dumpdir+"/"+zipname, dumpdir]
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Give the process 30 seconds to complete, otherwise kill it
waitSeconds = 30
while (proc.poll() is None and waitSeconds):
time.sleep(1)
waitSeconds -= 1
zipdata = ""
if proc.returncode: # zip spit out an error
errmsg = "Error while creating archive\n" + proc.stdout.read()
raise ZipFileError, errmsg
elif not waitSeconds: # Process timed out
proc.terminate()
raise ZipFileError, "Error:\nProcess failed to terminate"
else:
with open(dumpdir + "/" + zipname, "rb") as fh:
zipdata = fh.read()
if not len(zipdata):
raise ZipFileError, "Error:\nThe zip archive contains no data"
return zipdata
except ZipFileError:
raise
except Exception, ex:
errmsg = ""
for err in ex.args:
errmsg = errmsg + " " + unicode(err)
raise ZipFileError, errmsg
finally:
if os.path.isdir(dumpdir):
shutil.rmtree(dumpdir)
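# Illustrative usage (a sketch, assuming /usr/bin/zip is installed and a zip
# password is configured via CRITsConfig.zip7_password or settings.ZIP7_PASSWORD):
#
#   files = [('report.txt', 'first file'), ('dropper.bin', '\x4d\x5a\x90\x00')]
#   protected = create_zip(files)                  # password-protected archive
#   plain = create_zip(files, pw_protect=False)    # plain archive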
def format_file(data, file_format):
"""
Format data into the provided format. Acceptable formats are:
- base64
- zlib
- raw
- invert
:param data: The data to format.
:type data: str
:param file_format: The format to convert the data into.
:type file_format: str
:returns: tuple of (<formatted_data>, <file_extension>)
"""
if file_format == "base64":
import base64
data = base64.b64encode(data)
ext = ".b64"
elif file_format == "zlib":
import zlib
data = zlib.compress(data)
ext = ".Z"
elif file_format == "raw":
ext = ""
elif file_format == "invert":
data = ''.join([chr(ord(c) ^ 0xff) for c in data])
ext = ".ff"
return (data, ext)
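# Quick illustration of the (data, extension) return value:
#
#   format_file('abc', 'base64')   # -> ('YWJj', '.b64')
#   format_file('abc', 'raw')      # -> ('abc', '')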
def convert_datetimes_to_string(obj):
"""
Iterates over all the keys of a document to convert all datetime objects
to strings.
Will also work with ordinary datetime objects or lists of datetimes and
lists of dictionaries. Any non-datetime values will be left as-is.
:param obj: The date object(s) to convert to a string.
:type obj: datetime.datetime, list, dict
:returns: obj
"""
if isinstance(obj, datetime.datetime):
return datetime.datetime.strftime(obj, settings.PY_DATETIME_FORMAT)
elif isinstance(obj, list) or isinstance(obj, dict):
for idx in (xrange(len(obj)) if isinstance(obj, list) else obj.keys()):
obj[idx] = convert_datetimes_to_string(obj[idx])
return obj
def convert_string_to_bool(value):
"""
Converts the string values "True" or "False" to their boolean
representation.
:param value: The string.
:type value: str.
:returns: True, False
"""
    return value in (True, "True", "true")
def format_object(obj_type, obj_id, data_format="yaml", cleanse=True,
obj_sources=[], remove_source=False, remove_rels=False,
remove_schema_version=False, remove_campaign=False,
remove_buckets=False, remove_releasability=False,
remove_unsupported=False):
"""
Formats a top-level object for utilization in certain conditions. Removes
CRITs-internal necessary data so users editing the document via the
interface don't alter or have the ability to overwrite things they should
not.
:param obj_type: The CRITs type of the top-level object to format.
:type obj_type: str
:param obj_id: The ObjectId to search for.
:type obj_id: str
:param data_format: The format of the returned data.
:type data_format: str of "yaml" or "json"
:param cleanse: Remove "to", "actions", "releasability", and "bucket_list"
if this is an Email or Indicator.
:type cleanse: boolean
:param obj_sources: The sources to overwrite into the document or to set
the source list to an empty list if remove_source is
False.
:type obj_sources: list
:param remove_source: Remove the source key from the document.
:type remove_source: boolean
:param remove_rels: Remove the relationships key from the document.
:type remove_rels: boolean
:param remove_schema_version: Remove the schema_version key from the
document.
:type remove_schema_version: boolean
:param remove_campaign: Remove the campaign key from the document.
:type remove_campaign: boolean
:param remove_buckets: Remove the bucket_list key from the document.
:type remove_buckets: boolean
:param remove_releasability: Remove the releasability key from the document.
:type remove_releasability: boolean
:param remove_unsupported: Remove the unsupported_attrs key from the document.
:type remove_unsupported: boolean
:returns: str
"""
collection = settings.CRITS_TYPES[obj_type]
obj_class = class_from_value(obj_type, obj_id)
if not obj_class:
return ""
data = obj_class.to_dict()
if data is None:
return ""
# Emails use raw_header (singular) as the attribute but store it as
# raw_headers (plural) in the database. When viewing an email in YAML
# or JSON convert from plural to singular. This will allow a copy/paste
# of these views to be imported correctly.
if 'raw_headers' in data:
data['raw_header'] = data['raw_headers']
del data['raw_headers']
if cleanse and collection in [settings.COL_EMAIL, settings.COL_INDICATORS]:
if "to" in data:
del data["to"]
if "actions" in data:
del data["actions"]
if "releasability" in data:
del data["releasability"]
if "bucket_list" in data:
del data["bucket_list"]
if remove_source and 'source' in data:
del data["source"]
elif 'source' in data:
data['source'] = obj_sources
if remove_rels and 'relationships' in data:
del data["relationships"]
if remove_rels and 'objects' in data:
del data["objects"]
if remove_schema_version and 'schema_version' in data:
del data["schema_version"]
if remove_campaign and 'campaign' in data:
del data["campaign"]
del data["_id"]
    if "modified" in data:
        del data["modified"]
if remove_buckets and 'bucket_list' in data:
del data['bucket_list']
if remove_releasability and 'releasability' in data:
del data['releasability']
if remove_unsupported and 'unsupported_attrs' in data:
del data['unsupported_attrs']
data = json.dumps(convert_datetimes_to_string(data),
default=json_util.default)
if data_format == "yaml":
data = yaml.dump(yaml.load(data), default_flow_style=False)
elif data_format == "json":
data = json.dumps(json.loads(data))
return data
def make_ascii_strings(md5=None, data=None):
"""
Find and return all printable ASCII strings in a string.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:returns: str
"""
if md5:
data = get_file(md5)
strings_data = 'ASCII Strings\n'
strings_data += "-" * 30
strings_data += "\n"
ascii_regex = re.compile('([ -~]{4,})')
matches = ascii_regex.findall(data)
strings_data += '\n'.join([x for x in matches])
return strings_data + "\n\n\n\n"
def make_unicode_strings(md5=None, data=None):
"""
Find and return all printable Unicode strings in a string.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:returns: str
"""
if md5:
data = get_file(md5)
strings_data = 'Unicode Strings\n'
strings_data += "-" * 30
strings_data += "\n"
unicode_regex = re.compile('(([%s]\x00){4,})' % string.printable)
matches = unicode_regex.findall(data)
strings_data += '\n'.join([x[0].replace('\x00', '') for x in matches])
return strings_data + "\n\n\n\n"
def make_stackstrings(md5=None, data=None):
"""
Find and return all stack strings in a string.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:returns: str
"""
if md5:
data = get_file(md5)
x = 0
prev = 0
strings = ''
while x < len(data):
if (data[x] == '\xc6') and ((data[x+1] == '\x45') or (data[x+1] == '\x84')):
a = ord(data[x+3])
if (a <= 126 and a >= 32) or (a==9): strings += data[x+3]
prev = x
x += 4
elif (data[x] == '\xc6') and (data[x+1] == '\x44'):
a = ord(data[x+4])
if (a <= 126 and a >= 32) or (a==9): strings += data[x+4]
prev = x
x += 5
elif (data[x] == '\xc6') and ((data[x+1] == '\x05') or (data[x+1] == '\x85')):
a = ord(data[x+6])
if (a <= 126 and a >= 32) or (a==9): strings += data[x+6]
prev = x
x += 7
else:
if ((x - prev) ==12): strings += '\n'
x += 1
strings = strings.replace('\x00', '\r')
return strings
def make_hex(md5=None, data=None):
"""
Convert data into hex formatted output.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:returns: str
"""
if md5:
data = get_file(md5)
length = 16
hex_data = ''
digits = 4 if isinstance(data, unicode) else 2
for i in xrange(0, len(data), length):
s = data[i:i+length]
hexa = ' '.join(["%0*X" % (digits, ord(x)) for x in s])
text = ' '.join([x if 0x20 <= ord(x) < 0x7F else '.' for x in s])
hex_data += "%04X %-*s %s\r\n" % (i, length*(digits + 1), hexa, text)
return hex_data
def xor_string(md5=None, data=None, key=0, null=0):
"""
XOR data.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:param key: The XOR key to use.
:type key: int
:param null: Whether or not to skip nulls.
:type null: int (0 or 1)
:returns: str
"""
if md5:
data = get_file(md5)
out = ''
for c in data:
if ord(c) == 0 and null == 1:
out += c
elif ord(c) == key and null == 1:
out += c
else:
out += chr(ord(c) ^ key)
return out
def xor_search(md5=None, data=None, string=None, skip_nulls=0):
"""
Search a string for potential XOR keys. Uses a small list of common
plaintext terms, XORs those terms using keys 0-255 and searches the data for
any match. If there is a match, that key is included in the results.
:param md5: The MD5 of the Sample to parse.
:type md5: str
:param data: The data to parse.
:type data: str
:param string: The custom string to XOR and search for.
:type string: str
:param skip_nulls: Whether or not to skip nulls.
:type skip_nulls: int (0 or 1)
:returns: list
"""
if md5:
data = get_file(md5)
if string is None or string == '':
plaintext_list = [
'This program',
'kernel32',
'KERNEL32',
'http',
'svchost',
'Microsoft',
'PE for WIN32',
'startxref',
'!This program cannot be run in DOS mode',
'\xD0\xCF\x11\xE0\xA1\xB1\x1a\xE1',
'D\x00o\x00c\x00u\x00m\x00e\x00n\x00t\x00 \x00S\x00u\x00m\x00m\x00a\x00r\x00y\x00 \x00I\x00n\x00f\x00o\x00r\x00m\x00a\x00t\x00i\x00o\x00n',
]
else:
plaintext_list = ["%s" % string]
results = []
for plaintext in plaintext_list:
        for i in range(256):  # keys 0-255, matching the docstring above
xord_string = xor_string(data=plaintext,
key=i,
null=skip_nulls)
if xord_string in data:
if i not in results:
results.append(i)
results.sort()
return results
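# Worked example (illustrative): 'http' XORed byte-by-byte with key 0x41 is
# ')551', so a buffer containing that sequence should report key 65:
#
#   buf = 'GET ' + xor_string(data='http', key=0x41)   # -> 'GET )551'
#   xor_search(data=buf, string='http')                # -> [65]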
def make_list(s):
"""
Make a list of out a string of data that needs to be parsed using
:class:`csv.reader`.
:param s: The string to convert
:type s: str
:returns: list
"""
l = []
l.append(s)
a = csv.reader(l, skipinitialspace=True)
b = None
for i in a:
b = i
return b
def remove_html_tags(data):
"""
Remove html tags from a string.
:param data: The string to parse.
:type data: str
:returns: str
"""
p = re.compile(r'<.*?>')
return p.sub('', data)
def datestring_to_isodate(datestring):
"""
Parse a string using :class:`dateutil` and return the results.
:param datestring: The date string to parse.
:returns: datetime.datetime
"""
return parse(datestring, fuzzy=True)
def clean_dict(dict_, keys_to_remove):
"""
Remove keys we don't want to display to the user.
Can also be used to remove keys from user input that we want to manage
ourselves. In the latter case, be sure the query is using $set and not
completely replacing the document, otherwise keys added elsewhere might
be lost.
:param dict_: The dictionary to iterate over.
:type dict_: dict
:param keys_to_remove: The list of keys we want to remove.
:type keys_to_remove: list
"""
for key in keys_to_remove:
if key in dict_:
del dict_[key]
def json_handler(obj):
"""
Handles converting datetimes and Mongo ObjectIds to string.
Usage: json.dumps(..., default=json_handler)
:param obj: The object that needs converting.
:type obj: datetime.datetime, ObjectId
:returns: str
"""
if isinstance(obj, datetime.datetime):
return datetime.datetime.strftime(obj, settings.PY_DATETIME_FORMAT)
elif isinstance(obj, ObjectId):
return str(obj)
def generate_qrcode(data, size):
"""
Generate a QR Code Image from a string.
Will attempt to import qrcode (which also requires Pillow) and io. If
this fails we will return None.
:param data: data to be converted into a QR Code
:type data: str
:param size: tuple of (width, height) in pixels to resize the QR Code
:type size: tuple
:returns: str in base64 format
"""
try:
import qrcode, io
except:
return None
a = io.BytesIO()
qr = qrcode.QRCode()
qr.add_data(data)
img = qr.make_image().resize(size)
img.save(a, 'PNG')
qr_img = a.getvalue().encode('base64').replace('\n', '')
a.close()
return qr_img
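# Illustrative usage (the optional 'qrcode' and Pillow dependencies must be
# installed, otherwise generate_qrcode() returns None):
#
#   qr_b64 = generate_qrcode('https://example.com', (200, 200))
#   if qr_b64:
#       html = '<img src="data:image/png;base64,%s" />' % qr_b64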
def validate_md5_checksum(md5_checksum):
"""
Validates that string is truly an MD5.
:param md5_checksum: The string to validate.
:type md5_checksum: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
retVal = {'success': True, 'message': ''}
    if re.match("^[a-fA-F0-9]{32}$", md5_checksum) is None:
retVal['message'] += "The MD5 digest needs to be 32 hex characters."
retVal['success'] = False
return retVal
def validate_sha1_checksum(sha1_checksum):
"""
Validates that string is truly a SHA1.
:param sha1_checksum: str
:return: dict with keys "success" (boolean) and "message" (str)
"""
retVal = {'success': True, 'message': ''}
    if re.match("^[a-fA-F0-9]{40}$", sha1_checksum) is None:
retVal['message'] += "The SHA1 digest needs to be 40 hex characters."
retVal['success'] = False
return retVal
def validate_sha256_checksum(sha256_checksum):
"""
Validates that string is truly a SHA256.
:param sha256_checksum: The string to validate.
:type sha256_checksum: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
retVal = {'success': True, 'message': ''}
    if re.match("^[a-fA-F0-9]{64}$", sha256_checksum) is None:
retVal['message'] += "The SHA256 digest needs to be 64 hex characters."
retVal['success'] = False
return retVal
|
|
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module extends the standard logging functionality with the option to
specify a context object when calling the various log methods. If the context
object is not specified, default formatting is used. Additionally, an instance
uuid may be passed as part of the log message, which is intended to make it
easier for admins to find messages related to a specific instance.
It also allows formatting information to be set through conf.
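Example usage sketch (illustrative; the import path and the `ctx` and
`instance` objects below are assumptions supplied by the calling service):
    from cloudbaseinit.openstack.common import log as logging
    LOG = logging.getLogger(__name__)
    LOG.info('starting compute service', context=ctx, instance=instance)
When a context is passed (or found in thread-local storage), the
logging_context_format_string option controls the line format; otherwise
logging_default_format_string is used.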
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import socket
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
_PY26 = sys.version_info[0:2] == (2, 6)
from cloudbaseinit.openstack.common.gettextutils import _
from cloudbaseinit.openstack.common import importutils
from cloudbaseinit.openstack.common import jsonutils
from cloudbaseinit.openstack.common import local
# NOTE(flaper87): Pls, remove when graduating this module
# from the incubator.
from cloudbaseinit.openstack.common.strutils import mask_password # noqa
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo.messaging=INFO', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
"keystonemiddleware=WARN", "routes.middleware=WARN",
"stevedore=WARN"]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=DEFAULT_LOG_LEVELS,
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message.'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
def isEnabledFor(self, level):
if _PY26:
            # This method was added in Python 2.7; replicate its logic here
            # so that Python 2.6 has the same capability.
return self.logger.isEnabledFor(level)
else:
return super(BaseLoggerAdapter, self).isEnabledFor(level)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
if six.PY3:
# In Python 3, the code fails because the 'manager' attribute
# cannot be found when using a LoggerAdapter as the
# underlying logger. Work around this issue.
self._logger.manager = self._logger.logger.manager
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(msg, six.text_type):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except (moves.configparser.Error, KeyError) as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string=None,
default_log_levels=None):
    # Just in case the caller is not setting default_log_levels. This is
    # insurance because we introduced the default_log_levels parameter later
    # in a backward-incompatible change.
if default_log_levels is not None:
cfg.set_defaults(
log_opts,
default_log_levels=default_log_levels)
if logging_context_format_string is not None:
cfg.set_defaults(
log_opts,
logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
try:
handler = importutils.import_object(
"cloudbaseinit.openstack.common.log_handler."
"PublishErrorsHandler",
logging.ERROR)
except ImportError:
handler = importutils.import_object(
"oslo.messaging.notify.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = logging.getLogger(mod)
# NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
if CONF.use_syslog:
try:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(facility=facility)
else:
syslog = logging.handlers.SysLogHandler(facility=facility)
log_root.addHandler(syslog)
except socket.error:
            log_root.error('Unable to add syslog handler. Verify that syslog '
                           'is running.')
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# NOTE(jecarey): If msg is not unicode, coerce it into unicode
# before it can get to the python logging and
# possibly cause string encoding trouble
if not isinstance(record.msg, six.text_type):
record.msg = six.text_type(record.msg)
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
fmt = CONF.logging_context_format_string
else:
fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
fmt += " " + CONF.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
|
|
# encoding: utf-8
"""
environment.py
Created by Thomas Mangin on 2011-11-29.
Copyright (c) 2011-2015 Exa Networks. All rights reserved.
"""
# XXX: raised exception not caught
# XXX: reloading mid-program not possible
# XXX: validation for path, file, etc. not correctly tested (i.e. surely buggy)
import os
import sys
import pwd
import syslog
from exabgp.util.ip import isip
# ===================================================================== NoneDict
#
class NoneDict (dict):
def __getitem__ (self, name):
return None
nonedict = NoneDict()
# ================================================================== environment
# XXX: FIXME: Upper case for class !
class environment (object):
# class returned on issues
class Error (Exception):
pass
application = 'unset'
# the configuration to be set by the program
configuration = {}
# the final parsed settings
_settings = None
location = os.path.normpath(sys.argv[0]) if sys.argv[0].startswith('/') else os.path.normpath(os.path.join(os.getcwd(),sys.argv[0]))
log_levels = ['EMERG', 'ALERT', 'CRIT', 'CRITICAL', 'ERR', 'ERROR', 'WARNING', 'NOTICE', 'INFO', 'DEBUG']
@staticmethod
def setup (conf):
if environment._settings:
# nosetest is performing the setup multiple times, so we can not raise anymore
# raise RuntimeError('You already initialised the environment')
return environment._settings
environment._settings = _env(conf)
return environment._settings
@staticmethod
def settings ():
if not environment._settings:
            raise RuntimeError('settings() can not be used before main() has initialised the environment')
return environment._settings
@staticmethod
def root (path):
roots = environment.location.split(os.sep)
location = []
for index in range(len(roots)-1,-1,-1):
if roots[index] == 'lib':
if index:
location = roots[:index]
break
root = os.path.join(*location)
paths = [
os.path.normpath(os.path.join(os.path.join(os.sep,root,path))),
os.path.normpath(os.path.expanduser(environment.unquote(path))),
os.path.normpath(os.path.join('/',path)),
]
return paths
@staticmethod
def integer (_):
return int(_)
@staticmethod
def real (_):
return float(_)
@staticmethod
def lowunquote (_):
return _.strip().strip('\'"').lower()
@staticmethod
def unquote (_):
return _.strip().strip('\'"')
@staticmethod
def quote (_):
return "'%s'" % str(_)
@staticmethod
def nop (_):
return _
@staticmethod
def boolean (_):
return _.lower() in ('1','yes','on','enable','true')
@staticmethod
def api (_):
encoder = _.lower()
if encoder not in ('text','json'):
raise TypeError('invalid encoder')
return encoder
@staticmethod
def methods (_):
return _.upper().split()
@staticmethod
def list (_):
return "'%s'" % ' '.join(_)
@staticmethod
def lower (_):
return str(_).lower()
@staticmethod
def ip (_):
if isip(_):
return _
raise TypeError('ip %s is invalid' % _)
@staticmethod
def optional_ip (_):
if not _ or isip(_):
return _
raise TypeError('ip %s is invalid' % _)
@staticmethod
def user (_):
# XXX: incomplete
try:
pwd.getpwnam(_)
# uid = answer[2]
except KeyError:
raise TypeError('user %s is not found on this system' % _)
return _
@staticmethod
def folder (path):
paths = environment.root(path)
        options = [p for p in paths if os.path.exists(p)]
        if not options:
            raise TypeError('%s does not exist' % path)
        first = options[0]
        if not first:
            raise TypeError('%s does not exist' % first)
return first
@staticmethod
def path (path):
split = sys.argv[0].split('lib/exabgp')
if len(split) > 1:
prefix = os.sep.join(split[:1])
if prefix and path.startswith(prefix):
path = path[len(prefix):]
home = os.path.expanduser('~')
if path.startswith(home):
return "'~%s'" % path[len(home):]
return "'%s'" % path
@staticmethod
def conf (path):
first = environment.folder(path)
if not os.path.isfile(first):
raise TypeError('%s is not a file' % path)
return first
@staticmethod
def exe (path):
first = environment.conf(path)
if not os.access(first, os.X_OK):
raise TypeError('%s is not an executable' % first)
return first
@staticmethod
def syslog (path):
path = environment.unquote(path)
if path in ('stdout','stderr'):
return path
if path.startswith('host:'):
return path
return path
@staticmethod
def redirector (name):
if name == 'url' or name.startswith('icap://'):
return name
        raise TypeError("invalid redirector protocol %s, options are 'url' or an icap:// url" % name)
@staticmethod
def syslog_value (log):
if log not in environment.log_levels:
if log == 'CRITICAL':
log = 'CRIT'
if log == 'ERROR':
log = 'ERR'
raise TypeError('invalid log level %s' % log)
return getattr(syslog,'LOG_%s' % log)
@staticmethod
def syslog_name (log):
for name in environment.log_levels:
if name == 'CRITICAL':
name = 'CRIT'
if name == 'ERROR':
name = 'ERR'
if getattr(syslog,'LOG_%s' % name) == log:
return name
raise TypeError('invalid log level %s' % log)
@staticmethod
def default ():
for section in sorted(environment.configuration):
if section in ('internal','debug'):
continue
for option in sorted(environment.configuration[section]):
values = environment.configuration[section][option]
default = "'%s'" % values['value'] if values['write'] in (environment.list,environment.path,environment.quote,environment.syslog) else values['value']
yield '%s.%s.%s %s: %s. default (%s)' % (environment.application,section,option,' '*(20-len(section)-len(option)),values['help'],default)
@staticmethod
def iter_ini (diff=False):
for section in sorted(environment._settings):
if section in ('internal','debug'):
continue
header = '\n[%s.%s]' % (environment.application,section)
for k in sorted(environment._settings[section]):
v = environment._settings[section][k]
if diff and environment.configuration[section][k]['read'](environment.configuration[section][k]['value']) == v:
continue
if header:
yield header
header = ''
yield '%s = %s' % (k,environment.configuration[section][k]['write'](v))
@staticmethod
def iter_env (diff=False):
for section,values in environment._settings.items():
if section in ('internal','debug'):
continue
for k,v in values.items():
if diff and environment.configuration[section][k]['read'](environment.configuration[section][k]['value']) == v:
continue
if environment.configuration[section][k]['write'] == environment.quote:
yield "%s.%s.%s='%s'" % (environment.application,section,k,v)
continue
yield "%s.%s.%s=%s" % (environment.application,section,k,environment.configuration[section][k]['write'](v))
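# Example (sketch, assuming the program sets environment.application to
# 'exabgp'): a setting such as [exabgp.daemon] daemonize can be overridden
# with either the dotted or the underscored environment variable spelling,
#
#   env exabgp.daemon.daemonize=true ...
#   env exabgp_daemon_daemonize=true ...
#
# since _env() below checks os.environ for both forms before falling back to
# the .env/ini file and finally the built-in default.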
# ========================================================================= _env
#
import ConfigParser
from exabgp.util.hashtable import HashTable
def _env (conf):
here = os.path.join(os.sep,*os.path.join(environment.location.split(os.sep)))
location, directory = os.path.split(here)
while directory:
if directory == 'lib':
location = os.path.join(location,'lib')
break
location, directory = os.path.split(location)
# we did not break - ie, we did not find the location in the normal path.
else:
# let's try to see if we are running from the QA folder (for unittesting)
location, directory = os.path.split(here)
while directory:
if directory == 'dev':
location = os.path.join(location,'lib')
break
location, directory = os.path.split(location)
else:
            # oh! bad, let's set the path to something ...
location = '/lib'
_conf_paths = []
if conf:
_conf_paths.append(os.path.abspath(os.path.normpath(conf)))
if location:
_conf_paths.append(os.path.normpath(os.path.join(location,'etc',environment.application,'%s.env' % environment.application)))
_conf_paths.append(os.path.normpath(os.path.join('/','etc',environment.application,'%s.env' % environment.application)))
env = HashTable()
ini = ConfigParser.ConfigParser()
ini_files = [path for path in _conf_paths if os.path.exists(path)]
if ini_files:
ini.read(ini_files[0])
for section in environment.configuration:
default = environment.configuration[section]
for option in default:
convert = default[option]['read']
try:
proxy_section = '%s.%s' % (environment.application,section)
env_name = '%s.%s' % (proxy_section,option)
rep_name = env_name.replace('.','_')
if env_name in os.environ:
conf = os.environ.get(env_name)
elif rep_name in os.environ:
conf = os.environ.get(rep_name)
else:
conf = environment.unquote(ini.get(proxy_section,option,nonedict))
# name without an = or : in the configuration and no value
if conf is None:
conf = default[option]['value']
except (ConfigParser.NoSectionError,ConfigParser.NoOptionError):
conf = default[option]['value']
try:
env.setdefault(section,HashTable())[option] = convert(conf)
except TypeError:
raise environment.Error('invalid value for %s.%s : %s' % (section,option,conf))
return env
|
|
from sympy.core import Expr, S, C, Symbol, Equality, Interval, sympify, Wild
from sympy.solvers import solve
from sympy.utilities import flatten
class Sum(Expr):
"""Represents unevaluated summation."""
def __new__(cls, f, *symbols, **assumptions):
f = sympify(f)
if f.is_Number:
if f is S.NaN:
return S.NaN
elif f is S.Zero:
return S.Zero
if not symbols:
limits = f.atoms(Symbol)
if not limits:
return f
else:
limits = []
for V in symbols:
if isinstance(V, Symbol):
limits.append(V)
continue
elif isinstance(V, Equality):
if isinstance(V.lhs, Symbol):
if isinstance(V.rhs, Interval):
limits.append((V.lhs, V.rhs.start, V.rhs.end))
else:
limits.append((V.lhs, V.rhs))
continue
elif isinstance(V, (tuple, list)):
V = flatten(V)
if len(V) == 1:
if isinstance(V[0], Symbol):
limits.append(V[0])
continue
elif len(V) in (2, 3):
if isinstance(V[0], Symbol):
limits.append(tuple(map(sympify, V)))
continue
raise ValueError("Invalid summation variable or limits")
obj = Expr.__new__(cls, **assumptions)
obj._args = (f, tuple(limits))
return obj
@property
def function(self):
return self._args[0]
@property
def limits(self):
return self._args[1]
def doit(self, **hints):
#if not hints.get('sums', True):
# return self
f = self.function
for i, a, b in self.limits:
f = eval_sum(f, (i, a, b))
if f is None:
return self
if hints.get('deep', True):
return f.doit(**hints)
else:
return f
def _eval_summation(self, f, x):
return
def euler_maclaurin(self, m=0, n=0, eps=0, eval_integral=True):
"""
Return an Euler-Maclaurin approximation of self, where m is the
number of leading terms to sum directly and n is the number of
terms in the tail.
With m = n = 0, this is simply the corresponding integral
plus a first-order endpoint correction.
Returns (s, e) where s is the Euler-Maclaurin approximation
and e is the estimated error (taken to be the magnitude of
the first omitted term in the tail):
>>> from sympy.abc import k, a, b
>>> from sympy import Sum
>>> Sum(1/k, (k, 2, 5)).doit().evalf()
1.28333333333333
>>> s, e = Sum(1/k, (k, 2, 5)).euler_maclaurin()
>>> s
7/20 - log(2) + log(5)
>>> from sympy import sstr
>>> print sstr((s.evalf(), e.evalf()), full_prec=True)
(1.26629073187416, 0.0175000000000000)
The endpoints may be symbolic:
>>> s, e = Sum(1/k, (k, a, b)).euler_maclaurin()
>>> s
-log(a) + log(b) + 1/(2*a) + 1/(2*b)
>>> e
abs(-1/(12*b**2) + 1/(12*a**2))
If the function is a polynomial of degree at most 2n+1, the
Euler-Maclaurin formula becomes exact (and e = 0 is returned):
>>> Sum(k, (k, 2, b)).euler_maclaurin()
(-1 + b/2 + b**2/2, 0)
>>> Sum(k, (k, 2, b)).doit()
-1 + b/2 + b**2/2
With a nonzero eps specified, the summation is ended
as soon as the remainder term is less than the epsilon.
"""
m = int(m)
n = int(n)
f = self.function
assert len(self.limits) == 1
i, a, b = self.limits[0]
s = S.Zero
if m:
for k in range(m):
term = f.subs(i, a+k)
if (eps and term and abs(term.evalf(3)) < eps):
return s, abs(term)
s += term
a += m
x = Symbol('x', dummy=True)
I = C.Integral(f.subs(i, x), (x, a, b))
if eval_integral:
I = I.doit()
s += I
def fpoint(expr):
if b is S.Infinity:
return expr.subs(i, a), 0
return expr.subs(i, a), expr.subs(i, b)
fa, fb = fpoint(f)
iterm = (fa + fb)/2
g = f.diff(i)
for k in xrange(1, n+2):
ga, gb = fpoint(g)
term = C.bernoulli(2*k)/C.Factorial(2*k)*(gb-ga)
if (eps and term and abs(term.evalf(3)) < eps) or (k > n):
break
s += term
g = g.diff(i, 2)
return s + iterm, abs(term)
def _eval_subs(self, old, new):
newargs = (self.args[1][0][0], self.args[1][0][1].subs(old,new),
self.args[1][0][2].subs(old,new))
return Sum(self.args[0].subs(old, new), newargs)
def sum(*args, **kwargs):
summation = Sum(*args, **kwargs)
if isinstance(summation, Sum):
return summation.doit(deep=False)
else:
return summation
def getab(expr):
cls = expr.func
return cls(expr.args[0]), cls(*expr.args[1:])
def telescopic_direct(L, R, n, (i, a, b)):
"""Returns the direct summation of the terms of a telescopic sum
L is the term with lower index
R is the term with higher index
n difference between the indexes of L and R
For example:
>>> from sympy.concrete.summations import telescopic_direct
>>> from sympy.abc import k, a, b
>>> telescopic_direct(1/k, -1/(k+2), 2, (k, a, b))
1/a + 1/(1 + a) - 1/(1 + b) - 1/(2 + b)
"""
s = 0
for m in xrange(n):
s += L.subs(i,a+m) + R.subs(i,b-m)
return s
def telescopic(L, R, (i, a, b)):
'''Tries to perform the summation using the telescopic property
return None if not possible
'''
if L.is_Add or R.is_Add:
return None
s = None
#First we try to solve using match
#Maybe this should go inside solve
k = Wild("k")
sol = (-R).match(L.subs(i, i + k))
if sol and k in sol:
if L.subs(i,i + sol[k]) == -R:
            # sometimes match fails, e.g. f(x+2).match(-f(x+k)) -> {k: -2 - 2*x}
s = sol[k]
#Then we try to solve using solve
if not s or not s.is_Integer:
m = Symbol("m")
try:
s = solve(L.subs(i, i + m) + R, m)[0]
except IndexError:#(ValueError, IndexError):
pass
if s and s.is_Integer:
if s < 0:
return telescopic_direct(R, L, abs(s), (i, a, b))
elif s > 0:
return telescopic_direct(L, R, s, (i, a, b))
return None
def eval_sum(f, (i, a, b)):
if not f.has(i):
return f*(b-a+1)
definite = a.is_Integer and b.is_Integer
# Doing it directly may be faster if there are very few terms.
if definite and (b-a < 100):
return eval_sum_direct(f, (i, a, b))
# Try to do it symbolically. Even when the number of terms is known,
# this can save time when b-a is big.
# We should try to transform to partial fractions
value = eval_sum_symbolic(f.expand(), (i, a, b))
if value is not None:
return value
# Do it directly
if definite:
return eval_sum_direct(f, (i, a, b))
def eval_sum_symbolic(f, (i, a, b)):
if not f.has(i):
return f*(b-a+1)
# Linearity
if f.is_Mul:
L, R = getab(f)
if not L.has(i):
sR = eval_sum_symbolic(R, (i, a, b))
if sR: return L*sR
if not R.has(i):
sL = eval_sum_symbolic(L, (i, a, b))
if sL: return R*sL
if f.is_Add:
L, R = getab(f)
lrsum = telescopic(L, R, (i, a, b))
if lrsum: return lrsum
lsum = eval_sum_symbolic(L, (i, a, b))
rsum = eval_sum_symbolic(R, (i, a, b))
if None not in (lsum, rsum):
return lsum + rsum
# Polynomial terms with Faulhaber's formula
p = C.Wild('p')
e = f.match(i**p)
    if e is not None:
c = p.subs(e)
B = C.bernoulli
if c.is_integer and c >= 0:
s = (B(c+1, b+1) - B(c+1, a))/(c+1)
return s.expand()
# Geometric terms
c1 = C.Wild('c1', exclude=[i])
c2 = C.Wild('c2', exclude=[i])
c3 = C.Wild('c3', exclude=[i])
e = f.match(c1**(c2*i+c3))
if e is not None:
c1 = c1.subs(e)
c2 = c2.subs(e)
c3 = c3.subs(e)
# TODO: more general limit handling
return c1**c3 * (c1**(a*c2) - c1**(c2+b*c2)) / (1 - c1**c2)
return None
def eval_sum_direct(expr, (i, a, b)):
s = S.Zero
if expr.has(i):
for j in xrange(a, b+1):
s += expr.subs(i, j)
else:
for j in xrange(a, b+1):
s += expr
return s
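# Illustrative note: the symbolic path above yields closed forms for the two
# recognised families. Mirroring the euler_maclaurin doctest,
#
#     >>> from sympy import Sum
#     >>> from sympy.abc import k, b
#     >>> Sum(k, (k, 2, b)).doit()
#     -1 + b/2 + b**2/2
#
# and for geometric summands c1**(c2*i + c3) the closed form returned by
# eval_sum_symbolic is c1**c3 * (c1**(a*c2) - c1**(c2 + b*c2)) / (1 - c1**c2).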
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import argparse
import functools
from PIL import Image
import paddle.fluid as fluid
import reader
from pyramidbox import PyramidBox
from visualize import draw_bboxes
from utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('use_gpu', bool, True, "Whether use GPU or not.")
add_arg('use_pyramidbox', bool, True, "Whether use PyramidBox model.")
add_arg('data_dir', str, 'data/WIDER_val/images/', "The validation dataset path.")
add_arg('model_dir', str, '', "The model path.")
add_arg('pred_dir', str, 'pred', "The path to save the evaluation results.")
add_arg('file_list', str, 'data/wider_face_split/wider_face_val_bbx_gt.txt', "The validation dataset path.")
add_arg('infer', bool, False, "Whether do infer or eval.")
add_arg('confs_threshold', float, 0.15, "Confidence threshold to draw bbox.")
add_arg('image_path', str, '', "The image used to inference and visualize.")
# yapf: enable
def infer(args, config):
model_dir = args.model_dir
pred_dir = args.pred_dir
if not os.path.exists(model_dir):
raise ValueError("The model path [%s] does not exist." % (model_dir))
if args.infer:
image_path = args.image_path
image = Image.open(image_path)
if image.mode == 'L':
            image = image.convert('RGB')
shrink, max_shrink = get_shrink(image.size[1], image.size[0])
det0 = detect_face(image, shrink)
if args.use_gpu:
det1 = flip_test(image, shrink)
[det2, det3] = multi_scale_test(image, max_shrink)
det4 = multi_scale_test_pyramid(image, max_shrink)
det = np.row_stack((det0, det1, det2, det3, det4))
dets = bbox_vote(det)
else:
            # when inferring on CPU, use the simple single-scale case
dets = det0
keep_index = np.where(dets[:, 4] >= args.confs_threshold)[0]
dets = dets[keep_index, :]
draw_bboxes(image_path, dets[:, 0:4])
else:
test_reader = reader.test(config, args.file_list)
for image, image_path in test_reader():
shrink, max_shrink = get_shrink(image.size[1], image.size[0])
det0 = detect_face(image, shrink)
det1 = flip_test(image, shrink)
[det2, det3] = multi_scale_test(image, max_shrink)
det4 = multi_scale_test_pyramid(image, max_shrink)
det = np.row_stack((det0, det1, det2, det3, det4))
dets = bbox_vote(det)
save_widerface_bboxes(image_path, dets, pred_dir)
print("Finish evaluation.")
def save_widerface_bboxes(image_path, bboxes_scores, output_dir):
"""
Save predicted results, including bbox and score into text file.
Args:
image_path (string): file name.
bboxes_scores (np.array|list): the predicted bboxed and scores, layout
is (xmin, ymin, xmax, ymax, score)
output_dir (string): output directory.
"""
image_name = image_path.split('/')[-1]
image_class = image_path.split('/')[-2]
odir = os.path.join(output_dir, image_class)
if not os.path.exists(odir):
os.makedirs(odir)
ofname = os.path.join(odir, '%s.txt' % (image_name[:-4]))
f = open(ofname, 'w')
f.write('{:s}\n'.format(image_class + '/' + image_name))
f.write('{:d}\n'.format(bboxes_scores.shape[0]))
for box_score in bboxes_scores:
xmin, ymin, xmax, ymax, score = box_score
f.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\n'.format(xmin, ymin, (
xmax - xmin + 1), (ymax - ymin + 1), score))
f.close()
print("The predicted result is saved as {}".format(ofname))
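# The resulting text file (one per image, written to
# <pred_dir>/<image_class>/<image_name>.txt) looks like the following
# illustrative example: the relative image path, the number of detections,
# then one "xmin ymin width height score" line per box:
#
#   0--Parade/0_Parade_marchingband_1_20.jpg
#   2
#   12.0 34.0 56.0 78.0 0.995
#   40.0 52.0 18.0 22.0 0.431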
def detect_face(image, shrink):
image_shape = [3, image.size[1], image.size[0]]
if shrink != 1:
h, w = int(image_shape[1] * shrink), int(image_shape[2] * shrink)
image = image.resize((w, h), Image.ANTIALIAS)
image_shape = [3, h, w]
img = np.array(image)
img = reader.to_chw_bgr(img)
mean = [104., 117., 123.]
scale = 0.007843
img = img.astype('float32')
img -= np.array(mean)[:, np.newaxis, np.newaxis].astype('float32')
img = img * scale
img = [img]
img = np.array(img)
detection, = exe.run(infer_program,
feed={'image': img},
fetch_list=fetches,
return_numpy=False)
detection = np.array(detection)
    # layout: xmin, ymin, xmax, ymax, score
if np.prod(detection.shape) == 1:
print("No face detected")
return np.array([[0, 0, 0, 0, 0]])
det_conf = detection[:, 1]
det_xmin = image_shape[2] * detection[:, 2] / shrink
det_ymin = image_shape[1] * detection[:, 3] / shrink
det_xmax = image_shape[2] * detection[:, 4] / shrink
det_ymax = image_shape[1] * detection[:, 5] / shrink
det = np.column_stack((det_xmin, det_ymin, det_xmax, det_ymax, det_conf))
return det
def bbox_vote(det):
order = det[:, 4].ravel().argsort()[::-1]
det = det[order, :]
if det.shape[0] == 0:
dets = np.array([[10, 10, 20, 20, 0.002]])
det = np.empty(shape=[0, 5])
while det.shape[0] > 0:
# IOU
area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)
xx1 = np.maximum(det[0, 0], det[:, 0])
yy1 = np.maximum(det[0, 1], det[:, 1])
xx2 = np.minimum(det[0, 2], det[:, 2])
yy2 = np.minimum(det[0, 3], det[:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
o = inter / (area[0] + area[:] - inter)
# nms
merge_index = np.where(o >= 0.3)[0]
det_accu = det[merge_index, :]
det = np.delete(det, merge_index, 0)
if merge_index.shape[0] <= 1:
if det.shape[0] == 0:
try:
dets = np.row_stack((dets, det_accu))
except:
dets = det_accu
continue
det_accu[:, 0:4] = det_accu[:, 0:4] * np.tile(det_accu[:, -1:], (1, 4))
max_score = np.max(det_accu[:, 4])
det_accu_sum = np.zeros((1, 5))
det_accu_sum[:, 0:4] = np.sum(det_accu[:, 0:4],
axis=0) / np.sum(det_accu[:, -1:])
det_accu_sum[:, 4] = max_score
try:
dets = np.row_stack((dets, det_accu_sum))
except:
dets = det_accu_sum
dets = dets[0:750, :]
return dets
def flip_test(image, shrink):
img = image.transpose(Image.FLIP_LEFT_RIGHT)
det_f = detect_face(img, shrink)
det_t = np.zeros(det_f.shape)
# image.size: [width, height]
det_t[:, 0] = image.size[0] - det_f[:, 2]
det_t[:, 1] = det_f[:, 1]
det_t[:, 2] = image.size[0] - det_f[:, 0]
det_t[:, 3] = det_f[:, 3]
det_t[:, 4] = det_f[:, 4]
return det_t
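# For example, on an image of width 640 a detection (x1, y1, x2, y2) =
# (10, 20, 50, 60) found in the mirrored image maps back to
# (640 - 50, 20, 640 - 10, 60) = (590, 20, 630, 60) in the original image,
# which is exactly the remapping applied in flip_test() above.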
def multi_scale_test(image, max_shrink):
    # Detection on the shrunken image is only used to find big faces
st = 0.5 if max_shrink >= 0.75 else 0.5 * max_shrink
det_s = detect_face(image, st)
index = np.where(
np.maximum(det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1)
> 30)[0]
det_s = det_s[index, :]
# Enlarge the image once
bt = min(2, max_shrink) if max_shrink > 1 else (st + max_shrink) / 2
det_b = detect_face(image, bt)
# Enlarge the image repeatedly (doubling the scale) to detect small faces
if max_shrink > 2:
bt *= 2
while bt < max_shrink:
det_b = np.row_stack((det_b, detect_face(image, bt)))
bt *= 2
det_b = np.row_stack((det_b, detect_face(image, max_shrink)))
# Enlarged images are only used to detect small faces.
if bt > 1:
index = np.where(
np.minimum(det_b[:, 2] - det_b[:, 0] + 1,
det_b[:, 3] - det_b[:, 1] + 1) < 100)[0]
det_b = det_b[index, :]
# Shrunken images are only used to detect big faces.
else:
index = np.where(
np.maximum(det_b[:, 2] - det_b[:, 0] + 1,
det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]
det_b = det_b[index, :]
return det_s, det_b
def multi_scale_test_pyramid(image, max_shrink):
# Use image pyramids to detect faces
det_b = detect_face(image, 0.25)
index = np.where(
np.maximum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1)
> 30)[0]
det_b = det_b[index, :]
st = [0.75, 1.25, 1.5, 1.75]
for i in range(len(st)):
if (st[i] <= max_shrink):
det_temp = detect_face(image, st[i])
# Enlarged images are only used to detect small faces.
if st[i] > 1:
index = np.where(
np.minimum(det_temp[:, 2] - det_temp[:, 0] + 1,
det_temp[:, 3] - det_temp[:, 1] + 1) < 100)[0]
det_temp = det_temp[index, :]
# Shrunken images are only used to detect big faces.
else:
index = np.where(
np.maximum(det_temp[:, 2] - det_temp[:, 0] + 1,
det_temp[:, 3] - det_temp[:, 1] + 1) > 30)[0]
det_temp = det_temp[index, :]
det_b = np.row_stack((det_b, det_temp))
return det_b
def get_shrink(height, width):
"""
Args:
height (int): image height.
width (int): image width.
"""
# avoid out of memory
max_shrink_v1 = (0x7fffffff / 577.0 / (height * width))**0.5
max_shrink_v2 = ((678 * 1024 * 2.0 * 2.0) / (height * width))**0.5
def get_round(x, loc):
str_x = str(x)
if '.' in str_x:
str_before, str_after = str_x.split('.')
len_after = len(str_after)
if len_after >= 3:
str_final = str_before + '.' + str_after[0:loc]
return float(str_final)
else:
return x
max_shrink = get_round(min(max_shrink_v1, max_shrink_v2), 2) - 0.3
if max_shrink >= 1.5 and max_shrink < 2:
max_shrink = max_shrink - 0.1
elif max_shrink >= 2 and max_shrink < 3:
max_shrink = max_shrink - 0.2
elif max_shrink >= 3 and max_shrink < 4:
max_shrink = max_shrink - 0.3
elif max_shrink >= 4 and max_shrink < 5:
max_shrink = max_shrink - 0.4
elif max_shrink >= 5:
max_shrink = max_shrink - 0.5
shrink = max_shrink if max_shrink < 1 else 1
return shrink, max_shrink
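# Hypothetical usage sketch (the image size is made up): for a large image the
# memory heuristics above give max_shrink well below 1, so the first detection
# pass runs on a downscaled copy of the image.
def _get_shrink_example():
    shrink, max_shrink = get_shrink(2000, 3000)  # height, width in pixels
    assert 0 < shrink <= 1 and max_shrink > 0
    return shrink, max_shrink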
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
config = reader.Settings(data_dir=args.data_dir)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
main_program = fluid.Program()
startup_program = fluid.Program()
image_shape = [3, 1024, 1024]
with fluid.program_guard(main_program, startup_program):
network = PyramidBox(
data_shape=image_shape,
sub_network=args.use_pyramidbox,
is_infer=True)
infer_program, nmsed_out = network.infer(main_program)
fetches = [nmsed_out]
fluid.io.load_persistables(
exe, args.model_dir, main_program=infer_program)
# save model and program
#fluid.io.save_inference_model('pyramidbox_model',
# ['image'], [nmsed_out], exe, main_program=infer_program,
# model_filename='model', params_filename='params')
infer(args, config)
|
|
import argparse
import os
import sys
import numpy as np
from PIL import Image
import chainer
from chainer import cuda
import chainer.functions as F
from chainer.functions import caffe
from chainer import Variable, optimizers
import pickle
def subtract_mean(x0):
x = x0.copy()
x[0,0,:,:] -= 104
x[0,1,:,:] -= 117
x[0,2,:,:] -= 123
return x
def add_mean(x0):
x = x0.copy()
x[0,0,:,:] += 104
x[0,1,:,:] += 117
x[0,2,:,:] += 123
return x
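# Minimal round-trip sketch (assumption: x is a 1x3xHxW float32 array in BGR
# channel order, matching what subtract_mean/add_mean expect): subtracting and
# then re-adding the per-channel means should reproduce the input exactly.
def _mean_roundtrip_example():
    x = np.random.uniform(0, 255, (1, 3, 4, 4)).astype(np.float32)
    assert np.allclose(add_mean(subtract_mean(x)), x)
    return x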
def image_resize(img_file, width):
gogh = Image.open(img_file)
orig_w, orig_h = gogh.size[0], gogh.size[1]
if orig_w>orig_h:
new_w = width
new_h = width*orig_h/orig_w
gogh = np.asarray(gogh.resize((new_w,new_h)))[:,:,:3].transpose(2, 0, 1)[::-1].astype(np.float32)
gogh = gogh.reshape((1,3,new_h,new_w))
print("image resized to: ", gogh.shape)
hoge= np.zeros((1,3,width,width), dtype=np.float32)
hoge[0,:,width-new_h:,:] = gogh[0,:,:,:]
gogh = subtract_mean(hoge)
else:
new_w = width*orig_w/orig_h
new_h = width
gogh = np.asarray(gogh.resize((new_w,new_h)))[:,:,:3].transpose(2, 0, 1)[::-1].astype(np.float32)
gogh = gogh.reshape((1,3,new_h,new_w))
print("image resized to: ", gogh.shape)
hoge= np.zeros((1,3,width,width), dtype=np.float32)
hoge[0,:,:,width-new_w:] = gogh[0,:,:,:]
gogh = subtract_mean(hoge)
return xp.asarray(gogh), new_w, new_h
def save_image(img, width, new_w, new_h, it):
def to_img(x):
im = np.zeros((new_h,new_w,3))
im[:,:,0] = x[2,:,:]
im[:,:,1] = x[1,:,:]
im[:,:,2] = x[0,:,:]
def clip(a):
return 0 if a<0 else (255 if a>255 else a)
im = np.vectorize(clip)(im).astype(np.uint8)
Image.fromarray(im).save(args.out_dir+"/im_%05d.png"%it)
if args.gpu>=0:
img_cpu = add_mean(img.get())
else:
img_cpu = add_mean(img)
if width==new_w:
to_img(img_cpu[0,:,width-new_h:,:])
else:
to_img(img_cpu[0,:,:,width-new_w:])
def nin_forward(x):
y0 = F.relu(model.conv1(x))
y1 = model.cccp2(F.relu(model.cccp1(y0)))
x1 = F.relu(model.conv2(F.average_pooling_2d(F.relu(y1), 3, stride=2)))
y2 = model.cccp4(F.relu(model.cccp3(x1)))
x2 = F.relu(model.conv3(F.average_pooling_2d(F.relu(y2), 3, stride=2)))
y3 = model.cccp6(F.relu(model.cccp5(x2)))
x3 = F.relu(getattr(model,"conv4-1024")(F.dropout(F.average_pooling_2d(F.relu(y3), 3, stride=2), train=False)))
return [y0,x1,x2,x3]
def vgg_forward(x):
y1 = model.conv1_2(F.relu(model.conv1_1(x)))
x1 = F.average_pooling_2d(F.relu(y1), 2, stride=2)
y2 = model.conv2_2(F.relu(model.conv2_1(x1)))
x2 = F.average_pooling_2d(F.relu(y2), 2, stride=2)
y3 = model.conv3_3(F.relu(model.conv3_2(F.relu(model.conv3_1(x2)))))
x3 = F.average_pooling_2d(F.relu(y3), 2, stride=2)
y4 = model.conv4_3(F.relu(model.conv4_2(F.relu(model.conv4_1(x3)))))
# x4 = F.average_pooling_2d(F.relu(y4), 2, stride=2)
# y5 = model.conv5_3(F.relu(model.conv5_2(F.relu(model.conv5_1(x4)))))
return [y1,y2,y3,y4]
def get_matrix(y):
ch = y.data.shape[1]
wd = y.data.shape[2]
gogh_y = F.reshape(y, (ch,wd**2))
gogh_matrix = F.matmul(gogh_y, gogh_y, transb=True)/np.float32(ch*wd**2)
return gogh_matrix
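# Illustrative numpy-only version of the Gram ("style") matrix computed by
# get_matrix above, for a single feature map with C channels on a W x W grid.
# The function name and its argument are ad hoc, not part of the original.
def _gram_matrix_numpy(feature_map):
    c, w = feature_map.shape[0], feature_map.shape[1]
    y = feature_map.reshape(c, w * w).astype(np.float32)
    return y.dot(y.T) / np.float32(c * w * w)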
class Clip(chainer.Function):
def forward(self, x):
x = x[0]
ret = cuda.elementwise(
'T x','T ret',
'''
ret = x<-100?-100:(x>100?100:x);
''','clip')(x)
return ret
def generate_image(img_orig, img_style, width, nw, nh, max_iter, lr, alpha, beta, img_gen=None):
mid_orig = nin_forward(Variable(img_orig))
style_mats = [get_matrix(y) for y in nin_forward(Variable(img_style))]
if img_gen is None:
if args.gpu >= 0:
img_gen = xp.random.uniform(-20,20,(1,3,width,width),dtype=np.float32)
else:
img_gen = np.random.uniform(-20,20,(1,3,width,width)).astype(np.float32)
x = Variable(img_gen)
xg = xp.zeros_like(x.data)
optimizer = optimizers.Adam(alpha=lr)
optimizer.setup((img_gen,xg))
for i in range(max_iter):
x = Variable(img_gen)
y = nin_forward(x)
optimizer.zero_grads()
L = Variable(xp.zeros((), dtype=np.float32))
for l in range(4):
ch = y[l].data.shape[1]
wd = y[l].data.shape[2]
gogh_y = F.reshape(y[l], (ch,wd**2))
gogh_matrix = F.matmul(gogh_y, gogh_y, transb=True)/np.float32(ch*wd**2)
L1 = np.float32(alpha[l])*F.mean_squared_error(y[l], Variable(mid_orig[l].data))
L2 = np.float32(beta[l])*F.mean_squared_error(gogh_matrix, Variable(style_mats[l].data))/np.float32(4)
L += L1+L2
if i%100==0:
print i,l,L1.data,L2.data
L.backward()
xg += x.grad
optimizer.update()
tmp_shape = img_gen.shape
if args.gpu >= 0:
img_gen += Clip().forward(img_gen).reshape(tmp_shape) - img_gen
else:
def clip(x):
return -100 if x<-100 else (100 if x>100 else x)
img_gen += np.vectorize(clip)(img_gen).reshape(tmp_shape) - img_gen
if i%50==0:
save_image(img_gen, W, nw, nh, i)
parser = argparse.ArgumentParser(
description='A Neural Algorithm of Artistic Style')
parser.add_argument('--model', '-m', default='nin_imagenet.caffemodel',
help='model file')
parser.add_argument('--orig_img', '-i', default='orig.png',
help='Original image')
parser.add_argument('--style_img', '-s', default='style.png',
help='Style image')
parser.add_argument('--out_dir', '-o', default='output',
help='Output directory')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--iter', default=5000, type=int,
help='number of iteration')
parser.add_argument('--lr', default=4.0, type=float,
help='learning rate')
parser.add_argument('--lam', default=0.005, type=float,
help='original image weight / style weight ratio')
parser.add_argument('--width', '-w', default=435, type=int,
help='image width, height')
args = parser.parse_args()
try:
os.mkdir(args.out_dir)
except:
pass
if args.gpu >= 0:
cuda.check_cuda_available()
cuda.get_device(args.gpu).use()
xp = cuda.cupy
else:
xp = np
chainer.Function.type_check_enable = False
print "load model... %s"%args.model
func = caffe.CaffeFunction(args.model)
model = func.fs
if args.gpu>=0:
model.to_gpu()
W = args.width
img_gogh,_,_ = image_resize(args.style_img, W)
img_hongo,nw,nh = image_resize(args.orig_img, W)
generate_image(img_hongo, img_gogh, W, nw, nh, img_gen=None, max_iter=args.iter, lr=args.lr, alpha=[args.lam * x for x in [0,0,1,1]], beta=[1,1,1,1])
|
|
""" Finite Union of Intervals
Implements a class to handle finite (disjoint) unions of intervals.
* assumes that intervals are always closed and that the union is disjoint
* open intervals remaining at the end of any operation (e.g. complement)
  are always made closed, e.g. [0,1]^C = [-np.inf,0] [1,np.inf]
* end intervals being unbounded is handled using -np.inf and np.inf
* does some okay handling for point intervals [a,a]
Darren Rhea, 2012; Chris Hillar revised, April 30, 2013;
Ram Mehta revised, 2013; Copyright (c) 2013, All rights reserved;
Chris Hillar revised, 2015
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
class Intervals(object):
""" Finite Union of Intervals [ai,bi] backed by sorted lists.
parameters
intervals: (M x 2) numpy np.double array
"""
def __init__(self, intervals=None):
if intervals is None or len(intervals) == 0:
self.intervals = np.array([])
return
intervals = np.atleast_2d(np.asarray(intervals))
idx = intervals[:, 0].argsort()
self.intervals = intervals[idx, :]
if not self._is_disjoint():
self._make_disjoint()
def __iter__(self):
return iter(self.intervals)
def __len__(self):
return self.measure()
def __str__(self):
if self.is_empty():
return "EmptySet"
ivt = self.intervals
return " ".join(["[%s,%s]" % (ivt[i, 0], ivt[i, 1])
for i in range(ivt.shape[0])])
def __add__(self, F):
return self.union(F)
def __mul__(self, F):
return self.intersect(F)
def __sub__(self, F):
return self.remove(F)
def __invert__(self):
return self.complement()
def _is_disjoint(self):
""" Checks if intervals are indeed disjoint """
self.intervals = np.atleast_2d(self.intervals)
if (self.intervals is None) or (self.intervals.shape[0] < 2):
return True
return ((self.intervals[:-1, 1] < self.intervals[1:, 0]).all() and
(self.intervals[:, 1] >= self.intervals[:, 0]).all())
def _make_disjoint(self):
""" Remove intervals [a, b] with a > b """
good_intervals_idx = self.intervals[:, 1] >= self.intervals[:, 0]
self.intervals = self.intervals[good_intervals_idx, :]
# union together intervals that overlap
tot = None
curr_idx = 0
curr_lhs = self.intervals[curr_idx, 0]
curr_rhs = self.intervals[curr_idx, 1]
while curr_idx < self.intervals.shape[0]:
while curr_rhs >= self.intervals[curr_idx, 0]:
curr_rhs = max(curr_rhs, self.intervals[curr_idx, 1])
curr_idx += 1
if curr_idx >= self.intervals.shape[0]:
break
if tot is None:
tot = np.array([curr_lhs, curr_rhs])
else:
tot = np.vstack((tot, np.array([curr_lhs, curr_rhs])))
if curr_idx < self.intervals.shape[0]:
curr_lhs = self.intervals[curr_idx, 0]
curr_rhs = self.intervals[curr_idx, 1]
self.intervals = np.atleast_2d(tot)
def copy(self):
return Intervals(self.intervals.copy())
def contains(self, x):
""" Check if x is in the Finite Union of Intervals. """
if self.is_empty():
return False
idx = self.intervals[:, 0].searchsorted(x)
if idx == 0:
return x >= self.intervals[idx, 0]
if idx >= self.intervals.shape[0]:
return x <= self.intervals[idx - 1, 1]
if (self.intervals[idx - 1, 0] <= x) and \
(self.intervals[idx - 1, 1] >= x):
return True
if (self.intervals[idx, 0] <= x) and (self.intervals[idx, 1] >= x):
return True
return False
def index_of_first_intersection(self, x, find_nearest=False):
""" finds interval nearest to given number x and containing x
if find_nearest=False: doesn't require x to be in the interval """
if self.is_empty():
return -1
idx = self.intervals[:, 0].searchsorted(x)
if idx == 0:
if x >= self.intervals[idx, 0]:
return idx
else:
return -1
if find_nearest:
return idx
if idx >= self.intervals.shape[0]:
if x <= self.intervals[idx - 1, 1]:
return idx - 1
else:
return -1
if (self.intervals[idx - 1, 0] <= x) and \
(self.intervals[idx - 1, 1] >= x):
return idx - 1
if (self.intervals[idx, 0] <= x) and (self.intervals[idx, 1] >= x):
return idx
return -1
def is_empty(self):
return self.intervals.shape[0] == 0
def num(self):
if self.is_empty():
return 0
return self.intervals.shape[0]
def union(self, F):
""" New Intervals object which is the union of self and
Intervals F. """
if F.is_empty():
return self
if self.is_empty():
return F
return Intervals(np.vstack((self.intervals, F.intervals)))
def intersect(self, F):
""" New Intervals object which is the intersection of self and
Intervals F. """
if F.is_empty():
return F
if self.is_empty():
return self
return ~(~self + ~F)
def intersect_with_interval(self, a, b):
""" returns (not a copy) Intervals object which is the intersection
of self and [a, b]
(faster than intersect) """
if self.is_empty():
return self
if (self.intervals[:, 1] > a).sum() == 0 or \
(self.intervals[:, 0] < b).sum() == 0:
return Intervals()
idx_first_gta = (self.intervals[:, 1] > a).nonzero()[0][0]
idx_last_ltb = self.intervals.shape[
0] - (self.intervals[:, 0] < b)[::-1].nonzero()[0][0]
return Intervals(self.intervals[idx_first_gta:idx_last_ltb, :])
def complement(self):
""" New Intervals object which is the complement of self. """
if self.is_empty():
return Intervals([-np.inf, np.inf])
M = self.intervals.shape[0]
# complement bulk
# 2 * M - 2 points
list_endpoints = self.intervals.ravel()
I = np.zeros((M - 1, 2))
odds = range(1, (M - 1) * 2, 2)
evens = range(0, (M - 1) * 2, 2)
I[:, 1] = list_endpoints[1: -1][odds]
I[:, 0] = list_endpoints[1: -1][evens]
# fix complement ends
a, b = list_endpoints[0], list_endpoints[-1]
if a > -np.inf:
I = np.vstack((np.array([-np.inf, a]), I))
if b < np.inf:
I = np.vstack((I, np.array([b, np.inf])))
return Intervals() if I.shape[0] == 0 else Intervals(I)
def measure(self):
if self.is_empty():
return 0
diff_arr = self.intervals[:, 1] - self.intervals[:, 0]
return diff_arr.sum()
def trim(self, eps=0.001):
""" Removes intervals with lengths <= eps. """
if self.is_empty():
return self
diff_arr = self.intervals[:, 1] - self.intervals[:, 0]
idx = diff_arr > eps
self.intervals = self.intervals[idx, :]
return self
def connect_gaps(self, eps=0.001):
""" connects consecutive intervals separated by lengths <= eps """
H = ~self
diff_arr = H.intervals[:, 1] - H.intervals[:, 0]
idx = diff_arr <= eps
if idx.sum() == 0:
return self
B = Intervals(H.intervals[idx, :])
A = self.union(B)
self.intervals = A.intervals
return self
def connect_gaps_by_rule(self, rule):
""" Returns a new object with gaps connected when rule returns True.
Parameters
rule: Callable that takes parameters start_time and end_time.
"""
if self.is_empty():
return self
new_intervals = []
i = 0
dim = self.intervals.shape[0]
while i < dim:
a = self.intervals[i, 0]
while i + 1 < dim and rule(self.intervals[i, 1],
self.intervals[i + 1, 0]):
i += 1
b = self.intervals[i, 1]
new_intervals.append([a, b])
i += 1
return Intervals(np.array(new_intervals))
def remove(self, other):
return self.intersect(~other)
def symmetric_difference(self, other):
return (self - other) + (other - self)
def subordinate_to_array(self, arr):
""" returns a new Intervals object with only intervals containing
elements of arr
(NOTE: arr is assumed sorted)
"""
arr = np.array(arr)
F = Intervals()
for interval in self.intervals:
a, b = interval[0], interval[1]
idxa = arr.searchsorted(a)
idxb = arr.searchsorted(b)
# arr has a point in interval
if idxa != idxb or (idxa < len(arr) and a == arr[idxa]):
F += Intervals([a, b])
return F
def save(self, filename='Intervals_save'):
np.savez(filename, intervals=self.intervals)
def load(self, filename='Intervals_save.npz'):
import os
if os.path.exists(filename):
arr = np.load(filename)
self.intervals = arr['intervals']
return self
def ASs(self, ISDT=20):
""" returns new object of Active States given self as Events """
return self.complement().trim(ISDT).complement()
def ISs(self, ISDT=20):
""" returns new object of Inactive States given self as Events """
return self.complement().trim(ISDT)
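# Hypothetical usage sketch for the Intervals class above; the operator
# overloads map to union (+), intersection (*), difference (-) and
# complement (~). The numbers are illustrative only.
def _intervals_example():
    a = Intervals(np.array([[0.0, 2.0], [5.0, 7.0]]))
    b = Intervals(np.array([[1.0, 6.0]]))
    union = a + b        # one merged interval [0, 7]
    inter = a * b        # [1, 2] and [5, 6]
    comp = ~a            # [-inf, 0], [2, 5], [7, inf]
    return union, inter, comp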
def intervals_from_binary(bin_array, times):
"""
Given a one dimensional bin_array of 0s and 1s,
returns an Intervals object of times corresponding to consecutive 1s
"""
F = Intervals()
idx = 0
while idx < bin_array.shape[0]:
curr_bit = bin_array[idx]
if curr_bit == 0:
idx += 1
else:
AS_idx = idx + 1
start_idx = idx
while AS_idx < bin_array.shape[0] and bin_array[AS_idx] == 1:
AS_idx += 1
F = F.union(Intervals([times[start_idx], times[AS_idx - 1]]))
idx = AS_idx
return F
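# Sketch of the expected behavior (assuming uniformly sampled times): each run
# of consecutive 1s becomes one closed interval spanning the run's first and
# last timestamps, so the trace below yields [1, 2] and [4, 5].
def _intervals_from_binary_example():
    times = np.arange(6, dtype=float)
    bits = np.array([0, 1, 1, 0, 1, 1])
    return intervals_from_binary(bits, times)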
def binary_from_intervals(intervals, length=None):
""" From an intervals object produce a binary sequence of size length """
if length is None:
length = int(intervals.intervals[-1, 1] - intervals.intervals[0, 0])
binary = np.zeros(length)
start = intervals.intervals[0, 0]
end = intervals.intervals[-1, 1]
arr = np.linspace(start, end, length)
for c, time in enumerate(arr):
if intervals.contains(time):
binary[c] = 1
return binary
def timestamps_to_interval(array, eps=.01):
""" given a 1D array with event timestamps, returns an interval centered
on timestamp and eps wide.
default 0.01 is half of minimum HCM sampling rate
"""
new_arr = list(zip(array - eps, array + eps))
new_I = Intervals(new_arr)
return new_I
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GoGrid driver
"""
import time
import hashlib
import copy
from libcloud.utils.py3 import b
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.gogrid import GoGridConnection, BaseGoGridDriver
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
STATE = {
"Starting": NodeState.PENDING,
"On": NodeState.RUNNING,
"On/Saving": NodeState.RUNNING,
"Off": NodeState.PENDING,
"Restarting": NodeState.REBOOTING,
"Saving": NodeState.PENDING,
"Restoring": NodeState.PENDING,
}
GOGRID_INSTANCE_TYPES = {
'512MB': {'id': '512MB',
'name': '512MB',
'ram': 512,
'disk': 30,
'bandwidth': None},
'1GB': {'id': '1GB',
'name': '1GB',
'ram': 1024,
'disk': 60,
'bandwidth': None},
'2GB': {'id': '2GB',
'name': '2GB',
'ram': 2048,
'disk': 120,
'bandwidth': None},
'4GB': {'id': '4GB',
'name': '4GB',
'ram': 4096,
'disk': 240,
'bandwidth': None},
'8GB': {'id': '8GB',
'name': '8GB',
'ram': 8192,
'disk': 480,
'bandwidth': None},
'16GB': {'id': '16GB',
'name': '16GB',
'ram': 16384,
'disk': 960,
'bandwidth': None},
'24GB': {'id': '24GB',
'name': '24GB',
'ram': 24576,
'disk': 960,
'bandwidth': None},
}
class GoGridNode(Node):
# Generating uuid based on public ip to get around missing id on
# create_node in gogrid api
#
# Used public ip since it is not mutable and specified at create time,
# so uuid of node should not change after add is completed
def get_uuid(self):
return hashlib.sha1(
b("%s:%s" % (self.public_ips, self.driver.type))
).hexdigest()
class GoGridNodeDriver(BaseGoGridDriver, NodeDriver):
"""
GoGrid node driver
"""
connectionCls = GoGridConnection
type = Provider.GOGRID
api_name = 'gogrid'
name = 'GoGrid'
website = 'http://www.gogrid.com/'
features = {"create_node": ["generates_password"]}
_instance_types = GOGRID_INSTANCE_TYPES
def __init__(self, *args, **kwargs):
"""
@inherits: L{NodeDriver.__init__}
"""
super(GoGridNodeDriver, self).__init__(*args, **kwargs)
def _get_state(self, element):
try:
return STATE[element['state']['name']]
except:
pass
return NodeState.UNKNOWN
def _get_ip(self, element):
return element.get('ip').get('ip')
def _get_id(self, element):
return element.get('id')
def _to_node(self, element, password=None):
state = self._get_state(element)
ip = self._get_ip(element)
id = self._get_id(element)
n = GoGridNode(id=id,
name=element['name'],
state=state,
public_ips=[ip],
private_ips=[],
extra={'ram': element.get('ram').get('name'),
'description': element.get('description', '')},
driver=self.connection.driver)
if password:
n.extra['password'] = password
return n
def _to_image(self, element):
n = NodeImage(id=element['id'],
name=element['friendlyName'],
driver=self.connection.driver)
return n
def _to_images(self, object):
return [self._to_image(el)
for el in object['list']]
def _to_location(self, element):
location = NodeLocation(id=element['id'],
name=element['name'],
country="US",
driver=self.connection.driver)
return location
def _to_locations(self, object):
return [self._to_location(el)
for el in object['list']]
def list_images(self, location=None):
params = {}
if location is not None:
params["datacenter"] = location.id
images = self._to_images(
self.connection.request('/api/grid/image/list', params).object)
return images
def list_nodes(self):
"""
@inherits: L{NodeDriver.list_nodes}
@rtype: C{list} of L{GoGridNode}
"""
passwords_map = {}
res = self._server_list()
try:
for password in self._password_list()['list']:
try:
passwords_map[password['server']['id']] = \
password['password']
except KeyError:
pass
except InvalidCredsError:
# some gogrid API keys don't have permission to access the
# password list.
pass
return [self._to_node(el, passwords_map.get(el.get('id')))
for el in res['list']]
def reboot_node(self, node):
"""
@inherits: L{NodeDriver.reboot_node}
@type node: L{GoGridNode}
"""
id = node.id
power = 'restart'
res = self._server_power(id, power)
if not res.success():
raise Exception(res.parse_error())
return True
def destroy_node(self, node):
"""
@inherits: L{NodeDriver.reboot_node}
@type node: L{GoGridNode}
"""
id = node.id
res = self._server_delete(id)
if not res.success():
raise Exception(res.parse_error())
return True
def _server_list(self):
return self.connection.request('/api/grid/server/list').object
def _password_list(self):
return self.connection.request('/api/support/password/list').object
def _server_power(self, id, power):
# power in ['start', 'stop', 'restart']
params = {'id': id, 'power': power}
return self.connection.request("/api/grid/server/power", params,
method='POST')
def _server_delete(self, id):
params = {'id': id}
return self.connection.request("/api/grid/server/delete", params,
method='POST')
def _get_first_ip(self, location=None):
ips = self.ex_list_ips(public=True, assigned=False, location=location)
try:
return ips[0].ip
except IndexError:
raise LibcloudError('No public unassigned IPs left',
GoGridNodeDriver)
def list_sizes(self, location=None):
sizes = []
for key, values in self._instance_types.items():
attributes = copy.deepcopy(values)
attributes.update({'price': self._get_size_price(size_id=key)})
sizes.append(NodeSize(driver=self.connection.driver, **attributes))
return sizes
def list_locations(self):
locations = self._to_locations(
self.connection.request('/api/common/lookup/list',
params={'lookup': 'ip.datacenter'}).object)
return locations
def ex_create_node_nowait(self, **kwargs):
"""Don't block until GoGrid allocates id for a node
but return right away with id == None.
This method exists because GoGrid assigns an id to a node only a few
minutes after creation.
@keyword name: String with a name for this new node (required)
@type name: C{str}
@keyword size: The size of resources allocated to this node .
(required)
@type size: L{NodeSize}
@keyword image: OS Image to boot on node. (required)
@type image: L{NodeImage}
@keyword ex_description: Description of a Node
@type ex_description: C{str}
@keyword ex_ip: Public IP address to use for a Node. If not
specified, first available IP address will be picked
@type ex_ip: C{str}
@rtype: L{GoGridNode}
"""
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
try:
ip = kwargs['ex_ip']
except KeyError:
ip = self._get_first_ip(kwargs.get('location'))
params = {'name': name,
'image': image.id,
'description': kwargs.get('ex_description', ''),
'server.ram': size.id,
'ip': ip}
object = self.connection.request('/api/grid/server/add',
params=params, method='POST').object
node = self._to_node(object['list'][0])
return node
def create_node(self, **kwargs):
"""Create a new GoGird node
@inherits: L{NodeDriver.create_node}
@keyword ex_description: Description of a Node
@type ex_description: C{str}
@keyword ex_ip: Public IP address to use for a Node. If not
specified, first available IP address will be picked
@type ex_ip: C{str}
@rtype: L{GoGridNode}
"""
node = self.ex_create_node_nowait(**kwargs)
timeout = 60 * 20
waittime = 0
interval = 2 * 60
while node.id is None and waittime < timeout:
nodes = self.list_nodes()
for i in nodes:
if i.public_ips[0] == node.public_ips[0] and i.id is not None:
return i
waittime += interval
time.sleep(interval)
if node.id is None:
raise Exception(
"Wasn't able to wait for id allocation for the node %s"
% str(node))
return node
def ex_save_image(self, node, name):
"""Create an image for node.
Please refer to the GoGrid documentation for information on how to
prepare a node for image creation:
http://wiki.gogrid.com/wiki/index.php/MyGSI
@keyword node: node to use as a base for image
@type node: L{GoGridNode}
@keyword name: name for new image
@type name: C{str}
@rtype: L{NodeImage}
"""
params = {'server': node.id,
'friendlyName': name}
object = self.connection.request('/api/grid/image/save', params=params,
method='POST').object
return self._to_images(object)[0]
def ex_edit_node(self, **kwargs):
"""Change attributes of a node.
@keyword node: node to be edited (required)
@type node: L{GoGridNode}
@keyword size: new size of a node (required)
@type size: L{NodeSize}
@keyword ex_description: new description of a node
@type ex_description: C{str}
@rtype: L{Node}
"""
node = kwargs['node']
size = kwargs['size']
params = {'id': node.id,
'server.ram': size.id}
if 'ex_description' in kwargs:
params['description'] = kwargs['ex_description']
object = self.connection.request('/api/grid/server/edit',
params=params).object
return self._to_node(object['list'][0])
def ex_edit_image(self, **kwargs):
"""Edit metadata of a server image.
@keyword image: image to be edited (required)
@type image: L{NodeImage}
@keyword public: whether the image should be public (required)
@type public: C{bool}
@keyword ex_description: description of the image (optional)
@type ex_description: C{str}
@keyword name: name of the image
@type name: C{str}
@rtype: L{NodeImage}
"""
image = kwargs['image']
public = kwargs['public']
params = {'id': image.id,
'isPublic': str(public).lower()}
if 'ex_description' in kwargs:
params['description'] = kwargs['ex_description']
if 'name' in kwargs:
params['friendlyName'] = kwargs['name']
object = self.connection.request('/api/grid/image/edit',
params=params).object
return self._to_image(object['list'][0])
def ex_list_ips(self, **kwargs):
"""Return list of IP addresses assigned to
the account.
@keyword public: set to True to list only
public IPs or False to list only
private IPs. Set to None or omit the
argument altogether to skip filtering by type
@type public: C{bool}
@keyword assigned: set to True to list only addresses
assigned to servers, False to list unassigned
addresses, and None (or omit the argument)
to skip filtering by state
@type assigned: C{bool}
@keyword location: filter IP addresses by location
@type location: L{NodeLocation}
@rtype: C{list} of L{GoGridIpAddress}
"""
params = {}
if "public" in kwargs and kwargs["public"] is not None:
params["ip.type"] = {True: "Public",
False: "Private"}[kwargs["public"]]
if "assigned" in kwargs and kwargs["assigned"] is not None:
params["ip.state"] = {True: "Assigned",
False: "Unassigned"}[kwargs["assigned"]]
if "location" in kwargs and kwargs['location'] is not None:
params['datacenter'] = kwargs['location'].id
ips = self._to_ips(
self.connection.request('/api/grid/ip/list',
params=params).object)
return ips
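# Hypothetical usage sketch (credentials and the chosen size/image are
# placeholders; GoGrid itself has since been discontinued, so this is purely
# illustrative of the libcloud driver pattern):
#
#     from libcloud.compute.providers import get_driver
#     from libcloud.compute.types import Provider
#
#     cls = get_driver(Provider.GOGRID)
#     driver = cls('api_key', 'api_secret')
#     sizes = driver.list_sizes()
#     images = driver.list_images()
#     node = driver.create_node(name='demo-node', size=sizes[0],
#                               image=images[0])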
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Detection model trainer.
This file provides a generic training method that can be used to train a
DetectionModel.
"""
import functools
import tensorflow as tf
from google3.pyglib import logging
from object_detection.builders import optimizer_builder
from object_detection.core import standard_fields as fields
from object_detection.utils import ops as util_ops
from object_detection.utils import variables_helper
from deployment import model_deploy
slim = tf.contrib.slim
def create_input_queue(create_tensor_dict_fn):
"""Sets up reader, prefetcher and returns input queue.
Args:
create_tensor_dict_fn: function to create tensor dictionary.
Returns:
all_dict: A dictionary holding tensors for images, boxes, and targets.
"""
tensor_dict = create_tensor_dict_fn()
all_dict = {}
num_images = len(tensor_dict[fields.InputDataFields.image])
all_dict['batch'] = tensor_dict['batch']
del tensor_dict['batch']
for i in range(num_images):
suffix = str(i)
for key, val in tensor_dict.items():
all_dict[key + suffix] = val[i]
all_dict[fields.InputDataFields.image + suffix] = tf.to_float(
tf.expand_dims(all_dict[fields.InputDataFields.image + suffix], 0))
return all_dict
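# Illustrative key layout of the returned dict (assuming two images per
# batch): besides 'batch' it holds per-image entries with numeric suffixes,
# e.g. image0, groundtruth_boxes0, groundtruth_classes0, image1,
# groundtruth_boxes1, ... get_inputs() below walks these suffixed keys back
# into per-image lists.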
def get_inputs(input_queue, num_classes, merge_multiple_label_boxes=False):
"""Dequeues batch and constructs inputs to object detection model.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
num_classes: Number of classes.
merge_multiple_label_boxes: Whether to merge boxes with multiple labels
or not. Defaults to false. Merged boxes are represented with a single
box and a k-hot encoding of the multiple labels associated with the
boxes.
Returns:
images: a list of 3-D float tensors of images.
image_keys: a list of string keys for the images.
locations: a list of tensors of shape [num_boxes, 4] containing the corners
of the groundtruth boxes.
classes: a list of padded one-hot tensors containing target classes.
masks: a list of 3-D float tensors of shape [num_boxes, image_height,
image_width] containing instance masks for objects if present in the
input_queue. Else returns None.
keypoints: a list of 3-D float tensors of shape [num_boxes, num_keypoints,
2] containing keypoints for objects if present in the
input queue. Else returns None.
"""
read_data_list = input_queue
label_id_offset = 1
def extract_images_and_targets(read_data):
"""Extract images and targets from the input dict."""
suffix = 0
images = []
keys = []
locations = []
classes = []
masks = []
keypoints = []
while fields.InputDataFields.image + str(suffix) in read_data:
image = read_data[fields.InputDataFields.image + str(suffix)]
key = ''
if fields.InputDataFields.source_id in read_data:
key = read_data[fields.InputDataFields.source_id + str(suffix)]
location_gt = (
read_data[fields.InputDataFields.groundtruth_boxes + str(suffix)])
classes_gt = tf.cast(
read_data[fields.InputDataFields.groundtruth_classes + str(suffix)],
tf.int32)
classes_gt -= label_id_offset
masks_gt = read_data.get(
fields.InputDataFields.groundtruth_instance_masks + str(suffix))
keypoints_gt = read_data.get(
fields.InputDataFields.groundtruth_keypoints + str(suffix))
if merge_multiple_label_boxes:
location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
location_gt, classes_gt, num_classes)
else:
classes_gt = util_ops.padded_one_hot_encoding(
indices=classes_gt, depth=num_classes, left_pad=0)
# Batch-read input data and groundtruth. Images, locations and classes
# should by default have the same number of items.
images.append(image)
keys.append(key)
locations.append(location_gt)
classes.append(classes_gt)
masks.append(masks_gt)
keypoints.append(keypoints_gt)
suffix += 1
return (images, keys, locations, classes, masks, keypoints)
return extract_images_and_targets(read_data_list)
def _create_losses(input_queue, create_model_fn, train_config):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
train_config: a train_pb2.TrainConfig protobuf.
"""
detection_model = create_model_fn()
(images, _, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_masks_list, groundtruth_keypoints_list) = get_inputs(
input_queue, detection_model.num_classes,
train_config.merge_multiple_label_boxes)
preprocessed_images = []
true_image_shapes = []
for image in images:
resized_image, true_image_shape = detection_model.preprocess(image)
preprocessed_images.append(resized_image)
true_image_shapes.append(true_image_shape)
images = tf.concat(preprocessed_images, 0)
true_image_shapes = tf.concat(true_image_shapes, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
if any(keypoints is None for keypoints in groundtruth_keypoints_list):
groundtruth_keypoints_list = None
detection_model.provide_groundtruth(
groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list,
groundtruth_keypoints_list)
prediction_dict = detection_model.predict(images, true_image_shapes,
input_queue['batch'])
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)
for loss_tensor in losses_dict.values():
tf.losses.add_loss(loss_tensor)
def get_restore_checkpoint_ops(restore_checkpoints, detection_model,
train_config):
"""Restore checkpoint from saved checkpoints.
Args:
restore_checkpoints: list of checkpoint paths to restore from.
detection_model: Object detection model built from config file.
train_config: a train_pb2.TrainConfig protobuf.
Returns:
restorers: A list of ops to init the model from checkpoints.
"""
restorers = []
vars_restored = []
for restore_checkpoint in restore_checkpoints:
var_map = detection_model.restore_map(
fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type)
available_var_map = (
variables_helper.get_variables_available_in_checkpoint(
var_map, restore_checkpoint))
# Iterate over a copy so entries can safely be deleted from the dict below.
for var_name, var in list(available_var_map.items()):
if var in vars_restored:
logging.info('Variable %s contained in multiple checkpoints',
var.op.name)
del available_var_map[var_name]
else:
vars_restored.append(var)
# Initialize from ExponentialMovingAverages if possible.
available_ema_var_map = {}
ckpt_reader = tf.train.NewCheckpointReader(restore_checkpoint)
ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_shape_map()
for var_name, var in available_var_map.iteritems():
var_name_ema = var_name + '/ExponentialMovingAverage'
if var_name_ema in ckpt_vars_to_shape_map:
available_ema_var_map[var_name_ema] = var
else:
available_ema_var_map[var_name] = var
available_var_map = available_ema_var_map
init_saver = tf.train.Saver(available_var_map)
if available_var_map.keys():
restorers.append(init_saver)
else:
logging.info('WARNING: Checkpoint %s has no restorable variables',
restore_checkpoint)
return restorers
def train(create_tensor_dict_fn,
create_model_fn,
train_config,
master,
task,
num_clones,
worker_replicas,
clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
train_dir,
graph_hook_fn=None):
"""Training function for detection models.
Args:
create_tensor_dict_fn: a function to create a tensor input dictionary.
create_model_fn: a function that creates a DetectionModel and generates
losses.
train_config: a train_pb2.TrainConfig protobuf.
master: BNS name of the TensorFlow master to use.
task: The task id of this training instance.
num_clones: The number of clones to run per machine.
worker_replicas: The number of work replicas to train with.
clone_on_cpu: True if clones should be forced to run on CPU.
ps_tasks: Number of parameter server tasks.
worker_job_name: Name of the worker job.
is_chief: Whether this replica is the chief replica.
train_dir: Directory to write checkpoints and training summaries to.
graph_hook_fn: Optional function that is called after the training graph is
completely built. This is helpful to perform additional changes to the
training graph such as optimizing batchnorm. The function should modify
the default graph.
"""
detection_model = create_model_fn()
with tf.Graph().as_default():
# Build a configuration specifying multi-GPU and multi-replicas.
deploy_config = model_deploy.DeploymentConfig(
num_clones=num_clones,
clone_on_cpu=clone_on_cpu,
replica_id=task,
num_replicas=worker_replicas,
num_ps_tasks=ps_tasks,
worker_job_name=worker_job_name)
# Place the global step on the device storing the variables.
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
with tf.device(deploy_config.inputs_device()):
input_queue = create_input_queue(create_tensor_dict_fn)
# Gather initial summaries.
# TODO(rathodv): See if summaries can be added/extracted from global tf
# collections so that they don't have to be passed around.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
global_summaries = set([])
model_fn = functools.partial(
_create_losses,
create_model_fn=create_model_fn,
train_config=train_config)
clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue])
first_clone_scope = clones[0].scope
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
with tf.device(deploy_config.optimizer_device()):
training_optimizer, optimizer_summary_vars = optimizer_builder.build(
train_config.optimizer)
for var in optimizer_summary_vars:
tf.summary.scalar(var.op.name, var)
sync_optimizer = None
if train_config.sync_replicas:
training_optimizer = tf.train.SyncReplicasOptimizer(
training_optimizer,
replicas_to_aggregate=train_config.replicas_to_aggregate,
total_num_replicas=train_config.worker_replicas)
sync_optimizer = training_optimizer
# Create ops required to initialize the model from a given checkpoint.
init_fn = None
if train_config.fine_tune_checkpoint:
restore_checkpoints = [
path.strip() for path in train_config.fine_tune_checkpoint.split(',')
]
restorers = get_restore_checkpoint_ops(restore_checkpoints,
detection_model, train_config)
def initializer_fn(sess):
for i, restorer in enumerate(restorers):
restorer.restore(sess, restore_checkpoints[i])
init_fn = initializer_fn
with tf.device(deploy_config.optimizer_device()):
regularization_losses = (
None if train_config.add_regularization_loss else [])
total_loss, grads_and_vars = model_deploy.optimize_clones(
clones,
training_optimizer,
regularization_losses=regularization_losses)
total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.')
# Optionally multiply bias gradients by train_config.bias_grad_multiplier.
if train_config.bias_grad_multiplier:
biases_regex_list = ['.*/biases']
grads_and_vars = variables_helper.multiply_gradients_matching_regex(
grads_and_vars,
biases_regex_list,
multiplier=train_config.bias_grad_multiplier)
# Optionally clip gradients
if train_config.gradient_clipping_by_norm > 0:
with tf.name_scope('clip_grads'):
grads_and_vars = slim.learning.clip_gradient_norms(
grads_and_vars, train_config.gradient_clipping_by_norm)
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
update_ops.append(variable_averages.apply(moving_average_variables))
# Create gradient updates.
grad_updates = training_optimizer.apply_gradients(
grads_and_vars, global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops, name='update_barrier')
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
if graph_hook_fn:
with tf.device(deploy_config.variables_device()):
graph_hook_fn()
# Add summaries.
for model_var in slim.get_model_variables():
global_summaries.add(tf.summary.histogram(model_var.op.name, model_var))
for loss_tensor in tf.losses.get_losses():
global_summaries.add(tf.summary.scalar(loss_tensor.op.name, loss_tensor))
global_summaries.add(
tf.summary.scalar('TotalLoss', tf.losses.get_total_loss()))
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(
tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, 'critic_loss'))
summaries |= global_summaries
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
# Soft placement allows placing on CPU ops without GPU implementation.
session_config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
# Save checkpoints regularly.
keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours
saver = tf.train.Saver(
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
slim.learning.train(
train_tensor,
logdir=train_dir,
master=master,
is_chief=is_chief,
session_config=session_config,
startup_delay_steps=train_config.startup_delay_steps,
init_fn=init_fn,
summary_op=summary_op,
number_of_steps=(train_config.num_steps
if train_config.num_steps else None),
save_summaries_secs=120,
sync_optimizer=sync_optimizer,
saver=saver)
|
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for the pages for subtopics, and related models."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.domain import change_domain
from core.domain import html_validation_service
from core.domain import state_domain
from core.platform import models
import feconf
import python_utils
import utils
(topic_models,) = models.Registry.import_models([models.NAMES.topic])
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML = 'page_contents_html'
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO = 'page_contents_audio'
SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS = 'page_written_translations'
CMD_CREATE_NEW = 'create_new'
# These take additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY = 'update_subtopic_page_property'
class SubtopicPageChange(change_domain.BaseChange):
"""Domain object for changes made to subtopic_page object.
The allowed commands, together with the attributes:
- 'create_new' (with topic_id, subtopic_id)
- 'update_subtopic_page_property' (
with property_name, new_value, old_value, subtopic_id).
"""
# The allowed list of subtopic page properties which can be used in
# update_subtopic_page_property command.
SUBTOPIC_PAGE_PROPERTIES = (
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML,
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO,
SUBTOPIC_PAGE_PROPERTY_PAGE_WRITTEN_TRANSLATIONS)
ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': ['topic_id', 'subtopic_id'],
'optional_attribute_names': [],
'user_id_attribute_names': []
}, {
'name': CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY,
'required_attribute_names': [
'property_name', 'new_value', 'old_value', 'subtopic_id'],
'optional_attribute_names': [],
'user_id_attribute_names': [],
'allowed_values': {'property_name': SUBTOPIC_PAGE_PROPERTIES}
}]
class SubtopicPageContents(python_utils.OBJECT):
"""Domain object for the contents on a subtopic page."""
def __init__(
self, subtitled_html, recorded_voiceovers, written_translations):
"""Constructs a SubtopicPageContents domain object.
Args:
subtitled_html: SubtitledHtml. The html data being displayed on
the page.
recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for
the subtopic page content and their translations in different
languages.
written_translations: WrittenTranslations. The text translations of
the subtopic page content.
"""
self.subtitled_html = subtitled_html
self.recorded_voiceovers = recorded_voiceovers
self.written_translations = written_translations
def validate(self):
"""Validates the SubtopicPageContentsObject, verifying that all
fields are of the correct type.
"""
self.subtitled_html.validate()
content_ids = set([self.subtitled_html.content_id])
self.recorded_voiceovers.validate(content_ids)
self.written_translations.validate(content_ids)
@classmethod
def create_default_subtopic_page_contents(cls):
"""Creates a default subtopic page contents object.
Returns:
SubtopicPageContents. A default object.
"""
content_id = feconf.DEFAULT_SUBTOPIC_PAGE_CONTENT_ID
return cls(
state_domain.SubtitledHtml.create_default_subtitled_html(
content_id),
state_domain.RecordedVoiceovers.from_dict(
{'voiceovers_mapping': {content_id: {}}}),
state_domain.WrittenTranslations.from_dict(
{'translations_mapping': {content_id: {}}}))
def to_dict(self):
"""Returns a dict representing this SubtopicPageContents domain object.
Returns:
dict. A dict, mapping all fields of SubtopicPageContents instance.
"""
return {
'subtitled_html': self.subtitled_html.to_dict(),
'recorded_voiceovers': self.recorded_voiceovers.to_dict(),
'written_translations': self.written_translations.to_dict()
}
@classmethod
def from_dict(cls, page_contents_dict):
"""Creates a subtopic page contents object from a dictionary.
Args:
page_contents_dict: dict. The dict representation of
SubtopicPageContents object.
Returns:
SubtopicPageContents. The corresponding object.
"""
page_contents = state_domain.SubtitledHtml.from_dict(
page_contents_dict['subtitled_html'])
page_contents.validate()
return cls(
page_contents,
state_domain.RecordedVoiceovers.from_dict(page_contents_dict[
'recorded_voiceovers']),
state_domain.WrittenTranslations.from_dict(page_contents_dict[
'written_translations']))
class SubtopicPage(python_utils.OBJECT):
"""Domain object for a Subtopic page."""
def __init__(
self, subtopic_page_id, topic_id, page_contents,
page_contents_schema_version, language_code, version):
"""Constructs a SubtopicPage domain object.
Args:
subtopic_page_id: str. The unique ID of the subtopic page.
topic_id: str. The ID of the topic that this subtopic is a part of.
page_contents: SubtopicPageContents. The html and audio
translations to be surfaced to the learner.
page_contents_schema_version: int. The schema version for the page
contents object.
language_code: str. The ISO 639-1 code for the language this
subtopic page is written in.
version: int. The current version of the subtopic.
"""
self.id = subtopic_page_id
self.topic_id = topic_id
self.page_contents = page_contents
self.page_contents_schema_version = page_contents_schema_version
self.language_code = language_code
self.version = version
def to_dict(self):
"""Returns a dict representing this SubtopicPage domain object.
Returns:
dict. A dict, mapping all fields of SubtopicPage instance.
"""
return {
'id': self.id,
'topic_id': self.topic_id,
'page_contents': self.page_contents.to_dict(),
'page_contents_schema_version': self.page_contents_schema_version,
'language_code': self.language_code,
'version': self.version
}
@classmethod
def get_subtopic_page_id(cls, topic_id, subtopic_id):
"""Returns the subtopic page id from the topic_id and subtopic_id.
Args:
topic_id: str. The id of the topic that the subtopic is a part of.
subtopic_id: int. The id of the subtopic.
Returns:
str. The subtopic_page_id calculated from the given values.
"""
return '%s-%s' % (topic_id, subtopic_id)
@classmethod
def create_default_subtopic_page(cls, subtopic_id, topic_id):
"""Creates a SubtopicPage object with default values.
Args:
subtopic_id: str. ID of the subtopic.
topic_id: str. The ID of the topic to which this page is linked.
Returns:
SubtopicPage. A subtopic object with given id, topic_id and default
page contents field.
"""
subtopic_page_id = cls.get_subtopic_page_id(topic_id, subtopic_id)
return cls(
subtopic_page_id, topic_id,
SubtopicPageContents.create_default_subtopic_page_contents(),
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION,
constants.DEFAULT_LANGUAGE_CODE, 0)
@classmethod
def _convert_page_contents_v1_dict_to_v2_dict(cls, page_contents_dict):
"""Converts v1 SubtopicPage Contents schema to the v2 schema.
v2 schema introduces the new schema for Math components.
Args:
page_contents_dict: dict. A dict used to initialize a SubtopicPage
domain object.
Returns:
dict. The converted page_contents_dict.
"""
page_contents_dict['written_translations'] = (
state_domain.WrittenTranslations.
convert_html_in_written_translations(
page_contents_dict['written_translations'],
html_validation_service.
add_math_content_to_math_rte_components))
page_contents_dict['subtitled_html']['html'] = (
html_validation_service.
add_math_content_to_math_rte_components(
page_contents_dict['subtitled_html']['html']))
return page_contents_dict
@classmethod
def update_page_contents_from_model(
cls, versioned_page_contents, current_version):
"""Converts the page_contents blob contained in the given
versioned_page_contents dict from current_version to
current_version + 1. Note that the versioned_page_contents being
passed in is modified in-place.
Args:
versioned_page_contents: dict. A dict with two keys:
- schema_version: str. The schema version for the
page_contents dict.
- page_contents: dict. The dict comprising the subtopic page
contents.
current_version: int. The current schema version of page_contents.
"""
versioned_page_contents['schema_version'] = current_version + 1
conversion_fn = getattr(
cls, '_convert_page_contents_v%s_dict_to_v%s_dict' % (
current_version, current_version + 1))
versioned_page_contents['page_contents'] = conversion_fn(
versioned_page_contents['page_contents'])
def get_subtopic_id_from_subtopic_page_id(self):
"""Returns the id from the subtopic page id of the object.
Returns:
int. The subtopic_id of the object.
"""
return int(self.id[len(self.topic_id) + 1:])
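# Example (illustrative): for topic_id 'topic1' and subtopic_id 3,
# get_subtopic_page_id() returns 'topic1-3', and this method recovers 3
# by slicing off the 'topic1-' prefix (len(topic_id) + 1 characters).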
def update_page_contents_html(self, new_page_contents_html):
"""The new value for the html data field.
Args:
new_page_contents_html: SubtitledHtml. The new html for the subtopic
page.
"""
self.page_contents.subtitled_html = new_page_contents_html
def update_page_contents_audio(self, new_page_contents_audio):
"""The new value for the recorded_voiceovers data field.
Args:
new_page_contents_audio: RecordedVoiceovers. The new audio for
the subtopic page.
"""
self.page_contents.recorded_voiceovers = new_page_contents_audio
def update_page_contents_written_translations(
self, new_page_written_translations_dict):
"""The new value for the written_translations data field.
Args:
new_page_written_translations_dict: dict. The new translation for
the subtopic page.
"""
self.page_contents.written_translations = (
state_domain.WrittenTranslations.from_dict(
new_page_written_translations_dict))
def validate(self):
"""Validates various properties of the SubtopicPage object.
Raises:
ValidationError. One or more attributes of the subtopic page are
invalid.
"""
if not isinstance(self.topic_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected topic_id to be a string, received %s' %
self.topic_id)
if not isinstance(self.version, int):
raise utils.ValidationError(
'Expected version number to be an int, received %s' %
self.version)
self.page_contents.validate()
if not isinstance(self.page_contents_schema_version, int):
raise utils.ValidationError(
'Expected page contents schema version to be an integer, '
'received %s' % self.page_contents_schema_version)
if (
self.page_contents_schema_version !=
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION):
raise utils.ValidationError(
'Expected page contents schema version to be %s, received %s'
% (
feconf.CURRENT_SUBTOPIC_PAGE_CONTENTS_SCHEMA_VERSION,
self.page_contents_schema_version)
)
if not isinstance(self.language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language code to be a string, received %s' %
self.language_code)
if not any([self.language_code == lc['code']
for lc in constants.SUPPORTED_CONTENT_LANGUAGES]):
raise utils.ValidationError(
'Invalid language code: %s' % self.language_code)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
@ops.RegisterGradient("Pack")
def _PackGrad(op, grad):
"""Gradient for pack op."""
return array_ops.unpack(grad, num=op.get_attr("N"))
@ops.RegisterGradient("Unpack")
def _UnpackGrad(_, *grads):
"""Gradient for unpack op."""
return array_ops.pack(grads)
@ops.RegisterGradient("Concat")
def _ConcatGrad(op, grad):
"""Gradient for concat op."""
def _CreateDenseMaskAndBegin(sizes, concat_dim):
"""Create variables for iteratively slicing a dense gradients tensor."""
# Since shape is 1-D, shape_of_shape = [rank-of-inputs]
shape_of_shape = array_ops.shape(sizes[0])
# Make a vector of length equal to the input's dimensions,
# with 0's everywhere and 1 in the concat dim position.
# Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
mask = array_ops.concat(0,
[array_ops.fill(
array_ops.expand_dims(concat_dim, 0), 0),
[1],
array_ops.fill(
shape_of_shape - concat_dim - 1, 0)])
begin = array_ops.fill(shape_of_shape, 0)
return mask, begin
# Degenerate concatenation, just return grad.
if len(op.inputs) == 2:
return [None, grad]
concat_dim = op.inputs[0]
out_grads = []
if isinstance(grad, ops.Tensor):
# Get the inputs' tensor shapes
sizes = array_ops.shape_n(op.inputs[1:])
# pylint: disable=protected-access
offset = gen_array_ops._concat_offset(concat_dim, sizes)
# pylint: enable=protected-access
for (begin, size) in zip(offset, sizes):
out_grads.append(array_ops.slice(grad, begin, size))
elif isinstance(grad, ops.IndexedSlices):
concat_dim_static = tensor_util.constant_value(concat_dim)
if concat_dim_static is None:
raise ValueError("Can only compute IndexedSlices gradient with "
"statically-known concat_dim")
# Get the inputs' tensor shapes
sizes = [array_ops.shape(x) for x in op.inputs[1:]]
if concat_dim_static > 0:
# IndexedSlices, concat_dim > 0. Each input gets IndexedSlices gradients
# with all the indices, but with grad.values sliced accordingly. This
# is like the Tensor case, except shape(grad.values)[0] is not equal to
# shape(sizes[i])[0], since only a subset of the dim-0 values are stored.
mask, begin = _CreateDenseMaskAndBegin(sizes, concat_dim)
for size in sizes:
new_values = array_ops.slice(
grad.values,
begin,
array_ops.concat(0, [[-1], array_ops.slice(size, [1], [-1])]))
out_grads.append(
ops.IndexedSlices(new_values, grad.indices, size))
# Lint complains begin = begin + ...
begin = math_ops.add(begin, size * mask)
else:
# IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
# only for the relevant indices.
start = constant_op.constant(0, dtype=grad.indices.dtype)
for size in sizes:
size_concat_dim = array_ops.gather(size, concat_dim)
if size_concat_dim.dtype != grad.indices.dtype:
size_concat_dim = math_ops.cast(size_concat_dim,
dtype=grad.indices.dtype)
end = start + size_concat_dim
# Compute the 1-D Tensor of indices relevant for this input.
indices_to_select = array_ops.squeeze(
array_ops.where(math_ops.logical_and(grad.indices >= start,
grad.indices < end)),
squeeze_dims=[1])
new_indices = array_ops.gather(grad.indices, indices_to_select) - start
new_values = array_ops.gather(grad.values, indices_to_select)
out_grads.append(
ops.IndexedSlices(new_values, new_indices, size))
start = end
else:
raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))
return [None] + out_grads
ops.NoGradient("ConcatOffset")
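# A small worked sketch of _ConcatGrad in the dense case (assuming the legacy
# concat(concat_dim, values) API used throughout this file): for
#   y = concat(1, [a, b]) with a of shape [2, 3] and b of shape [2, 5],
# ConcatOffset yields begins [0, 0] and [0, 3], so the incoming grad of shape
# [2, 8] is sliced back into pieces of shape [2, 3] and [2, 5], one per input.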
@ops.RegisterGradient("Slice")
def _SliceGrad(op, grad):
"""Gradient for Slice op."""
# Create an Nx2 padding where the first column represents how many
# zeros are to be prepended for each dimension, and the second
# column indicates how many zeros are appended.
#
# The number of zeros to append is the shape of the input
# elementwise-subtracted by both the begin vector and sizes vector.
#
# Some more reshaping is needed to assemble this tensor with the
# right dimensions.
input_vec = op.inputs[0]
begin_vec = op.inputs[1]
input_rank = array_ops.rank(input_vec)
slice_size = array_ops.shape(op.outputs[0])
shape = array_ops.pack([input_rank, 1])
before_pad = array_ops.reshape(begin_vec, shape)
after_pad = array_ops.reshape(
array_ops.shape(input_vec) - slice_size - begin_vec, shape)
paddings = array_ops.concat(1, [before_pad, after_pad])
return array_ops.pad(grad, paddings), None, None
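# Worked sketch with assumed shapes: for x of shape [5, 7], begin = [1, 2] and
# an output slice of shape [2, 3], before_pad = [[1], [2]],
# after_pad = [[5 - 2 - 1], [7 - 3 - 2]] = [[2], [2]], so
# paddings = [[1, 2], [2, 2]] and the sliced grad is zero-padded back to [5, 7].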
@ops.RegisterGradient("Split")
def _SplitGrad(op, *grads):
return None, array_ops.concat(op.inputs[0], list(grads))
ops.NoGradient("Const")
@ops.RegisterGradient("Diag")
def _DiagGrad(_, grad):
return array_ops.diag_part(grad)
@ops.RegisterGradient("DiagPart")
def _DiagPartGrad(_, grad):
return array_ops.diag(grad)
# Edit Distance has no gradient (but can be used to eval seq2seq or CTC).
ops.NoGradient("EditDistance")
@ops.RegisterGradient("Fill")
def _FillGrad(_, grad):
return None, math_ops.reduce_sum(grad)
ops.NoGradient("ZerosLike")
@ops.RegisterGradient("Gather")
def _GatherGrad(op, grad):
if op.inputs[0].get_shape().is_fully_defined():
dense_shape = constant_op.constant(op.inputs[0].get_shape().as_list())
values_shape = [-1] + op.inputs[0].get_shape()[1:].as_list()
else:
# op.inputs[0] can be large, so colocate the shape calculation with it.
with ops.colocate_with(op.inputs[0]):
dense_shape = array_ops.shape(op.inputs[0])
values_shape = array_ops.concat(0, [[-1], dense_shape[1:]])
values = array_ops.reshape(grad, values_shape)
indices = array_ops.reshape(op.inputs[1], [-1])
return [ops.IndexedSlices(values, indices, dense_shape), None]
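# Note: the gather gradient is expressed as IndexedSlices (a sparse container
# of the selected rows) rather than a dense scatter-add, which keeps the
# backward pass cheap for large embedding-style parameters.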
@ops.RegisterGradient("Identity")
def _IdGrad(_, grad):
return grad
@ops.RegisterGradient("RefIdentity")
def _RefIdGrad(_, grad):
return grad
ops.NoGradient("StopGradient")
@ops.RegisterGradient("Reshape")
def _ReshapeGrad(op, grad):
return [array_ops.reshape(grad, array_ops.shape(op.inputs[0])), None]
ops.NoGradient("InvertPermutation")
def _ReshapeToInput(op, grad):
"""Reshapes the gradient to the shape of the original input."""
return array_ops.reshape(grad, array_ops.shape(op.inputs[0]))
@ops.RegisterGradient("ExpandDims")
def _ExpandDimsGrad(op, grad):
return [_ReshapeToInput(op, grad), None]
@ops.RegisterGradient("Squeeze")
def _SqueezeGrad(op, grad):
return _ReshapeToInput(op, grad)
@ops.RegisterGradient("Transpose")
def _TransposeGrad(op, grad):
"""Returns unshuffle(grad)."""
p = op.inputs[1]
return [array_ops.transpose(grad, array_ops.invert_permutation(p)), None]
ops.NoGradient("Shape")
ops.NoGradient("ShapeN")
ops.NoGradient("Rank")
ops.NoGradient("Size")
@ops.RegisterGradient("Tile")
def _TileGrad(op, grad):
"""Sum reduces grad along the tiled dimensions."""
assert isinstance(grad, ops.Tensor)
input_shape = array_ops.shape(op.inputs[0])
# We interleave multiples and input_shape to get split_shape,
# reshape grad to split_shape, and reduce along all even
# dimensions (the tiled dimensions) to get the result
# with shape input_shape. For example
# input_shape = [20, 30, 40]
# multiples = [2, 3, 4]
# split_shape = [2, 20, 3, 30, 4, 40]
# axes = [0, 2, 4]
split_shape = array_ops.reshape(array_ops.transpose(
array_ops.pack([op.inputs[1], input_shape])), [-1])
axes = math_ops.range(0, array_ops.size(split_shape), 2)
input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
# Fix shape inference
input_grad.set_shape(op.inputs[0].get_shape())
return [input_grad, None]
ops.NoGradient("TileGrad")
ops.NoGradient("BroadcastGradientArgs")
@ops.RegisterGradient("Pad")
def _PadGrad(op, grad):
"""Gradient for Pad."""
# Pad introduces values around the original tensor, so the gradient function
  # slices the original shape out of the gradient.
x = op.inputs[0]
a = op.inputs[1] # [Rank(x), 2]
# Takes a slice of a. The 1st column. [Rank(x), 1].
pad_before = array_ops.slice(a, [0, 0],
array_ops.pack([array_ops.rank(x), 1]))
# Make it a 1-D tensor.
begin = array_ops.reshape(pad_before, [-1])
sizes = array_ops.shape(x)
return array_ops.slice(grad, begin, sizes), None
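# Worked sketch with assumed shapes: if x has shape [3, 4] and
# paddings a = [[1, 2], [0, 3]], the padded output has shape [6, 7];
# pad_before is the first column [[1], [0]], so begin = [1, 0], sizes = [3, 4]
# and the gradient of x is grad[1:4, 0:4].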
# ReverseSequence is just a permutation. The gradient permutes back.
@ops.RegisterGradient("ReverseSequence")
def _ReverseSequenceGrad(op, grad):
seq_lengths = op.inputs[1]
return [array_ops.reverse_sequence(grad,
batch_dim=op.get_attr("batch_dim"),
seq_dim=op.get_attr("seq_dim"),
seq_lengths=seq_lengths),
None]
@ops.RegisterGradient("Reverse")
def _ReverseGrad(op, grad):
reverse_dims = op.inputs[1]
return array_ops.reverse(grad, reverse_dims), None
@ops.RegisterGradient("SpaceToDepth")
def _SpaceToDepthGrad(op, grad):
# Its gradient is the opposite op: DepthToSpace.
block_size = op.get_attr("block_size")
return array_ops.depth_to_space(grad, block_size)
@ops.RegisterGradient("DepthToSpace")
def _DepthToSpaceGrad(op, grad):
# Its gradient is the opposite op: SpaceToDepth.
block_size = op.get_attr("block_size")
return array_ops.space_to_depth(grad, block_size)
ops.NoGradient("OneHot")
@ops.RegisterGradient("MirrorPad")
def _MirrorPadGrad(op, grad):
mode = op.get_attr("mode")
# pylint: disable=protected-access
return [gen_array_ops._mirror_pad_grad(grad, op.inputs[1], mode=mode), None]
# pylint: enable=protected-access
@ops.RegisterGradient("MirrorPadGrad")
def _MirrorPadGradGrad(op, grad):
mode = op.get_attr("mode")
# pylint: disable=protected-access
return [gen_array_ops._mirror_pad(grad, op.inputs[1], mode=mode), None]
# pylint: enable=protected-access
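# A minimal usage sketch (assuming the legacy TF 0.x graph API targeted by this
# file): the functions above are looked up by op type name when gradients are
# requested, e.g.
#
#   from tensorflow.python.framework import dtypes
#   from tensorflow.python.ops import gradients
#   x = array_ops.placeholder(dtypes.float32, shape=[2, 3])
#   y = array_ops.transpose(x, [1, 0])
#   dx, = gradients.gradients(y, [x])  # dispatches to _TransposeGrad above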
|
|
import glob
import logging
import os
import sys
from urlparse import urljoin
import requests
from requests.exceptions import ConnectionError
from invoke import ctask as task, Collection
from invocations.testing import test
from tessera import app, db, config
from tessera_client.api.model import Section
from tessera.importer.graphite import GraphiteDashboardImporter
from tessera.importer.json import JsonImporter, JsonExporter
from werkzeug.serving import run_simple
import flask
from flask.ext import migrate
warn = logging.WARN
log = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(levelname)-8s [%(name)s] %(message)s'
)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(warn)
logging.getLogger('sqlalchemy.engine').setLevel(warn)
DEFAULT_TESSERA_URL = 'http://{0}:{1}'.format(config['SERVER_ADDRESS'], config['SERVER_PORT'])
DEFAULT_GRAPHITE_URL = config['GRAPHITE_URL']
DEFAULT_MIGRATION_DIR = config['MIGRATION_DIR']
@task
def run(c):
"""Launch the server."""
run_simple(config['SERVER_ADDRESS'], config['SERVER_PORT'], app, use_reloader=True)
# =============================================================================
# db collection
# inv db.init
# inv db.init_migrations
# inv db.current
# inv db.revisions
# inv db.migrate
# inv db.upgrade
# inv db.downgrade
# inv db.stamp
# inv db.history
# =============================================================================
@task
def initdb(c):
"""
Deprecated, use db.init instead.
"""
db.create_all()
@task(name='init')
def db_init(c):
"""
Set up a new, empty database.
"""
db.create_all()
@task(name='init_migrations')
def db_init_migrations(c, dir=None):
"""
Update the project to support migrations.
"""
with app.app_context():
migrate.init(dir)
@task(name='current')
def db_current(c, dir=DEFAULT_MIGRATION_DIR):
"""
Show current migration revision.
"""
with app.app_context():
migrate.current(directory=dir)
@task(name='revision')
def db_revision(c, dir=DEFAULT_MIGRATION_DIR):
"""
Generate new empty revision script.
"""
with app.app_context():
migrate.revision(directory=dir)
@task(name='migrate')
def db_migrate(c, dir=DEFAULT_MIGRATION_DIR):
"""
Generate new autofilled migration.
"""
with app.app_context():
migrate.migrate(directory=dir)
@task(name='upgrade')
def db_upgrade(c, dir=DEFAULT_MIGRATION_DIR):
"""
Run any migrations needed make database current.
"""
with app.app_context():
migrate.upgrade(directory=dir)
@task(name='downgrade')
def db_downgrade(c, dir=DEFAULT_MIGRATION_DIR):
"""
Downgrade database to a specific revision.
"""
with app.app_context():
migrate.downgrade(directory=dir)
@task(name='stamp')
def db_stamp(c, dir=DEFAULT_MIGRATION_DIR):
"""
Set database revision to a specific value.
"""
    with app.app_context():
        migrate.stamp(directory=dir)
@task(name='history')
def db_history(c, dir=DEFAULT_MIGRATION_DIR):
"""
List migration history.
"""
with app.app_context():
migrate.history(directory=dir)
# =============================================================================
# graphite tasks
# inv graphite.import
# inv graphite.export
# =============================================================================
@task(name='import')
def import_graphite_dashboards(
c, query='', layout=Section.Layout.FLUID, columns=4, overwrite=False,
graphite=DEFAULT_GRAPHITE_URL, tessera=DEFAULT_TESSERA_URL
):
"""
Import dashboards from a Graphite vanilla dashboard.
"""
log.info('Importing dashboards from graphite')
importer = GraphiteDashboardImporter(graphite, tessera, config['GRAPHITE_AUTH'])
importer.import_dashboards(
query, overwrite=overwrite, layout=layout, columns=int(columns)
)
@task(name='dump')
def dump_graphite_dashboards(c, query='', graphite=DEFAULT_GRAPHITE_URL, tessera=DEFAULT_TESSERA_URL):
"""
Dump Graphite dashboards to stdout in Tessera JSON format.
"""
    log.info('Dumping dashboards from graphite')
importer = GraphiteDashboardImporter(graphite, tessera)
importer.dump_dashboards(query)
# =============================================================================
# json tasks
# inv json.import
# inv json.export
# =============================================================================
@task(name='export')
def export_json(c, dir, tag=None, graphite=DEFAULT_GRAPHITE_URL, tessera=DEFAULT_TESSERA_URL):
"""
Export dashboards as JSON to a local directory.
"""
msg = 'Exporting dashboards (tagged: {0}) as JSON to directory {1}'
log.info(msg.format(tag, dir))
exporter = JsonExporter(graphite, tessera)
exporter.export(dir, tag)
@task(name='import')
def import_json(c, pattern, graphite=DEFAULT_GRAPHITE_URL, tessera=DEFAULT_TESSERA_URL):
"""
Import dashboards from a directory previously used for exporting.
"""
    log.info('Importing dashboards from {0}'.format(pattern))
files = glob.glob(pattern)
log.info('Found {0} files to import'.format(len(files)))
importer = JsonImporter(graphite, tessera)
importer.import_files(files)
# =============================================================================
# test tasks
# inv test.unit
# inv test.integration
# =============================================================================
@task
def integration(c):
"""
Run high level integration test suite.
"""
return test(c, opts="--tests=integration")
tests = Collection('test')
tests.add_task(test, name='unit', default=True)
tests.add_task(integration)
@task
def copy(c, source_id, source_uri=None, destination_uri=None):
"""
Copy a dashboard (via API) between two running Tessera instances.
:param str source_id:
Source dashboard ID, e.g. if copying a dashboard that lives at
``http://mytessera.com/dashboards/123``, this would simply be ``123``.
:param str source_uri:
Source base URI, e.g. ``http://mytessera.com`` or
``https://tessera.example.com:8080``. Will pull default value from the
``TESSERA_SOURCE_URI`` environment variable if not given.
:param str destination_uri:
Destination base URI, similar to ``source_uri``. Will pull default
value from ``TESSERA_DESTINATION_URI`` if not given.
"""
# Arg handling junk
missing = []
if source_uri is None:
try:
source_uri = os.environ['TESSERA_SOURCE_URI']
except KeyError:
missing.append("source")
if destination_uri is None:
try:
destination_uri = os.environ['TESSERA_DESTINATION_URI']
except KeyError:
missing.append("destination")
if missing:
sys.exit("Missing the following URI parameters: {0}".format(
', '.join("{0}_uri".format(x) for x in missing)))
# Actual copy
endpoint = '/api/dashboard/'
source = reduce(urljoin, (source_uri, endpoint, source_id))
try:
original = requests.get(source, params={'definition': 'true'})
except ConnectionError as e:
sys.exit("Unable to connect to {0}: {1}".format(source, e))
dest = urljoin(destination_uri, endpoint)
try:
response = requests.post(dest, data=original.content,
headers={'Content-Type': 'application/json'})
except ConnectionError as e:
sys.exit("Unable to connect to {0}: {1}".format(dest, e))
new_uri = urljoin(dest, response.json()['view_href'])
print("{0} -> {1}".format(source, new_uri))
ns = Collection(
run,
copy,
initdb,
tests,
Collection('db',
db_init,
db_init_migrations,
db_current,
db_revision,
db_migrate,
db_upgrade,
db_downgrade,
db_stamp,
db_history
),
Collection('json', import_json, export_json),
Collection('graphite',
import_graphite_dashboards,
dump_graphite_dashboards,
),
)
|
|
##
# @file PlaceDB.py
# @author Yibo Lin
# @date Apr 2018
# @brief placement database
#
import sys
import os
import re
import math
import time
import numpy as np
import torch
import logging
import Params
import dreamplace
import dreamplace.ops.place_io.place_io as place_io
import dreamplace.ops.fence_region.fence_region as fence_region
import pdb
datatypes = {
'float32' : np.float32,
'float64' : np.float64
}
class PlaceDB (object):
"""
@brief placement database
"""
def __init__(self):
"""
initialization
To avoid the usage of list, I flatten everything.
"""
self.rawdb = None # raw placement database, a C++ object
self.num_physical_nodes = 0 # number of real nodes, including movable nodes, terminals, and terminal_NIs
self.num_terminals = 0 # number of terminals, essentially fixed macros
self.num_terminal_NIs = 0 # number of terminal_NIs that can be overlapped, essentially IO pins
self.node_name2id_map = {} # node name to id map, cell name
self.node_names = None # 1D array, cell name
self.node_x = None # 1D array, cell position x
self.node_y = None # 1D array, cell position y
self.node_orient = None # 1D array, cell orientation
self.node_size_x = None # 1D array, cell width
self.node_size_y = None # 1D array, cell height
self.node2orig_node_map = None # some fixed cells may have non-rectangular shapes; we flatten them and create new nodes
# this map maps the current multiple node ids into the original one
self.pin_direct = None # 1D array, pin direction IO
self.pin_offset_x = None # 1D array, pin offset x to its node
self.pin_offset_y = None # 1D array, pin offset y to its node
self.net_name2id_map = {} # net name to id map
self.net_names = None # net name
self.net_weights = None # weights for each net
self.net2pin_map = None # array of 1D array, each row stores pin id
self.flat_net2pin_map = None # flatten version of net2pin_map
self.flat_net2pin_start_map = None # starting index of each net in flat_net2pin_map
self.node2pin_map = None # array of 1D array, contains pin id of each node
self.flat_node2pin_map = None # flatten version of node2pin_map
self.flat_node2pin_start_map = None # starting index of each node in flat_node2pin_map
self.pin2node_map = None # 1D array, contain parent node id of each pin
self.pin2net_map = None # 1D array, contain parent net id of each pin
self.rows = None # NumRows x 4 array, stores xl, yl, xh, yh of each row
self.regions = None # array of 1D array, placement regions like FENCE and GUIDE
self.flat_region_boxes = None # flat version of regions
self.flat_region_boxes_start = None # start indices of regions, length of num regions + 1
self.node2fence_region_map = None # map cell to a region, maximum integer if no fence region
self.xl = None
self.yl = None
self.xh = None
self.yh = None
self.row_height = None
self.site_width = None
self.bin_size_x = None
self.bin_size_y = None
self.num_bins_x = None
self.num_bins_y = None
self.num_movable_pins = None
self.total_movable_node_area = None # total movable cell area
self.total_fixed_node_area = None # total fixed cell area
self.total_space_area = None # total placeable space area excluding fixed cells
        # enable filler cells
        # the idea is from ePlace and RePlAce
self.total_filler_node_area = None
self.num_filler_nodes = None
self.routing_grid_xl = None
self.routing_grid_yl = None
self.routing_grid_xh = None
self.routing_grid_yh = None
self.num_routing_grids_x = None
self.num_routing_grids_y = None
self.num_routing_layers = None
self.unit_horizontal_capacity = None # per unit distance, projected to one layer
self.unit_vertical_capacity = None # per unit distance, projected to one layer
self.unit_horizontal_capacities = None # per unit distance, layer by layer
self.unit_vertical_capacities = None # per unit distance, layer by layer
self.initial_horizontal_demand_map = None # routing demand map from fixed cells, indexed by (grid x, grid y), projected to one layer
self.initial_vertical_demand_map = None # routing demand map from fixed cells, indexed by (grid x, grid y), projected to one layer
self.dtype = None
def scale_pl(self, scale_factor):
"""
@brief scale placement solution only
@param scale_factor scale factor
"""
self.node_x *= scale_factor
self.node_y *= scale_factor
def scale(self, scale_factor):
"""
@brief scale distances
@param scale_factor scale factor
"""
logging.info("scale coordinate system by %g" % (scale_factor))
self.scale_pl(scale_factor)
self.node_size_x *= scale_factor
self.node_size_y *= scale_factor
self.pin_offset_x *= scale_factor
self.pin_offset_y *= scale_factor
self.xl *= scale_factor
self.yl *= scale_factor
self.xh *= scale_factor
self.yh *= scale_factor
self.row_height *= scale_factor
self.site_width *= scale_factor
self.rows *= scale_factor
self.total_space_area *= scale_factor * scale_factor # this is area
self.flat_region_boxes *= scale_factor
# may have performance issue
# I assume there are not many boxes
for i in range(len(self.regions)):
self.regions[i] *= scale_factor
def sort(self):
"""
        @brief Sort nets by degree.
        Sort the pin array such that pins belonging to the same net are adjacent to each other
"""
logging.info("sort nets by degree and pins by net")
# sort nets by degree
net_degrees = np.array([len(pins) for pins in self.net2pin_map])
net_order = net_degrees.argsort() # indexed by new net_id, content is old net_id
self.net_names = self.net_names[net_order]
self.net2pin_map = self.net2pin_map[net_order]
for net_id, net_name in enumerate(self.net_names):
self.net_name2id_map[net_name] = net_id
for new_net_id in range(len(net_order)):
for pin_id in self.net2pin_map[new_net_id]:
self.pin2net_map[pin_id] = new_net_id
## check
#for net_id in range(len(self.net2pin_map)):
# for j in range(len(self.net2pin_map[net_id])):
# assert self.pin2net_map[self.net2pin_map[net_id][j]] == net_id
        # sort pins such that pins belonging to the same net are adjacent to each other
pin_order = self.pin2net_map.argsort() # indexed new pin_id, content is old pin_id
self.pin2net_map = self.pin2net_map[pin_order]
self.pin2node_map = self.pin2node_map[pin_order]
self.pin_direct = self.pin_direct[pin_order]
self.pin_offset_x = self.pin_offset_x[pin_order]
self.pin_offset_y = self.pin_offset_y[pin_order]
old2new_pin_id_map = np.zeros(len(pin_order), dtype=np.int32)
for new_pin_id in range(len(pin_order)):
old2new_pin_id_map[pin_order[new_pin_id]] = new_pin_id
for i in range(len(self.net2pin_map)):
for j in range(len(self.net2pin_map[i])):
self.net2pin_map[i][j] = old2new_pin_id_map[self.net2pin_map[i][j]]
for i in range(len(self.node2pin_map)):
for j in range(len(self.node2pin_map[i])):
self.node2pin_map[i][j] = old2new_pin_id_map[self.node2pin_map[i][j]]
## check
#for net_id in range(len(self.net2pin_map)):
# for j in range(len(self.net2pin_map[net_id])):
# assert self.pin2net_map[self.net2pin_map[net_id][j]] == net_id
#for node_id in range(len(self.node2pin_map)):
# for j in range(len(self.node2pin_map[node_id])):
# assert self.pin2node_map[self.node2pin_map[node_id][j]] == node_id
@property
def num_movable_nodes(self):
"""
@return number of movable nodes
"""
return self.num_physical_nodes - self.num_terminals - self.num_terminal_NIs
@property
def num_nodes(self):
"""
@return number of movable nodes, terminals, terminal_NIs, and fillers
"""
return self.num_physical_nodes + self.num_filler_nodes
@property
def num_nets(self):
"""
@return number of nets
"""
return len(self.net2pin_map)
@property
def num_pins(self):
"""
@return number of pins
"""
return len(self.pin2net_map)
@property
def width(self):
"""
@return width of layout
"""
return self.xh-self.xl
@property
def height(self):
"""
@return height of layout
"""
return self.yh-self.yl
@property
def area(self):
"""
@return area of layout
"""
return self.width*self.height
def bin_xl(self, id_x):
"""
@param id_x horizontal index
@return bin xl
"""
return self.xl+id_x*self.bin_size_x
def bin_xh(self, id_x):
"""
@param id_x horizontal index
@return bin xh
"""
return min(self.bin_xl(id_x)+self.bin_size_x, self.xh)
def bin_yl(self, id_y):
"""
@param id_y vertical index
@return bin yl
"""
return self.yl+id_y*self.bin_size_y
def bin_yh(self, id_y):
"""
@param id_y vertical index
@return bin yh
"""
return min(self.bin_yl(id_y)+self.bin_size_y, self.yh)
def num_bins(self, l, h, bin_size):
"""
@brief compute number of bins
@param l lower bound
@param h upper bound
@param bin_size bin size
@return number of bins
"""
return int(np.ceil((h-l)/bin_size))
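    # e.g. num_bins(0, 10, 3) == 4: bins cover [0, 3), [3, 6), [6, 9), [9, 10]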
def bin_centers(self, l, h, bin_size):
"""
@brief compute bin centers
@param l lower bound
@param h upper bound
@param bin_size bin size
@return array of bin centers
"""
num_bins = self.num_bins(l, h, bin_size)
centers = np.zeros(num_bins, dtype=self.dtype)
for id_x in range(num_bins):
bin_l = l+id_x*bin_size
bin_h = min(bin_l+bin_size, h)
centers[id_x] = (bin_l+bin_h)/2
return centers
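    # e.g. bin_centers(0, 10, 3) == [1.5, 4.5, 7.5, 9.5]; the last bin is
    # clamped to the upper bound, so its center is (9 + 10) / 2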
@property
def routing_grid_size_x(self):
return (self.routing_grid_xh - self.routing_grid_xl) / self.num_routing_grids_x
@property
def routing_grid_size_y(self):
return (self.routing_grid_yh - self.routing_grid_yl) / self.num_routing_grids_y
def net_hpwl(self, x, y, net_id):
"""
@brief compute HPWL of a net
@param x horizontal cell locations
@param y vertical cell locations
@return hpwl of a net
"""
pins = self.net2pin_map[net_id]
nodes = self.pin2node_map[pins]
hpwl_x = np.amax(x[nodes]+self.pin_offset_x[pins]) - np.amin(x[nodes]+self.pin_offset_x[pins])
hpwl_y = np.amax(y[nodes]+self.pin_offset_y[pins]) - np.amin(y[nodes]+self.pin_offset_y[pins])
return (hpwl_x+hpwl_y)*self.net_weights[net_id]
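    # HPWL of a net = (max pin x - min pin x) + (max pin y - min pin y),
    # where a pin position is its owning cell position plus the pin offset;
    # the result is scaled by the net weight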
def hpwl(self, x, y):
"""
@brief compute total HPWL
@param x horizontal cell locations
@param y vertical cell locations
@return hpwl of all nets
"""
wl = 0
for net_id in range(len(self.net2pin_map)):
wl += self.net_hpwl(x, y, net_id)
return wl
def overlap(self, xl1, yl1, xh1, yh1, xl2, yl2, xh2, yh2):
"""
@brief compute overlap between two boxes
@return overlap area between two rectangles
"""
return max(min(xh1, xh2)-max(xl1, xl2), 0.0) * max(min(yh1, yh2)-max(yl1, yl2), 0.0)
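    # e.g. overlap(0, 0, 2, 2, 1, 1, 3, 3) == 1.0: the boxes intersect in the
    # unit square [1, 2] x [1, 2]; disjoint boxes return 0.0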
def density_map(self, x, y):
"""
@brief this density map evaluates the overlap between cell and bins
@param x horizontal cell locations
@param y vertical cell locations
@return density map
"""
bin_index_xl = np.maximum(np.floor(x/self.bin_size_x).astype(np.int32), 0)
bin_index_xh = np.minimum(np.ceil((x+self.node_size_x)/self.bin_size_x).astype(np.int32), self.num_bins_x-1)
bin_index_yl = np.maximum(np.floor(y/self.bin_size_y).astype(np.int32), 0)
bin_index_yh = np.minimum(np.ceil((y+self.node_size_y)/self.bin_size_y).astype(np.int32), self.num_bins_y-1)
density_map = np.zeros([self.num_bins_x, self.num_bins_y])
for node_id in range(self.num_physical_nodes):
for ix in range(bin_index_xl[node_id], bin_index_xh[node_id]+1):
for iy in range(bin_index_yl[node_id], bin_index_yh[node_id]+1):
density_map[ix, iy] += self.overlap(
self.bin_xl(ix), self.bin_yl(iy), self.bin_xh(ix), self.bin_yh(iy),
x[node_id], y[node_id], x[node_id]+self.node_size_x[node_id], y[node_id]+self.node_size_y[node_id]
)
for ix in range(self.num_bins_x):
for iy in range(self.num_bins_y):
density_map[ix, iy] /= (self.bin_xh(ix)-self.bin_xl(ix))*(self.bin_yh(iy)-self.bin_yl(iy))
return density_map
def density_overflow(self, x, y, target_density):
"""
@brief if density of a bin is larger than target_density, consider as overflow bin
@param x horizontal cell locations
@param y vertical cell locations
@param target_density target density
@return density overflow cost
"""
density_map = self.density_map(x, y)
return np.sum(np.square(np.maximum(density_map-target_density, 0.0)))
def print_node(self, node_id):
"""
@brief print node information
@param node_id cell index
"""
logging.debug("node %s(%d), size (%g, %g), pos (%g, %g)" % (self.node_names[node_id], node_id, self.node_size_x[node_id], self.node_size_y[node_id], self.node_x[node_id], self.node_y[node_id]))
pins = "pins "
for pin_id in self.node2pin_map[node_id]:
pins += "%s(%s, %d) " % (self.node_names[self.pin2node_map[pin_id]], self.net_names[self.pin2net_map[pin_id]], pin_id)
logging.debug(pins)
def print_net(self, net_id):
"""
@brief print net information
@param net_id net index
"""
logging.debug("net %s(%d)" % (self.net_names[net_id], net_id))
pins = "pins "
for pin_id in self.net2pin_map[net_id]:
pins += "%s(%s, %d) " % (self.node_names[self.pin2node_map[pin_id]], self.net_names[self.pin2net_map[pin_id]], pin_id)
logging.debug(pins)
def print_row(self, row_id):
"""
@brief print row information
@param row_id row index
"""
logging.debug("row %d %s" % (row_id, self.rows[row_id]))
#def flatten_nested_map(self, net2pin_map):
# """
# @brief flatten an array of array to two arrays like CSV format
# @param net2pin_map array of array
# @return a pair of (elements, cumulative column indices of the beginning element of each row)
# """
# # flat netpin map, length of #pins
# flat_net2pin_map = np.zeros(len(pin2net_map), dtype=np.int32)
# # starting index in netpin map for each net, length of #nets+1, the last entry is #pins
# flat_net2pin_start_map = np.zeros(len(net2pin_map)+1, dtype=np.int32)
# count = 0
# for i in range(len(net2pin_map)):
# flat_net2pin_map[count:count+len(net2pin_map[i])] = net2pin_map[i]
# flat_net2pin_start_map[i] = count
# count += len(net2pin_map[i])
# assert flat_net2pin_map[-1] != 0
# flat_net2pin_start_map[len(net2pin_map)] = len(pin2net_map)
# return flat_net2pin_map, flat_net2pin_start_map
def read(self, params):
"""
@brief read using c++
@param params parameters
"""
self.dtype = datatypes[params.dtype]
self.rawdb = place_io.PlaceIOFunction.read(params)
self.initialize_from_rawdb(params)
def initialize_from_rawdb(self, params):
"""
@brief initialize data members from raw database
@param params parameters
"""
pydb = place_io.PlaceIOFunction.pydb(self.rawdb)
self.num_physical_nodes = pydb.num_nodes
self.num_terminals = pydb.num_terminals
self.num_terminal_NIs = pydb.num_terminal_NIs
self.node_name2id_map = pydb.node_name2id_map
self.node_names = np.array(pydb.node_names, dtype=np.string_)
# If the placer directly takes a global placement solution,
# the cell positions may still be floating point numbers.
# It is not good to use the place_io OP to round the positions.
# Currently we only support BOOKSHELF format.
use_read_pl_flag = False
if (not params.global_place_flag) and os.path.exists(params.aux_input):
filename = None
with open(params.aux_input, "r") as f:
for line in f:
line = line.strip()
if ".pl" in line:
tokens = line.split()
for token in tokens:
if token.endswith(".pl"):
filename = token
break
filename = os.path.join(os.path.dirname(params.aux_input), filename)
if filename is not None and os.path.exists(filename):
self.node_x = np.zeros(self.num_physical_nodes, dtype=self.dtype)
self.node_y = np.zeros(self.num_physical_nodes, dtype=self.dtype)
self.node_orient = np.zeros(self.num_physical_nodes, dtype=np.string_)
self.read_pl(params, filename)
use_read_pl_flag = True
if not use_read_pl_flag:
self.node_x = np.array(pydb.node_x, dtype=self.dtype)
self.node_y = np.array(pydb.node_y, dtype=self.dtype)
self.node_orient = np.array(pydb.node_orient, dtype=np.string_)
self.node_size_x = np.array(pydb.node_size_x, dtype=self.dtype)
self.node_size_y = np.array(pydb.node_size_y, dtype=self.dtype)
self.node2orig_node_map = np.array(pydb.node2orig_node_map, dtype=np.int32)
self.pin_direct = np.array(pydb.pin_direct, dtype=np.string_)
self.pin_offset_x = np.array(pydb.pin_offset_x, dtype=self.dtype)
self.pin_offset_y = np.array(pydb.pin_offset_y, dtype=self.dtype)
self.net_name2id_map = pydb.net_name2id_map
self.net_names = np.array(pydb.net_names, dtype=np.string_)
self.net2pin_map = pydb.net2pin_map
self.flat_net2pin_map = np.array(pydb.flat_net2pin_map, dtype=np.int32)
self.flat_net2pin_start_map = np.array(pydb.flat_net2pin_start_map, dtype=np.int32)
self.net_weights = np.array(pydb.net_weights, dtype=self.dtype)
self.node2pin_map = pydb.node2pin_map
self.flat_node2pin_map = np.array(pydb.flat_node2pin_map, dtype=np.int32)
self.flat_node2pin_start_map = np.array(pydb.flat_node2pin_start_map, dtype=np.int32)
self.pin2node_map = np.array(pydb.pin2node_map, dtype=np.int32)
self.pin2net_map = np.array(pydb.pin2net_map, dtype=np.int32)
self.rows = np.array(pydb.rows, dtype=self.dtype)
self.regions = pydb.regions
for i in range(len(self.regions)):
self.regions[i] = np.array(self.regions[i], dtype=self.dtype)
self.flat_region_boxes = np.array(pydb.flat_region_boxes, dtype=self.dtype)
self.flat_region_boxes_start = np.array(pydb.flat_region_boxes_start, dtype=np.int32)
self.node2fence_region_map = np.array(pydb.node2fence_region_map, dtype=np.int32)
# print(self.flat_region_boxes, self.flat_region_boxes_start, self.node2fence_region_map)
# print(self.flat_region_boxes.shape, self.flat_region_boxes_start.shape, self.node2fence_region_map.shape)
#### nonfence region is set to INT_MAX, we set it to #regions??? not compatible with other APIs
# self.node2fence_region_map = np.minimum(self.node2fence_region_map, len(self.regions))
self.xl = float(pydb.xl)
self.yl = float(pydb.yl)
self.xh = float(pydb.xh)
self.yh = float(pydb.yh)
self.row_height = float(pydb.row_height)
self.site_width = float(pydb.site_width)
self.num_movable_pins = pydb.num_movable_pins
self.total_space_area = float(pydb.total_space_area)
self.routing_grid_xl = float(pydb.routing_grid_xl)
self.routing_grid_yl = float(pydb.routing_grid_yl)
self.routing_grid_xh = float(pydb.routing_grid_xh)
self.routing_grid_yh = float(pydb.routing_grid_yh)
if pydb.num_routing_grids_x:
self.num_routing_grids_x = pydb.num_routing_grids_x
self.num_routing_grids_y = pydb.num_routing_grids_y
self.num_routing_layers = len(pydb.unit_horizontal_capacities)
self.unit_horizontal_capacity = np.array(pydb.unit_horizontal_capacities, dtype=self.dtype).sum()
self.unit_vertical_capacity = np.array(pydb.unit_vertical_capacities, dtype=self.dtype).sum()
self.unit_horizontal_capacities = np.array(pydb.unit_horizontal_capacities, dtype=self.dtype)
self.unit_vertical_capacities = np.array(pydb.unit_vertical_capacities, dtype=self.dtype)
self.initial_horizontal_demand_map = np.array(pydb.initial_horizontal_demand_map, dtype=self.dtype).reshape((-1, self.num_routing_grids_x, self.num_routing_grids_y)).sum(axis=0)
self.initial_vertical_demand_map = np.array(pydb.initial_vertical_demand_map, dtype=self.dtype).reshape((-1, self.num_routing_grids_x, self.num_routing_grids_y)).sum(axis=0)
else:
self.num_routing_grids_x = params.route_num_bins_x
self.num_routing_grids_y = params.route_num_bins_y
self.num_routing_layers = 1
self.unit_horizontal_capacity = params.unit_horizontal_capacity
self.unit_vertical_capacity = params.unit_vertical_capacity
# convert node2pin_map to array of array
for i in range(len(self.node2pin_map)):
self.node2pin_map[i] = np.array(self.node2pin_map[i], dtype=np.int32)
self.node2pin_map = np.array(self.node2pin_map)
# convert net2pin_map to array of array
for i in range(len(self.net2pin_map)):
self.net2pin_map[i] = np.array(self.net2pin_map[i], dtype=np.int32)
self.net2pin_map = np.array(self.net2pin_map)
def __call__(self, params):
"""
@brief top API to read placement files
@param params parameters
"""
tt = time.time()
self.read(params)
self.initialize(params)
logging.info("reading benchmark takes %g seconds" % (time.time()-tt))
def calc_num_filler_for_fence_region(self, region_id, node2fence_region_map, target_density):
"""
        @brief calculate the number of filler cells for a fence region
        @param region_id index of the fence region; len(self.regions) denotes the non-fence (default) region
        @param node2fence_region_map map from each cell to its fence region id
        @param target_density target placement density
        @return tuple of (num_filler, target_density, filler_size_x, filler_size_y, total movable node area, num movable nodes) for this region
"""
num_regions = len(self.regions)
node2fence_region_map = node2fence_region_map[: self.num_movable_nodes]
if region_id < len(self.regions):
fence_region_mask = node2fence_region_map == region_id
else:
fence_region_mask = node2fence_region_map >= len(self.regions)
num_movable_nodes = self.num_movable_nodes
movable_node_size_x = self.node_size_x[:num_movable_nodes][fence_region_mask]
# movable_node_size_y = self.node_size_y[:num_movable_nodes][fence_region_mask]
lower_bound = np.percentile(movable_node_size_x, 5)
upper_bound = np.percentile(movable_node_size_x, 95)
filler_size_x = np.mean(
movable_node_size_x[(movable_node_size_x >= lower_bound) & (movable_node_size_x <= upper_bound)]
)
filler_size_y = self.row_height
area = (self.xh - self.xl) * (self.yh - self.yl)
total_movable_node_area = np.sum(
self.node_size_x[:num_movable_nodes][fence_region_mask]
* self.node_size_y[:num_movable_nodes][fence_region_mask]
)
if region_id < num_regions:
            ## the placeable area is not just the fence region area; macros can overlap with the fence region, but we approximate it this way for now
region = self.regions[region_id]
placeable_area = np.sum((region[:, 2] - region[:, 0]) * (region[:, 3] - region[:, 1]))
else:
### invalid area outside the region, excluding macros? ignore overlap between fence region and macro
fence_regions = np.concatenate(self.regions, 0).astype(np.float32)
fence_regions_size_x = fence_regions[:, 2] - fence_regions[:, 0]
fence_regions_size_y = fence_regions[:, 3] - fence_regions[:, 1]
fence_region_area = np.sum(fence_regions_size_x * fence_regions_size_y)
placeable_area = (
max(self.total_space_area, self.area - self.total_fixed_node_area) - fence_region_area
)
### recompute target density based on the region utilization
utilization = min(total_movable_node_area / placeable_area, 1.0)
if target_density < utilization:
### add a few fillers to avoid divergence
target_density_fence_region = min(1, utilization + 0.01)
else:
target_density_fence_region = target_density
target_density_fence_region = max(0.35, target_density_fence_region)
total_filler_node_area = max(placeable_area * target_density_fence_region - total_movable_node_area, 0.0)
num_filler = int(round(total_filler_node_area / (filler_size_x * filler_size_y)))
logging.info(
"Region:%2d movable_node_area =%10.1f, placeable_area =%10.1f, utilization =%.3f, filler_node_area =%10.1f, #fillers =%8d, filler sizes =%2.4gx%g\n"
% (
region_id,
total_movable_node_area,
placeable_area,
utilization,
total_filler_node_area,
num_filler,
filler_size_x,
filler_size_y,
)
)
return (
num_filler,
target_density_fence_region,
filler_size_x,
filler_size_y,
total_movable_node_area,
np.sum(fence_region_mask.astype(np.float32)),
)
def initialize(self, params):
"""
@brief initialize data members after reading
@param params parameters
"""
# scale
# adjust scale_factor if not set
if params.scale_factor == 0.0 or self.site_width != 1.0:
params.scale_factor = 1.0 / self.site_width
logging.info("set scale_factor = %g, as site_width = %g" % (params.scale_factor, self.site_width))
self.scale(params.scale_factor)
content = """
================================= Benchmark Statistics =================================
#nodes = %d, #terminals = %d, # terminal_NIs = %d, #movable = %d, #nets = %d
die area = (%g, %g, %g, %g) %g
row height = %g, site width = %g
""" % (
self.num_physical_nodes, self.num_terminals, self.num_terminal_NIs, self.num_movable_nodes, len(self.net_names),
self.xl, self.yl, self.xh, self.yh, self.area,
self.row_height, self.site_width
)
# set number of bins
# derive bin dimensions by keeping the aspect ratio
# this bin setting is not for global placement, only for other steps
# global placement has its bin settings defined in global_place_stages
aspect_ratio = (self.yh - self.yl) / (self.xh - self.xl)
num_bins_x = int(math.pow(2, max(np.ceil(math.log2(math.sqrt(self.num_movable_nodes / aspect_ratio))), 0)))
num_bins_y = int(math.pow(2, max(np.ceil(math.log2(math.sqrt(self.num_movable_nodes * aspect_ratio))), 0)))
self.num_bins_x = max(params.num_bins_x, num_bins_x)
self.num_bins_y = max(params.num_bins_y, num_bins_y)
self.bin_size_x = (self.xh - self.xl) / self.num_bins_x
self.bin_size_y = (self.yh - self.yl) / self.num_bins_y
content += "num_bins = %dx%d, bin sizes = %gx%g\n" % (self.num_bins_x, self.num_bins_y, self.bin_size_x / self.row_height, self.bin_size_y / self.row_height)
# set num_movable_pins
if self.num_movable_pins is None:
self.num_movable_pins = 0
for node_id in self.pin2node_map:
if node_id < self.num_movable_nodes:
self.num_movable_pins += 1
content += "#pins = %d, #movable_pins = %d\n" % (self.num_pins, self.num_movable_pins)
# set total cell area
self.total_movable_node_area = float(np.sum(self.node_size_x[:self.num_movable_nodes]*self.node_size_y[:self.num_movable_nodes]))
# total fixed node area should exclude the area outside the layout and the area of terminal_NIs
self.total_fixed_node_area = float(np.sum(
np.maximum(
np.minimum(self.node_x[self.num_movable_nodes:self.num_physical_nodes - self.num_terminal_NIs] + self.node_size_x[self.num_movable_nodes:self.num_physical_nodes - self.num_terminal_NIs], self.xh)
- np.maximum(self.node_x[self.num_movable_nodes:self.num_physical_nodes - self.num_terminal_NIs], self.xl),
0.0) * np.maximum(
np.minimum(self.node_y[self.num_movable_nodes:self.num_physical_nodes - self.num_terminal_NIs] + self.node_size_y[self.num_movable_nodes:self.num_physical_nodes - self.num_terminal_NIs], self.yh)
- np.maximum(self.node_y[self.num_movable_nodes:self.num_physical_nodes - self.num_terminal_NIs], self.yl),
0.0)
))
content += "total_movable_node_area = %g, total_fixed_node_area = %g, total_space_area = %g\n" % (self.total_movable_node_area, self.total_fixed_node_area, self.total_space_area)
target_density = min(self.total_movable_node_area / self.total_space_area, 1.0)
if target_density > params.target_density:
logging.warn("target_density %g is smaller than utilization %g, ignored" % (params.target_density, target_density))
params.target_density = target_density
content += "utilization = %g, target_density = %g\n" % (self.total_movable_node_area / self.total_space_area, params.target_density)
# calculate fence region virtual macro
if len(self.regions) > 0:
virtual_macro_for_fence_region = [
fence_region.slice_non_fence_region(
region,
self.xl,
self.yl,
self.xh,
self.yh,
merge=True,
plot=False,
figname=f"vmacro_{region_id}_merged.png",
device="cpu",
macro_pos_x=self.node_x[self.num_movable_nodes : self.num_movable_nodes + self.num_terminals],
macro_pos_y=self.node_y[self.num_movable_nodes : self.num_movable_nodes + self.num_terminals],
macro_size_x=self.node_size_x[
self.num_movable_nodes : self.num_movable_nodes + self.num_terminals
],
macro_size_y=self.node_size_y[
self.num_movable_nodes : self.num_movable_nodes + self.num_terminals
],
)
.cpu()
.numpy()
for region_id, region in enumerate(self.regions)
]
virtual_macro_for_non_fence_region = np.concatenate(self.regions, 0)
self.virtual_macro_fence_region = virtual_macro_for_fence_region + [virtual_macro_for_non_fence_region]
# insert filler nodes
if len(self.regions) > 0:
### calculate fillers if there is fence region
self.filler_size_x_fence_region = []
self.filler_size_y_fence_region = []
self.num_filler_nodes = 0
self.num_filler_nodes_fence_region = []
self.num_movable_nodes_fence_region = []
self.total_movable_node_area_fence_region = []
self.target_density_fence_region = []
self.filler_start_map = None
filler_node_size_x_list = []
filler_node_size_y_list = []
self.total_filler_node_area = 0
for i in range(len(self.regions) + 1):
(
num_filler_i,
target_density_i,
filler_size_x_i,
filler_size_y_i,
total_movable_node_area_i,
num_movable_nodes_i,
) = self.calc_num_filler_for_fence_region(i, self.node2fence_region_map, params.target_density)
self.num_movable_nodes_fence_region.append(num_movable_nodes_i)
self.num_filler_nodes_fence_region.append(num_filler_i)
self.total_movable_node_area_fence_region.append(total_movable_node_area_i)
self.target_density_fence_region.append(target_density_i)
self.filler_size_x_fence_region.append(filler_size_x_i)
self.filler_size_y_fence_region.append(filler_size_y_i)
self.num_filler_nodes += num_filler_i
filler_node_size_x_list.append(
np.full(num_filler_i, fill_value=filler_size_x_i, dtype=self.node_size_x.dtype)
)
filler_node_size_y_list.append(
np.full(num_filler_i, fill_value=filler_size_y_i, dtype=self.node_size_y.dtype)
)
filler_node_area_i = num_filler_i * (filler_size_x_i * filler_size_y_i)
self.total_filler_node_area += filler_node_area_i
content += "Region: %2d filler_node_area = %10.2f, #fillers = %8d, filler sizes = %2.4gx%g\n" % (
i,
filler_node_area_i,
num_filler_i,
filler_size_x_i,
filler_size_y_i,
)
self.total_movable_node_area_fence_region = np.array(self.total_movable_node_area_fence_region)
self.num_movable_nodes_fence_region = np.array(self.num_movable_nodes_fence_region)
if params.enable_fillers:
# the way to compute this is still tricky; we need to consider place_io together on how to
# summarize the area of fixed cells, which may overlap with each other.
if len(self.regions) > 0:
self.filler_start_map = np.cumsum([0] + self.num_filler_nodes_fence_region)
self.num_filler_nodes_fence_region = np.array(self.num_filler_nodes_fence_region)
self.node_size_x = np.concatenate([self.node_size_x] + filler_node_size_x_list)
self.node_size_y = np.concatenate([self.node_size_y] + filler_node_size_y_list)
content += "total_filler_node_area = %10.2f, #fillers = %8d, average filler sizes = %2.4gx%g\n" % (
self.total_filler_node_area,
self.num_filler_nodes,
self.total_filler_node_area / self.num_filler_nodes / self.row_height,
self.row_height,
)
else:
node_size_order = np.argsort(self.node_size_x[: self.num_movable_nodes])
filler_size_x = np.mean(
self.node_size_x[
node_size_order[int(self.num_movable_nodes * 0.05) : int(self.num_movable_nodes * 0.95)]
]
)
filler_size_y = self.row_height
placeable_area = max(self.area - self.total_fixed_node_area, self.total_space_area)
content += "use placeable_area = %g to compute fillers\n" % (placeable_area)
self.total_filler_node_area = max(
placeable_area * params.target_density - self.total_movable_node_area, 0.0
)
self.num_filler_nodes = int(round(self.total_filler_node_area / (filler_size_x * filler_size_y)))
self.node_size_x = np.concatenate(
[
self.node_size_x,
np.full(self.num_filler_nodes, fill_value=filler_size_x, dtype=self.node_size_x.dtype),
]
)
self.node_size_y = np.concatenate(
[
self.node_size_y,
np.full(self.num_filler_nodes, fill_value=filler_size_y, dtype=self.node_size_y.dtype),
]
)
content += "total_filler_node_area = %g, #fillers = %d, filler sizes = %gx%g\n" % (
self.total_filler_node_area,
self.num_filler_nodes,
filler_size_x,
filler_size_y,
)
else:
self.total_filler_node_area = 0
self.num_filler_nodes = 0
filler_size_x, filler_size_y = 0, 0
if len(self.regions) > 0:
self.filler_start_map = np.zeros(len(self.regions) + 2, dtype=np.int32)
self.num_filler_nodes_fence_region = np.zeros(len(self.num_filler_nodes_fence_region))
content += "total_filler_node_area = %g, #fillers = %d, filler sizes = %gx%g\n" % (
self.total_filler_node_area,
self.num_filler_nodes,
filler_size_x,
filler_size_y,
)
if params.routability_opt_flag:
content += "================================== routing information =================================\n"
content += "routing grids (%d, %d)\n" % (self.num_routing_grids_x, self.num_routing_grids_y)
content += "routing grid sizes (%g, %g)\n" % (self.routing_grid_size_x, self.routing_grid_size_y)
content += "routing capacity H/V (%g, %g) per tile\n" % (self.unit_horizontal_capacity * self.routing_grid_size_y, self.unit_vertical_capacity * self.routing_grid_size_x)
content += "========================================================================================"
logging.info(content)
def write(self, params, filename, sol_file_format=None):
"""
@brief write placement solution
@param filename output file name
@param sol_file_format solution file format, DEF|DEFSIMPLE|BOOKSHELF|BOOKSHELFALL
"""
tt = time.time()
logging.info("writing to %s" % (filename))
if sol_file_format is None:
if filename.endswith(".def"):
sol_file_format = place_io.SolutionFileFormat.DEF
else:
sol_file_format = place_io.SolutionFileFormat.BOOKSHELF
# unscale locations
unscale_factor = 1.0/params.scale_factor
if unscale_factor == 1.0:
node_x = self.node_x
node_y = self.node_y
else:
node_x = self.node_x * unscale_factor
node_y = self.node_y * unscale_factor
# Global placement may have floating point positions.
# Currently only support BOOKSHELF format.
# This is mainly for debug.
if not params.legalize_flag and not params.detailed_place_flag and sol_file_format == place_io.SolutionFileFormat.BOOKSHELF:
self.write_pl(params, filename, node_x, node_y)
else:
place_io.PlaceIOFunction.write(self.rawdb, filename, sol_file_format, node_x, node_y)
logging.info("write %s takes %.3f seconds" % (str(sol_file_format), time.time()-tt))
def read_pl(self, params, pl_file):
"""
@brief read .pl file
@param pl_file .pl file
"""
tt = time.time()
logging.info("reading %s" % (pl_file))
count = 0
with open(pl_file, "r") as f:
for line in f:
line = line.strip()
if line.startswith("UCLA"):
continue
# node positions
pos = re.search(r"(\w+)\s+([+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?)\s+([+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?)\s*:\s*(\w+)", line)
if pos:
node_id = self.node_name2id_map[pos.group(1)]
self.node_x[node_id] = float(pos.group(2))
self.node_y[node_id] = float(pos.group(6))
self.node_orient[node_id] = pos.group(10)
if params.scale_factor != 1.0:
self.scale_pl(params.scale_factor)
logging.info("read_pl takes %.3f seconds" % (time.time()-tt))
def write_pl(self, params, pl_file, node_x, node_y):
"""
@brief write .pl file
@param pl_file .pl file
"""
tt = time.time()
logging.info("writing to %s" % (pl_file))
content = "UCLA pl 1.0\n"
str_node_names = np.array(self.node_names).astype(np.str)
str_node_orient = np.array(self.node_orient).astype(np.str)
for i in range(self.num_movable_nodes):
content += "\n%s %g %g : %s" % (
str_node_names[i],
node_x[i],
node_y[i],
str_node_orient[i]
)
# use the original fixed cells, because they are expanded if they contain shapes
fixed_node_indices = list(self.rawdb.fixedNodeIndices())
for i, node_id in enumerate(fixed_node_indices):
content += "\n%s %g %g : %s /FIXED" % (
str(self.rawdb.nodeName(node_id)),
float(self.rawdb.node(node_id).xl()),
float(self.rawdb.node(node_id).yl()),
"N" # still hard-coded
)
for i in range(self.num_movable_nodes + self.num_terminals, self.num_movable_nodes + self.num_terminals + self.num_terminal_NIs):
content += "\n%s %g %g : %s /FIXED_NI" % (
str_node_names[i],
node_x[i],
node_y[i],
str_node_orient[i]
)
with open(pl_file, "w") as f:
f.write(content)
logging.info("write_pl takes %.3f seconds" % (time.time()-tt))
def write_nets(self, params, net_file):
"""
@brief write .net file
@param params parameters
@param net_file .net file
"""
tt = time.time()
logging.info("writing to %s" % (net_file))
content = "UCLA nets 1.0\n"
content += "\nNumNets : %d" % (len(self.net2pin_map))
content += "\nNumPins : %d" % (len(self.pin2net_map))
content += "\n"
for net_id in range(len(self.net2pin_map)):
pins = self.net2pin_map[net_id]
content += "\nNetDegree : %d %s" % (len(pins), self.net_names[net_id])
for pin_id in pins:
content += "\n\t%s %s : %d %d" % (self.node_names[self.pin2node_map[pin_id]], self.pin_direct[pin_id], self.pin_offset_x[pin_id]/params.scale_factor, self.pin_offset_y[pin_id]/params.scale_factor)
with open(net_file, "w") as f:
f.write(content)
logging.info("write_nets takes %.3f seconds" % (time.time()-tt))
def apply(self, params, node_x, node_y):
"""
@brief apply placement solution and update database
"""
# assign solution
self.node_x[:self.num_movable_nodes] = node_x[:self.num_movable_nodes]
self.node_y[:self.num_movable_nodes] = node_y[:self.num_movable_nodes]
# unscale locations
unscale_factor = 1.0/params.scale_factor
if unscale_factor == 1.0:
node_x = self.node_x
node_y = self.node_y
else:
node_x = self.node_x * unscale_factor
node_y = self.node_y * unscale_factor
# update raw database
place_io.PlaceIOFunction.apply(self.rawdb, node_x, node_y)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        logging.error("One input parameter in json format is required")
        sys.exit(1)
    params = Params.Params()
    params.load(sys.argv[1])
logging.info("parameters = %s" % (params))
db = PlaceDB()
db(params)
db.print_node(1)
db.print_net(1)
db.print_row(1)
|
|
import os,sys
import numpy as np
from scipy.stats import norm
from rpy import r
from scipy.cluster.vq import kmeans2
#r.library(mvtnorm)
## a class represent a k component mixture of Gaussian models using a latent
# variable representation of the Gaussian function.
# the data matrix x is a numpy array (nxd)
class GaussianMix:
## Constructor
# @param k is the number of components in the mixture
def __init__(self,x,k,numRuns=25,numIters=25,useKmeans=False):
self.numRuns = numRuns
self.numIters = numIters
self.k = k
self.x = x
self.n, self.d = np.shape(self.x)
self.useKmeans = useKmeans
print "\nRunning the EM algorithm for a mixture of Gaussian distributions"
print "The data are %s dimensional and have %s observations"%(self.d,self.n)
print "The input number of components are %s"%k
print "For initial param guesses useKmeans is set to ", self.useKmeans
self.maxLikelihood, self.maxEstimates = self.run_em_algorithm()
    ## make initial guesses for the parameters (mu_k, var_k and pi_k for each component)
    # guess mu and sigma by partitioning the data and using the sample means and (co)variances
    # the parameter dictionary will be a dict of k dicts where each represents a component
    # @param useKmeans specifies whether to use kmeans or a uniform random partitioning (the default)
def get_init_guesses(self,useKmeans):
## create a container for the params
params = {}
for k in range(self.k):
params[k] = {'mu':None,'var':None,'pi':None}
## use kmeans to get the initial estimates
if useKmeans == True:
tries = 0
while tries < 5:
try:
centroids,labels = kmeans2(self.x,self.k,iter=25)
tries = 5
except:
tries+=1
print '\tRerunning kmeans...'
for k in range(self.k):
muHat = centroids[k,:]
params[k]['mu'] = muHat
else:
labels = self.make_k_random_groups()
print labels
for k in range(self.k):
dataInds = np.where(labels==k)[0]
if len(dataInds) > 2:
data = self.x[dataInds,:]
params[k]['mu'] = data.mean(axis=0)
else:
params[k]['mu'] = self.x[np.random.randint(0,self.n),:]
## guess the variance/covariance
if self.d == 1:
for k in range(self.k):
dataInds = np.where(labels==k)[0]
if len(dataInds) > 2:
data = self.x[dataInds,:]
params[k]['var'] = data.var()
else:
params[k]['var'] = self.x.var(axis=0) + np.random.uniform(0.01,5.0)
elif self.d > 1:
print "ERROR: not set up yet for covariance"
## guess mixing parameter pi
for k in range(self.k):
dataInds = np.where(labels==k)[0]
params[k]['pi'] = float(len(dataInds))/float(self.n)
return params
## main function to carry out EM
# @param numRuns is the number of times that the algorithm should be run
# @param numIters is the number of iterations that the algorithm carries out with each run
def run_em_algorithm(self):
maxLikelihood,maxEstimates = -np.inf,None
## iterate through the number of runs to be made
for run in range(self.numRuns):
print 'run: ', run + 1, maxLikelihood
## make initial guesses for parameter values (could use k-means here)
params = self.get_init_guesses(useKmeans=self.useKmeans)
            ## iterate the prescribed number of times (a convergence criterion could be used instead)
for iter in range(self.numIters):
## perform the E-step -- evaluate the responsibilities using the current params
gammaHat = self.perform_expectation(params)
## perform the M-step -- re-estimate parameters using current conditional probs
params = self.perform_maximization(params,gammaHat)
## calculate the liklihood
likelihood = self.eval_likelihood(params)
print '\titer', iter,likelihood
if likelihood > maxLikelihood:
maxLikelihood = likelihood
maxEstimates = params
print "DEBUGGING"
sys.exit()
return maxLikelihood, maxEstimates
    ## function for the expectation step of the algorithm
    # @param params a dictionary of guesses for the model parameters
def perform_expectation(self,params):
        ## calculate the responsibilities; gamma is the conditional probability
## of z given x or p(z_k = 1|x)
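        ## i.e. gamma_ik = pi_k * N(x_i | mu_k, var_k) / sum_j pi_j * N(x_i | mu_j, var_j)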
gammaHat = {}
responsibilities = None
for i in range(self.n):
gammaZ = np.array([params[k]['pi'] * r.dnorm(self.x[i],mean=params[k]['mu'],sd=np.sqrt(params[k]['var'])) for k in range(self.k)]).T
            if responsibilities is None:
responsibilities = gammaZ
else:
responsibilities = np.vstack([responsibilities,gammaZ])
for i in range(self.n):
gammaHat[i] = responsibilities[i,:] / responsibilities.sum(axis=1)[i]
return gammaHat
    ## using the input responsibilities (gammaHat) re-estimate the model parameters
    # @param params dict of parameter estimates
    # @param gammaHat dict of responsibilities
def perform_maximization(self,params,gammaHat):
## get the component assignments
assignments = np.array([np.where(gammaHat[i]==gammaHat[i].max())[0][0] for i in range(self.n)])
#print 'assiignments', assignments
nK = np.array([len(np.where(assignments==k)[0]) for k in range(self.k)])
## avoid singularities by resetting params
        if len(np.where(nK==0)[0]) > 0 or len(np.where(nK==1)[0]) > 0:
print "\tResetting initial guesses to avoid singularities"
params = self.get_init_guesses(useKmeans=self.useKmeans)
return params
## get new estimates for mu
newParams = {}
##muNew = np.zeros((self.k,self.d), dtype='float')
for k in range(self.k):
newParams[k] = {'mu':None,'var':None,'pi':None}
newParams[k]['mu'] = np.array([gammaHat[i][k] * self.x[i,:] for i in range(self.n)]).sum(axis=0) / float(nK[k])
### get new estimates for covariance matrices
if self.d == 1:
for k in range(self.k):
                numeratorSum = np.zeros((self.d, self.d), dtype='float')
                for i in range(self.n):
                    xMinusTerm = np.array([self.x[i,:] - newParams[k]['mu']])
                    ## weight each outer product (x_i - mu_k)(x_i - mu_k)^T by the responsibility gamma_ik
                    numeratorSum = numeratorSum + gammaHat[i][k] * np.dot(xMinusTerm.T, xMinusTerm)
                newParams[k]['var'] = numeratorSum / float(nK[k])
else:
print "ERROR not yet implimented"
## get new estimates for mixing parameter
for k in range(self.k):
newParams[k]['pi'] = float(nK[k]) / float(self.n)
return newParams
    ## evaluate the likelihood
    # @param params the dict of parameter estimates
def eval_likelihood(self,params):
likelihood = 0.0
        ## for each data point sum the weighted component densities, then take the log
        for i in range(self.n):
            componentLikelihoods = np.zeros((self.k),dtype='float')
            for k in range(self.k):
                if self.d == 1:
                    phi = r.dnorm(self.x[i],mean=params[k]['mu'],sd=np.sqrt(params[k]['var']))
                else:
                    print "ERROR: not implemented yet"
                    #phi = norm.pdf(self.x[i,:],loc=params['mu'][k,:],scale=np.sqrt(params['sig'][k,:]))
                componentLikelihoods[k] = phi * params[k]['pi']
            ## log of the summed component densities added to the total log-likelihood
            likelihood = likelihood + np.log(componentLikelihoods.sum(axis=0))
return likelihood
    ## error check the dimensions of sigma
    # @param sigma is a DxD np array covariance matrix
def _check_sigma(self,sigma):
dim1,dim2 = np.shape(sigma)
if self.d != dim1:
print "ERROR: covariance matrix is not DxD"
elif self.d != dim2:
print "ERROR: covariance matrix is not DxD"
## randomly group the data into k groups in order to get initial param guesses
def make_k_random_groups(self):
inds = np.arange(self.n)
np.random.shuffle(inds)
points = np.random.uniform(0,1,self.k-1)
points = np.sort(points)
if points.size > 1:
points = points / points.sum()
startPoints = np.hstack([np.array([0]),points])
startPoints = np.array([int(round(p * self.n)) for p in startPoints])
stopPoints = np.array([int(i) for i in np.hstack([startPoints[1:],self.n])])
labels = np.zeros((self.n),dtype='int')
for k in range(self.k):
labels[inds[startPoints[k]:stopPoints[k]]] = k
## error checking
if labels.size != self.n:
print "ERROR: returned labels not of correct lenght"
return labels
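## ---------------------------------------------------------------------------
## A minimal standalone sketch (not part of the class above) of the same 1-D
## E-step responsibility calculation carried out with scipy.stats.norm instead
## of the per-point rpy2 r.dnorm calls used in perform_expectation. The
## function name and argument layout are illustrative assumptions only, and it
## assumes numpy is already imported as np at the top of this module.
def responsibilities_1d_sketch(x, mus, variances, pis):
    """x is an (n,) array; mus, variances and pis are length-k sequences.
    Returns an (n,k) array of responsibilities whose rows sum to one."""
    from scipy.stats import norm
    dens = np.array([pis[k] * norm.pdf(x, loc=mus[k], scale=np.sqrt(variances[k]))
                     for k in range(len(pis))]).T
    ## normalize each row so the responsibilities for a point sum to one
    return dens / dens.sum(axis=1)[:, np.newaxis]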
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import strutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints as constr
PARAMETER_KEYS = (
TYPE, DEFAULT, NO_ECHO, ALLOWED_VALUES, ALLOWED_PATTERN,
MAX_LENGTH, MIN_LENGTH, MAX_VALUE, MIN_VALUE,
DESCRIPTION, CONSTRAINT_DESCRIPTION, LABEL
) = (
'Type', 'Default', 'NoEcho', 'AllowedValues', 'AllowedPattern',
'MaxLength', 'MinLength', 'MaxValue', 'MinValue',
'Description', 'ConstraintDescription', 'Label'
)
class Schema(constr.Schema):
"""Parameter schema."""
KEYS = (
TYPE, DESCRIPTION, DEFAULT, SCHEMA, CONSTRAINTS, HIDDEN, LABEL
) = (
'Type', 'Description', 'Default', 'Schema', 'Constraints', 'NoEcho',
'Label'
)
PARAMETER_KEYS = PARAMETER_KEYS
# For Parameters the type name for Schema.LIST is CommaDelimitedList
# and the type name for Schema.MAP is Json
TYPES = (
STRING, NUMBER, LIST, MAP, BOOLEAN,
) = (
'String', 'Number', 'CommaDelimitedList', 'Json', 'Boolean',
)
def __init__(self, data_type, description=None, default=None, schema=None,
constraints=None, hidden=False, label=None):
super(Schema, self).__init__(data_type=data_type,
description=description,
default=default,
schema=schema,
required=default is None,
constraints=constraints,
label=label)
self.hidden = hidden
    # Schema class validates default value for lists assuming list type. For
    # the comma-delimited list string supported by the parameters Schema
    # class, the default value has to be parsed into a list if necessary so
    # that validation works.
def _validate_default(self, context):
if self.default is not None:
default_value = self.default
if self.type == self.LIST and not isinstance(self.default, list):
try:
default_value = self.default.split(',')
except (KeyError, AttributeError) as err:
raise exception.InvalidSchemaError(
message=_('Default must be a comma-delimited list '
'string: %s') % err)
elif self.type == self.LIST and isinstance(self.default, list):
default_value = [(six.text_type(x))
for x in self.default]
try:
self.validate_constraints(default_value, context,
[constr.CustomConstraint])
except (ValueError, TypeError,
exception.StackValidationFailed) as exc:
raise exception.InvalidSchemaError(
message=_('Invalid default %(default)s (%(exc)s)') %
dict(default=self.default, exc=exc))
def set_default(self, default=None):
super(Schema, self).set_default(default)
self.required = default is None
@staticmethod
def get_num(key, context):
val = context.get(key)
if val is not None:
val = Schema.str_to_num(val)
return val
@staticmethod
def _check_dict(schema_dict, allowed_keys, entity):
if not isinstance(schema_dict, dict):
raise exception.InvalidSchemaError(
message=_("Invalid %s, expected a mapping") % entity)
for key in schema_dict:
if key not in allowed_keys:
raise exception.InvalidSchemaError(
message=_("Invalid key '%(key)s' for %(entity)s") % {
"key": key, "entity": entity})
@classmethod
def _validate_dict(cls, param_name, schema_dict):
cls._check_dict(schema_dict,
cls.PARAMETER_KEYS,
"parameter (%s)" % param_name)
if cls.TYPE not in schema_dict:
raise exception.InvalidSchemaError(
message=_("Missing parameter type for parameter: %s") %
param_name)
@classmethod
def from_dict(cls, param_name, schema_dict):
"""Return a Parameter Schema object from a legacy schema dictionary.
:param param_name: name of the parameter owning the schema; used
for more verbose logging
:type param_name: str
"""
cls._validate_dict(param_name, schema_dict)
def constraints():
desc = schema_dict.get(CONSTRAINT_DESCRIPTION)
if MIN_VALUE in schema_dict or MAX_VALUE in schema_dict:
yield constr.Range(Schema.get_num(MIN_VALUE, schema_dict),
Schema.get_num(MAX_VALUE, schema_dict),
desc)
if MIN_LENGTH in schema_dict or MAX_LENGTH in schema_dict:
yield constr.Length(Schema.get_num(MIN_LENGTH, schema_dict),
Schema.get_num(MAX_LENGTH, schema_dict),
desc)
if ALLOWED_VALUES in schema_dict:
yield constr.AllowedValues(schema_dict[ALLOWED_VALUES], desc)
if ALLOWED_PATTERN in schema_dict:
yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN], desc)
# make update_allowed true by default on TemplateResources
# as the template should deal with this.
return cls(schema_dict[TYPE],
description=schema_dict.get(DESCRIPTION),
default=schema_dict.get(DEFAULT),
constraints=list(constraints()),
hidden=str(schema_dict.get(NO_ECHO,
'false')).lower() == 'true',
label=schema_dict.get(LABEL))
def validate_value(self, value, context=None):
super(Schema, self).validate_constraints(value, context)
def __getitem__(self, key):
if key == self.TYPE:
return self.type
if key == self.HIDDEN:
return self.hidden
else:
return super(Schema, self).__getitem__(key)
@six.python_2_unicode_compatible
class Parameter(object):
"""A template parameter."""
def __new__(cls, name, schema, value=None):
"""Create a new Parameter of the appropriate type."""
if cls is not Parameter:
return super(Parameter, cls).__new__(cls)
# Check for fully-fledged Schema objects
if not isinstance(schema, Schema):
schema = Schema.from_dict(name, schema)
if schema.type == schema.STRING:
ParamClass = StringParam
elif schema.type == schema.NUMBER:
ParamClass = NumberParam
elif schema.type == schema.LIST:
ParamClass = CommaDelimitedListParam
elif schema.type == schema.MAP:
ParamClass = JsonParam
elif schema.type == schema.BOOLEAN:
ParamClass = BooleanParam
else:
raise ValueError(_('Invalid Parameter type "%s"') % schema.type)
return ParamClass(name, schema, value)
def __init__(self, name, schema, value=None):
"""Initialisation of the parameter.
Initialise the Parameter with a name, schema and optional user-supplied
value.
"""
self.name = name
self.schema = schema
self.user_value = value
self.user_default = None
def validate(self, validate_value=True, context=None):
"""Validates the parameter.
This method validates if the parameter's schema is valid,
and if the default value - if present - or the user-provided
value for the parameter comply with the schema.
"""
err_msg = _("Parameter '%(name)s' is invalid: %(exp)s")
try:
self.schema.validate(context)
if not validate_value:
return
if self.user_value is not None:
self._validate(self.user_value, context)
elif self.has_default():
self._validate(self.default(), context)
else:
raise exception.UserParameterMissing(key=self.name)
except exception.StackValidationFailed as ex:
msg = err_msg % dict(name=self.name, exp=six.text_type(ex))
raise exception.StackValidationFailed(message=msg)
except exception.InvalidSchemaError as ex:
msg = err_msg % dict(name=self.name, exp=six.text_type(ex))
raise exception.InvalidSchemaError(message=msg)
def value(self):
"""Get the parameter value, optionally sanitising it for output."""
if self.user_value is not None:
return self.user_value
if self.has_default():
return self.default()
raise exception.UserParameterMissing(key=self.name)
def has_value(self):
"""Parameter has a user or default value."""
return self.user_value is not None or self.has_default()
def hidden(self):
"""Return if parameter is hidden.
Return whether the parameter should be sanitised in any output to
the user.
"""
return self.schema.hidden
def description(self):
"""Return the description of the parameter."""
return self.schema.description or ''
def label(self):
"""Return the label or param name."""
return self.schema.label or self.name
def has_default(self):
"""Return whether the parameter has a default value."""
return (self.schema.default is not None or
self.user_default is not None)
def default(self):
"""Return the default value of the parameter."""
if self.user_default is not None:
return self.user_default
return self.schema.default
def set_default(self, value):
self.user_default = value
def __str__(self):
"""Return a string representation of the parameter."""
value = self.value()
if self.hidden():
return six.text_type('******')
else:
return six.text_type(value)
class NumberParam(Parameter):
"""A template parameter of type "Number"."""
def __int__(self):
"""Return an integer representation of the parameter."""
return int(super(NumberParam, self).value())
def __float__(self):
"""Return a float representation of the parameter."""
return float(super(NumberParam, self).value())
def _validate(self, val, context):
try:
Schema.str_to_num(val)
except ValueError as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
self.schema.validate_value(val, context)
def value(self):
return Schema.str_to_num(super(NumberParam, self).value())
class BooleanParam(Parameter):
"""A template parameter of type "Boolean"."""
def _validate(self, val, context):
try:
strutils.bool_from_string(val, strict=True)
except ValueError as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
self.schema.validate_value(val, context)
def value(self):
if self.user_value is not None:
raw_value = self.user_value
else:
raw_value = self.default()
return strutils.bool_from_string(str(raw_value), strict=True)
class StringParam(Parameter):
"""A template parameter of type "String"."""
def _validate(self, val, context):
self.schema.validate_value(val, context)
class ParsedParameter(Parameter):
"""A template parameter with cached parsed value."""
def __init__(self, name, schema, value=None):
super(ParsedParameter, self).__init__(name, schema, value)
self._update_parsed()
def set_default(self, value):
super(ParsedParameter, self).set_default(value)
self._update_parsed()
def _update_parsed(self):
if self.has_value():
if self.user_value is not None:
self.parsed = self.parse(self.user_value)
else:
self.parsed = self.parse(self.default())
class CommaDelimitedListParam(ParsedParameter, collections.Sequence):
"""A template parameter of type "CommaDelimitedList"."""
def __init__(self, name, schema, value=None):
self.parsed = []
super(CommaDelimitedListParam, self).__init__(name, schema, value)
def parse(self, value):
# only parse when value is not already a list
if isinstance(value, list):
return [(six.text_type(x)) for x in value]
try:
if value is not None:
if value == '':
return []
return value.split(',')
except (KeyError, AttributeError) as err:
message = _('Value must be a comma-delimited list string: %s')
raise ValueError(message % six.text_type(err))
return value
def value(self):
if self.has_value():
return self.parsed
raise exception.UserParameterMissing(key=self.name)
def __len__(self):
"""Return the length of the list."""
return len(self.parsed)
def __getitem__(self, index):
"""Return an item from the list."""
return self.parsed[index]
def __str__(self):
if self.hidden():
return super(CommaDelimitedListParam, self).__str__()
return ",".join(self.value())
def _validate(self, val, context):
parsed = self.parse(val)
self.schema.validate_value(parsed, context)
class JsonParam(ParsedParameter):
"""A template parameter who's value is map or list."""
def __init__(self, name, schema, value=None):
self.parsed = {}
super(JsonParam, self).__init__(name, schema, value)
def parse(self, value):
try:
val = value
if not isinstance(val, six.string_types):
# turn off oslo_serialization's clever to_primitive()
val = jsonutils.dumps(val, default=None)
if val:
return jsonutils.loads(val)
except (ValueError, TypeError) as err:
message = _('Value must be valid JSON: %s') % err
raise ValueError(message)
return value
def value(self):
if self.has_value():
return self.parsed
raise exception.UserParameterMissing(key=self.name)
def __getitem__(self, key):
return self.parsed[key]
def __iter__(self):
return iter(self.parsed)
def __len__(self):
return len(self.parsed)
def __str__(self):
if self.hidden():
return super(JsonParam, self).__str__()
return encodeutils.safe_decode(jsonutils.dumps(self.value()))
def _validate(self, val, context):
val = self.parse(val)
self.schema.validate_value(val, context)
class Parameters(collections.Mapping):
"""Parameters of a stack.
The parameters of a stack, with type checking, defaults etc., specified by
the stack's template.
"""
PSEUDO_PARAMETERS = (
PARAM_STACK_ID, PARAM_STACK_NAME, PARAM_REGION
) = (
'AWS::StackId', 'AWS::StackName', 'AWS::Region'
)
def __init__(self, stack_identifier, tmpl, user_params=None,
param_defaults=None):
"""Initialisation of the parameter.
Create the parameter container for a stack from the stack name and
template, optionally setting the user-supplied parameter values.
"""
user_params = user_params or {}
param_defaults = param_defaults or {}
def user_parameter(schema_item):
name, schema = schema_item
return Parameter(name, schema,
user_params.get(name))
self.tmpl = tmpl
self.user_params = user_params
schemata = self.tmpl.param_schemata()
user_parameters = (user_parameter(si) for si in
six.iteritems(schemata))
pseudo_parameters = self._pseudo_parameters(stack_identifier)
self.params = dict((p.name,
p) for p in itertools.chain(pseudo_parameters,
user_parameters))
for pd in six.iterkeys(param_defaults):
if pd in self.params:
self.params[pd].set_default(param_defaults[pd])
def validate(self, validate_value=True, context=None):
"""Validates all parameters.
This method validates if all user-provided parameters are actually
defined in the template, and if all parameters are valid.
"""
self._validate_tmpl_parameters()
self._validate_user_parameters()
for param in six.itervalues(self.params):
param.validate(validate_value, context)
def __contains__(self, key):
"""Return whether the specified parameter exists."""
return key in self.params
def __iter__(self):
"""Return an iterator over the parameter names."""
return iter(self.params)
def __len__(self):
"""Return the number of parameters defined."""
return len(self.params)
def __getitem__(self, key):
"""Get a parameter value."""
return self.params[key].value()
def map(self, func, filter_func=lambda p: True):
"""Map the supplied filter function onto each Parameter.
Map the supplied filter function onto each Parameter (with an
optional filter function) and return the resulting dictionary.
"""
return dict((n, func(p))
for n, p in six.iteritems(self.params) if filter_func(p))
def set_stack_id(self, stack_identifier):
"""Set the StackId pseudo parameter value."""
if stack_identifier is not None:
self.params[self.PARAM_STACK_ID].schema.set_default(
stack_identifier.arn())
return True
return False
def _validate_user_parameters(self):
schemata = self.tmpl.param_schemata()
for param in self.user_params:
if param not in schemata:
raise exception.UnknownUserParameter(key=param)
def _validate_tmpl_parameters(self):
param = None
for key in six.iterkeys(self.tmpl.t):
if key == 'Parameters' or key == 'parameters':
param = key
break
if param is not None:
template_params = self.tmpl.t[key] or {}
for name, attrs in six.iteritems(template_params):
if not isinstance(attrs, dict):
raise exception.InvalidTemplateParameter(key=name)
def _pseudo_parameters(self, stack_identifier):
stack_id = (stack_identifier.arn()
if stack_identifier is not None else 'None')
stack_name = stack_identifier and stack_identifier.stack_name
yield Parameter(self.PARAM_STACK_ID,
Schema(Schema.STRING, _('Stack ID'),
default=str(stack_id)))
if stack_name:
yield Parameter(self.PARAM_STACK_NAME,
Schema(Schema.STRING, _('Stack Name'),
default=stack_name))
yield Parameter(self.PARAM_REGION,
Schema(Schema.STRING,
default='ap-southeast-1',
constraints=[
constr.AllowedValues(['us-east-1',
'us-west-1',
'us-west-2',
'sa-east-1',
'eu-west-1',
'ap-southeast-1',
'ap-northeast-1']
)]))
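# A minimal usage sketch, not part of the Heat module above: it shows how a
# legacy CFN-style parameter definition is turned into a Schema via
# Schema.from_dict() and wrapped in a Parameter, and how NoEcho controls the
# sanitised string form. The function name and the example dictionary are
# illustrative assumptions only.
def _example_parameter_from_legacy_dict():
    legacy = {
        'Type': 'String',
        'Default': 'admin',
        'NoEcho': 'true',
        'Description': 'An example hidden string parameter',
    }
    schema = Schema.from_dict('ExamplePassword', legacy)
    param = Parameter('ExamplePassword', schema, value='s3cret')
    # expected: value() returns the user value, hidden() is True because of
    # NoEcho, and str(param) renders as '******'
    return param.value(), param.hidden(), str(param)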
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import mock
import six
from webob import exc
from senlin.api.middleware import fault
from senlin.api.openstack.v1 import profiles
from senlin.common import exception as senlin_exc
from senlin.common import policy
from senlin.rpc import client as rpc_client
from senlin.tests.apiv1 import shared
from senlin.tests.common import base
class ProfileDataTest(base.SenlinTestCase):
def test_profile_data(self):
body = {
'name': 'test_profile',
'spec': {
'param1': 'value1',
'param2': 'value2',
},
'type': 'test_profile_type',
'permission': None,
'tags': {}
}
data = profiles.ProfileData(body)
self.assertEqual('test_profile', data.name())
self.assertEqual({'param1': 'value1', 'param2': 'value2'}, data.spec())
self.assertEqual('test_profile_type', data.type())
self.assertIsNone(data.permission())
self.assertEqual({}, data.tags())
def test_required_fields_missing(self):
body = {'not a profile name': 'wibble'}
data = profiles.ProfileData(body)
self.assertRaises(exc.HTTPBadRequest, data.name)
self.assertRaises(exc.HTTPBadRequest, data.spec)
self.assertRaises(exc.HTTPBadRequest, data.type)
self.assertIsNone(data.permission())
self.assertIsNone(data.tags())
@mock.patch.object(policy.Enforcer, 'enforce')
class ProfileControllerTest(shared.ControllerTest, base.SenlinTestCase):
def setUp(self):
super(ProfileControllerTest, self).setUp()
class DummyConfig(object):
bind_port = 8778
cfgopts = DummyConfig()
self.controller = profiles.ProfileController(options=cfgopts)
def test_profile_index_normal(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/profiles')
engine_resp = [
{
u'id': u'aaaa-bbbb-cccc',
u'name': u'profile-1',
u'type': u'test_profile_type',
u'spec': {
u'param_1': u'value1',
u'param_2': u'value2',
},
u'permission': '',
u'created_time': u'2015-02-24T19:17:22Z',
u'updated_time': None,
u'deleted_time': None,
u'tags': {},
}
]
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_resp)
result = self.controller.index(req, tenant_id=self.tenant)
default_args = {'limit': None, 'marker': None,
'sort_keys': None, 'sort_dir': None,
'filters': None, 'show_deleted': False}
mock_call.assert_called_with(req.context,
('profile_list', default_args))
expected = {'profiles': engine_resp}
self.assertEqual(expected, result)
def test_profile_index_whitelists_params(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'limit': 20,
'marker': 'fake marker',
'sort_keys': 'fake sort keys',
'sort_dir': 'fake sort dir',
'show_deleted': False,
'filters': None,
'balrog': 'you shall not pass!'
}
req = self._get('/profiles', params=params)
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertEqual(6, len(engine_args))
self.assertIn('limit', engine_args)
self.assertIn('marker', engine_args)
self.assertIn('sort_keys', engine_args)
self.assertIn('sort_dir', engine_args)
self.assertIn('filters', engine_args)
self.assertIn('show_deleted', engine_args)
self.assertNotIn('tenant_safe', engine_args)
self.assertNotIn('balrog', engine_args)
def test_profile_index_whitelist_filter_params(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'type': 'some_type',
'name': 'fake name',
'balrog': 'you shall not pass!'
}
req = self._get('/profiles', params=params)
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertIn('filters', engine_args)
filters = engine_args['filters']
self.assertEqual(2, len(filters))
self.assertIn('name', filters)
self.assertIn('type', filters)
self.assertNotIn('balrog', filters)
def test_profile_index_show_deleted_false(self, mock_enforce):
mock_call = self.patchobject(rpc_client.EngineClient, 'profile_list',
return_value=[])
params = {'show_deleted': 'False'}
req = self._get('/profiles', params=params)
self.controller.index(req, tenant_id=self.tenant)
mock_call.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_deleted=False)
def test_profile_index_show_deleted_true(self, mock_enforce):
mock_call = self.patchobject(rpc_client.EngineClient, 'profile_list',
return_value=[])
params = {'show_deleted': 'True'}
req = self._get('/profiles', params=params)
self.controller.index(req, tenant_id=self.tenant)
mock_call.assert_called_once_with(mock.ANY,
filters=mock.ANY,
show_deleted=True)
def test_profile_index_show_deleted_non_bool(self, mock_enforce):
mock_call = self.patchobject(rpc_client.EngineClient, 'profile_list',
return_value=[])
params = {'show_deleted': 'yes'}
req = self._get('/profiles', params=params)
ex = self.assertRaises(senlin_exc.InvalidParameter,
self.controller.index, req,
tenant_id=self.tenant)
self.assertIn("Invalid value 'yes' specified for 'show_deleted'",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_profile_index_limit_non_int(self, mock_enforce):
mock_call = self.patchobject(rpc_client.EngineClient, 'profile_list',
return_value=[])
params = {'limit': 'abc'}
req = self._get('/profiles', params=params)
ex = self.assertRaises(senlin_exc.InvalidParameter,
self.controller.index, req,
tenant_id=self.tenant)
self.assertIn("Invalid value 'abc' specified for 'limit'",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_profile_index_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', False)
req = self._get('/profiles')
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_profile_create_success(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
body = {
'profile': {
'name': 'test_profile',
'type': 'test_profile_type',
'spec': {
'param_1': 'value1',
'param_2': 2,
},
'permission': None,
'tags': {},
}
}
engine_response = {
'id': 'xxxx-yyyy-zzzz',
'name': 'test_profile',
'type': 'test_profile_type',
'spec': {
'param_1': 'value1',
'param_2': 2,
},
'permission': None,
'tags': {},
}
req = self._post('/profiles', json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_response)
resp = self.controller.create(req, tenant_id=self.tenant, body=body)
mock_call.assert_called_with(
req.context,
('profile_create', {
'name': 'test_profile',
'type': 'test_profile_type',
'spec': {'param_1': 'value1', 'param_2': 2},
'perm': None,
'tags': {},
})
)
expected = {'profile': engine_response}
self.assertEqual(expected, resp)
def test_profile_create_with_bad_body(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
body = {'name': 'test_profile'}
req = self._post('/profiles', json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(exc.HTTPBadRequest,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual("Malformed request data, missing 'profile' key "
"in request body.", six.text_type(ex))
self.assertFalse(mock_call.called)
def test_profile_create_with_bad_type(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
type_name = 'unknown_type'
body = {
'profile': {
'name': 'test_profile',
'type': type_name,
'spec': {'param': 'value'},
'permission': None,
'tags': {},
}
}
req = self._post('/profiles', json.dumps(body))
error = senlin_exc.ProfileTypeNotFound(profile_type=type_name)
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
side_effect=error)
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
mock_call.assert_called_once()
self.assertEqual(404, resp.json['code'])
self.assertEqual('ProfileTypeNotFound', resp.json['error']['type'])
self.assertIsNone(resp.json['error']['traceback'])
def test_profile_create_with_spec_validation_failed(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
body = {
'profile': {
'name': 'test_profile',
'type': 'test_profile_type',
'spec': {'param': 'value'},
'permission': None,
'tags': {},
}
}
req = self._post('/profiles', json.dumps(body))
msg = 'Spec validation error (param): value'
error = senlin_exc.SpecValidationFailed(message=msg)
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
side_effect=error)
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
mock_call.assert_called_once()
self.assertEqual(400, resp.json['code'])
self.assertEqual('SpecValidationFailed', resp.json['error']['type'])
self.assertIsNone(resp.json['error']['traceback'])
def test_profile_create_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', False)
body = {
'profile': {
'name': 'test_profile',
'type': 'test_profile_type',
'spec': {'param': 'value'},
}
}
req = self._post('/profiles', json.dumps(body))
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_profile_get_normal(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'get', True)
pid = 'aaaa-bbbb-cccc'
req = self._get('/profiles/%(profile_id)s' % {'profile_id': pid})
engine_resp = {
u'id': u'aaaa-bbbb-cccc',
u'name': u'profile-1',
u'type': u'test_profile_type',
u'spec': {
u'param_1': u'value1',
u'param_2': u'value2',
},
u'permission': '',
u'created_time': u'2015-02-24T19:17:22Z',
u'updated_time': None,
u'deleted_time': None,
u'tags': {},
}
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_resp)
result = self.controller.get(req, tenant_id=self.tenant,
profile_id=pid)
mock_call.assert_called_with(req.context,
('profile_get', {'identity': pid}))
expected = {'profile': engine_resp}
self.assertEqual(expected, result)
def test_profile_get_not_found(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'get', True)
pid = 'non-existent-profile'
req = self._get('/profiles/%(profile_id)s' % {'profile_id': pid})
error = senlin_exc.ProfileNotFound(profile=pid)
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
mock_call.side_effect = shared.to_remote_error(error)
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.get,
req, tenant_id=self.tenant,
profile_id=pid)
self.assertEqual(404, resp.json['code'])
self.assertEqual('ProfileNotFound', resp.json['error']['type'])
def test_profile_get_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'get', False)
pid = 'non-existent-profile'
req = self._get('/profiles/%(profile_id)s' % {'profile_id': pid})
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.get,
req, tenant_id=self.tenant,
profile_id=pid)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_profile_update_normal(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
pid = 'aaaa-bbbb-cccc'
body = {
'profile': {
'name': 'profile-2',
'spec': {
'param_2': 'value3',
},
'tags': {
'author': 'thomas j',
}
}
}
req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid},
json.dumps(body))
engine_resp = {
u'id': pid,
u'name': u'profile-2',
u'type': u'test_profile_type',
u'spec': {
u'param_1': u'value1',
u'param_2': u'value3',
},
u'permission': '',
u'created_time': u'2015-02-25T16:20:13Z',
u'updated_time': None,
u'deleted_time': None,
u'tags': {u'author': u'thomas j'},
}
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_resp)
result = self.controller.update(req, tenant_id=self.tenant,
profile_id=pid,
body=body)
args = copy.deepcopy(body['profile'])
args['profile_id'] = pid
args['permission'] = None
mock_call.assert_called_with(req.context, ('profile_update', args))
expected = {'profile': engine_resp}
self.assertEqual(expected, result)
def test_profile_update_no_name(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
pid = 'aaaa-bbbb-cccc'
body = {
'profile': {'spec': {'param_2': 'value3'},
'tags': {'author': 'thomas j'}}
}
req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid},
json.dumps(body))
self.patchobject(rpc_client.EngineClient, 'call', return_value={})
result = self.controller.update(req, tenant_id=self.tenant,
profile_id=pid, body=body)
self.assertEqual({'profile': {}}, result)
def test_profile_update_no_spec(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
pid = 'aaaa-bbbb-cccc'
body = {
'profile': {
'name': 'new_profile',
'tags': {'author': 'john d'},
'permission': 'xxx',
}
}
req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid},
json.dumps(body))
engine_response = {
u'id': 'dddd-eeee-ffff',
u'name': u'new_profile',
u'type': u'test_profile_type',
u'spec': {
u'param_1': u'value1',
u'param_2': u'value3',
},
u'permission': 'xxx',
u'created_time': u'2015-02-25T16:20:13Z',
u'updated_time': u'2015-02-25T16:50:22Z',
u'deleted_time': None,
u'tags': {u'author': u'john d'},
}
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=engine_response)
result = self.controller.update(req, tenant_id=self.tenant,
profile_id=pid, body=body)
args = copy.deepcopy(body['profile'])
args['profile_id'] = pid
args['spec'] = None
mock_call.assert_called_with(req.context, ('profile_update', args))
expected = {'profile': engine_response}
self.assertEqual(expected, result)
def test_profile_update_not_found(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
pid = 'non-existent-profile'
body = {
'profile': {
'name': 'new_profile',
'tags': {'author': 'john d'},
'permission': 'xxx',
}
}
req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid},
json.dumps(body))
error = senlin_exc.ProfileNotFound(profile=pid)
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
mock_call.side_effect = shared.to_remote_error(error)
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.update,
req, tenant_id=self.tenant,
profile_id=pid,
body=body)
self.assertEqual(404, resp.json['code'])
self.assertEqual('ProfileNotFound', resp.json['error']['type'])
def test_profile_update_with_spec_validation_failed(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
pid = 'aaaa-bbbb-cccc'
body = {
'profile': {
'name': 'test_profile',
'spec': {'param4': 'value4'},
}
}
req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid},
json.dumps(body))
msg = 'Spec validation error (param): value'
error = senlin_exc.SpecValidationFailed(message=msg)
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
side_effect=error)
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.update,
req, tenant_id=self.tenant,
profile_id=pid,
body=body)
mock_call.assert_called_once()
self.assertEqual(400, resp.json['code'])
self.assertEqual('SpecValidationFailed', resp.json['error']['type'])
self.assertIsNone(resp.json['error']['traceback'])
def test_profile_update_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', False)
pid = 'aaaa-bbbb-cccc'
body = {
'profile': {'name': 'test_profile', 'spec': {'param5': 'value5'}},
}
req = self._put('/profiles/%(profile_id)s' % {'profile_id': pid},
json.dumps(body))
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.update,
req, tenant_id=self.tenant,
profile_id=pid,
body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
def test_profile_delete_success(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', True)
pid = 'aaaa-bbbb-cccc'
req = self._delete('/profiles/%(profile_id)s' % {'profile_id': pid})
mock_call = self.patchobject(rpc_client.EngineClient, 'call',
return_value=None)
self.assertRaises(exc.HTTPNoContent, self.controller.delete,
req, tenant_id=self.tenant, profile_id=pid)
mock_call.assert_called_with(
req.context, ('profile_delete', {'identity': pid}))
    def test_profile_delete_not_found(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', True)
pid = 'aaaa-bbbb-cccc'
req = self._delete('/profiles/%(profile_id)s' % {'profile_id': pid})
error = senlin_exc.ProfileNotFound(profile=pid)
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
mock_call.side_effect = shared.to_remote_error(error)
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.delete,
req, tenant_id=self.tenant,
profile_id=pid)
self.assertEqual(404, resp.json['code'])
self.assertEqual('ProfileNotFound', resp.json['error']['type'])
    def test_profile_delete_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', False)
pid = 'aaaa-bbbb-cccc'
req = self._delete('/profiles/%(profile_id)s' % {'profile_id': pid})
resp = shared.request_with_middleware(fault.FaultWrapper,
self.controller.delete,
req, tenant_id=self.tenant,
profile_id=pid)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
|
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Cursor classes
"""
import re
import weakref
from collections import namedtuple
from . import errors
from .abstracts import MySQLCursorAbstract
from .catch23 import PY2
SQL_COMMENT = r"\/\*.*?\*\/"
RE_SQL_COMMENT = re.compile(
r'''({0})|(["'`][^"'`]*?({0})[^"'`]*?["'`])'''.format(SQL_COMMENT),
re.I | re.M | re.S)
RE_SQL_ON_DUPLICATE = re.compile(
r'''\s*ON\s+DUPLICATE\s+KEY(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$''',
re.I | re.M | re.S)
RE_SQL_INSERT_STMT = re.compile(
r"({0}|\s)*INSERT({0}|\s)*INTO.+VALUES.*".format(SQL_COMMENT),
re.I | re.M | re.S)
RE_SQL_INSERT_VALUES = re.compile(r'.*VALUES\s*(\(.*\)).*', re.I | re.M | re.S)
# RE_PY_PARAM = re.compile(b'%s')
RE_PY_PARAM = re.compile(b'(%s)')
RE_PY_MAPPING_PARAM = re.compile(
br'''
%
\((?P<mapping_key>[^)]+)\)
(?P<conversion_type>[diouxXeEfFgGcrs%])
''',
re.X
)
RE_SQL_SPLIT_STMTS = re.compile(
b''';(?=(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$)''')
RE_SQL_FIND_PARAM = re.compile(
b'''%s(?=(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$)''')
ERR_NO_RESULT_TO_FETCH = "No result set to fetch from"
class _ParamSubstitutor(object):
"""
Substitutes parameters into SQL statement.
"""
def __init__(self, params):
self.params = params
self.index = 0
def __call__(self, matchobj):
index = self.index
self.index += 1
try:
return bytes(self.params[index])
except IndexError:
raise errors.ProgrammingError(
"Not enough parameters for the SQL statement")
@property
def remaining(self):
"""Returns number of parameters remaining to be substituted"""
return len(self.params) - self.index
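# A minimal illustration, not part of the upstream connector: it shows how
# execute() combines RE_PY_PARAM with _ParamSubstitutor to splice
# pre-processed parameter bytes into a statement. The byte values below stand
# in for what MySQLCursor._process_params() would normally produce (already
# escaped and quoted); the function name is purely illustrative.
def _demo_param_substitution():
    stmt = b"SELECT * FROM t1 WHERE id = %s AND name = %s"
    params = (b"5", b"'jane'")
    psub = _ParamSubstitutor(params)
    substituted = RE_PY_PARAM.sub(psub, stmt)
    # substituted == b"SELECT * FROM t1 WHERE id = 5 AND name = 'jane'"
    # psub.remaining == 0 once every %s placeholder has been consumed
    return substituted, psub.remaining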
def _bytestr_format_dict(bytestr, value_dict):
"""
>>> _bytestr_format_dict(b'%(a)s', {b'a': b'foobar'})
    b'foobar'
>>> _bytestr_format_dict(b'%%(a)s', {b'a': b'foobar'})
b'%%(a)s'
>>> _bytestr_format_dict(b'%%%(a)s', {b'a': b'foobar'})
b'%%foobar'
>>> _bytestr_format_dict(b'%(x)s %(y)s',
... {b'x': b'x=%(y)s', b'y': b'y=%(x)s'})
b'x=%(y)s y=%(x)s'
"""
def replace(matchobj):
value = None
groups = matchobj.groupdict()
if groups["conversion_type"] == b"%":
value = b"%"
if groups["conversion_type"] == b"s":
key = groups["mapping_key"].encode("utf-8") \
if PY2 else groups["mapping_key"]
value = value_dict[key]
if value is None:
raise ValueError("Unsupported conversion_type: {0}"
"".format(groups["conversion_type"]))
return value.decode("utf-8") if PY2 else value
return RE_PY_MAPPING_PARAM.sub(replace, bytestr.decode("utf-8")
if PY2 else bytestr)
class CursorBase(MySQLCursorAbstract):
"""
Base for defining MySQLCursor. This class is a skeleton and defines
methods and members as required for the Python Database API
Specification v2.0.
    It's better to inherit from MySQLCursor.
"""
_raw = False
def __init__(self):
self._description = None
self._rowcount = -1
self._last_insert_id = None
self.arraysize = 1
super(CursorBase, self).__init__()
def callproc(self, procname, args=()):
"""Calls a stored procedue with the given arguments
The arguments will be set during this session, meaning
they will be called like _<procname>__arg<nr> where
<nr> is an enumeration (+1) of the arguments.
Coding Example:
        1) Defining the Stored Routine in MySQL:
CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
BEGIN
SET pProd := pFac1 * pFac2;
END
2) Executing in Python:
args = (5,5,0) # 0 is to hold pprod
cursor.callproc('multiply', args)
print(cursor.fetchone())
Does not return a value, but a result set will be
        available when the CALL-statement executes successfully.
Raises exceptions when something is wrong.
"""
pass
def close(self):
"""Close the cursor."""
pass
def execute(self, operation, params=(), multi=False):
"""Executes the given operation
Executes the given operation substituting any markers with
the given parameters.
For example, getting all rows where id is 5:
cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
The multi argument should be set to True when executing multiple
statements in one operation. If not set and multiple results are
found, an InterfaceError will be raised.
        If warnings were generated, and connection.get_warnings is True, then
self._warnings will be a list containing these warnings.
Returns an iterator when multi is True, otherwise None.
"""
pass
    def executemany(self, operation, seq_params):
"""Execute the given operation multiple times
The executemany() method will execute the operation iterating
over the list of parameters in seq_params.
Example: Inserting 3 new employees and their phone number
data = [
('Jane','555-001'),
('Joe', '555-001'),
('John', '555-003')
]
stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s')"
cursor.executemany(stmt, data)
INSERT statements are optimized by batching the data, that is
using the MySQL multiple rows syntax.
Results are discarded. If they are needed, consider looping over
data using the execute() method.
"""
pass
def fetchone(self):
"""Returns next row of a query result set
Returns a tuple or None.
"""
pass
def fetchmany(self, size=1):
"""Returns the next set of rows of a query result, returning a
list of tuples. When no more rows are available, it returns an
empty list.
The number of rows returned can be specified using the size argument,
which defaults to one
"""
pass
def fetchall(self):
"""Returns all rows of a query result set
Returns a list of tuples.
"""
pass
def nextset(self):
"""Not Implemented."""
pass
def setinputsizes(self, sizes):
"""Not Implemented."""
pass
def setoutputsize(self, size, column=None):
"""Not Implemented."""
pass
def reset(self, free=True):
"""Reset the cursor to default"""
pass
@property
def description(self):
"""Returns description of columns in a result
This property returns a list of tuples describing the columns in
        a result set. A tuple is described as follows::
(column_name,
type,
None,
None,
None,
None,
null_ok,
column_flags) # Addition to PEP-249 specs
Returns a list of tuples.
"""
return self._description
@property
def rowcount(self):
"""Returns the number of rows produced or affected
This property returns the number of rows produced by queries
such as a SELECT, or affected rows when executing DML statements
like INSERT or UPDATE.
Note that for non-buffered cursors it is impossible to know the
number of rows produced before having fetched them all. For those,
the number of rows will be -1 right after execution, and
incremented when fetching rows.
Returns an integer.
"""
return self._rowcount
@property
def lastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
Returns the value generated for an AUTO_INCREMENT column by
the previous INSERT or UPDATE statement or None when there is
no such value available.
Returns a long value or None.
"""
return self._last_insert_id
class MySQLCursor(CursorBase):
"""Default cursor for interacting with MySQL
This cursor will execute statements and handle the result. It will
not automatically fetch all rows.
MySQLCursor should be inherited whenever other functionallity is
required. An example would to change the fetch* member functions
to return dictionaries instead of lists of values.
Implements the Python Database API Specification v2.0 (PEP-249)
"""
def __init__(self, connection=None):
CursorBase.__init__(self)
self._connection = None
self._stored_results = []
self._nextrow = (None, None)
self._warnings = None
self._warning_count = 0
self._executed = None
self._executed_list = []
self._binary = False
if connection is not None:
self._set_connection(connection)
def __iter__(self):
"""
Iteration over the result set which calls self.fetchone()
and returns the next row.
"""
return iter(self.fetchone, None)
def _set_connection(self, connection):
"""Set the connection"""
try:
self._connection = weakref.proxy(connection)
self._connection.is_connected()
except (AttributeError, TypeError):
raise errors.InterfaceError(errno=2048)
def _reset_result(self):
"""Reset the cursor to default"""
self._rowcount = -1
self._nextrow = (None, None)
self._stored_results = []
self._warnings = None
self._warning_count = 0
self._description = None
self._executed = None
self._executed_list = []
self.reset()
def _have_unread_result(self):
"""Check whether there is an unread result"""
try:
return self._connection.unread_result
except AttributeError:
return False
def next(self):
"""Used for iterating over the result set."""
return self.__next__()
def __next__(self):
"""
        Used for iterating over the result set. Calls self.fetchone()
to get the next row.
"""
try:
row = self.fetchone()
except errors.InterfaceError:
raise StopIteration
if not row:
raise StopIteration
return row
def close(self):
"""Close the cursor
Returns True when successful, otherwise False.
"""
if self._connection is None:
return False
self._connection.handle_unread_result()
self._reset_result()
self._connection = None
return True
def _process_params_dict(self, params):
"""Process query parameters given as dictionary"""
try:
to_mysql = self._connection.converter.to_mysql
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = {}
for key, value in list(params.items()):
conv = value
conv = to_mysql(conv)
conv = escape(conv)
conv = quote(conv)
if PY2:
res[key] = conv
else:
res[key.encode()] = conv
except Exception as err:
raise errors.ProgrammingError(
"Failed processing pyformat-parameters; %s" % err)
else:
return res
def _process_params(self, params):
"""Process query parameters."""
try:
res = params
to_mysql = self._connection.converter.to_mysql
escape = self._connection.converter.escape
quote = self._connection.converter.quote
res = [to_mysql(i) for i in res]
res = [escape(i) for i in res]
res = [quote(i) for i in res]
except Exception as err:
raise errors.ProgrammingError(
"Failed processing format-parameters; %s" % err)
else:
return tuple(res)
def _handle_noresultset(self, res):
"""Handles result of execute() when there is no result set
"""
try:
self._rowcount = res['affected_rows']
self._last_insert_id = res['insert_id']
self._warning_count = res['warning_count']
except (KeyError, TypeError) as err:
raise errors.ProgrammingError(
"Failed handling non-resultset; {0}".format(err))
self._handle_warnings()
if self._connection.raise_on_warnings is True and self._warnings:
raise errors.get_mysql_exception(
self._warnings[0][1], self._warnings[0][2])
def _handle_resultset(self):
"""Handles result set
This method handles the result set and is called after reading
and storing column information in _handle_result(). For non-buffering
cursors, this method is usually doing nothing.
"""
pass
def _handle_result(self, result):
"""
        Handle the result after a command was sent. The result can be either
an OK-packet or a dictionary containing column/eof information.
Raises InterfaceError when result is not a dict() or result is
invalid.
"""
if not isinstance(result, dict):
raise errors.InterfaceError('Result was not a dict()')
if 'columns' in result:
# Weak test, must be column/eof information
self._description = result['columns']
self._connection.unread_result = True
self._handle_resultset()
elif 'affected_rows' in result:
# Weak test, must be an OK-packet
self._connection.unread_result = False
self._handle_noresultset(result)
else:
raise errors.InterfaceError('Invalid result')
def _execute_iter(self, query_iter):
"""Generator returns MySQLCursor objects for multiple statements
This method is only used when multiple statements are executed
by the execute() method. It uses zip() to make an iterator from the
given query_iter (result of MySQLConnection.cmd_query_iter()) and
the list of statements that were executed.
"""
executed_list = RE_SQL_SPLIT_STMTS.split(self._executed)
i = 0
while True:
result = next(query_iter)
self._reset_result()
self._handle_result(result)
try:
self._executed = executed_list[i].strip()
i += 1
except IndexError:
self._executed = executed_list[0]
yield self
def execute(self, operation, params=None, multi=False):
"""Executes the given operation
Executes the given operation substituting any markers with
the given parameters.
For example, getting all rows where id is 5:
cursor.execute("SELECT * FROM t1 WHERE id = %s", (5,))
The multi argument should be set to True when executing multiple
statements in one operation. If not set and multiple results are
found, an InterfaceError will be raised.
        If warnings were generated, and connection.get_warnings is True, then
self._warnings will be a list containing these warnings.
Returns an iterator when multi is True, otherwise None.
"""
if not operation:
return None
if not self._connection:
raise errors.ProgrammingError("Cursor is not connected")
self._connection.handle_unread_result()
self._reset_result()
stmt = ''
try:
if not isinstance(operation, (bytes, bytearray)):
stmt = operation.encode(self._connection.python_charset)
else:
stmt = operation
except (UnicodeDecodeError, UnicodeEncodeError) as err:
raise errors.ProgrammingError(str(err))
if params is not None:
if isinstance(params, dict):
stmt = _bytestr_format_dict(
stmt, self._process_params_dict(params))
elif isinstance(params, (list, tuple)):
psub = _ParamSubstitutor(self._process_params(params))
stmt = RE_PY_PARAM.sub(psub, stmt)
if psub.remaining != 0:
raise errors.ProgrammingError(
"Not all parameters were used in the SQL statement")
self._executed = stmt
if multi:
self._executed_list = []
return self._execute_iter(self._connection.cmd_query_iter(stmt))
else:
try:
self._handle_result(self._connection.cmd_query(stmt))
except errors.InterfaceError:
if self._connection._have_next_result: # pylint: disable=W0212
raise errors.InterfaceError(
"Use multi=True when executing multiple statements")
raise
return None
def _batch_insert(self, operation, seq_params):
"""Implements multi row insert"""
def remove_comments(match):
"""Remove comments from INSERT statements.
This function is used while removing comments from INSERT
statements. If the matched string is a comment not enclosed
by quotes, it returns an empty string, else the string itself.
"""
if match.group(1):
return ""
else:
return match.group(2)
tmp = re.sub(RE_SQL_ON_DUPLICATE, '',
re.sub(RE_SQL_COMMENT, remove_comments, operation))
matches = re.search(RE_SQL_INSERT_VALUES, tmp)
if not matches:
raise errors.InterfaceError(
"Failed rewriting statement for multi-row INSERT. "
"Check SQL syntax."
)
fmt = matches.group(1).encode(self._connection.charset)
values = []
try:
stmt = operation.encode(self._connection.charset)
for params in seq_params:
tmp = fmt
if isinstance(params, dict):
tmp = _bytestr_format_dict(
tmp, self._process_params_dict(params))
else:
psub = _ParamSubstitutor(self._process_params(params))
tmp = RE_PY_PARAM.sub(psub, tmp)
if psub.remaining != 0:
raise errors.ProgrammingError(
"Not all parameters were used in the SQL statement")
# for p in self._process_params(params):
# tmp = tmp.replace(b'%s',p,1)
values.append(tmp)
if fmt in stmt:
stmt = stmt.replace(fmt, b','.join(values), 1)
self._executed = stmt
return stmt
else:
return None
except (UnicodeDecodeError, UnicodeEncodeError) as err:
raise errors.ProgrammingError(str(err))
except errors.Error:
raise
except Exception as err:
raise errors.InterfaceError(
"Failed executing the operation; %s" % err)
def executemany(self, operation, seq_params):
"""Execute the given operation multiple times
The executemany() method will execute the operation iterating
over the list of parameters in seq_params.
Example: Inserting 3 new employees and their phone number
data = [
('Jane','555-001'),
('Joe', '555-001'),
('John', '555-003')
]
stmt = "INSERT INTO employees (name, phone) VALUES ('%s','%s)"
cursor.executemany(stmt, data)
INSERT statements are optimized by batching the data, that is
using the MySQL multiple rows syntax.
Results are discarded. If they are needed, consider looping over
data using the execute() method.
"""
if not operation or not seq_params:
return None
self._connection.handle_unread_result()
try:
_ = iter(seq_params)
except TypeError:
raise errors.ProgrammingError(
"Parameters for query must be an Iterable.")
# Optimize INSERTs by batching them
if re.match(RE_SQL_INSERT_STMT, operation):
if not seq_params:
self._rowcount = 0
return
stmt = self._batch_insert(operation, seq_params)
if stmt is not None:
return self.execute(stmt)
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
if self.with_rows and self._have_unread_result():
self.fetchall()
rowcnt += self._rowcount
except (ValueError, TypeError) as err:
raise errors.InterfaceError(
"Failed executing the operation; {0}".format(err))
except:
# Raise whatever execute() raises
raise
self._rowcount = rowcnt
def stored_results(self):
"""Returns an iterator for stored results
This method returns an iterator over results which are stored when
callproc() is called. The iterator will provide MySQLCursorBuffered
instances.
        Returns an iterator.
"""
return iter(self._stored_results)
def callproc(self, procname, args=()):
"""Calls a stored procedure with the given arguments
The arguments will be set during this session, meaning
they will be called like _<procname>__arg<nr> where
<nr> is an enumeration (+1) of the arguments.
Coding Example:
1) Defining the Stored Routine in MySQL:
CREATE PROCEDURE multiply(IN pFac1 INT, IN pFac2 INT, OUT pProd INT)
BEGIN
SET pProd := pFac1 * pFac2;
END
2) Executing in Python:
args = (5, 5, 0) # 0 is to hold pprod
cursor.callproc('multiply', args)
print(cursor.fetchone())
For OUT and INOUT parameters the user should provide the
type of the parameter as well. The argument should be a
tuple with first item as the value of the parameter to pass
and second argument the type of the argument.
In the above example, one can call callproc method like:
args = (5, 5, (0, 'INT'))
cursor.callproc('multiply', args)
The type of the argument given in the tuple will be used by
        the MySQL CAST function to convert the values to the corresponding
MySQL type (See CAST in MySQL Reference for more information)
Does not return a value, but a result set will be
        available when the CALL-statement executes successfully.
Raises exceptions when something is wrong.
"""
if not procname or not isinstance(procname, str):
raise ValueError("procname must be a string")
if not isinstance(args, (tuple, list)):
raise ValueError("args must be a sequence")
argfmt = "@_{name}_arg{index}"
self._stored_results = []
results = []
try:
argnames = []
argtypes = []
if args:
for idx, arg in enumerate(args):
argname = argfmt.format(name=procname, index=idx + 1)
argnames.append(argname)
if isinstance(arg, tuple):
argtypes.append(" CAST({0} AS {1})".format(argname,
arg[1]))
self.execute("SET {0}=%s".format(argname), (arg[0],))
else:
argtypes.append(argname)
self.execute("SET {0}=%s".format(argname), (arg,))
call = "CALL {0}({1})".format(procname, ','.join(argnames))
# pylint: disable=W0212
            # We disable consuming results temporarily to make sure we are
            # getting all results
can_consume_results = self._connection._consume_results
for result in self._connection.cmd_query_iter(call):
self._connection._consume_results = False
if self._raw:
tmp = MySQLCursorBufferedRaw(self._connection._get_self())
else:
tmp = MySQLCursorBuffered(self._connection._get_self())
tmp._executed = "(a result of {0})".format(call)
tmp._handle_result(result)
if tmp._warnings is not None:
self._warnings = tmp._warnings
if 'columns' in result:
results.append(tmp)
self._connection._consume_results = can_consume_results
# pylint: enable=W0212
if argnames:
select = "SELECT {0}".format(','.join(argtypes))
self.execute(select)
self._stored_results = results
return self.fetchone()
else:
self._stored_results = results
return ()
except errors.Error:
raise
except Exception as err:
raise errors.InterfaceError(
"Failed calling stored routine; {0}".format(err))
def getlastrowid(self):
"""Returns the value generated for an AUTO_INCREMENT column
Returns the value generated for an AUTO_INCREMENT column by
the previous INSERT or UPDATE statement.
Returns a long value or None.
"""
return self._last_insert_id
def _fetch_warnings(self):
"""
        Fetch warnings by executing SHOW WARNINGS. Can be called after getting
the result.
Returns a result set or None when there were no warnings.
"""
res = []
try:
cur = self._connection.cursor(raw=False)
cur.execute("SHOW WARNINGS")
res = cur.fetchall()
cur.close()
except Exception as err:
raise errors.InterfaceError(
"Failed getting warnings; %s" % err)
if len(res):
return res
return None
def _handle_warnings(self):
"""Handle possible warnings after all results are consumed"""
if self._connection.get_warnings is True and self._warning_count:
self._warnings = self._fetch_warnings()
def _handle_eof(self, eof):
"""Handle EOF packet"""
self._connection.unread_result = False
self._nextrow = (None, None)
self._warning_count = eof['warning_count']
self._handle_warnings()
if self._connection.raise_on_warnings is True and self._warnings:
raise errors.get_mysql_exception(
self._warnings[0][1], self._warnings[0][2])
def _fetch_row(self):
"""Returns the next row in the result set
Returns a tuple or None.
"""
if not self._have_unread_result():
return None
row = None
if self._nextrow == (None, None):
(row, eof) = self._connection.get_row(
binary=self._binary, columns=self.description)
else:
(row, eof) = self._nextrow
if row:
self._nextrow = self._connection.get_row(
binary=self._binary, columns=self.description)
eof = self._nextrow[1]
if eof is not None:
self._handle_eof(eof)
if self._rowcount == -1:
self._rowcount = 1
else:
self._rowcount += 1
if eof:
self._handle_eof(eof)
return row
def fetchone(self):
"""Returns next row of a query result set
Returns a tuple or None.
"""
row = self._fetch_row()
if row:
if hasattr(self._connection, 'converter'):
return self._connection.converter.row_to_python(
row, self.description)
return row
return None
def fetchmany(self, size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0 and self._have_unread_result():
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
(rows, eof) = self._connection.get_rows()
if self._nextrow[0]:
rows.insert(0, self._nextrow[0])
if hasattr(self._connection, 'converter'):
row_to_python = self._connection.converter.row_to_python
rows = [row_to_python(row, self.description) for row in rows]
self._handle_eof(eof)
rowcount = len(rows)
if rowcount >= 0 and self._rowcount == -1:
self._rowcount = 0
self._rowcount += rowcount
return rows
@property
def column_names(self):
"""Returns column names
This property returns the column names as a tuple.
Returns a tuple.
"""
if not self.description:
return ()
return tuple([d[0] for d in self.description])
@property
def statement(self):
"""Returns the executed statement
This property returns the executed statement. When multiple
statements were executed, the current statement in the iterator
will be returned.
"""
if self._executed is None:
return None
try:
return self._executed.strip().decode('utf-8')
except (AttributeError, UnicodeDecodeError):
return self._executed.strip()
@property
def with_rows(self):
"""Returns whether the cursor could have rows returned
This property returns True when column descriptions are available
and possibly also rows, which will need to be fetched.
Returns True or False.
"""
if not self.description:
return False
return True
def __str__(self):
fmt = "{class_name}: {stmt}"
if self._executed:
try:
executed = self._executed.decode('utf-8')
except AttributeError:
executed = self._executed
if len(executed) > 40:
executed = executed[:40] + '..'
else:
executed = '(Nothing executed yet)'
return fmt.format(class_name=self.__class__.__name__, stmt=executed)
class MySQLCursorBuffered(MySQLCursor):
"""Cursor which fetches rows within execute()"""
def __init__(self, connection=None):
MySQLCursor.__init__(self, connection)
self._rows = None
self._next_row = 0
def _handle_resultset(self):
(self._rows, eof) = self._connection.get_rows()
self._rowcount = len(self._rows)
self._handle_eof(eof)
self._next_row = 0
try:
self._connection.unread_result = False
except Exception:
pass
def reset(self, free=True):
self._rows = None
def _fetch_row(self):
try:
row = self._rows[self._next_row]
except (IndexError, TypeError):
return None
self._next_row += 1
return row
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
res = []
if hasattr(self._connection, 'converter'):
for row in self._rows[self._next_row:]:
res.append(self._connection.converter.row_to_python(
row, self.description))
else:
res = self._rows[self._next_row:]
self._next_row = len(self._rows)
return res
def fetchmany(self, size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0:
cnt -= 1
row = self.fetchone()
if row:
res.append(row)
return res
@property
def with_rows(self):
return self._rows is not None
class MySQLCursorRaw(MySQLCursor):
"""
Skips conversion from MySQL datatypes to Python types when fetching rows.
"""
_raw = True
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
(rows, eof) = self._connection.get_rows()
if self._nextrow[0]:
rows.insert(0, self._nextrow[0])
self._handle_eof(eof)
rowcount = len(rows)
if rowcount >= 0 and self._rowcount == -1:
self._rowcount = 0
self._rowcount += rowcount
return rows
class MySQLCursorBufferedRaw(MySQLCursorBuffered):
"""
Cursor which skips conversion from MySQL datatypes to Python types when
fetching rows and fetches rows within execute().
"""
_raw = True
def fetchone(self):
row = self._fetch_row()
if row:
return row
return None
def fetchall(self):
if self._rows is None:
raise errors.InterfaceError("No result set to fetch from.")
return [r for r in self._rows[self._next_row:]]
@property
def with_rows(self):
return self._rows is not None
class MySQLCursorPrepared(MySQLCursor):
"""Cursor using MySQL Prepared Statements
"""
def __init__(self, connection=None):
super(MySQLCursorPrepared, self).__init__(connection)
self._rows = None
self._next_row = 0
self._prepared = None
self._binary = True
self._have_result = None
def callproc(self, *args, **kwargs):
"""Calls a stored procedue
Not supported with MySQLCursorPrepared.
"""
raise errors.NotSupportedError()
def close(self):
"""Close the cursor
This method will try to deallocate the prepared statement and close
the cursor.
"""
if self._prepared:
try:
self._connection.cmd_stmt_close(self._prepared['statement_id'])
except errors.Error:
# We tried to deallocate, but it's OK when we fail.
pass
self._prepared = None
super(MySQLCursorPrepared, self).close()
def _row_to_python(self, rowdata, desc=None):
"""Convert row data from MySQL to Python types
The conversion is done while reading binary data in the
protocol module.
"""
pass
def _handle_result(self, res):
"""Handle result after execution"""
if isinstance(res, dict):
self._connection.unread_result = False
self._have_result = False
self._handle_noresultset(res)
else:
self._description = res[1]
self._connection.unread_result = True
self._have_result = True
def execute(self, operation, params=(), multi=False): # multi is unused
"""Prepare and execute a MySQL Prepared Statement
This method will prepare the given operation and execute it using
the optionally given parameters.
If the cursor instance already had a prepared statement, it is
first closed.
"""
if operation is not self._executed:
if self._prepared:
self._connection.cmd_stmt_close(self._prepared['statement_id'])
self._executed = operation
try:
if not isinstance(operation, bytes):
operation = operation.encode(self._connection.charset)
except (UnicodeDecodeError, UnicodeEncodeError) as err:
raise errors.ProgrammingError(str(err))
# need to convert %s to ? before sending it to MySQL
if b'%s' in operation:
operation = re.sub(RE_SQL_FIND_PARAM, b'?', operation)
try:
self._prepared = self._connection.cmd_stmt_prepare(operation)
except errors.Error:
self._executed = None
raise
self._connection.cmd_stmt_reset(self._prepared['statement_id'])
if self._prepared['parameters'] and not params:
return
elif len(self._prepared['parameters']) != len(params):
raise errors.ProgrammingError(
errno=1210,
msg="Incorrect number of arguments " \
"executing prepared statement")
res = self._connection.cmd_stmt_execute(
self._prepared['statement_id'],
data=params,
parameters=self._prepared['parameters'])
self._handle_result(res)
def executemany(self, operation, seq_params):
"""Prepare and execute a MySQL Prepared Statement many times
This method will prepare the given operation and execute it with each
tuple found in the list seq_params.
If the cursor instance already had a prepared statement, it is
first closed.
executemany() simply calls execute().
"""
rowcnt = 0
try:
for params in seq_params:
self.execute(operation, params)
if self.with_rows and self._have_unread_result():
self.fetchall()
rowcnt += self._rowcount
except (ValueError, TypeError) as err:
raise errors.InterfaceError(
"Failed executing the operation; {error}".format(error=err))
except:
# Raise whatever execute() raises
raise
self._rowcount = rowcnt
def fetchone(self):
"""Returns next row of a query result set
Returns a tuple or None.
"""
return self._fetch_row() or None
def fetchmany(self, size=None):
res = []
cnt = (size or self.arraysize)
while cnt > 0 and self._have_unread_result():
cnt -= 1
row = self._fetch_row()
if row:
res.append(row)
return res
def fetchall(self):
if not self._have_unread_result():
raise errors.InterfaceError("No result set to fetch from.")
(rows, eof) = self._connection.get_rows(
binary=self._binary, columns=self.description)
self._rowcount = len(rows)
self._handle_eof(eof)
return rows
class MySQLCursorDict(MySQLCursor):
"""
Cursor fetching rows as dictionaries.
The fetch methods of this class will return dictionaries instead of tuples.
Each row is a dictionary that looks like:
row = {
"col1": value1,
"col2": value2
}
"""
def _row_to_python(self, rowdata, desc=None):
"""Convert a MySQL text result row to Python types
Returns a dictionary.
"""
if hasattr(self._connection, 'converter'):
row = self._connection.converter.row_to_python(rowdata, desc)
else:
row = rowdata
if row:
return dict(zip(self.column_names, row))
return None
def fetchone(self):
"""Returns next row of a query result set
"""
row = self._fetch_row()
if row:
return self._row_to_python(row, self.description)
return None
def fetchall(self):
"""Returns all rows of a query result set
"""
if not self._have_unread_result():
raise errors.InterfaceError(ERR_NO_RESULT_TO_FETCH)
(rows, eof) = self._connection.get_rows()
if self._nextrow[0]:
rows.insert(0, self._nextrow[0])
res = [self._row_to_python(row, self.description)
for row in rows]
self._handle_eof(eof)
rowcount = len(rows)
if rowcount >= 0 and self._rowcount == -1:
self._rowcount = 0
self._rowcount += rowcount
return res
class MySQLCursorNamedTuple(MySQLCursor):
"""
Cursor fetching rows as named tuple.
The fetch methods of this class will return namedtuples instead of tuples.
Each row is returned as a namedtuple and the values can be accessed as:
row.col1, row.col2
"""
def _row_to_python(self, rowdata, desc=None):
"""Convert a MySQL text result row to Python types
Returns a named tuple.
"""
if hasattr(self._connection, 'converter'):
row = self._connection.converter.row_to_python(rowdata, desc)
else:
row = rowdata
if row:
# pylint: disable=W0201
self.named_tuple = namedtuple('Row', self.column_names)
# pylint: enable=W0201
return self.named_tuple(*row)
def fetchone(self):
"""Returns next row of a query result set
"""
row = self._fetch_row()
if row:
if hasattr(self._connection, 'converter'):
return self._row_to_python(row, self.description)
else:
return row
return None
def fetchall(self):
"""Returns all rows of a query result set
"""
if not self._have_unread_result():
raise errors.InterfaceError(ERR_NO_RESULT_TO_FETCH)
(rows, eof) = self._connection.get_rows()
if self._nextrow[0]:
rows.insert(0, self._nextrow[0])
res = [self._row_to_python(row, self.description)
for row in rows]
self._handle_eof(eof)
rowcount = len(rows)
if rowcount >= 0 and self._rowcount == -1:
self._rowcount = 0
self._rowcount += rowcount
return res
class MySQLCursorBufferedDict(MySQLCursorDict, MySQLCursorBuffered):
"""
Buffered Cursor fetching rows as dictionaries.
"""
def fetchone(self):
"""Returns next row of a query result set
"""
row = self._fetch_row()
if row:
return self._row_to_python(row, self.description)
return None
def fetchall(self):
"""Returns all rows of a query result set
"""
if self._rows is None:
raise errors.InterfaceError(ERR_NO_RESULT_TO_FETCH)
res = []
for row in self._rows[self._next_row:]:
res.append(self._row_to_python(
row, self.description))
self._next_row = len(self._rows)
return res
class MySQLCursorBufferedNamedTuple(MySQLCursorNamedTuple, MySQLCursorBuffered):
"""
Buffered Cursor fetching rows as named tuple.
"""
def fetchone(self):
"""Returns next row of a query result set
"""
row = self._fetch_row()
if row:
return self._row_to_python(row, self.description)
return None
def fetchall(self):
"""Returns all rows of a query result set
"""
if self._rows is None:
raise errors.InterfaceError(ERR_NO_RESULT_TO_FETCH)
res = []
for row in self._rows[self._next_row:]:
res.append(self._row_to_python(
row, self.description))
self._next_row = len(self._rows)
return res
|
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mesos.interface
import mesos.native
from mesos.interface import mesos_pb2
import os
import sys
import time
import re
import threading
from optparse import OptionParser
from subprocess import *
def mpiexec():
print "We've launched all our MPDs; waiting for them to come up"
while countMPDs() <= TOTAL_MPDS:
print "...waiting on MPD(s)..."
time.sleep(1)
print "Got %d mpd(s), running mpiexec" % TOTAL_MPDS
try:
print "Running mpiexec"
call([MPICH2PATH + 'mpiexec', '-1', '-n', str(TOTAL_MPDS)] + MPI_PROGRAM)
except OSError as e:
print >> sys.stderr, "Error executing mpiexec"
print >> sys.stderr, e
exit(2)
print "mpiexec completed, calling mpdallexit %s" % MPD_PID
# Ring/slave mpd daemons will be killed on executor's shutdown() if
# framework scheduler fails to call 'mpdallexit'.
call([MPICH2PATH + 'mpdallexit', MPD_PID])
class MPIScheduler(mesos.interface.Scheduler):
def __init__(self, options, ip, port):
self.mpdsLaunched = 0
self.mpdsFinished = 0
self.ip = ip
self.port = port
self.options = options
self.startedExec = False
def registered(self, driver, fid, masterInfo):
print "Mesos MPI scheduler and mpd running at %s:%s" % (self.ip, self.port)
print "Registered with framework ID %s" % fid.value
def resourceOffers(self, driver, offers):
print "Got %d resource offers" % len(offers)
for offer in offers:
print "Considering resource offer %s from %s" % (offer.id.value, offer.hostname)
if self.mpdsLaunched == TOTAL_MPDS:
print "Declining permanently because we have already launched enough tasks"
driver.declineOffer(offer.id)
continue
cpus = 0
mem = 0
tasks = []
for resource in offer.resources:
if resource.name == "cpus":
cpus = resource.scalar.value
elif resource.name == "mem":
mem = resource.scalar.value
if cpus < CPUS or mem < MEM:
print "Declining offer due to too few resources"
driver.declineOffer(offer.id)
else:
tid = self.mpdsLaunched
self.mpdsLaunched += 1
print "Accepting offer on %s to start mpd %d" % (offer.hostname, tid)
task = mesos_pb2.TaskInfo()
task.task_id.value = str(tid)
task.slave_id.value = offer.slave_id.value
task.name = "task %d " % tid
cpus = task.resources.add()
cpus.name = "cpus"
cpus.type = mesos_pb2.Value.SCALAR
cpus.scalar.value = CPUS
mem = task.resources.add()
mem.name = "mem"
mem.type = mesos_pb2.Value.SCALAR
mem.scalar.value = MEM
task.command.value = "%smpd --noconsole --ncpus=%d --host=%s --port=%s" % (MPICH2PATH, CPUS, self.ip, self.port)
tasks.append(task)
print "Replying to offer: launching mpd %d on host %s" % (tid, offer.hostname)
driver.launchTasks(offer.id, tasks)
if not self.startedExec and self.mpdsLaunched == TOTAL_MPDS:
threading.Thread(target = mpiexec).start()
self.startedExec = True
def statusUpdate(self, driver, update):
print "Task %s in state %s" % (update.task_id.value, update.state)
if (update.state == mesos_pb2.TASK_FAILED or
update.state == mesos_pb2.TASK_KILLED or
update.state == mesos_pb2.TASK_LOST):
print "A task finished unexpectedly, calling mpdexit on %s" % MPD_PID
call([MPICH2PATH + "mpdexit", MPD_PID])
driver.stop()
if (update.state == mesos_pb2.TASK_FINISHED):
self.mpdsFinished += 1
if self.mpdsFinished == TOTAL_MPDS:
print "All tasks done, all mpd's closed, exiting"
driver.stop()
def countMPDs():
try:
mpdtraceproc = Popen(MPICH2PATH + "mpdtrace -l", shell=True, stdout=PIPE)
mpdtraceline = mpdtraceproc.communicate()[0]
return mpdtraceline.count("\n")
except OSError as e:
print >>sys.stderr, "Error starting mpd or mpdtrace"
print >>sys.stderr, e
exit(2)
def parseIpPort(s):
ba = re.search("([^_]*)_([0-9]*)", s)
ip = ba.group(1)
port = ba.group(2)
return (ip, port)
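# Worked example for parseIpPort (the input string is hypothetical; the regex
# takes everything before the first underscore plus the digits that follow it):
#
#   parseIpPort("myhost_55555 (192.168.0.1)")  # -> ("myhost", "55555")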
if __name__ == "__main__":
parser = OptionParser(usage="Usage: %prog [options] mesos_master mpi_program")
parser.disable_interspersed_args()
parser.add_option("-n", "--num",
help="number of mpd's to allocate (default 1)",
dest="num", type="int", default=1)
parser.add_option("-c", "--cpus",
help="number of cpus per mpd (default 1)",
dest="cpus", type="int", default=1)
parser.add_option("-m","--mem",
help="number of MB of memory per mpd (default 1GB)",
dest="mem", type="int", default=1024)
parser.add_option("--name",
help="framework name", dest="name", type="string")
parser.add_option("-p","--path",
help="path to look for MPICH2 binaries (mpd, mpiexec, etc.)",
dest="path", type="string", default="")
parser.add_option("--ifhn-master",
help="alt. interface hostname for what mpd is running on (for scheduler)",
dest="ifhn_master", type="string")
# Add options to configure cpus and mem.
(options,args) = parser.parse_args()
if len(args) < 2:
print >> sys.stderr, "At least two parameters required."
print >> sys.stderr, "Use --help to show usage."
exit(2)
TOTAL_MPDS = options.num
CPUS = options.cpus
MEM = options.mem
MPI_PROGRAM = args[1:]
# Give options.path a trailing '/', if it doesn't have one already.
MPICH2PATH = os.path.join(options.path, "")
print "Connecting to Mesos master %s" % args[0]
try:
mpd_cmd = MPICH2PATH + "mpd"
mpdtrace_cmd = MPICH2PATH + "mpdtrace -l"
if options.ifhn_master is not None:
call([mpd_cmd, "--daemon", "--ifhn=" + options.ifhn_master])
else:
call([mpd_cmd, "--daemon"])
mpdtraceproc = Popen(mpdtrace_cmd, shell=True, stdout=PIPE)
mpdtraceout = mpdtraceproc.communicate()[0]
except OSError as e:
print >> sys.stderr, "Error starting mpd or mpdtrace"
print >> sys.stderr, e
exit(2)
(ip,port) = parseIpPort(mpdtraceout)
MPD_PID = mpdtraceout.split(" ")[0]
print "MPD_PID is %s" % MPD_PID
scheduler = MPIScheduler(options, ip, port)
framework = mesos_pb2.FrameworkInfo()
framework.user = ""
if options.name is not None:
framework.name = options.name
else:
framework.name = "MPI: %s" % MPI_PROGRAM[0]
driver = mesos.native.MesosSchedulerDriver(
scheduler,
framework,
args[0])
sys.exit(0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1)
|
|
"""Dependency injector provided instance provider unit tests."""
import unittest
from dependency_injector import containers, providers
class Service:
def __init__(self, value):
self.value = value
self.values = [self.value]
def __call__(self):
return self.value
def __getitem__(self, item):
return self.values[item]
def get_value(self):
return self.value
def get_closure(self):
def closure():
return self.value
return closure
class Client:
def __init__(self, value):
self.value = value
class Container(containers.DeclarativeContainer):
service = providers.Singleton(Service, value='foo')
client_attribute = providers.Factory(
Client,
value=service.provided.value,
)
client_item = providers.Factory(
Client,
value=service.provided[0],
)
client_attribute_item = providers.Factory(
Client,
value=service.provided.values[0],
)
client_method_call = providers.Factory(
Client,
value=service.provided.get_value.call(),
)
client_method_closure_call = providers.Factory(
Client,
value=service.provided.get_closure.call().call(),
)
client_provided_call = providers.Factory(
Client,
value=service.provided.call(),
)
class ProvidedInstanceTests(unittest.TestCase):
def setUp(self):
self.container = Container()
def test_is_provider(self):
self.assertTrue(providers.is_provider(self.container.service.provided))
def test_attribute(self):
client = self.container.client_attribute()
self.assertEqual(client.value, 'foo')
def test_item(self):
client = self.container.client_item()
self.assertEqual(client.value, 'foo')
def test_attribute_item(self):
client = self.container.client_attribute_item()
self.assertEqual(client.value, 'foo')
def test_method_call(self):
client = self.container.client_method_call()
self.assertEqual(client.value, 'foo')
def test_method_closure_call(self):
client = self.container.client_method_closure_call()
self.assertEqual(client.value, 'foo')
def test_provided_call(self):
client = self.container.client_provided_call()
self.assertEqual(client.value, 'foo')
def test_call_overridden(self):
value = 'bar'
with self.container.service.override(Service(value)):
self.assertEqual(self.container.client_attribute().value, value)
self.assertEqual(self.container.client_item().value, value)
self.assertEqual(self.container.client_attribute_item().value, value)
self.assertEqual(self.container.client_method_call().value, value)
def test_repr_provided_instance(self):
provider = self.container.service.provided
self.assertEqual(
'ProvidedInstance(\'{0}\')'.format(repr(self.container.service)),
repr(provider),
)
def test_repr_attribute_getter(self):
provider = self.container.service.provided.value
self.assertEqual(
'AttributeGetter(\'value\')',
repr(provider),
)
def test_repr_item_getter(self):
provider = self.container.service.provided['test-test']
self.assertEqual(
'ItemGetter(\'test-test\')',
repr(provider),
)
class LazyInitTests(unittest.TestCase):
def test_provided_instance(self):
provides = providers.Object(object())
provider = providers.ProvidedInstance()
provider.set_provides(provides)
self.assertIs(provider.provides, provides)
self.assertIs(provider.set_provides(providers.Provider()), provider)
def test_attribute_getter(self):
provides = providers.Object(object())
provider = providers.AttributeGetter()
provider.set_provides(provides)
provider.set_name('__dict__')
self.assertIs(provider.provides, provides)
self.assertEqual(provider.name, '__dict__')
self.assertIs(provider.set_provides(providers.Provider()), provider)
self.assertIs(provider.set_name('__dict__'), provider)
def test_item_getter(self):
provides = providers.Object({'foo': 'bar'})
provider = providers.ItemGetter()
provider.set_provides(provides)
provider.set_name('foo')
self.assertIs(provider.provides, provides)
self.assertEqual(provider.name, 'foo')
self.assertIs(provider.set_provides(providers.Provider()), provider)
self.assertIs(provider.set_name('foo'), provider)
def test_method_caller(self):
provides = providers.Object(lambda: 42)
provider = providers.MethodCaller()
provider.set_provides(provides)
self.assertIs(provider.provides, provides)
self.assertEqual(provider(), 42)
self.assertIs(provider.set_provides(providers.Provider()), provider)
class ProvidedInstancePuzzleTests(unittest.TestCase):
def test_puzzled(self):
service = providers.Singleton(Service, value='foo-bar')
dependency = providers.Object(
{
'a': {
'b': {
'c1': 10,
'c2': lambda arg: {'arg': arg}
},
},
},
)
test_list = providers.List(
dependency.provided['a']['b']['c1'],
dependency.provided['a']['b']['c2'].call(22)['arg'],
dependency.provided['a']['b']['c2'].call(service)['arg'],
dependency.provided['a']['b']['c2'].call(service)['arg'].value,
dependency.provided['a']['b']['c2'].call(service)['arg'].get_value.call(),
)
result = test_list()
self.assertEqual(
result,
[
10,
22,
service(),
'foo-bar',
'foo-bar',
],
)
class ProvidedInstanceInBaseClassTests(unittest.TestCase):
def test_provided_attribute(self):
provider = providers.Provider()
assert isinstance(provider.provided, providers.ProvidedInstance)
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.jvm.subsystems.junit import JUnit
from pants.backend.jvm.targets.junit_tests import JUnitTests
from pants.backend.jvm.tasks.junit_run import JUnitRun
from pants.backend.python.targets.python_tests import PythonTests
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.files import Files
from pants.build_graph.resources import Resources
from pants.ivy.bootstrapper import Bootstrapper
from pants.ivy.ivy_subsystem import IvySubsystem
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import SubprocessExecutor
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import safe_file_dump, touch
from pants.util.process_handler import subprocess
from pants_test.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase
from pants_test.subsystem.subsystem_util import global_subsystem_instance, init_subsystem
from pants_test.tasks.task_test_base import ensure_cached
class JUnitRunnerTest(JvmToolTaskTestBase):
@classmethod
def task_type(cls):
return JUnitRun
@property
def alias_groups(self):
return super(JUnitRunnerTest, self).alias_groups.merge(BuildFileAliases(
targets={
'files': Files,
'junit_tests': JUnitTests,
'python_tests': PythonTests,
},
))
def setUp(self):
super(JUnitRunnerTest, self).setUp()
init_subsystem(JUnit)
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_runner_success(self):
self._execute_junit_runner(
[('FooTest.java', dedent("""
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
assertTrue(5 > 3);
}
}
"""))]
)
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_junit_runner_failure(self):
with self.assertRaises(TaskError) as cm:
self._execute_junit_runner(
[('FooTest.java', dedent("""
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
assertTrue(5 < 3);
}
}
"""))]
)
self.assertEqual([t.name for t in cm.exception.failed_targets], ['foo_test'])
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_junit_runner_error(self):
with self.assertRaises(TaskError) as cm:
self._execute_junit_runner(
[('FooTest.java', dedent("""
import org.junit.Test;
public class FooTest {
@Test
public void testFoo() {
throw new RuntimeException("test error");
}
}
"""))]
)
self.assertEqual([t.name for t in cm.exception.failed_targets], ['foo_test'])
def _execute_junit_runner(self, list_of_filename_content_tuples, create_some_resources=True,
target_name=None):
# Create the temporary base test directory
test_rel_path = 'tests/java/org/pantsbuild/foo'
test_abs_path = self.create_dir(test_rel_path)
# Create the temporary classes directory under work dir
test_classes_abs_path = self.create_workdir_dir(test_rel_path)
test_java_file_abs_paths = []
# Generate the temporary java test source code.
for filename, content in list_of_filename_content_tuples:
test_java_file_rel_path = os.path.join(test_rel_path, filename)
test_java_file_abs_path = self.create_file(test_java_file_rel_path, content)
test_java_file_abs_paths.append(test_java_file_abs_path)
# Invoke ivy to resolve classpath for junit.
classpath_file_abs_path = os.path.join(test_abs_path, 'junit.classpath')
ivy_subsystem = global_subsystem_instance(IvySubsystem)
distribution = DistributionLocator.cached(jdk=True)
ivy = Bootstrapper(ivy_subsystem=ivy_subsystem).ivy()
ivy.execute(args=['-cachepath', classpath_file_abs_path,
'-dependency', 'junit', 'junit-dep', '4.10'],
executor=SubprocessExecutor(distribution=distribution))
with open(classpath_file_abs_path) as fp:
classpath = fp.read()
# Now directly invoking javac to compile the test java code into java class
# so later we can inject the class into products mapping for JUnitRun to execute
# the test on.
javac = distribution.binary('javac')
subprocess.check_call(
[javac, '-d', test_classes_abs_path, '-cp', classpath] + test_java_file_abs_paths)
# If a target_name is specified create a target with it, otherwise create a junit_tests target.
if target_name:
target = self.target(target_name)
else:
target = self.create_library(test_rel_path, 'junit_tests', 'foo_test', ['FooTest.java'])
target_roots = []
if create_some_resources:
# Create a synthetic resource target.
target_roots.append(self.make_target('some_resources', Resources))
target_roots.append(target)
# Set the context with the two targets, one junit_tests target and
# one synthetic resources target.
# The synthetic resources target is to make sure we won't regress
# in the future with bug like https://github.com/pantsbuild/pants/issues/508. Note
# in that bug, the resources target must be the first one in the list.
context = self.context(target_roots=target_roots)
# Before we run the task, we need to inject the "runtime_classpath" with
# the compiled test java classes that JUnitRun will know which test
# classes to execute. In a normal run, this "runtime_classpath" will be
# populated by java compilation step.
self.populate_runtime_classpath(context=context, classpath=[test_classes_abs_path])
# Finally execute the task.
self.execute(context)
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_junit_runner_raises_no_error_on_non_junit_target(self):
"""Run pants against a `python_tests` target, but set an option for the `test.junit` task. This
should execute without error.
"""
self.add_to_build_file('foo', dedent("""
python_tests(
name='hello',
sources=['some_file.py'],
)
"""
))
self.set_options(test='#abc')
self.execute(self.context(target_roots=[self.target('foo:hello')]))
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_empty_sources(self):
self.add_to_build_file('foo', dedent("""
junit_tests(
name='empty',
sources=[],
)
"""
))
task = self.prepare_execute(self.context(target_roots=[self.target('foo:empty')]))
with self.assertRaisesRegexp(TargetDefinitionException,
r'must include a non-empty set of sources'):
task.execute()
# We should skip the execution (and caching) phase when there are no test sources.
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_allow_empty_sources(self):
self.add_to_build_file('foo', dedent("""
junit_tests(
name='empty',
sources=[],
)
"""
))
self.set_options(allow_empty_sources=True)
context = self.context(target_roots=[self.target('foo:empty')])
self.populate_runtime_classpath(context=context)
self.execute(context)
def test_request_classes_by_source(self):
"""`classes_by_source` is expensive to compute: confirm that it is only computed when needed."""
# Class names (with and without a method name) should not trigger.
self.assertFalse(JUnitRun.request_classes_by_source(['com.goo.ber']))
self.assertFalse(JUnitRun.request_classes_by_source(['com.goo.ber#method']))
# Existing files (with and without the method name) should trigger.
srcfile = os.path.join(self.test_workdir, 'this.is.a.source.file.scala')
safe_file_dump(srcfile, 'content!')
self.assertTrue(JUnitRun.request_classes_by_source([srcfile]))
self.assertTrue(JUnitRun.request_classes_by_source(['{}#method'.format(srcfile)]))
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_runner_extra_jvm_options(self):
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
extra_jvm_options=['-Dexample.property=1'],
)
self._execute_junit_runner([('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
String exampleProperty = System.getProperty("example.property");
assertTrue(exampleProperty != null && exampleProperty.equals("1"));
}
}
"""))], target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_runner_multiple_extra_jvm_options(self):
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
extra_jvm_options=['-Dexample.property1=1','-Dexample.property2=2'],
)
self._execute_junit_runner([('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
String exampleProperty1 = System.getProperty("example.property1");
assertTrue(exampleProperty1 != null && exampleProperty1.equals("1"));
String exampleProperty2 = System.getProperty("example.property2");
assertTrue(exampleProperty2 != null && exampleProperty2.equals("2"));
String exampleProperty3 = System.getProperty("example.property3");
assertTrue(exampleProperty3 == null);
}
}
"""))], target_name='tests/java/org/pantsbuild/foo:foo_test')
# 2 runs with different targets (unique configurations), should cache twice.
@ensure_cached(JUnitRun, expected_num_artifacts=2)
def test_junit_runner_extra_env_vars(self):
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
extra_env_vars={
'HELLO': 27,
'THERE': 32,
},
)
self.make_target(
spec='tests/java/org/pantsbuild/foo:bar_test',
target_type=JUnitTests,
sources=['FooTest.java'],
extra_env_vars={
'THE_ANSWER': 42,
'HELLO': 12,
},
)
self._execute_junit_runner(
[
('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class FooTest {
@Test
public void testFoo() {
assertEquals("27", System.getenv().get("HELLO"));
assertEquals("32", System.getenv().get("THERE"));
}
}
"""))
], target_name='tests/java/org/pantsbuild/foo:foo_test')
# Execute twice in a row to make sure the environment changes aren't sticky.
self._execute_junit_runner([('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class FooTest {
@Test
public void testFoo() {
assertEquals("12", System.getenv().get("HELLO"));
assertEquals("42", System.getenv().get("THE_ANSWER"));
assertFalse(System.getenv().containsKey("THERE"));
}
}
"""))], target_name='tests/java/org/pantsbuild/foo:bar_test', create_some_resources=False)
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_runner_extra_env_vars_none(self):
with environment_as(THIS_VARIABLE="12", THAT_VARIABLE="This is a variable."):
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
extra_env_vars={
'HELLO': None,
'THERE': False,
'THIS_VARIABLE': None
},
)
self._execute_junit_runner([('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class FooTest {
@Test
public void testFoo() {
assertEquals("False", System.getenv().get("THERE"));
assertEquals("This is a variable.", System.getenv().get("THAT_VARIABLE"));
assertFalse(System.getenv().containsKey("HELLO"));
assertFalse(System.getenv().containsKey("THIS_VARIABLE"));
}
}
"""))], target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_run_with_too_many_args(self):
max_subprocess_args = 2
num_of_classes = 5
list_of_filename_content_tuples = []
for n in range(num_of_classes):
filename = 'FooTest{}.java'.format(n)
content = dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest{}{{
@Test
public void testFoo() {{
int x = 5;
}}
}}""".format(n))
list_of_filename_content_tuples.append((filename, content))
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=[name for name, _ in list_of_filename_content_tuples],
)
self.set_options(max_subprocess_args=max_subprocess_args)
self._execute_junit_runner(list_of_filename_content_tuples,
target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_run_chroot(self):
self.create_files('config/org/pantsbuild/foo', ['sentinel', 'another'])
files = self.make_target(
spec='config/org/pantsbuild/foo:sentinel',
target_type=Files,
sources=['sentinel']
)
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
dependencies=[files]
)
content = dedent("""
package org.pantsbuild.foo;
import java.io.File;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
assertTrue(new File("config/org/pantsbuild/foo/sentinel").exists());
assertFalse(new File("config/org/pantsbuild/foo/another").exists());
}
}
""")
self.set_options(chroot=True)
self._execute_junit_runner([('FooTest.java', content)],
target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_junit_run_chroot_cwd_mutex(self):
with temporary_dir() as chroot:
self.set_options(chroot=True, cwd=chroot)
with self.assertRaises(JUnitRun.OptionError):
self.execute(self.context())
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_run_target_cwd_trumps_chroot(self):
with temporary_dir() as target_cwd:
self.create_files('config/org/pantsbuild/foo', ['files_dep_sentinel'])
files = self.make_target(
spec='config/org/pantsbuild/foo:sentinel',
target_type=Files,
sources=['files_dep_sentinel']
)
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
dependencies=[files],
cwd=target_cwd
)
content = dedent("""
package org.pantsbuild.foo;
import java.io.File;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class FooTest {{
@Test
public void testFoo() {{
assertTrue(new File("target_cwd_sentinel").exists());
// We declare a Files dependency on this file, but since we run in a CWD not in a
// chroot and not in the build root, we can't find it at the expected relative path.
assertFalse(new File("config/org/pantsbuild/foo/files_dep_sentinel").exists());
// As a sanity check, it is at the expected absolute path though.
File buildRoot = new File("{}");
assertTrue(new File(buildRoot,
"config/org/pantsbuild/foo/files_dep_sentinel").exists());
}}
}}
""".format(self.build_root))
touch(os.path.join(target_cwd, 'target_cwd_sentinel'))
self.set_options(chroot=True)
self._execute_junit_runner([('FooTest.java', content)],
target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_run_target_cwd_trumps_cwd_option(self):
with temporary_dir() as target_cwd:
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
cwd=target_cwd
)
content = dedent("""
package org.pantsbuild.foo;
import java.io.File;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
assertTrue(new File("target_cwd_sentinel").exists());
assertFalse(new File("option_cwd_sentinel").exists());
}
}
""")
touch(os.path.join(target_cwd, 'target_cwd_sentinel'))
with temporary_dir() as option_cwd:
touch(os.path.join(option_cwd, 'option_cwd_sentinel'))
self.set_options(cwd=option_cwd)
self._execute_junit_runner([('FooTest.java', content)],
target_name='tests/java/org/pantsbuild/foo:foo_test')
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Deployment model for distributed gasifiers
# Jose Daniel Lara
from __future__ import division
from pyomo.environ import *
from pyomo.opt import SolverFactory
import googlemaps
from sqlalchemy import create_engine
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import ast
import time as tm
# Conventions for naming model components:
# SETS_ALL_CAPS
# VarsCamelCase
# params_pothole_case
# Constraints_Words_Capitalized_With_Underscores
# Initialize the model
model = ConcreteModel()
"""
This portion of the code queries the server to obtain the lists of substations
and biomass sources from the database. It performs a pre-filtering by reducing
the search space of the Google API so that no queries are made above a certain
linear distance.
The workflow is as follows:
- Create the engine to connect to the DB.
- Run the query into a pandas dataframe that encloses the potential routes.
- In the database the Y and X coordinates of the sites (for both substations
  and biomass sources) are independent columns, but from the optimization
  point of view each "point" is a single location, so the Y and X coordinates
  are merged into a single string column. Later on, this is also used to
  generate the dictionaries with the limits.
- Based on the list of potential routes, build biomass_coord and
  substation_coord.
"""
engine = create_engine('postgresql+pg8000://jdlara:Amadeus-2010@switch-db2.erg.berkeley.edu:5432/apl_cec?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory')
df_routes = pd.read_sql_query('select biosum.scenario1_gis.lat as source_lat, biosum.scenario1_gis.lon as source_lon, pge_ram.feeders_data.lat as dest_lat, pge_ram.feeders_data.lon as dest_lon, st_distance_Spheroid(biosum.scenario1_gis.st_makepoint, pge_ram.feeders_data.the_geom, \'SPHEROID[\"WGS 84\",6378137,298.257223563]\')/1000 as distance FROM biosum.scenario1_gis, pge_ram.feeders_data where (st_distance_Spheroid(biosum.scenario1_gis.st_makepoint, pge_ram.feeders_data.the_geom, \'SPHEROID[\"WGS 84\",6378137,298.257223563]\')/1000 <= 30);', engine)
biomass_coord = df_routes.source_lat.astype(str).str.cat(df_routes.source_lon.astype(str), sep=',')
biomass_coord = biomass_coord.values.tolist()
biomass_coord = list(set(biomass_coord))
substation_coord = df_routes.dest_lat.astype(str).str.cat(df_routes.dest_lon.astype(str), sep=',')
substation_coord = substation_coord.values.tolist()
substation_coord = list(set(substation_coord))
"""
The data for the piecewise cost of installation is given in # of gasifiers per
substation. This is why the sizes are integers. The cost is the total cost in $
of installing the amount N of gasifiers. Given that the gasifiers can only be
installed in integer number, this is a better approximation of the costs than
using a cost per kw. This explicit calculation needs to be replaced with a file.
"""
number_of_containers = [0, 1, 2, 3, 5, 10, 20]
cost = [0, 4000, 6500, 7500, 9300, 13000, 17000]
"""
Distances come from the Google Maps API; matrx_distance holds each API reply,
and the distances and travel times are extracted from it into distance_table
and time_table, keyed by (biomass source, substation) pairs.
As of now, the code checks whether the matrices already exist on disk. This
protection is quite unrefined and will need better practices in the future,
such as comparing the lists loaded in the model with the lists in the files.
For testing purposes it works and avoids constant queries to the Google API.
This portion of the code runs before the definition of the sets, to avoid
issues when some routes are not available.
"""
gmaps = googlemaps.Client(key='AIzaSyAh2PIcLDrPecSSR36z2UNubqphdHwIw7M')
distance_table = {}
time_table = {}
biomass_list = []
substation_list = []
avoid_table = {}
fail_table = {}
if os.path.isfile('time_table.dat'):
print "matrices for time exist at this time"
f = open('time_table.dat', 'r')
time_table = f.read()
f.close()
time_table = ast.literal_eval(time_table)
else:
print "There are no matrix files stored"
f = open('time_table.dat', 'w')
f.close()
if os.path.isfile('distance_table.dat'):
print "matrices for distance exist at this time"
f = open('distance_table.dat', 'r')
distance_table = f.read()
f.close()
distance_table = ast.literal_eval(distance_table)
else:
print "There are no matrix files stored"
f = open('distance_table.dat', 'w')
f.close()
if os.path.isfile('avoid_table.dat'):
print "avoid table exist at this time"
f = open('avoid_table.dat', 'r')
avoid_table = f.read()
f.close()
avoid_table = ast.literal_eval(avoid_table)
else:
print "There are no avoid table files stored"
f = open('avoid_table.dat', 'w')
f.close()
if os.path.isfile('fail_table.dat'):
print "fail table exist at this time"
else:
print "There are no fail table files stored"
f = open('fail_table.dat', 'w')
f.close()
for (bio_idx, biomass_source) in enumerate(biomass_coord):
for (sub_idx, substation_dest) in enumerate(substation_coord):
if (biomass_coord[bio_idx], substation_coord[sub_idx]) not in distance_table.keys() and (biomass_coord[bio_idx], substation_coord[sub_idx]) not in avoid_table.keys():
tm.sleep(0.3)
matrx_distance = gmaps.distance_matrix(biomass_coord[bio_idx], substation_coord[sub_idx], mode="driving", departure_time="now", traffic_model="pessimistic")
error = matrx_distance['rows'][0]['elements'][0]['status']
if error != 'OK':
f = open('fail_table.dat', 'a')
f.write(('Route data unavailable for ' + str(biomass_coord[bio_idx]) + "," + str(substation_coord[sub_idx] + "\n")))
f.close()
else:
if 0.001 * (matrx_distance['rows'][0]['elements'][0]['distance']['value']) > 160:
print "Distance too long for " + biomass_coord[bio_idx], substation_coord[sub_idx]
avoid_table[biomass_source, substation_dest] = 1
f = open('avoid_table.dat', 'w')
f.write(str(avoid_table))
f.close()
else:
if str(biomass_coord[bio_idx]) not in biomass_list:
biomass_list.extend([str(biomass_coord[bio_idx])])
if str(substation_coord[sub_idx]) not in substation_list:
substation_list.extend([str(substation_coord[sub_idx])])
distance_table[biomass_source, substation_dest] = 0.001 * (matrx_distance['rows'][0]['elements'][0]['distance']['value'])
time_table[biomass_source, substation_dest] = (1 / 3600) * (matrx_distance['rows'][0]['elements'][0]['duration_in_traffic']['value'])
f = open('distance_table.dat', 'w')
f.write(str(distance_table))
f.close()
f = open('time_table.dat', 'w')
f.write(str(time_table))
f.close()
else:
continue
# Define sets of the substations and biomass stocks and initialize them from data above.
model.SOURCES = Set(initialize=biomass_list, doc='Location of Biomass sources')
model.SUBSTATIONS = Set(initialize=substation_list, doc='Location of Substations')
model.ROUTES = Set(dimen=2, doc='Allows routes from sources to sinks',
initialize=lambda mdl: (mdl.SOURCES * mdl.SUBSTATIONS))
"""
Each piecewise approximation requires an independent set with one element per line in the approximation. In this case it is the piecewise approximation of the installation costs; more may be required soon.
"""
model.Pw_Install_Cost = Set(initialize=range(1, len(number_of_containers)),
doc='Set for the Piecewise approx of the installation cost')
"""
All the parameters are subject to modification later when doing Monte Carlo
simulations; for now they are fixed during the development stage. This first
set of parameters is not read from the files or the database.
"""
# Cost related parameters, most of them to be replaced with cost curves
model.om_cost_fix = Param(initialize=0,
doc='Fixed cost of operation per installed kW')
model.om_cost_var = Param(initialize=0,
doc='Variable cost of operation per installed kW')
model.transport_cost = Param(initialize=0.1343,
doc='Freight in dollars per BDT per km')
# Limits related parameters, read from the database/files
biomass_prod = pd.DataFrame(biomass_list)
biomass_prod['production'] = biomass_df.production
biomass_prod = biomass_prod.set_index(0).to_dict()['production']
model.source_biomass_max = Param(model.SOURCES,
initialize=biomass_prod,
doc='Capacity of supply in tons')
# TO BE READ FROM DATABASE IN THE NEAR FUTURE
substation_capacity = pd.DataFrame(substation_list)
substation_capacity['sbs_cap'] = substation_df.limit
substation_capacity = substation_capacity.set_index(0).to_dict()['sbs_cap']
model.max_capacity = Param(model.SUBSTATIONS,
initialize=substation_capacity,
doc='Max installation per site kW')
model.min_capacity = Param(model.SUBSTATIONS,
initialize=150,
doc='Min installation per site kW')
biomass_price = pd.DataFrame(biomass_list)
biomass_price['price_trgt'] = biomass_df.price_trgt
biomass_price = biomass_price.set_index(0).to_dict()['price_trgt']
model.biomass_cost = Param(model.SOURCES,
initialize=biomass_price,
doc='Cost of biomass per ton')
substation_price = pd.DataFrame(substation_list)
substation_price['sbs_price'] = 0.09  # substation_df.sbs_price
substation_price = substation_price.set_index(0).to_dict()['sbs_price']
model.fit_tariff = Param(model.SUBSTATIONS,
initialize=substation_price,
doc='Payment depending on the location $/kWh')
# Operational parameters
model.heat_rate = Param(initialize=833.3, doc='Heat rate kWh/TON')
model.capacity_factor = Param(initialize=0.85, doc='Gasifier capacity factor')
model.total_hours = Param(initialize=8760, doc='Total amount of hours in the analysis period')
model.distances = Param(model.ROUTES, initialize=distance_table, doc='Distance in km')
model.times = Param(model.ROUTES, initialize=time_table, doc='Time in Hours')
def calculate_lines(x, y):
"""
Calculate the lines connecting a series of points, given matching vectors of
x and y coordinates. This is used for the piecewise (PW) approximations and
only makes sense for monotonically increasing values.
This function does not perform a data integrity check.
"""
slope_list = {}
intercept_list = {}
for i in range(0, len(x) - 1):
slope_list[i + 1] = (y[i] - y[i + 1]) / (x[i] - x[i + 1])
intercept_list[i + 1] = y[i + 1] - slope_list[i + 1] * x[i + 1]
return slope_list, intercept_list
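# Worked example using the first three points of the install-cost data above:
# calculate_lines([0, 1, 2], [0, 4000, 6500]) returns slopes
# {1: 4000.0, 2: 2500.0} and intercepts {1: 0.0, 2: 1500.0}, i.e. the segment
# between (1, 4000) and (2, 6500) is approximated by y = 2500*x + 1500.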
install_cost_slope, install_cost_intercept = calculate_lines(number_of_containers, cost)
model.install_cost_slope = Param(model.Pw_Install_Cost, initialize=install_cost_slope, doc='PW c_i')
model.install_cost_intercept = Param(model.Pw_Install_Cost, initialize=install_cost_intercept, doc='PW d_i')
"""
This portion of the code defines the decision variables. In general the model
solves for the capacity installed per substation, the decision to install or
not, the amount of biomass transported per route, and a variable for the total
install cost resulting from the piecewise approximation.
"""
model.CapInstalled = Var(model.SUBSTATIONS, within=NonNegativeReals,
doc='Installed Capacity kW')
model.InstallorNot = Var(model.SUBSTATIONS, within=Binary,
doc='Decision to install or not')
model.BiomassTransported = Var(model.ROUTES, within=NonNegativeReals,
doc='Biomass shipment quantities in tons')
model.Fixed_Install_Cost = Var(model.SUBSTATIONS, within=NonNegativeReals,
doc='Variable for PW of installation cost')
"""
Define constraints.
Here b is the index for sources and s is the index for substations.
"""
def Subs_Nodal_Balance_rule(mdl, s):
return mdl.CapInstalled[s] * mdl.capacity_factor * mdl.total_hours == (
sum(mdl.heat_rate * mdl.BiomassTransported[b, s]
for b in mdl.SOURCES))
model.Subs_Nodal_Balance = Constraint(model.SUBSTATIONS,
rule=Subs_Nodal_Balance_rule,
doc='Energy Balance at the substation')
def Sources_Nodal_Limit_rule(mdl, b):
return sum(mdl.BiomassTransported[b, s] for s in model.SUBSTATIONS) <= (
model.source_biomass_max[b])
model.Sources_Nodal_Limit = Constraint(model.SOURCES,
rule=Sources_Nodal_Limit_rule,
doc='Limit of biomass supply at source')
def Install_Decision_Max_rule(mdl, s):
return mdl.CapInstalled[s] <= mdl.InstallorNot[s] * mdl.max_capacity[s]
model.Install_Decision_Max = Constraint(
model.SUBSTATIONS, rule=Install_Decision_Max_rule,
doc='Limit the maximum installed capacity and bind the continuous decision to the binary InstallorNot variable.')
def Install_Decision_Min_rule(mdl, s):
return mdl.CapInstalled[s] >= mdl.InstallorNot[s] * mdl.min_capacity[s]
model.Install_Decision_Min = Constraint(
model.SUBSTATIONS, rule=Install_Decision_Min_rule,
doc='Limit the mininum installed capacity and bind the continuous decision to the binary InstallorNot variable.')
# This set of constraints define the piece-wise linear approximation of
# installation cost
def Pwapprox_InstallCost_rule(mdl, s, p):
r"""
This rule approximates piecewise non-linear concave cost functions.
It takes as input the output of the function calculate_lines and the set
Pw_Install_Cost. The installation cost is calculated per substation.
The model is as follows (as per Bertsimas, Introduction to Linear
Optimization, page 17):

    \min z \quad \text{s.t.} \quad z \le c_i x + d_i \quad \forall i

where z is a slack variable, i indexes the lines that approximate the
non-linear cost function, c_i is the slope of line i, and d_i is its intercept.
"""
return (mdl.Fixed_Install_Cost[s] <= mdl.install_cost_slope[p] * (mdl.CapInstalled[s] / 150) +
mdl.install_cost_intercept[p])
model.Installation_Cost = Constraint(model.SUBSTATIONS, model.Pw_Install_Cost,
rule=Pwapprox_InstallCost_rule,
doc='PW constraint')
# Define Objective Function.
def net_revenue_rule(mdl):
return (
# Fixed capacity installation costs
sum(mdl.Fixed_Install_Cost[s] for s in mdl.SUBSTATIONS) +
# O&M costs (variable & fixed)
sum((mdl.om_cost_fix + mdl.capacity_factor * mdl.om_cost_var) * mdl.CapInstalled[s]
for s in mdl.SUBSTATIONS) +
# Transportation costs
sum(mdl.distances[r] * model.BiomassTransported[r] * mdl.transport_cost
for r in mdl.ROUTES) +
# Biomass acquisition costs.
sum(mdl.biomass_cost[b] * sum(mdl.BiomassTransported[b, s] for s in mdl.SUBSTATIONS)
for b in mdl.SOURCES) -
# Gross profits during the period
sum(mdl.fit_tariff[s] * mdl.CapInstalled[s] * mdl.capacity_factor * mdl.total_hours
for s in mdl.SUBSTATIONS)
)
model.net_profits = Objective(rule=net_revenue_rule, sense=minimize,
doc='Define objective function')
# Display of the output #
# plt.plot(size, cost)
# plt.show()
opt = SolverFactory("gurobi")
results = opt.solve(model, tee=True)
f = open('results.txt', 'w')
for v_data in model.component_data_objects(Var):
if value(v_data) > 1:
f.write(v_data.cname(True) + ", value = " + str(value(v_data)) + "\n")
f.close()
|
|
import operator
import collections
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import PY2, PY36
from pandas.tests.extension import base
from .array import JSONArray, JSONDtype, make_data
pytestmark = pytest.mark.skipif(PY2, reason="Py2 doesn't have a UserDict")
@pytest.fixture
def dtype():
return JSONDtype()
@pytest.fixture
def data():
"""Length-100 PeriodArray for semantics test."""
data = make_data()
# Why the while loop? NumPy is unable to construct an ndarray from
# equal-length ndarrays. Many of our operations involve coercing the
# EA to an ndarray of objects. To avoid random test failures, we ensure
# that our data is coercible to an ndarray. Several tests deal with only
# the first two elements, so that's what we'll check.
while len(data[0]) == len(data[1]):
data = make_data()
return JSONArray(data)
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return JSONArray([{}, {'a': 10}])
@pytest.fixture
def data_for_sorting():
return JSONArray([{'b': 1}, {'c': 4}, {'a': 2, 'c': 3}])
@pytest.fixture
def data_missing_for_sorting():
return JSONArray([{'b': 1}, {}, {'a': 4}])
@pytest.fixture
def na_value(dtype):
return dtype.na_value
@pytest.fixture
def na_cmp():
return operator.eq
@pytest.fixture
def data_for_grouping():
return JSONArray([
{'b': 1}, {'b': 1},
{}, {},
{'a': 0, 'c': 2}, {'a': 0, 'c': 2},
{'b': 1},
{'c': 2},
])
class BaseJSON(object):
# NumPy doesn't handle an array of equal-length UserDicts.
# The default assert_series_equal eventually does a
# Series.values, which raises. We work around it by
# converting the UserDicts to dicts.
def assert_series_equal(self, left, right, **kwargs):
if left.dtype.name == 'json':
assert left.dtype == right.dtype
left = pd.Series(JSONArray(left.values.astype(object)),
index=left.index, name=left.name)
right = pd.Series(JSONArray(right.values.astype(object)),
index=right.index, name=right.name)
tm.assert_series_equal(left, right, **kwargs)
def assert_frame_equal(self, left, right, *args, **kwargs):
tm.assert_index_equal(
left.columns, right.columns,
exact=kwargs.get('check_column_type', 'equiv'),
check_names=kwargs.get('check_names', True),
check_exact=kwargs.get('check_exact', False),
check_categorical=kwargs.get('check_categorical', True),
obj='{obj}.columns'.format(obj=kwargs.get('obj', 'DataFrame')))
jsons = (left.dtypes == 'json').index
for col in jsons:
self.assert_series_equal(left[col], right[col],
*args, **kwargs)
left = left.drop(columns=jsons)
right = right.drop(columns=jsons)
tm.assert_frame_equal(left, right, *args, **kwargs)
class TestDtype(BaseJSON, base.BaseDtypeTests):
pass
class TestInterface(BaseJSON, base.BaseInterfaceTests):
def test_custom_asserts(self):
# This would always trigger the KeyError from trying to put
# an array of equal-length UserDicts inside an ndarray.
data = JSONArray([collections.UserDict({'a': 1}),
collections.UserDict({'b': 2}),
collections.UserDict({'c': 3})])
a = pd.Series(data)
self.assert_series_equal(a, a)
self.assert_frame_equal(a.to_frame(), a.to_frame())
b = pd.Series(data.take([0, 0, 1]))
with pytest.raises(AssertionError):
self.assert_series_equal(a, b)
with pytest.raises(AssertionError):
self.assert_frame_equal(a.to_frame(), b.to_frame())
class TestConstructors(BaseJSON, base.BaseConstructorsTests):
@pytest.mark.skip(reason="not implemented constructor from dtype")
def test_from_dtype(self, data):
# construct from our dtype & string dtype
pass
class TestReshaping(BaseJSON, base.BaseReshapingTests):
pass
class TestGetitem(BaseJSON, base.BaseGetitemTests):
pass
class TestMissing(BaseJSON, base.BaseMissingTests):
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_series(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
@pytest.mark.skip(reason="Setting a dict as a scalar")
def test_fillna_frame(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
unhashable = pytest.mark.skip(reason="Unhashable")
unstable = pytest.mark.skipif(not PY36, # 3.6 or higher
reason="Dictionary order unstable")
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(BaseJSON, base.BaseMethodsTests):
@unhashable
def test_value_counts(self, all_data, dropna):
pass
@unhashable
def test_sort_values_frame(self):
# TODO (EA.factorize): see if _values_for_factorize allows this.
pass
@unstable
def test_argsort(self, data_for_sorting):
super(TestMethods, self).test_argsort(data_for_sorting)
@unstable
def test_argsort_missing(self, data_missing_for_sorting):
super(TestMethods, self).test_argsort_missing(
data_missing_for_sorting)
@unstable
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values(self, data_for_sorting, ascending):
super(TestMethods, self).test_sort_values(
data_for_sorting, ascending)
@unstable
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values_missing(self, data_missing_for_sorting, ascending):
super(TestMethods, self).test_sort_values_missing(
data_missing_for_sorting, ascending)
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_le(self, data_repeated):
pass
@pytest.mark.skip(reason="combine for JSONArray not supported")
def test_combine_add(self, data_repeated):
pass
@unhashable
def test_hash_pandas_object_works(self, data, kind):
        super(TestMethods, self).test_hash_pandas_object_works(data, kind)
class TestCasting(BaseJSON, base.BaseCastingTests):
@pytest.mark.skip(reason="failing on np.array(self, dtype=str)")
def test_astype_str(self):
"""This currently fails in NumPy on np.array(self, dtype=str) with
*** ValueError: setting an array element with a sequence
"""
# We intentionally don't run base.BaseSetitemTests because pandas'
# internals has trouble setting sequences of values into scalar positions.
class TestGroupby(BaseJSON, base.BaseGroupbyTests):
@unhashable
def test_groupby_extension_transform(self):
"""
This currently fails in Series.name.setter, since the
name must be hashable, but the value is a dictionary.
I think this is what we want, i.e. `.name` should be the original
values, and not the values for factorization.
"""
@unhashable
def test_groupby_extension_apply(self):
"""
This fails in Index._do_unique_check with
> hash(val)
E TypeError: unhashable type: 'UserDict' with
I suspect that once we support Index[ExtensionArray],
we'll be able to dispatch unique.
"""
@unstable
@pytest.mark.parametrize('as_index', [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
super(TestGroupby, self).test_groupby_extension_agg(
as_index, data_for_grouping
)
class TestArithmeticOps(BaseJSON, base.BaseArithmeticOpsTests):
def test_error(self, data, all_arithmetic_operators):
pass
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
with tm.assert_raises_regex(TypeError, "unsupported"):
ser + data
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
return super(TestArithmeticOps, self)._check_divmod_op(
s, op, other, exc=TypeError
)
class TestComparisonOps(BaseJSON, base.BaseComparisonOpsTests):
pass
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import logging
import os
import sys
import pyotherside
import smartcard
import struct
import types
import time
import getpass
import urllib.parse
import ykman.logging_setup
from base64 import b32decode
from binascii import b2a_hex, a2b_hex
from fido2.ctap import CtapError
from cryptography import x509
from cryptography.hazmat.primitives import serialization
from threading import Timer
from ykman import connect_to_device, scan_devices, get_name
from ykman.piv import (
get_pivman_data, list_certificates, generate_self_signed_certificate,
generate_csr, OBJECT_ID, generate_chuid, pivman_set_mgm_key,
derive_management_key, get_pivman_protected_data)
from ykman.otp import PrepareUploadFailed, prepare_upload_key, generate_static_pw
from ykman.scancodes import KEYBOARD_LAYOUT, encode
from ykman.util import (
parse_certificates, parse_private_key, get_leaf_certificates, InvalidPasswordError )
from yubikit.core import CommandError
from yubikit.core.otp import modhex_encode, modhex_decode, OtpConnection, CommandRejectedError
from yubikit.core.fido import FidoConnection
from yubikit.core.smartcard import ApduError, SW, SmartCardConnection
from yubikit.management import (
USB_INTERFACE, Mode, ManagementSession, DeviceConfig,
CAPABILITY, TRANSPORT)
from yubikit.piv import (
PivSession, SLOT, KEY_TYPE, check_key_support, NotSupportedError,
PIN_POLICY, TOUCH_POLICY, InvalidPinError, MANAGEMENT_KEY_TYPE)
from yubikit.yubiotp import (
YubiOtpSession, YubiOtpSlotConfiguration,
StaticPasswordSlotConfiguration, HotpSlotConfiguration, HmacSha1SlotConfiguration)
from fido2.ctap2 import Ctap2, ClientPin
logger = logging.getLogger(__name__)
log = logging.getLogger("ykman.hid")
log.setLevel(logging.WARNING)
log = logging.getLogger("fido2.hid")
log.setLevel(logging.WARNING)
def as_json(f):
def wrapped(*args, **kwargs):
return json.dumps(f(*args, **kwargs))
return wrapped
def catch_error(f):
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except smartcard.pcsc.PCSCExceptions.EstablishContextException:
return failure('pcsc_establish_context_failed')
except ValueError as e:
logger.error('Failed to open device', exc_info=e)
return failure('open_device_failed')
except Exception as e:
if str(e) == 'Incorrect padding':
return failure('incorrect_padding')
logger.error('Uncaught exception', exc_info=e)
return unknown_failure(e)
return wrapped
def success(result=None):
    # Copy instead of mutating a shared default dict.
    result = dict(result) if result else {}
    result['success'] = True
    return result
def failure(err_id, result=None):
    result = dict(result) if result else {}
    result['success'] = False
    result['error_id'] = err_id
    return result
def unknown_failure(exception):
return failure(None, {'error_message': str(exception)})
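# For illustration only (not part of the original API surface): the helpers
# above build plain dicts that the as_json wrapper JSON-encodes, e.g.
#   success({'serial': 1234})  -> {'serial': 1234, 'success': True}
#   failure('wrong_pin', {'tries_left': 2})
#       -> {'tries_left': 2, 'success': False, 'error_id': 'wrong_pin'}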
class Controller(object):
_dev_info = None
_state = None
_n_devs = 0
def __init__(self):
# Wrap all return values as JSON.
for f in dir(self):
if not f.startswith('_'):
func = getattr(self, f)
if isinstance(func, types.MethodType):
setattr(self, f, as_json(catch_error(func)))
def _open_device(self, connection_types=[SmartCardConnection, FidoConnection, OtpConnection]):
return connect_to_device(connection_types=connection_types)[0]
def refresh(self):
devices, state = scan_devices()
n_devs = sum(devices.values())
if state != self._state:
self._state = state
self._n_devs = n_devs
self._dev_info = None
if n_devs != 1:
return success({'n_devs': self._n_devs})
attempts = 3
while True:
try:
connection, device, info = connect_to_device()
connection.close()
break
            except Exception:
attempts -= 1
if attempts < 1:
self._state = None
return failure('open_device_failed')
logger.debug("Sleep...")
time.sleep(0.5)
interfaces = USB_INTERFACE(0)
usb_supported = info.supported_capabilities.get(TRANSPORT.USB)
if CAPABILITY.OTP & usb_supported:
interfaces |= USB_INTERFACE.OTP
if (CAPABILITY.U2F | CAPABILITY.FIDO2) & usb_supported:
interfaces |= USB_INTERFACE.FIDO
if (CAPABILITY.OPENPGP | CAPABILITY.PIV | CAPABILITY.OATH) & usb_supported:
interfaces |= USB_INTERFACE.CCID
self._dev_info = {
'name': get_name(info, device.pid.get_type()).replace("YubiKey BIO", "YubiKey Bio"),
'version': '.'.join(str(x) for x in info.version) if info.version else "",
'serial': info.serial or '',
'usb_enabled': [
a.name for a in CAPABILITY
if a in info.config.enabled_capabilities.get(TRANSPORT.USB)],
'usb_supported': [
a.name for a in CAPABILITY
if a in info.supported_capabilities.get(TRANSPORT.USB)],
'usb_interfaces_supported': [
t.name for t in USB_INTERFACE
if t in interfaces],
'nfc_enabled': [
a.name for a in CAPABILITY
if a in info.config.enabled_capabilities.get(TRANSPORT.NFC, [])],
'nfc_supported': [
a.name for a in CAPABILITY
if a in info.supported_capabilities.get(TRANSPORT.NFC, [])],
'usb_interfaces_enabled': [i.name for i in USB_INTERFACE if i & device.pid.get_interfaces()],
'can_write_config': info.version and info.version >= (5,0,0),
'configuration_locked': info.is_locked,
'form_factor': info.form_factor
}
return success({'dev': self._dev_info, 'n_devs': self._n_devs})
def write_config(self, usb_applications, nfc_applications, lock_code):
usb_enabled = 0x00
nfc_enabled = 0x00
for app in usb_applications:
            usb_enabled |= CAPABILITY[app]
for app in nfc_applications:
            nfc_enabled |= CAPABILITY[app]
with self._open_device() as conn:
if lock_code:
lock_code = a2b_hex(lock_code)
if len(lock_code) != 16:
return failure('lock_code_not_16_bytes')
try:
session = ManagementSession(conn)
session.write_device_config(
DeviceConfig(
{TRANSPORT.USB: usb_enabled,
TRANSPORT.NFC: nfc_enabled},
None,
None,
None,
),
True,
lock_code)
self._state = None
except ApduError as e:
if (e.sw == SW.VERIFY_FAIL_NO_RETRY):
return failure('wrong_lock_code')
raise
except ValueError as e:
if str(e) == 'Configuration locked!':
return failure('interface_config_locked')
raise
return success()
def refresh_piv(self):
with self._open_device([SmartCardConnection]) as conn:
session = PivSession(conn)
pivman = get_pivman_data(session)
try:
key_type = session.get_management_key_metadata().key_type
except NotSupportedError:
key_type = MANAGEMENT_KEY_TYPE.TDES
return success({
'piv_data': {
'certs': self._piv_list_certificates(session),
'has_derived_key': pivman.has_derived_key,
'has_protected_key': pivman.has_protected_key,
'has_stored_key': pivman.has_stored_key,
'pin_tries': session.get_pin_attempts(),
'puk_blocked': pivman.puk_blocked,
'supported_algorithms': _supported_algorithms(self._dev_info['version'].split('.')),
'key_type': key_type,
},
})
def set_mode(self, interfaces):
interfaces_enabled = 0x00
for usb_interface in interfaces:
            interfaces_enabled |= USB_INTERFACE[usb_interface]
with self._open_device() as conn:
try:
session = ManagementSession(conn)
session.set_mode(
Mode(interfaces_enabled))
except ValueError as e:
if str(e) == 'Configuration locked!':
return failure('interface_config_locked')
raise
return success()
def get_username(self):
username = getpass.getuser()
return success({'username': username})
def is_macos(self):
return success({'is_macos': sys.platform == 'darwin'})
def slots_status(self):
try:
with self._open_device([OtpConnection]) as conn:
session = YubiOtpSession(conn)
state = session.get_config_state()
slot1 = state.is_configured(1)
slot2 = state.is_configured(2)
ans = [slot1, slot2]
return success({'status': ans})
except OSError:
return failure('open_device_failed')
def erase_slot(self, slot):
try:
with self._open_device([OtpConnection]) as conn:
session = YubiOtpSession(conn)
session.delete_slot(slot)
return success()
except CommandRejectedError:
return failure("write error")
def swap_slots(self):
try:
with self._open_device([OtpConnection]) as conn:
session = YubiOtpSession(conn)
session.swap_slots()
return success()
except CommandRejectedError:
return failure("write error")
def serial_modhex(self):
with self._open_device([OtpConnection]) as conn:
session = YubiOtpSession(conn)
return modhex_encode(b'\xff\x00' + struct.pack(b'>I', session.get_serial()))
def generate_static_pw(self, keyboard_layout):
return success({
'password': generate_static_pw(
38, KEYBOARD_LAYOUT[keyboard_layout])
})
def random_uid(self):
return b2a_hex(os.urandom(6)).decode('ascii')
def random_key(self, bytes):
return b2a_hex(os.urandom(int(bytes))).decode('ascii')
def program_otp(self, slot, public_id, private_id, key, upload=False,
app_version='unknown'):
key = a2b_hex(key)
public_id = modhex_decode(public_id)
private_id = a2b_hex(private_id)
upload_url = None
with self._open_device([OtpConnection]) as conn:
if upload:
try:
upload_url = prepare_upload_key(
key, public_id, private_id,
serial=self._dev_info['serial'],
user_agent='ykman-qt/' + app_version)
except PrepareUploadFailed as e:
logger.debug('YubiCloud upload failed', exc_info=e)
return failure('upload_failed',
{'upload_errors': [err.name
for err in e.errors]})
try:
session = YubiOtpSession(conn)
session.put_configuration(
slot,
YubiOtpSlotConfiguration(public_id, private_id, key)
)
except CommandError as e:
logger.debug("Failed to program YubiOTP", exc_info=e)
return failure("write error")
logger.debug('YubiOTP successfully programmed.')
if upload_url:
logger.debug('Upload url: %s', upload_url)
return success({'upload_url': upload_url})
def program_challenge_response(self, slot, key, touch):
key = a2b_hex(key)
with self._open_device([OtpConnection]) as conn:
session = YubiOtpSession(conn)
try:
session.put_configuration(
slot,
HmacSha1SlotConfiguration(key).require_touch(touch),
)
except CommandError as e:
logger.debug("Failed to program Challenge-response", exc_info=e)
return failure("write error")
return success()
def program_static_password(self, slot, key, keyboard_layout):
with self._open_device([OtpConnection]) as conn:
session = YubiOtpSession(conn)
scan_codes = encode(key, KEYBOARD_LAYOUT[keyboard_layout])
try:
session.put_configuration(slot, StaticPasswordSlotConfiguration(scan_codes))
except CommandError as e:
logger.debug("Failed to program static password", exc_info=e)
return failure("write error")
return success()
def program_oath_hotp(self, slot, key, digits):
unpadded = key.upper().rstrip('=').replace(' ', '')
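        # Note: b32decode() requires the input length to be a multiple of 8,
        # so the stripped secret is re-padded with '=' before decoding.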
key = b32decode(unpadded + '=' * (-len(unpadded) % 8))
with self._open_device([OtpConnection]) as conn:
session = YubiOtpSession(conn)
try:
session.put_configuration(
slot,
HotpSlotConfiguration(key)
.digits8(int(digits) == 8),
)
except CommandError as e:
logger.debug("Failed to program OATH-HOTP", exc_info=e)
return failure("write error")
return success()
def fido_has_pin(self):
with self._open_device([FidoConnection]) as conn:
ctap2 = Ctap2(conn)
return success({'hasPin': ctap2.info.options.get("clientPin")})
def fido_pin_retries(self):
try:
with self._open_device([FidoConnection]) as conn:
ctap2 = Ctap2(conn)
client_pin = ClientPin(ctap2)
return success({'retries': client_pin.get_pin_retries()[0]})
except CtapError as e:
if e.code == CtapError.ERR.PIN_AUTH_BLOCKED:
return failure('PIN authentication is currently blocked. '
'Remove and re-insert the YubiKey.')
if e.code == CtapError.ERR.PIN_BLOCKED:
return failure('PIN is blocked.')
raise
def fido_set_pin(self, new_pin):
try:
with self._open_device([FidoConnection]) as conn:
ctap2 = Ctap2(conn)
if len(new_pin) < ctap2.info.min_pin_length:
return failure('too short')
client_pin = ClientPin(ctap2)
client_pin.set_pin(new_pin)
return success()
except CtapError as e:
if e.code == CtapError.ERR.INVALID_LENGTH or \
e.code == CtapError.ERR.PIN_POLICY_VIOLATION:
return failure('too long')
raise
def fido_change_pin(self, current_pin, new_pin):
try:
with self._open_device([FidoConnection]) as conn:
ctap2 = Ctap2(conn)
if len(new_pin) < ctap2.info.min_pin_length:
return failure('too short')
client_pin = ClientPin(ctap2)
client_pin.change_pin(current_pin, new_pin)
return success()
except CtapError as e:
if e.code == CtapError.ERR.INVALID_LENGTH or \
e.code == CtapError.ERR.PIN_POLICY_VIOLATION:
return failure('too long')
if e.code == CtapError.ERR.PIN_INVALID:
return failure('wrong pin')
if e.code == CtapError.ERR.PIN_AUTH_BLOCKED:
return failure('currently blocked')
if e.code == CtapError.ERR.PIN_BLOCKED:
return failure('blocked')
raise
def fido_reset(self):
try:
with self._open_device([FidoConnection]) as conn:
ctap2 = Ctap2(conn)
ctap2.reset()
return success()
except CtapError as e:
if e.code == CtapError.ERR.NOT_ALLOWED:
return failure('not allowed')
if e.code == CtapError.ERR.ACTION_TIMEOUT:
return failure('touch timeout')
raise
def piv_reset(self):
with self._open_device([SmartCardConnection]) as conn:
session = PivSession(conn)
session.reset()
return success()
def _piv_list_certificates(self, session):
return {
SLOT(slot).name: _piv_serialise_cert(slot, cert) for slot, cert in list_certificates(session).items() # noqa: E501
}
def piv_delete_certificate(self, slot_name, pin=None, mgm_key_hex=None):
logger.debug('piv_delete_certificate %s', slot_name)
with self._open_device([SmartCardConnection]) as conn:
session = PivSession(conn)
with PromptTimeout():
auth_failed = self._piv_ensure_authenticated(
session, pin=pin, mgm_key_hex=mgm_key_hex)
if auth_failed:
return auth_failed
try:
session.delete_certificate(SLOT[slot_name])
session.put_object(OBJECT_ID.CHUID, generate_chuid())
return success()
except ApduError as e:
if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
logger.debug("Wrong management key", exc_info=e)
return failure('wrong_mgm_key')
def piv_generate_certificate(
self, slot_name, algorithm, subject, expiration_date,
self_sign=True, csr_file_url=None, pin=None, mgm_key_hex=None):
logger.debug('slot_name=%s algorithm=%s common_name=%s '
'expiration_date=%s self_sign=%s csr_file_url=%s',
slot_name, algorithm, subject, expiration_date,
self_sign, csr_file_url)
if csr_file_url:
file_path = self._get_file_path(csr_file_url)
with self._open_device([SmartCardConnection]) as conn:
session = PivSession(conn)
with PromptTimeout():
auth_failed = self._piv_ensure_authenticated(
session, pin=pin, mgm_key_hex=mgm_key_hex)
if auth_failed:
return auth_failed
pin_failed = self._piv_verify_pin(session, pin)
if pin_failed:
return pin_failed
if self_sign:
now = datetime.datetime.utcnow()
try:
year = int(expiration_date[0:4])
month = int(expiration_date[(4+1):(4+1+2)])
day = int(expiration_date[(4+1+2+1):(4+1+2+1+2)])
valid_to = datetime.datetime(year, month, day)
except ValueError as e:
logger.debug(
'Failed to parse date: ' + expiration_date,
exc_info=e)
return failure(
'invalid_iso8601_date',
{'date': expiration_date})
try:
public_key = session.generate_key(
SLOT[slot_name], KEY_TYPE[algorithm])
except ApduError as e:
if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
logger.debug("Wrong management key", exc_info=e)
return failure('wrong_mgm_key')
pin_failed = self._piv_verify_pin(session, pin)
if pin_failed:
return pin_failed
if "=" not in subject:
# Old style, common name only.
subject = "CN=" + subject
try:
if self_sign:
cert = generate_self_signed_certificate(session,
SLOT[slot_name], public_key, subject, now,
valid_to)
session.put_certificate(SLOT[slot_name], cert)
session.put_object(OBJECT_ID.CHUID, generate_chuid())
else:
csr = generate_csr(session,
SLOT[slot_name], public_key, subject)
with open(file_path, 'w+b') as csr_file:
csr_file.write(csr.public_bytes(
encoding=serialization.Encoding.PEM))
except ApduError as e:
if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
return failure('pin_required')
raise
return success()
def piv_change_pin(self, old_pin, new_pin):
with self._open_device([SmartCardConnection]) as conn:
session = PivSession(conn)
try:
session.change_pin(old_pin, new_pin)
logger.debug('PIN change successful!')
return success()
except InvalidPinError as e:
attempts = e.attempts_remaining
if attempts:
logger.debug("Failed to change PIN, %d tries left", attempts, exc_info=e)
return failure('wrong_pin', {'tries_left': attempts})
else:
logger.debug("PIN is blocked.", exc_info=e)
return failure('pin_blocked')
except ApduError as e:
if e.sw == SW.INCORRECT_PARAMETERS:
return failure('incorrect_parameters')
tries_left = e.attempts_remaining
logger.debug('PIN change failed. %s tries left.',
tries_left, exc_info=e)
return {
'success': False,
'tries_left': tries_left,
}
def piv_change_puk(self, old_puk, new_puk):
with self._open_device([SmartCardConnection]) as conn:
session = PivSession(conn)
try:
session.change_puk(old_puk, new_puk)
return success()
except InvalidPinError as e:
attempts = e.attempts_remaining
if attempts:
logger.debug("Failed to change PUK, %d tries left", attempts, exc_info=e)
return failure('wrong_puk', {'tries_left': attempts})
else:
logger.debug("PUK is blocked.", exc_info=e)
return failure('puk_blocked')
def piv_generate_random_mgm_key(self, key_type):
key = b2a_hex(ykman.piv.generate_random_management_key(MANAGEMENT_KEY_TYPE(key_type))).decode(
'utf-8')
return key
def piv_change_mgm_key(self, pin, current_key_hex, new_key_hex, key_type,
store_on_device=False):
with self._open_device([SmartCardConnection]) as conn:
session = PivSession(conn)
pivman = get_pivman_data(session)
if pivman.has_protected_key or store_on_device:
pin_failed = self._piv_verify_pin(
session, pin=pin)
if pin_failed:
return pin_failed
with PromptTimeout():
auth_failed = self._piv_ensure_authenticated(
session, pin=pin, mgm_key_hex=current_key_hex)
if auth_failed:
return auth_failed
try:
new_key = a2b_hex(new_key_hex) if new_key_hex else None
except Exception as e:
logger.debug('Failed to parse new management key', exc_info=e)
return failure('new_mgm_key_bad_hex')
if new_key is not None and len(new_key) != MANAGEMENT_KEY_TYPE(key_type).key_len:
logger.debug('Wrong length for new management key: %d',
len(new_key))
return failure('new_mgm_key_bad_length')
pivman_set_mgm_key(
session, new_key, MANAGEMENT_KEY_TYPE(key_type), touch=False, store_on_device=store_on_device
)
return success()
def piv_unblock_pin(self, puk, new_pin):
with self._open_device([SmartCardConnection]) as conn:
session = PivSession(conn)
try:
session.unblock_pin(puk, new_pin)
return success()
except InvalidPinError as e:
attempts = e.attempts_remaining
if attempts:
logger.debug("Failed to unblock PIN, %d tries left", attempts, exc_info=e)
return failure('wrong_puk', {'tries_left': attempts})
else:
logger.debug("PUK is blocked.", exc_info=e)
return failure('puk_blocked')
def piv_can_parse(self, file_url):
file_path = self._get_file_path(file_url)
with open(file_path, 'r+b') as file:
data = file.read()
try:
parse_certificates(data, password=None)
return success()
except (ValueError, TypeError):
pass
try:
parse_private_key(data, password=None)
return success()
except (ValueError, TypeError):
pass
raise ValueError('Failed to parse certificate or key')
# TODO test more
def piv_import_file(self, slot, file_url, password=None,
pin=None, mgm_key=None):
is_cert = False
is_private_key = False
file_path = self._get_file_path(file_url)
if password:
password = password.encode()
with open(file_path, 'r+b') as file:
data = file.read()
try:
certs = parse_certificates(data, password)
is_cert = True
except (ValueError, TypeError):
pass
try:
private_key = parse_private_key(data, password)
is_private_key = True
except (ValueError, TypeError, InvalidPasswordError):
pass
if not (is_cert or is_private_key):
return failure('failed_parsing')
with self._open_device([SmartCardConnection]) as conn:
session = PivSession(conn)
with PromptTimeout():
auth_failed = self._piv_ensure_authenticated(
session, pin, mgm_key)
if auth_failed:
return auth_failed
if is_private_key:
session.put_key(SLOT[slot], private_key)
if is_cert:
if len(certs) > 1:
leafs = get_leaf_certificates(certs)
cert_to_import = leafs[0]
else:
cert_to_import = certs[0]
session.put_certificate(
SLOT[slot], cert_to_import)
session.put_object(OBJECT_ID.CHUID, generate_chuid())
return success({
'imported_cert': is_cert,
'imported_key': is_private_key
})
def piv_export_certificate(self, slot, file_url):
file_path = self._get_file_path(file_url)
with self._open_device([SmartCardConnection]) as conn:
session = PivSession(conn)
cert = session.get_certificate(SLOT[slot])
with open(file_path, 'wb') as file:
file.write(
cert.public_bytes(
encoding=serialization.Encoding.PEM))
return success()
def _get_file_path(self, file_url):
file_path = urllib.parse.urlparse(file_url).path
return file_path[1:] if os.name == 'nt' else file_path
def _piv_verify_pin(self, session, pin=None):
if pin:
try:
try:
key_type = session.get_management_key_metadata().key_type
except NotSupportedError:
key_type = MANAGEMENT_KEY_TYPE.TDES
pivman = get_pivman_data(session)
session.verify_pin(pin)
if pivman.has_derived_key:
with PromptTimeout():
session.authenticate(
key_type, derive_management_key(pin, pivman.salt)
)
session.verify_pin(pin)
elif pivman.has_stored_key:
pivman_prot = get_pivman_protected_data(session)
with PromptTimeout():
session.authenticate(key_type, pivman_prot.key)
session.verify_pin(pin)
except InvalidPinError as e:
attempts = e.attempts_remaining
if attempts:
logger.debug("Failed to verify PIN, %d tries left", attempts, exc_info=e)
return failure('wrong_pin', {'tries_left': attempts})
else:
logger.debug("PIN is blocked.", exc_info=e)
return failure('pin_blocked')
else:
return failure('pin_required')
def _piv_ensure_authenticated(self, session, pin=None,
mgm_key_hex=None):
pivman = get_pivman_data(session)
try:
key_type = session.get_management_key_metadata().key_type
except NotSupportedError:
key_type = MANAGEMENT_KEY_TYPE.TDES
if pivman.has_protected_key:
return self._piv_verify_pin(session, pin)
else:
if mgm_key_hex:
try:
mgm_key_bytes = a2b_hex(mgm_key_hex)
except Exception:
return failure('mgm_key_bad_format')
try:
with PromptTimeout():
session.authenticate(
key_type, mgm_key_bytes
)
except ApduError as e:
if (e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED):
return failure('wrong_mgm_key')
raise
else:
return failure('mgm_key_required')
controller = None
def _supported_algorithms(version):
supported = []
for key_type in KEY_TYPE:
try:
check_key_support(tuple(map(int, version)), key_type, PIN_POLICY.DEFAULT, TOUCH_POLICY.DEFAULT)
supported.append(key_type.name)
except NotSupportedError:
pass
return supported
def _piv_serialise_cert(slot, cert):
if cert:
# Try reading out issuer and subject,
# may throw ValueError if malformed
malformed = False
try:
issuer_cns = cert.issuer.get_attributes_for_oid(
x509.NameOID.COMMON_NAME)
except ValueError:
malformed = True
issuer_cns = None
try:
subject_cns = cert.subject.get_attributes_for_oid(
x509.NameOID.COMMON_NAME)
except ValueError:
malformed = True
subject_cns = None
try:
valid_from = cert.not_valid_before.date().isoformat()
except ValueError:
valid_from = None
try:
valid_to = cert.not_valid_after.date().isoformat()
except ValueError:
valid_to = None
else:
malformed = True
issuer_cns = None
subject_cns = None
valid_from = None
valid_to = None
return {
'slot': SLOT(slot).name,
'malformed': malformed,
'issuedFrom': issuer_cns[0].value if issuer_cns else '',
'issuedTo': subject_cns[0].value if subject_cns else '',
'validFrom': valid_from if valid_from else '',
'validTo': valid_to if valid_to else ''
}
def _touch_prompt():
pyotherside.send('touchRequired')
def _close_touch_prompt():
pyotherside.send('touchNotRequired')
def init_with_logging(log_level, log_file=None):
logging_setup = as_json(ykman.logging_setup.setup)
try:
logging_setup(log_level.upper(), log_file)
except Exception as e:
return json.dumps(unknown_failure(e))
init()
def init():
global controller
controller = Controller()
class PromptTimeout:
def __init__(self, timeout=0.5):
self.timer = Timer(timeout, _touch_prompt)
def __enter__(self):
self.timer.start()
def __exit__(self, typ, value, traceback):
_close_touch_prompt()
self.timer.cancel()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Document matcher for Search API stub.
DocumentMatcher provides an approximation of the Search API's query matching.
"""
from google.appengine.datastore import document_pb
from google.appengine._internal.antlr3 import tree
from google.appengine.api.search import query_parser
from google.appengine.api.search import QueryParser
from google.appengine.api.search import search_util
from google.appengine.api.search.stub import geo_util
from google.appengine.api.search.stub import simple_tokenizer
from google.appengine.api.search.stub import tokens
MSEC_PER_DAY = 86400000
class ExpressionTreeException(Exception):
"""An error occurred while analyzing/translating the expression parse tree."""
def __init__(self, msg):
Exception.__init__(self, msg)
class DistanceMatcher(object):
"""A class to match on geo distance."""
def __init__(self, geopoint, distance):
self._geopoint = geopoint
self._distance = distance
def _CheckOp(self, op):
if op == QueryParser.EQ:
raise ExpressionTreeException('Equality comparison not available for Geo type')
if op == QueryParser.NE:
raise ExpressionTreeException('!= comparison operator is not available')
if op not in (QueryParser.GT, QueryParser.GE, QueryParser.LESSTHAN, QueryParser.LE):
raise search_util.UnsupportedOnDevError(
'Operator %s not supported for distance matches on development server.'
% str(op))
def _IsDistanceMatch(self, geopoint, op):
distance = geopoint - self._geopoint
if op == QueryParser.GT or op == QueryParser.GE:
return distance >= self._distance
if op == QueryParser.LESSTHAN or op == QueryParser.LE:
return distance <= self._distance
else:
      raise AssertionError('unexpected op %s' % str(op))
def IsMatch(self, field_values, op):
self._CheckOp(op)
for field_value in field_values:
geo_pb = field_value.geo()
geopoint = geo_util.LatLng(geo_pb.lat(), geo_pb.lng())
if self._IsDistanceMatch(geopoint, op):
return True
if field_values:
return False
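    # Note: a document with no geo values for this field is treated as being
    # infinitely far away, so only "greater than" style comparisons (>, >=)
    # can match it.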
return op == QueryParser.GT or op == QueryParser.GE
class DocumentMatcher(object):
"""A class to match documents with a query."""
def __init__(self, query, inverted_index):
self._query = query
self._inverted_index = inverted_index
self._parser = simple_tokenizer.SimpleTokenizer()
def _PostingsForToken(self, token):
"""Returns the postings for the token."""
return self._inverted_index.GetPostingsForToken(token)
def _PostingsForFieldToken(self, field, value):
"""Returns postings for the value occurring in the given field."""
value = simple_tokenizer.NormalizeString(value)
return self._PostingsForToken(
tokens.Token(chars=value, field_name=field))
def _MatchPhrase(self, field, match, document):
"""Match a textual field with a phrase query node."""
field_text = field.value().string_value()
phrase_text = query_parser.GetPhraseQueryNodeText(match)
if field.value().type() == document_pb.FieldValue.ATOM:
return (field_text == phrase_text)
phrase = self._parser.TokenizeText(phrase_text)
field_text = self._parser.TokenizeText(field_text)
if not phrase:
return True
posting = None
for post in self._PostingsForFieldToken(field.name(), phrase[0].chars):
if post.doc_id == document.id():
posting = post
break
if not posting:
return False
def ExtractWords(token_list):
return (token.chars for token in token_list)
for position in posting.positions:
match_words = zip(ExtractWords(field_text[position:]),
ExtractWords(phrase))
if len(match_words) != len(phrase):
continue
match = True
for doc_word, match_word in match_words:
if doc_word != match_word:
match = False
if match:
return True
return False
def _MatchTextField(self, field, match, document):
"""Check if a textual field matches a query tree node."""
if match.getType() == QueryParser.VALUE:
if query_parser.IsPhrase(match):
return self._MatchPhrase(field, match, document)
if field.value().type() == document_pb.FieldValue.ATOM:
return (field.value().string_value() ==
query_parser.GetQueryNodeText(match))
query_tokens = self._parser.TokenizeText(
query_parser.GetQueryNodeText(match))
if not query_tokens:
return True
if len(query_tokens) > 1:
def QueryNode(token):
return query_parser.CreateQueryNode(token.chars, QueryParser.TEXT)
return all(self._MatchTextField(field, QueryNode(token), document)
for token in query_tokens)
token_text = query_tokens[0].chars
matching_docids = [
post.doc_id for post in self._PostingsForFieldToken(
field.name(), token_text)]
return document.id() in matching_docids
def ExtractGlobalEq(node):
if node.getType() == QueryParser.EQ and len(node.children) >= 2:
if node.children[0].getType() == QueryParser.GLOBAL:
return node.children[1]
return node
if match.getType() == QueryParser.CONJUNCTION:
return all(self._MatchTextField(field, ExtractGlobalEq(child), document)
for child in match.children)
if match.getType() == QueryParser.DISJUNCTION:
return any(self._MatchTextField(field, ExtractGlobalEq(child), document)
for child in match.children)
if match.getType() == QueryParser.NEGATION:
return not self._MatchTextField(
field, ExtractGlobalEq(match.children[0]), document)
return False
def _MatchDateField(self, field, match, operator, document):
"""Check if a date field matches a query tree node."""
return self._MatchComparableField(
field, match, _DateStrToDays, operator, document)
def _MatchNumericField(self, field, match, operator, document):
"""Check if a numeric field matches a query tree node."""
return self._MatchComparableField(field, match, float, operator, document)
def _MatchGeoField(self, field, matcher, operator, document):
"""Check if a geo field matches a query tree node."""
if not isinstance(matcher, DistanceMatcher):
return False
if isinstance(field, tree.CommonTree):
field = query_parser.GetQueryNodeText(field)
values = [ field.value() for field in
search_util.GetAllFieldInDocument(document, field) if
field.value().type() == document_pb.FieldValue.GEO ]
return matcher.IsMatch(values, operator)
def _MatchComparableField(
self, field, match, cast_to_type, op, document):
"""A generic method to test matching for comparable types.
Comparable types are defined to be anything that supports <, >, <=, >=, ==.
For our purposes, this is numbers and dates.
Args:
field: The document_pb.Field to test
match: The query node to match against
cast_to_type: The type to cast the node string values to
op: The query node type representing the type of comparison to perform
document: The document that the field is in
Returns:
True iff the field matches the query.
Raises:
UnsupportedOnDevError: Raised when an unsupported operator is used, or
when the query node is of the wrong type.
ExpressionTreeException: Raised when a != inequality operator is used.
"""
field_val = cast_to_type(field.value().string_value())
if match.getType() == QueryParser.VALUE:
try:
match_val = cast_to_type(query_parser.GetPhraseQueryNodeText(match))
except ValueError:
return False
else:
return False
if op == QueryParser.EQ:
return field_val == match_val
if op == QueryParser.NE:
raise ExpressionTreeException('!= comparison operator is not available')
if op == QueryParser.GT:
return field_val > match_val
if op == QueryParser.GE:
return field_val >= match_val
if op == QueryParser.LESSTHAN:
return field_val < match_val
if op == QueryParser.LE:
return field_val <= match_val
raise search_util.UnsupportedOnDevError(
'Operator %s not supported for numerical fields on development server.'
% match.getText())
def _MatchAnyField(self, field, match, operator, document):
"""Check if a field matches a query tree.
Args:
field: the name of the field, or a query node containing the field.
match: A query node to match the field with.
operator: The query node type corresponding to the type of match to
perform (eg QueryParser.EQ, QueryParser.GT, etc).
document: The document to match.
"""
if isinstance(field, tree.CommonTree):
field = query_parser.GetQueryNodeText(field)
fields = search_util.GetAllFieldInDocument(document, field)
return any(self._MatchField(f, match, operator, document) for f in fields)
def _MatchField(self, field, match, operator, document):
"""Check if a field matches a query tree.
Args:
field: a document_pb.Field instance to match.
match: A query node to match the field with.
operator: The a query node type corresponding to the type of match to
perform (eg QueryParser.EQ, QueryParser.GT, etc).
document: The document to match.
"""
if field.value().type() in search_util.TEXT_DOCUMENT_FIELD_TYPES:
if operator != QueryParser.EQ:
return False
return self._MatchTextField(field, match, document)
if field.value().type() in search_util.NUMBER_DOCUMENT_FIELD_TYPES:
return self._MatchNumericField(field, match, operator, document)
if field.value().type() == document_pb.FieldValue.DATE:
return self._MatchDateField(field, match, operator, document)
if field.value().type() == document_pb.FieldValue.GEO:
return False
type_name = document_pb.FieldValue.ContentType_Name(
field.value().type()).lower()
raise search_util.UnsupportedOnDevError(
'Matching fields of type %s is unsupported on dev server (searched for '
'field %s)' % (type_name, field.name()))
def _MatchGlobal(self, match, document):
for field in document.field_list():
try:
if self._MatchAnyField(field.name(), match, QueryParser.EQ, document):
return True
except search_util.UnsupportedOnDevError:
pass
return False
def _ResolveDistanceArg(self, node):
if node.getType() == QueryParser.VALUE:
return query_parser.GetQueryNodeText(node)
if node.getType() == QueryParser.FUNCTION:
name, args = node.children
if name.getText() == 'geopoint':
lat, lng = (float(query_parser.GetQueryNodeText(v)) for v in args.children)
return geo_util.LatLng(lat, lng)
return None
def _MatchFunction(self, node, match, operator, document):
name, args = node.children
if name.getText() == 'distance':
x, y = args.children
x, y = self._ResolveDistanceArg(x), self._ResolveDistanceArg(y)
if isinstance(x, geo_util.LatLng) and isinstance(y, basestring):
x, y = y, x
if isinstance(x, basestring) and isinstance(y, geo_util.LatLng):
distance = float(query_parser.GetQueryNodeText(match))
matcher = DistanceMatcher(y, distance)
return self._MatchGeoField(x, matcher, operator, document)
return False
def _CheckMatch(self, node, document):
"""Check if a document matches a query tree."""
if node.getType() == QueryParser.CONJUNCTION:
return all(self._CheckMatch(child, document) for child in node.children)
if node.getType() == QueryParser.DISJUNCTION:
return any(self._CheckMatch(child, document) for child in node.children)
if node.getType() == QueryParser.NEGATION:
return not self._CheckMatch(node.children[0], document)
if node.getType() in query_parser.COMPARISON_TYPES:
lhs, match = node.children
if lhs.getType() == QueryParser.GLOBAL:
return self._MatchGlobal(match, document)
elif lhs.getType() == QueryParser.FUNCTION:
return self._MatchFunction(lhs, match, node.getType(), document)
return self._MatchAnyField(lhs, match, node.getType(), document)
return False
def Matches(self, document):
return self._CheckMatch(self._query, document)
def FilterDocuments(self, documents):
return (doc for doc in documents if self.Matches(doc))
def _DateStrToDays(date_str):
date = search_util.DeserializeDate(date_str)
return search_util.EpochTime(date) / MSEC_PER_DAY
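# Reference note: DeserializeDate() parses the stored date value, and
# EpochTime() is assumed here to yield milliseconds since the Unix epoch, so
# dividing by MSEC_PER_DAY (24 * 60 * 60 * 1000 = 86400000) converts a date to
# whole days since 1970-01-01, the unit the comparable-field matching above
# compares against.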
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Network units wrapping TensorFlows' tf.contrib.rnn cells.
Please put all wrapping logic for tf.contrib.rnn in this module; this will help
collect common subroutines that prove useful.
"""
import abc
import tensorflow as tf
from dragnn.python import network_units as dragnn
from syntaxnet.util import check
def capture_variables(function, scope_name):
"""Captures and returns variables created by a function.
Runs |function| in a scope of name |scope_name| and returns the list of
variables created by |function|.
Args:
function: Function whose variables should be captured. The function should
take one argument, its enclosing variable scope.
scope_name: Variable scope in which the |function| is evaluated.
Returns:
List of created variables.
"""
# Use a dict to dedupe captured variables.
created_vars = {}
def _custom_getter(getter, *args, **kwargs):
"""Calls the real getter and captures its result in |created_vars|."""
real_variable = getter(*args, **kwargs)
created_vars[real_variable.name] = real_variable
return real_variable
with tf.variable_scope(
scope_name, reuse=None, custom_getter=_custom_getter) as scope:
function(scope)
return created_vars.values()
def apply_with_captured_variables(function, scope_name, component):
"""Applies a function using previously-captured variables.
The counterpart to capture_variables(); invokes |function| in a scope of name
|scope_name|, extracting captured variables from the |component|.
Args:
function: Function to apply using captured variables. The function should
take one argument, its enclosing variable scope.
scope_name: Variable scope in which the |function| is evaluated. Must match
the scope passed to capture_variables().
component: Component from which to extract captured variables.
Returns:
Results of function application.
"""
def _custom_getter(getter, *args, **kwargs):
"""Retrieves the normal or moving-average variables."""
return component.get_variable(var_params=getter(*args, **kwargs))
with tf.variable_scope(
scope_name, reuse=True, custom_getter=_custom_getter) as scope:
return function(scope)
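# Illustrative sketch (not part of the original module; the names used are
# hypothetical) of how the two helpers above are meant to be paired: variables
# are created once under a named scope via capture_variables(), and the same
# scope is later re-entered with reuse=True via
# apply_with_captured_variables(), which routes every variable lookup through
# component.get_variable() so e.g. moving-average copies can be substituted:
#
#   params = capture_variables(
#       lambda scope: cell(placeholder_inputs, placeholder_state, scope=scope),
#       'cell')                                   # at graph-construction time
#   outputs = apply_with_captured_variables(
#       lambda scope: cell(inputs, state, scope=scope),
#       'cell', component)                        # when applying the network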
class BaseLSTMNetwork(dragnn.NetworkUnitInterface):
"""Base class for wrapped LSTM networks.
This LSTM network unit supports multiple layers with layer normalization.
Because it is imported from tf.contrib.rnn, we need to capture the created
variables during initialization time.
Layers:
...subclass-specific layers...
last_layer: Alias for the activations of the last hidden layer.
logits: Logits associated with component actions.
"""
def __init__(self, component, additional_attr_defaults=None):
"""Initializes the LSTM base class.
Parameters used:
hidden_layer_sizes: Comma-delimited number of hidden units for each layer.
input_dropout_rate (-1.0): Input dropout rate for each layer. If < 0.0,
use the global |dropout_rate| hyperparameter.
recurrent_dropout_rate (0.8): Recurrent dropout rate. If < 0.0, use the
global |recurrent_dropout_rate| hyperparameter.
layer_norm (True): Whether or not to use layer norm.
Hyperparameters used:
dropout_rate: Input dropout rate.
recurrent_dropout_rate: Recurrent dropout rate.
Args:
component: parent ComponentBuilderBase object.
additional_attr_defaults: Additional attributes for use by derived class.
"""
attr_defaults = additional_attr_defaults or {}
attr_defaults.update({
'layer_norm': True,
'input_dropout_rate': -1.0,
'recurrent_dropout_rate': 0.8,
'hidden_layer_sizes': '256',
})
self._attrs = dragnn.get_attrs_with_defaults(
component.spec.network_unit.parameters,
defaults=attr_defaults)
    # Materialize as a list so it can be iterated more than once.
    self._hidden_layer_sizes = list(
        map(int, self._attrs['hidden_layer_sizes'].split(',')))
self._input_dropout_rate = self._attrs['input_dropout_rate']
if self._input_dropout_rate < 0.0:
self._input_dropout_rate = component.master.hyperparams.dropout_rate
self._recurrent_dropout_rate = self._attrs['recurrent_dropout_rate']
if self._recurrent_dropout_rate < 0.0:
self._recurrent_dropout_rate = (
component.master.hyperparams.recurrent_dropout_rate)
if self._recurrent_dropout_rate < 0.0:
self._recurrent_dropout_rate = component.master.hyperparams.dropout_rate
tf.logging.info('[%s] input_dropout_rate=%s recurrent_dropout_rate=%s',
component.name, self._input_dropout_rate,
self._recurrent_dropout_rate)
layers, context_layers = self.create_hidden_layers(component,
self._hidden_layer_sizes)
last_layer_dim = layers[-1].dim
layers.append(
dragnn.Layer(component, name='last_layer', dim=last_layer_dim))
layers.append(
dragnn.Layer(component, name='logits', dim=component.num_actions))
# Provide initial layers and context layers, so the base class constructor
# can safely use accessors like get_layer_size().
super(BaseLSTMNetwork, self).__init__(
component, init_layers=layers, init_context_layers=context_layers)
# Allocate parameters for the softmax.
self._params.append(
tf.get_variable(
'weights_softmax', [last_layer_dim, component.num_actions],
initializer=tf.random_normal_initializer(stddev=1e-4)))
self._params.append(
tf.get_variable(
'bias_softmax', [component.num_actions],
initializer=tf.zeros_initializer()))
def get_logits(self, network_tensors):
"""Returns the logits for prediction."""
return network_tensors[self.get_layer_index('logits')]
@abc.abstractmethod
def create_hidden_layers(self, component, hidden_layer_sizes):
"""Creates hidden network layers.
Args:
component: Parent ComponentBuilderBase object.
hidden_layer_sizes: List of requested hidden layer activation sizes.
Returns:
layers: List of layers created by this network.
context_layers: List of context layers created by this network.
"""
pass
def _append_base_layers(self, hidden_layers):
"""Appends layers defined by the base class to the |hidden_layers|."""
last_layer = hidden_layers[-1]
logits = tf.nn.xw_plus_b(last_layer,
self._component.get_variable('weights_softmax'),
self._component.get_variable('bias_softmax'))
return hidden_layers + [last_layer, logits]
def _create_cell(self, num_units, during_training):
"""Creates a single LSTM cell, possibly with dropout.
Requires that BaseLSTMNetwork.__init__() was called.
Args:
num_units: Number of hidden units in the cell.
during_training: Whether to create a cell for training (vs inference).
Returns:
A RNNCell of the requested size, possibly with dropout.
"""
# No dropout in inference mode.
if not during_training:
return tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units, layer_norm=self._attrs['layer_norm'], reuse=True)
# Otherwise, apply dropout to inputs and recurrences.
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units,
dropout_keep_prob=self._recurrent_dropout_rate,
layer_norm=self._attrs['layer_norm'])
cell = tf.contrib.rnn.DropoutWrapper(
cell, input_keep_prob=self._input_dropout_rate)
return cell
def _create_train_cells(self):
"""Creates a list of LSTM cells for training."""
return [
self._create_cell(num_units, during_training=True)
for num_units in self._hidden_layer_sizes
]
def _create_inference_cells(self):
"""Creates a list of LSTM cells for inference."""
return [
self._create_cell(num_units, during_training=False)
for num_units in self._hidden_layer_sizes
]
def _capture_variables_as_params(self, function):
"""Captures variables created by a function in |self._params|."""
self._params.extend(capture_variables(function, 'cell'))
def _apply_with_captured_variables(self, function):
"""Applies a function using previously-captured variables."""
return apply_with_captured_variables(function, 'cell', self._component)
class LayerNormBasicLSTMNetwork(BaseLSTMNetwork):
"""Wrapper around tf.contrib.rnn.LayerNormBasicLSTMCell.
Features:
All inputs are concatenated.
Subclass-specific layers:
state_c_<n>: Cell states for the <n>'th LSTM layer (0-origin).
state_h_<n>: Hidden states for the <n>'th LSTM layer (0-origin).
"""
def __init__(self, component):
"""Sets up context and output layers, as well as a final softmax."""
super(LayerNormBasicLSTMNetwork, self).__init__(component)
# Wrap lists of training and inference sub-cells into multi-layer RNN cells.
# Note that a |MultiRNNCell| state is a tuple of per-layer sub-states.
self._train_cell = tf.contrib.rnn.MultiRNNCell(self._create_train_cells())
self._inference_cell = tf.contrib.rnn.MultiRNNCell(
self._create_inference_cells())
def _cell_closure(scope):
"""Applies the LSTM cell to placeholder inputs and state."""
placeholder_inputs = tf.placeholder(
dtype=tf.float32, shape=(1, self._concatenated_input_dim))
placeholder_substates = []
for num_units in self._hidden_layer_sizes:
placeholder_substate = tf.contrib.rnn.LSTMStateTuple(
tf.placeholder(dtype=tf.float32, shape=(1, num_units)),
tf.placeholder(dtype=tf.float32, shape=(1, num_units)))
placeholder_substates.append(placeholder_substate)
placeholder_state = tuple(placeholder_substates)
self._train_cell(
inputs=placeholder_inputs, state=placeholder_state, scope=scope)
self._capture_variables_as_params(_cell_closure)
def create_hidden_layers(self, component, hidden_layer_sizes):
"""See base class."""
# Construct the layer meta info for the DRAGNN builder. Note that the order
# of h and c are reversed compared to the vanilla DRAGNN LSTM cell, as
# this is the standard in tf.contrib.rnn.
#
# NB: The h activations of the last LSTM must be the last layer, in order
# for _append_base_layers() to work.
layers = []
for index, num_units in enumerate(hidden_layer_sizes):
layers.append(
dragnn.Layer(component, name='state_c_%d' % index, dim=num_units))
layers.append(
dragnn.Layer(component, name='state_h_%d' % index, dim=num_units))
context_layers = list(layers) # copy |layers|, don't alias it
return layers, context_layers
def create(self,
fixed_embeddings,
linked_embeddings,
context_tensor_arrays,
attention_tensor,
during_training,
stride=None):
"""See base class."""
# NB: This cell pulls the lstm's h and c vectors from context_tensor_arrays
# instead of through linked features.
check.Eq(
len(context_tensor_arrays), 2 * len(self._hidden_layer_sizes),
'require two context tensors per hidden layer')
# Rearrange the context tensors into a tuple of LSTM sub-states.
length = context_tensor_arrays[0].size()
substates = []
for index, num_units in enumerate(self._hidden_layer_sizes):
state_c = context_tensor_arrays[2 * index].read(length - 1)
state_h = context_tensor_arrays[2 * index + 1].read(length - 1)
      # Fix shapes that are not set properly, for an unknown reason.
      # TODO(googleuser): Why are the shapes not set?
state_c.set_shape([tf.Dimension(None), num_units])
state_h.set_shape([tf.Dimension(None), num_units])
substates.append(tf.contrib.rnn.LSTMStateTuple(state_c, state_h))
state = tuple(substates)
input_tensor = dragnn.get_input_tensor(fixed_embeddings, linked_embeddings)
cell = self._train_cell if during_training else self._inference_cell
def _cell_closure(scope):
"""Applies the LSTM cell to the current inputs and state."""
return cell(input_tensor, state, scope=scope)
unused_h, state = self._apply_with_captured_variables(_cell_closure)
# Return tensors to be put into the tensor arrays / used to compute
# objective.
output_tensors = []
for new_substate in state:
new_c, new_h = new_substate
output_tensors.append(new_c)
output_tensors.append(new_h)
return self._append_base_layers(output_tensors)
class BulkBiLSTMNetwork(BaseLSTMNetwork):
"""Bulk wrapper around tf.contrib.rnn.stack_bidirectional_dynamic_rnn().
Features:
lengths: [stride, 1] sequence lengths per batch item.
All other features are concatenated into input activations.
Subclass-specific layers:
outputs: [stride * num_steps, self._output_dim] bi-LSTM activations.
"""
def __init__(self, component):
"""Initializes the bulk bi-LSTM.
Parameters used:
parallel_iterations (1): Parallelism of the underlying tf.while_loop().
Defaults to 1 thread to encourage deterministic behavior, but can be
increased to trade memory for speed.
Args:
component: parent ComponentBuilderBase object.
"""
super(BulkBiLSTMNetwork, self).__init__(
component, additional_attr_defaults={'parallel_iterations': 1})
check.In('lengths', self._linked_feature_dims,
'Missing required linked feature')
check.Eq(self._linked_feature_dims['lengths'], 1,
'Wrong dimension for "lengths" feature')
self._input_dim = self._concatenated_input_dim - 1 # exclude 'lengths'
self._output_dim = self.get_layer_size('outputs')
tf.logging.info('[%s] Bulk bi-LSTM with input_dim=%d output_dim=%d',
component.name, self._input_dim, self._output_dim)
# Create one training and inference cell per layer and direction.
self._train_cells_forward = self._create_train_cells()
self._train_cells_backward = self._create_train_cells()
self._inference_cells_forward = self._create_inference_cells()
self._inference_cells_backward = self._create_inference_cells()
def _bilstm_closure(scope):
"""Applies the bi-LSTM to placeholder inputs and lengths."""
# Use singleton |stride| and |steps| because their values don't affect the
# weight variables.
stride, steps = 1, 1
placeholder_inputs = tf.placeholder(
dtype=tf.float32, shape=[stride, steps, self._input_dim])
placeholder_lengths = tf.placeholder(dtype=tf.int64, shape=[stride])
      # Omit the initial states for simplicity; they don't affect the
      # weight variables.
tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
self._train_cells_forward,
self._train_cells_backward,
placeholder_inputs,
dtype=tf.float32,
sequence_length=placeholder_lengths,
scope=scope)
self._capture_variables_as_params(_bilstm_closure)
# Allocate parameters for the initial states. Note that an LSTM state is a
# tuple of two substates (c, h), so there are 4 variables per layer.
for index, num_units in enumerate(self._hidden_layer_sizes):
for direction in ['forward', 'backward']:
for substate in ['c', 'h']:
self._params.append(
tf.get_variable(
'initial_state_%s_%s_%d' % (direction, substate, index),
[1, num_units], # leading 1 for later batch-wise tiling
dtype=tf.float32,
initializer=tf.constant_initializer(0.0)))
def create_hidden_layers(self, component, hidden_layer_sizes):
"""See base class."""
dim = 2 * hidden_layer_sizes[-1]
return [dragnn.Layer(component, name='outputs', dim=dim)], []
def create(self,
fixed_embeddings,
linked_embeddings,
context_tensor_arrays,
attention_tensor,
during_training,
stride=None):
"""Requires |stride|; otherwise see base class."""
check.NotNone(stride,
'BulkBiLSTMNetwork requires "stride" and must be called '
'in the bulk feature extractor component.')
# Flatten the lengths into a vector.
lengths = dragnn.lookup_named_tensor('lengths', linked_embeddings)
lengths_s = tf.squeeze(lengths.tensor, [1])
# Collect all other inputs into a batched tensor.
linked_embeddings = [
named_tensor for named_tensor in linked_embeddings
if named_tensor.name != 'lengths'
]
inputs_sxnxd = dragnn.get_input_tensor_with_stride(
fixed_embeddings, linked_embeddings, stride)
# Since get_input_tensor_with_stride() concatenates the input embeddings, it
# obscures the static activation dimension, which the RNN library requires.
# Restore it using set_shape(). Note that set_shape() merges into the known
# shape, so only specify the activation dimension.
inputs_sxnxd.set_shape(
[tf.Dimension(None), tf.Dimension(None), self._input_dim])
initial_states_forward, initial_states_backward = (
self._create_initial_states(stride))
if during_training:
cells_forward = self._train_cells_forward
cells_backward = self._train_cells_backward
else:
cells_forward = self._inference_cells_forward
cells_backward = self._inference_cells_backward
def _bilstm_closure(scope):
"""Applies the bi-LSTM to the current inputs."""
outputs_sxnxd, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
cells_forward,
cells_backward,
inputs_sxnxd,
initial_states_fw=initial_states_forward,
initial_states_bw=initial_states_backward,
sequence_length=lengths_s,
parallel_iterations=self._attrs['parallel_iterations'],
scope=scope)
return outputs_sxnxd
# Layer outputs are not batched; flatten out the batch dimension.
outputs_sxnxd = self._apply_with_captured_variables(_bilstm_closure)
outputs_snxd = tf.reshape(outputs_sxnxd, [-1, self._output_dim])
return self._append_base_layers([outputs_snxd])
def _create_initial_states(self, stride):
"""Returns stacked and batched initial states for the bi-LSTM."""
initial_states_forward = []
initial_states_backward = []
for index in range(len(self._hidden_layer_sizes)):
# Retrieve the initial states for this layer.
states_sxd = []
for direction in ['forward', 'backward']:
for substate in ['c', 'h']:
state_1xd = self._component.get_variable('initial_state_%s_%s_%d' %
(direction, substate, index))
state_sxd = tf.tile(state_1xd, [stride, 1]) # tile across the batch
states_sxd.append(state_sxd)
# Assemble and append forward and backward LSTM states.
initial_states_forward.append(
tf.contrib.rnn.LSTMStateTuple(states_sxd[0], states_sxd[1]))
initial_states_backward.append(
tf.contrib.rnn.LSTMStateTuple(states_sxd[2], states_sxd[3]))
return initial_states_forward, initial_states_backward
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys,sqlite3,traceback
largv = []
g_dbpath = None
k_vt_col_map = { '':'\x1b[0m', 'default':'\x1b[0m', 'black':'\x1b[30m', 'red':'\x1b[31m', 'green':'\x1b[32m', 'yellow':'\x1b[33m',
'blue':'\x1b[34m', 'magenta':'\x1b[35m', 'cyan':'\x1b[36m', 'white':'\x1b[37m',
'bdefault':'\x1b[49m', 'bblack':'\x1b[40m', 'bred':'\x1b[41m', 'bgreen':'\x1b[42m', 'byellow':'\x1b[43m',
'bblue':'\x1b[44m', 'bmagenta':'\x1b[45m', 'bcyan':'\x1b[46m', 'bwhite':'\x1b[47m' }
vt_cm = k_vt_col_map
def set_vt_col(col):
sys.stdout.write(k_vt_col_map[col])
def largv_has(keys):
for i in range(len(keys)):
if (keys[i] in largv):
return True
return False
def largv_has_key(keys):
for key in keys:
ki = largv.index(key) if key in largv else -1
if (ki >= 0 and ki+1 < len(largv)):
return True
return False
def largv_get(keys, dflt):
	for key in keys:
		ki = largv.index(key) if key in largv else -1
		if (ki >= 0 and ki+1 < len(largv)):
			return largv[ki+1]
	return dflt
def largv_geti(i, dflt):
if (i >= len(largv)):
return dflt
return largv[i]
def init_getch():
def init_getch_win():
from msvcrt import getch as win_getch
return win_getch
def init_getch_posix():
import tty,sys,termios
def posix_getch():
fd = sys.stdin.fileno(); old_settings = termios.tcgetattr(fd);
try:
tty.setraw(sys.stdin.fileno()); ch = sys.stdin.read(1);
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings);
if ord(ch) == 13:
return '\n'
elif ch == '\x1B':
return [ch, posix_getch(), posix_getch()]
else:
return ch
return posix_getch
impls = [init_getch_win, init_getch_posix]
for impl in impls:
try:
return impl()
except:
#traceback.print_exc()
pass
return None
getch = init_getch()
def vt_hist_create():
return { 'list':[], 'max':30, 'index':0 }
def vt_hist_add(hist, item, max = 30):
	# The history is the dict created by vt_hist_create(); its entries live in 'list'.
	if item not in hist['list']:
		hist['list'].append(item)
		if len(hist['list']) > max:
			hist['list'].pop(0)
def vt_edit(prefix, initial, hist = None):
inpchars = [x for x in initial]
while True:
		print('\x1B[2K', '\r{} {}'.format(prefix, ''.join(inpchars)), end=' ')
		sys.stdout.flush()  # the prompt has no trailing newline, so flush it to the terminal
pre_inp = getch()
#print '[{}]'.format(pre_inp[0] == '\x1B')
if (len(pre_inp) >= 3 and pre_inp[0:3] == ['\x1B', '[', 'A'] and hist):
if (hist['index'] >= -len(hist['list'])):
hist['index'] = hist['index']-1
if (hist['index'] >= -len(hist['list'])):
inpchars = [x for x in hist['list'][hist['index']]]
else:
inpchars = []
		elif (len(pre_inp) >= 3 and pre_inp[0:3] == ['\x1B', '[', 'B'] and hist):
if (hist['index'] <= -1):
hist['index'] = hist['index']+1
if (hist['index'] < 0):
inpchars = [x for x in hist['list'][hist['index']]]
else:
inpchars = []
if len(pre_inp) == 1:
if ('\n' in pre_inp):
sys.stdout.write('\n')
break
else:
for x in pre_inp:
if x == '\x7F':
if len(inpchars):
inpchars.pop()
else:
inpchars.append(x)
return ''.join(inpchars)
def dbGetNode(conn, id):
	recs = conn.execute('SELECT node_id, name FROM nodes WHERE node_id=?', (id,) )
	rec = recs.fetchone(); recs.close(); return rec;
def dbFindNode(conn, name, soft=False):
	recs = conn.execute('SELECT node_id, name FROM nodes WHERE name=?', (name,) )
	rec = recs.fetchone(); recs.close();
	if soft and rec is None:
		# Soft match: fall back to the first node whose name contains the query.
		for n in dbGetNodes(conn):
			if name in n[1]:
				return n
	return rec
def dbHasNode(conn, name):
return dbFindNode(conn, name) != None
def dbGetNodes(conn):
ret = []
recs = conn.execute('SELECT node_id, name FROM nodes')
rec = recs.fetchone()
while (rec != None):
ret.append(rec)
rec = recs.fetchone()
recs.close()
return ret
def dbAddNode(conn, node):
conn.execute("INSERT INTO nodes VALUES (?,?)", (None, node[1] ) )
conn.commit()
def dbUpdateNode(conn, node):
conn.execute('UPDATE nodes SET name=? WHERE node_id=?', (node[1], node[0]) )
conn.commit()
def dbAddEdge(conn, frm, to, tp, descr):
conn.execute("INSERT INTO edges VALUES (?,?,?,?,?)", (None, frm, to, tp, descr) )
conn.commit()
def dbUpdateEdge(conn, edge):
	conn.execute('UPDATE edges SET node_frm=?, node_to=?, type=?, descr=? WHERE edge_id=?', (edge[1], edge[2], edge[3], edge[4], edge[0]) )
	conn.commit()
def dbGetNodeEdges(conn, node_id):
rets = ([], []); conds = ('node_frm', 'node_to');
for i in range(len(conds)):
cond = conds[i]
recs = conn.execute('SELECT edge_id, node_frm, node_to, type, descr FROM edges WHERE {}=?'.format(cond), (node_id, ) )
rec = recs.fetchone()
while (rec != None):
rets[i].append(rec)
rec = recs.fetchone()
recs.close()
return rets
def dbBootstrap(conn):
conn.execute('CREATE TABLE nodes(node_id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)')
conn.execute('CREATE TABLE edges(edge_id INTEGER PRIMARY KEY AUTOINCREMENT, node_frm INTEGER, node_to INTEGER, type TEXT, descr TEXT)')
conn.commit()
def dbStartSession(dbPath):
	conn = None
	if (dbPath is not None):
		tail, head = os.path.split(dbPath)
		if (tail and not os.path.isdir(tail)):
			os.makedirs(tail)
		conn = sqlite3.connect(dbPath)
		tableListQuery = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY Name"
		cursor = conn.execute(tableListQuery)
		tables = list(map(lambda t: t[0], cursor.fetchall()))
		cursor.close()
		if (len(tables) == 0):
			dbBootstrap(conn)
	else:
		conn = sqlite3.connect(":memory:")
		dbBootstrap(conn)
	return conn
def dbEndSession(conn):
if conn is None:
return 0
conn.close()
def printList(prefix, lst, sep, col1, col2):
for i in range(len(lst)):
set_vt_col(col2 if i%2 else col1)
print('{}{}{}'.format(prefix, lst[i], sep if i+1<len(lst) else ''), end=' ' )
if len(lst):
set_vt_col('default')
print('')
def runInputLoop():
conn = dbStartSession(g_dbpath)
try:
while True:
inp = vt_edit('>', '')
#inp = raw_input(); print inp;
input_splt = inp.split(' ')
cmd = input_splt[0]
if cmd == 'q':
break
elif cmd == 'a':
name = ' '.join(input_splt[1:]).strip()
if dbHasNode(conn, name) == False:
dbAddNode(conn, [None, name])
elif cmd == '-':
try:
inn = ' '.join(input_splt[1:])
n1 = inn.split('-')[0].strip()
n2 = inn.split('-')[1].split(':')[0].strip()
descr = inn.split('-')[1].split(':')[1].strip()
in1 = dbFindNode(conn, n1)
in2 = dbFindNode(conn, n2)
if in1 != None and in2 != None:
dbAddEdge(conn, in1[0], in2[0], '-', descr)
except:
traceback.print_exc()
elif cmd == 'l':
if len(input_splt) > 1:
try:
in1 = dbFindNode(conn, input_splt[1].strip(), True)
edges = dbGetNodeEdges(conn, in1[0])
e1 = ['{}: {}'.format(dbGetNode(conn, x[2])[1], x[4]) for x in edges[0]]
e2 = ['{}: {}'.format(dbGetNode(conn, x[1])[1], x[4]) for x in edges[1]]
printList(' ', e1+e2, '\n', 'white', 'yellow')
except:
traceback.print_exc()
else:
printList(' ', [x[1] for x in dbGetNodes(conn)], ',', 'white', 'yellow')
	except:
		dbEndSession(conn)
		traceback.print_exc()
		raise  # re-raise the original exception with its traceback intact
dbEndSession(conn)
def main():
global largv
global g_dbpath
largv = sys.argv
set_vt_col('default'); print('');
if largv_has(['-db']):
g_dbpath = largv_get(['-db'], None)
runInputLoop()
main()
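# Usage sketch (inferred from the command dispatch above; 'graph_cli.py' stands
# for whatever this file is saved as, and '-db' is optional -- without it the
# data lives in an in-memory sqlite database):
#   python graph_cli.py -db /path/to/graph.db
#   > a Alice                  # add a node named "Alice"
#   > a Bob                    # add a node named "Bob"
#   > - Alice - Bob : knows    # add an edge Alice -> Bob described as "knows"
#   > l                        # list all nodes
#   > l Alice                  # list the edges touching the node "Alice"
#   > q                        # quit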
|
|
"""
Generates a pseudo-random domain-like name: two or three words from a word
list, joined by '-', '.' or nothing, plus a short TLD-style suffix.
"""
import random
nouns = [
'people',
'history',
'way',
'art',
'world',
'information',
'map',
'two',
'family',
'government',
'health',
'system',
'computer',
'meat',
'year',
'thanks',
'music',
'person',
'reading',
'method',
'data',
'food',
'understanding',
'theory',
'law',
'bird',
'literature',
'problem',
'software',
'control',
'knowledge',
'power',
'ability',
'economics',
'love',
'internet',
'television',
'science',
'library',
'nature',
'fact',
'product',
'idea',
'temperature',
'investment',
'area',
'society',
'activity',
'story',
'industry',
'media',
'thing',
'oven',
'community',
'definition',
'safety',
'quality',
'development',
'language',
'management',
'player',
'variety',
'video',
'week',
'security',
'country',
'exam',
'movie',
'organization',
'equipment',
'physics',
'analysis',
'policy',
'series',
'thought',
'basis',
'boyfriend',
'direction',
'strategy',
'technology',
'army',
'camera',
'freedom',
'paper',
'environment',
'child',
'instance',
'month',
'truth',
'marketing',
'university',
'writing',
'article',
'department',
'difference',
'goal',
'news',
'audience',
'fishing',
'growth',
'income',
'marriage',
'user',
'combination',
'failure',
'meaning',
'medicine',
'philosophy',
'teacher',
'communication',
'night',
'chemistry',
'disease',
'disk',
'energy',
'nation',
'road',
'role',
'soup',
'advertising',
'location',
'success',
'addition',
'apartment',
'education',
'math',
'moment',
'painting',
'politics',
'attention',
'decision',
'event',
'property',
'shopping',
'student',
'wood',
'competition',
'distribution',
'entertainment',
'office',
'population',
'president',
'unit',
'category',
'cigarette',
'context',
'introduction',
'opportunity',
'performance',
'driver',
'flight',
'length',
'magazine',
'newspaper',
'relationship',
'teaching',
'cell',
'dealer',
'debate',
'finding',
'lake',
'member',
'message',
'phone',
'scene',
'appearance',
'association',
'concept',
'customer',
'death',
'discussion',
'housing',
'inflation',
'insurance',
'mood',
'woman',
'advice',
'blood',
'effort',
'expression',
'importance',
'opinion',
'payment',
'reality',
'responsibility',
'situation',
'skill',
'statement',
'wealth',
'application',
'city',
'county',
'depth',
'estate',
'foundation',
'grandmother',
'heart',
'perspective',
'photo',
'recipe',
'studio',
'topic',
'collection',
'depression',
'imagination',
'passion',
'percentage',
'resource',
'setting',
'ad',
'agency',
'college',
'connection',
'criticism',
'debt',
'description',
'memory',
'patience',
'secretary',
'solution',
'administration',
'aspect',
'attitude',
'director',
'personality',
'psychology',
'recommendation',
'response',
'selection',
'storage',
'version',
'alcohol',
'argument',
'complaint',
'contract',
'emphasis',
'highway',
'loss',
'membership',
'possession',
'preparation',
'steak',
'union',
'agreement',
'cancer',
'currency',
'employment',
'engineering',
'entry',
'interaction',
'limit',
'mixture',
'preference',
'region',
'republic',
'seat',
'tradition',
'virus',
'actor',
'classroom',
'delivery',
'device',
'difficulty',
'drama',
'election',
'engine',
'football',
'guidance',
'hotel',
'match',
'owner',
'priority',
'protection',
'suggestion',
'tension',
'variation',
'anxiety',
'atmosphere',
'awareness',
'bread',
'climate',
'comparison',
'confusion',
'construction',
'elevator',
'emotion',
'employee',
'employer',
'guest',
'height',
'leadership',
'mall',
'manager',
'operation',
'recording',
'respect',
'sample',
'transportation',
'boring',
'charity',
'cousin',
'disaster',
'editor',
'efficiency',
'excitement',
'extent',
'feedback',
'guitar',
'homework',
'leader',
'mom',
'outcome',
'permission',
'presentation',
'promotion',
'reflection',
'refrigerator',
'resolution',
'revenue',
'session',
'singer',
'tennis',
'basket',
'bonus',
'cabinet',
'childhood',
'church',
'clothes',
'coffee',
'dinner',
'drawing',
'hair',
'hearing',
'initiative',
'judgment',
'lab',
'measurement',
'mode',
'mud',
'orange',
'poetry',
'police',
'possibility',
'procedure',
'queen',
'ratio',
'relation',
'restaurant',
'satisfaction',
'sector',
'signature',
'significance',
'song',
'tooth',
'town',
'vehicle',
'volume',
'wife',
'accident',
'airport',
'appointment',
'arrival',
'assumption',
'baseball',
'chapter',
'committee',
'conversation',
'database',
'enthusiasm',
'error',
'explanation',
'farmer',
'gate',
'girl',
'hall',
'historian',
'hospital',
'injury',
'instruction',
'maintenance',
'manufacturer',
'meal',
'perception',
'pie',
'poem',
'presence',
'proposal',
'reception',
'replacement',
'revolution',
'river',
'son',
'speech',
'tea',
'village',
'warning',
'winner',
'worker',
'writer',
'assistance',
'breath',
'buyer',
'chest',
'chocolate',
'conclusion',
'contribution',
'cookie',
'courage',
'dad',
'desk',
'drawer',
'establishment',
'examination',
'garbage',
'grocery',
'honey',
'impression',
'improvement',
'independence',
'insect',
'inspection',
'inspector',
'king',
'ladder',
'menu',
'penalty',
'piano',
'potato',
'profession',
'professor',
'quantity',
'reaction',
'requirement',
'salad',
'sister',
'supermarket',
'tongue',
'weakness',
'wedding',
'affair',
'ambition',
'analyst',
'apple',
'assignment',
'assistant',
'bathroom',
'bedroom',
'beer',
'birthday',
'celebration',
'championship',
'cheek',
'client',
'consequence',
'departure',
'diamond',
'dirt',
'ear',
'fortune',
'friendship',
'funeral',
'gene',
'girlfriend',
'hat',
'indication',
'intention',
'lady',
'midnight',
'negotiation',
'obligation',
'passenger',
'pizza',
'platform',
'poet',
'pollution',
'recognition',
'reputation',
'shirt',
'sir',
'speaker',
'stranger',
'surgery',
'sympathy',
'tale',
'throat',
'trainer',
'uncle',
'youth',
'time',
'work',
'film',
'water',
'money',
'example',
'while',
'business',
'study',
'game',
'life',
'form',
'air',
'day',
'place',
'number',
'part',
'field',
'fish',
'back',
'process',
'heat',
'hand',
'experience',
'job',
'book',
'end',
'point',
'type',
'home',
'economy',
'value',
'body',
'market',
'guide',
'interest',
'state',
'radio',
'course',
'company',
'price',
'size',
'card',
'list',
'mind',
'trade',
'line',
'care',
'group',
'risk',
'word',
'fat',
'force',
'key',
'light',
'training',
'name',
'school',
'top',
'amount',
'level',
'order',
'practice',
'research',
'sense',
'service',
'piece',
'web',
'boss',
'sport',
'fun',
'house',
'page',
'term',
'test',
'answer',
'sound',
'focus',
'matter',
'kind',
'soil',
'board',
'oil',
'picture',
'access',
'garden',
'range',
'rate',
'reason',
'future',
'site',
'demand',
'exercise',
'image',
'case',
'cause',
'coast',
'action',
'age',
'bad',
'boat',
'record',
'result',
'section',
'building',
'mouse',
'cash',
'class',
'nothing',
'period',
'plan',
'store',
'tax',
'side',
'subject',
'space',
'rule',
'stock',
'weather',
'chance',
'figure',
'man',
'model',
'source',
'beginning',
'earth',
'program',
'chicken',
'design',
'feature',
'head',
'material',
'purpose',
'question',
'rock',
'salt',
'act',
'birth',
'car',
'dog',
'object',
'scale',
'sun',
'note',
'profit',
'rent',
'speed',
'style',
'war',
'bank',
'craft',
'half',
'inside',
'outside',
'standard',
'bus',
'exchange',
'eye',
'fire',
'position',
'pressure',
'stress',
'advantage',
'benefit',
'box',
'frame',
'issue',
'step',
'cycle',
'face',
'item',
'metal',
'paint',
'review',
'room',
'screen',
'structure',
'view',
'account',
'ball',
'discipline',
'medium',
'share',
'balance',
'bit',
'black',
'bottom',
'choice',
'gift',
'impact',
'machine',
'shape',
'tool',
'wind',
'address',
'average',
'career',
'culture',
'morning',
'pot',
'sign',
'table',
'task',
'condition',
'contact',
'credit',
'egg',
'hope',
'ice',
'network',
'north',
'square',
'attempt',
'date',
'effect',
'link',
'post',
'star',
'voice',
'capital',
'challenge',
'friend',
'self',
'shot',
'brush',
'couple',
'exit',
'front',
'function',
'lack',
'living',
'plant',
'plastic',
'spot',
'summer',
'taste',
'theme',
'track',
'wing',
'brain',
'button',
'click',
'desire',
'foot',
'gas',
'influence',
'notice',
'rain',
'wall',
'base',
'damage',
'distance',
'feeling',
'pair',
'savings',
'staff',
'sugar',
'target',
'text',
'animal',
'author',
'budget',
'discount',
'file',
'ground',
'lesson',
'minute',
'officer',
'phase',
'reference',
'register',
'sky',
'stage',
'stick',
'title',
'trouble',
'bowl',
'bridge',
'campaign',
'character',
'club',
'edge',
'evidence',
'fan',
'letter',
'lock',
'maximum',
'novel',
'option',
'pack',
'park',
'plenty',
'quarter',
'skin',
'sort',
'weight',
'baby',
'background',
'carry',
'dish',
'factor',
'fruit',
'glass',
'joint',
'master',
'muscle',
'red',
'strength',
'traffic',
'trip',
'vegetable',
'appeal',
'chart',
'gear',
'ideal',
'kitchen',
'land',
'log',
'mother',
'net',
'party',
'principle',
'relative',
'sale',
'season',
'signal',
'spirit',
'street',
'tree',
'wave',
'belt',
'bench',
'commission',
'copy',
'drop',
'minimum',
'path',
'progress',
'project',
'sea',
'south',
'status',
'stuff',
'ticket',
'tour',
'angle',
'blue',
'breakfast',
'confidence',
'daughter',
'degree',
'doctor',
'dot',
'dream',
'duty',
'essay',
'father',
'fee',
'finance',
'hour',
'juice',
'luck',
'milk',
'mouth',
'peace',
'pipe',
'stable',
'storm',
'substance',
'team',
'trick',
'afternoon',
'bat',
'beach',
'blank',
'catch',
'chain',
'consideration',
'cream',
'crew',
'detail',
'gold',
'interview',
'kid',
'mark',
'mission',
'pain',
'pleasure',
'score',
'screw',
'sex',
'shop',
'shower',
'suit',
'tone',
'window',
'agent',
'band',
'bath',
'block',
'bone',
'calendar',
'candidate',
'cap',
'coat',
'contest',
'corner',
'court',
'cup',
'district',
'door',
'east',
'finger',
'garage',
'guarantee',
'hole',
'hook',
'implement',
'layer',
'lecture',
'lie',
'manner',
'meeting',
'nose',
'parking',
'partner',
'profile',
'rice',
'routine',
'schedule',
'swimming',
'telephone',
'tip',
'winter',
'airline',
'bag',
'battle',
'bed',
'bill',
'bother',
'cake',
'code',
'curve',
'designer',
'dimension',
'dress',
'ease',
'emergency',
'evening',
'extension',
'farm',
'fight',
'gap',
'grade',
'holiday',
'horror',
'horse',
'host',
'husband',
'loan',
'mistake',
'mountain',
'nail',
'noise',
'occasion',
'package',
'patient',
'pause',
'phrase',
'proof',
'race',
'relief',
'sand',
'sentence',
'shoulder',
'smoke',
'stomach',
'string',
'tourist',
'towel',
'vacation',
'west',
'wheel',
'wine',
'arm',
'aside',
'associate',
'bet',
'blow',
'border',
'branch',
'breast',
'brother',
'buddy',
'bunch',
'chip',
'coach',
'cross',
'document',
'draft',
'dust',
'expert',
'floor',
'god',
'golf',
'habit',
'iron',
'judge',
'knife',
'landscape',
'league',
'mail',
'mess',
'native',
'opening',
'parent',
'pattern',
'pin',
'pool',
'pound',
'request',
'salary',
'shame',
'shelter',
'shoe',
'silver',
'tackle',
'tank',
'trust',
'assist',
'bake',
'bar',
'bell',
'bike',
'blame',
'boy',
'brick',
'chair',
'closet',
'clue',
'collar',
'comment',
'conference',
'devil',
'diet',
'fear',
'fuel',
'glove',
'jacket',
'lunch',
'monitor',
'mortgage',
'nurse',
'pace',
'panic',
'peak',
'plane',
'reward',
'row',
'sandwich',
'shock',
'spite',
'spray',
'surprise',
'till',
'transition',
'weekend',
'welcome',
'yard',
'alarm',
'bend',
'bicycle',
'bite',
'blind',
'bottle',
'cable',
'candle',
'clerk',
'cloud',
'concert',
'counter',
'flower',
'grandfather',
'harm',
'knee',
'lawyer',
'leather',
'load',
'mirror',
'neck',
'pension',
'plate',
'purple',
'ruin',
'ship',
'skirt',
'slice',
'snow',
'specialist',
'stroke',
'switch',
'trash',
'tune',
'zone',
'anger',
'award',
'bid',
'bitter',
'boot',
'bug',
'camp',
'candy',
'carpet',
'cat',
'champion',
'channel',
'clock',
'comfort',
'cow',
'crack',
'engineer',
'entrance',
'fault',
'grass',
'guy',
'hell',
'highlight',
'incident',
'island',
'joke',
'jury',
'leg',
'lip',
'mate',
'motor',
'nerve',
'passage',
'pen',
'pride',
'priest',
'prize',
'promise',
'resident',
'resort',
'ring',
'roof',
'rope',
'sail',
'scheme',
'script',
'sock',
'station',
'toe',
'tower',
'truck',
'witness',
'a',
'you',
'it',
'can',
'will',
'if',
'one',
'many',
'most',
'other',
'use',
'make',
'good',
'look',
'help',
'go',
'great',
'being',
'few',
'might',
'still',
'public',
'read',
'keep',
'start',
'give',
'human',
'local',
'general',
'she',
'specific',
'long',
'play',
'feel',
'high',
'tonight',
'put',
'common',
'set',
'change',
'simple',
'past',
'big',
'possible',
'particular',
'today',
'major',
'personal',
'current',
'national',
'cut',
'natural',
'physical',
'show',
'try',
'check',
'second',
'call',
'move',
'pay',
'let',
'increase',
'single',
'individual',
'turn',
'ask',
'buy',
'guard',
'hold',
'main',
'offer',
'potential',
'professional',
'international',
'travel',
'cook',
'alternative',
'following',
'special',
'working',
'whole',
'dance',
'excuse',
'cold',
'commercial',
'low',
'purchase',
'deal',
'primary',
'worth',
'fall',
'necessary',
'positive',
'produce',
'search',
'present',
'spend',
'talk',
'creative',
'tell',
'cost',
'drive',
'green',
'support',
'glad',
'remove',
'return',
'run',
'complex',
'due',
'effective',
'middle',
'regular',
'reserve',
'independent',
'leave',
'original',
'reach',
'rest',
'serve',
'watch',
'beautiful',
'charge',
'active',
'break',
'negative',
'safe',
'stay',
'visit',
'visual',
'affect',
'cover',
'report',
'rise',
'walk',
'white',
'beyond',
'junior',
'pick',
'unique',
'anything',
'classic',
'final',
'lift',
'mix',
'private',
'stop',
'teach',
'western',
'concern',
'familiar',
'fly',
'official',
'broad',
'comfortable',
'gain',
'maybe',
'rich',
'save',
'stand',
'young',
'heavy',
'hello',
'lead',
'listen',
'valuable',
'worry',
'handle',
'leading',
'meet',
'release',
'sell',
'finish',
'normal',
'press',
'ride',
'secret',
'spread',
'spring',
'tough',
'wait',
'brown',
'deep',
'display',
'flow',
'hit',
'objective',
'shoot',
'touch',
'cancel',
'chemical',
'cry',
'dump',
'extreme',
'push',
'conflict',
'eat',
'fill',
'formal',
'jump',
'kick',
'opposite',
'pass',
'pitch',
'remote',
'total',
'treat',
'vast',
'abuse',
'beat',
'burn',
'deposit',
'print',
'raise',
'sleep',
'somewhere',
'advance',
'anywhere',
'consist',
'dark',
'double',
'draw',
'equal',
'fix',
'hire',
'internal',
'join',
'kill',
'sensitive',
'tap',
'win',
'attack',
'claim',
'constant',
'drag',
'drink',
'guess',
'minor',
'pull',
'raw',
'soft',
'solid',
'wear',
'weird',
'wonder',
'annual',
'count',
'dead',
'doubt',
'feed',
'forever',
'impress',
'nobody',
'repeat',
'round',
'sing',
'slide',
'strip',
'whereas',
'wish',
'combine',
'command',
'dig',
'divide',
'equivalent',
'hang',
'hunt',
'initial',
'march',
'mention',
'spiritual',
'survey',
'tie',
'adult',
'brief',
'crazy',
'escape',
'gather',
'hate',
'prior',
'repair',
'rough',
'sad',
'scratch',
'sick',
'strike',
'employ',
'external',
'hurt',
'illegal',
'laugh',
'lay',
'mobile',
'nasty',
'ordinary',
'respond',
'royal',
'senior',
'split',
'strain',
'struggle',
'swim',
'train',
'upper',
'wash',
'yellow',
'convert',
'crash',
'dependent',
'fold',
'funny',
'grab',
'hide',
'miss',
'permit',
'quote',
'recover',
'resolve',
'roll',
'sink',
'slip',
'spare',
'suspect',
'sweet',
'swing',
'twist',
'upstairs',
'usual',
'abroad',
'brave',
'calm',
'concentrate',
'estimate',
'grand',
'male',
'mine',
'prompt',
'quiet',
'refuse',
'regret',
'reveal',
'rush',
'shake',
'shift',
'shine',
'steal',
'suck',
'surround',
'anybody',
'bear',
'brilliant',
'dare',
'dear',
'delay',
'drunk',
'female',
'hurry',
'inevitable',
'invite',
'kiss',
'neat',
'pop',
'punch',
'quit',
'reply',
'representative',
'resist',
'rip',
'rub',
'silly',
'smile',
'spell',
'stretch',
'stupid',
'tear',
'temporary',
'tomorrow',
'wake',
'wrap',
'yesterday',
]
plus = ['-', '', "."]
end = ["com", "net", "ml", "org", "us"]
#print " ".join([nouns[random.randrange(0, len(nouns))] for i in range(4)])
def get():
    """Return a random domain-like name, e.g. 'data.science.net'."""
    n = random.randint(2, 3)  # use two or three words
    ws = []
    for i in range(0, n):
        w = random.choice(nouns)
        ws.append(w)
    p = random.choice(plus)  # separator between words: '-', '' or '.'
    name = p.join(ws)
    name += "." + random.choice(end)  # append a TLD-style suffix
    return name
if __name__ == '__main__':
n = get()
print(n)
|
|
# Debian packaging tools: Relationship parsing and evaluation.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: April 19, 2020
# URL: https://github.com/xolox/python-deb-pkg-tools
"""
Parsing and evaluation of Debian package relationship declarations.
The :mod:`deb_pkg_tools.deps` module provides functions to parse and evaluate
Debian package relationship declarations as defined in `chapter 7`_ of the
Debian policy manual. The most important function is :func:`parse_depends()`
which returns a :class:`RelationshipSet` object. The
:func:`RelationshipSet.matches()` method can be used to evaluate relationship
expressions. The relationship parsing is implemented in pure Python (no
external dependencies) but relationship evaluation uses the external command
``dpkg --compare-versions`` to ensure compatibility with Debian's package
version comparison algorithm.
To give you an impression of how to use this module:
>>> from deb_pkg_tools.deps import parse_depends
>>> dependencies = parse_depends('python (>= 2.6), python (<< 3) | python (>= 3.4)')
>>> dependencies.matches('python', '2.5')
False
>>> dependencies.matches('python', '3.0')
False
>>> dependencies.matches('python', '2.6')
True
>>> dependencies.matches('python', '3.4')
True
>>> print(repr(dependencies))
RelationshipSet(VersionedRelationship(name='python', operator='>=', version='2.6', architectures=()),
AlternativeRelationship(VersionedRelationship(name='python', operator='<<', version='3', architectures=()),
VersionedRelationship(name='python', operator='>=', version='3.4', architectures=())))
>>> print(str(dependencies))
python (>= 2.6), python (<< 3) | python (>= 3.4)
As you can see the :func:`repr()` output of the relationship set shows the
object tree and the :class:`str` output is the dependency line.
.. _chapter 7: http://www.debian.org/doc/debian-policy/ch-relationships.html#s-depsyntax
"""
# Standard library modules.
import functools
import logging
import re
# External dependencies.
from humanfriendly.text import compact, split
from property_manager import PropertyManager, key_property
from six import string_types, text_type
# Modules included in our package.
from deb_pkg_tools.compat import str_compatible
from deb_pkg_tools.version import compare_versions
# Public identifiers that require documentation.
__all__ = (
"ARCHITECTURE_RESTRICTIONS_MESSAGE",
"AbstractRelationship",
"AlternativeRelationship",
"EXPRESSION_PATTERN",
"Relationship",
"RelationshipSet",
"VersionedRelationship",
"cache_matches",
"logger",
"parse_alternatives",
"parse_depends",
"parse_relationship",
)
# Initialize a logger.
logger = logging.getLogger(__name__)
# Define a compiled regular expression pattern that we will use to match
# package relationship expressions consisting of a package name followed by
# optional version and architecture restrictions.
EXPRESSION_PATTERN = re.compile(r'''
# Capture all leading characters up to (but not including)
# the first parenthesis, bracket or space.
(?P<name> [^\(\[ ]+ )
# Ignore any whitespace.
\s*
# Optionally capture version restriction inside parentheses.
( \( (?P<version> [^)]+ ) \) )?
# Ignore any whitespace.
\s*
# Optionally capture architecture restriction inside brackets.
( \[ (?P<architectures> [^\]]+ ) \] )?
''', re.VERBOSE)
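# For example, matching 'python (>= 2.6) [amd64 i386]' yields the groups
# name='python', version='>= 2.6' and architectures='amd64 i386';
# parse_relationship() below splits the version group into operator and version.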
ARCHITECTURE_RESTRICTIONS_MESSAGE = """
Evaluation of architecture restrictions hasn't been implemented yet. If you
think this would be useful to you then please submit a feature request at
https://github.com/xolox/python-deb-pkg-tools/issues/9
"""
def parse_depends(relationships):
"""
Parse a Debian package relationship declaration line.
:param relationships: A string containing one or more comma separated
package relationships or a list of strings with
package relationships.
:returns: A :class:`RelationshipSet` object.
:raises: :exc:`~exceptions.ValueError` when parsing fails.
This function parses a list of package relationships of the form ``python
(>= 2.6), python (<< 3)``, i.e. a comma separated list of relationship
expressions. Uses :func:`parse_alternatives()` to parse each comma
separated expression.
Here's an example:
>>> from deb_pkg_tools.deps import parse_depends
>>> dependencies = parse_depends('python (>= 2.6), python (<< 3)')
>>> print(repr(dependencies))
RelationshipSet(VersionedRelationship(name='python', operator='>=', version='2.6'),
VersionedRelationship(name='python', operator='<<', version='3'))
>>> dependencies.matches('python', '2.5')
False
>>> dependencies.matches('python', '2.6')
True
>>> dependencies.matches('python', '2.7')
True
>>> dependencies.matches('python', '3.0')
False
"""
logger.debug("Parsing relationships: %r", relationships)
if isinstance(relationships, string_types):
relationships = split(relationships, ',')
return RelationshipSet(*map(parse_alternatives, relationships))
def parse_alternatives(expression):
"""
Parse an expression containing one or more alternative relationships.
:param expression: A relationship expression (a string).
:returns: A :class:`Relationship` object.
:raises: :exc:`~exceptions.ValueError` when parsing fails.
This function parses an expression containing one or more alternative
    relationships of the form ``python2.6 | python2.7``, i.e. a list of
relationship expressions separated by ``|`` tokens. Uses
:func:`parse_relationship()` to parse each ``|`` separated expression.
An example:
>>> from deb_pkg_tools.deps import parse_alternatives
>>> parse_alternatives('python2.6')
Relationship(name='python2.6')
>>> parse_alternatives('python2.6 | python2.7')
AlternativeRelationship(Relationship(name='python2.6'),
Relationship(name='python2.7'))
"""
if '|' in expression:
logger.debug("Parsing relationship with alternatives: %r", expression)
return AlternativeRelationship(*map(parse_relationship, split(expression, '|')))
else:
return parse_relationship(expression)
def parse_relationship(expression):
"""
Parse an expression containing a package name and optional version/architecture restrictions.
:param expression: A relationship expression (a string).
:returns: A :class:`Relationship` object.
:raises: :exc:`~exceptions.ValueError` when parsing fails.
This function parses relationship expressions containing a package name and
(optionally) a version relation of the form ``python (>= 2.6)`` and/or an
architecture restriction (refer to the Debian policy manual's documentation
on the `syntax of relationship fields`_ for details). Here's an example:
>>> from deb_pkg_tools.deps import parse_relationship
>>> parse_relationship('python')
Relationship(name='python')
>>> parse_relationship('python (<< 3)')
VersionedRelationship(name='python', operator='<<', version='3')
.. _syntax of relationship fields: https://www.debian.org/doc/debian-policy/ch-relationships.html
"""
logger.debug("Parsing relationship: %r", expression)
match = EXPRESSION_PATTERN.match(expression)
name = match.group('name')
version = match.group('version')
# Split the architecture restrictions into a tuple of strings.
architectures = tuple((match.group('architectures') or '').split())
if name and not version:
# A package name (and optional architecture restrictions) without version relation.
return Relationship(name=name, architectures=architectures)
else:
# A package name (and optional architecture restrictions) followed by a
# relationship to specific version(s) of the package.
tokens = [t.strip() for t in re.split('([<>=]+)', version) if t and not t.isspace()]
if len(tokens) != 2:
# Encountered something unexpected!
raise ValueError(compact("""
Corrupt package relationship expression: Splitting operator
from version resulted in more than two tokens!
(expression: {e}, tokens: {t})
""", e=expression, t=tokens))
return VersionedRelationship(name=name, architectures=architectures, operator=tokens[0], version=tokens[1])
def cache_matches(f):
"""
High performance memoizing decorator for overrides of :func:`Relationship.matches()`.
Before writing this function I tried out several caching decorators from
PyPI, unfortunately all of them were bloated. I benchmarked using
:func:`.collect_related_packages()` and where this decorator would get a
total runtime of 8 seconds the other caching decorators would get
something like 40 seconds...
"""
@functools.wraps(f)
def decorator(self, package, version=None):
# Get or create the cache.
try:
cache = self._matches_cache
except AttributeError:
cache = {}
setattr(self, '_matches_cache', cache)
# Get or create the entry.
key = (package, version)
try:
return cache[key]
except KeyError:
value = f(self, package, version)
cache[key] = value
return value
return decorator
class AbstractRelationship(PropertyManager):
"""Abstract base class for the various types of relationship objects defined in :mod:`deb_pkg_tools.deps`."""
@property
def names(self):
"""
The name(s) of the packages in the relationship.
:returns: A set of package names (strings).
.. note:: This property needs to be implemented by subclasses.
"""
raise NotImplementedError
def matches(self, name, version=None):
"""
Check if the relationship matches a given package and version.
:param name: The name of a package (a string).
:param version: The version number of a package (a string, optional).
:returns: One of the values :data:`True`, :data:`False` or :data:`None`
meaning the following:
- :data:`True` if the name matches and the version
doesn't invalidate the match,
- :data:`False` if the name matches but the version
invalidates the match,
- :data:`None` if the name doesn't match at all.
.. note:: This method needs to be implemented by subclasses.
"""
raise NotImplementedError
@str_compatible
class Relationship(AbstractRelationship):
"""
A simple package relationship referring only to the name of a package.
Created by :func:`parse_relationship()`.
"""
# Explicitly define the sort order of the key properties.
key_properties = 'name', 'architectures'
@key_property
def name(self):
"""The name of a package (a string)."""
@key_property
def architectures(self):
"""The architecture restriction(s) on the relationship (a tuple of strings)."""
return ()
@property
def names(self):
"""The name(s) of the packages in the relationship."""
return set([self.name])
def matches(self, name, version=None):
"""
Check if the relationship matches a given package name.
:param name: The name of a package (a string).
:param version: The version number of a package (this parameter is ignored).
:returns: :data:`True` if the name matches, :data:`None` otherwise.
:raises: :exc:`~exceptions.NotImplementedError` when :attr:`architectures`
is not empty (because evaluation of architecture restrictions
hasn't been implemented).
"""
if self.name == name:
if self.architectures:
raise NotImplementedError(compact(ARCHITECTURE_RESTRICTIONS_MESSAGE))
return True
def __str__(self):
"""Serialize a :class:`Relationship` object to a Debian package relationship expression."""
expression = self.name
if self.architectures:
expression += u" [%s]" % " ".join(self.architectures)
return expression
def __repr__(self):
"""Serialize a :class:`Relationship` object to a Python expression."""
return "%s(%s)" % (self.__class__.__name__, ', '.join([
'name=%r' % self.name,
'architectures=%s' % repr(self.architectures),
]))
@str_compatible
class VersionedRelationship(Relationship):
"""
A conditional package relationship that refers to a package and certain versions of that package.
Created by :func:`parse_relationship()`.
"""
# Explicitly define the sort order of the key properties.
key_properties = 'name', 'operator', 'version', 'architectures'
@key_property
def operator(self):
"""An operator that compares Debian package version numbers (a string)."""
@key_property
def version(self):
"""The version number of a package (a string)."""
@cache_matches
def matches(self, name, version=None):
"""
Check if the relationship matches a given package name and version.
:param name: The name of a package (a string).
:param version: The version number of a package (a string, optional).
:returns: One of the values :data:`True`, :data:`False` or :data:`None`
meaning the following:
- :data:`True` if the name matches and the version
doesn't invalidate the match,
- :data:`False` if the name matches but the version
invalidates the match,
- :data:`None` if the name doesn't match at all.
:raises: :exc:`~exceptions.NotImplementedError` when
:attr:`~Relationship.architectures` is not empty (because
evaluation of architecture restrictions hasn't been
implemented).
Uses the external command ``dpkg --compare-versions`` to ensure
compatibility with Debian's package version comparison algorithm.
"""
if self.name == name:
if version:
if self.architectures:
raise NotImplementedError(compact(ARCHITECTURE_RESTRICTIONS_MESSAGE))
return compare_versions(version, self.operator, self.version)
else:
return False
def __str__(self):
"""Serialize a :class:`VersionedRelationship` object to a Debian package relationship expression."""
expression = u'%s (%s %s)' % (self.name, self.operator, self.version)
if self.architectures:
expression += u" [%s]" % " ".join(self.architectures)
return expression
def __repr__(self):
"""Serialize a :class:`VersionedRelationship` object to a Python expression."""
return "%s(%s)" % (self.__class__.__name__, ', '.join([
'name=%r' % self.name,
'operator=%r' % self.operator,
'version=%r' % self.version,
'architectures=%s' % repr(self.architectures),
]))
@str_compatible
class AlternativeRelationship(AbstractRelationship):
"""
A package relationship that refers to one of several alternative packages.
Created by :func:`parse_alternatives()`.
"""
def __init__(self, *relationships):
"""
Initialize an :class:`AlternativeRelationship` object.
:param relationships: One or more :class:`Relationship` objects.
"""
self.relationships = tuple(relationships)
@key_property
def relationships(self):
"""A tuple of :class:`Relationship` objects."""
@property
def names(self):
"""
Get the name(s) of the packages in the alternative relationship.
:returns: A set of package names (strings).
"""
names = set()
for relationship in self.relationships:
names |= relationship.names
return names
@cache_matches
def matches(self, name, version=None):
"""
Check if the relationship matches a given package and version.
:param name: The name of a package (a string).
:param version: The version number of a package (a string, optional).
:returns: :data:`True` if the name and version of an alternative match,
:data:`False` if the name of an alternative was matched but the
version didn't match, :data:`None` otherwise.
"""
matches = None
for alternative in self.relationships:
alternative_matches = alternative.matches(name, version)
if alternative_matches is True:
return True
elif alternative_matches is False:
# Keep looking for a match but return False if we don't find one.
matches = False
return matches
def __str__(self):
"""Serialize an :class:`AlternativeRelationship` object to a Debian package relationship expression."""
return u' | '.join(map(text_type, self.relationships))
def __repr__(self):
"""Serialize an :class:`AlternativeRelationship` object to a Python expression."""
return "%s(%s)" % (self.__class__.__name__, ', '.join(repr(r) for r in self.relationships))
@str_compatible
class RelationshipSet(PropertyManager):
"""A set of package relationships. Created by :func:`parse_depends()`."""
def __init__(self, *relationships):
"""
        Initialize a :class:`RelationshipSet` object.
:param relationships: One or more :class:`Relationship` objects.
"""
self.relationships = tuple(relationships)
@key_property
def relationships(self):
"""A tuple of :class:`Relationship` objects."""
@property
def names(self):
"""
Get the name(s) of the packages in the relationship set.
:returns: A set of package names (strings).
"""
names = set()
for relationship in self.relationships:
names |= relationship.names
return names
@cache_matches
def matches(self, name, version=None):
"""
Check if the set of relationships matches a given package and version.
:param name: The name of a package (a string).
:param version: The version number of a package (a string, optional).
:returns: :data:`True` if all matched relationships evaluate to true,
:data:`False` if a relationship is matched and evaluates to false,
:data:`None` otherwise.
.. warning:: Results are cached in the assumption that
:class:`RelationshipSet` objects are
immutable. This is not enforced.
"""
results = [r.matches(name, version) for r in self.relationships]
matches = [r for r in results if r is not None]
return all(matches) if matches else None
def __str__(self):
"""Serialize a :class:`RelationshipSet` object to a Debian package relationship expression."""
return u', '.join(map(text_type, self.relationships))
def __repr__(self, pretty=False, indent=0):
"""Serialize a :class:`RelationshipSet` object to a Python expression."""
prefix = '%s(' % self.__class__.__name__
indent += len(prefix)
delimiter = ',\n%s' % (' ' * indent) if pretty else ', '
return prefix + delimiter.join(repr(r) for r in self.relationships) + ')'
def __iter__(self):
"""Iterate over the relationships in a relationship set."""
return iter(self.relationships)
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import story
from telemetry import page as page_module
from telemetry import value
from telemetry.value import list_of_scalar_values
from telemetry.value import none_values
class StatisticComputationTest(unittest.TestCase):
def testVariance(self):
self.assertAlmostEqual(
list_of_scalar_values.Variance([]), 0)
self.assertAlmostEqual(
list_of_scalar_values.Variance([3]), 0)
self.assertAlmostEqual(
list_of_scalar_values.Variance([600, 470, 170, 430, 300]), 27130)
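    # 27130 is the sample variance: squared deviations from the mean (394),
    # divided by n - 1 = 4.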
def testStandardDeviation(self):
self.assertAlmostEqual(
list_of_scalar_values.StandardDeviation([]), 0)
self.assertAlmostEqual(
list_of_scalar_values.StandardDeviation([1]), 0)
self.assertAlmostEqual(
list_of_scalar_values.StandardDeviation([600, 470, 170, 430, 300]),
164.71186, places=4)
  def testPooledStandardDeviation(self):
self.assertAlmostEqual(
list_of_scalar_values.PooledStandardDeviation([[], [], []]), 0)
self.assertAlmostEqual(
list_of_scalar_values.PooledStandardDeviation([[1], [], [3], []]), 0)
self.assertAlmostEqual(
list_of_scalar_values.PooledStandardDeviation([[1], [2], [3], [4]]), 0)
self.assertAlmostEqual(list_of_scalar_values.PooledStandardDeviation(
[[600, 470, 170, 430, 300], # variance = 27130, std = 164.7
[4000, 4020, 4230], # variance = 16233, std = 127.41
[260, 700, 800, 900, 0, 120, 150]]), # variance = 136348, std = 369.2
        282.7060, # SQRT((27130*4 + 16233*2 + 136348*6)/12)
places=4)
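    # The weights 4, 2 and 6 are the per-list degrees of freedom (n - 1), so the
    # pooled variance is a degrees-of-freedom-weighted average of the variances.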
self.assertAlmostEqual(list_of_scalar_values.PooledStandardDeviation(
[[600, 470, 170, 430, 300],
[4000, 4020, 4230],
[260, 700, 800, 900, 0, 120, 150]],
list_of_variances=[100000, 200000, 300000]),
        465.47466, # SQRT((100000*4 + 200000*2 + 300000*6)/12)
places=4)
class TestBase(unittest.TestCase):
def setUp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
story_set.AddStory(
page_module.Page('http://www.baz.com/', story_set, story_set.base_dir))
story_set.AddStory(
page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
self.story_set = story_set
@property
def pages(self):
return self.story_set.stories
class ValueTest(TestBase):
def testListSamePageMergingWithSamePageConcatenatePolicy(self):
page0 = self.pages[0]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[10, 9, 9, 7], same_page_merge_policy=value.CONCATENATE)
v1 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[300, 302, 303, 304], same_page_merge_policy=value.CONCATENATE)
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromSamePage([v0, v1]))
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
self.assertEquals(True, vM.important)
self.assertEquals([10, 9, 9, 7, 300, 302, 303, 304], vM.values)
# SQRT((19/12 * 3 + 35/12 * 3)/6) = 1.5
self.assertAlmostEqual(1.5, vM.std)
def testListSamePageMergingWithPickFirstPolicy(self):
page0 = self.pages[0]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.PICK_FIRST)
v1 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[3, 4], same_page_merge_policy=value.PICK_FIRST)
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromSamePage([v0, v1]))
self.assertEquals(page0, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(value.PICK_FIRST, vM.same_page_merge_policy)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2], vM.values)
def testListDifferentPageMerging(self):
page0 = self.pages[0]
page1 = self.pages[1]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.CONCATENATE)
v1 = list_of_scalar_values.ListOfScalarValues(
page1, 'x', 'unit',
[3, 4], same_page_merge_policy=value.CONCATENATE)
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromDifferentPages([v0, v1]))
self.assertEquals(None, vM.page)
self.assertEquals('x', vM.name)
self.assertEquals('unit', vM.units)
self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy)
self.assertEquals(True, vM.important)
self.assertEquals([1, 2, 3, 4], vM.values)
def testListWithNoneValueMerging(self):
page0 = self.pages[0]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[1, 2], same_page_merge_policy=value.CONCATENATE)
v1 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
None, same_page_merge_policy=value.CONCATENATE, none_value_reason='n')
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromSamePage([v0, v1]))
self.assertEquals(None, vM.values)
self.assertEquals(none_values.MERGE_FAILURE_REASON,
vM.none_value_reason)
def testListWithNoneValueMustHaveNoneReason(self):
page0 = self.pages[0]
self.assertRaises(none_values.NoneValueMissingReason,
lambda: list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit', None))
def testListWithNoneReasonMustHaveNoneValue(self):
page0 = self.pages[0]
self.assertRaises(none_values.ValueMustHaveNoneValue,
lambda: list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit', [1, 2],
none_value_reason='n'))
def testAsDict(self):
v = list_of_scalar_values.ListOfScalarValues(
None, 'x', 'unit', [1, 2],
same_page_merge_policy=value.PICK_FIRST, important=False)
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d['values'], [1, 2])
self.assertAlmostEqual(d['std'], 0.7071, places=4)
def testMergedValueAsDict(self):
page0 = self.pages[0]
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[10, 9, 9, 7], same_page_merge_policy=value.CONCATENATE)
v1 = list_of_scalar_values.ListOfScalarValues(
page0, 'x', 'unit',
[300, 302, 303, 304], same_page_merge_policy=value.CONCATENATE)
self.assertTrue(v1.IsMergableWith(v0))
vM = (list_of_scalar_values.ListOfScalarValues.
MergeLikeValuesFromSamePage([v0, v1]))
d = vM.AsDict()
self.assertEquals(d['values'], [10, 9, 9, 7, 300, 302, 303, 304])
# SQRT((19/12 * 3 + 35/12 * 3)/6)
self.assertAlmostEqual(d['std'], 1.5)
def testNoneValueAsDict(self):
v = list_of_scalar_values.ListOfScalarValues(
None, 'x', 'unit', None, same_page_merge_policy=value.PICK_FIRST,
important=False, none_value_reason='n')
d = v.AsDictWithoutBaseClassEntries()
self.assertEquals(d, {
'values': None,
'none_value_reason': 'n',
'std': None
})
def testFromDictInts(self):
d = {
'type': 'list_of_scalar_values',
'name': 'x',
'units': 'unit',
'values': [1, 2],
'std': 0.7071
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
self.assertEquals(v.values, [1, 2])
self.assertEquals(v.std, 0.7071)
def testFromDictFloats(self):
d = {
'type': 'list_of_scalar_values',
'name': 'x',
'units': 'unit',
'values': [1.3, 2.7, 4.5, 2.1, 3.4],
'std': 0.901
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
self.assertEquals(v.values, [1.3, 2.7, 4.5, 2.1, 3.4])
self.assertEquals(v.std, 0.901)
def testFromDictNoneValue(self):
d = {
'type': 'list_of_scalar_values',
'name': 'x',
'units': 'unit',
'values': None,
'std': None,
'none_value_reason': 'n'
}
v = value.Value.FromDict(d, {})
self.assertTrue(isinstance(v, list_of_scalar_values.ListOfScalarValues))
self.assertEquals(v.values, None)
self.assertEquals(v.none_value_reason, 'n')
|
|
"""
=============
pyFluidSynth
=============
Python bindings for FluidSynth
Author: Nathan Whitehead
Contact: nwhitehe@gmail.com
Version: 0.7
Date: 2015-02-24
Copyright 2008--2015, Nathan Whitehead <nwhitehe@gmail.com>
Released under the LGPL
This module contains python bindings for FluidSynth. FluidSynth is a
software synthesizer for generating music. It works like a MIDI
synthesizer. You load patches, set parameters, then send NOTEON and
NOTEOFF events to play notes. Instruments are defined in SoundFonts,
generally files with the extension SF2. FluidSynth can either be used
to play audio itself, or you can call a function that returns chunks
of audio data and output the data to the soundcard yourself.
FluidSynth works on all major platforms, so pyFluidSynth should also.
"""
from ctypes import *
from ctypes.util import find_library
import os
# A short circuited or expression to find the FluidSynth library
# (mostly needed for Windows distributions of libfluidsynth supplied with QSynth)
lib = find_library('fluidsynth') or \
find_library('libfluidsynth') or \
find_library('libfluidsynth-1')
if lib is None:
raise ImportError("Couldn't find the FluidSynth library.")
else:
print(lib)
# Dynamically link the FluidSynth library
_fl = CDLL(lib)
# Helper function for declaring function prototypes
def cfunc(name, result, *args):
"""Build and apply a ctypes prototype complete with parameter flags
:rtype : object
"""
atypes = []
aflags = []
for arg in args:
atypes.append(arg[1])
aflags.append((arg[2], arg[0]) + arg[3:])
return CFUNCTYPE(result, *atypes)((name, _fl), tuple(aflags))
# Bump this up when changing the interface for users
api_version = '1.2'
# Function prototypes for C versions of functions
new_fluid_settings = cfunc('new_fluid_settings', c_void_p)
new_fluid_synth = cfunc('new_fluid_synth', c_void_p,
('settings', c_void_p, 1))
new_fluid_audio_driver = cfunc('new_fluid_audio_driver', c_void_p,
('settings', c_void_p, 1),
('synth', c_void_p, 1))
fluid_settings_setstr = cfunc('fluid_settings_setstr', c_int,
('settings', c_void_p, 1),
('name', c_char_p, 1),
('str', c_char_p, 1))
fluid_settings_setnum = cfunc('fluid_settings_setnum', c_int,
('settings', c_void_p, 1),
('name', c_char_p, 1),
('val', c_double, 1))
fluid_settings_setint = cfunc('fluid_settings_setint', c_int,
('settings', c_void_p, 1),
('name', c_char_p, 1),
('val', c_int, 1))
delete_fluid_audio_driver = cfunc('delete_fluid_audio_driver', None,
('driver', c_void_p, 1))
delete_fluid_synth = cfunc('delete_fluid_synth', None,
('synth', c_void_p, 1))
delete_fluid_settings = cfunc('delete_fluid_settings', None,
('settings', c_void_p, 1))
fluid_synth_sfload = cfunc('fluid_synth_sfload', c_int,
('synth', c_void_p, 1),
('filename', c_char_p, 1),
('update_midi_presets', c_int, 1))
fluid_synth_sfunload = cfunc('fluid_synth_sfunload', c_int,
('synth', c_void_p, 1),
('sfid', c_int, 1),
('update_midi_presets', c_int, 1))
fluid_synth_program_select = cfunc('fluid_synth_program_select', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('sfid', c_int, 1),
('bank', c_int, 1),
('preset', c_int, 1))
fluid_synth_noteon = cfunc('fluid_synth_noteon', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('key', c_int, 1),
('vel', c_int, 1))
fluid_synth_noteoff = cfunc('fluid_synth_noteoff', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('key', c_int, 1))
fluid_synth_pitch_bend = cfunc('fluid_synth_pitch_bend', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('val', c_int, 1))
fluid_synth_cc = cfunc('fluid_synth_cc', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('ctrl', c_int, 1),
('val', c_int, 1))
fluid_synth_program_change = cfunc('fluid_synth_program_change', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('prg', c_int, 1))
fluid_synth_bank_select = cfunc('fluid_synth_bank_select', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('bank', c_int, 1))
fluid_synth_sfont_select = cfunc('fluid_synth_sfont_select', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('sfid', c_int, 1))
fluid_synth_program_reset = cfunc('fluid_synth_program_reset', c_int,
('synth', c_void_p, 1))
fluid_synth_system_reset = cfunc('fluid_synth_system_reset', c_int,
('synth', c_void_p, 1))
fluid_synth_write_s16 = cfunc('fluid_synth_write_s16', c_void_p,
('synth', c_void_p, 1),
('len', c_int, 1),
('lbuf', c_void_p, 1),
('loff', c_int, 1),
('lincr', c_int, 1),
('rbuf', c_void_p, 1),
('roff', c_int, 1),
('rincr', c_int, 1))
fluid_synth_get_polyphony = cfunc('fluid_synth_get_polyphony', c_int,
('synth', c_void_p, 1))
fluid_synth_set_polyphony = cfunc('fluid_synth_set_polyphony', c_int,
('synth', c_void_p, 1),
('polyphony', c_int, 1))
fluid_synth_get_active_voice_count = cfunc('fluid_synth_get_active_voice_count', c_int,
('synth', c_void_p, 1))
fluid_synth_get_gain = cfunc('fluid_synth_get_gain', c_float,
('synth', c_void_p, 1))
fluid_synth_set_gain = cfunc('fluid_synth_set_gain', c_void_p,
('synth', c_void_p, 1),
('gain', c_float, 1))
fluid_synth_get_cpu_load = cfunc('fluid_synth_get_cpu_load', c_double,
('synth', c_void_p, 1))
# A ctypes Structure describing a channel, used by fluid_synth_get_channel_info.
class ChannelInfo(Structure):
_fields_ = [
('assigned', c_int),
('sfont_id', c_int),
('bank', c_int),
('program', c_int),
('name', c_char * 32),
('reserved', c_char * 32),
]
fluid_get_stdout = cfunc('fluid_get_stdout', c_int)
new_fluid_cmd_handler = cfunc('new_fluid_cmd_handler', c_void_p,
('synth', c_void_p, 1))
fluid_command = cfunc('fluid_command', c_int,
('handler', c_void_p, 1),
('cmd', c_char_p, 1),
('out', c_int, 1))
fluid_synth_get_channel_info = cfunc('fluid_synth_get_channel_info', c_int,
('synth', c_void_p, 1),
('chan', c_int, 1),
('info', POINTER(ChannelInfo), 1))
# Create a new MIDI driver instance.
new_fluid_midi_driver = cfunc('new_fluid_midi_driver', c_void_p,
('settings', c_void_p, 1),
('handler', c_void_p, 1),
('event_handler_data', c_void_p, 1))
# Delete a MIDI driver instance.
delete_fluid_midi_driver = cfunc('delete_fluid_midi_driver', c_void_p,
('driver', c_void_p, 1))
fluid_synth_handle_midi_event = cfunc('fluid_synth_handle_midi_event', c_int,
('data', c_void_p, 1),
('event', c_void_p, 1))
def fluid_synth_write_s16_stereo(synth, len):
"""Return generated samples in stereo 16-bit format
Return value is a Numpy array of samples.
"""
import numpy
buf = create_string_buffer(len * 4)
fluid_synth_write_s16(synth, len, buf, 0, 2, buf, 1, 2)
    # numpy.fromstring is deprecated for binary data; frombuffer reads the same bytes.
    return numpy.frombuffer(buf[:], dtype=numpy.int16)
# Object-oriented interface, simplifies access to functions
class Synth:
"""Synth represents a FluidSynth synthesizer"""
def __init__(self, gain=0.2, samplerate=44100, polyphony=128, channels=256):
"""Create new synthesizer object to control sound generation
Optional keyword arguments:
gain : scale factor for audio output, default is 0.2
lower values are quieter, allow more simultaneous notes
samplerate : output samplerate in Hz, default is 44100 Hz
polyphony: total polyphony of the output
channels: number of MIDI channels.
"""
st = new_fluid_settings()
        fluid_settings_setnum(st, b'gain', gain)
        fluid_settings_setnum(st, b'synth.sample-rate', samplerate)
        # Polyphony defaults to 128 voices but is configurable via the constructor.
        fluid_settings_setint(st, b"synth.polyphony", polyphony)
# No reason to limit ourselves to 16 channels
fluid_settings_setint(st, b'synth.midi-channels', channels)
self.settings = st
self.synth = new_fluid_synth(self.settings)
self.audio_driver = None
self.midi_driver = None
def start(self, audiodriver=b'alsa'):
"""Start audio output driver in separate background thread
Call this function any time after creating the Synth object.
If you don't call this function, use get_samples() to generate
samples.
Optional keyword argument:
audiodriver : which audio driver to use for output
Possible choices:
'alsa', 'oss', 'jack', 'portaudio'
'sndmgr', 'coreaudio', 'Direct Sound',
'pulseaudio'
Not all drivers will be available for every platform, it
depends on which drivers were compiled into FluidSynth for
your platform.
"""
if audiodriver is not None:
assert (audiodriver in [b'alsa', b'oss', b'jack', b'portaudio',
b'sndmgr', b'coreaudio', b'Direct Sound', b'pulseaudio'])
fluid_settings_setstr(self.settings, b'audio.driver', audiodriver)
self.audio_driver = new_fluid_audio_driver(self.settings, self.synth)
def delete(self):
if self.audio_driver is not None:
delete_fluid_audio_driver(self.audio_driver)
if self.midi_driver is not None:
delete_fluid_midi_driver(self.midi_driver)
delete_fluid_synth(self.synth)
delete_fluid_settings(self.settings)
def sfload(self, filename, update_midi_preset=0):
"""Load SoundFont and return its ID"""
return fluid_synth_sfload(self.synth, filename, update_midi_preset)
def sfunload(self, sfid, update_midi_preset=0):
"""Unload a SoundFont and free memory it used"""
return fluid_synth_sfunload(self.synth, sfid, update_midi_preset)
def program_select(self, chan, sfid, bank, preset):
"""Select a program"""
return fluid_synth_program_select(self.synth, chan, sfid, bank, preset)
def noteon(self, chan, key, vel):
"""Play a note"""
        if key < 0 or key > 127:
return False
if chan < 0:
return False
        if vel < 0 or vel > 127:
return False
return fluid_synth_noteon(self.synth, chan, key, vel)
def noteoff(self, chan, key):
"""Stop a note"""
        if key < 0 or key > 127:
return False
if chan < 0:
return False
return fluid_synth_noteoff(self.synth, chan, key)
def pitch_bend(self, chan, val):
"""Adjust pitch of a playing channel by small amounts
A pitch bend value of 0 is no pitch change from default.
A value of -2048 is 1 semitone down.
A value of 2048 is 1 semitone up.
Maximum values are -8192 to +8192 (transposing by 4 semitones).
"""
return fluid_synth_pitch_bend(self.synth, chan, val + 8192)
def cc(self, chan, ctrl, val):
"""Send control change value
The controls that are recognized are dependent on the
SoundFont. Values are always 0 to 127. Typical controls
include:
1 : vibrato
7 : volume
10 : pan (left to right)
11 : expression (soft to loud)
64 : sustain
91 : reverb
93 : chorus
"""
return fluid_synth_cc(self.synth, chan, ctrl, val)
def program_change(self, chan, prg):
"""Change the program"""
return fluid_synth_program_change(self.synth, chan, prg)
def bank_select(self, chan, bank):
"""Choose a bank"""
return fluid_synth_bank_select(self.synth, chan, bank)
def sfont_select(self, chan, sfid):
"""Choose a SoundFont"""
return fluid_synth_sfont_select(self.synth, chan, sfid)
def program_reset(self):
"""Reset the programs on all channels"""
return fluid_synth_program_reset(self.synth)
def system_reset(self):
"""Stop all notes and reset all programs"""
return fluid_synth_system_reset(self.synth)
def get_samples(self, len=1024):
"""Generate audio samples
The return value will be a NumPy array containing the given
length of audio samples. If the synth is set to stereo output
(the default) the array will be size 2 * len.
"""
return fluid_synth_write_s16_stereo(self.synth, len)
def get_polyphony(self):
""" Gets the current polyphony
:return: Current polyphony (int)
"""
return fluid_synth_get_polyphony(self.synth)
def set_polyphony(self, polyphony):
""" Set synthesizer polyphony (max number of voices).
:param polyphony: New polyphony to set
:return: -1 if incorrect, 1 if correct
"""
return fluid_synth_set_polyphony(self.synth, polyphony)
def count_active_voices(self):
""" Get current number of active voices.
:return: Number of active voices
"""
return fluid_synth_get_active_voice_count(self.synth)
def get_gain(self):
""" Gets synth output gain value
        :return: Current gain (float)
"""
return fluid_synth_get_gain(self.synth)
def set_gain(self, gain):
""" Set synth output gain value
        :param gain: New output gain value
:return: void
"""
return fluid_synth_set_gain(self.synth, gain)
def get_cpu_load(self):
""" Get the synth CPU load value.
:return: Float containing the CPU load.
"""
return fluid_synth_get_cpu_load(self.synth)
def get_channel_info(self, chan):
""" Gets a struct with the information of the preset loaded in a channel.
:param chan: Channel number of which we want to get the information
:return: information structure with fields (synth, bank, preset, name)
"""
information = ChannelInfo()
fluid_synth_get_channel_info(self.synth, chan, information)
return information
def get_instrument_list(self, sfontid):
""" Gets the instrument list and
:param sfontid: Soundfont ID
:return: A dictionary with keys BBB-PPP (bank-preset) and
the name of the instrument preset. Example of how to access a preset's name:
inst[str(bank).zfill(3) + '-' + str(program).zfill(3)]
"""
fname = ".instSF" + str(sfontid) # TEmporary file with the info of the SF2
handler = new_fluid_cmd_handler(self.synth)
instruments = dict() # It builds the list as a dictionary
        # First, check whether the .instSF file already exists, to avoid repeating the work.
try:
for line in open(fname):
instruments[line[0:7]] = line[8:-1] # If possible, returns the instrument list
except IOError: # Creates the instrument list if it doesn't exist
newshell = StdoutHandler(fname)
newshell.freopen()
fluid_command(handler, "inst " + str(sfontid), fluid_get_stdout())
newshell.freclose()
for line in open(fname):
instruments[line[0:7]] = line[8:-1]
return instruments
def start_midi(self, mididriver=b'alsa_seq'):
"""
        Starts the MIDI driver to allow MIDI keyboard interaction.
        :param mididriver: name of the MIDI driver; one of:
                           'alsa_raw', 'alsa_seq', 'coremidi', 'jack',
                           'midishare', 'oss', 'winmidi'
        :return: the new MIDI driver handle
"""
if mididriver is not None:
assert (mididriver in [b'alsa_raw', b'alsa_seq', b'coremidi', b'jack',
b'midishare', b'oss', b'winmidi'])
fluid_settings_setstr(self.settings, b'midi.driver', mididriver)
# Optionally: sets the real time priority to 99.
fluid_settings_setnum(self.settings, b'midi.realtimeprio', 99)
self.midi_driver = new_fluid_midi_driver(self.settings, fluid_synth_handle_midi_event, self.synth)
return self.midi_driver
def stop_midi(self):
"""
        Stops the current MIDI driver.
:return: Nothing
"""
if self.midi_driver is not None: # Checks if there actually is a midi_driver
delete_fluid_midi_driver(self.midi_driver)
self.midi_driver = None
def raw_audio_string(data):
"""Return a string of bytes to send to soundcard
Input is a numpy array of samples. Default output format
is 16-bit signed (other formats not currently supported).
"""
import numpy
    return (data.astype(numpy.int16)).tobytes()
class StdoutHandler(object):
"""Helper class for the capture of the Standard Output Stream.
This is needed for some functions of class Synth.
"""
def __init__(self, f):
"""Create new stdouthandler, for management of stdin and
stdout (some methods of Synth DO need to capture stdout stream).
"""
self.prevOutFd = os.dup(1)
self.prevInFd = os.dup(0)
self.prevErrFd = os.dup(2)
self.newf = open(f, 'w')
self.newfd = self.newf.fileno() # The new file output
def freopen(self):
"""
Redirects the standard input, output and error stream
to the established newfd.
:return:
"""
os.dup2(self.newfd, 0)
os.dup2(self.newfd, 1)
os.dup2(self.newfd, 2)
def freclose(self):
"""
Closes the modified input, output and error stream
:return:
"""
self.newf.close()
os.dup2(self.prevOutFd, 1)
os.close(self.prevOutFd)
os.dup2(self.prevInFd, 0)
os.close(self.prevInFd)
os.dup2(self.prevErrFd, 2)
os.close(self.prevErrFd)
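# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hedged example of driving the Synth class defined above. It
# assumes the FluidSynth library, NumPy and PyAudio are installed; the
# SoundFont path below is a placeholder and must be adjusted.
if __name__ == '__main__':
    import numpy
    import pyaudio

    pa = pyaudio.PyAudio()
    stream = pa.open(format=pyaudio.paInt16, channels=2, rate=44100, output=True)

    fs = Synth()
    sfid = fs.sfload('/usr/share/sounds/sf2/FluidR3_GM.sf2')  # placeholder path
    fs.program_select(0, sfid, 0, 0)      # channel 0, bank 0, preset 0

    fs.noteon(0, 60, 100)                 # middle C at velocity 100
    samples = fs.get_samples(44100)       # one second of audio
    fs.noteoff(0, 60)
    samples = numpy.append(samples, fs.get_samples(44100))  # one second of release

    stream.write(raw_audio_string(samples))  # play through the sound card
    stream.close()
    pa.terminate()
    fs.delete()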
|
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions raised by Google API core & clients.
This module provides base classes for all errors raised by libraries based
on :mod:`google.api.core`, including both HTTP and gRPC clients.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import six
from six.moves import http_client
try:
import grpc
except ImportError: # pragma: NO COVER
grpc = None
# Lookup tables for mapping exceptions from HTTP and gRPC transports.
# Populated by _APICallErrorMeta
_HTTP_CODE_TO_EXCEPTION = {}
_GRPC_CODE_TO_EXCEPTION = {}
class GoogleAPIError(Exception):
"""Base class for all exceptions raised by Google API Clients."""
pass
class _GoogleAPICallErrorMeta(type):
"""Metaclass for registering GoogleAPICallError subclasses."""
def __new__(mcs, name, bases, class_dict):
cls = type.__new__(mcs, name, bases, class_dict)
if cls.code is not None:
_HTTP_CODE_TO_EXCEPTION.setdefault(cls.code, cls)
if cls.grpc_status_code is not None:
_GRPC_CODE_TO_EXCEPTION.setdefault(cls.grpc_status_code, cls)
return cls
@six.python_2_unicode_compatible
@six.add_metaclass(_GoogleAPICallErrorMeta)
class GoogleAPICallError(GoogleAPIError):
"""Base class for exceptions raised by calling API methods.
Args:
message (str): The exception message.
errors (Sequence[Any]): An optional list of error details.
response (Union[requests.Request, grpc.Call]): The response or
gRPC call metadata.
"""
code = None
"""Optional[int]: The HTTP status code associated with this error.
This may be ``None`` if the exception does not have a direct mapping
to an HTTP error.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
"""
grpc_status_code = None
"""Optional[grpc.StatusCode]: The gRPC status code associated with this
error.
This may be ``None`` if the exception does not match up to a gRPC error.
"""
def __init__(self, message, errors=(), response=None):
super(GoogleAPICallError, self).__init__(message)
self.message = message
"""str: The exception message."""
self._errors = errors
self._response = response
def __str__(self):
return '{} {}'.format(self.code, self.message)
@property
def errors(self):
"""Detailed error information.
Returns:
Sequence[Any]: A list of additional error details.
"""
return list(self._errors)
@property
def response(self):
"""Optional[Union[requests.Request, grpc.Call]]: The response or
gRPC call metadata."""
return self._response
class Redirection(GoogleAPICallError):
"""Base class for for all redirection (HTTP 3xx) responses."""
class MovedPermanently(Redirection):
"""Exception mapping a ``301 Moved Permanently`` response."""
code = http_client.MOVED_PERMANENTLY
class NotModified(Redirection):
"""Exception mapping a ``304 Not Modified`` response."""
code = http_client.NOT_MODIFIED
class TemporaryRedirect(Redirection):
"""Exception mapping a ``307 Temporary Redirect`` response."""
code = http_client.TEMPORARY_REDIRECT
class ResumeIncomplete(Redirection):
"""Exception mapping a ``308 Resume Incomplete`` response.
.. note:: :ref:`http_client.PERMANENT_REDIRECT` is ``308``, but Google APIs
differ in their use of this status code.
"""
code = 308
class ClientError(GoogleAPICallError):
"""Base class for all client error (HTTP 4xx) responses."""
class BadRequest(ClientError):
"""Exception mapping a ``400 Bad Request`` response."""
code = http_client.BAD_REQUEST
class InvalidArgument(BadRequest):
"""Exception mapping a :prop:`grpc.StatusCode.INVALID_ARGUMENT` error."""
grpc_status_code = (
grpc.StatusCode.INVALID_ARGUMENT if grpc is not None else None)
class FailedPrecondition(BadRequest):
"""Exception mapping a :prop:`grpc.StatusCode.FAILED_PRECONDITION`
error."""
grpc_status_code = (
grpc.StatusCode.FAILED_PRECONDITION if grpc is not None else None)
class OutOfRange(BadRequest):
"""Exception mapping a :prop:`grpc.StatusCode.OUT_OF_RANGE` error."""
grpc_status_code = (
grpc.StatusCode.OUT_OF_RANGE if grpc is not None else None)
class Unauthorized(ClientError):
"""Exception mapping a ``401 Unauthorized`` response."""
code = http_client.UNAUTHORIZED
class Unauthenticated(Unauthorized):
"""Exception mapping a :prop:`grpc.StatusCode.UNAUTHENTICATED` error."""
grpc_status_code = (
grpc.StatusCode.UNAUTHENTICATED if grpc is not None else None)
class Forbidden(ClientError):
"""Exception mapping a ``403 Forbidden`` response."""
code = http_client.FORBIDDEN
class PermissionDenied(Forbidden):
"""Exception mapping a :prop:`grpc.StatusCode.PERMISSION_DENIED` error."""
grpc_status_code = (
grpc.StatusCode.PERMISSION_DENIED if grpc is not None else None)
class NotFound(ClientError):
"""Exception mapping a ``404 Not Found`` response or a
:prop:`grpc.StatusCode.NOT_FOUND` error."""
code = http_client.NOT_FOUND
grpc_status_code = (
grpc.StatusCode.NOT_FOUND if grpc is not None else None)
class MethodNotAllowed(ClientError):
"""Exception mapping a ``405 Method Not Allowed`` response."""
code = http_client.METHOD_NOT_ALLOWED
class Conflict(ClientError):
"""Exception mapping a ``409 Conflict`` response."""
code = http_client.CONFLICT
class AlreadyExists(Conflict):
"""Exception mapping a :prop:`grpc.StatusCode.ALREADY_EXISTS` error."""
grpc_status_code = (
grpc.StatusCode.ALREADY_EXISTS if grpc is not None else None)
class Aborted(Conflict):
"""Exception mapping a :prop:`grpc.StatusCode.ABORTED` error."""
grpc_status_code = (
grpc.StatusCode.ABORTED if grpc is not None else None)
class LengthRequired(ClientError):
"""Exception mapping a ``411 Length Required`` response."""
code = http_client.LENGTH_REQUIRED
class PreconditionFailed(ClientError):
"""Exception mapping a ``412 Precondition Failed`` response."""
code = http_client.PRECONDITION_FAILED
class RequestRangeNotSatisfiable(ClientError):
"""Exception mapping a ``416 Request Range Not Satisfiable`` response."""
code = http_client.REQUESTED_RANGE_NOT_SATISFIABLE
class TooManyRequests(ClientError):
"""Exception mapping a ``429 Too Many Requests`` response."""
# http_client does not define a constant for this in Python 2.
code = 429
class ResourceExhausted(TooManyRequests):
"""Exception mapping a :prop:`grpc.StatusCode.RESOURCE_EXHAUSTED` error."""
grpc_status_code = (
grpc.StatusCode.RESOURCE_EXHAUSTED if grpc is not None else None)
class Cancelled(ClientError):
"""Exception mapping a :prop:`grpc.StatusCode.CANCELLED` error."""
# This maps to HTTP status code 499. See
# https://github.com/googleapis/googleapis/blob/master/google/rpc\
# /code.proto
code = 499
grpc_status_code = grpc.StatusCode.CANCELLED if grpc is not None else None
class ServerError(GoogleAPICallError):
"""Base for 5xx responses."""
class InternalServerError(ServerError):
"""Exception mapping a ``500 Internal Server Error`` response. or a
:prop:`grpc.StatusCode.INTERNAL` error."""
code = http_client.INTERNAL_SERVER_ERROR
grpc_status_code = grpc.StatusCode.INTERNAL if grpc is not None else None
class Unknown(ServerError):
"""Exception mapping a :prop:`grpc.StatusCode.UNKNOWN` error."""
grpc_status_code = grpc.StatusCode.UNKNOWN if grpc is not None else None
class DataLoss(ServerError):
"""Exception mapping a :prop:`grpc.StatusCode.DATA_LOSS` error."""
grpc_status_code = grpc.StatusCode.DATA_LOSS if grpc is not None else None
class MethodNotImplemented(ServerError):
"""Exception mapping a ``501 Not Implemented`` response or a
:prop:`grpc.StatusCode.UNIMPLEMENTED` error."""
code = http_client.NOT_IMPLEMENTED
grpc_status_code = (
grpc.StatusCode.UNIMPLEMENTED if grpc is not None else None)
class BadGateway(ServerError):
"""Exception mapping a ``502 Bad Gateway`` response."""
code = http_client.BAD_GATEWAY
class ServiceUnavailable(ServerError):
"""Exception mapping a ``503 Service Unavailable`` response or a
:prop:`grpc.StatusCode.UNAVAILABLE` error."""
code = http_client.SERVICE_UNAVAILABLE
grpc_status_code = (
grpc.StatusCode.UNAVAILABLE if grpc is not None else None)
class GatewayTimeout(ServerError):
"""Exception mapping a ``504 Gateway Timeout`` response."""
code = http_client.GATEWAY_TIMEOUT
class DeadlineExceeded(GatewayTimeout):
"""Exception mapping a :prop:`grpc.StatusCode.DEADLINE_EXCEEDED` error."""
grpc_status_code = (
grpc.StatusCode.DEADLINE_EXCEEDED if grpc is not None else None)
def exception_class_for_http_status(status_code):
"""Return the exception class for a specific HTTP status code.
Args:
status_code (int): The HTTP status code.
Returns:
type: the appropriate subclass of :class:`GoogleAPICallError`.
"""
return _HTTP_CODE_TO_EXCEPTION.get(status_code, GoogleAPICallError)
def from_http_status(status_code, message, **kwargs):
"""Create a :class:`GoogleAPICallError` from an HTTP status code.
Args:
status_code (int): The HTTP status code.
message (str): The exception message.
kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
constructor.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
error_class = exception_class_for_http_status(status_code)
error = error_class(message, **kwargs)
if error.code is None:
error.code = status_code
return error
def from_http_response(response):
"""Create a :class:`GoogleAPICallError` from a :class:`requests.Response`.
Args:
response (requests.Response): The HTTP response.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`, with the message and errors populated
from the response.
"""
try:
payload = response.json()
except ValueError:
payload = {'error': {'message': response.text or 'unknown error'}}
error_message = payload.get('error', {}).get('message', 'unknown error')
errors = payload.get('error', {}).get('errors', ())
message = '{method} {url}: {error}'.format(
method=response.request.method,
url=response.request.url,
error=error_message)
exception = from_http_status(
response.status_code, message, errors=errors, response=response)
return exception
def exception_class_for_grpc_status(status_code):
"""Return the exception class for a specific :class:`grpc.StatusCode`.
Args:
status_code (grpc.StatusCode): The gRPC status code.
Returns:
type: the appropriate subclass of :class:`GoogleAPICallError`.
"""
return _GRPC_CODE_TO_EXCEPTION.get(status_code, GoogleAPICallError)
def from_grpc_status(status_code, message, **kwargs):
"""Create a :class:`GoogleAPICallError` from a :class:`grpc.StatusCode`.
Args:
status_code (grpc.StatusCode): The gRPC status code.
message (str): The exception message.
kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
constructor.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
error_class = exception_class_for_grpc_status(status_code)
error = error_class(message, **kwargs)
if error.grpc_status_code is None:
error.grpc_status_code = status_code
return error
def from_grpc_error(rpc_exc):
"""Create a :class:`GoogleAPICallError` from a :class:`grpc.RpcError`.
Args:
rpc_exc (grpc.RpcError): The gRPC error.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
if isinstance(rpc_exc, grpc.Call):
return from_grpc_status(
rpc_exc.code(),
rpc_exc.details(),
errors=(rpc_exc,),
response=rpc_exc)
else:
return GoogleAPICallError(
str(rpc_exc), errors=(rpc_exc,), response=rpc_exc)
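# --- Usage sketch (not part of the original module) --------------------------
# A small, hedged demonstration of the mapping helpers above. It only uses
# names defined in this module, so it can be run directly as a script.
if __name__ == '__main__':
    # Known HTTP status codes map to their registered subclass.
    exc = from_http_status(http_client.NOT_FOUND, 'resource was deleted')
    assert isinstance(exc, NotFound)
    print(exc)  # 404 resource was deleted

    # Unregistered status codes fall back to the generic base class,
    # but the numeric code is still recorded on the instance.
    exc = from_http_status(418, 'unmapped status')
    assert type(exc) is GoogleAPICallError
    assert exc.code == 418

    # The gRPC mapping works the same way when grpc is importable.
    if grpc is not None:
        exc = from_grpc_status(grpc.StatusCode.NOT_FOUND, 'no such entity')
        assert isinstance(exc, NotFound)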
|
|
from .hub import Hub, HubListener
from .data import Data
from .link_manager import LinkManager
from .registry import Registry
from .visual import COLORS
from .message import (DataCollectionAddMessage,
DataCollectionDeleteMessage,
DataAddComponentMessage)
from .util import as_list
__all__ = ['DataCollection']
class DataCollection(HubListener):
"""The top-level object for interacting with datasets in Glue.
DataCollections have the following responsibilities:
* Providing a way to retrieve and store data
* Broadcasting messages when data are added or removed
* Keeping each managed data set's list of
:class:`~glue.core.data.DerivedComponent` instances up-to-date
* Creating the hub that all other objects should use to communicate
with one another (stored in ``self.hub``)
"""
def __init__(self, data=None):
"""
:param data: :class:`~glue.core.data.Data` object, or list of such objects
"""
super(DataCollection, self).__init__()
self._link_manager = LinkManager()
self._data = []
self.hub = None
self._subset_groups = []
self.register_to_hub(Hub())
self.extend(as_list(data or []))
self._sg_count = 0
@property
def data(self):
""" The :class:`~glue.core.data.Data` objects in the collection """
return self._data
def append(self, data):
""" Add a new dataset to this collection.
Appending emits a DataCollectionAddMessage.
It also updates the list of DerivedComponents that each
data set can work with.
:param data: :class:`~glue.core.data.Data` object to add
"""
if isinstance(data, list):
self.extend(data)
return
if data in self:
return
self._data.append(data)
if self.hub:
data.register_to_hub(self.hub)
for s in data.subsets:
s.register()
msg = DataCollectionAddMessage(self, data)
self.hub.broadcast(msg)
self._sync_link_manager()
def extend(self, data):
"""Add several new datasets to this collection
See :meth:`append` for more information
:param data: List of data objects to add
"""
        for d in data:
            self.append(d)
def remove(self, data):
""" Remove a data set from the collection
Emits a DataCollectionDeleteMessage
:param data: the object to remove
:type data: :class:`~glue.core.data.Data`
"""
if data not in self._data:
return
self._data.remove(data)
Registry().unregister(data, Data)
if self.hub:
msg = DataCollectionDeleteMessage(self, data)
self.hub.broadcast(msg)
def _sync_link_manager(self):
""" update the LinkManager, so all the DerivedComponents
for each data set are up-to-date
"""
# add any links in the data
for d in self._data:
for derived in d.derived_components:
self._link_manager.add_link(d.get_component(derived).link)
for link in d.coordinate_links:
self._link_manager.add_link(link)
for d in self._data:
self._link_manager.update_data_components(d)
@property
def links(self):
"""
Tuple of :class:`~glue.core.component_link.ComponentLink` objects.
"""
return tuple(self._link_manager.links)
def add_link(self, links):
"""Add one or more links to the data collection.
This will auto-update the components in each data set
:param links:
The links to add. A scalar or list of
:class:`~glue.core.component_link.ComponentLink`
instances, or a :class:`~glue.core.link_helpers.LinkCollection`
"""
self._link_manager.add_link(links)
for d in self._data:
self._link_manager.update_data_components(d)
def _merge_link(self, link):
pass
def set_links(self, links):
"""Override the links in the collection, and update data
objects as necessary.
:param links: The new links. An iterable of
:class:`~glue.core.component_link.ComponentLink` instances
"""
self._link_manager.clear()
for link in links:
self._link_manager.add_link(link)
for d in self._data:
self._link_manager.update_data_components(d)
def register_to_hub(self, hub):
""" Register managed data objects to a hub.
:param hub: The hub to register with
:type hub: :class:`~glue.core.hub.Hub`
"""
if self.hub is hub:
return
if self.hub is not None:
raise RuntimeError("Data Collection already registered "
"to a different Hub")
if not isinstance(hub, Hub):
raise TypeError("Input is not a Hub object: %s" % type(hub))
self.hub = hub
# re-assign all data, subset hub instances to this hub
for d in self._data:
d.register_to_hub(hub)
for s in d.subsets:
s.register()
hub.subscribe(self, DataAddComponentMessage,
lambda msg: self._sync_link_manager(),
filter=lambda x: x.sender in self._data)
def new_subset_group(self, label=None, subset_state=None):
"""
Create and return a new :class:`~glue.core.subset_group.SubsetGroup`
"""
from .subset_group import SubsetGroup
color = COLORS[self._sg_count % len(COLORS)]
self._sg_count += 1
label = label or "%i" % (self._sg_count)
result = SubsetGroup(color=color, label=label, subset_state=subset_state)
self._subset_groups.append(result)
result.register(self)
return result
def remove_subset_group(self, subset_grp):
"""
Remove an existing :class:`~glue.core.subset_group.SubsetGroup`
"""
if subset_grp not in self._subset_groups:
return
# remove from list first, so that group appears deleted
# by the time the first SubsetDelete message is broadcast
self._subset_groups.remove(subset_grp)
for s in subset_grp.subsets:
s.delete()
subset_grp.unregister(self.hub)
@property
def subset_groups(self):
"""
tuple of current :class:`Subset Groups <glue.core.subset_group.SubsetGroup>`
"""
return tuple(self._subset_groups)
def __contains__(self, obj):
return obj in self._data or obj in self.subset_groups
def __getitem__(self, key):
return self._data[key]
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __str__(self):
result = "DataCollection (%i data sets)\n\t" % len(self)
result += '\n\t'.join("%3i: %s" % (i, d.label) for
i, d in enumerate(self))
return result
def __repr__(self):
return self.__str__()
def __bool__(self):
return True
def __nonzero__(self):
return True
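# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hedged demonstration of DataCollection. In practice this would
# live in a user script; it assumes glue is installed and that Data and
# DataCollection are importable from glue.core (paths may vary by version).
if __name__ == '__main__':
    from glue.core import Data, DataCollection

    d1 = Data(label='d1', x=[1, 2, 3])
    d2 = Data(label='d2', y=[4, 5, 6])

    dc = DataCollection([d1, d2])   # appending broadcasts DataCollectionAddMessage
    print(dc)                       # DataCollection (2 data sets) ...

    dc.remove(d2)                   # broadcasts DataCollectionDeleteMessage
    assert len(dc) == 1 and d1 in dc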
|
|
from __future__ import absolute_import, unicode_literals
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
import os
import importlib
local_settings_module = os.environ.get('LOCAL_SETTINGS', 'hydroshare.local_settings')
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", ("Media Library", "fb_browse"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, "Top navigation bar", "pages/menus/dropdown.html"),
# (2, "Left-hand tree", "pages/menus/tree.html"),
# (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.db.models.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# ("Image",),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# ("Another name",),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
USE_SOUTH = True
########################
# MAIN DJANGO SETTINGS #
########################
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'),
# ('Full Name', 'anotheremail@example.com'))
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
_ = lambda s: s
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ("127.0.0.1",)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
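# An illustrative, filled-in configuration for a local PostgreSQL database.
# All values below are placeholders; real credentials belong in
# local_settings.py (see the LOCAL SETTINGS section).
#
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.postgresql_psycopg2",
#         "NAME": "hydroshare",
#         "USER": "hydroshare",
#         "PASSWORD": "change-me",
#         "HOST": "localhost",
#         "PORT": "5432",
#     }
# }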
#########
# PATHS #
#########
import os
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"django.contrib.gis",
"django_nose",
"django_irods",
"theme",
"theme.blog_mods",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.pages",
"mezzanine.galleries",
"crispy_forms",
"mezzanine.accounts",
"mezzanine.mobile",
"autocomplete_light",
"tastypie",
"tastypie_swagger",
"dublincore",
"hs_core",
"hs_party",
"hs_metrics",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
"ga_resources.middleware.PagePermissionsViewableMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
# "compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False}
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username
# "SSH_PASS": "", # SSH password (consider key-based authentication)
# "SSH_KEY_PATH": "", # Local path to SSH key file, for key-based auth
# "HOSTS": [], # List of hosts to deploy to
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "", # Path to pip requirements, relative to project
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "LIVE_HOSTNAME": "www.example.com", # Host for public site.
# "REPO_URL": "", # Git or Mercurial remote repo URL for the project
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# "SECRET_KEY": SECRET_KEY,
# "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
local_settings = __import__(local_settings_module, globals(), locals(), ['*'])
for k in dir(local_settings):
locals()[k] = getattr(local_settings, k)
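# An illustrative, minimal local_settings.py (every value below is a
# placeholder; HYDROSHARE_APPS is assumed to be defined there because it is
# appended to INSTALLED_APPS further down in this file):
#
# DEBUG = True
# SECRET_KEY = "change-me"
# NEVERCACHE_KEY = "change-me-too"
# ALLOWED_HOSTS = ["localhost"]
# HYDROSHARE_APPS = ()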
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
INSTALLED_APPS += HYDROSHARE_APPS
TASTYPIE_SWAGGER_API_MODULE = 'hydroshare.urls.v1_api'
#
AUTH_PROFILE_MODULE = "theme.UserProfile"
|
|
#----------------------------------------------------------------------
# Copyright (c) 2011-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
Reference GENI GCF Clearinghouse. Uses SFA Certificate and credential objects.
Run from gcf-ch.py
Will produce signed user credentials from a GID, return a
list of aggregates read from a config file, and create a new Slice Credential.
"""
from __future__ import absolute_import
import datetime
import traceback
import uuid
import os
import dateutil.parser
from .SecureXMLRPCServer import SecureXMLRPCServer
from .SecureThreadedXMLRPCServer import SecureThreadedXMLRPCServer, SecureThreadedXMLRPCRequestHandler
from .util import cred_util
from .util import cert_util
from .util.tz_util import tzd
from .util import urn_util
from ..sfa.trust import gid
# Variable to turn on multi-threaded CH server
# If true, spawn a different thread for each RPC
THREADED = True
# Substitute eg "openflow//stanford"
# Be sure this matches init-ca.py:CERT_AUTHORITY
# This is in publicid format
SLICE_AUTHORITY = "geni//gpo//gcf"
# Credential lifetimes in seconds
# Extend slice lifetimes to actually use the resources
USER_CRED_LIFE = 86400
SLICE_CRED_LIFE = 3600
# Make the max life of a slice 30 days (an arbitrary length).
SLICE_MAX_LIFE_SECS = 30 * 24 * 60 * 60
# The list of Aggregates that this Clearinghouse knows about
# should be defined in the gcf_config file in the am_* properties.
# ListResources will refer the client to these aggregates
# Clearinghouse.runserver currently does the register_aggregate_pair
# calls for each row in that file
# but this should be doable dynamically
# Some sample pairs:
# GPOMYPLC = ('urn:publicid:IDN+plc:gpo1+authority+sa',
# 'http://myplc1.gpolab.bbn.com:12348')
# TESTGCFAM = ('urn:publicid:IDN+geni.net:gpo+authority+gcf',
# 'https://127.0.0.1:8001')
# OTHERGPOMYPLC = ('urn:publicid:IDN+plc:gpo+authority+site2',
# 'http://128.89.81.74:12348')
# ELABINELABAM = ('urn:publicid:IDN+elabinelab.geni.emulab.net',
# 'https://myboss.elabinelab.geni.emulab.net:443/protogeni/xmlrpc/am')
class SampleClearinghouseServer(object):
"""A sample clearinghouse with barebones functionality."""
def __init__(self, delegate):
self._delegate = delegate
def GetVersion(self):
return self._delegate.GetVersion()
def CreateSlice(self, urn=None):
return self._delegate.CreateSlice(urn_req=urn)
def RenewSlice(self, urn, expire_str):
try:
return self._delegate.RenewSlice(urn, expire_str)
except:
self._delegate.logger.error(traceback.format_exc())
raise
def DeleteSlice(self, urn):
return self._delegate.DeleteSlice(urn)
def ListAggregates(self):
return self._delegate.ListAggregates()
def ListMySlices(self, urn):
'''List slices owned by the user URN provided, returning a list of slice URNs.
Expired slices are deleted (and not returned).'''
return self._delegate.ListMySlices(urn)
def CreateUserCredential(self, cert):
return self._delegate.CreateUserCredential(cert)
class Clearinghouse(object):
def __init__(self):
self.logger = cred_util.logging.getLogger('gcf-ch')
self.slices = {}
self.aggs = []
def load_aggregates(self):
"""Loads aggregates from the clearinghouse section of the config file.
In the config section there are keys for each am, am_1, am_2, ..., am_n
The value for each key is the urn and url of the aggregate separated by a comma
Returns True if aggregates were loaded, False otherwise.
"""
for (key, val) in self.config['clearinghouse'].items():
if not key.startswith('am_'):
continue
(urn,url) = val.split(',')
urn = urn.strip()
url = url.strip()
if not urn:
self.logger.warn('Empty URN for aggregate %s in gcf_config' % key)
continue
if not url:
self.logger.warn('Empty URL for aggregate %s in gcf_config' % key)
continue
if urn in [x for (x, _) in self.aggs]:
self.logger.warn('Duplicate URN %s in gcf_config' % key)
continue
self.logger.info("Registering AM %s at %s", urn, url)
self.aggs.append((urn, url))
def runserver(self, addr, keyfile=None, certfile=None,
ca_certs=None, authority=None,
user_len=None, slice_len=None, config=None):
"""Run the clearinghouse server."""
# ca_certs is a dir of several certificates for peering
# If not supplied just use the certfile as the only trusted root
self.keyfile = keyfile
self.certfile = certfile
self.config = config
# Error check the keyfile, certfile all exist
if keyfile is None or not os.path.isfile(os.path.expanduser(keyfile)) or os.path.getsize(os.path.expanduser(keyfile)) < 1:
raise Exception("Missing CH key file %s" % keyfile)
if certfile is None or not os.path.isfile(os.path.expanduser(certfile)) or os.path.getsize(os.path.expanduser(certfile)) < 1:
raise Exception("Missing CH cert file %s" % certfile)
if ca_certs is None:
ca_certs = certfile
self.logger.info("Using only my CH cert as a trusted root cert")
        if not os.path.exists(os.path.expanduser(ca_certs)):
            raise Exception("Missing CA cert(s): %s" % ca_certs)
        self.trusted_root_files = cred_util.CredentialVerifier(ca_certs).root_cert_files
global SLICE_AUTHORITY, USER_CRED_LIFE, SLICE_CRED_LIFE
SLICE_AUTHORITY = authority
USER_CRED_LIFE = int(user_len)
SLICE_CRED_LIFE = int(slice_len)
# Load up the aggregates
self.load_aggregates()
# This is the arg to _make_server
ca_certs_onefname = cred_util.CredentialVerifier.getCAsFileFromDir(ca_certs)
# This is used below by CreateSlice
self.ca_cert_fnames = []
if os.path.isfile(os.path.expanduser(ca_certs)):
self.ca_cert_fnames = [os.path.expanduser(ca_certs)]
elif os.path.isdir(os.path.expanduser(ca_certs)):
self.ca_cert_fnames = [os.path.join(os.path.expanduser(ca_certs), name) for name in os.listdir(os.path.expanduser(ca_certs)) if name != cred_util.CredentialVerifier.CATEDCERTSFNAME]
# Create the xmlrpc server, load the rootkeys and do the ssl thing.
self._server = self._make_server(addr, keyfile, certfile,
ca_certs_onefname)
self._server.register_instance(SampleClearinghouseServer(self))
self.logger.info('GENI CH Listening on port %d...' % (addr[1]))
self._server.serve_forever()
def _make_server(self, addr, keyfile=None, certfile=None,
ca_certs=None):
"""Creates the XML RPC server."""
# ca_certs is a file of concatenated certs
# make 2nd arg logRequests=True if --debug
debug = False
if self.config.has_key('debug'):
debug = self.config['debug']
if THREADED:
return SecureThreadedXMLRPCServer(addr, logRequests=debug, \
keyfile=keyfile, \
certfile=certfile, \
ca_certs=ca_certs)
else:
return SecureXMLRPCServer(addr, logRequests=debug, \
keyfile=keyfile, \
certfile=certfile, \
ca_certs=ca_certs)
def _naiveUTC(self, dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
def GetVersion(self):
self.logger.info("Called GetVersion")
version = dict()
version['gcf-ch_api'] = 1
return version
# FIXME: Change that URN to be a name and non-optional
    # Currently gcf-test.py doesn't supply it, and
# Omni takes a name and constructs a URN to supply
def CreateSlice(self, urn_req = None):
self.logger.info("Called CreateSlice URN REQ %r" % urn_req)
slice_gid = None
if urn_req and self.slices.has_key(urn_req):
# If the Slice has expired, treat this as
# a request to renew
slice_cred = self.slices[urn_req]
slice_exp = self._naiveUTC(slice_cred.expiration)
if slice_exp <= datetime.datetime.utcnow():
# Need to renew this slice
self.logger.info("CreateSlice on %r found existing cred that expired at %r - will renew", urn_req, slice_exp)
slice_gid = slice_cred.get_gid_object()
else:
self.logger.debug("Slice cred is still valid at %r until %r - return it", datetime.datetime.utcnow(), slice_exp)
return slice_cred.save_to_string()
# Create a random uuid for the slice
slice_uuid = uuid.uuid4()
# First ensure we have a slice_urn
if urn_req:
# Validate urn_req has the right form
# to be issued by this CH
if not urn_util.is_valid_urn(urn_req):
                # FIXME: make sure it isn't empty, etc...
urn = urn_util.publicid_to_urn(urn_req)
else:
urn = urn_req
# Validate the urn meets name restrictions
if not urn_util.is_valid_urn_bytype(urn, 'slice', self.logger):
raise Exception("Cannot create slice with urn %s: URN is invalid" % urn)
else:
# Generate a unique URN for the slice
# based on this CH location and a UUID
# Where was the slice created?
(ipaddr, port) = self._server.socket._sock.getsockname()
# FIXME: Get public_id start from a properties file
# Create a unique name for the slice based on uuid
slice_name = slice_uuid.__str__()[4:12]
public_id = 'IDN %s slice %s//%s:%d' % (SLICE_AUTHORITY, slice_name,
ipaddr,
port)
# this func adds the urn:publicid:
# and converts spaces to +'s, and // to :
urn = urn_util.publicid_to_urn(public_id)
# Now create a GID for the slice (signed credential)
if slice_gid is None:
# FIXME: For APIv3 compliance, we need
# - slice email address
# - unique cert serial number
try:
slice_gid = cert_util.create_cert(urn, self.keyfile, self.certfile, uuidarg = slice_uuid)[0]
except Exception, exc:
self.logger.error("Cant create slice gid for slice urn %s: %s", urn, traceback.format_exc())
raise Exception("Failed to create slice %s. Cant create slice gid" % urn, exc)
# Now get the user GID which will have permissions on this slice.
# Get client x509 cert from the SSL connection
        # It doesn't have the chain but should be signed
        # by this CH's cert, which should also be a trusted
# root at any federated AM. So everyone can verify it as is.
# Note that if a user from a different CH (installed
# as trusted by this CH for some reason) called this method,
# that user would be used here - and can still get a valid slice
try:
if THREADED:
user_gid = gid.GID(string=SecureThreadedXMLRPCRequestHandler.get_pem_cert())
else:
user_gid = gid.GID(string=self._server.pem_cert)
except Exception, exc:
self.logger.error("CreateSlice failed to create user_gid from SSL client cert: %s", traceback.format_exc())
raise Exception("Failed to create slice %s. Cant get user GID from SSL client certificate." % urn, exc)
# OK have a user_gid so can get a slice credential
# authorizing this user on the slice
try:
expiration = datetime.datetime.utcnow() + datetime.timedelta(seconds=SLICE_CRED_LIFE)
# add delegatable=True to make this slice delegatable
slice_cred = self.create_slice_credential(user_gid,
slice_gid,
expiration, delegatable=True)
except Exception, exc:
self.logger.error('CreateSlice failed to get slice credential for user %r, slice %r: %s', user_gid.get_hrn(), slice_gid.get_hrn(), traceback.format_exc())
raise Exception('CreateSlice failed to get slice credential for user %r, slice %r' % (user_gid.get_hrn(), slice_gid.get_hrn()), exc)
self.logger.info('Created slice %r' % (urn))
self.slices[urn] = slice_cred
return slice_cred.save_to_string()
def RenewSlice(self, slice_urn, expire_str):
self.logger.info("Called RenewSlice(%s, %s)", slice_urn, expire_str)
if not self.slices.has_key(slice_urn):
self.logger.warning('Slice %s was not found', slice_urn)
return False
try:
in_expiration = dateutil.parser.parse(expire_str, tzinfos=tzd)
in_expiration = cred_util.naiveUTC(in_expiration)
except:
self.logger.warning('Unable to parse date "%s"', expire_str)
return False
# Is requested expiration valid? It must be in the future,
# but not too far into the future.
now = datetime.datetime.utcnow()
if in_expiration < now:
self.logger.warning('Expiration "%s" is in the past.', expire_str)
return False
duration = in_expiration - now
max_duration = datetime.timedelta(seconds=SLICE_MAX_LIFE_SECS)
if duration > max_duration:
self.logger.warning('Expiration %s is too far in the future.',
expire_str)
return False
# Everything checks out, so create a new slice cred and tuck it away.
if THREADED:
user_gid = gid.GID(string=SecureThreadedXMLRPCRequestHandler.get_pem_cert())
else:
user_gid = gid.GID(string=self._server.pem_cert)
slice_cred = self.slices[slice_urn]
slice_gid = slice_cred.get_gid_object()
# if original slice' privileges were all delegatable,
# make all the privs here delegatable
# Of course, the correct thing would be to do it priv by priv...
dgatable = False
if slice_cred.get_privileges().get_all_delegate():
dgatable = True
slice_cred = self.create_slice_credential(user_gid, slice_gid,
in_expiration, delegatable=dgatable)
self.logger.info("Slice %s renewed to %s", slice_urn, expire_str)
self.slices[slice_urn] = slice_cred
return True
def DeleteSlice(self, urn_req):
self.logger.info("Called DeleteSlice %r" % urn_req)
if self.slices.has_key(urn_req):
self.slices.pop(urn_req)
self.logger.info("Deleted slice")
return True
self.logger.info('Slice was not found')
# Slice not found!
# FIXME: Raise an error so client knows why this failed?
return False
def ListAggregates(self):
self.logger.info("Called ListAggregates")
# TODO: Allow dynamic registration of aggregates
return self.aggs
def ListMySlices(self, urn):
'''List slices owned by the user URN provided, returning a list of slice URNs.
Expired slices are deleted (and not returned).'''
ret = list()
self.logger.debug("Looking for slices owned by %s", urn)
# We could take hrn or return hrn too. Or return hrn and uuid.
# Here we take a URN and return a URN
if not self.slices:
# self.logger.debug("Returning from lms early")
return ret
for slicecred in self.slices.values():
if slicecred.get_gid_caller().get_urn() == urn:
# Confirm it has not expired. If it has, remove it from the list of slices
slice_exp = self._naiveUTC(slicecred.expiration)
sliceurn = slicecred.get_gid_object().get_urn()
# self.logger.debug("Matching slice %s", sliceurn)
if slice_exp <= datetime.datetime.utcnow():
self.logger.info("Removing expired slice %s", sliceurn)
self.slices.pop(sliceurn)
continue
ret.append(sliceurn)
else:
self.logger.debug("Found slice %s owned by different user %s", slicecred.get_gid_object().get_urn(), slicecred.get_gid_caller().get_urn())
return ret
def CreateUserCredential(self, user_gid):
'''Return string representation of a user credential
issued by this CH with caller/object this user_gid (string)
with user privileges'''
# FIXME: Validate arg - non empty, my user
user_gid = gid.GID(string=user_gid)
self.logger.info("Called CreateUserCredential for GID %s" % user_gid.get_hrn())
expiration = datetime.datetime.utcnow() + datetime.timedelta(seconds=USER_CRED_LIFE)
try:
ucred = cred_util.create_credential(user_gid, user_gid, expiration, 'user', self.keyfile, self.certfile, self.trusted_root_files)
except Exception, exc:
self.logger.error("Failed to create user credential for %s: %s", user_gid.get_hrn(), traceback.format_exc())
raise Exception("Failed to create user credential for %s" % user_gid.get_hrn(), exc)
return ucred.save_to_string()
def create_slice_credential(self, user_gid, slice_gid, expiration, delegatable=False):
'''Create a Slice credential object for this user_gid (object) on given slice gid (object)'''
# FIXME: Validate the user_gid and slice_gid
# are my user and slice
return cred_util.create_credential(user_gid, slice_gid, expiration, 'slice', self.keyfile, self.certfile, self.trusted_root_files, delegatable)
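# --- Usage sketch (not part of the original module) --------------------------
# The clearinghouse is normally started from gcf-ch.py; the block below is
# only a rough, hedged outline of the required wiring. The host/port, key
# and certificate paths and the config contents are placeholders (the sample
# aggregate pair is taken from the comments near the top of this file).
if __name__ == '__main__':
    ch = Clearinghouse()
    sample_config = {
        'clearinghouse': {
            'am_1': 'urn:publicid:IDN+geni.net:gpo+authority+gcf, https://127.0.0.1:8001',
        },
        'debug': False,
    }
    ch.runserver(('127.0.0.1', 8000),
                 keyfile='ch-key.pem',      # placeholder path
                 certfile='ch-cert.pem',    # placeholder path
                 ca_certs=None,             # fall back to certfile as the trusted root
                 authority=SLICE_AUTHORITY,
                 user_len=USER_CRED_LIFE,
                 slice_len=SLICE_CRED_LIFE,
                 config=sample_config)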
|
|
import unittest
from unittest import mock
from django.core.exceptions import ObjectDoesNotExist
import tethys_cli.app_settings_commands as cli_app_settings_command
class TestCliAppSettingsCommand(unittest.TestCase):
def setUp(self):
load_apps_patcher = mock.patch('tethys_cli.app_settings_commands.load_apps')
load_apps_patcher.start()
self.addCleanup(load_apps_patcher.stop)
def tearDown(self):
pass
@mock.patch('tethys_cli.app_settings_commands.get_setting_type', return_value='setting_type')
@mock.patch('tethys_apps.models.TethysApp')
@mock.patch('tethys_apps.models.PersistentStoreConnectionSetting')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting')
@mock.patch('tethys_apps.models.SpatialDatasetServiceSetting')
@mock.patch('tethys_apps.models.DatasetServiceSetting')
@mock.patch('tethys_apps.models.WebProcessingServiceSetting')
@mock.patch('tethys_apps.models.CustomSetting')
@mock.patch('tethys_cli.app_settings_commands.pretty_output')
def test_app_settings_list_command_unlinked(self, mock_pretty_output, __, ___, ____, _____, ______,
MockPscs, MockTethysApp, _):
# mock the args
mock_arg = mock.MagicMock(app='foo')
mock_setting = mock.MagicMock(pk='pk')
mock_setting.persistent_store_service.name = 'mock_ps'
mock_setting.name = 'name'
del mock_setting.persistent_store_service
del mock_setting.spatial_dataset_service
del mock_setting.dataset_service
del mock_setting.web_processing_service
del mock_setting.value
# mock the PersistentStoreConnectionSetting filter return value
MockPscs.objects.filter.return_value = [mock_setting]
cli_app_settings_command.app_settings_list_command(mock_arg)
MockTethysApp.objects.get(package='foo').return_value = mock_arg.app
# check TethysApp.object.get method is called with app
MockTethysApp.objects.get.assert_called_with(package='foo')
# get the app name from mock_ta
app = MockTethysApp.objects.get()
# check PersistentStoreConnectionSetting.objects.filter method is called with 'app'
MockPscs.objects.filter.assert_called_with(tethys_app=app)
# get the called arguments from the mock print
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn('Unlinked Settings:', po_call_args[0][0][0])
self.assertIn('Name', po_call_args[1][0][0])
self.assertIn('pk', po_call_args[2][0][0])
self.assertIn('Linked Settings:', po_call_args[3][0][0])
self.assertIn('None', po_call_args[4][0][0])
@mock.patch('tethys_cli.app_settings_commands.get_setting_type', return_value='setting_type')
@mock.patch('tethys_apps.models.TethysApp')
@mock.patch('tethys_apps.models.PersistentStoreConnectionSetting')
@mock.patch('tethys_apps.models.PersistentStoreDatabaseSetting')
@mock.patch('tethys_apps.models.SpatialDatasetServiceSetting')
@mock.patch('tethys_apps.models.DatasetServiceSetting')
@mock.patch('tethys_apps.models.WebProcessingServiceSetting')
@mock.patch('tethys_apps.models.CustomSetting')
@mock.patch('tethys_cli.app_settings_commands.pretty_output')
@mock.patch('tethys_cli.app_settings_commands.type')
def test_app_settings_list_command_linked(self, mock_type, mock_pretty_output, MockCs, MockWpss, MockDss,
MockSdss, MockPsds, MockPscs, MockTethysApp, _):
# mock the args
mock_arg = mock.MagicMock(app='foo')
# mock the PersistentStoreConnectionSetting filter return value
pscs = MockPscs()
pscs.name = 'n001'
pscs.pk = 'p001'
pscs.persistent_store_service.name = ''
MockPscs.objects.filter.return_value = [pscs]
# mock the PersistentStoreDatabaseSetting filter return value
psds = MockPsds()
psds.name = 'n002'
psds.pk = 'p002'
psds.persistent_store_service.name = ''
# del psds.spatial_dataset_service
MockPsds.objects.filter.return_value = [psds]
# mock the Spatial Dataset ServiceSetting filter return value
sdss = MockSdss()
sdss.name = 'n003'
sdss.pk = 'p003'
sdss.spatial_dataset_service.name = ''
del sdss.persistent_store_service
MockSdss.objects.filter.return_value = [sdss]
# mock the Dataset ServiceSetting filter return value
dss = MockDss()
dss.name = 'n004'
dss.pk = 'p004'
dss.dataset_service.name = ''
del dss.persistent_store_service
del dss.spatial_dataset_service
MockDss.objects.filter.return_value = [dss]
# mock the Web Processing ServiceSetting filter return value
wpss = MockWpss()
wpss.name = 'n005'
wpss.pk = 'p005'
wpss.web_processing_service.name = ''
del wpss.persistent_store_service
del wpss.spatial_dataset_service
del wpss.dataset_service
MockWpss.objects.filter.return_value = [wpss]
# mock the Custom Setting filter return value
cs = MockCs()
cs.name = 'n006'
cs.pk = 'p006'
cs.value = '5'
del cs.persistent_store_service
del cs.spatial_dataset_service
del cs.dataset_service
del cs.web_processing_service
MockCs.objects.filter.return_value = [cs]
MockTethysApp.objects.get(package='foo').return_value = mock_arg.app
def mock_type_func(obj):
if obj is pscs:
return MockPscs
elif obj is psds:
return MockPsds
elif obj is sdss:
return MockSdss
mock_type.side_effect = mock_type_func
cli_app_settings_command.app_settings_list_command(mock_arg)
# check TethysApp.object.get method is called with app
MockTethysApp.objects.get.assert_called_with(package='foo')
# get the app name from mock_ta
app = MockTethysApp.objects.get()
# check PersistentStoreConnectionSetting.objects.filter method is called with 'app'
MockPscs.objects.filter.assert_called_with(tethys_app=app)
# check PersistentStoreDatabaseSetting.objects.filter method is called with 'app'
MockPsds.objects.filter.assert_called_with(tethys_app=app)
# check SpatialDatasetServiceSetting.objects.filter is called with 'app'
MockSdss.objects.filter.assert_called_with(tethys_app=app)
# check DatasetServiceSetting.objects.filter is called with 'app'
MockDss.objects.filter.assert_called_with(tethys_app=app)
        # check WebProcessingServiceSetting.objects.filter is called with 'app'
MockWpss.objects.filter.assert_called_with(tethys_app=app)
# check CustomSetting.objects.filter is called with 'app'
MockCs.objects.filter.assert_called_with(tethys_app=app)
# get the called arguments from the mock print
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn('Unlinked Settings:', po_call_args[0][0][0])
self.assertIn('None', po_call_args[1][0][0])
self.assertIn('Linked Settings:', po_call_args[2][0][0])
self.assertIn('Name', po_call_args[3][0][0])
self.assertIn('n001', po_call_args[4][0][0])
self.assertIn('n002', po_call_args[5][0][0])
self.assertIn('n003', po_call_args[6][0][0])
self.assertIn('n004', po_call_args[7][0][0])
self.assertIn('n005', po_call_args[8][0][0])
self.assertIn('n006', po_call_args[9][0][0])
@mock.patch('tethys_apps.models.TethysApp')
@mock.patch('tethys_cli.cli_colors.pretty_output')
def test_app_settings_list_command_object_does_not_exist(self, mock_pretty_output, MockTethysApp):
# mock the args
mock_arg = mock.MagicMock(app='foo')
MockTethysApp.objects.get.side_effect = ObjectDoesNotExist
# raise ObjectDoesNotExist error
cli_app_settings_command.app_settings_list_command(mock_arg)
# get the called arguments from the mock print
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn('The app or extension you specified ("foo") does not exist. Command aborted.',
po_call_args[0][0][0])
@mock.patch('tethys_apps.models.TethysApp')
@mock.patch('tethys_cli.cli_colors.pretty_output')
def test_app_settings_list_command_object_exception(self, mock_pretty_output, MockTethysApp):
# mock the args
mock_arg = mock.MagicMock(app='foo')
MockTethysApp.objects.get.side_effect = Exception("error message")
        # raise a generic Exception from TethysApp.objects.get
cli_app_settings_command.app_settings_list_command(mock_arg)
# get the called arguments from the mock print
po_call_args = mock_pretty_output().__enter__().write.call_args_list
self.assertIn('Something went wrong. Please try again', po_call_args[1][0][0])
# @mock.patch('tethys_cli.app_settings_commands.create_ps_database_setting')
@mock.patch('tethys_cli.app_settings_commands.exit')
@mock.patch('tethys_apps.utilities.create_ps_database_setting')
def test_app_settings_create_ps_database_command(self, mock_database_settings, mock_exit):
# mock the args
mock_arg = mock.MagicMock(app='foo')
mock_arg.name = 'arg_name'
mock_arg.description = 'mock_description'
mock_arg.required = True
mock_arg.initializer = ''
mock_arg.initialized = 'initialized'
mock_arg.spatial = 'spatial'
mock_arg.dynamic = 'dynamic'
# mock the system exit
mock_exit.side_effect = SystemExit
        # expect SystemExit to be raised after the database setting is created
self.assertRaises(SystemExit, cli_app_settings_command.app_settings_create_ps_database_command, mock_arg)
# check the call arguments from mock_database
mock_database_settings.assert_called_with('foo', 'arg_name', 'mock_description', True, '',
'initialized', 'spatial', 'dynamic')
# check the mock exit value
mock_exit.assert_called_with(0)
@mock.patch('tethys_cli.app_settings_commands.exit')
@mock.patch('tethys_apps.utilities.create_ps_database_setting')
def test_app_settings_create_ps_database_command_with_no_success(self, mock_database_settings, mock_exit):
# mock the args
mock_arg = mock.MagicMock(app='foo')
mock_arg.name = None
mock_arg.description = 'mock_description'
mock_arg.required = True
mock_arg.initializer = ''
mock_arg.initialized = 'initialized'
mock_arg.spatial = 'spatial'
mock_arg.dynamic = 'dynamic'
# mock the system exit
mock_exit.side_effect = SystemExit
mock_database_settings.return_value = False
        # expect SystemExit to be raised when creating the database setting fails
self.assertRaises(SystemExit, cli_app_settings_command.app_settings_create_ps_database_command, mock_arg)
# check the mock exit value
mock_exit.assert_called_with(1)
@mock.patch('tethys_cli.app_settings_commands.exit')
@mock.patch('tethys_apps.utilities.remove_ps_database_setting')
def test_app_settings_remove_command(self, mock_database_settings, mock_exit):
# mock the args
mock_arg = mock.MagicMock(app='foo')
mock_arg.name = 'arg_name'
mock_arg.force = 'force'
# mock the system exit
mock_exit.side_effect = SystemExit
        # expect SystemExit to be raised after the database setting is removed
self.assertRaises(SystemExit, cli_app_settings_command.app_settings_remove_command, mock_arg)
# check the call arguments from mock_database
mock_database_settings.assert_called_with('foo', 'arg_name', 'force')
# check the mock exit value
mock_exit.assert_called_with(0)
@mock.patch('tethys_cli.app_settings_commands.exit')
@mock.patch('tethys_apps.utilities.remove_ps_database_setting')
def test_app_settings_remove_command_with_no_success(self, mock_database_settings, mock_exit):
# mock the args
mock_arg = mock.MagicMock(app='foo')
mock_arg.name = 'arg_name'
mock_arg.force = 'force'
# mock the system exit
mock_exit.side_effect = SystemExit
mock_database_settings.return_value = False
        # expect SystemExit to be raised when removing the database setting fails
self.assertRaises(SystemExit, cli_app_settings_command.app_settings_remove_command, mock_arg)
# check the mock exit value
mock_exit.assert_called_with(1)
def test_app_settings_get_setting_type(self):
from tethys_apps.models import (PersistentStoreConnectionSetting, PersistentStoreDatabaseSetting,
SpatialDatasetServiceSetting, DatasetServiceSetting,
WebProcessingServiceSetting,
CustomSetting)
self.assertEqual('ps_connection', cli_app_settings_command.get_setting_type(PersistentStoreConnectionSetting()))
self.assertEqual('ps_database', cli_app_settings_command.get_setting_type(PersistentStoreDatabaseSetting()))
self.assertEqual('ds_spatial', cli_app_settings_command.get_setting_type(SpatialDatasetServiceSetting()))
self.assertEqual('ds_dataset', cli_app_settings_command.get_setting_type(DatasetServiceSetting()))
self.assertEqual('wps', cli_app_settings_command.get_setting_type(WebProcessingServiceSetting()))
self.assertEqual('custom_setting', cli_app_settings_command.get_setting_type(CustomSetting()))
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import sys
from collections import namedtuple
from twitter.common.collections import OrderedSet
from pants.option.scope import ScopeInfo
GLOBAL_SCOPE = ''
class ArgSplitterError(Exception):
pass
class SplitArgs(namedtuple('SplitArgs',
['goals', 'scope_to_flags', 'targets', 'passthru', 'passthru_owner'])):
"""The result of splitting args.
goals: A list of explicitly specified goals.
scope_to_flags: An ordered map from scope name to the list of flags belonging to that scope.
The global scope is specified as an empty string.
Keys are in the order encountered in the args.
targets: A list of target specs.
passthru: Any remaining args specified after a -- separator.
passthru_owner: The scope specified last on the command line, if any. None otherwise.
"""
pass
class HelpRequest(namedtuple('HelpRequest', ['version', 'advanced', 'all_scopes'])):
"""A help request from the user.
version: Did the user ask for version info.
advanced: Did the user ask for advanced help (e.g., using --help-advanced).
all_scopes: Did the user ask for help for all goals and tasks (e.g., using --help-all).
"""
@classmethod
def basic(cls):
return cls(version=False, advanced=False, all_scopes=False)
class ArgSplitter(object):
"""Splits a command-line into scoped sets of flags, and a set of targets.
Recognizes, e.g.:
./pants goal -x compile --foo compile.java -y target1 target2
./pants -x compile --foo compile.java -y -- target1 target2
./pants -x compile target1 target2 --compile-java-flag
./pants -x --compile-java-flag compile target1 target2
Handles help and version args specially.
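  As an illustrative sketch (the exact behavior depends on the registered
  ScopeInfos), splitting
  ./pants --verbose compile --no-incremental src::
  would yield goals == ['compile'],
  scope_to_flags == {'': ['--verbose'], 'compile': ['--no-incremental']},
  and targets == ['src::'].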
"""
_HELP_BASIC_ARGS = ('-h', '--help', 'help')
_HELP_ADVANCED_ARGS = ('--help-advanced', 'help-advanced')
_HELP_ALL_SCOPES_ARGS = ('--help-all', 'help-all')
_HELP_ARGS = _HELP_BASIC_ARGS + _HELP_ADVANCED_ARGS + _HELP_ALL_SCOPES_ARGS
_VERSION_ARGS = ('-V', '--version')
def __init__(self, known_scope_infos):
self._known_scope_infos = known_scope_infos
self._known_scopes = (set([si.scope for si in known_scope_infos]) |
{'help', 'help-advanced', 'help-all'})
self._unconsumed_args = [] # In reverse order, for efficient popping off the end.
self._help_request = None # Will be set if we encounter any help flags.
# For convenience, and for historical reasons, we allow --scope-flag-name anywhere on the
# cmd line, as an alternative to ... scope --flag-name.
# We check for prefixes in reverse order, so we match the longest prefix first.
sorted_scope_infos = sorted(filter(lambda si: si.scope, self._known_scope_infos),
key=lambda si: si.scope, reverse=True)
# List of pairs (prefix, ScopeInfo).
self._known_scoping_prefixes = [('{0}-'.format(si.scope.replace('.', '-')), si)
for si in sorted_scope_infos]
@property
def help_request(self):
return self._help_request
def _check_for_help_request(self, arg):
    if arg not in self._HELP_ARGS:
return False
# First ensure that we have a basic HelpRequest.
if not self._help_request:
self._help_request = HelpRequest.basic()
# Now see if we need to enhance it.
advanced = self._help_request.advanced or arg in self._HELP_ADVANCED_ARGS
all_scopes = self._help_request.all_scopes or arg in self._HELP_ALL_SCOPES_ARGS
self._help_request = HelpRequest(False, advanced, all_scopes)
return True
def split_args(self, args=None):
"""Split the specified arg list (or sys.argv if unspecified).
args[0] is ignored.
Returns a SplitArgs tuple.
"""
goals = OrderedSet()
scope_to_flags = {}
def add_scope(s):
# Force the scope to appear, even if empty.
if s not in scope_to_flags:
scope_to_flags[s] = []
targets = []
passthru = []
passthru_owner = None
self._unconsumed_args = list(reversed(sys.argv if args is None else args))
# In regular use the first token is the binary name, so skip it. However tests may
# pass just a list of flags, so don't skip it in that case.
if not self._at_flag() and self._unconsumed_args:
self._unconsumed_args.pop()
if self._unconsumed_args and self._unconsumed_args[-1] == 'goal':
# TODO: Temporary warning. Eventually specifying 'goal' will be an error.
print("WARNING: Specifying 'goal' explicitly is no longer necessary, and deprecated.",
file=sys.stderr)
self._unconsumed_args.pop()
def assign_flag_to_scope(flag, default_scope):
flag_scope, descoped_flag = self._descope_flag(flag, default_scope=default_scope)
if flag_scope not in scope_to_flags:
scope_to_flags[flag_scope] = []
scope_to_flags[flag_scope].append(descoped_flag)
global_flags = self._consume_flags()
# We only check for _VERSION_ARGS in the global flags. It's reasonable for tasks
# to have a --version flag with different meaning.
for version_arg in self._VERSION_ARGS:
if version_arg in global_flags:
if not self._help_request:
self._help_request = HelpRequest(True, False, False)
global_flags.remove(version_arg)
add_scope(GLOBAL_SCOPE)
for flag in global_flags:
assign_flag_to_scope(flag, GLOBAL_SCOPE)
scope, flags = self._consume_scope()
while scope:
if not self._check_for_help_request(scope.lower()):
add_scope(scope)
goals.add(scope.partition('.')[0])
passthru_owner = scope
for flag in flags:
assign_flag_to_scope(flag, scope)
scope, flags = self._consume_scope()
while self._unconsumed_args and not self._at_double_dash():
arg = self._unconsumed_args.pop()
if arg.startswith(b'-'):
# We assume any args here are in global scope.
if not self._check_for_help_request(arg):
assign_flag_to_scope(arg, GLOBAL_SCOPE)
else:
targets.append(arg)
if self._at_double_dash():
self._unconsumed_args.pop()
passthru = list(reversed(self._unconsumed_args))
if not goals and not self._help_request:
self._help_request = HelpRequest.basic()
return SplitArgs(goals, scope_to_flags, targets, passthru, passthru_owner if passthru else None)
def _consume_scope(self):
"""Returns a pair (scope, list of flags encountered in that scope).
Note that the flag may be explicitly scoped, and therefore not actually belong to this scope.
For example, in:
./pants --compile-java-partition-size-hint=100 compile <target>
--compile-java-partition-size-hint should be treated as if it were --partition-size-hint=100
in the compile.java scope.
"""
if not self._at_scope():
return None, []
scope = self._unconsumed_args.pop()
flags = self._consume_flags()
return scope, flags
def _consume_flags(self):
"""Read flags until we encounter the first token that isn't a flag."""
flags = []
while self._at_flag():
flag = self._unconsumed_args.pop()
if not self._check_for_help_request(flag):
flags.append(flag)
return flags
def _descope_flag(self, flag, default_scope):
"""If the flag is prefixed by its scope, in the old style, extract the scope.
Otherwise assume it belongs to default_scope.
returns a pair (scope, flag).
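    For example, assuming 'compile.java' is a known (non-subsystem) scope,
    '--compile-java-foo' descopes to ('compile.java', '--foo') and
    '--no-compile-java-bar' descopes to ('compile.java', '--no-bar').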
"""
for scope_prefix, scope_info in self._known_scoping_prefixes:
for flag_prefix in ['--', '--no-']:
prefix = flag_prefix + scope_prefix
if flag.startswith(prefix):
scope = scope_info.scope
if scope_info.category == ScopeInfo.GLOBAL_SUBSYSTEM and default_scope != GLOBAL_SCOPE:
# We allow goal.task --subsystem-foo to refer to the task-level subsystem instance,
# i.e., as if qualified by --subsystem-goal-task-foo.
task_subsystem_scope = '{}.{}'.format(scope_info.scope, default_scope)
if task_subsystem_scope in self._known_scopes: # Such a task subsystem actually exists.
scope = task_subsystem_scope
return scope, flag_prefix + flag[len(prefix):]
return default_scope, flag
def _at_flag(self):
return (self._unconsumed_args and
self._unconsumed_args[-1].startswith(b'-') and
not self._at_double_dash())
def _at_scope(self):
return self._unconsumed_args and self._unconsumed_args[-1] in self._known_scopes
def _at_double_dash(self):
return self._unconsumed_args and self._unconsumed_args[-1] == b'--'
|
|
# Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
try:
import unittest2 as unittest
except ImportError:
import unittest
from sheet.worksheet import Worksheet
from sheet.rewrite_formula_offset_cell_references import (
rewrite_formula, rewrite_source_sheet_formulae_for_cut,
)
class TestRewriteFormulaOffsetCellReferences(unittest.TestCase):
def test_dont_rewrite_constants(self):
result = rewrite_formula(
"B3", 3, 5, False, (1, 2, 3, 4)
)
self.assertEquals(result, 'B3')
def test_safely_handle_none(self):
self.assertIsNone( rewrite_formula(None, 3, 5, False, (1, 2, 3, 4)) )
def test_safely_handle_nonsense(self):
unparseable_nonsense = '=!:booA1:A2'
self.assertEquals(
rewrite_formula(unparseable_nonsense, 3, 5, False, (1, 2, 3, 4)),
unparseable_nonsense
)
def test_cut_cell_reference_to_cut_cell_is_rewritten(self):
result = rewrite_formula(
"=A2", 2, 1, True, (1, 1, 1, 2)
)
self.assertEquals(result, '=C3')
def test_cut_cell_reference_to_uncut_cell_is_not_rewritten(self):
result = rewrite_formula(
"=B3", 2, 1, True, (1, 1, 1, 1)
)
self.assertEquals(result, '=B3')
def test_absolute_cut_cell_reference_to_uncut_cell_is_not_rewritten(self):
result = rewrite_formula(
"=$B$3", 2, 1, True, (1, 1, 1, 1)
)
self.assertEquals(result, '=$B$3')
def test_absolute_cut_cell_reference_to_cut_cell_is_rewritten(self):
result = rewrite_formula(
"=$A$2", 2, 1, True, (1, 1, 1, 2)
)
self.assertEquals(result, '=$C$3')
def test_copied_cell_reference_to_copied_cell_is_rewritten(self):
result = rewrite_formula(
"=A2", 2, 1, False, (1, 1, 1, 2)
)
self.assertEquals(result, '=C3')
def test_copied_cell_reference_to_uncopied_cell_is_rewritten(self):
result = rewrite_formula(
"=B3", 2, 1, False, (1, 1, 1, 1)
)
self.assertEquals(result, '=D4')
def test_absolute_copied_cell_reference_to_copied_cell_is_not_rewritten(self):
result = rewrite_formula(
"=$A$2", 2, 1, False, (1, 1, 1, 2)
)
self.assertEquals(result, '=$A$2')
def test_absolute_copied_cell_reference_to_uncopied_cell_is_not_rewritten(self):
result = rewrite_formula(
"=$B$3", 2, 1, False, (1, 1, 1, 1)
)
self.assertEquals(result, '=$B$3')
def test_copied_cell_reference_that_moves_off_grid_marked_invalid(self):
result = rewrite_formula(
"=A1", 1, -1, False, (1, 2, 1, 2)
)
self.assertEquals(result, '=#Invalid!')
def test_cut_cellrange_reference_to_completely_cut_cellrange_is_rewritten(self):
result = rewrite_formula(
"=A2:A3", 2, 1, True, (1, 1, 1, 3)
)
self.assertEquals(result, '=C3:C4')
def test_cut_cellrange_reference_to_partially_cut_cellrange_is_not_rewritten(self):
result = rewrite_formula(
"=A2:A3", 2, 1, True, (1, 1, 1, 2)
)
self.assertEquals(result, '=A2:A3')
def test_cut_absolute_cellrange_reference_to_completely_cut_cellrange_is_rewritten(self):
result = rewrite_formula(
"=$A$2:$A$3", 2, 1, True, (1, 1, 1, 3)
)
self.assertEquals(result, '=$C$3:$C$4')
def test_cut_absolute_cellrange_reference_to_partially_cut_cellrange_is_not_rewritten(self):
result = rewrite_formula(
"=$A$2:$A$3", 2, 1, True, (1, 1, 1, 2)
)
self.assertEquals(result, '=$A$2:$A$3')
def test_cut_cellrange_reference_to_partially_cut_cellrange_is_not_rewritten_even_if_its_not_obviously_overlapping(self):
cut_region_left = 2
cut_region_right = 3
cut_region_top = 1
cut_region_bottom = 2
cell_range_topleft = "A2"
cell_range_bottomright = "B3"
result = rewrite_formula(
"=%s:%s" % (cell_range_topleft, cell_range_bottomright),
2, 1,
True,
(cut_region_left, cut_region_top, cut_region_right, cut_region_bottom)
)
self.assertEquals(result, '=A2:B3')
def test_cut_absolute_cellrange_reference_to_partially_cut_cellrange_is_not_rewritten_even_if_its_not_obviously_overlapping(self):
cut_region_left = 2
cut_region_right = 3
cut_region_top = 1
cut_region_bottom = 2
cell_range_topleft = "$A$2"
cell_range_bottomright = "$B$3"
result = rewrite_formula(
"=%s:%s" % (cell_range_topleft, cell_range_bottomright),
2, 1,
True,
(cut_region_left, cut_region_top, cut_region_right, cut_region_bottom)
)
self.assertEquals(result, '=$A$2:$B$3')
def test_cut_cellrange_reference_to_uncut_cellrange_is_not_rewritten(self):
result = rewrite_formula(
"=A2:A3", 2, 1, True, (1, 1, 1, 1)
)
self.assertEquals(result, '=A2:A3')
def test_cut_absolute_cellrange_reference_to_uncut_cellrange_is_not_rewritten(self):
result = rewrite_formula(
"=$A$2:$A$3", 2, 1, True, (1, 1, 1, 1)
)
self.assertEquals(result, '=$A$2:$A$3')
def test_copied_cellrange_reference_to_completely_copied_cellrange_is_rewritten(self):
result = rewrite_formula(
"=A2:A3", 2, 1, False, (1, 1, 1, 3)
)
self.assertEquals(result, '=C3:C4')
def test_copied_absolute_cellrange_reference_to_completely_copied_cellrange_is_not_rewritten(self):
result = rewrite_formula(
"=$A$2:$A$3", 2, 1, False, (1, 1, 1, 3)
)
self.assertEquals(result, '=$A$2:$A$3')
def test_copied_cellrange_reference_to_partially_copied_cellrange_is_rewritten(self):
result = rewrite_formula(
"=A2:A3", 2, 1, False, (1, 1, 1, 2)
)
self.assertEquals(result, '=C3:C4')
def test_copied_absolute_cellrange_reference_to_partially_copied_cellrange_is_not_rewritten(self):
result = rewrite_formula(
"=$A$2:$A$3", 2, 1, False, (1, 1, 1, 2)
)
self.assertEquals(result, '=$A$2:$A$3')
def test_copied_cellrange_reference_to_uncopied_cellrange_is_rewritten(self):
result = rewrite_formula(
"=A2:A3", 2, 1, False, (1, 1, 1, 1)
)
self.assertEquals(result, '=C3:C4')
def test_copied_absolute_cellrange_reference_to_uncopied_cellrange_is_not_rewritten(self):
result = rewrite_formula(
"=$A$2:$A$3", 2, 1, False, (1, 1, 1, 1)
)
self.assertEquals(result, '=$A$2:$A$3')
def test_copied_cellrange_reference_that_moves_off_grid_marked_invalid(self):
result = rewrite_formula(
"=A1:A2", 1, -1, False, (1, 3, 1, 3)
)
self.assertEquals(result, '=#Invalid!:B1')
def test_source_sheet_cell_references_to_cut_range_are_rewritten(self):
worksheet = Worksheet()
worksheet.A1.formula = '=B1'
worksheet.A2.formula = '=B2'
worksheet.A3.formula = '=B3'
worksheet.A4.formula = 'B1'
worksheet.A5.formula = '=$B$1'
rewrite_source_sheet_formulae_for_cut(worksheet, (2, 1, 2, 2), 3, 4)
self.assertEquals(worksheet.A1.formula, '=C4')
self.assertEquals(worksheet.A2.formula, '=C5')
self.assertEquals(worksheet.A3.formula, '=B3')
self.assertEquals(worksheet.A4.formula, 'B1')
self.assertEquals(worksheet.A5.formula, '=$C$4')
def test_source_sheet_cell_ranges_inside_cut_range_are_rewritten(self):
worksheet = Worksheet()
worksheet.A1.formula = '=B1:B2'
worksheet.A2.formula = '=sum(B1:B2)'
worksheet.A3.formula = '=B3:B4'
worksheet.A4.formula = 'B1:B2'
worksheet.A5.formula = '=$B$1:$B$2'
rewrite_source_sheet_formulae_for_cut(worksheet, (2, 1, 2, 2), 3, 4)
self.assertEquals(worksheet.A1.formula, '=C4:C5')
self.assertEquals(worksheet.A2.formula, '=sum(C4:C5)')
self.assertEquals(worksheet.A3.formula, '=B3:B4')
self.assertEquals(worksheet.A4.formula, 'B1:B2')
self.assertEquals(worksheet.A5.formula, '=$C$4:$C$5')
|
|
import json
from flask import render_template, flash, redirect, session
#
from flask import url_for
# The g global is setup by Flask to store and share data
# during the life of a request.
from flask import request, g
from flask import jsonify
from flask.ext.login import login_user, logout_user, current_user, \
login_required
from flask.ext.sqlalchemy import get_debug_queries
from flask.ext.babel import gettext
from datetime import datetime
from guess_language import guessLanguage
from app import app, db, lm, oid, babel
from .forms import LoginForm, EditForm, PostForm, SearchForm
from .models import User, Post
from .emails import follower_notification
from .translate import microsoft_translate
from config import POSTS_PER_PAGE, MAX_SEARCH_RESULTS, LANGUAGES, \
DATABASE_QUERY_TIMEOUT
# load_user is registered with Flask-Login through this decorator
@lm.user_loader
def load_user(id):
""" load a user from the database. id is stored as unicode and needs to
be converted to an int for the query."""
return User.query.get(int(id))
@babel.localeselector
def get_locale():
""" Read the Accept-Languages header sent by the browser in the HTTP request
and find the best matching language from the supported languages list.
    The best_match method is provided by request.accept_languages, the
    Werkzeug Accept object built from that header."""
# return request.accept_languages.best_match(LANGUAGES.keys())
# Forcing languages for debugging: de, es, en
return 'en'
@app.before_request
def before_request():
""" Setup g.user variable. The 'current_user' global is set by Flask-Login.
For better access a copy is saved to the g object.
This allows all all requests to access the logged in user,
even inside templates. """
g.user = current_user
if g.user.is_authenticated():
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
# make SearchForm available to all templates
g.search_form = SearchForm()
g.locale = get_locale()
@app.after_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= DATABASE_QUERY_TIMEOUT:
app.logger.warning(
"""SLOW QUERY: {}
Parameters: {}
Duration: {: f}s
Context: {}
""".format(query.statement, query.parameters, query.duration,
query.context))
return response
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500
@app.route('/' , methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
@app.route('/index/<int:page>', methods=['GET', 'POST'])
# page only visible for logged in users
@login_required
def index(page=1):
form = PostForm()
if form.validate_on_submit():
language = guessLanguage(form.post.data)
if language == 'UNKNOWN' or len(language) > 5:
language = ''
post = Post(body=form.post.data, timestamp=datetime.utcnow(),
author=g.user, language=language)
db.session.add(post)
db.session.commit()
flash(gettext('Your post is now live!'))
# force the browser to issue another request after the form submission
# to avoid accidental resubmission of posts.
return redirect(url_for('index'))
# followed_posts() returns sqlalchemy object
# The paginate method can be called on any query object
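    # paginate(page, POSTS_PER_PAGE, False): the third argument (error_out) is set
    # to False so an out-of-range page yields an empty page rather than a 404.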
posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)
return render_template("index.html",
title='Pycodeshare',
                           user=g.user,
form=form,
posts=posts)
# accept GET and POST requests. Without this only GET requests are accepted
@app.route('/login', methods=['GET', 'POST'])
# tells Flask-OpenID that this is our login view function.
@oid.loginhandler
def login():
''' View function that renders the template. '''
if g.user is not None and g.user.is_authenticated():
return redirect(url_for('index'))
# Instantiate an object from LoginForm class to send it later to the
# template
form = LoginForm()
# True if all validations successful
if form.validate_on_submit():
session['remember_me'] = form.remember_me.data
# trigger the user authentication through Flask-OpenID.
return oid.try_login(form.openid.data, ask_for=['nickname', 'email'])
return render_template('login.html',
title='Sign In',
form=form,
providers=app.config['OPENID_PROVIDERS'])
@oid.after_login
def after_login(resp):
""" resp contains information returned by the OpenID provider. """
if resp.email is None or resp.email == "":
flash(gettext('Invalid login. Please try again.'))
return redirect(url_for('login'))
user = User.query.filter_by(email=resp.email).first()
if user is None:
nickname = resp.nickname
# some OpenID providers don't have the nickname.
if nickname is None or nickname == '':
nickname = resp.email.split('@')[0]
nickname = User.make_valid_nickname(nickname)
nickname = User.make_unique_nickname(nickname)
user = User(nickname=nickname, email=resp.email)
db.session.add(user)
db.session.commit()
        # make the user (implicitly) follow him/herself
# easily include the posts in the followed posts query -> tutorial pt. 8
db.session.add(user.follow(user))
db.session.commit()
remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me', None)
login_user(user, remember=remember_me)
return redirect(request.args.get('next') or url_for('index'))
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
# <nickname> becomes an argument to the 'user' view function
@app.route('/user/<nickname>')
@app.route('/user/<nickname>/<int:page>')
@login_required
def user(nickname, page=1):
    user = User.query.filter_by(nickname=nickname).first()
    if user is None:
        flash(gettext('User {0} not found.'.format(nickname)))
        return redirect(url_for('index'))
    # only fetch followers once we know the user exists
    follower = user.followers.all()
posts = user.posts.paginate(page, POSTS_PER_PAGE, False)
return render_template('user.html',
user=user,
posts=posts,
follower=follower)
@app.route('/edit', methods=['GET', 'POST'])
@login_required
def edit():
# pass the constructor argument
form = EditForm(g.user.nickname)
if form.validate_on_submit():
g.user.nickname = form.nickname.data
g.user.about_me = form.about_me.data
db.session.add(g.user)
db.session.commit()
flash(gettext('Your changes have been saved.'))
return redirect(url_for('edit'))
else:
form.nickname.data = g.user.nickname
form.about_me.data = g.user.about_me
return render_template('edit.html', form=form)
@app.route('/follow/<nickname>')
@login_required
def follow(nickname):
user = User.query.filter_by(nickname=nickname).first()
if user is None:
flash(gettext('User {0} not found.'.format(nickname)))
return redirect(url_for('index'))
if user == g.user:
flash(gettext('You can\'t follow yourself!'))
return redirect(url_for('user', nickname=nickname))
u = g.user.follow(user)
if u is None:
flash(gettext('Can not follow {0}.'.format(nickname)))
return redirect(url_for('user', nickname=nickname))
db.session.add(u)
db.session.commit()
flash(gettext('You are now following {0}!'.format(nickname)))
follower_notification(user, g.user)
return redirect(url_for('user', nickname=nickname))
@app.route('/unfollow/<nickname>')
@login_required
def unfollow(nickname):
user = User.query.filter_by(nickname=nickname).first()
if user is None:
        flash(gettext('User {0} not found.'.format(nickname)))
return redirect(url_for('index'))
if user == g.user:
flash(gettext('You can\'t unfollow yourself!'))
return redirect(url_for('user', nickname=nickname))
u = g.user.unfollow(user)
if u is None:
flash(gettext('Can not unfollow {0}.'.format(nickname)))
return redirect(url_for('user', nickname=nickname))
db.session.add(u)
db.session.commit()
flash(gettext('You have stopped following {0}.'.format(nickname)))
return redirect(url_for('user', nickname=nickname))
@app.route('/delete/<int:id>')
@login_required
def delete(id):
post = Post.query.get(id)
if post is None:
flash('Post not found.')
return redirect(url_for('index'))
if post.author.id != g.user.id:
flash('You cannot delete this post.')
return redirect(url_for('index'))
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted.')
return redirect(url_for('index'))
@app.route('/search', methods=['POST'])
@login_required
def search():
""" This function collects the search query from the form and
then redirects to search_results with this query as an argument.
The search work isn't done directly here to avoid resubmission of
the form through usage of the refresh button."""
if not g.search_form.validate_on_submit():
return redirect(url_for('index'))
return redirect(url_for('search_results', query=g.search_form.search.data))
@app.route('/search_results/<query>')
@login_required
def search_results(query):
""" """
results = Post.query.whoosh_search(query, MAX_SEARCH_RESULTS).all()
return render_template('search_results.html',
query=query,
results=results)
@app.route('/translate', methods=['POST'])
@login_required
def translate():
# data = request.form['text'] + request.form['sourceLang'] +
# return data
return jsonify({
'text': microsoft_translate(
request.form['text'],
request.form['sourceLang'],
request.form['destLang'])})
|
|
#!/usr/bin/env python
"""Base module components."""
import numpy as np
import copy
from collections import OrderedDict
import itertools
import six
import inspect
__all__ = ["BaseTransformer", "Pipeline", "Union"]
class BaseTransformer(object):
"""The base class for all transformation objects.
This class implements a single transformation (history)
    and various convenience helpers."""
# This bit gleefully stolen from sklearn.base
@classmethod
def _get_param_names(cls):
"""Get the list of parameter names for the object"""
init = cls.__init__
if six.PY3:
args, varargs = inspect.getfullargspec(init)[:2]
elif six.PY2:
args, varargs = inspect.getargspec(init)[:2]
if varargs is not None:
raise RuntimeError("BaseTransformer objects cannot have varargs")
args.pop(0)
args.sort()
return args
def get_params(self, deep=True):
"""Get the parameters for this object. Returns as a dict.
Parameters
----------
deep : bool
Recurse on nested objects
Returns
-------
params : dict
A dictionary containing all parameters for this object
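        For example, a hypothetical deformer constructed as SomeDeformer(rate=2)
        would return something like
        {'__class__': SomeDeformer, 'params': {'rate': 2}}.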
"""
out = dict(__class__=self.__class__, params=dict())
for key in self._get_param_names():
value = getattr(self, key, None)
if deep and hasattr(value, "get_params"):
deep_items = value.get_params().items()
out["params"][key] = dict(__class__=value.__class__)
out["params"][key].update((k, val) for k, val in deep_items)
else:
out["params"][key] = value
return out
def __repr__(self):
"""Pretty-print this object"""
class_name = self.__class__.__name__
return "{:s}({:s})".format(
class_name,
_pprint(self.get_params(deep=False)["params"], offset=len(class_name)),
)
def __init__(self):
self.dispatch = OrderedDict()
def states(self, jam):
raise NotImplementedError
def audio(self, mudabox, state):
raise NotImplementedError
def metadata(self, metadata, state):
raise NotImplementedError
def _register(self, pattern, function):
self.dispatch[pattern] = function.__name__
def _transform(self, jam, state):
"""Apply the transformation to audio and annotations.
        The input jam is deep-copied and modified; the modified copy is
        returned.
Parameters
----------
jam : jams.JAMS
A single jam object to modify
Returns
-------
        jam_w : jams.JAMS
            A modified copy of `jam` after transformation
See also
--------
core.load_jam_audio
"""
if not hasattr(jam.sandbox, "muda"):
raise RuntimeError("No muda state found in jams sandbox.")
# We'll need a working copy of this object for modification purposes
jam_w = copy.deepcopy(jam)
# Push our reconstructor onto the history stack
jam_w.sandbox.muda["history"].append(
{"transformer": self.__serialize__, "state": state}
)
try:
self.audio(jam_w.sandbox.muda, state)
except NotImplementedError:
pass
try:
self.metadata(jam_w.file_metadata, state)
except NotImplementedError:
pass
# Walk over the list of deformers
for query, function_name in six.iteritems(self.dispatch):
function = getattr(self, function_name)
for matched_annotation in jam_w.search(namespace=query):
function(matched_annotation, state)
return jam_w
def transform(self, jam):
"""Iterative transformation generator
Applies the deformation to an input jams object.
This generates a sequence of deformed output JAMS.
Parameters
----------
jam : jams.JAMS
The jam to transform
Examples
--------
>>> for jam_out in deformer.transform(jam_in):
... process(jam_out)
"""
for state in self.states(jam):
yield self._transform(jam, state)
@property
def __serialize__(self):
"""Serializer"""
data = self.get_params()
data["__class__"] = data["__class__"].__name__
return data
class Pipeline(object):
"""Wrapper which allows multiple BaseDeformer objects to be chained together
A given JAMS object will be transformed sequentially by
each stage of the pipeline.
The pipeline induces a graph over transformers
Attributes
----------
steps : argument array
steps[i] is a tuple of `(name, Transformer)`
Examples
--------
>>> P = muda.deformers.PitchShift(semitones=5)
>>> T = muda.deformers.TimeStretch(speed=1.25)
>>> Pipe = muda.Pipeline(steps=[('Pitch:maj3', P), ('Speed:1.25x', T)])
>>> output_jams = list(Pipe.transform(jam_in))
See Also
--------
Union
"""
def __init__(self, steps=None):
names, transformers = zip(*steps)
if len(set(names)) != len(steps):
raise ValueError("Names provided are not unique: " " {}".format(names))
# shallow copy of steps
self.steps = list(zip(names, transformers))
for t in transformers:
if not isinstance(t, BaseTransformer):
raise TypeError("{:s} is not a BaseTransformer".format(t))
def get_params(self):
"""Get the parameters for this object. Returns as a dict."""
out = {}
out["__class__"] = self.__class__
out["params"] = dict(steps=[])
for name, step in self.steps:
out["params"]["steps"].append([name, step.get_params(deep=True)])
return out
def __repr__(self):
"""Pretty-print the object"""
class_name = self.__class__.__name__
return "{:s}({:s})".format(
class_name, _pprint(self.get_params(), offset=len(class_name))
)
def __recursive_transform(self, jam, steps):
"""A recursive transformation pipeline"""
if len(steps) > 0:
head_transformer = steps[0][1]
for t_jam in head_transformer.transform(jam):
for q in self.__recursive_transform(t_jam, steps[1:]):
yield q
else:
yield jam
def transform(self, jam):
"""Apply the sequence of transformations to a single jam object.
Parameters
----------
jam : jams.JAMS
The jam object to transform
Yields
------
jam_out : jams.JAMS
The jam objects produced by the transformation sequence
"""
for output in self.__recursive_transform(jam, self.steps):
yield output
class Union(object):
"""Wrapper which allows multiple BaseDeformer objects to be combined
for round-robin sampling.
A given JAMS object will be transformed sequentially by
each element of the union, in round-robin fashion.
This is similar to `Pipeline`, except the deformers are independent
of one another in a Union, rather than applied sequentially.
Attributes
----------
steps : argument array
steps[i] is a tuple of `(name, Transformer)`
Examples
--------
>>> P = muda.deformers.PitchShift(semitones=5)
>>> T = muda.deformers.TimeStretch(speed=1.25)
>>> union = muda.Union(steps=[('Pitch:maj3', P), ('Speed:1.25x', T)])
>>> output_jams = list(union.transform(jam_in))
See Also
--------
Pipeline
"""
def __init__(self, steps=None):
names, transformers = zip(*steps)
if len(set(names)) != len(steps):
raise ValueError("Names provided are not unique: " " {}".format(names))
# shallow copy of steps
self.steps = list(zip(names, transformers))
for t in transformers:
if not isinstance(t, BaseTransformer):
raise TypeError("{:s} is not a BaseTransformer".format(t))
def get_params(self):
"""Get the parameters for this object. Returns as a dict."""
out = {}
out["__class__"] = self.__class__
out["params"] = dict(steps=[])
for name, step in self.steps:
out["params"]["steps"].append([name, step.get_params(deep=True)])
return out
def __repr__(self):
"""Pretty-print the object"""
class_name = self.__class__.__name__
return "{:s}({:s})".format(
class_name, _pprint(self.get_params(), offset=len(class_name))
)
def __serial_transform(self, jam, steps):
"""A serial transformation union"""
# This uses the round-robin itertools recipe
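        # For example, if deformer A yields a1, a2 and deformer B yields b1, the
        # union yields a1, b1, a2: each deformer is sampled in turn until exhausted.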
if six.PY2:
attr = "next"
else:
attr = "__next__"
pending = len(steps)
nexts = itertools.cycle(
getattr(iter(D.transform(jam)), attr) for (name, D) in steps
)
while pending:
try:
for next_jam in nexts:
yield next_jam()
except StopIteration:
pending -= 1
nexts = itertools.cycle(itertools.islice(nexts, pending))
def transform(self, jam):
"""Apply the sequence of transformations to a single jam object.
Parameters
----------
jam : jams.JAMS
The jam object to transform
Yields
------
jam_out : jams.JAMS
The jam objects produced by each member of the union
"""
for output in self.__serial_transform(jam, self.steps):
yield output
###
# Borrowed from scikit-learn 0.18
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
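    Example (illustrative):
    >>> _pprint({'b': 2, 'a': 1})
    'a=1, b=2'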
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ",\n" + (1 + offset // 2) * " "
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = "%s=%s" % (k, str(v))
else:
# use repr of the rest
this_repr = "%s=%s" % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + "..." + this_repr[-100:]
if i > 0:
if this_line_length + len(this_repr) >= 75 or "\n" in this_repr:
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(", ")
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = "".join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = "\n".join(l.rstrip(" ") for l in lines.split("\n"))
return lines
def _get_rng(random_state):
"""Get a random number generator (RandomState) object
from a seed or existing state.
Parameters
----------
random_state : None, int, or np.random.RandomState
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is a copy of the current global
random state.
Returns
-------
rng : np.random.RandomState
The RandomState object
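    Examples
    --------
    >>> rng = _get_rng(12345)
    >>> isinstance(rng, np.random.RandomState)
    True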
"""
if random_state is None:
state = np.random.get_state()
rng = np.random.RandomState()
rng.set_state(state)
elif isinstance(random_state, int):
rng = np.random.RandomState(seed=random_state)
elif isinstance(random_state, np.random.RandomState):
rng = random_state
else:
raise ValueError("Invalid random_state={}".format(random_state))
return rng
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_almost_equal_nulp,
assert_equal)
from astropy.stats.biweight import (biweight_location, biweight_scale,
biweight_midvariance,
biweight_midcovariance,
biweight_midcorrelation)
from astropy.utils.misc import NumpyRNGContext
def test_biweight_location():
with NumpyRNGContext(12345):
# test that it runs
randvar = np.random.randn(10000)
cbl = biweight_location(randvar)
assert abs(cbl - 0) < 1e-2
def test_biweight_location_constant():
cbl = biweight_location(np.ones((10, 5)))
assert cbl == 1.
def test_biweight_location_constant_axis_2d():
shape = (10, 5)
data = np.ones(shape)
cbl = biweight_location(data, axis=0)
assert_allclose(cbl, np.ones(shape[1]))
cbl = biweight_location(data, axis=1)
assert_allclose(cbl, np.ones(shape[0]))
val1 = 100.
val2 = 2.
val3 = 5.
data = np.arange(50).reshape(10, 5)
data[2] = val1
data[7] = val2
data[8] = [val3, 0.8, val3, -0.8, val3]
cbl = biweight_location(data, axis=1)
assert_allclose(cbl[2], val1)
assert_allclose(cbl[7], val2)
assert_allclose(cbl[8], val3)
def test_biweight_location_constant_axis_3d():
shape = (10, 5, 2)
data = np.ones(shape)
cbl = biweight_location(data, axis=0)
assert_allclose(cbl, np.ones((shape[1], shape[2])))
cbl = biweight_location(data, axis=1)
assert_allclose(cbl, np.ones((shape[0], shape[2])))
cbl = biweight_location(data, axis=2)
assert_allclose(cbl, np.ones((shape[0], shape[1])))
def test_biweight_location_small():
bw_loc = biweight_location([1, 3, 5, 500, 2])
assert_allclose(bw_loc, 2.7456117)
def test_biweight_location_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = np.random.normal(5, 2, (ny, nx))
bw = biweight_location(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_location(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_location(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
def test_biweight_location_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = np.random.normal(5, 2, (nz, ny, nx))
bw = biweight_location(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_location(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi)
def test_biweight_location_axis_tuple():
"""Test a 3D array with a tuple axis keyword."""
data = np.arange(24).reshape(2, 3, 4)
data[0, 0] = 100.
assert_equal(biweight_location(data, axis=0),
biweight_location(data, axis=(0,)))
assert_equal(biweight_location(data, axis=-1),
biweight_location(data, axis=(2,)))
assert_equal(biweight_location(data, axis=(0, 1)),
biweight_location(data, axis=(1, 0)))
assert_equal(biweight_location(data, axis=(0, 2)),
biweight_location(data, axis=(0, -1)))
assert_equal(biweight_location(data, axis=(0, 1, 2)),
biweight_location(data, axis=(2, 0, 1)))
assert_equal(biweight_location(data, axis=(0, 1, 2)),
biweight_location(data, axis=None))
@pytest.mark.filterwarnings('ignore:All-NaN slice encountered')
@pytest.mark.filterwarnings('ignore:Invalid value encountered in median')
def test_biweight_location_ignore_nan():
data1d = np.array([1, 3, 5, 500, 2, np.nan])
data2d = np.array([data1d, data1d])
assert np.isnan(biweight_location(data1d, ignore_nan=False))
biw_expected = biweight_location(data1d[:-1], ignore_nan=False)
assert_equal(biweight_location(data1d, ignore_nan=True), biw_expected)
assert_equal(biweight_location(data2d, axis=0, ignore_nan=True),
data1d)
assert_equal(biweight_location(data2d, axis=1, ignore_nan=True),
[biw_expected, biw_expected])
@pytest.mark.filterwarnings('ignore:All-NaN slice encountered')
@pytest.mark.filterwarnings('ignore:Invalid value encountered in median')
def test_biweight_location_nan():
data1d = np.array([1, 3, 5, 500, 2, np.nan])
all_nan = data1d.copy()
all_nan[:] = np.nan
data2d = np.array([data1d, data1d, all_nan])
data1d_masked = np.ma.masked_invalid(data1d)
data1d_masked.data[0] = np.nan
data2d_masked = np.ma.masked_invalid(data2d)
assert np.isnan(biweight_location(data1d))
bw_loc = biweight_location(data1d_masked)
assert not isinstance(bw_loc, np.ma.MaskedArray)
assert np.isnan(biweight_location(data2d))
for axis in (0, 1):
assert np.all(np.isnan(biweight_location(data2d, axis=axis)))
assert isinstance(biweight_location(data2d_masked, axis=axis),
np.ma.MaskedArray)
@pytest.mark.filterwarnings('ignore:All-NaN slice encountered')
@pytest.mark.filterwarnings('ignore:Invalid value encountered in median')
def test_biweight_location_masked():
data1d = np.array([1, 3, 5, 500, 2, np.nan])
data2d = np.array([data1d, data1d])
data1d_masked = np.ma.masked_invalid(data1d)
data2d_masked = np.ma.masked_invalid(data2d)
assert_equal(biweight_location(data1d, ignore_nan=True),
biweight_location(data1d_masked))
assert_equal(biweight_location(data2d, ignore_nan=True),
biweight_location(data2d_masked))
bw_loc = biweight_location(data1d_masked)
assert_allclose(bw_loc, 2.7456117)
assert np.isscalar(bw_loc)
bw_loc = biweight_location(data2d, ignore_nan=True, axis=1)
bw_loc_masked = biweight_location(data2d_masked, axis=1)
assert isinstance(bw_loc_masked, np.ma.MaskedArray)
assert ~np.any(bw_loc_masked.mask) # mask is all False
assert_equal(bw_loc, bw_loc_masked.data)
bw_loc = biweight_location(data2d, ignore_nan=True, axis=0)
bw_loc_masked = biweight_location(data2d_masked, axis=0)
assert_equal(bw_loc_masked.data[:-1], bw_loc[:-1])
assert bw_loc_masked.mask[-1] # last mask element is True
data1d_masked.data[0] = np.nan # unmasked NaN
bw_loc = biweight_location(data1d_masked)
assert not isinstance(bw_loc, np.ma.MaskedArray)
assert np.isscalar(bw_loc)
assert np.isnan(bw_loc)
assert_equal(biweight_location(data1d_masked, ignore_nan=True),
biweight_location(data1d[1:], ignore_nan=True))
# ensure that input masked array is not modified
assert np.isnan(data1d_masked[0])
def test_biweight_scale():
# NOTE: biweight_scale is covered by biweight_midvariance tests
data = [1, 3, 5, 500, 2]
scl = biweight_scale(data)
var = biweight_midvariance(data)
assert_allclose(scl, np.sqrt(var))
data = np.ma.masked_invalid([1, 3, 5, 500, 2, np.nan])
data[0] = np.nan
scl = biweight_scale(data, ignore_nan=True)
var = biweight_midvariance(data, ignore_nan=True)
assert_allclose(scl, np.sqrt(var))
def test_biweight_midvariance():
with NumpyRNGContext(12345):
# test that it runs
randvar = np.random.randn(10000)
var = biweight_midvariance(randvar)
assert_allclose(var, 1.0, rtol=0.02)
def test_biweight_midvariance_small():
data = [1, 3, 5, 500, 2]
var = biweight_midvariance(data)
assert_allclose(var, 2.9238456) # verified with R
var = biweight_midvariance(data, modify_sample_size=True)
assert_allclose(var, 2.3390765)
def test_biweight_midvariance_5127():
# test a regression introduced in #5127
rand = np.random.default_rng(12345)
data = rand.normal(loc=0., scale=20., size=(100, 100))
var = biweight_midvariance(data)
assert_allclose(var, 409.87135608846205)
def test_biweight_midvariance_axis():
"""Test a 2D array with the axis keyword."""
with NumpyRNGContext(12345):
ny = 100
nx = 200
data = np.random.normal(5, 2, (ny, nx))
bw = biweight_midvariance(data, axis=0)
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, i]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
bw = biweight_midvariance(data, axis=1)
bwi = []
for i in range(ny):
bwi.append(biweight_midvariance(data[i, :]))
bwi = np.array(bwi)
assert_allclose(bw, bwi)
def test_biweight_midvariance_axis_3d():
"""Test a 3D array with the axis keyword."""
with NumpyRNGContext(12345):
nz = 3
ny = 4
nx = 5
data = np.random.normal(5, 2, (nz, ny, nx))
bw = biweight_midvariance(data, axis=0)
assert bw.shape == (ny, nx)
y = 0
bwi = []
for i in range(nx):
bwi.append(biweight_midvariance(data[:, y, i]))
bwi = np.array(bwi)
assert_allclose(bw[y], bwi)
@pytest.mark.filterwarnings('ignore:All-NaN slice encountered')
@pytest.mark.filterwarnings('ignore:Invalid value encountered in median')
def test_biweight_midvariance_ignore_nan():
data1d = np.array([1, 3, 5, 500, 2, np.nan])
data2d = np.array([data1d, data1d])
assert np.isnan(biweight_midvariance(data1d, ignore_nan=False))
biw_var = biweight_midvariance(data1d[:-1], ignore_nan=False)
biw_var_nonan = biweight_midvariance(data1d, ignore_nan=True)
assert_equal(biw_var_nonan, biw_var)
assert_equal(biweight_midvariance(data2d, axis=0, ignore_nan=True),
[0., 0., 0., 0., 0., np.nan])
assert_equal(biweight_midvariance(data2d, axis=1, ignore_nan=True),
[biw_var_nonan, biw_var_nonan])
@pytest.mark.filterwarnings('ignore:All-NaN slice encountered')
@pytest.mark.filterwarnings('ignore:Invalid value encountered in median')
def test_biweight_scale_nan():
data1d = np.array([1, 3, 5, 500, 2, np.nan])
all_nan = data1d.copy()
all_nan[:] = np.nan
data2d = np.array([data1d, data1d, all_nan])
data1d_masked = np.ma.masked_invalid(data1d)
data1d_masked.data[0] = np.nan
data2d_masked = np.ma.masked_invalid(data2d)
assert np.isnan(biweight_scale(data1d))
bw_scl = biweight_scale(data1d_masked)
assert not isinstance(bw_scl, np.ma.MaskedArray)
assert np.isnan(bw_scl)
assert np.isnan(biweight_scale(data2d))
assert_allclose(biweight_scale(data2d_masked), 1.709926, atol=1e-5)
for axis in (0, 1):
assert np.all(np.isnan(biweight_scale(data2d, axis=axis)))
assert isinstance(biweight_scale(data2d_masked, axis=axis),
np.ma.MaskedArray)
@pytest.mark.filterwarnings('ignore:All-NaN slice encountered')
@pytest.mark.filterwarnings('ignore:Invalid value encountered in median')
def test_biweight_midvariance_masked():
data1d = np.array([1, 3, 5, 500, 2, np.nan])
data2d = np.array([data1d, data1d])
data1d_masked = np.ma.masked_invalid(data1d)
data2d_masked = np.ma.masked_invalid(data2d)
assert_equal(biweight_midvariance(data1d, ignore_nan=True),
biweight_midvariance(data1d_masked))
assert_equal(biweight_midvariance(data2d, ignore_nan=True),
biweight_midvariance(data2d_masked))
bw_scl = biweight_midvariance(data1d_masked)
assert_allclose(bw_scl, 2.9238456)
assert np.isscalar(bw_scl)
bw_loc = biweight_midvariance(data2d, ignore_nan=True, axis=1)
bw_loc_masked = biweight_midvariance(data2d_masked, axis=1)
assert isinstance(bw_loc_masked, np.ma.MaskedArray)
assert ~np.any(bw_loc_masked.mask) # mask is all False
assert_equal(bw_loc, bw_loc_masked.data)
bw_loc = biweight_midvariance(data2d, ignore_nan=True, axis=0)
bw_loc_masked = biweight_midvariance(data2d_masked, axis=0)
assert_equal(bw_loc_masked.data[:-1], bw_loc[:-1])
assert bw_loc_masked.mask[-1] # last mask element is True
data1d_masked.data[0] = np.nan # unmasked NaN
bw_scl = biweight_midvariance(data1d_masked)
assert not isinstance(bw_scl, np.ma.MaskedArray)
assert np.isscalar(bw_scl)
assert np.isnan(bw_scl)
assert_equal(biweight_midvariance(data1d_masked, ignore_nan=True),
biweight_midvariance(data1d[1:], ignore_nan=True))
# ensure that input masked array is not modified
assert np.isnan(data1d_masked[0])
def test_biweight_scale_axis_tuple():
"""Test a 3D array with a tuple axis keyword."""
data = np.arange(24).reshape(2, 3, 4)
data[0, 0] = 100.
assert_equal(biweight_scale(data, axis=0),
biweight_scale(data, axis=(0,)))
assert_equal(biweight_scale(data, axis=-1),
biweight_scale(data, axis=(2,)))
assert_equal(biweight_scale(data, axis=(0, 1)),
biweight_scale(data, axis=(1, 0)))
assert_equal(biweight_scale(data, axis=(0, 2)),
biweight_scale(data, axis=(0, -1)))
assert_equal(biweight_scale(data, axis=(0, 1, 2)),
biweight_scale(data, axis=(2, 0, 1)))
assert_equal(biweight_scale(data, axis=(0, 1, 2)),
biweight_scale(data, axis=None))
assert_equal(biweight_scale(data, axis=(0, 2), modify_sample_size=True),
biweight_scale(data, axis=(0, -1), modify_sample_size=True))
def test_biweight_midvariance_constant_axis():
bw = biweight_midvariance(np.ones((10, 5)))
assert bw == 0.0
def test_biweight_midvariance_constant_axis_2d():
shape = (10, 5)
data = np.ones(shape)
cbl = biweight_midvariance(data, axis=0)
assert_allclose(cbl, np.zeros(shape[1]))
cbl = biweight_midvariance(data, axis=1)
assert_allclose(cbl, np.zeros(shape[0]))
data = np.arange(50).reshape(10, 5)
data[2] = 100.
data[7] = 2.
data[8] = [5.0, 0.8, 5.0, -0.8, 5.0]
bw = biweight_midvariance(data, axis=1)
assert_allclose(bw[2], 0.)
assert_allclose(bw[7], 0.)
assert_allclose(bw[8], 0.)
def test_biweight_midvariance_constant_axis_3d():
shape = (10, 5, 2)
data = np.ones(shape)
cbl = biweight_midvariance(data, axis=0)
assert_allclose(cbl, np.zeros((shape[1], shape[2])))
cbl = biweight_midvariance(data, axis=1)
assert_allclose(cbl, np.zeros((shape[0], shape[2])))
cbl = biweight_midvariance(data, axis=2)
assert_allclose(cbl, np.zeros((shape[0], shape[1])))
def test_biweight_midcovariance_1d():
d = [0, 1, 2]
cov = biweight_midcovariance(d)
var = biweight_midvariance(d)
assert_allclose(cov, [[var]])
def test_biweight_midcovariance_2d():
d = [[0, 1, 2], [2, 1, 0]]
cov = biweight_midcovariance(d)
val = 0.70121809
assert_allclose(cov, [[val, -val], [-val, val]]) # verified with R
d = [[5, 1, 10], [500, 5, 2]]
cov = biweight_midcovariance(d)
assert_allclose(cov, [[14.54159077, -7.79026256], # verified with R
[-7.79026256, 6.92087252]])
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_allclose(cov, [[14.54159077, -5.19350838],
[-5.19350838, 4.61391501]])
def test_biweight_midcovariance_constant():
data = np.ones((3, 10))
val3 = 5.0
data[1] = [val3, 0.8, val3, -0.8, val3, val3, val3, 1.0, val3, -0.7]
cov = biweight_midcovariance(data)
assert_allclose(cov, np.zeros((3, 3)))
rng = np.random.default_rng(123)
data = rng.random((5, 5))
val3 = 5.0
data[1] = [val3, 0.8, val3, -0.8, val3]
cov = biweight_midcovariance(data)
assert_allclose(cov[1, :], 0.)
assert_allclose(cov[:, 1], 0.)
def test_biweight_midcovariance_midvariance():
"""
Test that biweight_midcovariance diagonal elements agree with
biweight_midvariance.
"""
rng = np.random.default_rng(1)
d = rng.normal(0, 2, size=(100, 3))
cov = biweight_midcovariance(d)
var = [biweight_midvariance(a) for a in d]
assert_allclose(cov.diagonal(), var)
cov2 = biweight_midcovariance(d, modify_sample_size=True)
var2 = [biweight_midvariance(a, modify_sample_size=True)
for a in d]
assert_allclose(cov2.diagonal(), var2)
def test_midcovariance_shape():
"""
Test that biweight_midcovariance raises error with a 3D array.
"""
d = np.ones(27).reshape(3, 3, 3)
with pytest.raises(ValueError) as e:
biweight_midcovariance(d)
assert 'The input array must be 2D or 1D.' in str(e.value)
def test_midcovariance_M_shape():
"""
Test that biweight_midcovariance raises error when M is not a scalar
or 1D array.
"""
d = [0, 1, 2]
M = [[0, 1], [2, 3]]
with pytest.raises(ValueError) as e:
biweight_midcovariance(d, M=M)
assert 'M must be a scalar or 1D array.' in str(e.value)
def test_biweight_midcovariance_symmetric():
"""
Regression test to ensure that midcovariance matrix is symmetric
when ``modify_sample_size=True`` (see #5972).
"""
rng = np.random.default_rng(1)
d = rng.gamma(2, 2, size=(3, 500))
cov = biweight_midcovariance(d)
assert_array_almost_equal_nulp(cov, cov.T, nulp=5)
cov = biweight_midcovariance(d, modify_sample_size=True)
assert_array_almost_equal_nulp(cov, cov.T, nulp=5)
def test_biweight_midcorrelation():
x = [0, 1, 2]
y = [2, 1, 0]
assert_allclose(biweight_midcorrelation(x, x), 1.0)
assert_allclose(biweight_midcorrelation(x, y), -1.0)
x = [5, 1, 10, 12.4, 13.2]
y = [500, 5, 2, 7.1, 0.9]
# verified with R
assert_allclose(biweight_midcorrelation(x, y), -0.14411038976763313)
def test_biweight_midcorrelation_inputs():
a1 = np.ones((3, 3))
a2 = np.ones(5)
a3 = np.ones(7)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a1, a2)
assert 'x must be a 1D array.' in str(e.value)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a2, a1)
assert 'y must be a 1D array.' in str(e.value)
with pytest.raises(ValueError) as e:
biweight_midcorrelation(a2, a3)
assert 'x and y must have the same shape.' in str(e.value)
def test_biweight_32bit_runtime_warnings():
"""Regression test for #6905."""
with NumpyRNGContext(12345):
data = np.random.random(100).astype(np.float32)
data[50] = 30000.
biweight_scale(data)
biweight_midvariance(data)
|
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Richard Hull
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Example usage:
#
# from oled.device import ssd1306, sh1106
# from oled.render import canvas
# from PIL import ImageFont, ImageDraw
#
# font = ImageFont.load_default()
# device = ssd1306(port=1, address=0x3C)
#
# with canvas(device) as draw:
# draw.rectangle((0, 0, device.width, device.height), outline=0, fill=0)
# draw.text(30, 40, "Hello World", font=font, fill=255)
#
# As soon as the with-block scope level is complete, the graphics primitives
# will be flushed to the device.
#
# Creating a new canvas is effectively 'carte blanche': If you want to retain
# an existing canvas, then make a reference like:
#
# c = canvas(device)
# for X in ...:
# with c as draw:
# draw.rectangle(...)
#
# As before, as soon as the with block completes, the canvas buffer is flushed
# to the device
import smbus
class device(object):
"""
Base class for OLED driver classes
"""
def __init__(self, port=1, address=0x3C, cmd_mode=0x00, data_mode=0x40):
self.cmd_mode = cmd_mode
self.data_mode = data_mode
self.bus = smbus.SMBus(port)
self.addr = address
def command(self, *cmd):
"""
Sends a command or sequence of commands through to the
device - maximum allowed is 32 bytes in one go.
"""
assert(len(cmd) <= 32)
self.bus.write_i2c_block_data(self.addr, self.cmd_mode, list(cmd))
def data(self, data):
"""
Sends a data byte or sequence of data bytes through to the
device - maximum allowed in one transaction is 32 bytes, so if
data is larger than this it is sent in chunks.
"""
for i in xrange(0, len(data), 32):
self.bus.write_i2c_block_data(self.addr,
self.data_mode,
list(data[i:i+32]))
class sh1106(device):
"""
A device encapsulates the I2C connection (address/port) to the SH1106
OLED display hardware. The init method pumps commands to the display
to properly initialize it. Further control commands can then be
called to affect the brightness. Direct use of the command() and
data() methods are discouraged.
"""
def __init__(self, port=1, address=0x3C):
super(sh1106, self).__init__(port, address)
self.width = 128
self.height = 64
self.pages = self.height / 8
self.command(
const.DISPLAYOFF,
const.MEMORYMODE,
const.SETHIGHCOLUMN, 0xB0, 0xC8,
const.SETLOWCOLUMN, 0x10, 0x40,
const.SETCONTRAST, 0x7F,
const.SETSEGMENTREMAP,
const.NORMALDISPLAY,
const.SETMULTIPLEX, 0x3F,
const.DISPLAYALLON_RESUME,
const.SETDISPLAYOFFSET, 0x00,
const.SETDISPLAYCLOCKDIV, 0xF0,
const.SETPRECHARGE, 0x22,
const.SETCOMPINS, 0x12,
const.SETVCOMDETECT, 0x20,
const.CHARGEPUMP, 0x14,
const.DISPLAYON)
def display(self, image):
"""
Takes a 1-bit image and dumps it to the SH1106 OLED display.
"""
assert(image.mode == '1')
assert(image.size[0] == self.width)
assert(image.size[1] == self.height)
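        # Rough sketch of the packing below (assuming the usual SH1106 page layout):
        # the 128x64 image is written page by page; each page is 128 bytes wide and
        # 8 pixels tall, and every output byte packs one 8-pixel vertical column
        # slice, with the topmost pixel of the page ending up in the least
        # significant bit.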
page = 0xB0
pix = list(image.getdata())
step = self.width * 8
for y in xrange(0, self.pages * step, step):
# move to given page, then reset the column address
self.command(page, 0x02, 0x10)
page += 1
buf = []
for x in xrange(self.width):
byte = 0
for n in xrange(0, step, self.width):
byte |= (pix[x + y + n] & 0x01) << 8
byte >>= 1
buf.append(byte)
self.data(buf)
class ssd1306(device):
"""
A device encapsulates the I2C connection (address/port) to the SSD1306
OLED display hardware. The init method pumps commands to the display
to properly initialize it. Further control commands can then be
called to affect the brightness. Direct use of the command() and
data() methods are discouraged.
"""
def __init__(self, port=1, address=0x3C):
super(ssd1306, self).__init__(port, address)
self.width = 128
self.height = 64
self.pages = self.height / 8
self.command(
const.DISPLAYOFF,
const.SETDISPLAYCLOCKDIV, 0x80,
const.SETMULTIPLEX, 0x3F,
const.SETDISPLAYOFFSET, 0x00,
const.SETSTARTLINE,
const.CHARGEPUMP, 0x14,
const.MEMORYMODE, 0x00,
const.SEGREMAP,
const.COMSCANDEC,
const.SETCOMPINS, 0x12,
const.SETCONTRAST, 0xCF,
const.SETPRECHARGE, 0xF1,
const.SETVCOMDETECT, 0x40,
const.DISPLAYALLON_RESUME,
const.NORMALDISPLAY,
const.DISPLAYON)
def display(self, image):
"""
Takes a 1-bit image and dumps it to the SSD1306 OLED display.
"""
assert(image.mode == '1')
assert(image.size[0] == self.width)
assert(image.size[1] == self.height)
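        # Same vertical-byte packing as sh1106.display() above, but here the
        # column/page address window is set once and the whole buffer is sent in a
        # single data() call; note each page is filled right-to-left, since i
        # starts at y + width - 1 and counts down.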
self.command(
const.COLUMNADDR, 0x00, self.width-1, # Column start/end address
const.PAGEADDR, 0x00, self.pages-1) # Page start/end address
pix = list(image.getdata())
step = self.width * 8
buf = []
for y in xrange(0, self.pages * step, step):
i = y + self.width-1
while i >= y:
byte = 0
for n in xrange(0, step, self.width):
byte |= (pix[i + n] & 0x01) << 8
byte >>= 1
buf.append(byte)
i -= 1
self.data(buf)
class const:
CHARGEPUMP = 0x8D
COLUMNADDR = 0x21
COMSCANDEC = 0xC8
COMSCANINC = 0xC0
DISPLAYALLON = 0xA5
DISPLAYALLON_RESUME = 0xA4
DISPLAYOFF = 0xAE
DISPLAYON = 0xAF
EXTERNALVCC = 0x1
INVERTDISPLAY = 0xA7
MEMORYMODE = 0x20
NORMALDISPLAY = 0xA6
PAGEADDR = 0x22
SEGREMAP = 0xA0
SETCOMPINS = 0xDA
SETCONTRAST = 0x81
SETDISPLAYCLOCKDIV = 0xD5
SETDISPLAYOFFSET = 0xD3
SETHIGHCOLUMN = 0x10
SETLOWCOLUMN = 0x00
SETMULTIPLEX = 0xA8
SETPRECHARGE = 0xD9
SETSEGMENTREMAP = 0xA1
SETSTARTLINE = 0x40
SETVCOMDETECT = 0xDB
SWITCHCAPVCC = 0x2
|
|
#!/usr/bin/python
# pylint: disable=line-too-long
"""
module for TimeseriesArray class
"""
import sys
import re
import logging
import base64
import json
import os
import gzip
# own modules
from Timeseries import Timeseries as Timeseries
from TimeseriesArrayStats import TimeseriesArrayStats as TimeseriesArrayStats
#################### hack begin ##########################
"""
hack to mimic some python 2.x behaviour in the string
representation of tuples
"""
def _b64encode_p3(list_obj):
if len(list_obj) == 1:
start ="(u'" + list_obj[0] + "',)"
else:
start ="(u'" + "', u'".join((str(key) for key in list_obj)) + "')"
encoded = base64.urlsafe_b64encode(start.encode("utf-8")).decode("utf-8")
#print("%s -> %s -> %s" % (list_obj, encoded, b64decode(encoded)))
return encoded
def _b64encode_p2(list_obj):
encoded = base64.urlsafe_b64encode(unicode(tuple(list_obj))).decode("utf-8")
#print("%s -> %s -> %s" % (list_obj, encoded, b64decode(encoded)))
return encoded
def _b64decode(encoded):
    # use the urlsafe variant so it matches urlsafe_b64encode() used above
    decoded = base64.urlsafe_b64decode(encoded).decode("utf-8")
#print("%s -> %s" % (encoded, decoded))
return decoded
if sys.version_info < (3,0):
print("using python 2 coding funtions")
b64encode = _b64encode_p3
b64decode = _b64decode
else:
b64encode = _b64encode_p3
b64decode = _b64decode
##################### hack end ###########################
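# Round-trip sketch (key values invented for illustration):
#   b64encode(("srv01", "0")) encodes the string "(u'srv01', u'0')", and
#   eval(b64decode(...)) turns that back into the tuple (u'srv01', u'0'),
# which is how get_ts_filenames() below recovers index keys from dump filenames.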
def is_near(value, target_value, pct=0.05):
"""
function to implement if some numeric is near, in terms of percent
handy to use with floating point equality
"""
minimum = float(target_value) * (1.0 - pct)
maximum = float(target_value) * (1.0 + pct)
return minimum < float(value) < maximum
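# Example (values chosen for illustration): with the default 5 percent window,
# is_near(98.0, 100.0) is True because 95.0 < 98.0 < 105.0, while
# is_near(106.0, 100.0) is False; the comparison is strict, so exact boundary
# values such as is_near(95.0, 100.0) also evaluate to False.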
class TimeseriesArray(object):
"""
holds dictionary of Timeseries objects
"""
group_funcs = {
"sum" : sum,
"min" : min,
"max" : max,
"avg" : lambda a: sum(a) / len(a),
"len" : len,
}
def __init__(self, index_keynames, value_keynames, ts_key="ts", datatypes=None, cache=False):
"""
index_keys <tuple> column names of index columns
value_keys <tuple> column names of value columns
ts_key <str> name of timestamp column
datatypes <list> list of used datatypes
cache <bool> should already loaded timeseries be cached, useful to calculate quantiles
"""
self.__index_keynames = tuple([value for value in index_keynames])
self.__value_keynames = list([value for value in value_keynames])
self.__ts_key = ts_key
self.__cache = cache
# define instance data
self.__debug = False
self.__data = {} # holds data
self.ts_autoload = {} # holds key to ts filename dict
self.datatypes = datatypes
self.__group_keyname = None # for future use
self.__group_func = None # for future use
def __len__(self):
"""mimic dict"""
return len(self.__data.keys())
def __str__(self):
"""
        return string representation for tsa,
        mostly the same as the stored version
"""
outbuffer = {
"index_keys" : self.__index_keynames,
"value_keys" : self.__value_keynames,
"ts_key" : self.__ts_key,
"ts_filenames" : [self.get_ts_dumpfilename(key) for key in self.keys()],
"tsa_filename" : self.get_dumpfilename(self.__index_keynames),
"datatypes" : self.datatypes
}
return json.dumps(outbuffer, indent=4, sort_keys=True)
def __getitem__(self, key):
"""mimic dict, honor lazy reloading of Timeseries if value is None"""
if self.__data[key] is None:
# auto load data if None
timeseries = self.__autoload_ts(key)
if self.__cache is False:
                # if cache is False, return only the loaded data,
                # but do not save a reference, so every later call will
                # result in a re-read of the timeseries
return timeseries
self.__data[key] = timeseries
return self.__data[key]
def __setitem__(self, key, value):
"""mimic dict"""
self.__data[key] = value
def __delitem__(self, key):
"""mimic dict"""
del self.__data[key]
def __eq__(self, other):
"""test equality in depth"""
try:
assert self.__index_keynames == other.index_keynames
assert self.__value_keynames == other.value_keynames
assert self.__ts_key == other.ts_key
assert len(self.__data.keys()) == len(other.keys())
for key in self.__data.keys():
if len(self[key]) != len(other[key]):
                    raise AssertionError("timeseries %s differ in length between the two objects" % str(key))
return True
except AssertionError as exc:
logging.exception(exc)
return False
def items(self):
"""mimic dict, but honor autoload feature"""
for key in self.keys():
yield (key, self[key])
def values(self):
"""mimic dict, but honor autoload feature"""
for key in self.__data.keys():
yield self[key]
def keys(self):
"""
mimic dict behaviour
"""
return self.__data.keys()
def get_index_dict(self, index_values):
"""
index_values <tuple> as used in self.data.keys()
return <dict> representation of given index_values
"""
return dict(zip(self.__index_keynames, index_values))
@property
def index_keynames(self):
"""keynames which build key for self.data"""
return self.__index_keynames
@property
def value_keynames(self):
"""keynames which build value fields for Timeseries objects"""
return self.__value_keynames
@property
def ts_key(self):
"""keyname of timestamp"""
return self.__ts_key
@property
def stats(self):
"""return TimeseriesArrayStats from self"""
return TimeseriesArrayStats(self)
@property
def debug(self):
"""set some debugging on or off"""
return self.__debug
@debug.setter
def debug(self, value):
"""set this to True to get more debug messages, like raw data value errors"""
assert isinstance(value, bool)
self.__debug = value
@property
def cache(self):
"""True if timeseries will be cached in memory"""
return self.__cache
@cache.setter
def cache(self, value):
"""set to True if every loaded timeseries should be cached in memory"""
assert isinstance(value, bool)
self.__cache = value
def set_group_keyname(self, index_keyname, group_func):
"""
set index_keyname to group values for
index_keyname <basestring> in self.index_keynames
group_func <func> function to group multiple values by
if set every __getitem__ call will group data automatically
"""
assert index_keyname in self.__index_keynames
self.__group_keyname = index_keyname
self.__group_func = group_func
@staticmethod
def to_float(value_str):
"""
        try to convert string to float, honoring "," as decimal separator if possible
otherwise raise ValueError
"""
try:
return float(value_str) # first best
except ValueError:
            return float(value_str.replace(u",", u".")) # fall back to replacing the decimal comma with a point
def to_data(self):
"""
return json encodable data
"""
ret_data = {
"index_keys" : self.__index_keynames,
"value_keys" : self.__value_keynames,
"ts_key" : self.__ts_key,
"ts_filenames" : [self.get_ts_dumpfilename(key) for key in self.keys()],
"tsa_filename" : self.get_dumpfilename(self.__index_keynames),
"datatypes" : self.datatypes
}
return ret_data
def add(self, data, group_func=None):
"""
provided data must be type <dict> having following keys
ts_keyname
all(index_keynames)
all(value_keynames)
if there are additional keys, these will be ignored
if group_func is given, entries with the same index_key are grouped by group_func
group func should be type <func> something like
lambda existing_value, new_value : (existing_value + new_value) / 2
all index_keys are converted to str
all value_keys are converted to float
ts_keyname is converted to float
"""
#assert self.__ts_key in data # timestamp key has to be in dict
#assert (type(data[self.__ts_key]) == int) or (type(data[self.__ts_key]) == float) # timestamp should be int
#assert all((value_key in data for value_key in self.__value_keynames)) # test if all keys are available
# create key from data
try:
index_key = tuple([data[key] for key in self.__index_keynames])
except KeyError:
#logging.exception(exc)
logging.error("there are index_keys missing in this dataset %s, skipping this dataset", data.keys())
return
# add data to this timeseries object
try:
            # timestamp and values have to be converted to float
            # the next steps are explicit to avoid creating empty keys if there is
            # no usable data -> conversion errors will raise ValueError
ts = float(data[self.__ts_key])
values = [self.to_float(data[key]) for key in self.__value_keynames] # must be list not tuple, to be added to another list
if index_key not in self.keys():
# if this key is new, create empty Timeseries object
logging.debug("first entry for index_key : %s", index_key)
self[index_key] = Timeseries(self.__value_keynames)
if group_func is not None:
self[index_key].group_add(ts, values, group_func)
else:
self[index_key].add(ts, values)
except KeyError as exc:
#logging.exception(exc)
if self.__debug: # some datasources have incorrect data
logging.error(exc)
logging.error("there is some key missing in %s, should be %s and %s, skipping this dataset, skipping this dataset", data.keys(), self.__ts_key, self.__value_keynames)
except ValueError as exc:
#logging.exception(exc)
if self.__debug: # some datasources have incorrect data
logging.error(exc)
logging.error("some value_keys or ts_keyname are not numeric and float convertible, skipping this dataset: %s", data)
def group_add(self, data, group_func):
"""wrapper to be api consistent, DEPRECATED"""
return self.add(data, group_func)
def append(self, key, timeserie):
"""
key <str> key to store this timeseries in dict
timeserie <Timeseries> object that holds the data
append whole timeserie to existing data
data length must be the same, but start_ts and stop_ts can be slightly different for each key
"""
#logging.debug("new start : %s, stop: %s, length %s", timeserie[0][0], timeserie[-1][0], len(timeserie))
assert key not in self.keys()
if len(self.keys()) > 0:
#logging.debug("existing start : %s, stop: %s, length %s", self.data.values()[0][0][0], self.data.values()[0][-1][0], len(self.data.values()[0]))
            existing = next(iter(self.values()))  # values() is a generator, so it cannot be indexed
            try:
                assert len(timeserie) == len(existing) # must be the same length
            except AssertionError:
                logging.error("The Timeseries object to append does not have the same length as the existing data in this array")
                logging.error("existing: %d, new: %d", len(existing), len(timeserie))
else:
logging.debug("this is the first timeseries")
self[key] = timeserie
def old_groupby(self, fieldnum, group_func, time_func="avg"):
"""
fieldnum <int>
group_func <function> to group a set of numeric values
        time_func <str> name of the statistical function used to aggregate each timeseries over time
aggregate data on one of the index keys, example
        <ts1> <hostname> <instance0> <value1>
        <ts1> <hostname> <instance1> <value2>
        <ts1> <hostname> <instance2> <value3>
        <ts2> <hostname> <instance0> <value1> # here timeseries 2 starts
        <ts2> <hostname> <instance1> <value2>
        <ts2> <hostname> <instance2> <value3>
to
<hostname> group_func(time_func(value1) + time_func(value2) + time_func(value3))
"""
ret_data = {}
for key in self.keys():
subkey = key[fieldnum]
try:
stat_data = self[key].get_stat(time_func)
if subkey not in ret_data:
ret_data[subkey] = stat_data
ret_data[subkey]["count"] = 1
else:
#aggregate, there are more than one row who match
for valuekey in stat_data:
try:
ret_data[subkey][valuekey] = group_func(ret_data[subkey][valuekey], stat_data[valuekey])
except TypeError as exc:
logging.exception(exc)
logging.error("error at operation group_func(%s, %s)", ret_data[subkey][valuekey], stat_data[valuekey])
ret_data[subkey]["count"] += 1
except Exception as exc:
logging.exception(exc)
return ret_data
def convert(self, colname, datatype, newcolname=None):
"""
call convert method of every stored Timeseries, with given parameter
"""
if self.__cache is False:
raise AttributeError("operation only applicable in cache mode, set <TimeseriesArray>.cache=True")
if colname not in self.__value_keynames:
raise KeyError("colname %s not in defined columns" % colname)
if newcolname in self.__value_keynames:
raise KeyError("newcolname %s already in defined columns" % newcolname)
for key in self.keys():
self[key].convert(colname, datatype, newcolname)
        if newcolname is not None:  # None means convert in place, so there is no new column to register
            self.__value_keynames.append(newcolname)
def add_derive_col(self, colname, newcolname):
logging.info("DEPRECATED function add_derive_col use convert(%s, 'derive', %s)", colname, newcolname)
return self.convert(colname, "derive", newcolname)
def add_per_s_col(self, colname, newcolname):
logging.info("DEPRECATED function add_derive_col use convert(%s, 'persecond', %s)", colname, newcolname)
return self.convert(colname, "persecond", newcolname)
def add_calc_col_single(self, colname, newcolname, func=lambda a: a):
"""
add a new column to every Timeserie, calculated from one single column in this row
parameters:
colname <str> name of existing column
newcolname <str> name of new column
func <func> function to call for every row, wtih data from colname
lambda <float> : <float>
return:
None
"""
if self.__cache is False:
raise AttributeError("operation only applicable in cache mode, set <TimeseriesArray>.cache=True")
if colname not in self.__value_keynames:
raise KeyError("colname %s not in defined columns" % colname)
if newcolname in self.__value_keynames:
raise KeyError("newcolname %s already in defined columns" % newcolname)
for key in self.keys():
self[key].add_calc_col_single(colname, newcolname, func)
self.__value_keynames.append(newcolname)
def add_calc_col_full(self, newcolname, func):
"""
        add a new column to every Timeserie, calculated from existing row data
parameters:
newcolname <str> name of new column
func <func> function to call for every existing row
lambda <dict> : <float>
returns:
None
"""
if self.__cache is False:
raise AttributeError("operation only applicable in cache mode, set <TimeseriesArray>.cache=True")
if newcolname in self.__value_keynames:
raise KeyError("newcolname %s already in defined columns" % newcolname)
for key in self.keys():
self[key].add_calc_col_full(newcolname, func)
self.__value_keynames.append(newcolname)
def remove_col(self, colname):
"""
remove column named from every Timeserie
parameters:
colname <str>
returns:
None
"""
if self.__cache is False:
raise AttributeError("operation only applicable in cache mode, set <TimeseriesArray>.cache=True")
if colname not in self.__value_keynames:
raise KeyError("colname %s not in defined columns" % colname)
for key in self.keys():
self[key].remove_col(colname)
self.__value_keynames.remove(colname)
def slice(self, colnames):
"""
return copy of TimeseriesArray, but only defined value_keynames
parameters:
colnames <list> of value_keynames in returned TimeseriesArray
returns:
TimeseriesArray
"""
ret_data = TimeseriesArray(index_keynames=self.__index_keynames, value_keynames=colnames, ts_key=self.__ts_key)
for key in self.keys():
ret_data[key] = self[key].slice(colnames)
return ret_data
def export(self):
"""
        export all stored data in a form that can directly feed the add() function of another TimeseriesArray object
        create a new object tsa_new (tsa is the existing one) like
        tsa_new = TimeseriesArray(tsa.index_keynames, tsa.value_keynames, tsa.ts_key)
        and then feed it with: for row in tsa.export(): tsa_new.add(row)
"""
for key in self.keys():
timeseries = self[key]
# convert key tuple to dict
key_dict = self.get_index_dict(key)
# dump timeseries as dictionary and spice dict up with
# key_dict
for row in self[key].to_dict():
row.update(key_dict)
yield row
def dump(self, outpath, overwrite=False):
"""
dump all data to directory in csv format, filename will be auto generated
parameters:
outpath <str> must be existing directory
overwrite <bool> overwrite existing Timeseries files, or not
        the TimeseriesArray file is written regardless of whether this option is set
"""
tsa_filename = self.get_dumpfilename(self.__index_keynames)
logging.debug("tsa_filename: %s", tsa_filename)
tsa_outfilename = os.path.join(outpath, tsa_filename)
outbuffer = {
"index_keys" : self.__index_keynames,
"value_keys" : self.__value_keynames,
"ts_key" : self.__ts_key,
"ts_filenames" : []
}
for key in self.keys():
timeseries = self[key]
ts_filename = self.get_ts_dumpfilename(key)
# skip dump, if file exists, and overwrite=False
ts_outfilename = os.path.join(outpath, ts_filename)
if not os.path.isfile(ts_outfilename) or overwrite:
logging.debug("dumping key %s to filename %s", key, ts_filename)
with gzip.open(ts_outfilename, "wt") as outfile:
timeseries.dump(outfile)
outbuffer["ts_filenames"].append(ts_filename)
with open(tsa_outfilename, "wt") as outfile:
json.dump(outbuffer, outfile)
outfile.flush()
dump_split = dump
@staticmethod
def get_ts_dumpfilename(key):
"""
return filename of Timeseries dump File
parameters:
key <tuple> index_key of this particular Timeseries
returns:
<str>
"""
return "ts_%s.csv.gz" % b64encode(key)
@staticmethod
def get_dumpfilename(index_keys):
"""
        return filename of TimeseriesArray dump file
parameters:
index_keys <tuple> of particular index_keys of this Timeseries
returns:
<str>
"""
return "tsa_%s.json" % b64encode(index_keys)
@staticmethod
def filtermatch(key_dict, filterkeys, matchtype):
"""
key_dict is the whole index key, aka
{hostname : test, instance:1, other:2}
filterkey is part
{hostname : test}
{hostname : test, instance: None, other: None}
TODO: to be improved
"""
assert matchtype in ("and", "or")
matched = 0
for key in filterkeys.keys():
if filterkeys[key] is None: # ignore keys with value None
if matchtype == "and": # and count them as matched
matched += 1
continue
if key_dict[key] == filterkeys[key]:
matched += 1
# every key must match at AND
if (matchtype == "and") and (matched == len(filterkeys.keys())):
return True
# at least one key must match at OR
elif (matchtype == "or") and (matched > 0):
return True
return False
@staticmethod
def get_ts_filenames(path, index_keys, filterkeys=None, matchtype="and"):
"""
filterkeys could be a part of existing index_keys
all matching keys will be used
"""
tsa_filename = TimeseriesArray.get_dumpfilename(index_keys)
logging.debug("tsa_filename: %s", tsa_filename)
with open(os.path.join(path, tsa_filename), "rt") as infile:
data = json.load(infile)
logging.debug("loaded json data")
logging.debug("index_keys: %s", data["index_keys"])
logging.debug("value_keys: %s", data["value_keys"])
logging.debug("ts_key: %s", data["ts_key"])
logging.debug("number of ts files: %s", len(data["ts_filenames"]))
filenames = {}
for filename in data["ts_filenames"]:
logging.debug("parsing Timeseries filename %s", filename)
enc_key = filename.split(".")[0][3:] # only this pattern ts_(.*).csv.gz
key = eval(b64decode(enc_key))
key_dict = dict(zip(index_keys, key))
if filterkeys is not None:
if TimeseriesArray.filtermatch(key_dict, filterkeys, matchtype):
logging.debug("adding tsa key : %s", key)
filenames[key] = os.path.join(path, filename)
else:
# no filterkeys means every file is loaded
logging.debug("adding tsa key : %s", key)
filenames[key] = os.path.join(path, filename)
return filenames
@staticmethod
def load(path, index_keys, filterkeys=None, index_pattern=None, matchtype="and", datatypes=None):
"""
load stored tsa data from directory <path>
filterkeys could be a part of existing index_keys
all matching keys will be used
index_keys <tuple> * required
filterkeys <tuple> default None
matchtype <str> default "and"
index_pattern <str> for use in re.compile(index_pattern)
return:
<TimeseriesArray>
"""
# get filename and load json structure
tsa_filename = TimeseriesArray.get_dumpfilename(index_keys)
with open(os.path.join(path, tsa_filename), "rt") as infile:
data = json.load(infile)
# create object
tsa = TimeseriesArray(data["index_keys"], data["value_keys"], data["ts_key"], datatypes=datatypes)
# load full or filter some keys
if index_pattern is None:
for key, filename in tsa.get_ts_filenames(path, index_keys, filterkeys, matchtype).items():
tsa.ts_autoload[key] = filename
tsa[key] = None
else:
logging.info("using index_pattern %s to filter index_keys", index_pattern)
rex = re.compile(index_pattern)
for key, filename in tsa.get_ts_filenames(path, index_keys, filterkeys, matchtype).items():
m = rex.match(str(key))
if m is not None:
tsa.ts_autoload[key] = filename
tsa[key] = None
else:
logging.debug("index_key %s skipped by filter", key)
return tsa
load_split = load
def __autoload_ts(self, key):
"""
try to load TimeSeries given by key
key has to be already in TimeseriesArray structure
parameters:
key : <tuple>
"""
if key in self.ts_autoload:
filename = self.ts_autoload[key]
logging.debug("auto-loading Timeseries from file %s", filename)
with gzip.open(filename, "rt") as infile:
timeseries = Timeseries.load_from_csv(infile)
# convert raw timeseries to datatype
for colname, datatype in self.datatypes.items():
if datatype == "asis":
continue
timeseries.convert(colname, datatype, None)
return timeseries
else:
raise KeyError("key %s not in TimeseriesArray", key)
TimeseriesArrayLazy = TimeseriesArray
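# Rough end-to-end sketch (paths and key names invented for illustration):
#
#   tsa = TimeseriesArray(("hostname",), ["load"], ts_key="ts")
#   tsa.add({"ts": 1456000000, "hostname": "srv01", "load": "1.5"})
#   tsa.dump("/tmp/tsa_dump")    # writes one tsa_*.json plus ts_*.csv.gz files
#   tsa2 = TimeseriesArray.load("/tmp/tsa_dump", ("hostname",), datatypes={"load": "asis"})
#
# load() only records the per-key filenames; the actual Timeseries objects are
# read lazily on first access via __getitem__ (see __autoload_ts above).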
|
|
#! /usr/bin/env python
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import OpenGL
import sys
import getopt
import array
import random
from math import *
objectXform = [
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]
]
#The global viewing angle
theta = [0.0,0.0,0.0]
thetaIncr = 5.0
#A global for the amount of time a mole is up
moleTime = 3000
#a global for the current score
score = 0
#a little class for each of the moles
class Mole:
moleNumber = 99#bogus data...
moleStatus = 'MOLE_DOWN'
#The constructor
def __init__(self,id):
self.moleNumber = id
def drawMole(self,mode):
glPushMatrix()
glRotatef(90,1.0,0.0,0.0)
if(mode == GL_SELECT):
glLoadName(self.moleNumber)
if(self.moleStatus == 'MOLE_DOWN'):
glMaterialfv(GL_FRONT, GL_DIFFUSE, [0.0, 0.3, 0.0, 1.0])
else:
glMaterialfv(GL_FRONT, GL_DIFFUSE, [1.0, 1.0, 1.0, 1.0])
glutSolidCylinder(.3,1.0,20,20)
glPopMatrix()
def showMole(self):
global moleTime
self.moleStatus = 'MOLE_UP'
glutTimerFunc(moleTime,self.moleDown,0)
display()
def moleDown(self,step):
self.moleStatus = 'MOLE_DOWN'
display()
#The global list of moles
moles = []
def main():
global moleTime
global moles
for i in range(0,9):
moles.append(Mole(i))
argc = len(sys.argv)
glutInit(argc,sys.argv)
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(700, 700)
glutInitWindowPosition(100, 100)
glutCreateWindow("Whack-A-Mole!")
glEnable(GL_DEPTH_TEST)
    #This function is what actually draws stuff... It's called
#every time the window is redrawn
glutDisplayFunc(display)
glutReshapeFunc(reshapeCallback)
glutMouseFunc(mouseCallback)
glutKeyboardFunc(keyCallback)
glutTimerFunc(moleTime/2,randomMole,1)
glutMainLoop()
def randomMole(step):
global moles
moles[random.randint(0,8)].showMole()
glutTimerFunc(moleTime/2,randomMole,1)
display()
def display():
    #This code runs whenever the window is redrawn
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
setUpView()
setUpLight()
setUpModelTransform()
glPushMatrix()
glMultMatrixf(objectXform)
drawObjs(GL_RENDER)
glPopMatrix()
glutSwapBuffers()
def drawObjs(mode):
drawBoard(mode)
drawMoles(mode)
return
def drawBoard(mode):
if(mode == GL_SELECT):
glLoadName(999)
glPushMatrix()
glScalef(5.0,1.0,5.0)
glMaterialfv(GL_FRONT, GL_DIFFUSE, [0.2, 0.2, 0.2, 1.0])
glutSolidCube(1.0)
glPopMatrix()
return
def drawMoles(mode):
global moles
glPushMatrix()
glTranslatef(-2.0,1.0,-2.0)
for j in range(0,3):
glPushMatrix()
for i in range (0,3):
moles[3*j+i].drawMole(mode)
glTranslatef(2.0,0,0)
glPopMatrix()
glTranslatef(0.0,0,2.0)
glPopMatrix()
def proscessHits(buffer):
global moles
global score
    if(len(buffer)>0):
        minimum, maximum, closest = buffer[0]
        for hit_record in buffer:
            min_depth, max_depth, names = hit_record
            if(min_depth <= minimum):
                minimum = min_depth # keep track of the nearest hit seen so far
                closest = names
        if(closest[0] < 9):
            if(moles[closest[0]].moleStatus == 'MOLE_UP'):
                score = score+1
                moles[closest[0]].moleDown(0)
def setUpView():
#this code initializes the viewing transform
glLoadIdentity()
#moves viewer along coordinate axes
gluLookAt(0.0, 0.0, 5.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)
#move the view back some relative to viewer[] position
glTranslatef(0.0,0.0, 0.0);
# rotates view
glRotatef(0, 1.0, 0.0, 0.0);
glRotatef(0, 0.0, 1.0, 0.0);
glRotatef(0, 0.0, 0.0, 1.0);
return
def setUpModelTransform():
#Rotates models
glRotatef(theta[0],1.0,0.0,0.0)
glRotatef(theta[1],0.0,1.0,0.0)
glRotatef(theta[2],0.0,0.0,1.0)
def setUpLight():
#set up the light sources for the scene
# a directional light source from directly behind
lightDir = [0.0, 0.0, 5.0, 0.0];
diffuseComp = [1.0, 1.0, 1.0, 1.0];
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glLightfv(GL_LIGHT0, GL_POSITION, lightDir);
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseComp);
return;
##########################
#Begin Callback Functions#
##########################
def mouseCallback(button, state, x, y):
#If the user clicks the left button
if((button==GLUT_LEFT_BUTTON) and (state == GLUT_DOWN)):
SIZE = 20 #The size of the selection buffer
viewport = glGetIntegerv(GL_VIEWPORT)
glSelectBuffer(SIZE)
glRenderMode(GL_SELECT)
glPushName(0)
glPushMatrix()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
#This is the only transform difference in the hierarchy
gluPickMatrix(x, viewport[3] - y, 5.0, 5.0, viewport)
w = viewport[2]
h = viewport[3]
setUpProjection(w,h)
#The rest is the same as display(), except
# the mode flag is GL_SELECT
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
setUpView()
setUpModelTransform()
glMultMatrixf(objectXform)
drawObjs(GL_SELECT)
glPopMatrix()
glFlush()
hits = glRenderMode(GL_RENDER)
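        # Each entry in 'hits' is a selection-buffer hit record of the form
        # (min_depth, max_depth, [names...]); the names come from the name stack
        # set up with glPushName()/glLoadName() while drawing in GL_SELECT mode.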
proscessHits(hits)
#This should re-render everything again for display
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
setUpProjection(w,h)
display()
def keyCallback(key,x,y):
global theta
global thetaIncr
global moleTime
if(key == 'w'):
theta[0] = normalizeAngle(theta[0]+thetaIncr)
elif(key == 's'):
theta[0] = normalizeAngle(theta[0]-thetaIncr)
elif(key == 'a'):
theta[1] = normalizeAngle(theta[1]+thetaIncr)
elif(key == 'd'):
theta[1] = normalizeAngle(theta[1]-thetaIncr)
if((key == '+') or (key == '=')):
moleTime += 10
elif((key == '-') or (key == '_')):
moleTime -= 10
display()
def normalizeAngle(degrees):
if(degrees > 360):
degrees -= 360.0
return degrees
def reshapeCallback(w,h):
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
setUpProjection(w, h);
def setUpProjection(w,h):
    #This code initializes the projection transform
    glViewport(0, 0, w, h)
    if (w < h):
        # taller than wide: stretch the frustum vertically to keep the aspect ratio
        glFrustum(-2.0, 2.0, -2.0*(float(h)/w), 2.0*(float(h)/w), 2.0, 200.0)
    else:
        # wider than tall: stretch the frustum horizontally to keep the aspect ratio
        glFrustum(-2.0*(float(w)/h), 2.0*(float(w)/h), -2.0, 2.0, 2.0, 200.0)
glMatrixMode(GL_MODELVIEW)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
try:
import json
except ImportError:
  import simplejson as json
from django.db import models
from django.core import urlresolvers
from django.contrib.auth.models import User
from jobsub.parameterization import find_parameters, bind_parameters
from django.utils.translation import ugettext_lazy as _
LOG = logging.getLogger(__name__)
class JobDesign(models.Model):
"""
DEPRECATED!!!
This is the old Hue 1.x job design model. In Hue 2, the design is modeled
after Oozie workflows.
Contains CMS information for "job designs".
"""
owner = models.ForeignKey(User)
name = models.CharField(max_length=40)
description = models.CharField(max_length=1024)
last_modified = models.DateTimeField(auto_now=True)
# Type corresponds to a JobSubForm that gets registered in jobsub.forms.interface.registry
type = models.CharField(max_length=128)
# Data is serialized via JobSubFormInterface.serialize_[to|from]_string
data = models.TextField()
def edit_url(self):
return urlresolvers.reverse("jobsub.views.edit_design", kwargs=dict(id=self.id))
def clone_url(self):
return urlresolvers.reverse("jobsub.views.clone_design", kwargs=dict(id=self.id))
def delete_url(self):
return urlresolvers.reverse("jobsub.views.delete_design", kwargs=dict(id=self.id))
def submit_url(self):
return urlresolvers.reverse("jobsub.views.submit_design", kwargs=dict(id=self.id))
def clone(self):
clone_kwargs = dict([(field.name, getattr(self, field.name)) for field in self._meta.fields if field.name != 'id']);
return self.__class__.objects.create(**clone_kwargs)
def to_jsonable(self):
return {
'owner': self.owner.username,
'name': self.name,
'last_modified': str(self.last_modified),
'type': self.type,
'data': repr(self.data)
}
class CheckForSetup(models.Model):
"""
A model which should have at most one row, indicating
  whether jobsub_setup has run successfully.
"""
# Pre-Hue2 setup
setup_run = models.BooleanField()
# What kind of setup have we done?
setup_level = models.IntegerField(default=0)
################################## New Models ################################
PATH_MAX = 512
class OozieAction(models.Model):
"""
DEPRECATED!!!
This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
Oozie models are used.
The OozieAction model is an abstract base class. All concrete actions
derive from it. And it provides something for the OozieDesign to
reference. See
https://docs.djangoproject.com/en/dev/topics/db/models/#multi-table-inheritance
"""
PARAM_FIELDS = ( ) # Nothing is parameterized by default
# This allows the code to easily figure out which subclass to access
action_type = models.CharField(max_length=64, blank=False)
def find_parameters(self):
"""Return a list of parameters in the various fields"""
return find_parameters(self, self.PARAM_FIELDS)
def bind_parameters(self, mapping):
"""
Change the values of the model object by replacing the param variables
with actual values.
Mapping is a dictionary of variable to value.
"""
# We're going to alter this object. Disallow saving (for models).
self.save = None
bind_parameters(self, mapping, self.PARAM_FIELDS)
class OozieDesign(models.Model):
"""
DEPRECATED!!!
This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
Oozie models are used.
Contains information about all (Oozie) designs. Specific action info are
stored in the Oozie*Action models.
"""
# Generic stuff
owner = models.ForeignKey(User)
name = models.CharField(max_length=64, blank=False,
help_text=_('Name of the design, which must be unique per user.'))
description = models.CharField(max_length=1024, blank=True)
last_modified = models.DateTimeField(auto_now=True)
# Action. Avoid using `root_action' directly, because it only gives you the
# intermediate table (i.e. OozieAction). You want to use `get_root_action()'
# most of the time.
root_action = models.ForeignKey(OozieAction)
def get_root_action(self):
"""Return the concrete action object, not just a generic OozieAction"""
root = self.root_action
if root is None:
return None
if root.action_type == OozieMapreduceAction.ACTION_TYPE:
return root.ooziemapreduceaction
elif root.action_type == OozieStreamingAction.ACTION_TYPE:
return root.ooziestreamingaction
elif root.action_type == OozieJavaAction.ACTION_TYPE:
return root.ooziejavaaction
LOG.error("Oozie action type '%s' is not valid (jobsub_oozieaction.id %s)"
% (root.action_type, root.id))
return None
def clone(self, new_owner=None):
"""Return a newly saved instance."""
action_copy = self.get_root_action()
action_copy.pk = None # Need a new OozieAction (superclass instance)
action_copy.id = None # Need a new action instance as well
action_copy.save()
copy = self
copy.pk = None
copy.root_action = action_copy
if new_owner is not None:
copy.owner = new_owner
copy.save()
return copy
def find_parameters(self):
return self.get_root_action().find_parameters()
def bind_parameters(self, mapping):
return self.get_root_action().bind_parameters(mapping)
class OozieMapreduceAction(OozieAction):
"""
DEPRECATED!!!
This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
Oozie models are used.
Stores MR actions
"""
PARAM_FIELDS = ('files', 'archives', 'job_properties', 'jar_path')
ACTION_TYPE = "mapreduce"
# For the distributed cache. JSON arrays.
files = models.CharField(max_length=PATH_MAX, default="[]",
help_text=_('List of paths to files to be added to the distributed cache.'))
archives = models.CharField(max_length=PATH_MAX, default="[]",
help_text=_('List of paths to archives to be added to the distributed cache.'))
# For the job configuration. JSON dict. Required (e.g. mapred.mapper.class).
job_properties = models.TextField(default="[]")
# Location of the jar in hdfs
jar_path = models.CharField(max_length=PATH_MAX,
help_text=_('Path to jar files on HDFS.'))
class OozieStreamingAction(OozieAction):
"""
DEPRECATED!!!
This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
Oozie models are used.
This is still an MR action from Oozie's perspective. But the data modeling is
slightly different.
Note that we don't inherit from OozieMapreduceAction because we want the data
to be in one place.
"""
PARAM_FIELDS = ('files', 'archives', 'job_properties', 'mapper', 'reducer')
ACTION_TYPE = "streaming"
# For the distributed cache. JSON arrays.
files = models.CharField(max_length=PATH_MAX, default="[]")
archives = models.CharField(max_length=PATH_MAX, default="[]")
# For the job configuration. JSON dict. Required (e.g. mapred.input.dir).
job_properties = models.TextField(default="[]")
# Scripts/commands (paths in hdfs)
mapper = models.CharField(max_length=PATH_MAX, blank=False)
reducer = models.CharField(max_length=PATH_MAX, blank=False)
class OozieJavaAction(OozieAction):
"""
DEPRECATED!!!
This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
Oozie models are used.
Definition of Java actions
"""
PARAM_FIELDS = ('files', 'archives', 'jar_path', 'main_class', 'args',
'java_opts', 'job_properties')
ACTION_TYPE = "java"
# For the distributed cache. JSON arrays.
files = models.CharField(max_length=PATH_MAX, default="[]",
help_text=_('List of paths to files to be added to the distributed cache.'))
archives = models.CharField(max_length=PATH_MAX, default="[]",
help_text=_('List of paths to archives to be added to the distributed cache.'))
# Location of the jar in hdfs
jar_path = models.CharField(max_length=PATH_MAX, blank=False)
main_class = models.CharField(max_length=256, blank=False)
args = models.TextField(blank=True)
java_opts = models.CharField(max_length=256, blank=True)
# For the job configuration. JSON dict.
job_properties = models.TextField(default="[]")
class JobHistory(models.Model):
"""
DEPRECATED!!!
This is the old Hue 2.0/2.1 job design model. In Hue 2.2 and newer,
Oozie models are used.
  Contains information on submitted jobs/workflows.
"""
owner = models.ForeignKey(User)
submission_date = models.DateTimeField(auto_now=True)
job_id = models.CharField(max_length=128)
design = models.ForeignKey(OozieDesign)
|
|
#!/usr/bin/python3
# lltapes.py - Uses BeautifulSoup and requests to parse and download episodes
# from lovelinetapes.
import re
import sys
import os
import logging
import argparse
# for multi-threading
from queue import Queue
from threading import Thread
import requests
from bs4 import BeautifulSoup
URL_BASE = "http://www.lovelinetapes.com/shows/"
EXTEN = '.mp3'
CHUNK_SIZE = 1024*8
class DownloadWorker(Thread):
"""Saves episodes by show_id from queue"""
def __init__(self, queue):
super(DownloadWorker, self).__init__()
self.queue = queue
def run(self):
while True:
            # Get the next show_id to download from the queue
show_id = self.queue.get()
save_ep(show_id)
self.queue.task_done()
def get_page(show_id):
"""Uses bs4 to get the page of a given Loveline episode by show_id.
Argument:
show_id: numeric id (as string) from Loveline Tapes site.
Returns:
BeautifulSoup object of the page, decoded with lxml.
"""
url = (URL_BASE + '?id=' + show_id)
logging.info('Downloading page %s...' % url)
res = requests.get(url)
res.raise_for_status()
return BeautifulSoup(res.text, "lxml")
def get_year_links(year):
"""Uses bs4 to get show_ids for all episodes in a given year.
Argument:
year: a year (as string) to search the Loveline Tapes site.
soup: optional BeautifulSoup object to search.
Returns:
List containing strings of show_ids in the given year.
"""
url = (URL_BASE + 'browse/?y=' + year)
logging.info('Downloading page %s...' % url)
res = requests.get(url)
res.raise_for_status()
soup = BeautifulSoup(res.text, "lxml")
# Find the URL elements
links = soup.find_all('a', attrs={'class': 'showLink'})
show_id, show_info = [], []
for link in links:
show_id.append(link.get('href').replace('/shows/?id=', ''))
#show_info.append(link.find('div', attrs={'class': 'details'}).text.strip().split('\n'))
return show_id
def get_mp3_url(show_id, soup=None):
"""Uses bs4 to get link to mp3 recording of Loveline episode.
Argument:
show_id: numeric id (as string) from Loveline Tapes site.
soup: optional BeautifulSoup object to search.
Returns:
String of the link to mp3 recording.
"""
if not isinstance(soup, BeautifulSoup):
soup = get_page(show_id)
page_url = soup.find('meta', attrs={'property': 'og:url'}) #full link w/ redirect to mp3 is in this meta tag
if re.search(r'h\=\w+', page_url['content']) is None:
mp3_link = None
url_ep = None
else:
mp3_link = re.search(r'h\=\w+', page_url['content']).group().replace('h=', '') #match on the mp3 link format
url_ep = "http://recordings.lovelinetapes.com/" + mp3_link + EXTEN
return url_ep
def get_page_title(show_id, soup=None):
"""Uses bs4 to get guest and date of Loveline episode.
Argument:
show_id: numeric id (as string) from Loveline Tapes site
soup: optional BeautifulSoup object to search
Returns:
Dictionary containing guest, year, month and day as strings.
Month and day are zero padded.
"""
if not isinstance(soup, BeautifulSoup):
soup = get_page(show_id)
title = soup.find('title')
title_date = re.search(r'\d?\d\/\d?\d\/\d{4}', title.text) # no matches if not a valid id
if title_date is None:
        title_month = title_day = title_year = None
else:
title_month = title_date.group().split('/')[0].zfill(2)
title_day = title_date.group().split('/')[1].zfill(2)
title_year = title_date.group().split('/')[2]
title_guest = re.search(r'\d{4}.*', title.text) # no matches if not a valid id
if title_guest is None:
guest = None
else:
guest = re.sub(r'\d{4} - ', '', title_guest.group())
return{'guest': guest, 'year': title_year, 'month': title_month, 'day': title_day}
def save_ep(show_id, soup=None):
"""Uses requests to download an mp3 recording of a Loveline episode.
Checks to determine if the episode already exists and is of expected size.
Argument:
show_id: numeric id (as string) from Loveline Tapes site.
soup: optional BeautifulSoup object to search.
Returns:
Nothing, downloads episode to current directory.
"""
if not isinstance(soup, BeautifulSoup):
soup = get_page(show_id)
url_ep = get_mp3_url(show_id, soup)
title = get_page_title(show_id, soup)
date = title['year'] + '-' + title['month']+ '-' + title['day']
name = ('Loveline - ' + date + ' (Guest - ' + title['guest'] + ')' + EXTEN)
save_name = safe_name(name)
curpath = os.path.abspath(os.curdir)
# Download the episode
if url_ep is not None:
logging.info('Downloading {}'.format(url_ep))
r = requests.get(url_ep, stream=True)
r.raise_for_status()
size = int(r.headers['Content-Length'])
if os.path.isfile(save_name) and (os.path.getsize(save_name) == size):
# check if file already exists and is of the expected size
            # can still replace interrupted downloads
logging.warning('Episode already downloaded, skipping.')
else:
if r.status_code == 200:
logging.info('Saving as: {}'.format(save_name))
logging.info('Expected file size: {} bytes'.format(size))
with open(os.path.join(curpath, save_name), 'wb') as f:
for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
f.write(chunk)
else:
logging.warning('Problem loading page: HTML response code {}'.format(r.status_code))
else:
logging.warning('Episode url not found')
def safe_name(save_name):
"""Removes and replaces characters in a filename to make it safe for the filesystem.
Argument:
save_name: filename to work with (no path).
Returns:
string of the filename after disallowed characters are removed.
None if no filename is provided.
"""
    path, tail = os.path.split(save_name)
    if tail:
        safe_name = tail.replace('/', ' ')
        return safe_name
    else:
        return None
def find_link(show_id, direction, soup=None):
"""Uses bs4 to find links to next or previous episodes from current show id
of Loveline episode.
Argument:
show_id: numeric id (as string) from Loveline Tapes site.
soup: optional BeautifulSoup object to search.
Returns:
None if direction is not 'left' or 'right'
Dictionary containing direction of link, show id, show guest, and show date.
"""
if not isinstance(soup, BeautifulSoup):
soup = get_page(show_id)
# Find the URL elements
    if direction in ('left', 'right'):
link = soup.find_all('a', attrs={'style': 'float: {};'.format(direction), 'class': 'showLink'})
else:
return None
show_id = link[0].get('href').replace('/shows/?id=', '')
show_info = link[0].find('div', attrs={'class': 'details'}).text.strip().split('\n')
return{'direction': direction, 'show_id': show_id, 'show_guest': show_info[0], 'show_date': show_info[1]}
def main():
# set up logging
logging.basicConfig(filename='lltapes.log', level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.getLogger('requests').setLevel(logging.CRITICAL)
logger = logging.getLogger(__name__)
# set up argparse
parser = argparse.ArgumentParser()
parser.add_argument('-y', '--year', help='Year of Loveline to search.')
parser.add_argument('-s', '--show', help='Show, based on Loveline Tapes show id, to search.')
parser.add_argument('-d', '--download', help='Download flag to download episode.', action='store_true')
parser.add_argument('-t', '--threads', help='Number of threads to use to download episodes, default to 4.', default=4, type=int)
args = parser.parse_args()
if args.show:
show_id = args.show
soup = get_page(show_id)
links = find_link(show_id, 'right', soup)
url_ep = get_mp3_url(show_id, soup)
title = get_page_title(show_id, soup)
if title['guest'] is None:
logging.warning('Could not find show.')
else:
try:
logging.info('Guest: ' + title['guest'])
if title['year'] is not None:
logging.info('Date: ' + title['year'] + '-' + title['month']+ '-' + title['day'])
logging.info(url_ep)
logging.info(links)
if args.download:
save_ep(show_id, soup)
except requests.exceptions.MissingSchema:
# skip this
logging.warning('Could not find show.')
elif args.year:
year = get_year_links(args.year)
# Create a Queue to communicate with the worker threads
queue = Queue()
        # Create the worker threads (args.threads of them, 4 by default)
for x in range(args.threads):
worker = DownloadWorker(queue)
# Setting daemon to True will let the main thread exit even though the workers are blocking
worker.daemon = True
worker.start()
# Put tasks into the queue as tuples
for show_id in year:
logger.info('Queueing {}'.format(show_id))
queue.put(show_id)
# Causes the main thread to wait for the queue to finish processing all the tasks
queue.join()
else:
parser.print_help()
sys.exit(1)
if __name__ == "__main__":
main()
|
|
'''
API for the laws app
'''
import logging
from django.core.urlresolvers import reverse
from tastypie.constants import ALL
import tastypie.fields as fields
from agendas.templatetags.agendas_tags import agendas_for
from apis.resources.base import BaseResource
from mks.models import Member, Party
from mks.api import MemberResource
from video.utils import get_videos_queryset
from video.api import VideoResource
from links.models import Link
from links.api import LinkResource
from models import Law, Bill, Vote, VoteAction, PrivateProposal
from simple.management.commands.syncdata_globals import p_explanation
from agendas.models import AgendaVote
from datetime import datetime, timedelta
logger = logging.getLogger("open-knesset.laws.api")
class LawResource(BaseResource):
class Meta(BaseResource.Meta):
queryset = Law.objects.all()
allowed_methods = ['get']
class TagResource(BaseResource):pass
class ProvateProposalResource(BaseResource):
class Meta(BaseResource.Meta):
queryset = PrivateProposal.objects.all()
allowed_methods = ['get']
filtering = dict(from_date=ALL,
to_date=ALL)
tags = fields.ToManyField(TagResource,
'tags',
null=True,
full=False)
def dehydrate_tags(self, bundle):
return [tag.name for tag in bundle.obj.bill.tags]
def build_filters(self, filters={}):
orm_filters = super(ProvateProposalResource, self).build_filters(filters)
if 'from_date' in filters:
orm_filters["date__gte"] = filters['from_date']
if 'to_date' in filters:
            # to_date needs to be incremented by a day: when humans say to_date=2014-07-30 they
            # actually mean midnight between the 30th and the 31st, while python interprets it as
            # midnight between the 29th and the 30th
            to_date = datetime.strptime(filters["to_date"], "%Y-%m-%d") + timedelta(days=1)
            orm_filters["date__lte"] = to_date.strftime("%Y-%m-%d")
return orm_filters
class VoteActionResource(BaseResource):
class Meta(BaseResource.Meta):
queryset = VoteAction.objects.all()
allowed_methods = ['get']
excludes = ['type','id']
include_resource_uri = False
filtering = {
'against_own_bill': ALL,
}
list_fields = [
'member', 'party', 'vote', 'against_party', 'against_coalition', 'against_opposition', 'against_own_bill', 'member_title', 'vote_title', 'member_url', 'vote_url', 'vote_time'
]
vote_type = fields.CharField('type',null=True)
member = fields.ToOneField(MemberResource, 'member', full=False)
party = fields.ToOneField('mks.api.PartyResource', 'party', full=False)
vote = fields.ToOneField('laws.api.VoteResource', 'vote', full=False)
member_title = fields.CharField('member')
vote_title = fields.CharField('vote')
member_url = fields.CharField('member__get_absolute_url')
vote_url = fields.CharField('vote__get_absolute_url')
vote_time = fields.DateTimeField('vote__time')
class VoteResource(BaseResource):
class Meta(BaseResource.Meta):
queryset = Vote.objects.all()
allowed_methods = ['get']
list_fields = [
'time', 'title', 'vote_type', 'votes_count', 'for_votes_count',
'against_votes_count', 'meeting_number', 'vote_number',
            'importance', 'controversy', 'against_party', 'against_coalition',
'against_opposition', 'against_own_bill',
]
        filtering = dict(tag=('exact',),
member=ALL,
member_for=ALL,
member_against=ALL,
from_date=ALL,
to_date=ALL)
votes = fields.ToManyField(VoteActionResource,
attribute=lambda bundle:VoteAction.objects.filter(
vote=bundle.obj).select_related('member'),
null=True,
full=True)
agendas = fields.ListField()
tags = fields.ToManyField('auxiliary.api.TagResource',
attribute=lambda t: t.obj.tags,
null=True,
full=False)
def build_filters(self, filters={}):
orm_filters = super(VoteResource, self).build_filters(filters)
if 'member' in filters:
orm_filters["voteaction__member"] = filters['member']
if 'member_for' in filters:
orm_filters["voteaction__member"] = filters['member_for']
orm_filters["voteaction__type"] = 'for'
if 'member_against' in filters:
orm_filters["voteaction__member"] = filters['member_against']
orm_filters["voteaction__type"] = 'against'
if 'tag' in filters:
# hard-coded the __in filter. not great, but works.
orm_filters["tagged_items__tag__in"] = \
filters["tag"].split(',')
if 'from_date' in filters:
orm_filters["time__gte"] = filters['from_date']
if 'to_date' in filters:
            # to_date needs to be incremented by a day: when humans say to_date=2014-07-30 they
            # actually mean midnight between the 30th and the 31st, while python interprets it as
            # midnight between the 29th and the 30th
            to_date = datetime.strptime(filters["to_date"], "%Y-%m-%d") + timedelta(days=1)
            orm_filters["time__lte"] = to_date.strftime("%Y-%m-%d")
return orm_filters
def dehydrate_agendas(self, bundle):
agendavotes = bundle.obj.agendavotes.select_related('agenda')
result = []
for avote in agendavotes:
agenda = avote.agenda
resource_uri = reverse(
'api_dispatch_detail',
kwargs={
'resource_name': 'agenda', 'api_name': 'v2',
'pk': agenda.pk})
agenda_bundle = {
'name': agenda.name,
'image': agenda.image.url if agenda.image else None,
'resource_uri': resource_uri,
'score': avote.score,
'importance': avote.importance,
'reasoning': avote.reasoning,
}
result.append(agenda_bundle)
return result
class PrivateProposalResource(BaseResource):
class Meta(BaseResource.Meta):
queryset = PrivateProposal.objects.all()
allowed_methods = ['get']
class BillAgendaResource(BaseResource):pass
def detailed_agendas(agenda_list):
result = []
for xagenda in agenda_list:
agenda = xagenda.agenda
resource_uri = reverse(
'api_dispatch_detail',
kwargs={
'resource_name': 'agenda', 'api_name': 'v2',
'pk': agenda.pk})
result.append({
'name': agenda.name,
'image': agenda.image.url if agenda.image else None,
'resource_uri': resource_uri,
'public_owner_name' : agenda.public_owner_name,
'reasoning' : xagenda.reasoning,
'score' : xagenda.score,
'importance' : xagenda.importance,
})
return result
class BillResource(BaseResource):
''' Bill API '''
class Meta(BaseResource.Meta):
queryset = Bill.objects.all()
allowed_methods = ['get']
ordering = ['stage_date', 'title']
filtering = dict(stage=ALL, proposer=ALL)
list_fields = [
'title', 'full_title', 'popular_name', 'law', 'stage',
'stage_date'
]
include_absolute_url = True
limit = 20
explanation = fields.CharField()
legal_code = fields.CharField()
proposers = fields.ToManyField(MemberResource,
'proposers',
full=False)
pre_votes = fields.ToManyField(VoteResource,
'pre_votes',
null=True,
full=False)
first_vote = fields.ToOneField(VoteResource,
'first_vote',
null=True,
full=False)
approval_vote = fields.ToOneField(VoteResource,
'approval_vote',
null=True,
full=False)
proposals = fields.ToManyField(PrivateProposalResource,
'proposals',
null=True,
full=True)
tags = fields.ToManyField('auxiliary.api.TagResource',
attribute=lambda t: t.obj.tags,
null=True,
full=False)
# XXX : this adds the following select phrases
# [sql] SELECT ...
# FROM "agendas_agendabill"
# WHERE ("agendas_agendabill"."agenda_id" IN
# (SELECT ...
# FROM "agendas_agenda"
# WHERE "agendas_agenda"."is_public" = TRUE)
# AND "agendas_agendabill"."bill_id" = XXX)
# [sql] SELECT ...
# FROM "agendas_agenda"
# WHERE "agendas_agenda"."id" = YYY
agendas = fields.ToManyField(BillAgendaResource,
'agendas',
null=True,
full=False)
def dehydrate_agendas(self, bundle):
        try:
            # Reuses the bill-detail view helper; the structure mirrors that page.
            agendas_details = agendas_for(bundle.request.user, bundle.obj, 'bill')
            result = dict()
            result["agenda_list"] = detailed_agendas(agendas_details["agendas"])
            # XXX: should suggest_agendas / suggested_agendas also go through
            # detailed_agendas?
            result["suggest_agendas"] = agendas_details["suggest_agendas"]
            # XXX: no sample data was available for the formset; forms probably
            # need dedicated serialization here.
            result["formset"] = agendas_details["formset"]
            result["suggested_agendas"] = agendas_details["suggested_agendas"]
            result["suggest_agendas_login"] = agendas_details["suggest_agendas_login"]
            return result
        except Exception:
            logging.exception('Got exception dehydrating agendas')
            return None
    def dehydrate_explanation(self, bundle):
        try:
            return self.get_src_parts(bundle)[1]
        except Exception:
            logging.exception('Got exception dehydrating explanation')
            return ""
def dehydrate_legal_code(self, bundle):
return self.get_src_parts(bundle)[0]
def dehydrate_stage(self, bundle):
return bundle.obj.get_stage_display()
def get_src_parts(self, bundle):
try:
return bundle.src_parts
except AttributeError:
parts = ['','']
bill = bundle.obj
try:
ps = bill.proposals.order_by('-date')[0]
if ps.content_html:
parts = ps.content_html.split(p_explanation)
except IndexError:
pass
bundle.src_parts = parts
return parts
    def build_filters(self, filters=None):
        filters = filters or {}
        orm_filters = super(BillResource, self).build_filters(filters)
if 'proposer' in filters:
orm_filters["proposers"] = filters['proposer']
return orm_filters
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for configuring platform specific installation."""
import os
import re
import shutil
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.util import platforms
# TODO(user): b/34807345 -- print to stderr
def _TraceAction(action):
"""Prints action to the standard output -- not really standard practice."""
print action
# pylint:disable=unused-argument
def _UpdatePathForWindows(bin_path):
"""Update the Windows system path to include bin_path.
Args:
bin_path: str, The absolute path to the directory that will contain
Cloud SDK binaries.
"""
# pylint:disable=g-import-not-at-top, we want to only attempt these imports
# on windows.
try:
import win32con
import win32gui
try:
# Python 3
import winreg
except ImportError:
# Python 2
import _winreg as winreg
except ImportError:
_TraceAction("""\
The installer is unable to automatically update your system PATH. Please add
{path}
to your system PATH to enable easy use of the Cloud SDK Command Line Tools.
""".format(path=bin_path))
return
def GetEnv(name):
root = winreg.HKEY_CURRENT_USER
subkey = 'Environment'
key = winreg.OpenKey(root, subkey, 0, winreg.KEY_READ)
try:
value, _ = winreg.QueryValueEx(key, name)
# pylint:disable=undefined-variable, This variable is defined in windows.
except WindowsError:
return ''
return value
def SetEnv(name, value):
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Environment', 0,
winreg.KEY_ALL_ACCESS)
winreg.SetValueEx(key, name, 0, winreg.REG_EXPAND_SZ, value)
winreg.CloseKey(key)
win32gui.SendMessage(
win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 'Environment')
return value
def Remove(paths, value):
while value in paths:
paths.remove(value)
def PrependEnv(name, values):
paths = GetEnv(name).split(';')
for value in values:
if value in paths:
Remove(paths, value)
paths.insert(0, value)
SetEnv(name, ';'.join(paths))
PrependEnv('Path', [bin_path])
_TraceAction("""\
The following directory has been added to your PATH.
{bin_path}
Create a new command shell for the changes to take effect.
""".format(bin_path=bin_path))
def _GetRcContents(comment, rc_path, rc_contents, pattern=None):
"""Generates the RC file contents with new comment and `source rc_path` lines.
Args:
comment: The shell comment string that precedes the source line.
rc_path: The path of the rc file to source.
rc_contents: The current contents.
pattern: A regex pattern that matches comment, None for exact match on
comment.
Returns:
The comment and `source rc_path` lines to be inserted into a shell rc file.
"""
if not pattern:
pattern = re.escape(comment)
# This pattern handles all three variants that we have injected in user RC
# files. All have the same sentinel comment line followed by:
# 1. a single 'source ...' line
# 2. a 3 line if-fi (a bug because this pattern was previously incorrect)
# 3. finally a single if-fi line.
# If you touch this code ONLY INJECT ONE LINE AFTER THE SENTINEL COMMENT LINE.
#
# At some point we can drop the alternate patterns and only search for the
# sentinel comment line and assume the next line is ours too (that was the
  # original intent before the 3-line form was added).
subre = re.compile('\n' + pattern + '\n('
"source '.*'"
'|'
'if .*; then\n source .*\nfi'
'|'
'if .*; then source .*; fi'
')\n', re.MULTILINE)
  # The injected line checks that rc_path exists before sourcing it.
line = ("\n{comment}\nif [ -f '{rc_path}' ]; then source '{rc_path}'; fi\n"
.format(comment=comment, rc_path=rc_path))
filtered_contents = subre.sub('', rc_contents)
rc_contents = '{filtered_contents}{line}'.format(
filtered_contents=filtered_contents, line=line)
return rc_contents
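# A minimal, self-contained sketch (not part of the installer) of how
# _GetRcContents replaces a previously injected 'source' line. The sentinel
# comment matches the PATH comment used below; the rc paths are made-up
# examples.
def _ExampleRcContentsRoundTrip():
  comment = '# The next line updates PATH for the Google Cloud SDK.'
  old = "export FOO=1\n%s\nsource '/old/sdk/path.bash.inc'\n" % comment
  new = _GetRcContents(comment, '/new/sdk/path.bash.inc', old)
  # The stale source line is dropped and replaced by a guarded one-liner.
  assert "source '/old/sdk/path.bash.inc'" not in new
  assert "if [ -f '/new/sdk/path.bash.inc' ]; then source" in new
  return new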
class _RcUpdater(object):
"""Updates the RC file completion and PATH code injection."""
def __init__(self, completion_update, path_update, shell, rc_path, sdk_root):
self.completion_update = completion_update
self.path_update = path_update
self.rc_path = rc_path
self.completion = os.path.join(
sdk_root, 'completion.{shell}.inc'.format(shell=shell))
self.path = os.path.join(
sdk_root, 'path.{shell}.inc'.format(shell=shell))
def Update(self):
"""Creates or updates the RC file."""
if self.rc_path:
if os.path.isfile(self.rc_path):
with open(self.rc_path) as rc_file:
rc_contents = rc_file.read()
original_rc_contents = rc_contents
else:
rc_contents = ''
original_rc_contents = ''
if self.path_update:
rc_contents = _GetRcContents(
'# The next line updates PATH for the Google Cloud SDK.',
self.path, rc_contents)
if self.completion_update:
rc_contents = _GetRcContents(
'# The next line enables shell command completion for gcloud.',
self.completion, rc_contents,
pattern=('# The next line enables [a-z][a-z]*'
' command completion for gcloud.'))
if rc_contents == original_rc_contents:
_TraceAction('No changes necessary for [{rc}].'.format(rc=self.rc_path))
return
if os.path.exists(self.rc_path):
rc_backup = self.rc_path + '.backup'
_TraceAction('Backing up [{rc}] to [{backup}].'.format(
rc=self.rc_path, backup=rc_backup))
shutil.copyfile(self.rc_path, rc_backup)
with open(self.rc_path, 'w') as rc_file:
rc_file.write(rc_contents)
_TraceAction('[{rc_path}] has been updated.'.format(rc_path=self.rc_path))
_TraceAction(console_io.FormatRequiredUserAction(
'Start a new shell for the changes to take effect.'))
if not self.completion_update:
_TraceAction(console_io.FormatRequiredUserAction(
          'Source [{rc}] in your profile to enable shell command completion for '
'gcloud.'.format(rc=self.completion)))
if not self.path_update:
_TraceAction(console_io.FormatRequiredUserAction(
'Source [{rc}] in your profile to add the Google Cloud SDK command '
'line tools to your $PATH.'.format(rc=self.path)))
def _GetPreferredShell(path, default='bash'):
"""Returns the preferred shell name based on the base file name in path.
Args:
path: str, The file path to check.
default: str, The default value to return if a preferred name cannot be
determined.
Returns:
The preferred user shell name or default if none can be determined.
"""
name = os.path.basename(path)
for shell in ('bash', 'zsh', 'ksh'):
if shell in name:
return shell
return default
def _GetShellRcFileName(shell, host_os):
"""Returns the RC file name for shell and host_os.
Args:
shell: str, The shell base name.
host_os: str, The host os identification string.
Returns:
The shell RC file name, '.bashrc' by default.
"""
if shell == 'ksh':
return os.environ.get('ENV', None) or '.kshrc'
elif shell != 'bash':
return '.{shell}rc'.format(shell=shell)
elif host_os == platforms.OperatingSystem.LINUX:
return '.bashrc'
elif host_os == platforms.OperatingSystem.MACOSX:
return '.bash_profile'
elif host_os == platforms.OperatingSystem.MSYS:
return '.profile'
return '.bashrc'
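# Illustrative sketch (not part of the installer) of the shell and rc-file
# detection above; the shell paths are made-up examples.
def _ExampleShellDetection():
  assert _GetPreferredShell('/usr/bin/zsh') == 'zsh'
  assert _GetPreferredShell('/bin/sh') == 'bash'  # falls back to the default
  assert _GetShellRcFileName('zsh', platforms.OperatingSystem.LINUX) == '.zshrc'
  assert _GetShellRcFileName(
      'bash', platforms.OperatingSystem.MACOSX) == '.bash_profile'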
def _GetRcUpdater(completion_update, path_update, rc_path, sdk_root, host_os):
"""Returns an _RcUpdater object for the preferred user shell.
Args:
completion_update: bool, Whether or not to do command completion.
path_update: bool, Whether or not to update PATH.
rc_path: str, The path to the rc file to update. If None, ask.
sdk_root: str, The path to the Cloud SDK root.
host_os: str, The host os identification string.
Returns:
An _RcUpdater() object for the preferred user shell.
"""
# An initial guess on the preferred user shell based on the environment.
preferred_shell = _GetPreferredShell(os.environ.get('SHELL', '/bin/sh'))
if not completion_update and not path_update:
rc_path = None
elif not rc_path:
file_name = _GetShellRcFileName(preferred_shell, host_os)
rc_path = os.path.join(platforms.GetHomePath(), file_name)
rc_path_update = console_io.PromptResponse((
'The Google Cloud SDK installer will now prompt you to update an rc '
'file to bring the Google Cloud CLIs into your environment.\n\n'
'Enter a path to an rc file to update, or leave blank to use '
'[{rc_path}]: ').format(rc_path=rc_path))
if rc_path_update:
rc_path = os.path.expanduser(rc_path_update)
if rc_path:
# Check the rc_path for a better hint at the user preferred shell.
preferred_shell = _GetPreferredShell(rc_path, default=preferred_shell)
return _RcUpdater(completion_update, path_update, preferred_shell, rc_path,
sdk_root)
def UpdateRC(completion_update, path_update, rc_path, bin_path, sdk_root):
"""Update the system path to include bin_path.
Args:
completion_update: bool, Whether or not to do command completion. If None,
ask.
path_update: bool, Whether or not to update PATH. If None, ask.
rc_path: str, The path to the rc file to update. If None, ask.
bin_path: str, The absolute path to the directory that will contain
Cloud SDK binaries.
sdk_root: str, The path to the Cloud SDK root.
"""
host_os = platforms.OperatingSystem.Current()
if host_os == platforms.OperatingSystem.WINDOWS:
if path_update is None:
path_update = console_io.PromptContinue(
prompt_string='Update %PATH% to include Cloud SDK binaries?')
if path_update:
_UpdatePathForWindows(bin_path)
return
if completion_update is None:
if path_update is None: # Ask only one question if both were not set.
path_update = console_io.PromptContinue(
prompt_string=('\nModify profile to update your $PATH '
'and enable shell command completion?'))
completion_update = path_update
else:
completion_update = console_io.PromptContinue(
prompt_string=('\nModify profile to enable shell command '
'completion?'))
elif path_update is None:
path_update = console_io.PromptContinue(
prompt_string=('\nModify profile to update your $PATH?'))
_GetRcUpdater(
completion_update, path_update, rc_path, sdk_root, host_os).Update()
|
|
#!/usr/bin/env python
#
# Copyright (C) 2014
# Brian Caswell <bmc@lungetech.com>
# Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import ctypes
import os
import random
import sys
import math
# Caveat: the buffer returned by zoom_buf is never freed from the Python side.
import datetime
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'narfpylib.zip'))
__author__ = 'scsi'
CURDIR = os.path.dirname(__file__)
BASEPATH = os.path.abspath(os.path.join(CURDIR, '../../'))
BUILD_DIR = os.path.join(BASEPATH, 'build')
LIBDIR = os.path.join(BASEPATH, 'lib')
DATADIR = os.path.join(BASEPATH, "support", 'data')
from jinja2 import Template
H_TMPL = """
/*
* Copyright (C) Narf Industries <info@narfindustries.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef MIX_CHEM
#define MIX_CHEM
const static int N_FORMULAS = {{ n_chem_forms }};
typedef struct chem_formula {
char *compound_name;
double mole;
} chem_formula_t;
static chem_formula_t chem_formulas[N_FORMULAS] = {
{% for cfn, mole in chem_forms %}{ "{{ cfn }}", {{ mole }} },
{% endfor %}
};
#endif
"""
def get_compounds(sample_size, over_percentage):
f = open(os.path.join(DATADIR, 'comps_strip'))
lines = [x.replace('\\', '').replace(',', '') for x in map(str.strip, f.readlines())]
f.close()
compound_list = list(set(lines))
random.seed(0x3209baaa)
sample = random.sample(compound_list, sample_size)
l = [(s, random.uniform(0.1, 2.3)) for s in sample]
assert(int(sample_size*over_percentage) > 20)
l = [(s, random.uniform(2.3, 2.4)) for s, i in l[:int(sample_size*over_percentage)]] + l[int(sample_size * over_percentage):]
# disperse this thing so a hash function can actually find these values
random.shuffle(l)
return l
class MixologyCodeGen(object):
sample_size = 3000
pg_size = 25
code_fn = 'libcompound'
over_percentage = 0.05
prep_sample_sz = 1000
mix_sample_sz = 25
compounds_and_weights = get_compounds(sample_size, over_percentage)
compound_list = [c for c, w in compounds_and_weights]
mix = ctypes.cdll.LoadLibrary(os.path.join(BUILD_DIR, "patched", "so", "NRFIN_00007.so"))
class compounds_sample_t(ctypes.Structure):
_fields_=[("sample", ctypes.POINTER(ctypes.c_uint)),
("sample_sz", ctypes.c_uint )]
sample_compounds = mix.sample_compounds
sample_compounds.restype = ctypes.POINTER(compounds_sample_t)
zoom_buf = mix.zoom_buf
zoom_buf.restype = ctypes.c_char_p
zoom_buf.argtypes = [ctypes.POINTER(compounds_sample_t)]
@property
def max_pg(self):
return math.ceil(self.sample_size / self.pg_size)-2
@property
def massive_compounds(self):
return [c for c, w in filter(lambda x: x[1] > 2.3, self.compounds_and_weights)]
@property
def h_file_path(self):
return os.path.join(LIBDIR, self.code_fn + '.h')
@property
def num_entries(self):
return len(self.compounds_and_weights)
def _arb_sample_zoom(self, sample_seed, sample_sz):
x = self.sample_compounds(sample_seed, sample_sz)
b = self.zoom_buf(x)
assert(b is not None)
self.mix.free_sample_st(x)
del x
return b
def _mix_sample_idxs(self, idxs, light_idxs):
# ll = reduce(lambda x, y: x*y, [self.compounds_and_weights[i][1] for i in idxs])
ll = 1
final_idx = []
for i in idxs:
if(ll < 2**24):
ll *= self.compounds_and_weights[i][1]
final_idx.append(i)
assert(ll > 2**24)
assert(ll < 2**32)
alloc_sample_st = self.mix.alloc_sample_st
alloc_sample_st.restype = ctypes.POINTER(self.compounds_sample_t)
samp = alloc_sample_st(self.mix_sample_sz)
set_sample_at_idx = self.mix.set_sample_at_idx
set_sample_at_idx.argtypes = [ctypes.POINTER(self.compounds_sample_t), ctypes.c_uint, ctypes.c_uint]
ii = 0
for i in range(0, self.mix_sample_sz):
try:
ii = final_idx.pop()
except IndexError:
pass
            # When final_idx runs out, keep filling the sample with the last
            # index so the multiplication is only done once.
set_sample_at_idx(samp, i, ii)
b = self.zoom_buf(samp)
self.mix.free_sample_st(samp)
assert(b is not None)
# need 20 idxs at
return b
def _get_sample_heavy_idxs(self, sample_seed):
z = ctypes.c_char_p(sample_seed)
        x = self.mix.sample_compounds(z, self.prep_sample_sz)
idxs = []
light_idxs = []
for i in range(0, len(self.compound_list)):
            r = self.mix.check_compound_idx_in_sample(x, i)
if r == 1 and self.compounds_and_weights[i][1] > 2.3:
idxs.append(i)
elif r == 1 and self.compounds_and_weights[i][1] < 2.3:
light_idxs.append(i)
assert(len(idxs) >= 20)
return idxs, light_idxs
def get_prep_sample_zoom(self, sample_seed):
return self._arb_sample_zoom(sample_seed, self.prep_sample_sz)
def get_mix_sample_zoom(self, sample_seed):
return self._arb_sample_zoom(sample_seed, self.mix_sample_sz)
def generate_code(self):
t = Template(H_TMPL)
s = t.render(chem_forms=self.compounds_and_weights, n_chem_forms=len(self.compounds_and_weights))
with open(self.h_file_path, 'w') as f:
f.write(s)
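# Illustrative sketch (not part of the generator): rendering H_TMPL with a tiny
# hand-written formula list shows the shape of the emitted C header. The
# compound names and moles below are made-up examples.
def _example_render_header():
    forms = [("Water", 1.0), ("Ethanol", 0.79)]
    return Template(H_TMPL).render(chem_forms=forms, n_chem_forms=len(forms))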
if __name__ == '__main__':
m = MixologyCodeGen()
m.generate_code()
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Google Cloud Bigtable Table."""
from gcloud_bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud_bigtable._generated import (
bigtable_service_messages_pb2 as data_messages_pb2)
from gcloud_bigtable._generated import (
bigtable_table_service_messages_pb2 as messages_pb2)
from gcloud_bigtable._helpers import _to_bytes
from gcloud_bigtable.column_family import ColumnFamily
from gcloud_bigtable.column_family import _gc_rule_from_pb
from gcloud_bigtable.row import Row
from gcloud_bigtable.row_data import PartialRowData
from gcloud_bigtable.row_data import PartialRowsData
class Table(object):
"""Representation of a Google Cloud Bigtable Table.
.. note::
We don't define any properties on a table other than the name. As
the proto says, in a request:
The ``name`` field of the Table and all of its ColumnFamilies must
be left blank, and will be populated in the response.
This leaves only the ``current_operation`` and ``granularity``
fields. The ``current_operation`` is only used for responses while
``granularity`` is an enum with only one value.
We can use a :class:`Table` to:
* :meth:`create` the table
* :meth:`rename` the table
* :meth:`delete` the table
* :meth:`list_column_families` in the table
:type table_id: str
:param table_id: The ID of the table.
:type cluster: :class:`.cluster.Cluster`
:param cluster: The cluster that owns the table.
"""
def __init__(self, table_id, cluster):
self.table_id = table_id
self._cluster = cluster
@property
def cluster(self):
"""Getter for table's cluster.
:rtype: :class:`.cluster.Cluster`
:returns: The cluster stored on the table.
"""
return self._cluster
@property
def client(self):
"""Getter for table's client.
:rtype: :class:`.client.Client`
:returns: The client that owns this table.
"""
return self.cluster.client
@property
def timeout_seconds(self):
"""Getter for table's default timeout seconds.
:rtype: int
:returns: The timeout seconds default stored on the table's client.
"""
return self._cluster.timeout_seconds
@property
def name(self):
"""Table name used in requests.
.. note::
This property will not change if ``table_id`` does not, but the
return value is not cached.
The table name is of the form
``"projects/../zones/../clusters/../tables/{table_id}"``
:rtype: str
:returns: The table name.
"""
return self.cluster.name + '/tables/' + self.table_id
def column_family(self, column_family_id, gc_rule=None):
"""Factory to create a column family associated with this table.
:type column_family_id: str
:param column_family_id: The ID of the column family. Must be of the
form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type gc_rule: :class:`.column_family.GarbageCollectionRule`,
:class:`.column_family.GarbageCollectionRuleUnion` or
:class:`.column_family.GarbageCollectionRuleIntersection`
:param gc_rule: (Optional) The garbage collection settings for this
column family.
:rtype: :class:`.column_family.ColumnFamily`
:returns: A column family owned by this table.
"""
return ColumnFamily(column_family_id, self, gc_rule=gc_rule)
def row(self, row_key, filter_=None):
"""Factory to create a row associated with this table.
:type row_key: bytes
:param row_key: The key for the row being created.
:type filter_: :class:`.RowFilter`,
:class:`.RowFilterChain`,
:class:`.RowFilterUnion`, or
:class:`.ConditionalRowFilter`
:param filter_: (Optional) Filter to be used for conditional mutations.
See :class:`.Row` for more details.
:rtype: :class:`.Row`
:returns: A row owned by this table.
"""
return Row(row_key, self, filter_=filter_)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.table_id == self.table_id and
other.cluster == self.cluster)
def __ne__(self, other):
return not self.__eq__(other)
def create(self, initial_split_keys=None, timeout_seconds=None):
"""Creates this table.
.. note::
Though a :class:`._generated.bigtable_table_data_pb2.Table` is also
allowed (as the ``table`` property) in a create table request, we
do not support it in this method. As mentioned in the
:class:`Table` docstring, the name is the only useful property in
the table proto.
.. note::
A create request returns a
:class:`._generated.bigtable_table_data_pb2.Table` but we don't use
this response. The proto definition allows for the inclusion of a
``current_operation`` in the response, but in example usage so far,
it seems the Bigtable API does not return any operation.
:type initial_split_keys: list
:param initial_split_keys: (Optional) List of row keys that will be
used to initially split the table into
several tablets (Tablets are similar to
HBase regions). Given two split keys,
``"s1"`` and ``"s2"``, three tablets will be
created, spanning the key ranges:
``[, s1)``, ``[s1, s2)``, ``[s2, )``.
:type timeout_seconds: int
:param timeout_seconds: Number of seconds for request time-out.
If not passed, defaults to value set on table.
"""
request_pb = messages_pb2.CreateTableRequest(
initial_split_keys=initial_split_keys or [],
name=self.cluster.name,
table_id=self.table_id,
)
timeout_seconds = timeout_seconds or self.timeout_seconds
response = self.client.table_stub.CreateTable.async(request_pb,
timeout_seconds)
# We expect a `._generated.bigtable_table_data_pb2.Table`
response.result()
def rename(self, new_table_id, timeout_seconds=None):
"""Rename this table.
.. note::
This cannot be used to move tables between clusters,
zones, or projects.
.. note::
The Bigtable Table Admin API currently returns
``BigtableTableService.RenameTable is not yet implemented``
when this method is used. It's unclear when this method will
actually be supported by the API.
:type new_table_id: str
        :param new_table_id: The new table ID for this table.
:type timeout_seconds: int
:param timeout_seconds: Number of seconds for request time-out.
If not passed, defaults to value set on table.
"""
request_pb = messages_pb2.RenameTableRequest(
name=self.name,
new_id=new_table_id,
)
timeout_seconds = timeout_seconds or self.timeout_seconds
response = self.client.table_stub.RenameTable.async(request_pb,
timeout_seconds)
# We expect a `._generated.empty_pb2.Empty`
response.result()
self.table_id = new_table_id
def delete(self, timeout_seconds=None):
"""Delete this table.
:type timeout_seconds: int
:param timeout_seconds: Number of seconds for request time-out.
If not passed, defaults to value set on table.
"""
request_pb = messages_pb2.DeleteTableRequest(name=self.name)
timeout_seconds = timeout_seconds or self.timeout_seconds
response = self.client.table_stub.DeleteTable.async(request_pb,
timeout_seconds)
# We expect a `._generated.empty_pb2.Empty`
response.result()
def list_column_families(self, timeout_seconds=None):
"""Check if this table exists.
:type timeout_seconds: int
:param timeout_seconds: Number of seconds for request time-out.
If not passed, defaults to value set on table.
:rtype: dictionary with string as keys and
:class:`.column_family.ColumnFamily` as values
:returns: List of column families attached to this table.
:raises: :class:`ValueError <exceptions.ValueError>` if the column
family name from the response does not agree with the computed
name from the column family ID.
"""
request_pb = messages_pb2.GetTableRequest(name=self.name)
timeout_seconds = timeout_seconds or self.timeout_seconds
response = self.client.table_stub.GetTable.async(request_pb,
timeout_seconds)
# We expect a `._generated.bigtable_table_data_pb2.Table`
table_pb = response.result()
result = {}
for column_family_id, value_pb in table_pb.column_families.items():
gc_rule = _gc_rule_from_pb(value_pb.gc_rule)
column_family = self.column_family(column_family_id,
gc_rule=gc_rule)
if column_family.name != value_pb.name:
raise ValueError('Column family name %s does not agree with '
'name from request: %s.' % (
column_family.name, value_pb.name))
result[column_family_id] = column_family
return result
def read_row(self, row_key, filter_=None, timeout_seconds=None):
"""Read a single row from this table.
:type row_key: bytes
:param row_key: The key of the row to read from.
:type filter_: :class:`.row.RowFilter`, :class:`.row.RowFilterChain`,
:class:`.row.RowFilterUnion` or
:class:`.row.ConditionalRowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
row. If unset, returns the entire row.
:type timeout_seconds: int
:param timeout_seconds: Number of seconds for request time-out.
If not passed, defaults to value set on table.
:rtype: :class:`.PartialRowData`, :data:`NoneType <types.NoneType>`
:returns: The contents of the row if any chunks were returned in
the response, otherwise :data:`None`.
:raises: :class:`ValueError <exceptions.ValueError>` if a commit row
chunk is never encountered.
"""
request_pb = _create_row_request(self.name, row_key=row_key,
filter_=filter_)
timeout_seconds = timeout_seconds or self.timeout_seconds
response_iterator = self.client.data_stub.ReadRows(request_pb,
timeout_seconds)
# We expect an iterator of `data_messages_pb2.ReadRowsResponse`
result = PartialRowData(row_key)
for read_rows_response in response_iterator:
result.update_from_read_rows(read_rows_response)
# Make sure the result actually contains data.
if not result._chunks_encountered:
return None
# Make sure the result was committed by the back-end.
if not result.committed:
raise ValueError('The row remains partial / is not committed.')
return result
def read_rows(self, start_key=None, end_key=None,
allow_row_interleaving=None, limit=None, filter_=None,
timeout_seconds=None):
"""Read rows from this table.
:type start_key: bytes
:param start_key: (Optional) The beginning of a range of row keys to
read from. The range will include ``start_key``. If
left empty, will be interpreted as the empty string.
:type end_key: bytes
:param end_key: (Optional) The end of a range of row keys to read from.
The range will not include ``end_key``. If left empty,
will be interpreted as an infinite string.
:type filter_: :class:`.row.RowFilter`, :class:`.row.RowFilterChain`,
:class:`.row.RowFilterUnion` or
:class:`.row.ConditionalRowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
specified row(s). If unset, reads every column in
each row.
:type allow_row_interleaving: bool
:param allow_row_interleaving: (Optional) By default, rows are read
sequentially, producing results which
are guaranteed to arrive in increasing
row order. Setting
``allow_row_interleaving`` to
:data:`True` allows multiple rows to be
interleaved in the response stream,
which increases throughput but breaks
this guarantee, and may force the
client to use more memory to buffer
partially-received rows.
:type limit: int
:param limit: (Optional) The read will terminate after committing to N
rows' worth of results. The default (zero) is to return
all results. Note that if ``allow_row_interleaving`` is
set to :data:`True`, partial results may be returned for
more than N rows. However, only N ``commit_row`` chunks
will be sent.
:type timeout_seconds: int
:param timeout_seconds: Number of seconds for request time-out.
If not passed, defaults to value set on table.
:rtype: :class:`.PartialRowsData`
:returns: A :class:`.PartialRowsData` convenience wrapper for consuming
the streamed results.
"""
request_pb = _create_row_request(
self.name, start_key=start_key, end_key=end_key, filter_=filter_,
allow_row_interleaving=allow_row_interleaving, limit=limit)
timeout_seconds = timeout_seconds or self.timeout_seconds
response_iterator = self.client.data_stub.ReadRows(request_pb,
timeout_seconds)
# We expect an iterator of `data_messages_pb2.ReadRowsResponse`
return PartialRowsData(response_iterator)
def sample_row_keys(self, timeout_seconds=None):
"""Read a sample of row keys in the table.
The returned row keys will delimit contiguous sections of the table of
approximately equal size, which can be used to break up the data for
distributed tasks like mapreduces.
The elements in the iterator are a SampleRowKeys response and they have
the properties ``offset_bytes`` and ``row_key``. They occur in sorted
order. The table might have contents before the first row key in the
list and after the last one, but a key containing the empty string
indicates "end of table" and will be the last response given, if
present.
.. note::
Row keys in this list may not have ever been written to or read
from, and users should therefore not make any assumptions about the
row key structure that are specific to their use case.
The ``offset_bytes`` field on a response indicates the approximate
total storage space used by all rows in the table which precede
``row_key``. Buffering the contents of all rows between two subsequent
samples would require space roughly equal to the difference in their
``offset_bytes`` fields.
:type timeout_seconds: int
:param timeout_seconds: Number of seconds for request time-out.
If not passed, defaults to value set on table.
:rtype: :class:`grpc.framework.alpha._reexport._CancellableIterator`
:returns: A cancel-able iterator. Can be consumed by calling ``next()``
or by casting to a :class:`list` and can be cancelled by
calling ``cancel()``.
"""
request_pb = data_messages_pb2.SampleRowKeysRequest(
table_name=self.name)
timeout_seconds = timeout_seconds or self.timeout_seconds
response_iterator = self.client.data_stub.SampleRowKeys(
request_pb, timeout_seconds)
return response_iterator
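# Usage sketch (not part of the original module): given a Cluster from
# gcloud_bigtable.cluster, typical read access goes through the factories and
# read methods above. The table ID and row keys are placeholders.
def _example_table_usage(cluster):
    table = Table('my-table', cluster)
    single_row = table.read_row(b'row-key-1')
    row_stream = table.read_rows(start_key=b'a', end_key=b'm', limit=100)
    return single_row, row_stream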
def _create_row_request(table_name, row_key=None, start_key=None, end_key=None,
filter_=None, allow_row_interleaving=None, limit=None):
"""Creates a request to read rows in a table.
:type table_name: str
:param table_name: The name of the table to read from.
:type row_key: bytes
:param row_key: (Optional) The key of a specific row to read from.
:type start_key: bytes
:param start_key: (Optional) The beginning of a range of row keys to
read from. The range will include ``start_key``. If
left empty, will be interpreted as the empty string.
:type end_key: bytes
:param end_key: (Optional) The end of a range of row keys to read from.
The range will not include ``end_key``. If left empty,
will be interpreted as an infinite string.
:type filter_: :class:`.row.RowFilter`, :class:`.row.RowFilterChain`,
:class:`.row.RowFilterUnion` or
:class:`.row.ConditionalRowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
specified row(s). If unset, reads the entire table.
:type allow_row_interleaving: bool
:param allow_row_interleaving: (Optional) By default, rows are read
sequentially, producing results which are
guaranteed to arrive in increasing row
order. Setting
``allow_row_interleaving`` to
:data:`True` allows multiple rows to be
interleaved in the response stream,
which increases throughput but breaks
this guarantee, and may force the
client to use more memory to buffer
partially-received rows.
:type limit: int
:param limit: (Optional) The read will terminate after committing to N
rows' worth of results. The default (zero) is to return
all results. Note that if ``allow_row_interleaving`` is
set to :data:`True`, partial results may be returned for
more than N rows. However, only N ``commit_row`` chunks
will be sent.
:rtype: :class:`data_messages_pb2.ReadRowsRequest`
:returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs.
:raises: :class:`ValueError <exceptions.ValueError>` if both
``row_key`` and one of ``start_key`` and ``end_key`` are set
"""
request_kwargs = {'table_name': table_name}
if (row_key is not None and
(start_key is not None or end_key is not None)):
raise ValueError('Row key and row range cannot be '
'set simultaneously')
if row_key is not None:
request_kwargs['row_key'] = _to_bytes(row_key)
if start_key is not None or end_key is not None:
range_kwargs = {}
if start_key is not None:
range_kwargs['start_key'] = _to_bytes(start_key)
if end_key is not None:
range_kwargs['end_key'] = _to_bytes(end_key)
row_range = data_pb2.RowRange(**range_kwargs)
request_kwargs['row_range'] = row_range
if filter_ is not None:
request_kwargs['filter'] = filter_.to_pb()
if allow_row_interleaving is not None:
request_kwargs['allow_row_interleaving'] = allow_row_interleaving
if limit is not None:
request_kwargs['num_rows_limit'] = limit
return data_messages_pb2.ReadRowsRequest(**request_kwargs)
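# A small sketch (not part of the original module) of how _create_row_request
# maps its keyword arguments onto the ReadRowsRequest proto; the table name is
# an illustrative placeholder.
def _example_read_rows_requests():
    table_name = 'projects/p/zones/z/clusters/c/tables/t'
    # A point lookup populates the row_key field...
    single = _create_row_request(table_name, row_key=b'row-1')
    # ...while a scan populates row_range; passing both raises ValueError.
    scan = _create_row_request(table_name, start_key=b'a', end_key=b'z', limit=10)
    return single, scan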
|
|
# Copyright 2013 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
from oslo_log import log as logging
from trove.common import cfg
from trove.common.i18n import _
from trove.common import instance as trove_instance
from trove.common.notification import EndNotification
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.cassandra import service
from trove.guestagent.datastore import manager
from trove.guestagent import guest_log
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Manager(manager.Manager):
GUEST_LOG_DEFS_SYSTEM_LABEL = 'system'
def __init__(self, manager_name='cassandra'):
super(Manager, self).__init__(manager_name)
self._app = None
self._admin = None
@property
def status(self):
return self.app.status
@property
def app(self):
if self._app is None:
self._app = self.build_app()
return self._app
def build_app(self):
return service.CassandraApp()
@property
def admin(self):
if self._admin is None:
self._admin = self.app.build_admin()
return self._admin
@property
def configuration_manager(self):
return self.app.configuration_manager
@property
def datastore_log_defs(self):
system_log_file = self.validate_log_file(
self.app.cassandra_system_log_file, self.app.cassandra_owner)
return {
self.GUEST_LOG_DEFS_SYSTEM_LABEL: {
self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER,
self.GUEST_LOG_USER_LABEL: self.app.cassandra_owner,
self.GUEST_LOG_FILE_LABEL: system_log_file
}
}
def guest_log_enable(self, context, log_name, disable):
if disable:
LOG.debug("Disabling system log.")
self.app.set_logging_level('OFF')
else:
log_level = CONF.get(self.manager_name).get('system_log_level')
LOG.debug("Enabling system log with logging level: %s" % log_level)
self.app.set_logging_level(log_level)
return False
def restart(self, context):
self.app.restart()
def start_db_with_conf_changes(self, context, config_contents):
self.app.start_db_with_conf_changes(config_contents)
def stop_db(self, context, do_not_start_on_reboot=False):
self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def reset_configuration(self, context, configuration):
self.app.reset_configuration(configuration)
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
self.app.install_if_needed(packages)
self.app.init_storage_structure(mount_point)
if config_contents or device_path or backup_info:
# FIXME(pmalik) Once the cassandra bug
# https://issues.apache.org/jira/browse/CASSANDRA-2356
# is fixed, this code may have to be revisited.
#
# Cassandra generates system keyspaces on the first start.
# The stored properties include the 'cluster_name', which once
# saved cannot be easily changed without removing the system
# tables. It is crucial that the service does not boot up in
# the middle of the configuration procedure.
# We wait here for the service to come up, stop it properly and
# remove the generated keyspaces before proceeding with
# configuration. If it does not start up within the time limit
# we assume it is not going to and proceed with configuration
# right away.
LOG.debug("Waiting for database first boot.")
if (self.app.status.wait_for_real_status_to_change_to(
trove_instance.ServiceStatuses.RUNNING,
CONF.state_change_wait_time,
False)):
LOG.debug("Stopping database prior to initial configuration.")
self.app.stop_db()
self.app._remove_system_tables()
LOG.debug("Starting initial configuration.")
if config_contents:
LOG.debug("Applying configuration.")
self.app.configuration_manager.save_configuration(
config_contents)
cluster_name = None
if cluster_config:
cluster_name = cluster_config.get('id', None)
self.app.apply_initial_guestagent_configuration(
cluster_name=cluster_name)
if cluster_config:
self.app.write_cluster_topology(
cluster_config['dc'], cluster_config['rack'],
prefer_local=True)
if device_path:
LOG.debug("Preparing data volume.")
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
if os.path.exists(mount_point):
                # rsync existing data
LOG.debug("Migrating existing data.")
device.migrate_data(mount_point)
# mount the volume
LOG.debug("Mounting new volume.")
device.mount(mount_point)
if not cluster_config:
if backup_info:
self._perform_restore(backup_info, context, mount_point)
LOG.debug("Starting database with configuration changes.")
self.app.start_db(update_db=False)
if not self.app.has_user_config():
LOG.debug("Securing superuser access.")
self.app.secure()
self.app.restart()
self._admin = self.app.build_admin()
if not cluster_config and self.is_root_enabled(context):
self.status.report_root(context, self.app.default_superuser_name)
def change_passwords(self, context, users):
with EndNotification(context):
self.admin.change_passwords(context, users)
def update_attributes(self, context, username, hostname, user_attrs):
with EndNotification(context):
self.admin.update_attributes(context, username, hostname,
user_attrs)
def create_database(self, context, databases):
with EndNotification(context):
self.admin.create_database(context, databases)
def create_user(self, context, users):
with EndNotification(context):
self.admin.create_user(context, users)
def delete_database(self, context, database):
with EndNotification(context):
self.admin.delete_database(context, database)
def delete_user(self, context, user):
with EndNotification(context):
self.admin.delete_user(context, user)
def get_user(self, context, username, hostname):
return self.admin.get_user(context, username, hostname)
def grant_access(self, context, username, hostname, databases):
self.admin.grant_access(context, username, hostname, databases)
def revoke_access(self, context, username, hostname, database):
self.admin.revoke_access(context, username, hostname, database)
def list_access(self, context, username, hostname):
return self.admin.list_access(context, username, hostname)
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
return self.admin.list_databases(context, limit, marker,
include_marker)
def list_users(self, context, limit=None, marker=None,
include_marker=False):
return self.admin.list_users(context, limit, marker, include_marker)
def enable_root(self, context):
return self.app.enable_root()
def enable_root_with_password(self, context, root_password=None):
return self.app.enable_root(root_password=root_password)
def disable_root(self, context):
self.app.enable_root(root_password=None)
def is_root_enabled(self, context):
return self.app.is_root_enabled()
def _perform_restore(self, backup_info, context, restore_location):
LOG.info(_("Restoring database from backup %s.") % backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
self.app._apply_post_restore_updates(backup_info)
except Exception as e:
LOG.error(e)
LOG.error(_("Error performing restore from backup %s.") %
backup_info['id'])
self.app.status.set_status(trove_instance.ServiceStatuses.FAILED)
raise
LOG.info(_("Restored database successfully."))
def create_backup(self, context, backup_info):
"""
Entry point for initiating a backup for this instance.
The call currently blocks guestagent until the backup is finished.
:param backup_info: a dictionary containing the db instance id of the
backup task, location, type, and other data.
"""
with EndNotification(context):
backup.backup(context, backup_info)
def update_overrides(self, context, overrides, remove=False):
LOG.debug("Updating overrides.")
if remove:
self.app.remove_overrides()
else:
self.app.update_overrides(context, overrides, remove)
def apply_overrides(self, context, overrides):
"""Configuration changes are made in the config YAML file and
require restart, so this is a no-op.
"""
pass
def get_data_center(self, context):
return self.app.get_data_center()
def get_rack(self, context):
return self.app.get_rack()
def set_seeds(self, context, seeds):
self.app.set_seeds(seeds)
def get_seeds(self, context):
return self.app.get_seeds()
def set_auto_bootstrap(self, context, enabled):
self.app.set_auto_bootstrap(enabled)
def node_cleanup_begin(self, context):
self.app.node_cleanup_begin()
def node_cleanup(self, context):
self.app.node_cleanup()
def node_decommission(self, context):
self.app.node_decommission()
def cluster_secure(self, context, password):
os_admin = self.app.cluster_secure(password)
self._admin = self.app.build_admin()
return os_admin
def get_admin_credentials(self, context):
return self.app.get_admin_credentials()
def store_admin_credentials(self, context, admin_credentials):
self.app.store_admin_credentials(admin_credentials)
self._admin = self.app.build_admin()
|
|
import os
import psycopg2
import time
# from filters_json import filter_list as FilterMap
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
QUERY_STRINGS = {}
DB_CONFIG = {}
ROOT_DIR = os.path.abspath(os.getcwd())
def read_in_bb_file():
u"""Reads in a file containing the 100 most populous cities in the US
and returns a dict with the lat/long points describing the bounding box
for each location."""
with open("text/bounding_boxes.txt", 'r') as f:
bbs = f.readlines()
bb_dict = {}
for line in bbs:
spl = line.strip().split(",")
city = spl[0].title()
place_name = city + ", " + spl[1]
lats_longs = [(spl[2], spl[3]), (spl[4], spl[5])]
bb_dict[place_name] = lats_longs
return bb_dict
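# Standalone sketch (not part of the original script) of the per-line parsing
# done by read_in_bb_file above; the sample line is a made-up example of the
# expected bounding_boxes.txt format.
def _example_parse_bb_line(line="new york,NY,40.4774,-74.2589,40.9176,-73.7004"):
    u"""Returns the (place_name, [(lat, lng), (lat, lng)]) pair for one line."""
    spl = line.strip().split(",")
    place_name = spl[0].title() + ", " + spl[1]
    return place_name, [(spl[2], spl[3]), (spl[4], spl[5])]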
def query_for_handles():
u"""Returns a list of tuples containing names and city locations."""
bb_dict = read_in_bb_file()
data_set = {}
for key, values in bb_dict.items():
vals = (values[0][0], values[0][1], values[1][0], values[1][1])
sql = """SELECT DISTINCT screen_name FROM "TweetTest" WHERE \
location_lat BETWEEN %s AND %s AND location_lng \
BETWEEN %s AND %s;"""
data = execute_query(sql, vals, need_results=True)
data_set[key] = data
print "Completed query on: " + str(key)
name_city = []
with open("text/training_set_names.txt", 'r') as f:
names = f.read()
for key, vals in data_set.items():
print "Checking names for ", key
count = 0
for val in vals:
if unicode(val) not in names and count <= 9:
name_city.append((val, key))
count += 1
print "returning now"
return name_city
def get_unique_users_in_training_data():
bb_dict = read_in_bb_file()
for city in bb_dict:
sql = """SELECT DISTINCT screen_name FROM "Tweet200" WHERE city = %s;"""
names = execute_query(sql, (city,), need_results=True)
with open("text/training_set_names.txt", 'a') as f:
for name in names:
f.write(str(name))
f.write("\n")
print "Wrote names from ", city
print "Done!"
def query_all_db(limit=False):
u"""Returns a dictionary with keys as city names and values as a list of
tweets from that city."""
bb_dict = read_in_bb_file()
data_set = {}
for key, values in bb_dict.items():
data = query_db(key, values, limit)
data_set[key] = data
return data_set
def query_all_db_Tweet200():
u"""Returns a dictionary with keys as city names and values as a list of
tweets from that city."""
bb_dict = read_in_bb_file()
data_set = {}
for key, values in bb_dict.items():
sql = """SELECT * FROM "Tweet200" WHERE city = %s ORDER BY (screen_name) ASC;"""
data = execute_query(sql, (key,), need_results=True)
data_set[key] = data
print "Completed query on: " + str(key)
return data_set
def query_db(city, values, limit=False):
u"""Takes in a city and Returns a dict containing all tweets
collected from the city (with the key being the city name and the value
being a list of tweets)."""
lats = values[0]
longs = values[1]
vals = (lats[0], lats[1], longs[0], longs[1])
if limit:
sql = """SELECT * FROM "Tweet" WHERE
(location_lat BETWEEN %s AND %s)
            AND (location_lng BETWEEN %s AND %s) LIMIT 3400;"""
else:
sql = """SELECT * FROM "Tweet" WHERE
(location_lat BETWEEN %s AND %s)
AND (location_lng BETWEEN %s AND %s);"""
## LIMIT 2000
print "Querying database for ", city
data = execute_query(sql, vals, need_results=True)
return data
def send_user_queries_to_db(tweet_set, city):
u"""Sends formatted tweets into DB."""
count = 0
for blob in tweet_set:
if blob:
for tweet in blob:
if tweet:
count += 1
sql = """INSERT INTO "Tweet200" (screen_name,
text, location_lat, location_lng, created_at,
hashtags, city) VALUES (%s, %s, %s, %s, %s, %s, %s)
; """
execute_query(sql, tweet, autocommit=False)
if not count % 100:
print count, " sent to DB"
DB_CONFIG['DB_CONNECTION'].commit()
print "we committed"
with open('text/stop_cities.txt', 'a') as fff:
fff.write(city)
fff.write("\n")
print "writing city to stop_cities file"
print "committed tweets from ", city, " to DB"
def execute_query(sql, args=None, need_results=False, autocommit=True):
u"""execute the passed in SQL using the current cursor.
If the query string takes any args pass those to the cursor as well."""
_get_connection_string()
results = None
try:
cur = _get_cursor()
cur.execute(sql, args)
if need_results:
results = cur.fetchall()
except psycopg2.Error as x:
# this will catch any errors generated by the database
print "*" * 40
print "Error executing query against DB: ", x.args
print sql, args
print "Attempting to reconnect to the DB..."
DB_CONFIG['DB_CONNECTION'].close()
DB_CONFIG['DB_CONNECTION'] = None
DB_CONFIG['DB_CURSOR'] = None
time.sleep(5)
conn = _get_connection()
while conn is None:
conn = _get_connection()
time.sleep(5)
else:
if autocommit:
DB_CONFIG['DB_CONNECTION'].commit()
return results
def _get_cursor():
"""get the current cursor if it exist, else create a new cursor"""
cur = DB_CONFIG.get('DB_CURSOR')
if cur is not None:
# print "cursor exists, using that..."
return cur
else:
# print "no cursor found, so creating one..."
return _create_cursor()
def _create_cursor():
"""create a new cursor and store it"""
conn = _get_connection()
# print "creating new cursor..."
DB_CONFIG['DB_CURSOR'] = conn.cursor()
# print "got new cursor."
return DB_CONFIG['DB_CURSOR']
def _get_connection():
"""Get the current connection if it exists, else connect."""
conn = DB_CONFIG.get('DB_CONNECTION')
if conn is not None:
# print "connection exists, so reusing it..."
return conn
else:
# print "no connection found..."
return _connect_db()
def _connect_db():
try:
# print "establishing a new connection..."
conn = psycopg2.connect(DB_CONFIG['DB_CONNECTION_STRING'])
except Exception:
raise Exception("Error connecting to DB: " +
str(DB_CONFIG['DB_CONNECTION_STRING']))
# print "Connection established and stored..."
DB_CONFIG['DB_CONNECTION'] = conn
return conn
def _get_connection_string():
    password = _get_password()
connection_string = []
connection_string.append(
"host=tweetstalk.cvf1ij0yeyiq.us-west-2.rds.amazonaws.com"
)
connection_string.append("dbname=lil_tweetstalker")
connection_string.append("user=tweetstalkers")
connection_string.append("password=")
connection_string.append(password)
connection_string.append("port=5432")
# print connection_string
connection = " ".join(connection_string)
DB_CONFIG['DB_CONNECTION_STRING'] = connection
def _get_password():
with open(ROOT_DIR + "/our_keys/config", 'r') as f:
password = f.read().split()[-1]
return password
def add_rows():
sql = """INSERT INTO "Tweet" (screen_name, text, location_lat, location_lng, created_at, hashtags) SELECT screen_name, text, location_lat, location_lng, created_at, hashtags FROM "TweetTest";"""
print "Querying database"
execute_query(sql)
def change_col_size():
sql = """ ALTER TABLE "Tweet200" ALTER COLUMN text TYPE varchar(2000);"""
print "Querying database"
execute_query(sql)
def drop_rows():
sql = """DELETE FROM "Tweet200" WHERE city = 'Charlotte, NC';"""
print "Querying database"
execute_query(sql)
print "deleted rows"
if __name__ == "__main__":
#query_all_db_Tweet200()
#drop_rows()
#print query_for_handles()
#change_col_size()
get_unique_users_in_training_data()
#pass
|
|
import logging
from typing import Dict, Optional, Union
import numpy as np
import pickle
from ray.tune import trial_runner
from ray.tune.result import DEFAULT_METRIC
from ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler
from ray.tune.trial import Trial
logger = logging.getLogger(__name__)
class AsyncHyperBandScheduler(FIFOScheduler):
"""Implements the Async Successive Halving.
This should provide similar theoretical performance as HyperBand but
avoid straggler issues that HyperBand faces. One implementation detail
    is that when using multiple brackets, trial allocation to a bracket is
    done randomly over a softmax probability.
See https://arxiv.org/abs/1810.05934
Args:
time_attr (str): A training result attr to use for comparing time.
Note that you can pass in something non-temporal such as
`training_iteration` as a measure of progress, the only requirement
is that the attribute should increase monotonically.
metric (str): The training result objective value attribute. Stopping
procedures will use this attribute. If None but a mode was passed,
the `ray.tune.result.DEFAULT_METRIC` will be used per default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
max_t (float): max time units per trial. Trials will be stopped after
max_t time units (determined by time_attr) have passed.
grace_period (float): Only stop trials at least this old in time.
The units are the same as the attribute named by `time_attr`.
reduction_factor (float): Used to set halving rate and amount. This
is simply a unit-less scalar.
brackets (int): Number of brackets. Each bracket has a different
halving rate, specified by the reduction factor.
"""
def __init__(self,
time_attr: str = "training_iteration",
reward_attr: Optional[str] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
max_t: int = 100,
grace_period: int = 1,
reduction_factor: float = 4,
brackets: int = 1):
assert max_t > 0, "Max (time_attr) not valid!"
assert max_t >= grace_period, "grace_period must be <= max_t!"
assert grace_period > 0, "grace_period must be positive!"
assert reduction_factor > 1, "Reduction Factor not valid!"
assert brackets > 0, "brackets must be positive!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
if reward_attr is not None:
mode = "max"
metric = reward_attr
logger.warning(
"`reward_attr` is deprecated and will be removed in a future "
"version of Tune. "
"Setting `metric={}` and `mode=max`.".format(reward_attr))
FIFOScheduler.__init__(self)
self._reduction_factor = reduction_factor
self._max_t = max_t
self._trial_info = {} # Stores Trial -> Bracket
# Tracks state for new trial add
self._brackets = [
_Bracket(grace_period, max_t, reduction_factor, s)
for s in range(brackets)
]
self._counter = 0 # for
self._num_stopped = 0
self._metric = metric
self._mode = mode
self._metric_op = None
if self._mode == "max":
self._metric_op = 1.
elif self._mode == "min":
self._metric_op = -1.
self._time_attr = time_attr
def set_search_properties(self, metric: Optional[str],
mode: Optional[str]) -> bool:
if self._metric and metric:
return False
if self._mode and mode:
return False
if metric:
self._metric = metric
if mode:
self._mode = mode
if self._mode == "max":
self._metric_op = 1.
elif self._mode == "min":
self._metric_op = -1.
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
return True
def on_trial_add(self, trial_runner: "trial_runner.TrialRunner",
trial: Trial):
if not self._metric or not self._metric_op:
raise ValueError(
"{} has been instantiated without a valid `metric` ({}) or "
"`mode` ({}) parameter. Either pass these parameters when "
"instantiating the scheduler, or pass them as parameters "
"to `tune.run()`".format(self.__class__.__name__, self._metric,
self._mode))
sizes = np.array([len(b._rungs) for b in self._brackets])
probs = np.e**(sizes - sizes.max())
normalized = probs / probs.sum()
idx = np.random.choice(len(self._brackets), p=normalized)
self._trial_info[trial.trial_id] = self._brackets[idx]
def on_trial_result(self, trial_runner: "trial_runner.TrialRunner",
trial: Trial, result: Dict) -> str:
action = TrialScheduler.CONTINUE
if self._time_attr not in result or self._metric not in result:
return action
if result[self._time_attr] >= self._max_t:
action = TrialScheduler.STOP
else:
bracket = self._trial_info[trial.trial_id]
action = bracket.on_result(trial, result[self._time_attr],
self._metric_op * result[self._metric])
if action == TrialScheduler.STOP:
self._num_stopped += 1
return action
def on_trial_complete(self, trial_runner: "trial_runner.TrialRunner",
trial: Trial, result: Dict):
if self._time_attr not in result or self._metric not in result:
return
bracket = self._trial_info[trial.trial_id]
bracket.on_result(trial, result[self._time_attr],
self._metric_op * result[self._metric])
del self._trial_info[trial.trial_id]
def on_trial_remove(self, trial_runner: "trial_runner.TrialRunner",
trial: Trial):
del self._trial_info[trial.trial_id]
def debug_string(self) -> str:
out = "Using AsyncHyperBand: num_stopped={}".format(self._num_stopped)
out += "\n" + "\n".join([b.debug_str() for b in self._brackets])
return out
def save(self, checkpoint_path: str):
save_object = self.__dict__
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(save_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
save_object = pickle.load(inputFile)
self.__dict__.update(save_object)
class _Bracket():
"""Bookkeeping system to track the cutoffs.
Rungs are created in reversed order so that we can more easily find
the correct rung corresponding to the current iteration of the result.
Example:
>>> b = _Bracket(1, 10, 2, 0)
>>> b.on_result(trial1, 1, 2) # CONTINUE
>>> b.on_result(trial2, 1, 4) # CONTINUE
>>> b.cutoff(b._rungs[-1][1]) == 3.0 # rungs are reversed
>>> b.on_result(trial3, 1, 1) # STOP
>>> b.cutoff(b._rungs[3][1]) == 2.0
"""
def __init__(self, min_t: int, max_t: int, reduction_factor: float,
s: int):
self.rf = reduction_factor
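        # Rung milestones grow geometrically as min_t * rf^(k + s) up to roughly
        # max_t and are stored largest-first, so on_result can stop at the first
        # rung whose milestone the trial has reached.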
MAX_RUNGS = int(np.log(max_t / min_t) / np.log(self.rf) - s + 1)
self._rungs = [(min_t * self.rf**(k + s), {})
for k in reversed(range(MAX_RUNGS))]
def cutoff(self, recorded) -> Union[None, int, float, complex, np.ndarray]:
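        # A trial must beat the (1 - 1/rf) percentile of rewards recorded at this
        # rung to continue, i.e. only roughly the top 1/rf fraction is promoted.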
if not recorded:
return None
return np.nanpercentile(
list(recorded.values()), (1 - 1 / self.rf) * 100)
def on_result(self, trial: Trial, cur_iter: int,
cur_rew: Optional[float]) -> str:
action = TrialScheduler.CONTINUE
for milestone, recorded in self._rungs:
if cur_iter < milestone or trial.trial_id in recorded:
continue
else:
cutoff = self.cutoff(recorded)
                if cutoff is not None and cur_rew is not None and cur_rew < cutoff:
action = TrialScheduler.STOP
if cur_rew is None:
logger.warning("Reward attribute is None! Consider"
" reporting using a different field.")
else:
recorded[trial.trial_id] = cur_rew
break
return action
def debug_str(self) -> str:
# TODO: fix up the output for this
iters = " | ".join([
"Iter {:.3f}: {}".format(milestone, self.cutoff(recorded))
for milestone, recorded in self._rungs
])
return "Bracket: " + iters
ASHAScheduler = AsyncHyperBandScheduler
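# A minimal usage sketch (illustrative only - `my_trainable`, the metric name and
# the numbers below are assumptions, not part of this module):
#
#   from ray import tune
#   tune.run(my_trainable,
#            num_samples=20,
#            scheduler=ASHAScheduler(metric="episode_reward_mean", mode="max",
#                                    grace_period=1, max_t=100))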
if __name__ == "__main__":
sched = AsyncHyperBandScheduler(
grace_period=1, max_t=10, reduction_factor=2)
print(sched.debug_string())
bracket = sched._brackets[0]
print(bracket.cutoff({str(i): i for i in range(20)}))
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from exam import fixture
from django.http import HttpRequest
from sentry import options
from sentry.models import Project
from sentry.testutils import TestCase
from sentry.utils.http import (
is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip,
origin_from_request,
)
class AbsoluteUriTest(TestCase):
def test_without_path(self):
assert absolute_uri() == options.get('system.url-prefix')
def test_with_path(self):
assert absolute_uri('/foo/bar') == '%s/foo/bar' % (options.get('system.url-prefix'),)
class SameDomainTestCase(TestCase):
def test_is_same_domain(self):
url1 = 'http://example.com/foo/bar'
url2 = 'http://example.com/biz/baz'
self.assertTrue(is_same_domain(url1, url2))
def test_is_same_domain_diff_scheme(self):
url1 = 'https://example.com/foo/bar'
url2 = 'http://example.com/biz/baz'
self.assertTrue(is_same_domain(url1, url2))
def test_is_same_domain_diff_port(self):
url1 = 'http://example.com:80/foo/bar'
url2 = 'http://example.com:13/biz/baz'
self.assertFalse(is_same_domain(url1, url2))
class GetOriginsTestCase(TestCase):
def test_project_default(self):
project = Project.objects.get()
with self.settings(SENTRY_ALLOW_ORIGIN=None):
result = get_origins(project)
self.assertEquals(result, frozenset(['*']))
def test_project(self):
project = Project.objects.get()
project.update_option('sentry:origins', [u'http://foo.example'])
with self.settings(SENTRY_ALLOW_ORIGIN=None):
result = get_origins(project)
self.assertEquals(result, frozenset(['http://foo.example']))
def test_project_and_setting(self):
project = Project.objects.get()
project.update_option('sentry:origins', [u'http://foo.example'])
with self.settings(SENTRY_ALLOW_ORIGIN='http://example.com'):
result = get_origins(project)
self.assertEquals(result, frozenset(['http://foo.example', 'http://example.com']))
def test_setting_empty(self):
with self.settings(SENTRY_ALLOW_ORIGIN=None):
result = get_origins(None)
self.assertEquals(result, frozenset([]))
def test_setting_all(self):
with self.settings(SENTRY_ALLOW_ORIGIN='*'):
result = get_origins(None)
self.assertEquals(result, frozenset(['*']))
def test_setting_uri(self):
with self.settings(SENTRY_ALLOW_ORIGIN='http://example.com'):
result = get_origins(None)
self.assertEquals(result, frozenset(['http://example.com']))
class IsValidOriginTestCase(TestCase):
@fixture
def project(self):
return mock.Mock()
def isValidOrigin(self, origin, inputs):
with mock.patch('sentry.utils.http.get_origins') as get_origins:
get_origins.return_value = inputs
result = is_valid_origin(origin, self.project)
get_origins.assert_called_once_with(self.project)
return result
def test_global_wildcard_matches_domain(self):
result = self.isValidOrigin('http://example.com', ['*'])
self.assertEquals(result, True)
def test_domain_wildcard_matches_domain(self):
result = self.isValidOrigin('http://example.com', ['*.example.com'])
self.assertEquals(result, True)
def test_domain_wildcard_matches_domain_with_port(self):
result = self.isValidOrigin('http://example.com:80', ['*.example.com'])
self.assertEquals(result, True)
def test_domain_wildcard_matches_subdomain(self):
result = self.isValidOrigin('http://foo.example.com', ['*.example.com'])
self.assertEquals(result, True)
def test_domain_wildcard_matches_subdomain_with_port(self):
result = self.isValidOrigin('http://foo.example.com:80', ['*.example.com'])
self.assertEquals(result, True)
def test_domain_wildcard_does_not_match_others(self):
result = self.isValidOrigin('http://foo.com', ['*.example.com'])
self.assertEquals(result, False)
def test_domain_wildcard_matches_domain_with_path(self):
result = self.isValidOrigin('http://foo.example.com/foo/bar', ['*.example.com'])
self.assertEquals(result, True)
def test_base_domain_matches_domain(self):
result = self.isValidOrigin('http://example.com', ['example.com'])
self.assertEquals(result, True)
def test_base_domain_matches_domain_with_path(self):
result = self.isValidOrigin('http://example.com/foo/bar', ['example.com'])
self.assertEquals(result, True)
def test_base_domain_matches_domain_with_port(self):
result = self.isValidOrigin('http://example.com:80', ['example.com'])
self.assertEquals(result, True)
def test_base_domain_matches_domain_with_explicit_port(self):
result = self.isValidOrigin('http://example.com:80', ['example.com:80'])
assert result is True
def test_base_domain_does_not_match_domain_with_invalid_port(self):
result = self.isValidOrigin('http://example.com:80', ['example.com:443'])
assert result is False
def test_base_domain_does_not_match_subdomain(self):
result = self.isValidOrigin('http://example.com', ['foo.example.com'])
self.assertEquals(result, False)
def test_full_uri_match(self):
result = self.isValidOrigin('http://example.com', ['http://example.com'])
self.assertEquals(result, True)
def test_full_uri_match_requires_scheme(self):
result = self.isValidOrigin('https://example.com', ['http://example.com'])
self.assertEquals(result, False)
def test_full_uri_match_does_not_require_port(self):
result = self.isValidOrigin('http://example.com:80', ['http://example.com'])
self.assertEquals(result, True)
def test_partial_uri_match(self):
result = self.isValidOrigin('http://example.com/foo/bar', ['http://example.com'])
self.assertEquals(result, True)
def test_null_valid_with_global(self):
result = self.isValidOrigin('null', ['*'])
self.assertEquals(result, True)
def test_null_invalid_graceful_with_domains(self):
result = self.isValidOrigin('null', ['http://example.com'])
self.assertEquals(result, False)
def test_custom_protocol_with_location(self):
result = self.isValidOrigin('sp://custom-thing/foo/bar', ['sp://custom-thing'])
assert result is True
result = self.isValidOrigin('sp://custom-thing-two/foo/bar', ['sp://custom-thing'])
assert result is False
def test_custom_protocol_without_location(self):
result = self.isValidOrigin('sp://custom-thing/foo/bar', ['sp://*'])
assert result is True
result = self.isValidOrigin('dp://custom-thing/foo/bar', ['sp://'])
assert result is False
def test_custom_protocol_with_domainish_match(self):
result = self.isValidOrigin('sp://custom-thing.foobar/foo/bar', ['sp://*.foobar'])
assert result is True
result = self.isValidOrigin('sp://custom-thing.bizbaz/foo/bar', ['sp://*.foobar'])
assert result is False
def test_unicode(self):
result = self.isValidOrigin(u'http://l\xf8calhost', [u'*.l\xf8calhost'])
assert result is True
def test_punycode(self):
result = self.isValidOrigin('http://xn--lcalhost-54a', [u'*.l\xf8calhost'])
assert result is True
result = self.isValidOrigin('http://xn--lcalhost-54a', [u'*.xn--lcalhost-54a'])
assert result is True
result = self.isValidOrigin(u'http://l\xf8calhost', [u'*.xn--lcalhost-54a'])
assert result is True
result = self.isValidOrigin('http://l\xc3\xb8calhost', [u'*.xn--lcalhost-54a'])
assert result is True
result = self.isValidOrigin('http://xn--lcalhost-54a', [u'l\xf8calhost'])
assert result is True
result = self.isValidOrigin('http://xn--lcalhost-54a:80', [u'l\xf8calhost:80'])
assert result is True
def test_unparseable_uri(self):
result = self.isValidOrigin('http://example.com', ['.'])
assert result is False
def test_wildcard_hostname_with_port(self):
result = self.isValidOrigin('http://example.com:1234', ['*:1234'])
assert result is True
class IsValidIPTestCase(TestCase):
def is_valid_ip(self, ip, inputs):
self.project.update_option('sentry:blacklisted_ips', inputs)
return is_valid_ip(ip, self.project)
def test_not_in_blacklist(self):
assert self.is_valid_ip('127.0.0.1', [])
assert self.is_valid_ip('127.0.0.1', ['0.0.0.0', '192.168.1.1', '10.0.0.0/8'])
def test_match_blacklist(self):
assert not self.is_valid_ip('127.0.0.1', ['127.0.0.1'])
assert not self.is_valid_ip('127.0.0.1', ['0.0.0.0', '127.0.0.1', '192.168.1.1'])
def test_match_blacklist_range(self):
assert not self.is_valid_ip('127.0.0.1', ['127.0.0.0/8'])
assert not self.is_valid_ip('127.0.0.1', ['0.0.0.0', '127.0.0.0/8', '192.168.1.0/8'])
class OriginFromRequestTestCase(TestCase):
def test_nothing(self):
request = HttpRequest()
assert origin_from_request(request) is None
def test_origin(self):
request = HttpRequest()
request.META['HTTP_ORIGIN'] = 'http://example.com'
request.META['HTTP_REFERER'] = 'nope'
assert origin_from_request(request) == 'http://example.com'
def test_referer(self):
request = HttpRequest()
request.META['HTTP_REFERER'] = 'http://example.com/foo/bar'
assert origin_from_request(request) == 'http://example.com'
def test_null_origin(self):
request = HttpRequest()
request.META['HTTP_ORIGIN'] = 'null'
assert origin_from_request(request) is None
request.META['HTTP_REFERER'] = 'http://example.com'
assert origin_from_request(request) == 'http://example.com'
|
|
#! /usr/bin/env python
# Copyright 2014 Jason F Nicholls
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This Python script is intended to be used in conjunction with PiMame,
# more specifically Mame4All; however, you can use it for whatever purpose
# you see a need for.
# Basically it reads the GPIO pins of the Pi and waits for events; when
# these events occur it reads 2 MCP23017 ICs for input.
# You can extend the Pi and use up to 8 ICs - just extend this code.
# Note that ALL the pins of the ICs are set up as inputs.
#
# For more information see picade.blogspot.com
#
# Hope you have as much fun with your Pi as I have had with mine.
import RPi.GPIO as GPIO
import uinput
import smbus
import time
# Custom Key Configuration, these keys will be mapped to your controls,
# below are the default MAME Controls.
DEFINED_KEYS = {
'MCPA_A_0': uinput.KEY_5, # Coin 1
'MCPA_A_1': uinput.KEY_6, # Coin 2
'MCPA_A_2': uinput.KEY_1, # Start 1
'MCPA_A_3': uinput.KEY_2, # Start 2
'MCPA_A_4': uinput.KEY_ESC, # Escape - exit the game
'MCPA_A_5': uinput.KEY_P, # Pause
'MCPA_A_6': uinput.KEY_ENTER, # Enter
'MCPA_A_7': uinput.KEY_TAB, # Tab - Access in game menu
'MCPA_B_0': uinput.KEY_G, # Player 2 Right
'MCPA_B_1': uinput.KEY_D, # Player 2 Left
'MCPA_B_2': uinput.KEY_F, # Player 2 Down
'MCPA_B_3': uinput.KEY_R, # Player 2 UP
'MCPA_B_4': uinput.KEY_RIGHT, # Player 1 Right
'MCPA_B_5': uinput.KEY_LEFT, # Player 1 Left
'MCPA_B_6': uinput.KEY_DOWN, # Player 1 Down
'MCPA_B_7': uinput.KEY_UP, # Player 1 Up
'MCPB_A_0': uinput.KEY_LEFTCTRL, # Player 1 Fire 1
'MCPB_A_1': uinput.KEY_LEFTALT, # Player 1 Fire 2
'MCPB_A_2': uinput.KEY_SPACE, # Player 1 Fire 3
'MCPB_A_3': uinput.KEY_LEFTSHIFT,# Player 1 Fire 4
'MCPB_A_4': uinput.KEY_Z, # Player 1 Fire 5
'MCPB_A_5': uinput.KEY_X, # Player 1 Fire 6
'MCPB_A_6': uinput.KEY_SPACE, # Not used
'MCPB_A_7': uinput.KEY_SPACE, # Not used
'MCPB_B_0': uinput.KEY_SPACE, # Not Used
'MCPB_B_1': uinput.KEY_SPACE, # Not Used
'MCPB_B_2': uinput.KEY_C, # C Player 2 Fire 6 // Normally Joystick 4 and is not set by default
'MCPB_B_3': uinput.KEY_E, # E Player 2 Fire 5 // Normally Joystick 5 and is not set by default
'MCPB_B_4': uinput.KEY_W, # W Player 2 Fire 4 // Normally Joystick 4 and is not set by default
'MCPB_B_5': uinput.KEY_Q, # Q Player 2 Fire 3 // Normally Joystick 5 and is not set by default
'MCPB_B_6': uinput.KEY_S, # S Player 2 Fire 2
'MCPB_B_7': uinput.KEY_A, # A Player 2 Fire 1
}
# Setup Variables and Constants
bus = smbus.SMBus(1) # Rev 2 Pi uses 1, Rev 1 Pi uses 0
# Device addresses refer to the A0-A2 Pins of the MCP23017 See the online manual
DEVICE_B = 0x20 # Device B address (A0-A2 all GND)
DEVICE_A = 0x21 # Device A address (A0 VCC, A1-A2 GND)
# The addresses below are all defined by the MCP23017 for more information see the online manual
# http://ww1.microchip.com/downloads/en/DeviceDoc/21952b.pdf
IODIRA = 0x00 # Pin direction register
IODIRB = 0x01
IOPOLA = 0x02 # Polarity
IOPOLB = 0x03
GPINTENA = 0x04 # Interrupt-on-change enable register
GPINTENB = 0x05
DEFVALA = 0x06 # Default compare value for interrupt-on-change
DEFVALB = 0x07
INTCONA = 0x08 # Interrupt Control Register
INTCONB = 0x09
IOCON = 0x0A # Expander configuration: bank, mirror, sequential, slew rate, hardware address, open-drain INT, INT polarity
#IOCON is also mirrored at 0x0B in bank 0 # Not required
GPPUA = 0x0C # Pull Up Register
GPPUB = 0x0D
INTFA = 0x0E # Flags the pin that caused the interrupt
INTFB = 0x0F
INTCAPA = 0x10 # The value of the pin at the time of the interrupt
INTCAPB = 0x11
GPIOA = 0x12 # Register for inputs
GPIOB = 0x13
OLATA = 0x14 # Output latch register
OLATB = 0x15
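# A quick illustrative read (assumes a device is actually wired at DEVICE_A):
# reading GPIOA returns the live state of bank A as a single byte, e.g.
#   value = bus.read_byte_data(DEVICE_A, GPIOA)
# With the polarity reversed in setup_mcp below, a pressed button shows up as a 1 bit.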
# Raspberry PI Pins will be used for interrupts
INT_PIN_A = 18
INT_PIN_B = 17
INT_PIN_C = 22
INT_PIN_D = 23
PIN_SHUTDOWN = 24 # We don't really need this, but I want to use it later to shut down the Pi
DOWN_KEY = 1 # Constant representing a push key value - required by uinput
UP_KEY = 0 # Constant representing a release key value - required by uinput
#Buttons state
lastButtonState = [UP_KEY for i in range(0,32)]
# Initialize PI Pins
GPIO.setmode(GPIO.BCM)
# We are not using external resistors, so set up the internal pull-up
# resistors for the Pi GPIO pins; this means the button will pull
# the line down from 3.3V to 0V
GPIO.setup(INT_PIN_A, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(INT_PIN_B, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(INT_PIN_C, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(INT_PIN_D, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# We could have done the same above; this is just put in to show the difference:
# here the base is 0 and the line will be pulled up to 3.3V if the button is pressed.
GPIO.setup(PIN_SHUTDOWN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Get a handle to the keyboard Python uinput, defining all the keys we will be using
keyboard_device = uinput.Device([uinput.KEY_5,uinput.KEY_6, uinput.KEY_1,
uinput.KEY_2, uinput.KEY_ESC, uinput.KEY_P,
uinput.KEY_ENTER, uinput.KEY_TAB, uinput.KEY_UP, uinput.KEY_DOWN,
uinput.KEY_LEFT, uinput.KEY_RIGHT, uinput.KEY_R, uinput.KEY_F,
uinput.KEY_D, uinput.KEY_G, uinput.KEY_LEFTCTRL, uinput.KEY_LEFTALT,
uinput.KEY_SPACE, uinput.KEY_LEFTSHIFT, uinput.KEY_Z, uinput.KEY_X,
uinput.KEY_A, uinput.KEY_S, uinput.KEY_Q, uinput.KEY_W, uinput.KEY_E, uinput.KEY_C])
# Method to Setup each of the MCP23017 Devices
def setup_mcp(device_address):
    # Bank 0 configuration: sequential operation disabled, INT active low
    # (INTA/INTB mirroring is left off since each INT line has its own Pi pin)
bus.write_byte_data(device_address, IOCON, 0b00100000)
    # Turn on the pull-up resistors for all pins on both banks
bus.write_byte_data(device_address, GPPUA, 0xFF)
bus.write_byte_data(device_address, GPPUB, 0xFF)
# Reverse the polarity
bus.write_byte_data(device_address, IOPOLA, 0xFF)
bus.write_byte_data(device_address, IOPOLB, 0xFF)
# All pins as INPUT
bus.write_byte_data(device_address, IODIRA, 0xFF)
bus.write_byte_data(device_address, IODIRB, 0xFF)
    # Turn on interrupt-on-change for all pins
bus.write_byte_data(device_address, GPINTENA, 0xFF)
bus.write_byte_data(device_address, GPINTENB, 0xFF)
    # Set INTCON to 0 so each pin is compared against its previous value (not DEFVAL)
bus.write_byte_data(device_address, INTCONA, 0x00)
bus.write_byte_data(device_address, INTCONB, 0x00)
def setKeys(device, intf, buttonPressed, buttonState):
    # I used to process only the key that changed; the problem is that if the
    # interrupt has fired but the buttons have been lifted, it causes a
    # deadlock - the virtual state and the real state get out of sync.
    # There are 2 chips with 2 banks of 8 pins, so that's 2 x 2 x 8 = 32 states:
    # MCPA_A_0 - MCPA_A_7 map to 0-7, while MCPB_B_0 - MCPB_B_7 map to 24-31
if (device == DEVICE_A):
code = "MCPA_"
lookup = 0
else:
code = "MCPB_"
lookup = 16
if (intf == INTFA):
code = code + "A_"
else:
code = code + "B_"
lookup = lookup + 8
for x in range(0,8):
y = 0x80 >> x
upDownValue = UP_KEY
if (buttonState & y > 0):
upDownValue = DOWN_KEY
tmpIndex = lookup + x
if (lastButtonState[tmpIndex] != upDownValue):
#print "Searching ", code , " - ", y, " pressed ", (buttonPressed & y), " up down ", (buttonState & y), " history ", lastButtonState[tmpIndex], " lookup ", tmpIndex
keyboard_device.emit(DEFINED_KEYS[code + str(x)], upDownValue)
lastButtonState[tmpIndex] = upDownValue
# Find out which button was pressed and trigger the input
def checkButton(device, intf, intcap):
buttonPressed = bus.read_byte_data(device, intf)
buttonValue = bus.read_byte_data(device, intcap)
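    # Reading INTCAP above also clears the interrupt condition on the MCP23017;
    # the GPIO register read below gives the current (live) state of the pins.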
if (intf == INTFA):
buttonState = bus.read_byte_data(device, GPIOA)
else:
buttonState = bus.read_byte_data(device, GPIOB)
setKeys(device, intf, buttonPressed, buttonState)
#Event Callbacks
def eventOnPinB(channel):
time.sleep(0.03) #Sleep to cancel out bounce
checkButton(DEVICE_B, INTFA, INTCAPA)
def eventOnPinD(channel):
time.sleep(0.03) #Sleep to cancel out bounce
checkButton(DEVICE_B, INTFB, INTCAPB)
#Event Callback for Pin A
def eventOnPinA(channel):
time.sleep(0.03) #Sleep to cancel out bounce
checkButton(DEVICE_A, INTFA, INTCAPA)
def eventOnPinC(channel):
time.sleep(0.03) #Sleep to cancel out bounce
checkButton(DEVICE_A, INTFB, INTCAPB)
# Initialise the devices
setup_mcp(DEVICE_A)
setup_mcp(DEVICE_B)
# Clear any data on the interrupt lines
bus.read_byte_data(DEVICE_A, INTCAPA)
bus.read_byte_data(DEVICE_A, INTCAPB)
bus.read_byte_data(DEVICE_B, INTCAPA)
bus.read_byte_data(DEVICE_B, INTCAPB)
# Add event detection
# Note that the bounce time is set to 0 and we handle the debounce delay in the callback.
# The reason is that if an event happens within the bounce window it would be ignored,
# but the MCP23017 would still flag the interrupt even though the GPIO event detect
# ignores it. No new event would ever be processed because the interrupt flags must be
# cleared; making the bounce time 0 means we always read them, and we simply wait in the
# callback just in case.
GPIO.add_event_detect(INT_PIN_A, GPIO.BOTH, callback=eventOnPinA)
GPIO.add_event_detect(INT_PIN_B, GPIO.BOTH, callback=eventOnPinB)
GPIO.add_event_detect(INT_PIN_C, GPIO.BOTH, callback=eventOnPinC)
GPIO.add_event_detect(INT_PIN_D, GPIO.BOTH, callback=eventOnPinD)
try:
GPIO.wait_for_edge(PIN_SHUTDOWN, GPIO.RISING) # Wait for shutdown on pin 24
except KeyboardInterrupt:
GPIO.cleanup()
GPIO.remove_event_detect(INT_PIN_A)
GPIO.remove_event_detect(INT_PIN_B)
GPIO.remove_event_detect(INT_PIN_C)
GPIO.remove_event_detect(INT_PIN_D)
GPIO.cleanup()
print "Done"
|
|
##################################################
# __ __ __ _ __
# / /_ ____/ / / /_ (_)___ _/ /_ ______________ ________
# / __ \/ __ / / __ \/ / __ `/ __ \/ ___/ ___/ __ \/ ___/ _ \
# / / / / /_/ / / / / / / /_/ / / / (__ ) /__/ /_/ / / / __/
# /_/ /_/\__,_/____/_/ /_/_/\__, /_/ /_/____/\___/\____/_/ \___/
# /_____/ /____/
#
##################################################
# An alternate high-score entry and display manager for
# the HdDMD PyProcGame "fork"
#
# Instructions for use are mostly the same as the stock
# http://pyprocgame.pindev.org/ref/highscore.html
#
#
# 1. Put this somewhere, and import it in your Game class
# 2. in the game Class's __init__ add the following:
#
# ## load your score files
# self.load_game_data('game_default_data.yaml','game_user_data.yaml')
#
# ## high score stuff:
# self.highscore_categories = []
#
# cat = highscore.HighScoreCategory()
# cat.game_data_key = 'ClassicHighScores'
# cat.titles = ['Grand Champion', 'High Score 1', 'High Score 2', 'High Score 3', 'High Score 4']
# self.highscore_categories.append(cat)
#
# 3. in the game class's game_ended() method add the following:
# def game_ended(self):
#
# seq_manager = HD_EntrySequenceManager(game=self, priority=2)
# seq_manager.finished_handler = self.highscore_entry_finished
# seq_manager.logic = highscore.CategoryLogic(game=self, categories=self.highscore_categories)
# self.modes.add(seq_manager)
#
# 4. note this refers to a method called 'highscore_entry_finished' -- define that in the Class, too:
#
# def highscore_entry_finished(self, mode):
# self.modes.remove(mode)
# super(BuffyGame, self).game_ended()
#
# # Do clean up stuff, e.g., turn off all the lamps
# for lamp in self.lamps:
# lamp.disable()
#
# # remove active game modes and re-add the attract mode
# self.modes.add(self.attract_mode)
# self.reset()
#
# 5. change the sizes in the code below
#
# 6. change the fonts in the code below
#
# Enjoy..?
import math
from procgame.game import Mode
from procgame import dmd
# from procgame import highscore
class HD_InitialEntryMode_ML(Mode):
"""Mode that prompts the player for their initials.
*left_text* and *right_text* are strings or arrays to be displayed at the
left and right corners of the display. If they are arrays they will be
rotated.
:attr:`entered_handler` is called once the initials have been confirmed.
This mode does not remove itself; this should be done in *entered_handler*."""
### LOOK HERE: change the following to suit your game and tastes...
# set these to your display's width and height
display_width = 450
display_height = 225
# change this to the number of chars wide you want your character selection palette to be
columns_of_chars_in_palette = 8
# the number of dots to encompass each character in the palette (includes surrounding space!)
space_per_char = 25 # make sure your font is smaller than this!
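    # With the defaults above (and Python 2 integer division, where 30/8 == 3),
    # the character palette frame works out to 25*(8+1) = 225 dots wide and
    # 25*(3+1.5) = 112.5 dots tall - see the frame creation in __init__ below.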
entered_handler = None
"""Method taking two parameters: `mode` and `inits`."""
char_back = '<'
char_done = '='
init_font = None
font = None
letters_font = None
def __init__(self, game, priority, left_text, right_text, entered_handler):
super(HD_InitialEntryMode_ML, self).__init__(game, priority)
self.entered_handler = entered_handler
# self.init_font = dmd.font_named('Font09Bx7.dmd')
# self.font = dmd.font_named('Font07x5.dmd')
# self.letters_font = dmd.font_named('Font07x5.dmd')
## YOU almost CERTAINLY need to change these...
self.init_font = self.game.fonts['large']
self.text_font = self.game.fonts['small']
self.letters_font = self.game.fonts['mono-tiny']
self.letters_font_mini = self.game.fonts['mono-micro']
self.init_font_height = self.init_font.size("Z")[1]
self.text_font_height = self.text_font.size(left_text)[1]
self.layer = dmd.GroupedLayer(self.display_width, self.display_height)
self.layer.opaque = True
self.layer.layers = []
if type(right_text) != list:
right_text = [right_text]
if type(left_text) != list:
left_text = [left_text]
seconds_per_text = 1.5
script = []
mh = 0
for text in left_text:
words = text.split()
h = self.text_font_height*len(words)
mh = max(h, mh)
frame = dmd.Frame(width=self.display_width, height=h)
i = 0
for w in words:
self.text_font.draw(frame, w, 0, i*self.text_font_height)
i+=1
script.append({'seconds':seconds_per_text, 'layer':dmd.FrameLayer(frame=frame)})
topthird_left_layer = dmd.ScriptedLayer(width=self.display_width, height=mh, script=script)
topthird_left_layer.composite_op = 'blacksrc'
topthird_left_layer.target_y = self.display_height/2 - mh/2
topthird_left_layer.target_x = 10
self.layer.layers += [topthird_left_layer]
script = []
mh = 0
for text in right_text:
words = text.split()
h = self.text_font_height*len(words)
mh = max(h, mh)
frame = dmd.Frame(width=self.display_width, height=h)
i = 0
for w in words:
self.text_font.draw(frame, w, self.display_width-(self.text_font.size(w)[0]), i*self.text_font_height)
i+=1
script.append({'seconds':seconds_per_text, 'layer':dmd.FrameLayer(frame=frame)})
topthird_right_layer = dmd.ScriptedLayer(width=self.display_width, height=mh, script=script)
topthird_right_layer.composite_op = 'blacksrc'
topthird_right_layer.target_y = self.display_height/2 - mh/2
topthird_right_layer.target_x = -10
self.layer.layers += [topthird_right_layer]
# the entered initials so far
self.inits_layer = dmd.HDTextLayer(self.display_width/2, self.init_font_height/2+5, self.init_font, "center", vert_justify="center", line_color=(128,128,255), line_width=1, interior_color=(0,0,192),fill_color=(0,0,0)).set_text("")
self.inits_layer.set_target_position(0, self.text_font_height+2)
self.layer.layers += [self.inits_layer]
self.letters = []
for idx in range(26):
self.letters += [chr(ord('A')+idx)]
self.letters += [' ', '.', self.char_back, self.char_done]
self.current_letter_index = 0
self.inits = self.letters[self.current_letter_index]
# Draw my fancy rows
w = self.space_per_char*(self.columns_of_chars_in_palette+1)
h = self.space_per_char*(30/self.columns_of_chars_in_palette+1.5)
print("About to create a frame with w=" + str(w) + "; h=" + str(h))
self.char_optsF = dmd.Frame(width=w, height=h)
for index in range(30):
x = index % self.columns_of_chars_in_palette
y = index / self.columns_of_chars_in_palette
(w,h) = self.letters_font.size(self.letters[index])
if(index<28):
self.letters_font.draw(self.char_optsF, self.letters[index], (x+1) * self.space_per_char - w/2, (y+1) * self.space_per_char - h/2)
elif(index==28):
(w,h) = self.letters_font_mini.size("DEL")
self.letters_font_mini.draw(self.char_optsF, "DEL", (x+1) * self.space_per_char - w/2, (y+1) * self.space_per_char - h/2)
elif(index==29):
(w,h) = self.letters_font_mini.size("END")
self.letters_font_mini.draw(self.char_optsF, "END", (x+1) * self.space_per_char - w/2, (y+1) * self.space_per_char - h/2)
fbox = dmd.Frame(width=self.space_per_char+2, height=self.space_per_char+2)
fbox.fill_rect(0, 0, self.space_per_char+2, self.space_per_char+2, (128,128,255))
fbox.fill_rect(2, 2, self.space_per_char-4+2, self.space_per_char-4+2, (0,64,128))
self.selection_box_layer = dmd.FrameLayer(opaque=False, frame=fbox)
self.selection_box_layer.composite_op = "max"
self.layer.layers += [self.selection_box_layer]
self.char_opts_layer = dmd.FrameLayer(opaque=False, frame=self.char_optsF)
self.char_opts_layer.set_target_position((self.display_width-(self.columns_of_chars_in_palette+1) * self.space_per_char)/2, self.init_font_height)
# self.char_opts_layer.composite_op = "blacksrc"
self.layer.layers += [self.char_opts_layer]
self.animate_to_index(0)
def mode_started(self):
pass
def mode_stopped(self):
pass
def animate_to_index(self, new_index, inc = 0):
x = new_index % self.columns_of_chars_in_palette
y = new_index / self.columns_of_chars_in_palette
(bx, by) = (self.char_opts_layer.target_x,self.char_opts_layer.target_y)
self.selection_box_layer.set_target_position( x * self.space_per_char + bx + self.space_per_char/2, y * self.space_per_char + by + self.space_per_char/2)
print("moving box to:", self.selection_box_layer.target_x, ",", self.selection_box_layer.target_y)
self.current_letter_index = new_index
# Now draw the initials, being careful to change the display based on which option is highlighted:
if(self.letters[self.current_letter_index]==self.char_back):
self.inits_layer.set_text(self.inits[:-2])
elif(self.letters[self.current_letter_index]==self.char_done):
self.inits_layer.set_text(self.inits[:-1], blink_frames = 15)
else:
self.inits_layer.set_text(self.inits)
def letter_increment(self, inc):
new_index = (self.current_letter_index + inc)
if new_index < 0:
new_index = len(self.letters) + new_index
elif new_index >= len(self.letters):
new_index = new_index - len(self.letters)
#print("letter_increment %d + %d = %d" % (self.current_letter_index, inc, new_index))
self.inits = self.inits[:-1] + self.letters[new_index]
self.animate_to_index(new_index, inc)
def letter_accept(self):
# TODO: Add 'back'/erase/end
letter = self.letters[self.current_letter_index]
if letter == self.char_back:
if len(self.inits) > 0:
self.inits = self.inits[:-1]
elif letter == self.char_done or len(self.inits) > 10:
self.inits = self.inits[:-1] # Strip off the done character
if self.entered_handler != None:
self.entered_handler(mode=self, inits=self.inits)
else:
self.game.logger.warning('InitialEntryMode finished but no entered_handler to notify!')
else:
self.inits += letter
self.letter_increment(0)
def sw_flipperLwL_active(self, sw):
self.periodic_left()
return False
def sw_flipperLwL_inactive(self, sw):
self.cancel_delayed('periodic_movement')
def sw_flipperLwR_active(self, sw):
self.periodic_right()
return False
def sw_flipperLwR_inactive(self, sw):
self.cancel_delayed('periodic_movement')
def periodic_left(self):
self.letter_increment(-1)
self.delay(name='periodic_movement', event_type=None, delay=0.2, handler=self.periodic_left)
def periodic_right(self):
self.letter_increment(1)
self.delay(name='periodic_movement', event_type=None, delay=0.2, handler=self.periodic_right)
def sw_startButton_active(self, sw):
self.letter_accept()
return True
####################################################################################################################
####################################################################################################################
####################################################################################################################
####################################################################################################################
####################################################################################################################
####################################################################################################################
### This is the old, scrolling mode from Adam that I tried to adapt to be more
# flexible with fonts and sizes. I bailed on it, but I'm leaving it here in case
# you want to try to get it working (e.g., if your display isn't as huge as mine)
class HD_InitialEntryMode_ML_old(Mode):
"""Mode that prompts the player for their initials.
*left_text* and *right_text* are strings or arrays to be displayed at the
left and right corners of the display. If they are arrays they will be
rotated.
:attr:`entered_handler` is called once the initials have been confirmed.
This mode does not remove itself; this should be done in *entered_handler*."""
entered_handler = None
"""Method taking two parameters: `mode` and `inits`."""
char_back = '<'
char_done = '='
init_font = None
font = None
letters_font = None
def __init__(self, game, priority, left_text, right_text, entered_handler):
        super(HD_InitialEntryMode_ML_old, self).__init__(game, priority)
self.entered_handler = entered_handler
# self.init_font = dmd.font_named('Font09Bx7.dmd')
# self.font = dmd.font_named('Font07x5.dmd')
# self.letters_font = dmd.font_named('Font07x5.dmd')
self.init_font = self.game.fonts['small']
self.text_font = self.game.fonts['large']
self.letters_font = self.game.fonts['med']
self.init_font_height = self.init_font.size("Z")[1]
self.text_font_height = self.text_font.size("Z")[1]
self.layer = dmd.GroupedLayer(480, 240)
self.layer.opaque = True
self.layer.layers = []
if type(right_text) != list:
right_text = [right_text]
if type(left_text) != list:
left_text = [left_text]
seconds_per_text = 1.5
script = []
for text in left_text:
frame = dmd.Frame(width=450, height=self.text_font_height)
self.text_font.draw(frame, text, 0, 0)
script.append({'seconds':seconds_per_text, 'layer':dmd.FrameLayer(frame=frame)})
topthird_left_layer = dmd.ScriptedLayer(width=480, height=self.text_font_height, script=script)
topthird_left_layer.composite_op = 'blacksrc'
self.layer.layers += [topthird_left_layer]
script = []
for text in right_text:
frame = dmd.Frame(width=480, height=self.text_font_height)
self.text_font.draw(frame, text, 480-(self.text_font.size(text)[0]), 0)
script.append({'seconds':seconds_per_text, 'layer':dmd.FrameLayer(frame=frame)})
topthird_right_layer = dmd.ScriptedLayer(width=480, height=self.text_font_height, script=script)
topthird_right_layer.composite_op = 'blacksrc'
self.layer.layers += [topthird_right_layer]
self.inits_frame = dmd.Frame(width=480, height=self.init_font_height)
inits_layer = dmd.FrameLayer(opaque=False, frame=self.inits_frame)
inits_layer.set_target_position(0, self.text_font_height+2)
self.layer.layers += [inits_layer]
self.lowerhalf_layer = dmd.FrameQueueLayer(opaque=False, hold=True)
self.lowerhalf_layer.set_target_position(0, self.init_font_height+self.text_font_height)
self.layer.layers += [self.lowerhalf_layer]
self.letters = []
for idx in range(26):
self.letters += [chr(ord('A')+idx)]
self.letters += [' ', '.', self.char_back, self.char_done]
self.current_letter_index = 0
self.inits = self.letters[self.current_letter_index]
self.animate_to_index(0)
def mode_started(self):
pass
def mode_stopped(self):
pass
def animate_to_index(self, new_index, inc = 0):
letter_spread = 20
letter_width = self.letters_font_width
if inc < 0:
rng = range(inc * letter_spread, 1)
elif inc > 0:
rng = range(inc * letter_spread)[::-1]
else:
rng = [0]
#print rng
for x in rng:
frame = dmd.Frame(width=450, height=self.init_font_height+2)
for offset in range(-7, 8):
index = new_index - offset
#print "Index %d len=%d" % (index, len(self.letters))
if index < 0:
index = len(self.letters) + index
elif index >= len(self.letters):
index = index - len(self.letters)
(w, h) = self.letters_font.size(self.letters[index])
#print "Drawing %d w=%d" % (index, w)
self.letters_font.draw(frame, self.letters[index], 450/2 - offset * letter_spread - letter_width/2 + x, 0)
frame.fill_rect(64-5, 0, 1, self.init_font_height+2, 1)
frame.fill_rect(64+5, 0, 1, self.init_font_height+2, 1)
self.lowerhalf_layer.frames += [frame]
self.current_letter_index = new_index
# Prune down the frames list so we don't get too far behind while animating
x = 0
while len(self.lowerhalf_layer.frames) > 15 and x < (len(self.lowerhalf_layer.frames)-1):
del self.lowerhalf_layer.frames[x]
x += 2
# Now draw the top right panel, with the selected initials in order:
self.inits_frame.clear()
init_spread = self.init_font_width + 3
x_offset = self.inits_frame.width/2 - len(self.inits) * init_spread / 2
for x in range(len(self.inits)):
self.init_font.draw(self.inits_frame, self.inits[x], x * init_spread + x_offset, 0)
self.inits_frame.fill_rect((len(self.inits)-1) * init_spread + x_offset, 9, 8, 1, 1)
def letter_increment(self, inc):
new_index = (self.current_letter_index + inc)
if new_index < 0:
new_index = len(self.letters) + new_index
elif new_index >= len(self.letters):
new_index = new_index - len(self.letters)
#print("letter_increment %d + %d = %d" % (self.current_letter_index, inc, new_index))
self.inits = self.inits[:-1] + self.letters[new_index]
self.animate_to_index(new_index, inc)
def letter_accept(self):
# TODO: Add 'back'/erase/end
letter = self.letters[self.current_letter_index]
if letter == self.char_back:
if len(self.inits) > 0:
self.inits = self.inits[:-1]
elif letter == self.char_done or len(self.inits) > 10:
self.inits = self.inits[:-1] # Strip off the done character
if self.entered_handler != None:
self.entered_handler(mode=self, inits=self.inits)
else:
self.game.logger.warning('InitialEntryMode finished but no entered_handler to notify!')
else:
self.inits += letter
self.letter_increment(0)
def sw_flipperLwL_active(self, sw):
self.periodic_left()
return False
def sw_flipperLwL_inactive(self, sw):
self.cancel_delayed('periodic_movement')
def sw_flipperLwR_active(self, sw):
self.periodic_right()
return False
def sw_flipperLwR_inactive(self, sw):
self.cancel_delayed('periodic_movement')
def periodic_left(self):
self.letter_increment(-1)
self.delay(name='periodic_movement', event_type=None, delay=0.2, handler=self.periodic_left)
def periodic_right(self):
self.letter_increment(1)
self.delay(name='periodic_movement', event_type=None, delay=0.2, handler=self.periodic_right)
def sw_startButton_active(self, sw):
self.letter_accept()
return True
# class HD_EntrySequenceManager(highscore.EntrySequenceManager):
# def create_highscore_entry_mode(self, left_text, right_text, entered_handler):
# """Subclasses can override this to supply their own entry handler."""
# return HD_InitialEntryMode_ML(game=self.game, priority=self.priority+1, left_text=left_text, right_text=right_text, entered_handler=entered_handler)
####
# The following allows me to test just the high score entry mode, and does so
# with this hd_highscore.py file in a sub-directory of my game directory
# You likely need to change this...
def main():
import pinproc
# add the directory one level up to the path and switch to it
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../../SampleGame/'))
os.chdir(os.path.join(sys.path[0], '../../SampleGame/'))
# import your game class and instantiate it
import T2Game
game = T2Game.T2Game()
game.modes.modes = []
    # (But you don't want to do that? Fine - then do something like this instead...)
# game = procgame.game.BasicGame(pinproc.MachineTypeWPC)
# game.load_config('../sof.yaml') # in VP this is found in c:\P-ROC\shared\config\
handler = None
game.add_player() # can't test high-score entry without a player!
mode = HD_InitialEntryMode_ML(game, 3, "Player 1", "Grand Champion", handler)
game.modes.add(mode)
game.run_loop()
if __name__ == '__main__':
main()
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests copying images to a Glance API server which uses a filesystem-
based storage backend.
"""
import hashlib
import tempfile
import time
import httplib2
from oslo_serialization import jsonutils
from oslo_utils import units
from six.moves import http_client
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance.tests import functional
from glance.tests.functional.store_utils import get_http_uri
from glance.tests.functional.store_utils import setup_http
from glance.tests.utils import skip_if_disabled
FIVE_KB = 5 * units.Ki
class TestCopyToFile(functional.FunctionalTest):
"""
Functional tests for copying images from the HTTP storage
backend to file
"""
def _do_test_copy_from(self, from_store, get_uri):
"""
Ensure we can copy from an external image in from_store.
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
setup_http(self)
# POST /images with public image to be stored in from_store,
# to stand in for the 'external' image
image_data = "*" * FIVE_KB
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'external',
'X-Image-Meta-Store': from_store,
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True'}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(http_client.CREATED, response.status, content)
data = jsonutils.loads(content)
original_image_id = data['image']['id']
copy_from = get_uri(self, original_image_id)
# POST /images with public image copied from_store (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(http_client.CREATED, response.status, content)
data = jsonutils.loads(content)
copy_image_id = data['image']['id']
self.assertNotEqual(copy_image_id, original_image_id)
# GET image and make sure image content is as expected
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
def _await_status(expected_status):
for i in range(100):
time.sleep(0.01)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(http_client.OK, response.status)
if response['x-image-meta-status'] == expected_status:
return
self.fail('unexpected image status %s' %
response['x-image-meta-status'])
_await_status('active')
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
self.assertEqual(str(FIVE_KB), response['content-length'])
self.assertEqual("*" * FIVE_KB, content)
self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
hashlib.md5(content).hexdigest())
self.assertEqual(FIVE_KB, data['image']['size'])
self.assertEqual("copied", data['image']['name'])
# DELETE original image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
original_image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(http_client.OK, response.status)
# GET image again to make sure the existence of the original
# image in from_store is not depended on
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
self.assertEqual(str(FIVE_KB), response['content-length'])
self.assertEqual("*" * FIVE_KB, content)
self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
hashlib.md5(content).hexdigest())
self.assertEqual(FIVE_KB, data['image']['size'])
self.assertEqual("copied", data['image']['name'])
# DELETE copied image
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(http_client.OK, response.status)
self.stop_servers()
@skip_if_disabled
def test_copy_from_http_store(self):
"""
Ensure we can copy from an external image in HTTP store.
"""
self._do_test_copy_from('file', get_http_uri)
@skip_if_disabled
def test_copy_from_http_exists(self):
"""Ensure we can copy from an external image in HTTP."""
self.cleanup()
self.start_servers(**self.__dict__.copy())
setup_http(self)
copy_from = get_http_uri(self, 'foobar')
# POST /images with public image copied from HTTP (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(http_client.CREATED, response.status, content)
data = jsonutils.loads(content)
copy_image_id = data['image']['id']
self.assertEqual('queued', data['image']['status'], content)
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
copy_image_id)
def _await_status(expected_status):
for i in range(100):
time.sleep(0.01)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(http_client.OK, response.status)
if response['x-image-meta-status'] == expected_status:
return
self.fail('unexpected image status %s' %
response['x-image-meta-status'])
_await_status('active')
# GET image and make sure image content is as expected
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
self.assertEqual(str(FIVE_KB), response['content-length'])
self.assertEqual("*" * FIVE_KB, content)
self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
hashlib.md5(content).hexdigest())
# DELETE copied image
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(http_client.OK, response.status)
self.stop_servers()
@skip_if_disabled
def test_copy_from_http_nonexistent_location_url(self):
# Ensure HTTP 404 response returned when try to create
# image with non-existent http location URL.
self.cleanup()
self.start_servers(**self.__dict__.copy())
setup_http(self)
uri = get_http_uri(self, 'foobar')
copy_from = uri.replace('images', 'snafu')
# POST /images with public image copied from HTTP (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(http_client.NOT_FOUND, response.status, content)
expected = 'HTTP datastore could not find image at URI.'
self.assertIn(expected, content)
self.stop_servers()
@skip_if_disabled
def test_copy_from_file(self):
"""
Ensure we can't copy from file
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
with tempfile.NamedTemporaryFile() as image_file:
image_file.write("XXX")
image_file.flush()
copy_from = 'file://' + image_file.name
# POST /images with public image copied from file (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': copy_from}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(http_client.BAD_REQUEST, response.status, content)
expected = 'External sources are not supported: \'%s\'' % copy_from
msg = 'expected "%s" in "%s"' % (expected, content)
self.assertIn(expected, content, msg)
self.stop_servers()
@skip_if_disabled
def test_copy_from_swift_config(self):
"""
Ensure we can't copy from swift+config
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
# POST /images with public image copied from file (to file)
headers = {'X-Image-Meta-Name': 'copied',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Glance-API-Copy-From': 'swift+config://xxx'}
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(http_client.BAD_REQUEST, response.status, content)
expected = 'External sources are not supported: \'swift+config://xxx\''
msg = 'expected "%s" in "%s"' % (expected, content)
self.assertIn(expected, content, msg)
self.stop_servers()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.apphosting.tools.devappserver2.api_server."""
import cStringIO
import pickle
import tempfile
import unittest
import urllib
import wsgiref.util
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import urlfetch_service_pb
from google.appengine.api import user_service_pb
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2 import wsgi_request_info
from google.appengine.tools.devappserver2 import wsgi_test_utils
APP_ID = 'test'
APPLICATION_ROOT = '/tmp'
TRUSTED = False
_, BLOBSTORE_PATH = tempfile.mkstemp(prefix='ae-blobstore')
_, DATASTORE_PATH = tempfile.mkstemp(prefix='ae-datastore')
DATASTORE_REQUIRE_INDEXES = False
IMAGES_HOST_PREFIX = 'localhost:8080'
LOGS_PATH = ':memory:'
MAIL_SMTP_HOST = 'localhost'
MAIL_SMTP_PORT = 80
MAIL_SMTP_USER = 'user'
MAIL_SMTP_PASSWORD = 'abc123'
MAIL_ENABLE_SENDMAIL = False
MAIL_SHOW_MAIL_BODY = True
_, MATCHER_PROSPECTIVE_SEARCH_PATH = tempfile.mkstemp(prefix='ae-ps')
TASKQUEUE_AUTO_RUN_TASKS = False
TASKQUEUE_DEFAULT_HTTP_SERVER = 'localhost:8080'
USER_LOGIN_URL = 'https://localhost/Login?continue=%s'
USER_LOGOUT_URL = 'https://localhost/Logout?continue=%s'
request_data = wsgi_request_info.WSGIRequestInfo(None)
class FakeURLFetchServiceStub(apiproxy_stub.APIProxyStub):
def __init__(self):
super(FakeURLFetchServiceStub, self).__init__('urlfetch')
def _Dynamic_Fetch(self, request, unused_response):
if request.url() == 'exception':
raise IOError('the remote error')
elif request.url() == 'application_error':
raise apiproxy_errors.ApplicationError(23, 'details')
def setup_stubs():
"""Setup the API stubs. This can only be done once."""
api_server.test_setup_stubs(
request_data,
app_id=APP_ID,
application_root=APPLICATION_ROOT,
trusted=TRUSTED,
blobstore_path=BLOBSTORE_PATH,
datastore_consistency=datastore_stub_util.TimeBasedHRConsistencyPolicy(),
datastore_path=DATASTORE_PATH,
datastore_require_indexes=DATASTORE_REQUIRE_INDEXES,
images_host_prefix=IMAGES_HOST_PREFIX,
logs_path=':memory:',
mail_smtp_host=MAIL_SMTP_HOST,
mail_smtp_port=MAIL_SMTP_PORT,
mail_smtp_user=MAIL_SMTP_USER,
mail_smtp_password=MAIL_SMTP_PASSWORD,
mail_enable_sendmail=MAIL_ENABLE_SENDMAIL,
mail_show_mail_body=MAIL_SHOW_MAIL_BODY,
matcher_prospective_search_path=MATCHER_PROSPECTIVE_SEARCH_PATH,
taskqueue_auto_run_tasks=TASKQUEUE_AUTO_RUN_TASKS,
taskqueue_default_http_server=TASKQUEUE_DEFAULT_HTTP_SERVER,
user_login_url=USER_LOGIN_URL,
user_logout_url=USER_LOGOUT_URL)
apiproxy_stub_map.apiproxy.ReplaceStub('urlfetch', FakeURLFetchServiceStub())
class TestAPIServer(wsgi_test_utils.WSGITestCase):
"""Tests for api_server.APIServer."""
def setUp(self):
setup_stubs()
self.server = api_server.APIServer('localhost',
0,
APP_ID)
def tearDown(self):
api_server.cleanup_stubs()
def _assert_remote_call(
self, expected_remote_response, stub_request, service, method):
"""Test a call across the remote API to the API server.
Args:
expected_remote_response: the remote response that is expected.
stub_request: the request protobuf that the stub expects.
service: the stub's service name.
method: which service method to call.
"""
request_environ = {'HTTP_HOST': 'machine:8080'}
wsgiref.util.setup_testing_defaults(request_environ)
with request_data.request(request_environ, None) as request_id:
remote_request = remote_api_pb.Request()
remote_request.set_service_name(service)
remote_request.set_method(method)
remote_request.set_request(stub_request.Encode())
remote_request.set_request_id(request_id)
remote_payload = remote_request.Encode()
environ = {'CONTENT_LENGTH': len(remote_payload),
'REQUEST_METHOD': 'POST',
'wsgi.input': cStringIO.StringIO(remote_payload)}
expected_headers = {'Content-Type': 'application/octet-stream'}
self.assertResponse('200 OK',
expected_headers,
expected_remote_response.Encode(),
self.server,
environ)
def test_user_api_call(self):
logout_response = user_service_pb.CreateLogoutURLResponse()
logout_response.set_logout_url(
USER_LOGOUT_URL % urllib.quote('http://machine:8080/crazy_logout'))
expected_remote_response = remote_api_pb.Response()
expected_remote_response.set_response(logout_response.Encode())
logout_request = user_service_pb.CreateLogoutURLRequest()
logout_request.set_destination_url('/crazy_logout')
self._assert_remote_call(
expected_remote_response, logout_request, 'user', 'CreateLogoutURL')
def test_GET(self):
environ = {'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'rtok=23'}
self.assertResponse('200 OK',
{'Content-Type': 'text/plain'},
"{app_id: test, rtok: '23'}\n",
self.server,
environ)
def test_unsupported_method(self):
environ = {'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'rtok=23'}
self.assertResponse('405 Method Not Allowed',
{},
'',
self.server,
environ)
def test_exception(self):
urlfetch_request = urlfetch_service_pb.URLFetchRequest()
urlfetch_request.set_url('exception')
urlfetch_request.set_method(urlfetch_service_pb.URLFetchRequest.GET)
expected_remote_response = remote_api_pb.Response()
expected_remote_response.set_exception(pickle.dumps(
RuntimeError(repr(IOError('the remote error')))))
self._assert_remote_call(
expected_remote_response, urlfetch_request, 'urlfetch', 'Fetch')
def test_application_error(self):
urlfetch_request = urlfetch_service_pb.URLFetchRequest()
urlfetch_request.set_url('application_error')
urlfetch_request.set_method(urlfetch_service_pb.URLFetchRequest.GET)
expected_remote_response = remote_api_pb.Response()
expected_remote_response.mutable_application_error().set_code(23)
expected_remote_response.mutable_application_error().set_detail('details')
expected_remote_response.set_exception(pickle.dumps(
apiproxy_errors.ApplicationError(23, 'details')))
self._assert_remote_call(
expected_remote_response, urlfetch_request, 'urlfetch', 'Fetch')
if __name__ == '__main__':
unittest.main()
|
|
"""
Copyright (c) 2016 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import calendar
import logging
import traceback
from cStringIO import StringIO
from datetime import datetime
from multiprocessing.dummy import Pool, Manager
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pytz
import shapely.geometry
import shapely.wkt
from backports.functools_lru_cache import lru_cache
from nexustiles.nexustiles import NexusTileService
from pytz import timezone
from scipy import stats
from webservice import Filtering as filtering
from webservice.NexusHandler import NexusHandler, nexus_handler
from webservice.webmodel import NexusResults, NexusProcessingException, NoDataException
SENTINEL = 'STOP'
EPOCH = timezone('UTC').localize(datetime(1970, 1, 1))
ISO_8601 = '%Y-%m-%dT%H:%M:%S%z'
@nexus_handler
class TimeSeriesHandlerImpl(NexusHandler):
name = "Time Series"
path = "/stats"
description = "Computes a time series plot between one or more datasets given an arbitrary geographical area and time range"
params = {
"ds": {
"name": "Dataset",
"type": "comma-delimited string",
"description": "The dataset(s) Used to generate the Time Series. Required"
},
"startTime": {
"name": "Start Time",
"type": "string",
"description": "Starting time in format YYYY-MM-DDTHH:mm:ssZ or seconds since EPOCH. Required"
},
"endTime": {
"name": "End Time",
"type": "string",
"description": "Ending time in format YYYY-MM-DDTHH:mm:ssZ or seconds since EPOCH. Required"
},
"b": {
"name": "Bounding box",
"type": "comma-delimited float",
"description": "Minimum (Western) Longitude, Minimum (Southern) Latitude, "
"Maximum (Eastern) Longitude, Maximum (Northern) Latitude. Required"
},
"seasonalFilter": {
"name": "Compute Seasonal Cycle Filter",
"type": "boolean",
"description": "Flag used to specify if the seasonal averages should be computed during "
"Time Series computation. Optional (Default: True)"
},
"lowPassFilter": {
"name": "Compute Low Pass Filter",
"type": "boolean",
"description": "Flag used to specify if a low pass filter should be computed during "
"Time Series computation. Optional (Default: True)"
}
}
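    # A minimal request sketch (illustration only, not part of the original handler);
    # the dataset name is a placeholder and the host/port depend on the deployment:
    #   GET /stats?ds=MY_SST_DATASET&startTime=2015-01-01T00:00:00Z&endTime=2015-12-31T23:59:59Z
    #       &b=-150.0,40.0,-120.0,55.0&seasonalFilter=true&lowPassFilter=true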
singleton = True
def __init__(self):
NexusHandler.__init__(self)
self.log = logging.getLogger(__name__)
def parse_arguments(self, request):
# Parse input arguments
self.log.debug("Parsing arguments")
try:
ds = request.get_dataset()
if type(ds) != list and type(ds) != tuple:
ds = (ds,)
except:
raise NexusProcessingException(
reason="'ds' argument is required. Must be comma-delimited string",
code=400)
# Do not allow time series on Climatology
if next(iter([clim for clim in ds if 'CLIM' in clim]), False):
raise NexusProcessingException(reason="Cannot compute time series on a climatology", code=400)
try:
bounding_polygon = request.get_bounding_polygon()
request.get_min_lon = lambda: bounding_polygon.bounds[0]
request.get_min_lat = lambda: bounding_polygon.bounds[1]
request.get_max_lon = lambda: bounding_polygon.bounds[2]
request.get_max_lat = lambda: bounding_polygon.bounds[3]
except:
try:
west, south, east, north = request.get_min_lon(), request.get_min_lat(), \
request.get_max_lon(), request.get_max_lat()
bounding_polygon = shapely.geometry.Polygon(
[(west, south), (east, south), (east, north), (west, north), (west, south)])
except:
raise NexusProcessingException(
reason="'b' argument is required. Must be comma-delimited float formatted as "
"Minimum (Western) Longitude, Minimum (Southern) Latitude, "
"Maximum (Eastern) Longitude, Maximum (Northern) Latitude",
code=400)
try:
start_time = request.get_start_datetime()
except:
raise NexusProcessingException(
reason="'startTime' argument is required. Can be int value seconds from epoch or "
"string format YYYY-MM-DDTHH:mm:ssZ",
code=400)
try:
end_time = request.get_end_datetime()
except:
raise NexusProcessingException(
reason="'endTime' argument is required. Can be int value seconds from epoch or "
"string format YYYY-MM-DDTHH:mm:ssZ",
code=400)
if start_time > end_time:
raise NexusProcessingException(
reason="The starting time must be before the ending time. Received startTime: %s, endTime: %s" % (
request.get_start_datetime().strftime(ISO_8601), request.get_end_datetime().strftime(ISO_8601)),
code=400)
apply_seasonal_cycle_filter = request.get_apply_seasonal_cycle_filter()
apply_low_pass_filter = request.get_apply_low_pass_filter()
start_seconds_from_epoch = long((start_time - EPOCH).total_seconds())
end_seconds_from_epoch = long((end_time - EPOCH).total_seconds())
return ds, bounding_polygon, start_seconds_from_epoch, end_seconds_from_epoch, \
apply_seasonal_cycle_filter, apply_low_pass_filter
def calc(self, request, **args):
"""
:param request: StatsComputeOptions
:param args: dict
:return:
"""
ds, bounding_polygon, start_seconds_from_epoch, end_seconds_from_epoch, \
apply_seasonal_cycle_filter, apply_low_pass_filter = self.parse_arguments(request)
resultsRaw = []
for shortName in ds:
results, meta = self.getTimeSeriesStatsForBoxSingleDataSet(bounding_polygon,
shortName,
start_seconds_from_epoch,
end_seconds_from_epoch,
apply_seasonal_cycle_filter=apply_seasonal_cycle_filter,
apply_low_pass_filter=apply_low_pass_filter)
resultsRaw.append([results, meta])
the_time = datetime.now()
results = self._mergeResults(resultsRaw)
if len(ds) == 2:
try:
stats = TimeSeriesHandlerImpl.calculate_comparison_stats(results)
except Exception:
stats = {}
tb = traceback.format_exc()
self.log.warn("Error when calculating comparison stats:\n%s" % tb)
else:
stats = {}
meta = []
for singleRes in resultsRaw:
meta.append(singleRes[1])
res = TimeSeriesResults(results=results, meta=meta, stats=stats,
computeOptions=None, minLat=bounding_polygon.bounds[1],
maxLat=bounding_polygon.bounds[3], minLon=bounding_polygon.bounds[0],
maxLon=bounding_polygon.bounds[2], ds=ds, startTime=start_seconds_from_epoch,
endTime=end_seconds_from_epoch)
self.log.info("Merging results and calculating comparisons took %s" % (str(datetime.now() - the_time)))
return res
def getTimeSeriesStatsForBoxSingleDataSet(self, bounding_polygon, ds, start_seconds_from_epoch,
end_seconds_from_epoch,
apply_seasonal_cycle_filter=True, apply_low_pass_filter=True):
the_time = datetime.now()
daysinrange = self._tile_service.find_days_in_range_asc(bounding_polygon.bounds[1],
bounding_polygon.bounds[3],
bounding_polygon.bounds[0],
bounding_polygon.bounds[2],
ds,
start_seconds_from_epoch,
end_seconds_from_epoch)
self.log.info("Finding days in range took %s for dataset %s" % (str(datetime.now() - the_time), ds))
if len(daysinrange) == 0:
raise NoDataException(reason="No data found for selected timeframe")
the_time = datetime.now()
maxprocesses = int(self.algorithm_config.get("multiprocessing", "maxprocesses"))
results = []
if maxprocesses == 1:
calculator = TimeSeriesCalculator()
for dayinseconds in daysinrange:
result = calculator.calc_average_on_day(bounding_polygon.wkt, ds, dayinseconds)
results += [result] if result else []
else:
            # Create a task to calc the average for each day
manager = Manager()
work_queue = manager.Queue()
done_queue = manager.Queue()
for dayinseconds in daysinrange:
work_queue.put(
('calc_average_on_day', bounding_polygon.wkt, ds, dayinseconds))
[work_queue.put(SENTINEL) for _ in xrange(0, maxprocesses)]
# Start new processes to handle the work
pool = Pool(maxprocesses)
[pool.apply_async(pool_worker, (work_queue, done_queue)) for _ in xrange(0, maxprocesses)]
pool.close()
            # Collect the results as [{day (in seconds), statistics for that day}]
for i in xrange(0, len(daysinrange)):
result = done_queue.get()
try:
error_str = result['error']
self.log.error(error_str)
raise NexusProcessingException(reason="Error calculating average by day.")
except KeyError:
pass
results += [result] if result else []
pool.terminate()
manager.shutdown()
results = sorted(results, key=lambda entry: entry["time"])
self.log.info("Time series calculation took %s for dataset %s" % (str(datetime.now() - the_time), ds))
if apply_seasonal_cycle_filter:
the_time = datetime.now()
for result in results:
month = datetime.utcfromtimestamp(result['time']).month
month_mean, month_max, month_min = self.calculate_monthly_average(month, bounding_polygon.wkt, ds)
seasonal_mean = result['mean'] - month_mean
seasonal_min = result['min'] - month_min
seasonal_max = result['max'] - month_max
result['meanSeasonal'] = seasonal_mean
result['minSeasonal'] = seasonal_min
result['maxSeasonal'] = seasonal_max
self.log.info(
"Seasonal calculation took %s for dataset %s" % (str(datetime.now() - the_time), ds))
the_time = datetime.now()
filtering.applyAllFiltersOnField(results, 'mean', applySeasonal=False, applyLowPass=apply_low_pass_filter)
filtering.applyAllFiltersOnField(results, 'max', applySeasonal=False, applyLowPass=apply_low_pass_filter)
filtering.applyAllFiltersOnField(results, 'min', applySeasonal=False, applyLowPass=apply_low_pass_filter)
if apply_seasonal_cycle_filter and apply_low_pass_filter:
try:
filtering.applyFiltersOnField(results, 'meanSeasonal', applySeasonal=False, applyLowPass=True,
append="LowPass")
filtering.applyFiltersOnField(results, 'minSeasonal', applySeasonal=False, applyLowPass=True,
append="LowPass")
filtering.applyFiltersOnField(results, 'maxSeasonal', applySeasonal=False, applyLowPass=True,
append="LowPass")
except Exception as e:
# If it doesn't work log the error but ignore it
tb = traceback.format_exc()
self.log.warn("Error calculating SeasonalLowPass filter:\n%s" % tb)
self.log.info(
"LowPass filter calculation took %s for dataset %s" % (str(datetime.now() - the_time), ds))
return results, {}
@lru_cache()
def calculate_monthly_average(self, month=None, bounding_polygon_wkt=None, ds=None):
min_date, max_date = self.get_min_max_date(ds=ds)
monthly_averages, monthly_counts = [], []
monthly_mins, monthly_maxes = [], []
bounding_polygon = shapely.wkt.loads(bounding_polygon_wkt)
for year in range(min_date.year, max_date.year + 1):
beginning_of_month = datetime(year, month, 1)
end_of_month = datetime(year, month, calendar.monthrange(year, month)[1], 23, 59, 59)
start = (pytz.UTC.localize(beginning_of_month) - EPOCH).total_seconds()
end = (pytz.UTC.localize(end_of_month) - EPOCH).total_seconds()
tile_stats = self._tile_service.find_tiles_in_polygon(bounding_polygon, ds, start, end,
fl=('id,'
'tile_avg_val_d,tile_count_i,'
'tile_min_val_d,tile_max_val_d,'
'tile_min_lat,tile_max_lat,'
'tile_min_lon,tile_max_lon'),
fetch_data=False)
if len(tile_stats) == 0:
continue
# Split list into tiles on the border of the bounding box and tiles completely inside the bounding box.
border_tiles, inner_tiles = [], []
for tile in tile_stats:
inner_tiles.append(tile) if bounding_polygon.contains(shapely.geometry.box(tile.bbox.min_lon,
tile.bbox.min_lat,
tile.bbox.max_lon,
tile.bbox.max_lat)) else border_tiles.append(
tile)
# We can use the stats of the inner tiles directly
tile_means = [tile.tile_stats.mean for tile in inner_tiles]
tile_mins = [tile.tile_stats.min for tile in inner_tiles]
tile_maxes = [tile.tile_stats.max for tile in inner_tiles]
tile_counts = [tile.tile_stats.count for tile in inner_tiles]
            # Border tiles need to have the data loaded, masked, and stats recalculated
border_tiles = list(self._tile_service.fetch_data_for_tiles(*border_tiles))
border_tiles = self._tile_service.mask_tiles_to_polygon(bounding_polygon, border_tiles)
for tile in border_tiles:
tile.update_stats()
tile_means.append(tile.tile_stats.mean)
tile_mins.append(tile.tile_stats.min)
tile_maxes.append(tile.tile_stats.max)
tile_counts.append(tile.tile_stats.count)
tile_means = np.array(tile_means)
tile_mins = np.array(tile_mins)
tile_maxes = np.array(tile_maxes)
tile_counts = np.array(tile_counts)
sum_tile_counts = np.sum(tile_counts) * 1.0
monthly_averages += [np.average(tile_means, None, tile_counts / sum_tile_counts).item()]
monthly_mins += [np.average(tile_mins, None, tile_counts / sum_tile_counts).item()]
monthly_maxes += [np.average(tile_maxes, None, tile_counts / sum_tile_counts).item()]
monthly_counts += [sum_tile_counts]
count_sum = np.sum(monthly_counts) * 1.0
weights = np.array(monthly_counts) / count_sum
        return np.average(monthly_averages, None, weights).item(), \
            np.average(monthly_maxes, None, weights).item(), \
            np.average(monthly_mins, None, weights).item()
@lru_cache()
def get_min_max_date(self, ds=None):
min_date = pytz.timezone('UTC').localize(
datetime.utcfromtimestamp(self._tile_service.get_min_time([], ds=ds)))
max_date = pytz.timezone('UTC').localize(
datetime.utcfromtimestamp(self._tile_service.get_max_time([], ds=ds)))
return min_date.date(), max_date.date()
@staticmethod
def calculate_comparison_stats(results):
xy = [[], []]
for item in results:
if len(item) == 2:
xy[item[0]["ds"]].append(item[0]["mean"])
xy[item[1]["ds"]].append(item[1]["mean"])
slope, intercept, r_value, p_value, std_err = stats.linregress(xy[0], xy[1])
comparisonStats = {
"slope": slope,
"intercept": intercept,
"r": r_value,
"p": p_value,
"err": std_err
}
return comparisonStats
class TimeSeriesResults(NexusResults):
LINE_PLOT = "line"
SCATTER_PLOT = "scatter"
__SERIES_COLORS = ['red', 'blue']
def toImage(self):
type = self.computeOptions().get_plot_type()
if type == TimeSeriesResults.LINE_PLOT or type == "default":
return self.createLinePlot()
elif type == TimeSeriesResults.SCATTER_PLOT:
return self.createScatterPlot()
else:
raise Exception("Invalid or unsupported time series plot specified")
def createScatterPlot(self):
timeSeries = []
series0 = []
series1 = []
res = self.results()
meta = self.meta()
        plotSeries = self.computeOptions().get_plot_series() if self.computeOptions() is not None else None
if plotSeries is None:
plotSeries = "mean"
for m in res:
if len(m) == 2:
timeSeries.append(datetime.fromtimestamp(m[0]["time"] / 1000))
series0.append(m[0][plotSeries])
series1.append(m[1][plotSeries])
title = ', '.join(set([m['title'] for m in meta]))
sources = ', '.join(set([m['source'] for m in meta]))
dateRange = "%s - %s" % (timeSeries[0].strftime('%b %Y'), timeSeries[-1].strftime('%b %Y'))
fig, ax = plt.subplots()
fig.set_size_inches(11.0, 8.5)
ax.scatter(series0, series1, alpha=0.5)
ax.set_xlabel(meta[0]['units'])
ax.set_ylabel(meta[1]['units'])
ax.set_title("%s\n%s\n%s" % (title, sources, dateRange))
par = np.polyfit(series0, series1, 1, full=True)
slope = par[0][0]
intercept = par[0][1]
xl = [min(series0), max(series0)]
yl = [slope * xx + intercept for xx in xl]
plt.plot(xl, yl, '-r')
ax.grid(True)
fig.tight_layout()
sio = StringIO()
plt.savefig(sio, format='png')
return sio.getvalue()
def createLinePlot(self):
nseries = len(self.meta())
res = self.results()
meta = self.meta()
timeSeries = [datetime.fromtimestamp(m[0]["time"] / 1000) for m in res]
means = [[np.nan] * len(res) for n in range(0, nseries)]
        plotSeries = self.computeOptions().get_plot_series() if self.computeOptions() is not None else None
if plotSeries is None:
plotSeries = "mean"
for n in range(0, len(res)):
timeSlot = res[n]
for seriesValues in timeSlot:
means[seriesValues['ds']][n] = seriesValues[plotSeries]
x = timeSeries
fig, axMain = plt.subplots()
fig.set_size_inches(11.0, 8.5)
fig.autofmt_xdate()
title = ', '.join(set([m['title'] for m in meta]))
sources = ', '.join(set([m['source'] for m in meta]))
dateRange = "%s - %s" % (timeSeries[0].strftime('%b %Y'), timeSeries[-1].strftime('%b %Y'))
axMain.set_title("%s\n%s\n%s" % (title, sources, dateRange))
axMain.set_xlabel('Date')
axMain.grid(True)
axMain.xaxis.set_major_locator(mdates.YearLocator())
axMain.xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))
axMain.xaxis.set_minor_locator(mdates.MonthLocator())
axMain.format_xdata = mdates.DateFormatter('%Y-%m-%d')
plots = []
for n in range(0, nseries):
if n == 0:
ax = axMain
else:
ax = ax.twinx()
plots += ax.plot(x, means[n], color=self.__SERIES_COLORS[n], zorder=10, linewidth=3, label=meta[n]['title'])
ax.set_ylabel(meta[n]['units'])
labs = [l.get_label() for l in plots]
axMain.legend(plots, labs, loc=0)
sio = StringIO()
plt.savefig(sio, format='png')
return sio.getvalue()
class TimeSeriesCalculator(object):
def __init__(self):
self.__tile_service = NexusTileService()
def calc_average_on_day(self, bounding_polygon_wkt, dataset, timeinseconds):
bounding_polygon = shapely.wkt.loads(bounding_polygon_wkt)
ds1_nexus_tiles = self.__tile_service.get_tiles_bounded_by_polygon_at_time(bounding_polygon,
dataset,
timeinseconds)
# If all data ends up getting masked, ds1_nexus_tiles will be empty
if len(ds1_nexus_tiles) == 0:
return {}
tile_data_agg = np.ma.array([tile.data for tile in ds1_nexus_tiles])
data_min = np.ma.min(tile_data_agg)
data_max = np.ma.max(tile_data_agg)
daily_mean = np.ma.mean(tile_data_agg).item()
data_count = np.ma.count(tile_data_agg)
try:
data_count = data_count.item()
except AttributeError:
pass
data_std = np.ma.std(tile_data_agg)
# Return Stats by day
stat = {
'min': data_min,
'max': data_max,
'mean': daily_mean,
'cnt': data_count,
'std': data_std,
'time': int(timeinseconds)
}
return stat
def pool_worker(work_queue, done_queue):
try:
calculator = TimeSeriesCalculator()
for work in iter(work_queue.get, SENTINEL):
scifunction = work[0]
args = work[1:]
result = calculator.__getattribute__(scifunction)(*args)
done_queue.put(result)
    except Exception:
        e_str = traceback.format_exc()
        done_queue.put({'error': e_str})
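# A self-contained sketch of the work_queue/done_queue/SENTINEL pattern used above
# (illustration only: 'MY_DATASET' and the WKT box are placeholders, and a configured
# NexusTileService backing store is assumed):
#   from multiprocessing.dummy import Pool, Manager
#   manager = Manager()
#   work_queue, done_queue = manager.Queue(), manager.Queue()
#   wkt = 'POLYGON((-150 40, -120 40, -120 55, -150 55, -150 40))'
#   work_queue.put(('calc_average_on_day', wkt, 'MY_DATASET', 1420070400))
#   work_queue.put(SENTINEL)  # one sentinel per worker tells pool_worker to stop
#   pool = Pool(1)
#   pool.apply_async(pool_worker, (work_queue, done_queue))
#   pool.close()
#   print(done_queue.get())   # stats dict for the day, or {'error': ...}
#   pool.terminate()
#   manager.shutdown()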
|
|
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For large
datasets, it's wise to set that parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
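# A short usage sketch for estimate_bandwidth (toy data, illustrative values only):
#   import numpy as np
#   X = np.random.RandomState(0).randn(200, 2)
#   bw = estimate_bandwidth(X, quantile=0.3, n_samples=100, random_state=0)
#   # 'bw' can then be passed as the bandwidth argument of mean_shift or MeanShift.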
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
        Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if it has not converged yet.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
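# A hedged sketch of calling the functional API directly (toy blobs, not a benchmark):
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5])
#   cluster_centers, labels = mean_shift(X, bandwidth=2.0, bin_seeding=True)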
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
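# Worked example of the binning idea above (illustrative numbers): with bin_size=1.0 the
# points [0.1, 0.2], [0.15, 0.18] and [5.0, 5.1] fall into bins (0, 0) and (5, 5); with
# min_bin_freq=2 only bin (0, 0) has enough members, so get_bin_seeds returns [[0., 0.]].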
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
    labels_ : array, shape=[n_samples]
        Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
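# A minimal end-to-end sketch of the estimator defined above (toy data only):
#   import numpy as np
#   X = np.array([[1, 1], [2, 1], [1, 0], [4, 7], [3, 5], [3, 6]], dtype=float)
#   ms = MeanShift(bandwidth=2).fit(X)
#   print(ms.cluster_centers_)                        # estimated cluster centers
#   print(ms.labels_)                                 # cluster index per sample
#   print(ms.predict(np.array([[0., 0.], [5., 5.]]))) # nearest-center assignment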
|
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__authors__ = ["R Celestre, M Sanchez del Rio"]
__license__ = "MIT"
__date__ = "27/05/2018"
"""
Example of dumping and loading SRW wavefronts to hdf5 files
"""
import numpy
from oasys_srw.srwlib import *
from wofrysrw.util.srw_hdf5 import save_wfr_2_hdf5, load_hdf5_2_wfr, load_hdf5_2_dictionary
import scipy.constants as codata
try:
import matplotlib.pylab as plt
# plt.switch_backend("Qt5Agg")
from srxraylib.plot.gol import plot_image
except:
raise Exception("Failed to import graphics tools.")
#
# main code
#
def plot_wfr(wfr,kind='intensity',show=True,xtitle="X",ytitle="Y",title="",aspect='auto'):
if kind == 'intensity':
ar1 = array('f', [0]*wfr.mesh.nx*wfr.mesh.ny) # "flat" 2D array to take intensity data
srwl.CalcIntFromElecField(ar1, wfr, 6, 0, 3, wfr.mesh.eStart, 0, 0)
elif kind == 'phase':
ar1 = array('d', [0]*wfr.mesh.nx*wfr.mesh.ny) # "flat" array to take 2D phase data (note it should be 'd')
srwl.CalcIntFromElecField(ar1, wfr, 0, 4, 3, wfr.mesh.eStart, 0, 0)
else:
raise Exception("Unknown kind of calculation: %s"%(kind))
arxx = numpy.array(ar1)
arxx = arxx.reshape((wfr.mesh.ny,wfr.mesh.nx)).T
x = numpy.linspace(1e6*wfr.mesh.xStart, 1e6*wfr.mesh.xFin, wfr.mesh.nx)
y = numpy.linspace(1e6*wfr.mesh.yStart, 1e6*wfr.mesh.yFin, wfr.mesh.ny)
plot_image(arxx, x, y, xtitle="%s (%d pixels)"%(xtitle,x.size), ytitle="%s (%d pixels)"%(ytitle,y.size), title=title, aspect=aspect,show=show)
return ar1,x,y
def calculate_undulator_source(Source="EBS",pMltLr=28.3,do_plots=True):
#############################################################################
# Photon source
#********************************Undulator parameters
numPer = 77 # Number of ID Periods
undPer = 0.0183 # Period Length [m]
phB = 0 # Initial Phase of the Horizontal field component
sB = 1 # Symmetry of the Horizontal field component vs Longitudinal position
xcID = 0 # Transverse Coordinates of Undulator Center [m]
ycID = 0
zcID = 0
n = 1
beamE = 17
#********************************Storage ring parameters
# Wavelength = 1E-10*12.39841975/beamE
Wavelength = codata.h*codata.c/codata.e/(1e3*beamE)
# these first order moments CONTAIN the initial condition of the electron (X,X',Y,Y') (energy comes later)
eBeam = SRWLPartBeam()
eBeam.Iavg = 0.2 # average Current [A]
eBeam.partStatMom1.x = 0.
eBeam.partStatMom1.y = 0.
eBeam.partStatMom1.z = -0.5*undPer*(numPer + 4) # initial Longitudinal Coordinate (set before the ID)
eBeam.partStatMom1.xp = 0. # initial Relative Transverse Velocities
eBeam.partStatMom1.yp = 0.
electron_rest_energy_in_GeV = codata.electron_mass*codata.c**2/codata.e*1e-9
KtoBfactor = codata.e/(2*pi*codata.electron_mass*codata.c)
#
    # obviously these emittance values (with the exception of the electron_energy) are not used for
# the single electron calculation
#
if (Source.lower() == 'ebs'):
        # e- beam parameters (RMS) EBS
sigEperE = 9.3E-4 # relative RMS energy spread
sigX = 30.3E-06 # horizontal RMS size of e-beam [m]
sigXp = 4.4E-06 # horizontal RMS angular divergence [rad]
sigY = 3.6E-06 # vertical RMS size of e-beam [m]
sigYp = 1.46E-06 # vertical RMS angular divergence [rad]
electron_energy_in_GeV = 6.00
else:
        # e- beam parameters (RMS) ESRF @ low beta
sigEperE = 1.1E-3 # relative RMS energy spread
sigX = 48.6E-06 # horizontal RMS size of e-beam [m]
sigXp = 106.9E-06 # horizontal RMS angular divergence [rad]
sigY = 3.5E-06 # vertical RMS size of e-beam [m]
sigYp = 1.26E-06 # vertical RMS angular divergence [rad]
electron_energy_in_GeV = 6.04
eBeam.partStatMom1.gamma = electron_energy_in_GeV/electron_rest_energy_in_GeV # Relative Energy
K = sqrt(2)*sqrt(((Wavelength*2*n*eBeam.partStatMom1.gamma**2)/undPer)-1)
print("K: ",K)
B = K/(undPer*KtoBfactor) # Peak Horizontal field [T] (undulator)
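    # The two lines above invert the planar-undulator resonance relation
    # Wavelength = (undPer / (2*n*gamma**2)) * (1 + K**2/2), giving
    # K = sqrt(2*((2*n*gamma**2*Wavelength/undPer) - 1)), and then convert the
    # deflection parameter to the peak field via B = K / (undPer * e/(2*pi*m_e*c)).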
# 2nd order stat. moments
eBeam.arStatMom2[0] = sigX*sigX # <(x-<x>)^2>
eBeam.arStatMom2[1] = 0 # <(x-<x>)(x'-<x'>)>
eBeam.arStatMom2[2] = sigXp*sigXp # <(x'-<x'>)^2>
eBeam.arStatMom2[3] = sigY*sigY # <(y-<y>)^2>
eBeam.arStatMom2[4] = 0 # <(y-<y>)(y'-<y'>)>
eBeam.arStatMom2[5] = sigYp*sigYp # <(y'-<y'>)^2>
eBeam.arStatMom2[10] = sigEperE*sigEperE # <(E-<E>)^2>/<E>^2
# Electron trajectory
eTraj = 0
# Precision parameters
arPrecSR = [0]*7
arPrecSR[0] = 1 # SR calculation method: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"
arPrecSR[1] = 0.01 # relative precision
arPrecSR[2] = 0 # longitudinal position to start integration (effective if < zEndInteg)
arPrecSR[3] = 0 # longitudinal position to finish integration (effective if > zStartInteg)
arPrecSR[4] = 20000 # Number of points for trajectory calculation
arPrecSR[5] = 1 # Use "terminating terms" or not (1 or 0 respectively)
arPrecSR[6] = 0 # sampling factor for adjusting nx, ny (effective if > 0) # -1 @Petra
und = SRWLMagFldU([SRWLMagFldH(n, 'v', B, phB, sB, 1)], undPer, numPer)
magFldCnt = SRWLMagFldC([und], array('d', [xcID]), array('d', [ycID]), array('d', [zcID]))
#********************************Wavefronts
# Monochromatic wavefront
wfr = SRWLWfr()
wfr.allocate(1, 512, 256) # Photon Energy, Horizontal and Vertical Positions
wfr.mesh.zStart = pMltLr
wfr.mesh.eStart = beamE*1E3
wfr.mesh.eFin = wfr.mesh.eStart
wfr.mesh.xStart = -2.5*1E-3
wfr.mesh.xFin = - wfr.mesh.xStart
wfr.mesh.yStart = -1*1E-3
wfr.mesh.yFin = - wfr.mesh.yStart
wfr.partBeam = eBeam
print('source calculation starts ... ')
srwl.CalcElecFieldSR(wfr, eTraj, magFldCnt, arPrecSR)
#
# plot source
#
if do_plots:
plot_wfr(wfr,kind='intensity',title='Source Intensity at ' + str(wfr.mesh.eStart) + ' eV',
xtitle='Horizontal Position [\u03bcm]',
ytitle='Vertical Position [\u03bcm]',aspect=None,show=True)
print('\nsource calculation finished\n')
return wfr
def propagate_beamline(wfr,do_plots=True):
print("beamline calculations starts...")
pMltLr = 28.3
pSlt = 40
Drft1 = SRWLOptD((pSlt-pMltLr))
W_MltLr = 13*1E-3
L_MltLr = 120*1E-3
grzAngl = 31.42*1E-3
oeAptrMltLr = SRWLOptA('r','a',L_MltLr*numpy.sin(grzAngl),W_MltLr)
fMltLrh = 1/((1/pMltLr)+(1/(pSlt-pMltLr)))
    fMltLrv = 1E23
oeMltLr = SRWLOptL(_Fx=fMltLrh, _Fy=fMltLrv)
#============= Wavefront Propagation Parameters =======================#
# [ 0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
ppAptrMltLr =[ 0, 0, 1., 0, 0, 1., 4., 1., 4., 0, 0, 0]
ppMltLr =[ 0, 0, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0]
ppDrft1 =[ 0, 0, 1., 2, 0, 1., 1., 1., 1., 0, 0, 0]
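    # Reading of the propagation parameters above follows the usual SRW convention (to the
    # best of this annotation's knowledge): [2] relative precision, [5]/[7] horizontal and
    # vertical range scaling, [6]/[8] horizontal and vertical resolution scaling, so the
    # aperture step refines the resolution by 4x in both planes, while the drift enables
    # the analytical treatment of the quadratic phase term ([3] = 2).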
optBL = SRWLOptC(
[oeAptrMltLr, oeMltLr, Drft1],
[ppAptrMltLr, ppMltLr, ppDrft1]
)
srwl.PropagElecField(wfr, optBL)
if do_plots:
plot_wfr(wfr,kind='intensity',title='Focal Intensity at ' + str(wfr.mesh.eStart) + ' eV',
xtitle='Horizontal Position [\u03bcm]',
ytitle='Vertical Position [\u03bcm]',show=True)
print('\nbeamline calculation finished\n')
return wfr
if __name__ == "__main__":
do_calculate = True
do_load = True
do_compare = True
show_plots = False
if do_calculate:
wfr = calculate_undulator_source(do_plots=show_plots)
save_wfr_2_hdf5(wfr,"tmp2.h5",intensity=True,phase=True,overwrite=True)
wfr_end = propagate_beamline(wfr,do_plots=show_plots)
save_wfr_2_hdf5(wfr_end,"tmp2.h5",intensity=True,phase=False,overwrite=False,subgroupname="wfr_end")
if do_load:
wfr_loaded = load_hdf5_2_wfr("tmp2.h5","wfr")
save_wfr_2_hdf5(wfr_loaded,"tmp2bis.h5",intensity=True,phase=False,overwrite=True)
wfr_end2 = propagate_beamline(wfr_loaded,do_plots=False)
save_wfr_2_hdf5(wfr_end2,"tmp2bis.h5",intensity=True,phase=True,overwrite=False,subgroupname="wfr_end")
if do_compare:
wf1_source = load_hdf5_2_dictionary("tmp2.h5","wfr")
wf2_source = load_hdf5_2_dictionary("tmp2bis.h5","wfr")
print("comparing wavefront at source")
for key in wf1_source.keys():
print(" checking field: ",key)
numpy.testing.assert_almost_equal(wf1_source[key],wf2_source[key])
wf1_end = load_hdf5_2_dictionary("tmp2.h5","wfr_end")
wf2_end = load_hdf5_2_dictionary("tmp2bis.h5","wfr_end")
print("comparing wavefront propagated")
for key in wf1_source.keys():
print(" checking field: ",key)
numpy.testing.assert_almost_equal(1e-6*wf1_end[key],1e-6*wf2_end[key],1)
|
|
import pyscf.pbc.tools.make_test_cell as make_test_cell
from pyscf.pbc.tools.pbc import super_cell
from pyscf.pbc import gto, scf, cc
from pyscf.pbc.cc.eom_kccsd_ghf import EOMIP, EOMEA, EOMEE
from pyscf.pbc.cc.eom_kccsd_ghf import EOMIP_Ta, EOMEA_Ta
from pyscf.cc import eom_gccsd
import unittest
cell = make_test_cell.test_cell_n3_diffuse()
kmf = scf.KRHF(cell, kpts=cell.make_kpts([1,1,2], with_gamma_point=True), exxdiv=None)
kmf.conv_tol = 1e-10
kmf.conv_tol_grad = 1e-6
kmf.scf()
mycc = cc.KGCCSD(kmf)
mycc.conv_tol = 1e-7
mycc.conv_tol_normt = 1e-7
mycc.run()
eris = mycc.ao2mo()
eris.mo_energy = [eris.fock[ikpt].diagonal() for ikpt in range(mycc.nkpts)]
def tearDownModule():
global cell, kmf, mycc, eris
cell.stdout.close()
del cell, kmf, mycc, eris
class KnownValues(unittest.TestCase):
def test_n3_diffuse(self):
cell = make_test_cell.test_cell_n3_diffuse()
nmp = [1,1,2]
'''
# treating 1*1*2 supercell at gamma point
supcell = super_cell(cell,nmp)
gmf = scf.GHF(supcell,exxdiv=None)
ehf = gmf.kernel()
gcc = cc.GCCSD(gmf)
gcc.conv_tol=1e-12
gcc.conv_tol_normt=1e-10
gcc.max_cycle=250
ecc, t1, t2 = gcc.kernel()
print('GHF energy (supercell) %.7f \n' % (float(ehf)/2.))
print('GCCSD correlation energy (supercell) %.7f \n' % (float(ecc)/2.))
eom = eom_gccsd.EOMIP(gcc)
e1, v = eom.ipccsd(nroots=6)
eom = eom_gccsd.EOMEA(gcc)
e2, v = eom.eaccsd(nroots=6, left=True, koopmans=True)
'''
# Running HF and CCSD with 1x1x2 Monkhorst-Pack k-point mesh
ehf2 = kmf.e_tot
self.assertAlmostEqual(ehf2, -6.1870676561725695, 6)
ecc2 = mycc.e_corr
self.assertAlmostEqual(ecc2, -0.0676483716898783, 6)
eom = EOMIP(mycc)
imds = eom.make_imds(eris=eris)
        # Ensure left is working
e1_obt, v = eom.ipccsd(nroots=3, left=True, kptlist=[0], imds=imds)
self.assertAlmostEqual(e1_obt[0][0],-1.14894700482871,6)
self.assertAlmostEqual(e1_obt[0][1],-1.148947004822481,6)
self.assertAlmostEqual(e1_obt[0][2],-1.108819439453179,6)
        # Basic ipccsd
e1_obt, v = eom.ipccsd(nroots=3, kptlist=[0], imds=imds)
self.assertAlmostEqual(e1_obt[0][0], -1.1489469962099519, 6)
self.assertAlmostEqual(e1_obt[0][1], -1.1489469961858796, 6)
self.assertAlmostEqual(e1_obt[0][2], -1.1088194518036925, 6)
# Ensure kptlist behaves correctly
e1_obt, v = eom.ipccsd(nroots=3, koopmans=True, kptlist=[1], imds=imds)
self.assertAlmostEqual(e1_obt[0][0], -0.9074337292436309, 6)
self.assertAlmostEqual(e1_obt[0][1], -0.9074337292161299, 6)
self.assertAlmostEqual(e1_obt[0][2], -0.9074331788469051, 6)
eom = EOMEA(mycc)
imds = eom.make_imds(eris=eris)
# Basic eaccsd
e2_obt, v = eom.eaccsd(nroots=3, kptlist=[0], imds=imds)
self.assertAlmostEqual(e2_obt[0][0], 1.2669788613362731, 6)
self.assertAlmostEqual(e2_obt[0][1], 1.2669788614703625, 6)
self.assertAlmostEqual(e2_obt[0][2], 1.278883205515518, 6)
# Ensure left is working
e2_obt, v = eom.eaccsd(nroots=3, left=True, kptlist=[0], imds=imds)
self.assertAlmostEqual(e2_obt[0][0], 1.266978976813125,6)
self.assertAlmostEqual(e2_obt[0][1], 1.266978976822988,6)
self.assertAlmostEqual(e2_obt[0][2], 1.278883205348326,6)
# Ensure kptlist behaves correctly
e2_obt, v = eom.eaccsd(nroots=3, koopmans=True, kptlist=[1], imds=imds)
self.assertAlmostEqual(e2_obt[0][0], 1.227583017804503, 6)
self.assertAlmostEqual(e2_obt[0][1], 1.2275830178298166, 6)
self.assertAlmostEqual(e2_obt[0][2], 1.3830379190440196, 6)
        # Basic eeccsd
eom = EOMEE(mycc)
imds = eom.make_imds(eris=eris)
ee, v = eom.eeccsd(nroots=3, kptlist=[0], imds=imds)
self.assertAlmostEqual(ee[0][0], 0.118301677904104, 6)
self.assertAlmostEqual(ee[0][1], 0.118301914631351, 6)
self.assertAlmostEqual(ee[0][2], 0.128285117266903, 6)
ee, v = eom.eeccsd(nroots=3, kptlist=[1], imds=imds)
self.assertAlmostEqual(ee[0][0], 0.07928010716890202, 6)
self.assertAlmostEqual(ee[0][1], 0.07928011416043479, 6)
self.assertAlmostEqual(ee[0][2], 0.07928011417159982, 6)
def test_n3_diffuse_frozen(self):
ehf2 = kmf.e_tot
self.assertAlmostEqual(ehf2, -6.1870676561725695, 6)
mycc_frozen = cc.KGCCSD(kmf, frozen=[[0,1],[0,1,2,3]])
mycc_frozen.conv_tol = 1e-7
mycc_frozen.conv_tol_normt = 1e-7
eris = mycc_frozen.ao2mo()
eris.mo_energy = [eris.fock[ikpt].diagonal() for ikpt in range(mycc_frozen.nkpts)]
ecc2, t1, t2 = mycc_frozen.kernel(eris=eris)
self.assertAlmostEqual(ecc2, -0.0442506265840587, 6)
eom = EOMIP(mycc_frozen)
imds = eom.make_imds(eris=eris)
e1_obt, v = eom.ipccsd(nroots=3, koopmans=False, kptlist=[0], imds=imds)
self.assertAlmostEqual(e1_obt[0][0], -1.1316152294295743, 6)
self.assertAlmostEqual(e1_obt[0][1], -1.1316152294295743, 6)
self.assertAlmostEqual(e1_obt[0][2], -1.104163717600433, 6)
e1_obt, v = eom.ipccsd(nroots=3, koopmans=True, kptlist=[1], imds=imds)
self.assertAlmostEqual(e1_obt[0][0], -0.8983145129187627, 6)
self.assertAlmostEqual(e1_obt[0][1], -0.8983145129187627, 6)
self.assertAlmostEqual(e1_obt[0][2], -0.8983139520017552, 6)
eom = EOMEA(mycc_frozen)
imds = eom.make_imds(eris=eris)
e2_obt, v = eom.eaccsd(nroots=3, kptlist=[0], imds=imds)
self.assertAlmostEqual(e2_obt[0][0], 1.2572812499753756, 6)
self.assertAlmostEqual(e2_obt[0][1], 1.2572812532456588, 6)
self.assertAlmostEqual(e2_obt[0][2], 1.280747357928012, 6)
eom = EOMEA(mycc_frozen)
e2_obt, v = eom.eaccsd(nroots=3, koopmans=True, kptlist=[1], imds=imds)
self.assertAlmostEqual(e2_obt[0][0], 1.229802629928757, 6)
self.assertAlmostEqual(e2_obt[0][1], 1.229802629928764, 6)
self.assertAlmostEqual(e2_obt[0][2], 1.384394578043613, 6)
def test_n3_diffuse_star(self):
'''Tests EOM-CCSD* method.'''
cell = make_test_cell.test_cell_n3_diffuse()
nmp = [1,1,2]
'''
# treating 1*1*2 supercell at gamma point
supcell = super_cell(cell,nmp)
gmf = scf.GHF(supcell,exxdiv=None)
ehf = gmf.kernel()
gcc = cc.GCCSD(gmf)
gcc.conv_tol=1e-12
gcc.conv_tol_normt=1e-10
gcc.max_cycle=250
ecc, t1, t2 = gcc.kernel()
print('GHF energy (supercell) %.7f \n' % (float(ehf)/2.))
print('GCCSD correlation energy (supercell) %.7f \n' % (float(ecc)/2.))
#eom = eom_gccsd.EOMIP(gcc)
#e1, v = eom.ipccsd_star(nroots=6, koopmans=True)
eom = eom_gccsd.EOMEA(gcc)
e2, v = eom.eaccsd_star(nroots=9, koopmans=True)
'''
ehf2 = kmf.e_tot
self.assertAlmostEqual(ehf2, -6.1870676561725695, 6)
ecc2 = mycc.e_corr
self.assertAlmostEqual(ecc2, -0.0676483716898783, 6)
eom = EOMIP(mycc)
e1_obt = eom.ipccsd_star(nroots=3, koopmans=True, kptlist=[0], eris=eris)
self.assertAlmostEqual(e1_obt[0][0], -1.1452481194582802, 6)
self.assertAlmostEqual(e1_obt[0][1], -1.1452481194456137, 6)
self.assertAlmostEqual(e1_obt[0][2], -1.1174912094746994, 6)
eom = EOMEA(mycc)
e1_obt = eom.eaccsd_star(nroots=2, koopmans=True, kptlist=[0,1], eris=eris)
self.assertAlmostEqual(e1_obt[0][0], 1.260627794895514, 6)
self.assertAlmostEqual(e1_obt[0][1], 1.260627794895514, 6)
self.assertAlmostEqual(e1_obt[1][0], 1.2222607619733454, 6)
self.assertAlmostEqual(e1_obt[1][1], 1.2222607619733026, 6)
def test_n3_diffuse_Ta(self):
'''Tests EOM-CCSD(T)*a method.'''
cell = make_test_cell.test_cell_n3_diffuse()
nmp = [1,1,2]
'''
# treating 1*1*2 supercell at gamma point
supcell = super_cell(cell,nmp)
gmf = scf.GHF(supcell,exxdiv=None)
gmf.conv_tol = 1e-10
gmf.conv_tol_grad = gmf.conv_tol * 10**2
ehf = gmf.kernel()
gcc = cc.GCCSD(gmf)
gcc.conv_tol=1e-12
gcc.conv_tol_normt=1e-10
gcc.max_cycle=250
ecc, t1, t2 = gcc.kernel()
print('GHF energy (supercell) %.7f \n' % (float(ehf)/2.))
print('GCCSD correlation energy (supercell) %.7f \n' % (float(ecc)/2.))
eom = eom_gccsd.EOMIP_Ta(gcc)
e1 = eom.ipccsd_star(nroots=6, koopmans=True)
eom = eom_gccsd.EOMEA_Ta(gcc)
e2 = eom.eaccsd_star(nroots=6, koopmans=True)
'''
# Running HF and CCSD with 1x1x2 Monkhorst-Pack k-point mesh
ehf2 = kmf.e_tot
self.assertAlmostEqual(ehf2, -6.1870676561725695, 6)
ecc2 = mycc.e_corr
self.assertAlmostEqual(ecc2, -0.0676483716898783, 6)
eom = EOMIP_Ta(mycc)
imds = eom.make_imds(eris=eris)
e1_obt, v = eom.ipccsd(nroots=3, koopmans=True, kptlist=[0], imds=imds)
self.assertAlmostEqual(e1_obt[0][0], -1.146351234409813, 6)
self.assertAlmostEqual(e1_obt[0][1], -1.146351234404151, 6)
self.assertAlmostEqual(e1_obt[0][2], -1.107255699646373, 6)
e1_obt = eom.ipccsd_star(nroots=3, koopmans=True, kptlist=[0], imds=imds)
self.assertAlmostEqual(e1_obt[0][0], -1.143510075691, 6)
self.assertAlmostEqual(e1_obt[0][1], -1.143510075684, 6)
self.assertAlmostEqual(e1_obt[0][2], -1.116991306080, 6)
eom = EOMEA_Ta(mycc)
imds = eom.make_imds(eris=eris)
e2_obt, v = eom.eaccsd(nroots=3, koopmans=True, kptlist=[0], imds=imds)
self.assertAlmostEqual(e2_obt[0][0], 1.267728934041309, 6)
self.assertAlmostEqual(e2_obt[0][1], 1.267728934041309, 6)
self.assertAlmostEqual(e2_obt[0][2], 1.280954980102639, 6)
e2_obt, v = eom.eaccsd(nroots=3, koopmans=True, kptlist=[1], imds=imds)
self.assertAlmostEqual(e2_obt[0][0], 1.2290479727093149, 6)
self.assertAlmostEqual(e2_obt[0][1], 1.2290479727093468, 6)
self.assertAlmostEqual(e2_obt[0][2], 1.384154366703175, 6)
e2_obt = eom.eaccsd_star(nroots=3, koopmans=True, kptlist=[1], imds=imds)
self.assertAlmostEqual(e2_obt[0][0], 1.2229050426609025, 6)
self.assertAlmostEqual(e2_obt[0][1], 1.2229050426609025, 6)
self.assertAlmostEqual(e2_obt[0][2], 1.374851059956632, 6)
|
|
"""
records.assessment.orthographic_visualization.orthographic_records.py
"""
from dlkit.json_.osid.metadata import Metadata
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.transport.objects import DataInputStream
from dlkit.primordium.type.primitives import Type
from dlkit.abstract_osid.osid.errors import InvalidArgument, NullArgument,\
NoAccess, NotFound, IllegalState
from dlkit.json_.id.objects import IdList
from ..basic.simple_records import QuestionTextRecord,\
QuestionTextFormRecord,\
QuestionFilesRecord,\
QuestionFilesFormRecord,\
IntegerAnswersRecord,\
IntegerAnswersFormRecord,\
QuestionTextAndFilesMixin
from ...osid.base_records import ObjectInitRecord
from ...registry import ASSET_CONTENT_GENUS_TYPES, ASSET_GENUS_TYPES
MANIP_ASSET_TYPE = Type(**ASSET_GENUS_TYPES['manipulateable-asset-type'])
MANIP_ASSET_CONTENT_TYPE = Type(**ASSET_CONTENT_GENUS_TYPES['manipulateable-asset-content'])
OV_ASSET_TYPE = Type(**ASSET_GENUS_TYPES['ortho-view-asset'])
OV_ASSET_CONTENT_TYPE = Type(**ASSET_CONTENT_GENUS_TYPES['ortho-view-asset-content'])
class FirstAngleProjectionRecord(ObjectInitRecord):
"""flag for first-angle projection or not"""
_implemented_record_type_identifiers = [
'first-angle-projection'
]
def is_first_angle_projection(self):
"""stub"""
return bool(self.my_osid_object._my_map['firstAngle'])
class FirstAngleProjectionFormRecord(ObjectInitRecord):
"""form for including first-angle information"""
_implemented_record_type_identifiers = [
'first-angle-projection'
]
def __init__(self, osid_object_form=None):
if osid_object_form is not None:
self.my_osid_object_form = osid_object_form
self._init_metadata()
if not self.my_osid_object_form.is_for_update():
self._init_map()
super(FirstAngleProjectionFormRecord, self).__init__(osid_object_form)
def _init_map(self):
"""stub"""
self.my_osid_object_form._my_map['firstAngle'] = \
self._first_angle_metadata['default_boolean_values'][0]
def _init_metadata(self):
"""stub"""
self._first_angle_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'first_angle'),
'element_label': 'First Angle',
'instructions': 'set boolean, is this a first angle projection',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_boolean_values': [False],
'syntax': 'BOOLEAN',
}
def get_first_angle_projection_metadata(self):
"""stub"""
return Metadata(**self._first_angle_metadata)
def set_first_angle_projection(self, value=None):
"""stub"""
if value is None:
raise NullArgument()
if self.get_first_angle_projection_metadata().is_read_only():
raise NoAccess()
if not self.my_osid_object_form._is_valid_boolean(value):
raise InvalidArgument()
self.my_osid_object_form._my_map['firstAngle'] = value
def clear_first_angle_projection(self):
"""stub"""
if (self.get_first_angle_projection_metadata().is_read_only() or
self.get_first_angle_projection_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['firstAngle'] = \
self._first_angle_metadata['default_boolean_values'][0]
first_angle_projection = property(fset=set_first_angle_projection,
fdel=clear_first_angle_projection)
class BaseOrthoQuestionRecord(QuestionTextRecord,
QuestionFilesRecord,
FirstAngleProjectionRecord):
"""basic ortho3d question"""
_implemented_record_type_identifiers = [
'base-ortho',
'question-text',
'question-files',
'first-angle-projection'
]
def has_manip(self):
"""stub"""
return self.has_file('manip')
def get_manip_id(self):
"""stub"""
return self.get_asset_id('manip')
def get_manip(self):
"""stub"""
return self.get_file_by_label('manip', MANIP_ASSET_CONTENT_TYPE)
def has_ortho_view_set(self):
"""stub"""
return bool(self.has_file('frontView') and
self.has_file('sideView') and
self.has_file('topView'))
def get_front_view_id(self):
"""stub"""
return self.get_asset_id('frontView')
def get_front_view(self):
"""stub"""
return self.get_file_by_label('frontView', OV_ASSET_CONTENT_TYPE)
def get_side_view_id(self):
"""stub"""
return self.get_asset_id('sideView')
def get_side_view(self):
"""stub"""
return self.get_file_by_label('sideView', OV_ASSET_CONTENT_TYPE)
def get_top_view_id(self):
"""stub"""
return self.get_asset_id('topView')
def get_top_view(self):
"""stub"""
return self.get_file_by_label('topView', OV_ASSET_CONTENT_TYPE)
manip_id = property(fget=get_manip_id)
manip = property(fget=get_manip)
front_view_id = property(fget=get_front_view_id)
front_view = property(fget=get_front_view)
top_view_id = property(fget=get_top_view_id)
top_view = property(fget=get_top_view)
side_view_id = property(fget=get_side_view_id)
side_view = property(fget=get_side_view)
class BaseInitMixin(QuestionTextAndFilesMixin,
FirstAngleProjectionFormRecord):
"""Mixin class to make the three classes compatible with super()
for _init_map and _init_metadata
"""
def _init_map(self):
"""stub"""
FirstAngleProjectionFormRecord._init_map(self)
super(BaseInitMixin, self)._init_map()
def _init_metadata(self):
"""stub"""
FirstAngleProjectionFormRecord._init_metadata(self)
super(BaseInitMixin, self)._init_metadata()
class BaseOrthoQuestionFormRecord(BaseInitMixin):
"""form for basic ortho3d questions
https://rhettinger.wordpress.com/2011/05/26/super-considered-super/
Because QuestionTextFormRecord, QuestionFilesFormRecord, and
FirstAngleProjectionFormRecord are all "terminal" classes with regards
to _init_map and _init_metadata, (i.e. non-cooperative), we will
have to call them manually here.
"""
_implemented_record_type_identifiers = [
'base-ortho',
'question-text',
'question-files',
'first-angle-projection'
]
def __init__(self, osid_object_form=None):
if osid_object_form is not None:
self.my_osid_object_form = osid_object_form
self._init_metadata()
if not self.my_osid_object_form.is_for_update():
self._init_map()
super(BaseOrthoQuestionFormRecord, self).__init__(
osid_object_form=osid_object_form)
def _init_map(self):
"""stub"""
super(BaseOrthoQuestionFormRecord, self)._init_map()
def _init_metadata(self):
"""stub"""
super(BaseOrthoQuestionFormRecord, self)._init_metadata()
self._ortho_view_set_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'ortho_view_set'),
'element_label': 'Orthographic View Set',
'instructions': '',
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_object_values': [''],
'syntax': 'OBJECT',
'object_set': []
}
def get_manip_metadata(self):
"""stub"""
return self.get_file_metadata()
def set_manip(self, manipulatable):
"""stub"""
if not isinstance(manipulatable, DataInputStream):
            raise InvalidArgument('Manipulatable object must be an ' +
                                  'osid.transport.DataInputStream object')
self.add_file(manipulatable,
label='manip',
asset_type=MANIP_ASSET_TYPE,
asset_content_type=MANIP_ASSET_CONTENT_TYPE)
def get_ortho_view_set_metadata(self):
"""stub"""
return Metadata(**self._ortho_view_set_metadata)
def get_ovs_front_view_metadata(self):
"""stub"""
return self.get_file_metadata()
def get_ovs_top_view_metadata(self):
"""stub"""
return self.get_file_metadata()
def get_ovs_side_view_metadata(self):
"""stub"""
return self.get_file_metadata()
def set_ortho_view_set(self, front_view, side_view, top_view):
"""stub"""
if (not isinstance(front_view, DataInputStream) or
not isinstance(top_view, DataInputStream) or
not isinstance(side_view, DataInputStream)):
raise InvalidArgument('views must be osid.transport.DataInputStream objects')
self.add_file(front_view,
label='frontView',
asset_type=OV_ASSET_TYPE,
asset_content_type=OV_ASSET_CONTENT_TYPE)
self.add_file(side_view,
label='sideView',
asset_type=OV_ASSET_TYPE,
asset_content_type=OV_ASSET_CONTENT_TYPE)
self.add_file(top_view,
label='topView',
asset_type=OV_ASSET_TYPE,
asset_content_type=OV_ASSET_CONTENT_TYPE)
def clear_ortho_view_set(self):
"""stub"""
if (self.get_ortho_view_set_metadata().is_read_only() or
self.get_ortho_view_set_metadata().is_required()):
raise NoAccess()
self.clear_file('frontView')
self.clear_file('sideView')
self.clear_file('topView')
def set_ovs_view(self, asset_data, view_name):
"""
view_name should be frontView, sideView, or topView
"""
if not isinstance(asset_data, DataInputStream):
raise InvalidArgument('view file must be an ' +
'osid.transport.DataInputStream object')
if view_name not in ['frontView', 'sideView', 'topView']:
raise InvalidArgument('View name must be frontView, sideView, or topView.')
self.clear_file(view_name)
self.add_file(asset_data,
label=view_name,
asset_type=OV_ASSET_TYPE,
asset_content_type=OV_ASSET_CONTENT_TYPE)
class LabelOrthoFacesQuestionRecord(BaseOrthoQuestionRecord):
"""label the 3 faces on a manipulatable object"""
_implemented_record_type_identifiers = [
'label-ortho-faces',
'base_ortho',
'question-text',
'question-files'
]
class LabelOrthoFacesQuestionFormRecord(BaseOrthoQuestionFormRecord):
"""form to create this type of question"""
_implemented_record_type_identifiers = [
'label-ortho-faces',
'base_ortho',
'question-text',
'question-files'
]
class LabelOrthoFacesItemFormRecord(ObjectInitRecord):
_implemented_record_type_identifiers = [
'label-ortho-faces'
]
pass
class LabelOrthoFacesItemRecord(ObjectInitRecord):
_implemented_record_type_identifiers = [
'label-ortho-faces'
]
def _is_match(self, response, answer):
match = False
if (int(answer.get_front_face_value()) == int(response.get_front_face_value()) and
int(answer.get_side_face_value()) == int(response.get_side_face_value()) and
int(answer.get_top_face_value()) == int(response.get_top_face_value())):
match = True
return match
def is_correctness_available_for_response(self, response):
"""is a measure of correctness available for a particular mc response"""
return True
def is_response_correct(self, response):
"""returns True if response evaluates to an Item Answer that is 100 percent correct"""
for answer in self.my_osid_object.get_answers():
if self._is_match(response, answer):
return True
return False
def get_correctness_for_response(self, response):
"""get measure of correctness available for a particular response"""
for answer in self.my_osid_object.get_answers():
if self._is_match(response, answer):
try:
return answer.get_score()
except AttributeError:
return 100
for answer in self.my_osid_object.get_wrong_answers():
if self._is_match(response, answer):
try:
return answer.get_score()
except AttributeError:
return 0
def get_answer_for_response(self, response):
for answer in self.my_osid_object.get_answers():
if self._is_match(response, answer):
return answer
wrong_answers = None
try:
wrong_answers = list(self.my_osid_object.get_wrong_answers())
except AttributeError:
pass
else:
for answer in wrong_answers:
if self._is_match(response, answer):
return answer
# also look for generic incorrect answer
if wrong_answers is not None:
for answer in wrong_answers:
if not answer.has_face_values():
return answer
raise NotFound('no matching answer found for response')
def is_feedback_available_for_response(self, response):
try:
answer = self.get_answer_for_response(response)
except NotFound:
return False
try:
return answer.has_feedback()
except AttributeError:
return False
def get_feedback_for_response(self, response):
try:
answer = self.get_answer_for_response(response)
except NotFound:
raise IllegalState('no answer matching response was found')
return answer.get_feedback() # raises IllegalState
def get_confused_learning_objective_ids_for_response(self, response):
try:
answer = self.get_answer_for_response(response)
except NotFound:
raise IllegalState('no answer matching response was found')
try:
return answer.get_confused_learning_objective_ids()
except AttributeError:
return IdList([])
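# Illustrative sketch only (not part of the record API): how an item whose
# record is LabelOrthoFacesItemRecord might be evaluated against a submitted
# response. ``item`` and ``response`` are assumptions here: osid objects that
# carry the records defined in this module.
def _example_evaluate_label_ortho_faces(item, response):
    """Hypothetical helper: return (is_correct, score, feedback or None)."""
    is_correct = item.is_response_correct(response)
    score = item.get_correctness_for_response(response)
    feedback = None
    if item.is_feedback_available_for_response(response):
        feedback = item.get_feedback_for_response(response)
    return is_correct, score, feedback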
class LabelOrthoFacesAnswerRecord(IntegerAnswersRecord):
"""the three face value answers"""
_implemented_record_type_identifiers = [
'label-ortho-faces'
]
def has_face_values(self):
"""stub"""
return (self.has_integer_value('frontFaceValue') and
self.has_integer_value('topFaceValue') and
self.has_integer_value('sideFaceValue'))
def get_front_face_value(self):
"""stub"""
return self.get_integer_value('frontFaceValue')
def get_top_face_value(self):
"""stub"""
return self.get_integer_value('topFaceValue')
def get_side_face_value(self):
"""stub"""
return self.get_integer_value('sideFaceValue')
class LabelOrthoFacesAnswerFormRecord(IntegerAnswersFormRecord):
"""form to set the answer faces"""
_implemented_record_type_identifiers = [
'label-ortho-faces'
]
def __init__(self, osid_object_form):
if osid_object_form is not None:
self.my_osid_object_form = osid_object_form
self._init_metadata()
if not self.my_osid_object_form.is_for_update():
self._init_map()
super(LabelOrthoFacesAnswerFormRecord, self).__init__(
osid_object_form=osid_object_form)
def _init_map(self):
"""stub"""
super(LabelOrthoFacesAnswerFormRecord, self)._init_map()
def _init_metadata(self):
"""stub"""
super(LabelOrthoFacesAnswerFormRecord, self)._init_metadata()
self._face_values_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'face_values'),
'element_label': 'Orthographic Face Values',
'instructions': '',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
}
def get_face_values_metadata(self):
"""stub"""
return Metadata(**self._face_values_metadata)
def set_face_values(self, front_face_value, side_face_value, top_face_value):
"""stub"""
if front_face_value is None or side_face_value is None or top_face_value is None:
raise NullArgument()
self.add_integer_value(value=int(front_face_value), label='frontFaceValue')
self.add_integer_value(value=int(side_face_value), label='sideFaceValue')
self.add_integer_value(value=int(top_face_value), label='topFaceValue')
def clear_face_values(self):
"""stub"""
if (self.get_face_values_metadata().is_read_only() or
self.get_face_values_metadata().is_required()):
raise NoAccess()
self.clear_integer_value('frontFaceValue')
self.clear_integer_value('sideFaceValue')
self.clear_integer_value('topFaceValue')
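# Illustrative sketch only (not part of the record API): populating an answer
# form that includes LabelOrthoFacesAnswerFormRecord. ``answer_form`` is
# assumed to be an osid answer form created with the label-ortho-faces record.
def _example_fill_label_ortho_faces_answer(answer_form):
    """Hypothetical helper: face 1 is front, face 2 is side, face 3 is top."""
    answer_form.set_face_values(front_face_value=1,
                                side_face_value=2,
                                top_face_value=3)
    return answer_form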
class EulerRotationQuestionRecord(BaseOrthoQuestionRecord):
"""students can orient the manipulatable at the right angle"""
_implemented_record_type_identifiers = [
'euler-rotation',
        'base_ortho',
'question-text',
'question-files'
]
class EulerRotationQuestionFormRecord(BaseOrthoQuestionFormRecord):
"""form to create these questions"""
_implemented_record_type_identifiers = [
'euler-rotation',
        'base_ortho',
'question-text',
'question-files'
]
class EulerRotationAnswerRecord(IntegerAnswersRecord):
"""correct angle answers"""
_implemented_record_type_identifiers = [
'euler-rotation'
]
def has_angle_values(self):
"""stub"""
return (self.has_integer_value('xAngle') and
self.has_integer_value('yAngle') and
self.has_integer_value('zAngle'))
def get_x_angle_value(self):
"""stub"""
return self.get_integer_value('xAngle')
def get_y_angle_value(self):
"""stub"""
return self.get_integer_value('yAngle')
def get_z_angle_value(self):
"""stub"""
return self.get_integer_value('zAngle')
class EulerRotationAnswerFormRecord(IntegerAnswersFormRecord):
"""form to create the answer"""
_implemented_record_type_identifiers = [
'euler-rotation'
]
def __init__(self, osid_object_form):
if osid_object_form is not None:
self.my_osid_object_form = osid_object_form
self._init_metadata()
if not self.my_osid_object_form.is_for_update():
self._init_map()
super(EulerRotationAnswerFormRecord, self).__init__(
osid_object_form=osid_object_form)
def _init_map(self):
"""stub"""
super(EulerRotationAnswerFormRecord, self)._init_map()
def _init_metadata(self):
"""stub"""
super(EulerRotationAnswerFormRecord, self)._init_metadata()
self._euler_rotation_metadata = {
'element_id': Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
'angle_values'),
'element_label': 'Euler Angle Values',
'instructions': 'Provide X, Y, and Z euler angle rotation values',
'required': True,
'read_only': False,
'linked': True,
'array': False,
'default_object_values': [{}],
'syntax': 'OBJECT',
'object_set': []
}
def get_euler_rotation_values_metadata(self):
"""stub"""
return Metadata(**self._euler_rotation_metadata)
def set_euler_angle_values(self, x_angle, y_angle, z_angle):
"""stub"""
if x_angle is None or y_angle is None or z_angle is None:
raise NullArgument()
self.add_integer_value(value=x_angle, label='xAngle')
self.add_integer_value(value=y_angle, label='yAngle')
self.add_integer_value(value=z_angle, label='zAngle')
def clear_angle_values(self):
"""stub"""
if (self.get_euler_rotation_values_metadata().is_read_only() or
self.get_euler_rotation_values_metadata().is_required()):
raise NoAccess()
self.clear_integer_value('xAngle')
self.clear_integer_value('yAngle')
self.clear_integer_value('zAngle')
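# Illustrative sketch only (not part of the record API): populating an answer
# form that includes EulerRotationAnswerFormRecord with a 90 degree rotation
# about the Z axis. ``answer_form`` is assumed to be an osid answer form
# created with the euler-rotation record.
def _example_fill_euler_rotation_answer(answer_form):
    """Hypothetical helper: set the expected X, Y, and Z euler angles."""
    answer_form.set_euler_angle_values(x_angle=0, y_angle=0, z_angle=90)
    return answer_form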