gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git-archive substitution keywords as a dict."""
    # These strings are substituted by `git archive`.  setup.py/versioneer.py
    # greps for the variable names, so each assignment must keep its original
    # name and stay on a line of its own.  _version.py just calls
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    A plain attribute bag: get_config() assigns VCS, style, tag_prefix,
    parentdir_prefix, versionfile_source and verbose on each instance.
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values were baked in when 'setup.py versioneer' generated
    # _version.py for this project.
    cfg = VersioneerConfig()
    for attr, value in (("VCS", "git"),
                        ("style", "pep440"),
                        ("tag_prefix", "v"),
                        ("parentdir_prefix", "vlfeat-"),
                        ("versionfile_source", "vlfeat/_version.py"),
                        ("verbose", False)):
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as internal control flow: get_versions() tries each discovery
    strategy in turn and falls through to the next when one raises this.
    """
# Template registry; empty here — presumably populated by versioneer's own
# machinery, not by this generated file.  TODO confirm.
LONG_VERSION_PY = {}
# Maps VCS name -> {method name -> handler function}; filled in by the
# @register_vcs_handler decorators below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable name in *commands* until one can be
    spawned, then runs it with *args*.  Returns a (stdout, returncode)
    tuple, or (None, None) when no candidate could be launched at all or
    launching failed for a reason other than "file not found".
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            # EnvironmentError + sys.exc_info() keeps old-Python (py2)
            # compatibility for this generated file.
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # this candidate doesn't exist; try the next name
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for-loop exhausted without break: nothing could be spawned
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    # on py3 Popen yields bytes; decode to str for uniform handling
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  Walks up to two extra
    directory levels looking for a suitably named ancestor; raises
    NotThisMethod when none matches.
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # walk up one level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    The code embedded in _version.py can just fetch the values of the
    keyword variables.  When used from setup.py we don't want to import
    _version.py, so this scrapes them with a regexp instead.  Returns a
    dict with any of "refnames", "full", "date" that were found.
    """
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if reading raises;
        # the previous open()/close() pair leaked it on mid-read errors.
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                # each line is checked against all three variables,
                # matching the original non-elif behavior
                for variable, key in (("git_refnames =", "refnames"),
                                      ("git_full =", "full"),
                                      ("git_date =", "date")):
                    if line.strip().startswith(variable):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except EnvironmentError:
        # missing/unreadable file: return whatever (nothing) we collected
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    Raises NotThisMethod when the keywords are missing or were never
    expanded by git-archive.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git's %ci output is only "ISO-8601-like": turn the first space
        # into the "T" separator and drop the one before the UTC offset.
        # (%cI would be compliant already, but needs git >= 2.2.0.)
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {name.strip() for name in refnames.strip("()").split(",")}
    # git >= 1.8.3 lists tags as "tag: foo-1.0"; prefer those entries.
    marker = "tag: "
    tags = {name[len(marker):] for name in refs if name.startswith(marker)}
    if not tags:
        # Old git (or genuinely no tags).  Heuristic: keep only refnames
        # containing a digit, which filters out branch names such as
        # "release" and "stabilization", plus "HEAD" and "master", that
        # the old %d expansion mixes in indistinguishably from tags.
        tags = {name for name in refs if re.search(r'\d', name)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            version = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % version)
            return {"version": version,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date.  Raises NotThisMethod when *root* is not
    a git checkout or git commands fail outright.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False in run_command, so the Windows wrapper names must be
        # spelled out explicitly
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): run_command(...)[0] is None when git fails here, which
    # would raise AttributeError on .strip() — confirm whether that can
    # still happen after the rev-parse checks above.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a "." if the closest tag already contains a "+", else a "+".

    Used when appending the local-version segment for PEP 440 styles.
    Fix: git_pieces_from_vcs() stores "closest-tag" as an explicit None
    when no tag exists; dict.get's default does not cover that case, and
    '"+" in None' would raise TypeError.  Coalesce None to "" instead.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]].  Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing has been tagged yet
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty.  Note that .dev0 sorts backwards (a dirty
    tree will appear "older" than the corresponding clean one), but you
    shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
        return rendered
    # exception #1
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: bare short hash
        rendered = pieces["short"]
    elif pieces["distance"]:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        rendered = tag
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: bare short hash
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # upstream parsing failed; report the error instead of a version
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # strategy 1: expanded git-archive keywords baked into this file
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    # locate the project root: strip one directory level per path component
    # of versionfile_source
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # no __file__ at all (frozen interpreter): nothing more we can do
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # strategy 2: ask git directly via 'git describe'
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # strategy 3: parse the version out of the parent directory name
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| |
import unittest
import os
from xml.etree.ElementTree import ParseError
from programy.parser.aiml_parser import AIMLParser
from programy.parser.exceptions import ParserException
from programy.parser.pattern.nodes.root import PatternRootNode
from programy.parser.pattern.nodes.topic import PatternTopicNode
from programy.parser.pattern.nodes.that import PatternThatNode
from programy.parser.pattern.nodes.word import PatternWordNode
from programy.parser.pattern.nodes.oneormore import PatternOneOrMoreWildCardNode
from programy.parser.pattern.nodes.template import PatternTemplateNode
from programy.dialog import Sentence
class AIMLParserTests(unittest.TestCase):
def setUp(self):
    # Fresh parser per test; warnings suppressed and invalid AIML raises.
    # (NOTE: "supress_warnings" spelling matches the AIMLParser API.)
    self.parser = AIMLParser(supress_warnings=True, stop_on_invalid=True)
    self.assertIsNotNone(self.parser)
def test_parse_from_file_valid(self):
    # Smoke test: a known-good AIML file parses without raising.
    filename = os.path.dirname(__file__)+ '/valid.aiml'
    self.parser.parse_from_file(filename)
def test_parse_from_file_invalid(self):
    # NOTE(review): no assertion here — presumably parsing the invalid
    # file is expected to raise or to be tolerated silently; confirm the
    # intended outcome and assert it explicitly.
    filename = os.path.dirname(__file__)+ '/invalid.aiml'
    self.parser.parse_from_file(filename)
def test_crud(self):
    # Input that is not XML at all: the underlying parser raises ParseError.
    with self.assertRaises(ParseError) as raised:
        self.parser.parse_from_text(
            """Blah Blah Blah
""")
def test_no_aiml(self):
    # An XML declaration with no root element is a parse error.
    with self.assertRaises(ParseError) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
""")
    self.assertTrue(str(raised.exception).startswith("no element found:"))
def test_no_content(self):
    # Whitespace-only input is a parse error too.
    with self.assertRaises(ParseError) as raised:
        self.parser.parse_from_text(
            """
""")
    self.assertTrue(str(raised.exception).startswith("no element found:"))
def test_base_aiml_no_content(self):
    # An <aiml> root with no categories is rejected with a ParserException.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Error, no categories in aiml file")
def test_base_aiml_topic_no_name(self):
    # A <topic> element without a name attribute is rejected.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic>
</topic>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Error, missing name attribute for topic")
def test_base_aiml_topic_no_category(self):
    # A named topic with no categories inside is rejected.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
</topic>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Error, no categories in topic")
def test_base_aiml_topic_category_no_content(self):
    # An empty <category> inside a topic is missing its template.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
</category>
</topic>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Error, no template node found in category")
def test_base_aiml_topic_at_multiple_levels(self):
    # A topic may appear as the category's parent OR inside the category,
    # but not both at once.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
<topic name="test2" />
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</topic>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Error, topic exists in category AND as parent node")
def test_base_aiml_topic_category_no_template(self):
    # A category with a pattern but no template is rejected.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
<pattern>*</pattern>
</category>
</topic>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Error, no template node found in category")
def test_base_aiml_category_no_content(self):
    # An empty top-level category is missing its template.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
</category>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Error, no template node found in category")
def test_base_aiml_category_no_template(self):
    # A top-level category with a pattern but no template is rejected.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>*</pattern>
</category>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Error, no template node found in category")
def test_base_aiml_topic_empty_parent_node(self):
    # A parent topic whose name attribute is an empty string is rejected.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="">
<category>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</topic>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Topic name empty or null")
def test_base_aiml_topic_with_something_else(self):
    # Unknown child elements of <topic> (here <xxxx>) are rejected by name.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<xxxx>
<pattern>*</pattern>
<template>RESPONSE</template>
</xxxx>
</topic>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Error unknown child node of topic, xxxx")
def test_base_aiml_topic_empty_child_node1(self):
    # A self-closing in-category <topic name=""/> carries no text.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<topic name="" />
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Topic node text is empty")
def test_base_aiml_topic_empty_child_node2(self):
    # An in-category <topic></topic> with empty body text is rejected.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<topic></topic>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aiml>
""")
    self.assertEqual(raised.exception.message, "Topic node text is empty")
def test_base_aiml_that_empty_child_node(self):
    # An in-category <that></that> with empty body text is rejected.
    with self.assertRaises(ParserException) as raised:
        self.parser.parse_from_text(
            """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<that></that>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aiml>
""")
    self.assertEqual(raised.exception.message, "That node text is empty")
def test_base_aiml_topic_category_template(self):
    """Category under a named topic builds * -> topic(test) -> that(*) -> template.

    Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    replaced with assertEqual throughout.
    """
    self.parser.parse_from_text(
        """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</topic>
</aiml>
""")
    self.assertIsNotNone(self.parser.pattern_parser)
    self.assertIsNotNone(self.parser.pattern_parser.root)
    self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
    self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
    node = self.parser.pattern_parser.root.star
    self.assertIsNotNone(node)
    self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
    self.assertEqual(node.wildcard, "*")
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertEqual(len(topic.children), 1)
    self.assertIsNotNone(topic.children[0])
    self.assertIsInstance(topic.children[0], PatternWordNode)
    self.assertEqual(topic.children[0].word, "test")
    that = topic.children[0].that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertTrue(that.has_one_or_more())
    self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(that.star.wildcard, "*")
    template = that.star.template
    self.assertIsNotNone(template)
    self.assertIsInstance(template, PatternTemplateNode)
    self.assertEqual(template.template.resolve(bot=None, clientid="test"), "RESPONSE")
def test_base_aiml_topic_category_template_multi_line(self):
    """Multi-line template text is collapsed to a single-space-joined string.

    Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    replaced with assertEqual throughout.
    """
    self.parser.parse_from_text(
        """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
<pattern>*</pattern>
<template>
RESPONSE1,
RESPONSE2.
RESPONSE3
</template>
</category>
</topic>
</aiml>
""")
    self.assertIsNotNone(self.parser.pattern_parser)
    self.assertIsNotNone(self.parser.pattern_parser.root)
    self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
    self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
    node = self.parser.pattern_parser.root.star
    self.assertIsNotNone(node)
    self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
    self.assertEqual(node.wildcard, "*")
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertEqual(len(topic.children), 1)
    self.assertIsNotNone(topic.children[0])
    self.assertIsInstance(topic.children[0], PatternWordNode)
    self.assertEqual(topic.children[0].word, "test")
    that = topic.children[0].that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertTrue(that.has_one_or_more())
    self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(that.star.wildcard, "*")
    template = that.star.template
    self.assertIsNotNone(template)
    self.assertIsInstance(template, PatternTemplateNode)
    self.assertEqual(template.template.resolve(bot=None, clientid="test"), "RESPONSE1, RESPONSE2. RESPONSE3")
def test_base_aiml_category_template(self):
    """A bare category defaults both topic and that to the * wildcard.

    Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    replaced with assertEqual throughout.
    """
    self.parser.parse_from_text(
        """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>*</pattern>
<template>RESPONSE</template>
</category>
</aiml>
""")
    self.assertIsNotNone(self.parser.pattern_parser)
    self.assertIsNotNone(self.parser.pattern_parser.root)
    self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
    self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
    node = self.parser.pattern_parser.root.star
    self.assertIsNotNone(node)
    self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
    self.assertEqual(node.wildcard, "*")
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertTrue(topic.has_one_or_more())
    self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(topic.star.wildcard, "*")
    that = topic.star.that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertTrue(that.has_one_or_more())
    self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(that.star.wildcard, "*")
    template = that.star.template
    self.assertIsNotNone(template)
    self.assertIsInstance(template, PatternTemplateNode)
    self.assertEqual(template.template.resolve(bot=None, clientid="test"), "RESPONSE")
def test_base_aiml_category_template_that(self):
    """An explicit <that> becomes a word node under the default * topic.

    Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    replaced with assertEqual throughout.
    """
    self.parser.parse_from_text(
        """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>*</pattern>
<that>something</that>
<template>RESPONSE</template>
</category>
</aiml>
""")
    self.assertIsNotNone(self.parser.pattern_parser)
    self.assertIsNotNone(self.parser.pattern_parser.root)
    self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
    self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
    node = self.parser.pattern_parser.root.star
    self.assertIsNotNone(node)
    self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
    self.assertEqual(node.wildcard, "*")
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertTrue(topic.has_one_or_more())
    self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(topic.star.wildcard, "*")
    that = topic.star.that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertEqual(len(that.children), 1)
    self.assertIsNotNone(that.children[0])
    self.assertIsInstance(that.children[0], PatternWordNode)
    self.assertEqual(that.children[0].word, "something")
    template = that.children[0].template
    self.assertIsNotNone(template)
    self.assertIsInstance(template, PatternTemplateNode)
    self.assertEqual(template.template.resolve(bot=None, clientid="test"), "RESPONSE")
def test_base_aiml_category_template_topic(self):
    """An in-category <topic> becomes a word node; that defaults to *.

    Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    replaced with assertEqual throughout.
    """
    self.parser.parse_from_text(
        """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>*</pattern>
<topic>something</topic>
<template>RESPONSE</template>
</category>
</aiml>
""")
    self.assertIsNotNone(self.parser.pattern_parser)
    self.assertIsNotNone(self.parser.pattern_parser.root)
    self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
    self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
    node = self.parser.pattern_parser.root.star
    self.assertIsNotNone(node)
    self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
    self.assertEqual(node.wildcard, "*")
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertEqual(len(topic.children), 1)
    self.assertIsNotNone(topic.children[0])
    self.assertIsInstance(topic.children[0], PatternWordNode)
    self.assertEqual(topic.children[0].word, "something")
    that = topic.children[0].that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertTrue(that.has_one_or_more())
    self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(that.star.wildcard, "*")
    template = that.star.template
    self.assertIsNotNone(template)
    self.assertIsInstance(template, PatternTemplateNode)
    self.assertEqual(template.template.resolve(bot=None, clientid="test"), "RESPONSE")
def test_base_aiml_category_template_topic_that(self):
    """Explicit <topic> and <that> both become word nodes in the chain.

    Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    replaced with assertEqual throughout.
    """
    self.parser.parse_from_text(
        """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>*</pattern>
<that>something</that>
<topic>other</topic>
<template>RESPONSE</template>
</category>
</aiml>
""")
    self.assertIsNotNone(self.parser.pattern_parser)
    self.assertIsNotNone(self.parser.pattern_parser.root)
    self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
    self.assertTrue(self.parser.pattern_parser.root.has_one_or_more())
    node = self.parser.pattern_parser.root.star
    self.assertIsNotNone(node)
    self.assertIsInstance(node, PatternOneOrMoreWildCardNode)
    self.assertEqual(node.wildcard, "*")
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertEqual(len(topic.children), 1)
    self.assertIsNotNone(topic.children[0])
    self.assertIsInstance(topic.children[0], PatternWordNode)
    self.assertEqual(topic.children[0].word, "other")
    that = topic.children[0].that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertEqual(len(that.children), 1)
    self.assertIsNotNone(that.children[0])
    self.assertIsInstance(that.children[0], PatternWordNode)
    self.assertEqual(that.children[0].word, "something")
    template = that.children[0].template
    self.assertIsNotNone(template)
    self.assertIsInstance(template, PatternTemplateNode)
    self.assertEqual(template.template.resolve(bot=None, clientid="test"), "RESPONSE")
def test_base_aiml_multiple_categories(self):
    """Two top-level categories each get their own word node under root.

    The assertions index children[1] as "Hello" and children[0] as
    "Goodbye" — root children appear in reverse of document order here.
    Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    replaced with assertEqual throughout.
    """
    self.parser.parse_from_text(
        """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<category>
<pattern>Hello</pattern>
<template>Hiya</template>
</category>
<category>
<pattern>Goodbye</pattern>
<template>See ya</template>
</category>
</aiml>
""")
    self.assertIsNotNone(self.parser.pattern_parser)
    self.assertIsNotNone(self.parser.pattern_parser.root)
    self.assertIsInstance(self.parser.pattern_parser.root, PatternRootNode)
    self.assertEqual(2, len(self.parser.pattern_parser.root.children))
    node = self.parser.pattern_parser.root.children[1]
    self.assertIsNotNone(node)
    self.assertIsInstance(node, PatternWordNode)
    self.assertEqual(node.word, "Hello")
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertTrue(topic.has_one_or_more())
    self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(topic.star.wildcard, "*")
    that = topic.star.that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertTrue(that.has_one_or_more())
    self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(that.star.wildcard, "*")
    node = self.parser.pattern_parser.root.children[0]
    self.assertIsNotNone(node)
    self.assertIsInstance(node, PatternWordNode)
    self.assertEqual(node.word, "Goodbye")
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertTrue(topic.has_one_or_more())
    self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(topic.star.wildcard, "*")
    that = topic.star.that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertTrue(that.has_one_or_more())
    self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(that.star.wildcard, "*")
def test_base_aiml_multiple_categories_in_a_topic(self):
    """Both categories inside a named topic share the topic word "test".

    Root children appear in reverse of document order (children[1] is
    "Hello", children[0] is "Goodbye"), matching the sibling test above.
    Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    replaced with assertEqual throughout.
    """
    self.parser.parse_from_text(
        """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
<topic name="test">
<category>
<pattern>Hello</pattern>
<template>Hiya</template>
</category>
<category>
<pattern>Goodbye</pattern>
<template>See ya</template>
</category>
</topic>
</aiml>
""")
    self.assertIsNotNone(self.parser.pattern_parser.root)
    self.assertEqual(2, len(self.parser.pattern_parser.root.children))
    node = self.parser.pattern_parser.root.children[1]
    self.assertIsNotNone(node)
    self.assertIsInstance(node, PatternWordNode)
    self.assertEqual(node.word, "Hello")
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertEqual(len(topic.children), 1)
    self.assertIsNotNone(topic.children[0])
    self.assertIsInstance(topic.children[0], PatternWordNode)
    self.assertEqual(topic.children[0].word, "test")
    that = topic.children[0].that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertTrue(that.has_one_or_more())
    self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(that.star.wildcard, "*")
    node = self.parser.pattern_parser.root.children[0]
    self.assertIsNotNone(node)
    self.assertIsInstance(node, PatternWordNode)
    self.assertEqual(node.word, "Goodbye")
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertEqual(len(topic.children), 1)
    self.assertIsNotNone(topic.children[0])
    self.assertIsInstance(topic.children[0], PatternWordNode)
    self.assertEqual(topic.children[0].word, "test")
    that = topic.children[0].that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertTrue(that.has_one_or_more())
    self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(that.star.wildcard, "*")
def test_base_aiml_multiple_categories_in_and_out_of_topic(self):
    """Categories inside a <topic> must be tagged with the topic's name,
    while categories outside any topic fall back to the implicit *-topic;
    all four patterns must appear under the root.

    Replaces the deprecated assertEquals alias with assertEqual and folds
    the four near-identical node checks into two private helpers.
    """
    self.parser.parse_from_text(
        """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
    <category>
        <pattern>Welcome</pattern>
        <template>Hello there</template>
    </category>
    <topic name="test">
        <category>
            <pattern>Hello</pattern>
            <template>Hiya</template>
        </category>
        <category>
            <pattern>Goodbye</pattern>
            <template>See ya</template>
        </category>
    </topic>
    <category>
        <pattern>Interesting</pattern>
        <template>Yes</template>
    </category>
</aiml>
""")
    self.assertIsNotNone(self.parser.pattern_parser.root)
    root_children = self.parser.pattern_parser.root.children
    self.assertEqual(4, len(root_children))

    # Most recently parsed pattern first; None marks "no explicit topic".
    expectations = [
        ("Interesting", None),
        ("Goodbye", "test"),
        ("Hello", "test"),
        ("Welcome", None),
    ]
    for child, (word, topic_word) in zip(root_children, expectations):
        self.assertIsNotNone(child)
        self.assertIsInstance(child, PatternWordNode)
        self.assertEqual(child.word, word)
        if topic_word is None:
            self._assert_default_topic_and_that(child)
        else:
            self._assert_named_topic_and_that(child, topic_word)

def _assert_default_topic_and_that(self, node):
    """Assert *node* carries the implicit one-or-more wildcard topic,
    followed by the implicit <that>*</that> wildcard."""
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertTrue(topic.has_one_or_more())
    self.assertIsInstance(topic.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(topic.star.wildcard, "*")
    that = topic.star.that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertTrue(that.has_one_or_more())
    self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(that.star.wildcard, "*")

def _assert_named_topic_and_that(self, node, topic_word):
    """Assert *node* carries a topic node whose single child is the word
    *topic_word*, followed by the implicit <that>*</that> wildcard."""
    topic = node.topic
    self.assertIsNotNone(topic)
    self.assertIsInstance(topic, PatternTopicNode)
    self.assertEqual(len(topic.children), 1)
    self.assertIsNotNone(topic.children[0])
    self.assertIsInstance(topic.children[0], PatternWordNode)
    self.assertEqual(topic.children[0].word, topic_word)
    that = topic.children[0].that
    self.assertIsNotNone(that)
    self.assertIsInstance(that, PatternThatNode)
    self.assertTrue(that.has_one_or_more())
    self.assertIsInstance(that.star, PatternOneOrMoreWildCardNode)
    self.assertEqual(that.star.wildcard, "*")
def test_match_sentence(self):
    """A parsed category must be findable via match_sentence() and resolve
    to its template text."""
    self.parser.parse_from_text(
        """<?xml version="1.0" encoding="UTF-8"?>
<aiml>
    <category>
        <pattern>HELLO</pattern>
        <template>Hiya</template>
    </category>
</aiml>
""")
    self.parser.pattern_parser.dump()
    matched = self.parser.match_sentence(None, "test", Sentence("HELLO"), "*", "*")
    self.assertIsNotNone(matched)
    template = matched.template_node().template
    self.assertEqual("Hiya", template.resolve(None, None))
# Allow this test module to be executed directly as a script.
if __name__ == '__main__':
    unittest.main()
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import os
import shlex
import transaction
from pyramid import renderers
from pyramid.config import Configurator as _Configurator
from pyramid.response import Response
from pyramid.security import Allow, Authenticated
from pyramid.tweens import EXCVIEW
from pyramid_rpc.xmlrpc import XMLRPCRenderer
from warehouse.errors import BasicAuthBreachedPassword
from warehouse.utils.static import ManifestCacheBuster
from warehouse.utils.wsgi import ProxyFixer, VhmRootRemover, HostRewrite
class Environment(enum.Enum):
    """The environments Warehouse can be configured to run as.

    The member values match the strings accepted by the ``WAREHOUSE_ENV``
    environment variable.
    """

    production = "production"
    development = "development"
class Configurator(_Configurator):
    """Pyramid configurator extended with WSGI middleware registration.

    Middlewares accumulate in the ``wsgi.middlewares`` setting and are
    applied around the WSGI app when it is created, in registration order
    (so the first middleware registered ends up innermost).
    """

    def add_wsgi_middleware(self, middleware, *args, **kwargs):
        """Record *middleware* (plus its arguments) for later wrapping."""
        registered = self.get_settings().setdefault("wsgi.middlewares", [])
        registered.append((middleware, args, kwargs))

    def make_wsgi_app(self, *args, **kwargs):
        """Create the WSGI app and wrap it in every registered middleware."""
        # Build the bare application via the stock Pyramid configurator.
        app = super().make_wsgi_app(*args, **kwargs)
        # Wrap it with each middleware registered via add_wsgi_middleware.
        # Distinct names avoid shadowing this method's own *args/**kwargs.
        for factory, mw_args, mw_kwargs in self.get_settings()["wsgi.middlewares"]:
            app = factory(app, *mw_args, **mw_kwargs)
        return app
class RootFactory:
    """Default Pyramid root object supplying the application-wide ACL."""

    __parent__ = None
    __name__ = None

    # Admin-group members get "admin"; any authenticated principal may
    # manage their own account.
    __acl__ = [
        (Allow, "group:admins", "admin"),
        (Allow, Authenticated, "manage:user"),
    ]

    def __init__(self, request):
        # The factory receives the request but keeps no per-request state.
        pass
def require_https_tween_factory(handler, registry):
    """Build a tween rejecting non-HTTPS requests that carry an :action URL.

    When the ``enforce_https`` setting is disabled the handler is returned
    unwrapped, so the check disappears entirely.
    """
    if not registry.settings.get("enforce_https", True):
        return handler

    def require_https_tween(request):
        # Requests with an :action query parameter must arrive over HTTPS;
        # everything else passes straight through to the handler.
        has_action = bool(request.params.get(":action", None))
        if not has_action or request.scheme == "https":
            return handler(request)
        response = Response("SSL is required.", status=403, content_type="text/plain")
        response.status = "403 SSL is required"
        response.headers["X-Fastly-Error"] = "803"
        return response

    return require_https_tween
def activate_hook(request):
    """Tell pyramid_tm whether transaction management applies to *request*.

    Requests for the debug toolbar or static assets are excluded; everything
    else gets a transaction.
    """
    return not request.path.startswith(("/_debug_toolbar/", "/static/"))
def commit_veto(request, response):
    """Veto the transaction commit when the request raised an exception.

    Mirrors pyramid_tm's default veto (any exc_info aborts the commit) with
    one difference: a BasicAuthBreachedPassword exception still commits.
    """
    # TODO: We should probably use a registry or something instead of
    # hardcoding the exception type.
    exc_info = getattr(request, "exc_info", None)
    if exc_info is None:
        return None
    if isinstance(exc_info[1], BasicAuthBreachedPassword):
        return None
    return True
def template_view(config, name, route, template, route_kw=None):
    """Register *route* under *name* and render it directly with *template*.

    A convenience directive for pages that need no view logic of their own.
    Extra keyword arguments for add_route may be supplied via *route_kw*.
    """
    kwargs = {} if route_kw is None else route_kw
    config.add_route(name, route, **kwargs)
    config.add_view(renderer=template, route_name=name)
def maybe_set(settings, name, envvar, coercer=None, default=None):
    """Fill ``settings[name]`` from the environment variable *envvar*.

    Existing values in *settings* win (setdefault semantics).  When the
    variable is present its value is passed through *coercer* (if given);
    when it is absent and *default* is not None, the default is used.
    """
    if envvar not in os.environ:
        if default is not None:
            settings.setdefault(name, default)
        return
    raw = os.environ[envvar]
    settings.setdefault(name, raw if coercer is None else coercer(raw))
def maybe_set_compound(settings, base, name, envvar):
    """Parse a compound setting of the form ``value key=val key=val ...``.

    The first shlex token from *envvar* is stored as ``<base>.<name>``; each
    remaining ``key=val`` token becomes ``<base>.<key>``.  Unlike maybe_set,
    existing settings are overwritten.
    """
    if envvar not in os.environ:
        return
    tokens = shlex.split(os.environ[envvar])
    settings[".".join([base, name])] = tokens[0]
    for token in tokens[1:]:
        key, val = token.split("=")
        settings[".".join([base, key])] = val
def configure(settings=None):
    """Build, commit and return the Pyramid ``Configurator`` for Warehouse.

    Settings are derived from the process environment (via maybe_set /
    maybe_set_compound), with any values in *settings* taking precedence.
    Development-only settings are layered on when ``warehouse.env`` is
    ``development``, then every Warehouse subsystem is registered.  The
    order of the ``config.include`` calls below is significant.

    :param settings: optional dict of pre-seeded settings.
    :return: the committed ``Configurator`` instance.
    """
    if settings is None:
        settings = {}
    # Add information about the current copy of the code.
    maybe_set(settings, "warehouse.commit", "SOURCE_COMMIT", default="null")
    # Set the environment from an environment variable, if one hasn't already
    # been set.
    maybe_set(
        settings,
        "warehouse.env",
        "WAREHOUSE_ENV",
        Environment,
        default=Environment.production,
    )
    # Pull in default configuration from the environment.
    maybe_set(settings, "warehouse.token", "WAREHOUSE_TOKEN")
    maybe_set(settings, "warehouse.num_proxies", "WAREHOUSE_NUM_PROXIES", int)
    maybe_set(settings, "warehouse.theme", "WAREHOUSE_THEME")
    maybe_set(settings, "warehouse.domain", "WAREHOUSE_DOMAIN")
    maybe_set(settings, "forklift.domain", "FORKLIFT_DOMAIN")
    maybe_set(settings, "warehouse.legacy_domain", "WAREHOUSE_LEGACY_DOMAIN")
    maybe_set(settings, "site.name", "SITE_NAME", default="Warehouse")
    maybe_set(settings, "aws.key_id", "AWS_ACCESS_KEY_ID")
    maybe_set(settings, "aws.secret_key", "AWS_SECRET_ACCESS_KEY")
    maybe_set(settings, "aws.region", "AWS_REGION")
    maybe_set(settings, "gcloud.credentials", "GCLOUD_CREDENTIALS")
    maybe_set(settings, "gcloud.project", "GCLOUD_PROJECT")
    maybe_set(settings, "warehouse.trending_table", "WAREHOUSE_TRENDING_TABLE")
    maybe_set(settings, "celery.broker_url", "BROKER_URL")
    maybe_set(settings, "celery.result_url", "REDIS_URL")
    maybe_set(settings, "celery.scheduler_url", "REDIS_URL")
    maybe_set(settings, "database.url", "DATABASE_URL")
    # NOTE(review): both of the next two lines feed "elasticsearch.url";
    # because maybe_set uses setdefault, ELASTICSEARCH_SIX_URL only takes
    # effect when ELASTICSEARCH_URL is unset — confirm this fallback order
    # is intended.
    maybe_set(settings, "elasticsearch.url", "ELASTICSEARCH_URL")
    maybe_set(settings, "elasticsearch.url", "ELASTICSEARCH_SIX_URL")
    maybe_set(settings, "sentry.dsn", "SENTRY_DSN")
    maybe_set(settings, "sentry.frontend_dsn", "SENTRY_FRONTEND_DSN")
    maybe_set(settings, "sentry.transport", "SENTRY_TRANSPORT")
    maybe_set(settings, "sessions.url", "REDIS_URL")
    maybe_set(settings, "ratelimit.url", "REDIS_URL")
    maybe_set(settings, "sessions.secret", "SESSION_SECRET")
    maybe_set(settings, "camo.url", "CAMO_URL")
    maybe_set(settings, "camo.key", "CAMO_KEY")
    maybe_set(settings, "docs.url", "DOCS_URL")
    maybe_set(settings, "ga.tracking_id", "GA_TRACKING_ID")
    maybe_set(settings, "statuspage.url", "STATUSPAGE_URL")
    maybe_set(settings, "token.password.secret", "TOKEN_PASSWORD_SECRET")
    maybe_set(settings, "token.email.secret", "TOKEN_EMAIL_SECRET")
    maybe_set(settings, "warehouse.xmlrpc.cache.url", "REDIS_URL")
    maybe_set(settings, "token.password.max_age", "TOKEN_PASSWORD_MAX_AGE", coercer=int)
    maybe_set(settings, "token.email.max_age", "TOKEN_EMAIL_MAX_AGE", coercer=int)
    maybe_set(
        settings,
        "token.default.max_age",
        "TOKEN_DEFAULT_MAX_AGE",
        coercer=int,
        default=21600,  # 6 hours
    )
    # Compound settings: "<value> key=val ..." strings from the environment.
    maybe_set_compound(settings, "files", "backend", "FILES_BACKEND")
    maybe_set_compound(settings, "docs", "backend", "DOCS_BACKEND")
    maybe_set_compound(settings, "origin_cache", "backend", "ORIGIN_CACHE")
    maybe_set_compound(settings, "mail", "backend", "MAIL_BACKEND")
    maybe_set_compound(settings, "metrics", "backend", "METRICS_BACKEND")
    maybe_set_compound(settings, "breached_passwords", "backend", "BREACHED_PASSWORDS")
    # Add the settings we use when the environment is set to development.
    if settings["warehouse.env"] == Environment.development:
        settings.setdefault("enforce_https", False)
        settings.setdefault("pyramid.reload_assets", True)
        settings.setdefault("pyramid.reload_templates", True)
        settings.setdefault("pyramid.prevent_http_cache", True)
        settings.setdefault("debugtoolbar.hosts", ["0.0.0.0/0"])
        settings.setdefault(
            "debugtoolbar.panels",
            [
                ".".join(["pyramid_debugtoolbar.panels", panel])
                for panel in [
                    "versions.VersionDebugPanel",
                    "settings.SettingsDebugPanel",
                    "headers.HeaderDebugPanel",
                    "request_vars.RequestVarsDebugPanel",
                    "renderings.RenderingsDebugPanel",
                    "logger.LoggingPanel",
                    "performance.PerformanceDebugPanel",
                    "routes.RoutesDebugPanel",
                    "sqla.SQLADebugPanel",
                    "tweens.TweensDebugPanel",
                    "introspection.IntrospectionDebugPanel",
                ]
            ],
        )
    # Actually setup our Pyramid Configurator with the values pulled in from
    # the environment as well as the ones passed in to the configure function.
    config = Configurator(settings=settings)
    config.set_root_factory(RootFactory)
    # Register support for services
    config.include("pyramid_services")
    # Register metrics
    config.include(".metrics")
    # Register our CSRF support. We do this here, immediately after we've
    # created the Configurator instance so that we ensure to get our defaults
    # set ASAP before anything else has a chance to set them and possibly call
    # Configurator().commit()
    config.include(".csrf")
    # Include anything needed by the development environment.
    if config.registry.settings["warehouse.env"] == Environment.development:
        config.include("pyramid_debugtoolbar")
    # Register our logging support
    config.include(".logging")
    # We'll want to use Jinja2 as our template system.
    config.include("pyramid_jinja2")
    # Include our filters
    config.include(".filters")
    # Including pyramid_mailer for sending emails through SMTP.
    config.include("pyramid_mailer")
    # We want to use newstyle gettext
    config.add_settings({"jinja2.newstyle": True})
    # We also want to use Jinja2 for .html templates as well, because we just
    # assume that all templates will be using Jinja.
    config.add_jinja2_renderer(".html")
    # Sometimes our files are .txt files and we still want to use Jinja2 to
    # render them.
    config.add_jinja2_renderer(".txt")
    # Anytime we want to render a .xml template, we'll also use Jinja.
    config.add_jinja2_renderer(".xml")
    # We need to enable our Client Side Include extension
    config.get_settings().setdefault(
        "jinja2.extensions", ["warehouse.utils.html.ClientSideIncludeExtension"]
    )
    # We'll want to configure some filters for Jinja2 as well.
    filters = config.get_settings().setdefault("jinja2.filters", {})
    filters.setdefault("format_classifiers", "warehouse.filters:format_classifiers")
    filters.setdefault("format_tags", "warehouse.filters:format_tags")
    filters.setdefault("json", "warehouse.filters:tojson")
    filters.setdefault("camoify", "warehouse.filters:camoify")
    filters.setdefault("shorten_number", "warehouse.filters:shorten_number")
    filters.setdefault("urlparse", "warehouse.filters:urlparse")
    filters.setdefault("contains_valid_uris", "warehouse.filters:contains_valid_uris")
    filters.setdefault("format_package_type", "warehouse.filters:format_package_type")
    filters.setdefault("parse_version", "warehouse.filters:parse_version")
    # We also want to register some global functions for Jinja
    jglobals = config.get_settings().setdefault("jinja2.globals", {})
    jglobals.setdefault("is_valid_uri", "warehouse.utils.http:is_valid_uri")
    jglobals.setdefault("gravatar", "warehouse.utils.gravatar:gravatar")
    jglobals.setdefault("gravatar_profile", "warehouse.utils.gravatar:profile")
    jglobals.setdefault("now", "warehouse.utils:now")
    # We'll store all of our templates in one location, warehouse/templates
    # so we'll go ahead and add that to the Jinja2 search path.
    config.add_jinja2_search_path("warehouse:templates", name=".html")
    config.add_jinja2_search_path("warehouse:templates", name=".txt")
    config.add_jinja2_search_path("warehouse:templates", name=".xml")
    # We want to configure our JSON renderer to sort the keys, and also to use
    # an ultra compact serialization format.
    config.add_renderer("json", renderers.JSON(sort_keys=True, separators=(",", ":")))
    # Configure retry support.
    config.add_settings({"retry.attempts": 3})
    config.include("pyramid_retry")
    # Configure our transaction handling so that each request gets its own
    # transaction handler and the lifetime of the transaction is tied to the
    # lifetime of the request.
    config.add_settings(
        {
            "tm.manager_hook": lambda request: transaction.TransactionManager(),
            "tm.activate_hook": activate_hook,
            "tm.commit_veto": commit_veto,
            "tm.annotate_user": False,
        }
    )
    config.include("pyramid_tm")
    # Register our XMLRPC cache
    config.include(".legacy.api.xmlrpc.cache")
    # Register support for XMLRPC and override its renderer to allow
    # specifying custom dumps arguments.
    config.include("pyramid_rpc.xmlrpc")
    config.add_renderer("xmlrpc", XMLRPCRenderer(allow_none=True))
    # Register support for our legacy action URLs
    config.include(".legacy.action_routing")
    # Register support for our domain predicates
    config.include(".domain")
    # Register support for template views.
    config.add_directive("add_template_view", template_view, action_wrap=False)
    # Register support for internationalization and localization
    config.include(".i18n")
    # Register the configuration for the PostgreSQL database.
    config.include(".db")
    # Register the support for Celery Tasks
    config.include(".tasks")
    # Register support for our rate limiting mechanisms
    config.include(".rate_limiting")
    config.include(".static")
    config.include(".policy")
    config.include(".search")
    # Register the support for AWS and Google Cloud
    config.include(".aws")
    config.include(".gcloud")
    # Register our session support
    config.include(".sessions")
    # Register our support for http and origin caching
    config.include(".cache.http")
    config.include(".cache.origin")
    # Register support for sending emails
    config.include(".email")
    # Register our authentication support.
    config.include(".accounts")
    # Register logged-in views
    config.include(".manage")
    # Allow the packaging app to register any services it has.
    config.include(".packaging")
    # Configure redirection support
    config.include(".redirects")
    # Register all our URL routes for Warehouse.
    config.include(".routes")
    # Include our admin application
    config.include(".admin")
    # Register forklift, at least until we split it out into its own project.
    config.include(".forklift")
    # Block non HTTPS requests for the legacy ?:action= routes when they are
    # sent via POST.
    config.add_tween("warehouse.config.require_https_tween_factory")
    # Enable compression of our HTTP responses
    config.add_tween(
        "warehouse.utils.compression.compression_tween_factory",
        over=[
            "warehouse.cache.http.conditional_http_tween_factory",
            "pyramid_debugtoolbar.toolbar_tween_factory",
            "warehouse.raven.raven_tween_factory",
            EXCVIEW,
        ],
    )
    # Enable Warehouse to serve our static files
    prevent_http_cache = config.get_settings().get("pyramid.prevent_http_cache", False)
    config.add_static_view(
        "static",
        "warehouse:static/dist/",
        # Don't cache at all if prevent_http_cache is true, else we'll cache
        # the files for 10 years.
        cache_max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
    )
    config.add_cache_buster(
        "warehouse:static/dist/",
        ManifestCacheBuster(
            "warehouse:static/dist/manifest.json",
            reload=config.registry.settings["pyramid.reload_assets"],
            strict=not prevent_http_cache,
        ),
    )
    config.whitenoise_serve_static(
        autorefresh=prevent_http_cache,
        max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
    )
    config.whitenoise_add_files("warehouse:static/dist/", prefix="/static/")
    config.whitenoise_add_manifest(
        "warehouse:static/dist/manifest.json", prefix="/static/"
    )
    # Enable Warehouse to serve our locale files
    config.add_static_view("locales", "warehouse:locales/")
    # Enable support of passing certain values like remote host, client
    # address, and protocol support in from an outer proxy to the application.
    config.add_wsgi_middleware(
        ProxyFixer,
        token=config.registry.settings["warehouse.token"],
        num_proxies=config.registry.settings.get("warehouse.num_proxies", 1),
    )
    # Protect against cache poisoning via the X-Vhm-Root headers.
    config.add_wsgi_middleware(VhmRootRemover)
    # Fix our host header when getting sent upload.pypi.io as a HOST.
    # TODO: Remove this, this is at the wrong layer.
    config.add_wsgi_middleware(HostRewrite)
    # We want Raven to be the last thing we add here so that it's the outer
    # most WSGI middleware.
    config.include(".raven")
    # Register Content-Security-Policy service
    config.include(".csp")
    # Register Referrer-Policy service
    config.include(".referrer_policy")
    config.add_settings({"http": {"verify": "/etc/ssl/certs/"}})
    config.include(".http")
    # Add our theme if one was configured
    if config.get_settings().get("warehouse.theme"):
        config.include(config.get_settings()["warehouse.theme"])
    # Scan everything for configuration
    config.scan(
        ignore=["warehouse.migrations.env", "warehouse.celery", "warehouse.wsgi"]
    )
    # Sanity check our request and responses.
    # Note: It is very important that this go last. We need everything else that might
    # have added a tween to be registered prior to this.
    config.include(".sanity")
    # Finally, commit all of our changes
    config.commit()
    return config
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``ndb`` is a library for Google Cloud Datastore.
It was originally included in the Google App Engine runtime as a "new"
version of the ``db`` API (hence ``ndb``).
.. autodata:: __version__
.. autodata:: __all__
"""
from pkg_resources import get_distribution

# Single source of truth for the package version: read from the installed
# distribution metadata rather than duplicating the version string here.
__version__ = get_distribution("google-cloud-ndb").version
from google.cloud.ndb.client import Client
from google.cloud.ndb.context import AutoBatcher
from google.cloud.ndb.context import Context
from google.cloud.ndb.context import ContextOptions
from google.cloud.ndb.context import get_context
from google.cloud.ndb.context import get_toplevel_context
from google.cloud.ndb.context import TransactionOptions
from google.cloud.ndb._datastore_api import EVENTUAL
from google.cloud.ndb._datastore_api import EVENTUAL_CONSISTENCY
from google.cloud.ndb._datastore_api import STRONG
from google.cloud.ndb._datastore_query import Cursor
from google.cloud.ndb._datastore_query import QueryIterator
from google.cloud.ndb.global_cache import GlobalCache
from google.cloud.ndb.global_cache import MemcacheCache
from google.cloud.ndb.global_cache import RedisCache
from google.cloud.ndb.key import Key
from google.cloud.ndb.model import BlobKey
from google.cloud.ndb.model import BlobKeyProperty
from google.cloud.ndb.model import BlobProperty
from google.cloud.ndb.model import BooleanProperty
from google.cloud.ndb.model import ComputedProperty
from google.cloud.ndb.model import ComputedPropertyError
from google.cloud.ndb.model import DateProperty
from google.cloud.ndb.model import DateTimeProperty
from google.cloud.ndb.model import delete_multi
from google.cloud.ndb.model import delete_multi_async
from google.cloud.ndb.model import Expando
from google.cloud.ndb.model import FloatProperty
from google.cloud.ndb.model import GenericProperty
from google.cloud.ndb.model import GeoPt
from google.cloud.ndb.model import GeoPtProperty
from google.cloud.ndb.model import get_indexes
from google.cloud.ndb.model import get_indexes_async
from google.cloud.ndb.model import get_multi
from google.cloud.ndb.model import get_multi_async
from google.cloud.ndb.model import Index
from google.cloud.ndb.model import IndexProperty
from google.cloud.ndb.model import IndexState
from google.cloud.ndb.model import IntegerProperty
from google.cloud.ndb.model import InvalidPropertyError
from google.cloud.ndb.model import BadProjectionError
from google.cloud.ndb.model import JsonProperty
from google.cloud.ndb.model import KeyProperty
from google.cloud.ndb.model import KindError
from google.cloud.ndb.model import LocalStructuredProperty
from google.cloud.ndb.model import make_connection
from google.cloud.ndb.model import MetaModel
from google.cloud.ndb.model import Model
from google.cloud.ndb.model import ModelAdapter
from google.cloud.ndb.model import ModelAttribute
from google.cloud.ndb.model import ModelKey
from google.cloud.ndb.model import PickleProperty
from google.cloud.ndb.model import Property
from google.cloud.ndb.model import put_multi
from google.cloud.ndb.model import put_multi_async
from google.cloud.ndb.model import ReadonlyPropertyError
from google.cloud.ndb.model import Rollback
from google.cloud.ndb.model import StringProperty
from google.cloud.ndb.model import StructuredProperty
from google.cloud.ndb.model import TextProperty
from google.cloud.ndb.model import TimeProperty
from google.cloud.ndb.model import UnprojectedPropertyError
from google.cloud.ndb.model import User
from google.cloud.ndb.model import UserNotFoundError
from google.cloud.ndb.model import UserProperty
from google.cloud.ndb.polymodel import PolyModel
from google.cloud.ndb.query import ConjunctionNode
from google.cloud.ndb.query import AND
from google.cloud.ndb.query import DisjunctionNode
from google.cloud.ndb.query import OR
from google.cloud.ndb.query import FalseNode
from google.cloud.ndb.query import FilterNode
from google.cloud.ndb.query import gql
from google.cloud.ndb.query import Node
from google.cloud.ndb.query import Parameter
from google.cloud.ndb.query import ParameterizedFunction
from google.cloud.ndb.query import ParameterizedThing
from google.cloud.ndb.query import ParameterNode
from google.cloud.ndb.query import PostFilterNode
from google.cloud.ndb.query import Query
from google.cloud.ndb.query import QueryOptions
from google.cloud.ndb.query import RepeatedStructuredPropertyPredicate
from google.cloud.ndb.tasklets import add_flow_exception
from google.cloud.ndb.tasklets import Future
from google.cloud.ndb.tasklets import make_context
from google.cloud.ndb.tasklets import make_default_context
from google.cloud.ndb.tasklets import QueueFuture
from google.cloud.ndb.tasklets import ReducingFuture
from google.cloud.ndb.tasklets import Return
from google.cloud.ndb.tasklets import SerialQueueFuture
from google.cloud.ndb.tasklets import set_context
from google.cloud.ndb.tasklets import sleep
from google.cloud.ndb.tasklets import synctasklet
from google.cloud.ndb.tasklets import tasklet
from google.cloud.ndb.tasklets import toplevel
from google.cloud.ndb.tasklets import wait_all
from google.cloud.ndb.tasklets import wait_any
from google.cloud.ndb._transaction import in_transaction
from google.cloud.ndb._transaction import transaction
from google.cloud.ndb._transaction import transaction_async
from google.cloud.ndb._transaction import transactional
from google.cloud.ndb._transaction import transactional_async
from google.cloud.ndb._transaction import transactional_tasklet
from google.cloud.ndb._transaction import non_transactional
# Names exported by ``from google.cloud.ndb import *`` and documented as the
# package's public API (see ``.. autodata:: __all__`` in the module docstring).
__all__ = [
    "AutoBatcher",
    "Client",
    "Context",
    "ContextOptions",
    "EVENTUAL",
    "EVENTUAL_CONSISTENCY",
    "STRONG",
    "TransactionOptions",
    "Key",
    "BlobKey",
    "BlobKeyProperty",
    "BlobProperty",
    "BooleanProperty",
    "ComputedProperty",
    "ComputedPropertyError",
    "DateProperty",
    "DateTimeProperty",
    "delete_multi",
    "delete_multi_async",
    "Expando",
    "FloatProperty",
    "GenericProperty",
    "GeoPt",
    "GeoPtProperty",
    "get_indexes",
    "get_indexes_async",
    "get_multi",
    "get_multi_async",
    "GlobalCache",
    "in_transaction",
    "Index",
    "IndexProperty",
    "IndexState",
    "IntegerProperty",
    "InvalidPropertyError",
    "BadProjectionError",
    "JsonProperty",
    "KeyProperty",
    "KindError",
    "LocalStructuredProperty",
    "make_connection",
    "MemcacheCache",
    "MetaModel",
    "Model",
    "ModelAdapter",
    "ModelAttribute",
    "ModelKey",
    "non_transactional",
    "PickleProperty",
    "PolyModel",
    "Property",
    "put_multi",
    "put_multi_async",
    "ReadonlyPropertyError",
    "RedisCache",
    "Rollback",
    "StringProperty",
    "StructuredProperty",
    "TextProperty",
    "TimeProperty",
    "transaction",
    "transaction_async",
    "transactional",
    "transactional_async",
    "transactional_tasklet",
    "UnprojectedPropertyError",
    "User",
    "UserNotFoundError",
    "UserProperty",
    "ConjunctionNode",
    "AND",
    "Cursor",
    "DisjunctionNode",
    "OR",
    "FalseNode",
    "FilterNode",
    "gql",
    "Node",
    "Parameter",
    "ParameterizedFunction",
    "ParameterizedThing",
    "ParameterNode",
    "PostFilterNode",
    "Query",
    "QueryIterator",
    "QueryOptions",
    "RepeatedStructuredPropertyPredicate",
    "add_flow_exception",
    "Future",
    "get_context",
    "get_toplevel_context",
    "make_context",
    "make_default_context",
    "QueueFuture",
    "ReducingFuture",
    "Return",
    "SerialQueueFuture",
    "set_context",
    "sleep",
    "synctasklet",
    "tasklet",
    "toplevel",
    "wait_all",
    "wait_any",
]
| |
import pytest
import time
from rancher import ApiError
from .common import wait_for_template_to_be_created, \
wait_for_template_to_be_deleted, random_str, wait_for_atleast_workload
from .conftest import set_server_version, wait_for, DEFAULT_CATALOG
def test_catalog(admin_mc, remove_resource):
    """Catalogs with https and upper-cased HTTP scheme URLs are both
    created, produce templates, and are cleanly deleted."""
    client = admin_mc.client
    name1 = random_str()
    name2 = random_str()
    url1 = "https://github.com/StrongMonkey/charts-1.git"
    url2 = "HTTP://github.com/StrongMonkey/charts-1.git"
    catalog1 = client.create_catalog(name=name1, branch="test", url=url1)
    remove_resource(catalog1)
    catalog2 = client.create_catalog(name=name2, branch="test", url=url2)
    remove_resource(catalog2)
    for catalog_name in (name1, name2):
        wait_for_template_to_be_created(client, catalog_name)
    client.delete(catalog1)
    client.delete(catalog2)
    for catalog_name in (name1, name2):
        wait_for_template_to_be_deleted(client, catalog_name)
def test_invalid_catalog_chars(admin_mc, remove_resource):
    """Catalog URLs containing control/escape characters must be rejected
    with a 422 validation error."""
    client = admin_mc.client
    name = random_str()
    bad_urls = [
        # URL-encoded CR / LF characters.
        "https://github.com/%0dStrongMonkey%0A/charts-1.git",
        # Literal tab character.
        "https://github.com/StrongMonkey\t/charts-1.git",
    ]
    for url in bad_urls:
        with pytest.raises(ApiError) as e:
            client.create_catalog(name=name, branch="test", url=url)
        # create_catalog raises before returning, so no catalog object ever
        # exists and there is nothing to clean up.  (The previous version
        # called remove_resource(catalog) after the raising call, which was
        # unreachable dead code referencing an unbound name.)
        assert e.value.error.status == 422
        assert e.value.error.message == "Invalid characters in catalog URL"
def _wait_until_catalog_settles(client, name, timeout=90):
    """Poll (with exponential backoff) until the named catalog reports it is
    no longer transitioning, failing after *timeout* seconds."""
    start = time.time()
    interval = 0.5
    while True:
        time.sleep(interval)
        interval *= 2
        c = client.list_catalog(name=name).data[0]
        if c.transitioning == "no":
            return
        if time.time() - start > timeout:
            raise AssertionError(
                "Timed out waiting for catalog to stop transitioning")


def test_global_catalog_template_access(admin_mc, user_factory,
                                        remove_resource):
    """An unprivileged user must be able to list the templates of every
    global catalog (both the built-in library and a newly added one)."""
    client = admin_mc.client
    user1 = user_factory()
    remove_resource(user1)
    name = random_str()
    # Get all templates from the library catalog that is enabled by default.
    _wait_until_catalog_settles(client, "library")
    existing = client.list_template(catalogId="library").data
    templates = []
    for t in existing:
        templates.append("library-" + t.name)
    url = "https://github.com/mrajashree/charts.git"
    catalog = client.create_catalog(name=name,
                                    branch="onlyOne",
                                    url=url,
                                    )
    wait_for_template_to_be_created(client, name)
    _wait_until_catalog_settles(client, name)
    # Now list all templates of this catalog.
    new_templates = client.list_template(catalogId=name).data
    for t in new_templates:
        templates.append(name + "-" + t.name)
    all_templates = existing + new_templates
    # The user should be able to list all these templates.
    user_client = user1.client
    user_lib_templates = user_client.list_template(catalogId="library").data
    user_new_templates = user_client.list_template(catalogId=name).data
    user_templates = user_lib_templates + user_new_templates
    assert len(user_templates) == len(all_templates)
    client.delete(catalog)
    wait_for_template_to_be_deleted(client, name)
def test_user_can_list_global_catalog(user_factory, remove_resource):
    """A freshly created user can see the built-in "library" catalog."""
    new_user = user_factory()
    remove_resource(new_user)
    catalogs = new_user.client.list_catalog(name="library")
    assert len(catalogs) == 1
@pytest.mark.nonparallel
def test_template_version_links(admin_mc, admin_pc, custom_catalog,
                                remove_resource, restore_rancher_version):
    """Test that template versionLinks are being updated based off the rancher
    version set on the server and the query parameter 'rancherVersion' being
    set.

    Per the custom catalog fixture:
      template 1.6.0 supports rancher 2.0.0-2.2.0
      template 1.6.2 supports rancher 2.1.0-2.3.0
    """
    client = admin_mc.client
    c_name = random_str()
    custom_catalog(name=c_name)
    # (server version, expected version links) pairs covering: both versions
    # in range, only the older, only the newer, and neither.
    cases = [
        ("2.1.0", {"1.6.0", "1.6.2"}),
        ("2.0.0", {"1.6.0"}),
        ("2.3.0", {"1.6.2"}),
        ("2.4.0", set()),
    ]
    for server_version, expected_links in cases:
        set_server_version(client, server_version)
        templates = client.list_template(
            rancherVersion=server_version, catalogId=c_name)
        assert set(templates.data[0]['versionLinks']) == expected_links
def test_relative_paths(admin_mc, admin_pc, remove_resource):
    """ This test adds a catalog's index.yaml with a relative chart url
    and ensures that rancher can resolve the relative url"""
    client = admin_mc.client
    catalogname = "cat-" + random_str()
    url = "https://raw.githubusercontent.com/rancher/integration-test-charts"\
        "/relative-path"
    catalog = client.create_catalog(catalogName=catalogname,
                                    branch="master",
                                    url=url)
    remove_resource(catalog)
    catalog = client.reload(catalog)
    assert catalog['url'] == url
    # deploying an app from the catalog proves the relative tarball url
    # could be resolved
    ns = admin_pc.cluster.client.create_namespace(
        catalogName="ns-" + random_str(),
        projectId=admin_pc.project.id)
    remove_resource(ns)
    wait_for_template_to_be_created(client, catalog.id)
    external_id = ("catalog://?catalog=" + catalog.id +
                   "&template=mysql" "&version=1.6.2")
    mysqlha = admin_pc.client.create_app(name="app-" + random_str(),
                                         externalId=external_id,
                                         targetNamespace=ns.name,
                                         projectId=admin_pc.project.id)
    remove_resource(mysqlha)
    wait_for_atleast_workload(pclient=admin_pc.client, nsid=ns.id, timeout=60,
                              count=1)
def test_cannot_delete_system_catalog(admin_mc):
    """The built-in system catalog must reject delete requests with a 422."""
    sys_cat = admin_mc.client.by_id_catalog("system-library")
    with pytest.raises(ApiError) as err:
        admin_mc.client.delete(sys_cat)
    assert err.value.error.status == 422
    expected = 'not allowed to delete system-library catalog'
    assert err.value.error.message == expected
def test_system_catalog_missing_remove_link(admin_mc):
    """The system catalog must not advertise a 'remove' link at all."""
    sys_cat = admin_mc.client.by_id_catalog("system-library")
    assert "remove" not in sys_cat.links
def test_cannot_update_system_if_embedded(admin_mc):
    """With the system-catalog setting at 'bundled', edits to the
    system catalog must be rejected with a 422."""
    client = admin_mc.client
    setting = client.by_id_setting("system-catalog")
    # NOTE: mutating this global setting could interfere with other tests
    # that rely on it
    client.update_by_id_setting(id=setting.id, value="bundled")
    sys_cat = client.by_id_catalog("system-library")
    with pytest.raises(ApiError) as err:
        client.update_by_id_catalog(id=sys_cat.id, branch="asd")
    assert err.value.error.status == 422
    expected = 'not allowed to edit system-library catalog'
    assert err.value.error.message == expected
def test_embedded_system_catalog_missing_edit_link(admin_mc):
    """With the system-catalog setting at 'bundled', the system catalog
    must not advertise an 'update' link."""
    client = admin_mc.client
    setting = client.by_id_setting("system-catalog")
    # NOTE: mutating this global setting could interfere with other tests
    # that rely on it
    client.update_by_id_setting(id=setting.id, value="bundled")
    sys_cat = client.by_id_catalog("system-library")
    assert "update" not in sys_cat.links
@pytest.mark.nonparallel
def test_catalog_refresh(admin_mc):
    """Refreshing catalogs reports the names of the catalogs refreshed."""
    client = admin_mc.client
    library = client.by_id_catalog("library")
    single = client.action(obj=library, action_name="refresh")
    assert single['catalogs'][0] == "library"
    everything = client.action(obj=client.list_catalog(),
                               action_name="refresh")
    # Other tests may add/remove catalogs concurrently, so only require a
    # non-empty result instead of an exact count.
    assert len(everything['catalogs']) > 0, 'no catalogs in response'
def test_invalid_catalog_chart_names(admin_mc, remove_resource):
    """Test chart with invalid name in catalog error properly
    and test that a chart names are truncated and processed without
    error"""
    client = admin_mc.client
    cat_name = random_str()
    catalog = client.create_catalog(name=cat_name,
                                    branch="broke-charts",
                                    url=DEFAULT_CATALOG)
    remove_resource(catalog)
    wait_for_template_to_be_created(client, catalog.id)

    def errored(cat):
        # reload until the controller marks the catalog as errored
        cat = client.reload(cat)
        return cat if cat.transitioning == "error" else None

    catalog = wait_for(lambda: errored(catalog),
                       fail_handler=lambda:
                       "catalog was not found in error state")
    templates = client.list_template(catalogId=catalog.id).data
    joined = ','.join(str(t) for t in templates)
    # the two invalid charts must be skipped, not templated
    assert "areallylongname" not in joined
    assert "bad-chart_name" not in joined
    assert catalog.state == "processed"
    assert catalog.transitioning == "error"
    msg = catalog.transitioningMessage
    assert "Error in chart(s):" in msg
    assert "bad-chart_name" in msg
    assert "areallylongname" in msg
    # this will break if github repo changes
    assert len(templates) == 6
    # an errored catalog must still be deletable
    client.delete(catalog)
    wait_for_template_to_be_deleted(client, cat_name)
    assert not client.list_catalog(name=cat_name).data
def test_invalid_catalog_chart_urls(admin_mc, remove_resource):
    """Test chart with file:// and local:// url paths"""
    client = admin_mc.client
    cat_name = random_str()
    catalog = client.create_catalog(name=cat_name,
                                    branch="invalid-urls",
                                    url=DEFAULT_CATALOG)
    remove_resource(catalog)
    wait_for_template_to_be_created(client, catalog.id)

    def errored(cat):
        # reload until the controller marks the catalog as errored
        cat = client.reload(cat)
        return cat if cat.transitioning == "error" else None

    catalog = wait_for(lambda: errored(catalog),
                       fail_handler=lambda:
                       "catalog was not found in error state")
    templates = client.list_template(catalogId=catalog.id).data
    joined = ','.join(str(t) for t in templates)
    # index.yaml url for this chart is
    # local://azure-samples.github.io/helm-charts/aks-helloworld-0.1.0.tgz
    assert "aks-goodbyeworld" not in joined
    # index.yaml url for this chart is
    # file://azure-samples.github.io/helm-charts/aks-helloworld-0.1.0.tgz
    assert "aks-helloworld" not in joined
    assert catalog.state == "processed"
    assert catalog.transitioning == "error"
    msg = catalog.transitioningMessage
    assert "Error in chart(s):" in msg
    assert "aks-goodbyeworld" in msg
    assert "aks-helloworld" in msg
    # only the chart with a valid https:// url survives:
    # https://azure-samples.github.io/helm-charts/azure-vote-0.1.0.tgz
    # (this will break if the github repo changes)
    assert len(templates) == 1
    # an errored catalog must still be deletable
    client.delete(catalog)
    wait_for_template_to_be_deleted(client, cat_name)
    assert not client.list_catalog(name=cat_name).data
def test_catalog_has_helmversion(admin_mc, remove_resource):
    """Test to see that the helm version can be added to a catalog
    on create and that the value is passed to the template"""
    client = admin_mc.client
    name_default = random_str()
    name_v3 = random_str()
    cat_default = client.create_catalog(name=name_default,
                                        branch="master",
                                        url=DEFAULT_CATALOG)
    remove_resource(cat_default)
    cat_v3 = client.create_catalog(name=name_v3,
                                   branch="master",
                                   url=DEFAULT_CATALOG,
                                   helmVersion="helm_v3")
    remove_resource(cat_v3)
    wait_for_template_to_be_created(client, name_default)
    wait_for_template_to_be_created(client, name_v3)
    # only the catalog created with helmVersion carries the field
    assert "helm_v3" not in cat_default
    assert cat_v3.helmVersion == "helm_v3"
    for template in client.list_template(catalogId=cat_default.id).data:
        assert "helmVersion" not in template.status
    for template in client.list_template(catalogId=cat_v3.id).data:
        assert "helmVersion" in template.status
        assert template.status.helmVersion == "helm_v3"
def test_refresh_catalog_access(admin_mc, user_mc):
    """Tests that a user with standard access is not
    able to refresh a catalog.
    """
    library = admin_mc.client.by_id_catalog("library")
    refreshed = admin_mc.client.action(obj=library, action_name="refresh")
    assert refreshed['catalogs'][0] == "library"
    # reuse the admin's catalog object so the refresh action link exists
    # even though the standard user is not permitted to invoke it
    with pytest.raises(ApiError) as err:
        user_mc.client.action(obj=library, action_name="refresh")
    assert err.value.error.status == 404
| |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import _dsz
import dsz
import dsz.data
import dsz.lp
import mcl.tasking
import re
import sys
import Queue
import xml
import xml.sax
import xml.sax.handler
# Markers identifying which kind of process list a ProcessList represents.
InitialList = 1
StartList = 2
StopList = 3
# Address-family labels (not referenced elsewhere in this module's
# visible code).
Type_IPv4 = 'IPv4'
Type_IPv6 = 'IPv6'
class Processes(dsz.data.TaskReader):
    """SAX-based reader for 'Processes' task results.

    Builds a ProcessList for each <Initial>, <Started> and <Stopped>
    section of the task XML and fills it with Process items, exposing the
    parsed lists through the dsz.data.TaskReader iteration protocol.
    """

    def __init__(self, cmd=None, id=None):
        dsz.data.TaskReader.__init__(self, cmd, id)
        self.__currentList = None
        self.__currentItem = None
        # Iterators filtered to just the started / stopped lists.
        self.StartProcessListItem = FilteredIterator(self, ProcessList, StartList)
        self.StopProcessListItem = FilteredIterator(self, ProcessList, StopList)
        self.InitialProcessListItem = None
        # Drive iteration until startElement() has populated
        # InitialProcessListItem (set as a side effect when the <Initial>
        # element is parsed).
        for item in self:
            if self.InitialProcessListItem != None:
                break

        return

    def startElement(self, name, attrs):
        """SAX callback: open a new list, process, or time element."""
        dsz.data.TaskReader.startElement(self, name, attrs)
        self.__currentText = None
        if name == 'Initial':
            temp = ProcessList(InitialList, attrs['lptimestamp'])
            self.InitialProcessListItem = temp
            self.__currentList = temp
            self.currentItems.put(temp)
        elif name == 'Started':
            temp = ProcessList(StartList, attrs['lptimestamp'])
            self.__currentList = temp
            self.currentItems.put(temp)
        elif name == 'Stopped':
            temp = ProcessList(StopList, attrs['lptimestamp'])
            self.__currentList = temp
            self.currentItems.put(temp)
        elif name == 'Process':
            temp = Process(attrs)
            self.__currentItem = temp
            self.__currentList.ProcessItem.append(temp)
        elif name == 'CreateTime':
            self.__currentType = attrs['type']
            try:
                self.__currentTypeValue = int(attrs['typeValue'])
            except:
                # missing or non-numeric typeValue: fall back to 0
                self.__currentTypeValue = 0

        elif name == 'CpuTime':
            self.__currentType = attrs['type']
        elif name == 'Is64Bit':
            self.__currentItem.Is64Bit = True
        return

    def endElement(self, name):
        """SAX callback: commit accumulated text into the current item."""
        dsz.data.TaskReader.endElement(self, name)
        if name == 'Name':
            self.__currentItem.Name = self.__currentText
        elif name == 'ExecutablePath':
            self.__currentItem.Path = self.__currentText
        elif name == 'Description':
            self.__currentItem.Description = self.__currentText
        elif name == 'CreateTime':
            temp = dsz.data.DataBean()
            self.__currentItem.Created = temp
            temp.TypeValue = self.__currentTypeValue
            temp.Type = self.__currentType
            # element text looks like 'DATE T TIME[.fraction]'
            items = self.__currentText.split('T')
            temp.Date = items[0]
            items = items[1].split('.')
            temp.Time = items[0]
            try:
                temp.Nanoseconds = items[1]
            except:
                # no fractional-seconds part present
                temp.Nanoseconds = 0

        elif name == 'CpuTime':
            # pull all integer runs out of the duration text
            items = re.findall('\\d+', self.__currentText)
            temp = dsz.data.DataBean()
            self.__currentItem.CpuTime = temp
            temp.Days = int(items[0], 10)
            temp.Hours = int(items[1], 10)
            temp.Minutes = int(items[2], 10)
            temp.Seconds = int(items[3], 10)
            temp.Nanoseconds = int(items[4], 10)
        self.__currentText = None
        return

    def characters(self, content):
        """SAX callback: accumulate character data (utf-8 encoded)."""
        dsz.data.TaskReader.characters(self, content)
        content = ''.join(content.encode('utf-8'))
        if self.__currentText == None:
            self.__currentText = content
        else:
            self.__currentText += content
        return

    def Display(self):
        """Echo a header followed by every parsed process list."""
        dsz.ui.Echo('    PID    PPID    CREATED    CPU TIME    USER')
        dsz.ui.Echo('-------------------------------------------------------------------------------------')
        for item in self:
            if isinstance(item, ProcessList):
                item.Display()
class ProcessList(dsz.data.DataBean):
    """One snapshot (initial, started, or stopped) of processes."""

    def __init__(self, listType, timestamp):
        self.ListType = listType
        self.Timestamp = timestamp
        self.ProcessItem = []

    def Display(self):
        """Echo every contained process, marking started entries with '+'
        and stopped entries with '-'."""
        markers = {StartList: '+', StopList: '-'}
        marker = markers.get(self.ListType, ' ')
        for proc in self.ProcessItem:
            proc.DisplayFunction(marker)
class Process(dsz.data.DataBean):
    """One process entry parsed from the task XML.

    Attribute extraction is best-effort: any XML attribute that is missing
    or unparsable simply leaves the corresponding field as None.
    """

    def __init__(self, attrs):
        self.Is64Bit = False
        self.Description = None
        self.Name = None
        self.Path = None
        self.Created = None
        self.CpuTime = None
        try:
            self.Id = int(attrs['id'])
        except:
            self.Id = None

        try:
            self.ParentId = int(attrs['parent'])
        except:
            self.ParentId = None

        try:
            self.Display = attrs['display']
        except:
            self.Display = None

        try:
            self.User = attrs['user']
        except:
            self.User = None

        return

    def DisplayFunction(self, prefix=' '):
        """Echo one formatted row for this process; *prefix* marks
        started ('+') / stopped ('-') entries."""
        try:
            time = '%s %s' % (self.Created.Date, self.Created.Time)
        except:
            # Created may be None if no CreateTime element was parsed
            time = ''

        try:
            cpuTime = '%4d.%02d:%02d:%02d' % (self.CpuTime.Days, self.CpuTime.Hours, self.CpuTime.Minutes, self.CpuTime.Seconds)
        except:
            cpuTime = '0.00:00:00'

        if self.User == None:
            user = ''
        else:
            user = self.User
        if self.Path == None:
            path = ''
        else:
            path = self.Path
        if self.Name == None:
            name = ''
        else:
            name = self.Name
        # PID 0 with no name is the idle pseudo-process
        if self.Id == 0 and (self.Name == None or len(self.Name) == 0):
            procString = 'System Idle Process'
        elif self.Path == None or len(self.Path) == 0:
            procString = '%s' % name
        elif '\\' in path:
            # Windows-style path separator
            procString = '%s\\%s' % (path, name)
        else:
            procString = '%s/%s' % (path, name)
        if self.Is64Bit:
            procString = '%s (64-bit)' % procString
        dsz.ui.Echo('%s %12d%12d %s%s %s' % (prefix, self.Id, self.ParentId, time, cpuTime, user))
        dsz.ui.Echo(' %s' % procString)
        dsz.ui.Echo(' -------------------------------------------------------------------------------')
        return
class FilteredIterator(dsz.data.IteratorBean):
    """IteratorBean restricted to items of one specific ListType."""

    def __init__(self, item, iterType, listType):
        dsz.data.IteratorBean.__init__(self, item, iterType)
        # the ListType value this iterator accepts
        self.listType = listType

    def evaluate(self, ret):
        """Return True only for items whose ListType matches the filter."""
        return ret.ListType == self.listType
# Register the reader for the 'Processes' command; the upper/lower-case
# aliases preserve alternate lookup spellings.
dsz.data.RegisterCommand('Processes', Processes, True)
PROCESSES = Processes
processes = Processes
| |
#!/usr/bin/env python
#
# Use the raw transactions API to spend vivos received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a vivod or Vivo-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
# Threshold used by create_tx (change below this is left as fee) and by
# sanity_test_fee's no-fee checks.
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    # round-trip through json as a float and verify no satoshis were lost
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the Vivo Core data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/VivoCore/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "VivoCore")
    # everything else (linux, *bsd, ...) uses a dot-directory in $HOME
    return os.path.expanduser("~/.vivocore")
def read_bitcoin_config(dbdir):
    """Read the vivo.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser  # Python 2 module name

    class FakeSecHead(object):
        # Wraps the conf file so ConfigParser sees a synthetic [all]
        # section header first, and strips trailing '#' comments.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # emit the fake header exactly once
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    # drop everything from '#' on so the parser doesn't choke
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "vivo.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a Vivo Core JSON-RPC server.

    Reads rpcuser/rpcpassword/rpcport (and testnet) from *config*, probes
    the server with getmininginfo, and exits the process on any failure.
    Returns the connected ServiceProxy on success.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 19998 if testnet else 9998
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch
        # connection errors, but also make sure the vivod we're talking to
        # is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # BUG FIX: this used a bare 'except:', which also swallowed the
        # SystemExit raised by the testnet-mismatch branch above and
        # misreported it as a connection error. Catching Exception keeps
        # sys.exit() (and KeyboardInterrupt) working.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(vivod):
    """Prompt for the wallet passphrase if the wallet is locked.

    Returns True when the wallet is unlocked (or not encrypted at all);
    callers loop on this until it succeeds.
    """
    info = vivod.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # unlock just long enough (5s) to sign the transaction
            vivod.walletpassphrase(passphrase, 5)
        except Exception:
            # BUG FIX: was a bare 'except:', which also trapped
            # KeyboardInterrupt at the prompt; Exception lets Ctrl-C abort.
            sys.stderr.write("Wrong passphrase\n")

    info = vivod.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(vivod):
    """Return a {address: {"total", "outputs", "account"}} summary of all
    unspent outputs the wallet can see (including 0-conf)."""
    address_summary = dict()

    # map each receiving address to its wallet account label
    address_to_account = dict()
    for info in vivod.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    unspent = vivod.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = vivod.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-vivo-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue

        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }

    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent outputs until *needed* is covered.

    Returns (outputs, change): outputs is a list of {"txid", "vout"}
    dicts, and change is the amount gathered minus *needed* (negative
    when the inputs were insufficient).
    """
    # Feel free to improve this, this is good enough for my simple needs:
    chosen = []
    gathered = Decimal("0.0")
    for utxo in inputs:
        if gathered >= needed:
            break
        chosen.append({"txid": utxo["txid"], "vout": utxo["vout"]})
        gathered += utxo["amount"]
    return (chosen, gathered - needed)
def create_tx(vivod, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction sending *amount* (plus *fee*) from
    *fromaddresses* to *toaddress*; change returns to the last
    from-address. Returns the signed transaction hex."""
    all_coins = list_available(vivod)

    needed = amount+fee
    potential_inputs = []
    total_available = Decimal("0.0")
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed))
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to vivod.
    #
    outputs = {toaddress: float(amount)}
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        outputs[change_address] = outputs.get(change_address, 0.0) + float(change_amount)

    rawtx = vivod.createrawtransaction(inputs, outputs)
    signed = vivod.signrawtransaction(rawtx)
    if not signed["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    return signed["hex"]
def compute_amount_in(vivod, txinfo):
    """Sum the values of all previous outputs spent by txinfo's inputs."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        # look up the funding transaction to find the value being spent
        funding = vivod.getrawtransaction(vin['txid'], 1)
        total += funding['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs created by the transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(vivod, txdata_hex, max_fee):
    """Decode txdata_hex and exit(1) if its implied fee looks unreasonable.

    Rejects a fee above *max_fee*, and rejects no-fee transactions that
    are either larger than 1 KB or tiny-amount.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = vivod.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(vivod, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUG FIX: the checks below referenced an undefined name 'fee',
        # raising NameError instead of performing the no-fee checks; the
        # implied fee is whatever the inputs pay over the outputs.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction

    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """CLI entry point: list spendable funds, or build/sign/(send) a
    spend transaction based on the command-line options."""
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get vivos from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send vivos to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of vivo.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    vivod = connect_JSON(config)

    if options.amount is None:
        # no --amount given: just list spendable funds per address
        address_summary = list_available(vivod)
        # NOTE(review): .iteritems() is Python 2 only
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(vivod) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(vivod, options.fromaddresses.split(","), options.to, amount, fee)
        # refuse to pay more than 1% of the sent amount as fee
        sanity_test_fee(vivod, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = vivod.sendrawtransaction(txdata)
            print(txid)
# Run the CLI only when executed as a script.
if __name__ == '__main__':
    main()
| |
import logging
import numpy as np
from .modest_image import extract_matched_slices
from ..core.exceptions import IncompatibleAttribute
from ..core.data import Data
from ..core.subset import Subset, RoiSubsetState
from ..core.roi import PolygonalROI
from ..core.edit_subset_mode import EditSubsetMode
from .viz_client import VizClient, init_mpl
from .layer_artist import (ScatterLayerArtist, LayerArtistContainer,
ImageLayerArtist, SubsetImageLayerArtist,
RGBImageLayerArtist)
def requires_data(func):
    """Decorator that checks an ImageClient for a non-null display_data
    attribute. Only executes decorated function if present.

    When display_data is None the wrapped call is a no-op returning None.
    """
    from functools import wraps

    # IMPROVEMENT: wraps() preserves the decorated method's __name__ and
    # docstring (previously every wrapped method introspected as 'result').
    @wraps(func)
    def result(*args, **kwargs):
        # args[0] is the ImageClient instance (self)
        if args[0].display_data is None:
            return
        return func(*args, **kwargs)
    return result
class ImageClient(VizClient):
    """Client that renders a Data object as a 2D image (or a 2D slice
    through a 3D cube) on a matplotlib axes, managing a container of
    layer artists for the data, its subsets, and scatter overlays."""

    def __init__(self, data, figure=None, axes=None, artist_container=None):
        figure, axes = init_mpl(figure, axes)
        VizClient.__init__(self, data)
        self.artists = artist_container
        if self.artists is None:
            self.artists = LayerArtistContainer()
        # currently-displayed dataset/attribute (None until set_data)
        self.display_data = None
        self.display_attribute = None
        # slice state for 3D cubes: orientation axis and index along it
        self._slice_ori = 0
        self._slice_ind = 0
        self._view_window = None
        self._view = None
        self._image = None
        self._ax = axes
        self._ax.get_xaxis().set_ticks([])
        self._ax.get_yaxis().set_ticks([])
        self._figure = figure
        # per-attribute normalization cache so switching attributes
        # restores the previous stretch settings
        self._norm_cache = {}
        #format axes
        fc = self._ax.format_coord

        def format_coord(x, y):
            # show the world coordinates under the cursor
            if self.display_data is None:
                return fc(x, y)
            pix = self._pixel_coords(x, y)
            world = self.display_data.coords.pixel2world(*pix)
            world = world[::-1] # reverse for numpy convention
            ind = _slice_axis(self.display_data.shape, self._slice_ori)
            labels = _slice_labels(self.display_data, self._slice_ori)
            return '%s=%s %s=%s' % (labels[1], world[ind[1]],
                                    labels[0], world[ind[0]])
        self._ax.format_coord = format_coord
        self._cid = self._ax.figure.canvas.mpl_connect('button_release_event',
                                                       self.check_update)

    @property
    def axes(self):
        return self._ax

    @property
    def is_3D(self):
        # True when the displayed dataset is a 3D cube
        if not self.display_data:
            return False
        return len(self.display_data.shape) == 3

    @property
    def slice_ind(self):
        # current index along the slice axis, or None for 2D data
        if self.is_3D:
            return self._slice_ind
        return None

    @property
    def image(self):
        return self._image

    @slice_ind.setter
    def slice_ind(self, value):
        if self.is_3D:
            self._slice_ind = value
            self._update_data_plot()
            self._update_subset_plots()
            self._redraw()
        else:
            raise IndexError("Cannot set slice for 2D image")

    def can_image_data(self, data):
        # only 2D and 3D datasets can be shown as an image
        return data.ndim in [2, 3]

    def _ensure_data_present(self, data):
        if data not in self.artists:
            self.add_layer(data)

    def check_update(self, *args):
        # refresh only when the pan/zoom window actually changed
        logging.getLogger(__name__).debug("check update")
        vw = _view_window(self._ax)
        if vw != self._view_window:
            logging.getLogger(__name__).debug("updating")
            self._update_data_plot()
            self._update_subset_plots()
            self._redraw()
            self._view_window = vw

    def set_data(self, data, attribute=None):
        """Make *data* (and optionally a specific attribute) the
        displayed image."""
        if not self.can_image_data(data):
            return
        self._ensure_data_present(data)

        attribute = attribute or _default_component(data)

        self.display_data = data
        self.display_attribute = attribute
        self._update_axis_labels()
        self._update_data_plot(relim=True)
        self._update_subset_plots()
        self._redraw()

    def slice_bounds(self):
        # (lo, hi) valid slice indices for the current orientation
        if not self.is_3D:
            return (0, 0)
        if self._slice_ori == 2:
            return (0, self.display_data.shape[2] - 1)
        if self._slice_ori == 1:
            return (0, self.display_data.shape[1] - 1)
        if self._slice_ori == 0:
            return (0, self.display_data.shape[0] - 1)

    def set_slice_ori(self, ori):
        """Change which axis of a 3D cube is sliced along."""
        if not self.is_3D:
            raise IndexError("Cannot set orientation of 2D image")
        if ori not in [0, 1, 2]:
            raise TypeError("Orientation must be 0, 1, or 2")
        self._slice_ori = ori
        # clamp the slice index into the new orientation's bounds
        self.slice_ind = min(self.slice_ind, self.slice_bounds()[1])
        self.slice_ind = max(self.slice_ind, self.slice_bounds()[0])
        self._update_axis_labels()
        self._update_data_plot(relim=True)
        self._update_subset_plots()
        self._redraw()

    @requires_data
    def _update_axis_labels(self):
        ori = self._slice_ori
        labels = _slice_labels(self.display_data, ori)
        self._ax.set_xlabel(labels[1])
        self._ax.set_ylabel(labels[0])

    def set_attribute(self, attribute):
        """Switch which component of the displayed data is imaged."""
        if not self.display_data or \
                attribute not in self.display_data.component_ids():
            raise IncompatibleAttribute(
                "Attribute not in data's attributes: %s" % attribute)
        if self.display_attribute is not None:
            # remember the stretch for the attribute we're leaving
            self._norm_cache[self.display_attribute] = self.get_norm()
        self.display_attribute = attribute

        if attribute in self._norm_cache:
            self.set_norm(norm=self._norm_cache[attribute])
        else:
            self.clear_norm()

        self._update_data_plot()
        self._redraw()

    def _redraw(self):
        """
        Re-render the screen
        """
        self._ax.figure.canvas.draw()

    @requires_data
    def set_norm(self, **kwargs):
        for a in self.artists[self.display_data]:
            a.set_norm(**kwargs)
        self._update_data_plot()
        self._redraw()

    @requires_data
    def clear_norm(self):
        for a in self.artists[self.display_data]:
            a.clear_norm()

    @requires_data
    def get_norm(self):
        a = self.artists[self.display_data][0]
        return a.norm

    @requires_data
    def set_cmap(self, cmap):
        for a in self.artists[self.display_data]:
            a.cmap = cmap
            a.redraw()

    def _build_view(self, matched=False):
        # build the numpy-style index tuple used to slice display_data
        att = self.display_attribute
        shp = self.display_data.shape
        shp_2d = _2d_shape(shp, self._slice_ori)
        x, y = np.s_[:], np.s_[:]
        if matched:
            # restrict (and downsample) to the currently-visible window
            v = extract_matched_slices(self._ax, shp_2d)
            x = slice(v[0], v[1], v[2])
            y = slice(v[3], v[4], v[5])

        if not self.is_3D:
            return (att, y, x)
        if self._slice_ori == 0:
            return (att, self.slice_ind, y, x)
        if self._slice_ori == 1:
            return (att, y, self.slice_ind, x)
        assert self._slice_ori == 2
        return (att, y, x, self.slice_ind)

    @requires_data
    def _update_data_plot(self, relim=False):
        """
        Re-sync the main image and its subsets
        """
        if relim:
            self.relim()

        view = self._build_view(matched=True)
        self._image = self.display_data[view]
        self._view = view
        # drop image artists that belong to other datasets; update the rest
        for a in list(self.artists):
            if (not isinstance(a, ScatterLayerArtist)) and \
                    a.layer.data is not self.display_data:
                self.artists.remove(a)
            else:
                a.update(view)
        for a in self.artists[self.display_data]:
            a.update(view)

    def relim(self):
        # reset axes limits to the full extent of the current 2D slice
        shp = _2d_shape(self.display_data.shape, self._slice_ori)
        self._ax.set_xlim(0, shp[1])
        self._ax.set_ylim(0, shp[0])

    def _update_subset_single(self, s, redraw=False):
        """
        Update the location and visual properties
        of each point in a single subset

        Parameters:
        ----------
        s: A subset instance
        The subset to refresh.

        """
        logging.getLogger(__name__).debug("update subset single: %s", s)
        self._update_scatter_layer(s)

        if s not in self.artists:
            return

        if s.data is not self.display_data:
            return

        view = self._build_view(matched=True)
        for a in self.artists[s]:
            a.update(view)

        if redraw:
            self._redraw()

    @requires_data
    def apply_roi(self, roi):
        # translate the ROI into a pixel-coordinate subset state and
        # apply it via the current edit mode
        subset_state = RoiSubsetState()
        xroi, yroi = roi.to_polygon()
        x, y = self._get_axis_components()
        subset_state.xatt = x
        subset_state.yatt = y
        subset_state.roi = PolygonalROI(xroi, yroi)
        mode = EditSubsetMode()
        mode.update(self.data, subset_state, focus_data=self.display_data)

    def _horizontal_axis_index(self):
        """Which index (in numpy convention - zyx) does the horizontal
        axis coorespond to?"""
        if not self.is_3D or self._slice_ori == 2:
            return 1
        return 2

    def _vertical_axis_index(self):
        """Which index (in numpy convention - zyx) does the vertical
        axis coorespond to?"""
        if self.is_3D and self._slice_ori == 0:
            return 1
        return 0

    def _get_axis_components(self):
        # NOTE(review): map() returns an iterator on Python 3 -- callers
        # here unpack it as two values, which still works; confirm the
        # intended Python version before restructuring.
        data = self.display_data
        ids = [self._horizontal_axis_index(), self._vertical_axis_index()]
        return map(data.get_pixel_component_id, ids)

    def _remove_subset(self, message):
        self.delete_layer(message.sender)

    def delete_layer(self, layer):
        if layer not in self.artists:
            return
        for a in self.artists.pop(layer):
            a.clear()

        if layer is self.display_data:
            self.display_data = None

        if isinstance(layer, Data):
            # removing a dataset also removes all of its subsets
            for subset in layer.subsets:
                self.delete_layer(subset)

        self._redraw()

    def _remove_data(self, message):
        self.delete_layer(message.data)
        for s in message.data.subsets:
            self.delete_layer(s)

    def init_layer(self, layer):
        #only auto-add subsets if they are of the main image
        if isinstance(layer, Subset) and layer.data is not self.display_data:
            return
        self.add_layer(layer)

    def add_rgb_layer(self, layer, r=None, g=None, b=None):
        """Display *layer* as an RGB composite of the r/g/b components."""
        a = RGBImageLayerArtist(layer, self._ax)
        a.r = r
        a.g = g
        a.b = b
        # replace any existing artists for this layer with the RGB artist
        for artist in self.artists.pop(layer):
            artist.clear()
        self.artists.append(a)
        self._update_data_plot()
        self._redraw()

    def add_layer(self, layer):
        if layer in self.artists:
            return

        if layer.data not in self.data:
            raise TypeError("Data not managed by client's data collection")

        if not self.can_image_data(layer.data):
            if len(layer.data.shape) == 1: # if data is 1D, try to scatter plot
                self.add_scatter_layer(layer)
                return
            logging.getLogger(__name__).warning(
                "Cannot visualize %s. Aborting", layer.label)
            return

        if isinstance(layer, Data):
            self.artists.append(ImageLayerArtist(layer, self._ax))
            for s in layer.subsets:
                self.add_layer(s)
        elif isinstance(layer, Subset):
            self.artists.append(SubsetImageLayerArtist(layer, self._ax))
            self._update_subset_single(layer)
        else:
            raise TypeError("Unrecognized layer type: %s" % type(layer))

    def add_scatter_layer(self, layer):
        logging.getLogger(
            __name__).debug('Adding scatter layer for %s' % layer)
        if layer in self.artists:
            logging.getLogger(__name__).debug('Layer already present')
            return

        self.artists.append(ScatterLayerArtist(layer, self._ax))
        self._update_scatter_layer(layer)

    @requires_data
    def _update_scatter_layer(self, layer):
        xatt, yatt = self._get_plot_attributes()
        for a in self.artists[layer]:
            if not isinstance(a, ScatterLayerArtist):
                continue
            a.xatt = xatt
            a.yatt = yatt
            if self.is_3D:
                # emphasize only the points that fall inside this slice
                zatt = self.display_data.get_pixel_component_id(
                    self._slice_ori)
                subset = (
                    zatt > self._slice_ind) & (zatt <= self._slice_ind + 1)
                a.emphasis = subset
            else:
                a.emphasis = None
            a.update()
            a.redraw()
        self._redraw()

    @requires_data
    def _get_plot_attributes(self):
        y, x = _slice_axis(self.display_data.shape, self._slice_ori)
        ids = self.display_data.pixel_component_ids
        return ids[x], ids[y]

    def _pixel_coords(self, x, y):
        """From a slice coordinate (x,y), return the full (possibly
        3D) location

        *Note*
        The order of inputs and outputs from this function are reverse
        the numpy convention (i.e. x axis specified first, not last)

        *Returns*
        Either (x,y) or (x,y,z)
        """
        if not self.is_3D:
            return x, y
        if self._slice_ori == 0:
            return x, y, self.slice_ind
        elif self._slice_ori == 1:
            return x, self.slice_ind, y
        else:
            assert self._slice_ori == 2
            return self.slice_ind, x, y

    def is_visible(self, layer):
        return all(a.visible for a in self.artists[layer])

    def set_visible(self, layer, state):
        for a in self.artists[layer]:
            a.visible = state
def _2d_shape(shape, slice_ori):
"""Return the shape of the 2D slice through a 2 or 3D image"""
if len(shape) == 2:
return shape
if slice_ori == 0:
return shape[1:]
if slice_ori == 1:
return shape[0], shape[2]
assert slice_ori == 2
return shape[0:2]
def _slice_axis(shape, slice_ori):
"""Return a 2-tuple of the axis indices for the given
image and slice orientation"""
if len(shape) == 2:
return 0, 1
if slice_ori == 0:
return 1, 2
if slice_ori == 1:
return 0, 2
assert slice_ori == 2
return 0, 1
def _slice_labels(data, slice_ori):
shape = data.shape
names = [data.get_world_component_id(i).label
for i in range(len(shape))]
names = [n.split(':')[-1].split('-')[0] for n in names]
if len(shape) == 2:
return names[0], names[1]
if slice_ori == 0:
return names[1], names[2]
if slice_ori == 1:
return names[0], names[2]
assert slice_ori == 2
return names[0], names[1]
def _view_window(ax):
""" Return a tuple describing the view window of an axes object.
The contents should not be used directly, Rather, several
return values should be compared with == to determine if the
window has been panned/zoomed
"""
ext = ax.transAxes.transform([1, 1]) - ax.transAxes.transform([0, 0])
xlim, ylim = ax.get_xlim(), ax.get_ylim()
result = xlim[0], ylim[0], xlim[1], ylim[1], ext[0], ext[1]
logging.getLogger(__name__).debug("view window: %s", result)
return result
def _default_component(data):
"""Choose a default ComponentID to display for data
Returns PRIMARY if present
"""
cid = data.find_component_id('PRIMARY')
if cid is not None:
return cid
return data.component_ids()[0]
| |
# --------------------------------------------------------------------------------------------------
# Name: MainWindow
# Purpose: Represents the Application MainWindow and hosts all components inside it:
# Canvas, Animation Display etc.
# Author: Rafael Vasco
#
# Created: 26/01/2013
# Copyright: (c) Rafael 2013
# Licence: <your licence>
#--------------------------------------------------------------------------------------------------
from PyQt5.QtCore import Qt, QEvent, pyqtSignal
from PyQt5.QtGui import QPixmap, QPainter
from PyQt5.QtWidgets import QMainWindow, QVBoxLayout, QDockWidget, QHBoxLayout
from src.view.options_bar_widget import OptionsBar
from src.view.pixel_size_widget import PixelSizeWidget
from src.view.toolbar_widget import ToolBar
from ui.mainwindow_ui import Ui_MainWindow
from src.view.animation_display_widget import AnimationDisplay
from src.view.canvas_widget import Canvas
from src.view.color_picker_widget import ColorPicker
from src.view.layer_manager_widget import LayerManager
from src.view.new_sprite_dialog import NewSpriteDialog
from src.view.animation_manager_widget import AnimationManager
from src.model.resources_cache import ResourcesCache
import src.model.appdata as app_data
# -------------------------------------------------------------------------------------------------
class MainWindow(QMainWindow, Ui_MainWindow):
    """Application main window.

    Hosts the canvas, tool/options bars, color picker, pixel-size widget,
    animation display/manager and layer manager, and wires their signals
    together.
    """

    # emitted from closeEvent so the application can shut down cleanly
    closed = pyqtSignal()

    def __init__(self):
        QMainWindow.__init__(self)
        self.setupUi(self)
        self._logo = QPixmap(':/images/logo')
        # while False, the central widget is hidden and paintEvent draws
        # the logo splash instead
        self._workspaceVisible = False
        self._pixelSizeWidget = PixelSizeWidget()
        self._colorPicker = ColorPicker()
        self._canvas = Canvas()
        # seed the canvas colors from the picker's current selection
        self._canvas.primary_color = self._colorPicker.primary_color
        self._canvas.secondary_color = self._colorPicker.secondary_color
        self._toolbar = ToolBar()
        self._optionsBar = OptionsBar(self._canvas)
        self._animationDisplay = AnimationDisplay()
        self._animationDisplay.backlight_enabled = self._canvas.backlight_enabled
        # the display lives in a dock so it can be floated, but not closed
        self._animationDisplayDock = QDockWidget()
        self._animationDisplayDock.setFeatures(QDockWidget.DockWidgetFloatable)
        self._animationDisplayDock.setWindowTitle("Animation Display")
        self._animationDisplayDock.setWidget(self._animationDisplay)
        self._animationManager = AnimationManager()
        self._layerManager = LayerManager()
        self._newSpriteDialog = NewSpriteDialog()
        self._newSpriteDialog.setWindowFlags(Qt.WindowTitleHint | Qt.WindowCloseButtonHint)
        # -----------------------------------------------------------------------------------------
        self._init_components()
        self._init_layout()
        self._init_events()
        # -----------------------------------------------------------------------------------------
        # start on the splash screen until a sprite is created/opened
        self.hide_workspace()

    @property
    def canvas(self):
        """The drawing Canvas widget."""
        return self._canvas

    @property
    def color_picker(self):
        """The ColorPicker widget."""
        return self._colorPicker

    @property
    def layer_manager(self):
        """The LayerManager widget."""
        return self._layerManager

    @property
    def animation_manager(self):
        """The AnimationManager widget."""
        return self._animationManager

    @property
    def toolbar_widget(self):
        # NOTE(review): returns 'self.toolBar' (from the generated
        # Ui_MainWindow), not the custom ToolBar in self._toolbar which
        # is exposed as 'tool_box' below -- confirm this is intentional
        return self.toolBar

    @property
    def new_sprite_dialog(self):
        """Dialog used to create a new sprite."""
        return self._newSpriteDialog

    @property
    def animation_display(self):
        """The AnimationDisplay widget."""
        return self._animationDisplay

    @property
    def tool_box(self):
        """The custom ToolBar widget shown above the canvas."""
        return self._toolbar

    def show_workspace(self):
        """Show the editing workspace (central widget)."""
        self.centralWidget().setVisible(True)
        self._workspaceVisible = True

    def hide_workspace(self):
        """Hide the workspace; paintEvent then draws the splash logo."""
        self.centralWidget().setVisible(False)
        self._workspaceVisible = False

    def paintEvent(self, e):
        """Draw the centered logo and version string while no workspace
        is visible."""
        if not self._workspaceVisible:
            p = QPainter(self)
            x = self.width() / 2 - self._logo.width() / 2
            y = self.height() / 2 - self._logo.height() / 2
            # NOTE(review): x and y are floats under Python 3; PyQt5's
            # drawPixmap expects ints -- confirm this does not raise
            p.drawPixmap(x, y, self._logo)
            p.drawText(x + 50, y + 200,
                       '.:: SpriteMator ::. | Version: %s' % app_data.meta['VERSION'])

    def eventFilter(self, target, event):
        """Global wheel handling: Ctrl+wheel cycles palette colors,
        Alt+wheel cycles palette ramps; other events pass through."""
        if event.type() == QEvent.Wheel:
            if event.modifiers() & Qt.ControlModifier:
                if event.angleDelta().y() > 0:
                    self.color_picker.select_next_color_on_palette()
                elif event.angleDelta().y() < 0:
                    self.color_picker.select_previous_color_on_palette()
                return True
            elif event.modifiers() & Qt.AltModifier:
                if event.angleDelta().y() > 0:
                    self.color_picker.select_next_ramp_on_palette()
                elif event.angleDelta().y() < 0:
                    self.color_picker.select_previous_ramp_on_palette()
                return True
        # NOTE(review): super(QMainWindow, self) skips QMainWindow's own
        # implementation; super(MainWindow, self) is the usual form
        return super(QMainWindow, self).eventFilter(target, event)

    def closeEvent(self, e):
        """Notify listeners before the window closes."""
        self.closed.emit()
        super(QMainWindow, self).closeEvent(e)

    def _init_components(self):
        """Apply shared fonts to the menu actions and register tools."""
        menufont = ResourcesCache.get('BigFont')
        self.actionNew.setFont(menufont)
        self.actionOpen.setFont(menufont)
        self.actionClose.setFont(menufont)
        self.actionExport.setFont(menufont)
        self.actionImport.setFont(menufont)
        self.actionQuit.setFont(menufont)
        self.actionSave.setFont(menufont)
        self.actionSaveAs.setFont(menufont)
        self._init_toolbox()

    def _init_layout(self):
        """Place every child widget into its frame's layout."""
        # -----------------------------------------------------------------------------------------
        canvaslayout = QVBoxLayout()
        canvaslayout.setContentsMargins(0, 0, 0, 0)
        canvaslayout.addWidget(self._toolbar)
        canvaslayout.addWidget(self._canvas)
        canvaslayout.addWidget(self._optionsBar)
        self.canvasFrame.setLayout(canvaslayout)
        # -----------------------------------------------------------------------------------------
        color_picker_layout = QVBoxLayout()
        color_picker_layout.setContentsMargins(0, 0, 0, 0)
        color_picker_layout.addWidget(self._pixelSizeWidget)
        color_picker_layout.addWidget(self._colorPicker)
        self.colorPickerFrame.setLayout(color_picker_layout)
        # -----------------------------------------------------------------------------------------
        animation_preview_layout = QVBoxLayout()
        animation_preview_layout.setContentsMargins(0, 0, 0, 0)
        animation_preview_layout.addWidget(self._animationDisplayDock)
        self.previewFrame.setLayout(animation_preview_layout)
        # -----------------------------------------------------------------------------------------
        layer_manager_layout = QVBoxLayout()
        layer_manager_layout.setContentsMargins(0, 0, 0, 0)
        layer_manager_layout.addWidget(self._layerManager)
        self.layerListFrame.setLayout(layer_manager_layout)
        # -----------------------------------------------------------------------------------------
        animation_bar_layout = QHBoxLayout()
        animation_bar_layout.setContentsMargins(0, 0, 0, 0)
        animation_bar_layout.addWidget(self._animationManager)
        animation_bar_layout.setAlignment(Qt.AlignLeft)
        self.animationBarFrame.setLayout(animation_bar_layout)

    def _init_events(self):
        """Connect every child widget signal to its handler below."""
        self._pixelSizeWidget.pixelSizeChanged.connect(self._on_pixel_size_changed)
        self._colorPicker.primaryColorChanged.connect(self._on_primary_color_changed)
        self._colorPicker.secondaryColorChanged.connect(self._on_secondary_color_changed)
        self._canvas.surfaceChanged.connect(self._on_canvas_surface_changed)
        self._canvas.surfaceChanging.connect(self._on_canvas_surface_changing)
        self._canvas.viewportChanged.connect(self._on_canvas_viewport_changed)
        self._canvas.colorPicked.connect(self._on_canvas_color_picked)
        self._toolbar.toolChanged.connect(self._on_tool_changed)
        self._toolbar.primaryInkChanged.connect(self._on_primary_ink_changed)
        self._toolbar.secondaryInkChanged.connect(self._on_secondary_ink_changed)
        self._optionsBar.toggledGrid.connect(self._on_options_grid_toggle)
        self._optionsBar.toggledOnionSkin.connect(self._on_options_onion_skin_toggle)
        self._optionsBar.toggledLights.connect(self._on_options_lights_toggle)
        self._animationManager.currentFrameChanged.connect(self._on_current_frame_changed)
        self._layerManager.currentLayerChanged.connect(self._on_current_layer_changed)
        self._layerManager.layerOrderChanged.connect(self._on_layer_order_changed)
        self._layerManager.layerImported.connect(self._on_layer_imported)

    def _init_toolbox(self):
        """Register the canvas tools and inks on the toolbar."""
        self._toolbar.register_tool(self._canvas.find_tool_by_name('Pen'), is_default=True)
        self._toolbar.register_tool(self._canvas.find_tool_by_name('Picker'))
        self._toolbar.register_tool(self._canvas.find_tool_by_name('Filler'))
        self._toolbar.register_tool(self._canvas.find_tool_by_name('Manipulator'))
        self._toolbar.register_ink(self._canvas.find_ink_by_name('Solid'), slot=0)
        self._toolbar.register_ink(self._canvas.find_ink_by_name('Eraser'), slot=1)

    # =================== Event Handlers ==============================

    # ------- Pixel Size Widget ---------------------------------------

    def _on_pixel_size_changed(self, size):
        self._canvas.pixel_size = size

    # ------- Color Picker --------------------------------------------

    def _on_primary_color_changed(self, color):
        self._canvas.primary_color = color

    def _on_secondary_color_changed(self, color):
        self._canvas.secondary_color = color

    # ------- Canvas ----------------------------------------------------------

    def _on_canvas_surface_changed(self):
        self._animationDisplay.update()
        self._animationManager.update()
        self._layerManager.update()

    def _on_canvas_surface_changing(self):
        # lighter refresh while a stroke is still in progress
        self._animationDisplay.update()

    def _on_canvas_viewport_changed(self):
        self._animationDisplay.update_viewport()
        self._layerManager.update()
        self._animationManager.update()

    def _on_canvas_color_picked(self, color, button_pressed):
        # left button sets the primary color, right button the secondary
        if button_pressed == Qt.LeftButton:
            self._colorPicker.primary_color = color
        elif button_pressed == Qt.RightButton:
            self._colorPicker.secondary_color = color

    # ------ ToolBar ----------------------------------------------------------

    def _on_tool_changed(self, tool_name):
        self._canvas.current_tool = tool_name
        self._canvas.update()

    def _on_primary_ink_changed(self, ink_name):
        self._canvas.primary_ink = self._canvas.find_ink_by_name(ink_name)

    def _on_secondary_ink_changed(self, ink_name):
        self._canvas.secondary_ink = self._canvas.find_ink_by_name(ink_name)

    # ----- Options Bar --------------------------------------------------------

    def _on_options_grid_toggle(self, grid_on):
        self._canvas.grid_enabled = grid_on

    def _on_options_onion_skin_toggle(self, onion_on):
        self._canvas.onion_skin_enabled = onion_on
        self._animationDisplay.onion_skin_enabled = onion_on

    def _on_options_lights_toggle(self, lights_on):
        self._canvas.backlight_enabled = lights_on
        self._animationDisplay.backlight_enabled = lights_on

    # ------ Frame Events------------------------------------------------------

    def _on_current_frame_changed(self, index):
        self._canvas.update()
        self._layerManager.rebuild()
        self._animationDisplay.go_to_frame(index)

    # ------- Layer Events ----------------------------------------------------

    def _on_current_layer_changed(self):
        self._canvas.update()

    def _on_layer_order_changed(self):
        self._canvas.update()
        self._animationDisplay.update()
        self._animationManager.update()

    def _on_layer_imported(self):
        self._canvas.update_viewport()
        self._animationDisplay.update_viewport()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Convert caffe model
"""
from __future__ import print_function
import argparse
import sys
import re
import caffe_parser
import mxnet as mx
import numpy as np
from convert_symbol import convert_symbol
def prob_label(arg_names):
    """Return the name of the network's label argument.

    Picks the last argument that is neither the data input nor a
    weight/bias/gamma/beta parameter; defaults to 'prob_label' when no
    such argument exists.
    """
    param_suffixes = ('_weight', '_bias', '_gamma', '_beta')
    candidates = [name for name in arg_names
                  if not name.endswith('data')
                  and not name.endswith(param_suffixes)]
    if not candidates:
        return 'prob_label'
    return candidates[-1]
def convert_model(prototxt_fname, caffemodel_fname, output_prefix=None):
    """Convert caffe model

    Parameters
    ----------
    prototxt_fname : str
        Filename of the prototxt model definition
    caffemodel_fname : str
        Filename of the binary caffe model
    output_prefix : str, optional
        If given, then save the converted MXNet into output_prefix+'.json' and
        output_prefix+'.params'

    Returns
    -------
    sym : Symbol
        Symbol converted from prototxt
    arg_params : list of NDArray
        Argument parameters
    aux_params : list of NDArray
        Aux parameters
    input_dim : tuple
        Input dimension
    """
    sym, input_dim = convert_symbol(prototxt_fname)
    arg_shapes, _, aux_shapes = sym.infer_shape(data=tuple(input_dim))
    arg_names = sym.list_arguments()
    aux_names = sym.list_auxiliary_states()
    arg_shape_dic = dict(zip(arg_names, arg_shapes))
    aux_shape_dic = dict(zip(aux_names, aux_shapes))
    arg_params = {}
    aux_params = {}
    first_conv = True

    layers, names = caffe_parser.read_caffemodel(prototxt_fname, caffemodel_fname)
    layer_iter = caffe_parser.layer_iter(layers, names)
    layers_proto = caffe_parser.get_layers(caffe_parser.read_prototxt(prototxt_fname))

    for layer_name, layer_type, layer_blobs in layer_iter:
        # numeric layer types (4, 14, 39) are old-style V1LayerParameter
        # codes -- presumably Convolution/InnerProduct/Deconvolution;
        # confirm against the caffe proto definition
        if layer_type == 'Convolution' or layer_type == 'InnerProduct' \
           or layer_type == 4 or layer_type == 14 or layer_type == 'PReLU' \
           or layer_type == 'Deconvolution' or layer_type == 39:
            if layer_type == 'PReLU':
                # PReLU has a single blob holding the learned slopes
                assert (len(layer_blobs) == 1)
                weight_name = layer_name + '_gamma'
                wmat = np.array(layer_blobs[0].data).reshape(arg_shape_dic[weight_name])
                arg_params[weight_name] = mx.nd.zeros(wmat.shape)
                arg_params[weight_name][:] = wmat
                continue
            wmat_dim = []
            # newer caffe stores the blob shape in .shape.dim; older caffe
            # uses the (num, channels, height, width) fields
            if getattr(layer_blobs[0].shape, 'dim', None) is not None:
                if len(layer_blobs[0].shape.dim) > 0:
                    wmat_dim = layer_blobs[0].shape.dim
                else:
                    wmat_dim = [layer_blobs[0].num, layer_blobs[0].channels,
                                layer_blobs[0].height, layer_blobs[0].width]
            else:
                wmat_dim = list(layer_blobs[0].shape)
            wmat = np.array(layer_blobs[0].data).reshape(wmat_dim)

            channels = wmat_dim[1]
            if channels == 3 or channels == 4:  # RGB or RGBA
                if first_conv:
                    # Swapping BGR of caffe into RGB in mxnet
                    wmat[:, [0, 2], :, :] = wmat[:, [2, 0], :, :]

            assert(wmat.flags['C_CONTIGUOUS'] is True)
            sys.stdout.write('converting layer {0}, wmat shape = {1}'.format(
                layer_name, wmat.shape))
            if len(layer_blobs) == 2:
                # the second blob is the bias
                bias = np.array(layer_blobs[1].data)
                bias = bias.reshape((bias.shape[0], 1))
                assert(bias.flags['C_CONTIGUOUS'] is True)
                bias_name = layer_name + "_bias"

                if bias_name not in arg_shape_dic:
                    print(bias_name + ' not found in arg_shape_dic.')
                    continue
                bias = bias.reshape(arg_shape_dic[bias_name])
                arg_params[bias_name] = mx.nd.zeros(bias.shape)
                arg_params[bias_name][:] = bias
                sys.stdout.write(', bias shape = {}'.format(bias.shape))

            sys.stdout.write('\n')
            sys.stdout.flush()
            # flatten, then reshape to the shape mxnet inferred for this arg
            wmat = wmat.reshape((wmat.shape[0], -1))
            weight_name = layer_name + "_weight"

            if weight_name not in arg_shape_dic:
                print(weight_name + ' not found in arg_shape_dic.')
                continue
            wmat = wmat.reshape(arg_shape_dic[weight_name])
            arg_params[weight_name] = mx.nd.zeros(wmat.shape)
            arg_params[weight_name][:] = wmat

            # only the very first convolution gets the BGR->RGB swap
            if first_conv and (layer_type == 'Convolution' or layer_type == 4):
                first_conv = False

        elif layer_type == 'Scale':
            # caffe pairs BatchNorm with a Scale layer; map its gamma/beta
            # onto the matching mxnet BatchNorm arguments by name
            if 'scale' in layer_name:
                bn_name = layer_name.replace('scale', 'bn')
            elif 'sc' in layer_name:
                bn_name = layer_name.replace('sc', 'bn')
            else:
                assert False, 'Unknown name convention for bn/scale'

            gamma = np.array(layer_blobs[0].data)
            beta = np.array(layer_blobs[1].data)
            # beta = np.expand_dims(beta, 1)
            beta_name = '{}_beta'.format(bn_name)
            gamma_name = '{}_gamma'.format(bn_name)

            beta = beta.reshape(arg_shape_dic[beta_name])
            gamma = gamma.reshape(arg_shape_dic[gamma_name])
            arg_params[beta_name] = mx.nd.zeros(beta.shape)
            arg_params[gamma_name] = mx.nd.zeros(gamma.shape)
            arg_params[beta_name][:] = beta
            arg_params[gamma_name][:] = gamma

            assert gamma.flags['C_CONTIGUOUS'] is True
            assert beta.flags['C_CONTIGUOUS'] is True
            print('converting scale layer, beta shape = {}, gamma shape = {}'.format(
                beta.shape, gamma.shape))

        elif layer_type == 'BatchNorm':
            bn_name = layer_name
            mean = np.array(layer_blobs[0].data)
            var = np.array(layer_blobs[1].data)
            # blob 2 holds caffe's scale factor for the running statistics
            rescale_factor = layer_blobs[2].data[0]
            if rescale_factor != 0:
                rescale_factor = 1 / rescale_factor
            mean_name = '{}_moving_mean'.format(bn_name)
            var_name = '{}_moving_var'.format(bn_name)
            mean = mean.reshape(aux_shape_dic[mean_name])
            var = var.reshape(aux_shape_dic[var_name])
            aux_params[mean_name] = mx.nd.zeros(mean.shape)
            aux_params[var_name] = mx.nd.zeros(var.shape)
            # Get the original epsilon
            # NOTE(review): if no prototxt layer matches bn_name, bn_index
            # stays unbound and the line after the loop raises NameError
            for idx, layer in enumerate(layers_proto):
                if layer.name == bn_name or re.sub('[-/]', '_', layer.name) == bn_name:
                    bn_index = idx
            eps_caffe = layers_proto[bn_index].batch_norm_param.eps
            # Compensate for the epsilon shift performed in convert_symbol
            eps_symbol = float(sym.attr_dict()[bn_name + '_moving_mean']['eps'])
            eps_correction = eps_caffe - eps_symbol
            # Fill parameters
            aux_params[mean_name][:] = mean * rescale_factor
            aux_params[var_name][:] = var * rescale_factor + eps_correction
            assert var.flags['C_CONTIGUOUS'] is True
            assert mean.flags['C_CONTIGUOUS'] is True
            print('converting batchnorm layer, mean shape = {}, var shape = {}'.format(
                mean.shape, var.shape))

            # when no Scale layer follows, gamma/beta are fixed to 1/0
            fix_gamma = layers_proto[bn_index+1].type != 'Scale'
            if fix_gamma:
                gamma_name = '{}_gamma'.format(bn_name)
                gamma = np.array(np.ones(arg_shape_dic[gamma_name]))
                beta_name = '{}_beta'.format(bn_name)
                beta = np.array(np.zeros(arg_shape_dic[beta_name]))
                arg_params[beta_name] = mx.nd.zeros(beta.shape)
                arg_params[gamma_name] = mx.nd.zeros(gamma.shape)
                arg_params[beta_name][:] = beta
                arg_params[gamma_name][:] = gamma
                assert gamma.flags['C_CONTIGUOUS'] is True
                assert beta.flags['C_CONTIGUOUS'] is True
        else:
            # layers without learned parameters are skipped
            print('\tskipping layer {} of type {}'.format(layer_name, layer_type))
            assert len(layer_blobs) == 0

    if output_prefix is not None:
        model = mx.mod.Module(symbol=sym, label_names=[prob_label(arg_names), ])
        model.bind(data_shapes=[('data', tuple(input_dim))])
        model.init_params(arg_params=arg_params, aux_params=aux_params)
        model.save_checkpoint(output_prefix, 0)

    return sym, arg_params, aux_params, input_dim
def main():
    """Command-line entry point: parse arguments and convert the model."""
    parser = argparse.ArgumentParser(
        description='Caffe prototxt to mxnet model parameter converter.')
    parser.add_argument('prototxt', help='The prototxt filename')
    parser.add_argument('caffemodel', help='The binary caffemodel filename')
    parser.add_argument('save_model_name', help='The name of the output model prefix')
    args = parser.parse_args()
    convert_model(args.prototxt, args.caffemodel, args.save_model_name)
    print ('Saved model successfully to {}'.format(args.save_model_name))


if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
import sys, traceback
import os
import shutil
import requests
import time
import grequests
import itertools
from itertools import product
import multiprocessing as mp
from multiprocessing import Pool
import logging
import subprocess
from subprocess import check_call
import codecs
from xml.etree import ElementTree as ET
from git import Repo
# Python 2 script preamble: report the environment and validate CLI usage.
print 'core num: ' + str(mp.cpu_count())
print 'python version:' + sys.version
print 'create sailfish index Started......'
argvs = sys.argv
argc = len(argvs)
if (argc != 3):
    print 'Usage: # python %s out-dir filename(index_list)' % argvs[0]
    quit()
# route multiprocessing log records to stderr at DEBUG level
logger = mp.log_to_stderr(logging.DEBUG)
out_dname = argvs[1]  # output directory (first CLI argument)
print out_dname
def read_input():
    """Read the index-list file (second CLI argument) and return its
    non-empty, stripped lines as a list.

    Returns
    -------
    list of str
    """
    # BUG FIX: the original called 'f.close' without parentheses, which
    # never closed the file; 'with' guarantees the handle is closed.
    with open(argvs[2]) as f:
        return [line.strip() for line in f if line.strip()]
def makeDir(dname):
    """Create directory *dname* if it does not already exist."""
    if os.path.exists(dname) is False:
        os.mkdir(dname)
        print '%s (dir) created.' % dname
    else:
        print '%s (dir) is already exists.' % dname
def print_tree(directory):
    """Yield the path of every file found under *directory*, recursively."""
    for parent, _dirs, filenames in os.walk(directory):
        for name in filenames:
            yield os.path.join(parent, name)
def create_dl_list(index_list):
    """Given 'name,url' records, create each item's output directory and
    return the URLs that still need downloading."""
    ret = []
    for item in index_list:
        list = item.split(',')  # NOTE(review): shadows the builtin 'list'
        item_dir = out_dname + '/' + list[0]
        makeDir(item_dir)
        dl_path = item_dir + '/' + list[1].split('/')[-1]
        print 'downloading-file: ' + dl_path
        # skip files already present, either packed or already gunzipped
        if os.path.isfile(dl_path) or os.path.isfile(dl_path.replace('.gz', '')):
            print '>>>>>>>>>> This file is already exists. continue next.'
            continue
        ret.append(list[1])
    return ret
def grequests_async(dl_list, idx_list):
rs = (grequests.get(url) for url in dl_list)
for r in grequests.map(rs):
print r.url + ' >>>>>>>>>><Response>' + str(r.status_code)
#print idx_list[0]
#print str(r.url).split('/')[-1]
out_listitem = next(itertools.ifilter(lambda x:x.find(str(r.url)) > -1, idx_list), None)
#print out_listitem
file_name = str(r.url).split('/')[-1]
out_dir = out_dname + '/' + out_listitem.split(',')[0]
if os.path.isfile(out_dir + '/' + file_name):
print '%s is already exists.' % out_dir + '/' + file_name
else:
print 'creat-file in ' + out_dir
os.chdir(out_dir)
f = open(file_name, 'w')
f.write(r.content)
f.close
def run_cmd(cmd):
    """Echo and execute *cmd* (a list of argv fragments); raises
    CalledProcessError on a non-zero exit status."""
    print ' '.join(cmd)
    # re-split on spaces because generate_cmds yields fragments such as
    # '-t path' that contain embedded spaces
    str_cmd = ' '.join(cmd)
    cmd_relist = str_cmd.split(' ')
    print cmd_relist
    subprocess.check_call(cmd_relist)
def generate_cmds(script, keys, vals):
    """
    Generate list of commands from script name, option names, and sets of values

    >>> cmds = generate_cmds('python run.py', ['-A', '-B'], [['1', '2'], ['x', 'y']])
    >>> list(cmds) #doctest: +NORMALIZE_WHITESPACE
    [['python', 'run.py', '-A 1', '-B 2'], ['python', 'run.py', '-A x', '-B y']]
    """
    base = script.split()
    for val in vals:
        options = ['%s %s' % pair for pair in zip(keys, val)]
        yield base + options
def make_param(path):
    """Build the [fasta, out-dir, kmer-size] parameter triple for one
    index file, or [] when its index was already built."""
    out_dir = path.rsplit('/', 1)[0]
    # the presence of kmerEquivClasses.bin marks an already-built index
    marker = out_dir + '/' + 'kmerEquivClasses.bin'
    if os.path.isfile(marker):
        return []
    return [path.replace('.gz', ''), out_dir, '20']
def unpack_and_make_filelist(idx_list):
    """Walk the output tree, gunzip any .gz files, and return the ordered,
    de-duplicated list of index files belonging to *idx_list*."""
    index_files = []
    for file in print_tree(out_dname):
        root, ext = os.path.splitext(file)
        if ext == '.gz':
            subprocess.check_call(["gunzip","-fd",file])
            index_files.append(file)
        elif ext == '.fa':
            # keep .fa files only when their parent directory name appears
            # in one of the index records
            listchk = next(itertools.ifilter(lambda x:x.find(root.split('/')[-2]) > -1, idx_list), None)
            if listchk is not None:
                index_files.append(file)
    print index_files
    # de-duplicate while preserving first-seen order
    index_files = sorted(set(index_files), key=index_files.index)
    return index_files
def main():
    """Driver: read the index list, download missing files, unpack them,
    and run 'sailfish index' for each remaining entry."""
    try:
        input_index_list = []
        input_index_list = read_input()
        print 'length of index_list: ' + str(len(input_index_list))
        makeDir(out_dname)
        os.chdir(out_dname)
        print 'moved to %s' % os.getcwd()
        print ':::::::::::::::::::::::::::::::::::::::::::'
        print '>>>>>>>>>>>>>>>>> download sailfish index-files ...'
        dl_url_list = []
        dl_url_list = create_dl_list(input_index_list)
        if len(dl_url_list) > 0:
            print 'start download-jobs...'
            grequests_async(dl_url_list, input_index_list)
            print 'all download-jobs finished.'
        else:
            print 'no execute download-jobs.'
        print ':::::::::::::::::::::::::::::::::::::::::::'
        print '>>>>>>>>>>>>>>>>> unpacking and make-list sailfish index-files...'
        index_files = []
        index_files = unpack_and_make_filelist(input_index_list)
        print index_files
        print ':::::::::::::::::::::::::::::::::::::::::::'
        print '>>>>>>>>>>>>>>>>> executing sailfish index...'
        param_list = [make_param(str(x)) for x in index_files]
        print param_list
        # drop entries whose index already exists (make_param returned [])
        while param_list.count([]) > 0:
            param_list.remove([])
        if len(param_list) > 0:
            cmds = generate_cmds('sailfish index --force', ['-t', '-o', '--kmerSize'], param_list)
            [ run_cmd(x) for x in cmds ]
        else:
            print 'sailfish indexes already created.'
        print ':::::::::::::::::::::::::::::::::::::::::::'
        print '>>>>>>>>>>>>>>>>> script ended :)'
    except KeyboardInterrupt:
        print ">>>>>>>>>>>>>>>>> Caught KeyboardInterrupt. Terminating workers..."
        # NOTE(review): 'pool' is never created (the Pool code was removed),
        # so this handler would itself raise NameError
        pool.terminate()
        sys.exit(1)
    except:
        # catch-all: dump the traceback, then exit non-zero
        info = sys.exc_info()
        tbinfo = traceback.format_tb( info[2] )
        print 'Error Info...'.ljust( 80, '=' )
        for tbi in tbinfo:
            print tbi
        print ' %s' % str( info[1] )
        print '\n'.rjust( 85, '=' )
        sys.exit(1)


if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
import logging
from hugin.pos import POS
from hugin.pos import Pos
class PredicateArguments(object):
    """Positional argument list of a predicate.

    Arguments that are missing/empty or start with "u" (unbound) are
    stored as False; everything else is kept verbatim.
    """

    def __init__(self, arg_list):
        self.arg_list = []
        for arg in arg_list:
            bound = bool(arg) and arg[0] != "u"
            self.arg_list.append(arg if bound else False)

    def _nth(self, i):
        """Return argument *i*, or False when there are fewer arguments."""
        if len(self.arg_list) > i:
            return self.arg_list[i]
        return False

    @property
    def first(self):
        return self._nth(0)

    @property
    def second(self):
        return self._nth(1)

    @property
    def third(self):
        return self._nth(2)

    @property
    def fourth(self):
        return self._nth(3)

    def __iter__(self):
        return iter(self.arg_list)
class Predicate(object):
    """A single logical-form predicate: id, lemma, POS tag and arguments.

    When none=True the instance acts as a null predicate with every field
    set to None.
    """

    def __init__(self, pid=None, lemma=None, pos=None, args=None, extra=None, none=False):
        self.none = none
        if not none:
            self.pid = pid
            self.lemma = lemma
            self.pos = Pos(pos)
            self.args = PredicateArguments(args)
            self.extra = extra
        else:
            self.pid = None
            self.lemma = None
            self.pos = None
            self.args = None
            self.extra = None

    def lemma_pos(self):
        """Return 'lemma-pos', or '<NONE>' for the null predicate."""
        if self.none:
            return "<NONE>"
        return u"%s-%s" % (self.lemma, self.pos)

    @staticmethod
    def fromstr(line):
        """Parse a 'pid:lemma-pos(arg1,arg2,...)' predicate string.

        Tolerates extra ':' separators in the line and '-' characters
        inside the lemma.
        """
        result = line.split(":")
        if len(result) != 2:
            pid = result[0]
            other = result[1:len(result)]
            other = "".join(other)
        else:
            pid, other = line.split(":")
        result = other.split("-")
        if len(result) != 2:
            other = result[-1]
            # the lemma itself may contain '-'; keep all but the last chunk
            lemma = "-".join(result[0:len(result) - 1])
        else:
            lemma, other = result
        pos, arg_line = other.split("(")
        arg_line = arg_line[0:(len(arg_line) - 1)]  # strip trailing ')'
        args = arg_line.split(",")
        return Predicate(pid, lemma, pos, args)

    @staticmethod
    def efromstr(line):
        """Parse an 'extra(arg1,arg2,...)' predicate string.

        NOTE(review): 'if result == 2' compares a list with an int and is
        always False -- presumably 'len(result) == 2' was intended, so the
        first branch is dead code; fixing it would also change the meaning
        of 'extra[0]' below, so confirm intent before changing.
        """
        result = line.split("(")
        if result == 2:
            extra, arg_line = result
        else:
            extra = result[0:(len(result) - 1)]
            arg_line = result[-1]
        arg_line = arg_line[0:(len(arg_line) - 1)]  # strip trailing ')'
        args = arg_line.split(",")
        return Predicate(-1, None, None, args, extra[0])

    def __eq__(self, other):
        # predicates compare by lemma only; null lemmas never match
        if self.lemma is None:
            return False
        if other.lemma is None:
            return False
        return self.lemma == other.lemma

    def __hash__(self):
        return self.lemma.__hash__()

    def __repr__(self):
        # NOTE(review): returns UTF-8 bytes (Python 2 style); under
        # Python 3 __repr__ must return str
        if self.extra is None:
            predicate_str = u"%s-%s(%s)" % (
                self.lemma,
                self.pos,
                u", ".join([str(arg) for arg in self.args])
            )
        else:
            predicate_str = u"%s(%s)" % (
                self.extra,
                u", ".join([str(arg) for arg in self.args])
            )
        return predicate_str.encode("utf-8")
class PredicateSet(object):
    """An unordered set of predicates sharing a POS, compared by lemma set.

    If more than *max_sequence* predicates are supplied the set is left
    empty.
    """

    def __init__(self, predicates, pos, max_sequence=4):
        self.predicates = list(set(predicates)) if len(predicates) <= max_sequence else []
        self.pos = Pos.fromenum(pos)

    def lemmas(self):
        """Return the (unsorted) lemmas of all member predicates."""
        return [pred.lemma for pred in self.predicates]

    def lemma_pos(self):
        """Return 'lemma1&&lemma2-pos' with lemmas sorted, or '<NONE>'
        when the set is empty."""
        if len(self.predicates) > 0:
            lemmas = sorted([pred.lemma for pred in self.predicates])
            lemmas = "&&".join(lemmas)
            return u"%s-%r" % (lemmas, self.pos)
        return "<NONE>"

    def __cmp__(self, other):
        # Python 2 ordering hook: 0 when lemma sets match, 1 otherwise
        set1 = set(other.lemmas())
        set2 = set(self.lemmas())
        if set1 == set2:
            return 0
        return 1

    def __eq__(self, other):
        set1 = set(other.lemmas())
        set2 = set(self.lemmas())
        return set1 == set2

    def __int__(self):
        return len(self.predicates)

    def __str__(self):
        # BUG FIX: the original tested 'list(self.predicates) == 0', which
        # compares a list with an int and is always False, so '<NONE>' was
        # never returned for an empty set
        if len(self.predicates) == 0:
            return "<NONE>"
        else:
            set_str = u"<PredicateSet(%s)>" % self.lemma_pos()
            return set_str.encode("utf-8")

    def __len__(self):
        return len(self.predicates)

    def __repr__(self):
        return self.__str__()
class SentenceIndex(object):
    """Inverted indexes over a sentence's predicates: by argument value
    (any slot and per slot) and by the 'extra' field."""

    def __init__(self, sentence):
        self.i_dic_arg = dict()          # arg value (any slot) -> [predicates]
        self.i_dic_arg_first = dict()    # value in slot 1 -> [predicates]
        self.i_dic_arg_second = dict()   # value in slot 2 -> [predicates]
        self.i_dic_arg_third = dict()    # value in slot 3 -> [predicates]
        self.i_dic_arg_fourth = dict()   # value in slot 4 -> [predicates]
        self.i_dic_extra = dict()        # extra tag -> [predicates]
        self.sentence = sentence
        for pred in sentence:
            # index every argument regardless of position
            for arg in pred.args:
                if arg is not None:
                    if arg in self.i_dic_arg:
                        self.i_dic_arg[arg].append(pred)
                    else:
                        self.i_dic_arg[arg] = [pred]
            if pred.extra:
                if pred.extra in self.i_dic_extra:
                    self.i_dic_extra[pred.extra].append(pred)
                else:
                    self.i_dic_extra[pred.extra] = [pred]
            # per-position indexes; unbound slots are False and skipped
            if pred.args.first:
                if pred.args.first in self.i_dic_arg_first:
                    self.i_dic_arg_first[pred.args.first].append(pred)
                else:
                    self.i_dic_arg_first[pred.args.first] = [pred]
            if pred.args.second:
                if pred.args.second in self.i_dic_arg_second:
                    self.i_dic_arg_second[pred.args.second].append(pred)
                else:
                    self.i_dic_arg_second[pred.args.second] = [pred]
            if pred.args.third:
                if pred.args.third in self.i_dic_arg_third:
                    self.i_dic_arg_third[pred.args.third].append(pred)
                else:
                    self.i_dic_arg_third[pred.args.third] = [pred]
            if pred.args.fourth:
                if pred.args.fourth in self.i_dic_arg_fourth:
                    self.i_dic_arg_fourth[pred.args.fourth].append(pred)
                else:
                    self.i_dic_arg_fourth[pred.args.fourth] = [pred]

    def find(self, first=None, second=None, third=None, fourth=None, pos=None, arg=None, extra=None, return_set=False):
        """Return the predicates matching ALL supplied constraints.

        With return_set=True and a pos given, the result is wrapped in a
        PredicateSet.

        NOTE(review): calling with no constraints pops from an empty list
        and raises IndexError.
        """
        predicate_lists = []
        if arg is not None:
            predicate_lists.append(self.i_dic_arg.get(arg, []))
        if extra is not None:
            predicate_lists.append(self.i_dic_extra.get(extra, []))
        if first is not None:
            predicate_lists.append(self.i_dic_arg_first.get(first, []))
        if second is not None:
            predicate_lists.append(self.i_dic_arg_second.get(second, []))
        if third is not None:
            predicate_lists.append(self.i_dic_arg_third.get(third, []))
        if fourth is not None:
            predicate_lists.append(self.i_dic_arg_fourth.get(fourth, []))
        if pos is not None and pos is not POS.NONE:
            # NOTE(review): under Python 3 'filter' returns a one-shot
            # iterator; the membership tests below assume a list (Python 2)
            predicate_lists.append(filter(lambda p: p.pos.pos == pos, self.sentence))
        if len(predicate_lists) == 1:
            if return_set and pos:
                return PredicateSet(predicate_lists[0], pos)
            return predicate_lists[0]
        else:
            # intersect: keep entries of one list present in all others
            list1 = predicate_lists.pop()
            matched = []
            for e in list1:
                in_intersection = True
                for l in predicate_lists:
                    if e not in l:
                        in_intersection = False
                        break
                if in_intersection:
                    matched.append(e)
            if return_set and pos is not None:
                return PredicateSet(matched, pos)
            return matched
class Sentence(object):
    """A parsed sentence: id, predicates, raw LF line and inverted index."""

    def __init__(self, sid, predicates, line=None, raw_text=None):
        self.line = line
        self.predicates = predicates
        self.sid = sid
        self.index = SentenceIndex(self)
        self.raw_text = raw_text

    def lemmas(self):
        """Return the lemmas of all predicates that have one."""
        lemmas = []
        for p in self.predicates:
            if p.lemma is not None:
                lemmas.append(p.lemma)
        return lemmas

    @staticmethod
    def from_lf_line(lf_line_index, lf_line):
        """Parse one logical-form line into a Sentence.

        Splits the conjunction on '&' and parses each term as either a
        '[...]'-style predicate or an 'extra(...)' predicate; terms that
        fail to parse are logged and skipped.
        """
        predicates = []
        lf_line = lf_line.replace(" & ", "&")
        lf_line = lf_line.replace("((", "(")
        lf_line = lf_line.replace("))", ")")
        predicate_str = lf_line.split("&")
        predicate_str = filter(lambda t: t != "&", predicate_str)
        for i, p_str in enumerate(predicate_str):
            try:
                if p_str[0] == "[":
                    predicate = Predicate.fromstr(p_str)
                    # keep only predicates with both a lemma and a POS
                    if predicate.lemma and predicate.pos.pos:
                        predicates.append(predicate)
                else:
                    predicate = Predicate.efromstr(p_str)
                    if len(predicate.extra) > 0:
                        predicates.append(predicate)
            except Exception:
                try:
                    logging.error("Error while parsing line: %s" % lf_line)
                except Exception:
                    # never let a logging failure abort parsing
                    pass
        return Sentence(lf_line_index, predicates, lf_line)

    def __iter__(self):
        for pred in self.predicates:
            yield pred

    def __len__(self):
        return len(self.predicates)
class MetaphorAdpLFReader(object):
    """Iterates a logical-form file and yields Sentence objects."""

    def __init__(self, lf_file):
        # lf_file: iterable of utf-8 encoded byte lines.
        self.lf_file = lf_file

    def i_sentences(self):
        """Generate sentences, attaching the preceding raw-text comment."""
        sentence_counter = 0
        current_text = None
        for raw_line in self.lf_file:
            decoded = raw_line.decode("utf-8")
            if decoded.startswith("%"):
                # Comment line carrying the raw sentence text.
                if decoded.startswith("%%%"):
                    current_text = decoded[4:]
                else:
                    current_text = decoded[2:]
            elif decoded.startswith("id("):
                continue
            elif decoded[0].isdigit():
                continue
            elif len(decoded) > 1:
                parsed = Sentence.from_lf_line(sentence_counter, decoded)
                parsed.raw_text = current_text
                sentence_counter += 1
                yield parsed
| |
# Copyright 2015 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: HDF5
:platform: Unix
:synopsis: Transport for saving and loading files using hdf5
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import sys
import logging
import os
import time
import numpy as np
import h5py
from mpi4py import MPI
import savu.plugins.utils as pu
from savu.data.structures import NX_CLASS
from savu.data.TransportMechanism import TransportMechanism
from savu.core.utils import logmethod
# TODO tidy up the NeXus format parts of this
class Hdf5Transport(TransportMechanism):
    """Transport that chains plugins together through intermediate HDF5 files."""

    def run_plugin_list(self, input_file, plugin_list, processing_dir, mpi=False,
                        processes=["CPU0"], process=0):
        """Runs a chain of plugins

        :param input_file: Path to the file containing the input data.
        :type input_file: str.
        :param plugin_list: Plugin list.
        :type plugin_list: savu.data.structure.PluginList.
        :param processing_dir: Location of the processing directory.
        :type processing_dir: str.
        :param mpi: Whether this is running in mpi, default is false.
        :type mpi: bool.
        :param processes: Names of all participating processes.
        :param process: Index (rank) of this process.
        """
        input_data = pu.load_raw_data(input_file)
        logging.debug("Running plugin list, just a check")
        filename = os.path.basename(input_data.backing_file.filename)
        filename = os.path.splitext(filename)[0]
        output_filename = \
            os.path.join(processing_dir,
                         "%s_processed_%s.nxs" % (filename,
                                                  time.strftime("%Y%m%d%H%M%S")))
        # Only one rank may write the process list to the output file.
        if process == 0:
            logging.debug("Running process List.save_list_to_file")
            plugin_list.save_list_to_file(output_filename)

        in_data = input_data
        output = None

        # Pass 1: create an intermediate output file for every plugin so each
        # plugin reads the previous plugin's output file.
        logging.debug("generating all output files")
        files = []
        count = 0
        for plugin_dict in plugin_list.plugin_list:
            logging.debug("Loading plugin %s", plugin_dict['id'])
            plugin = pu.load_plugin(plugin_dict['id'])
            # generate somewhere for the data to go
            file_name = os.path.join(processing_dir,
                                     "%s%02i_%s.h5" % (plugin_list.name, count,
                                                       plugin_dict['id']))
            group_name = "%i-%s" % (count, plugin.name)
            logging.debug("Creating output file %s", file_name)
            output = pu.create_output_data(plugin, in_data, file_name,
                                           group_name, mpi)
            files.append(output)
            in_data = output
            count += 1

        # Pass 2: run each plugin in order.
        logging.debug("processing Plugins")
        in_data = input_data
        count = 0
        for plugin_dict in plugin_list.plugin_list:
            logging.debug("Loading plugin %s", plugin_dict['id'])
            plugin = pu.load_plugin(plugin_dict['id'])
            output = files[count]
            plugin.set_parameters(plugin_dict['data'])
            logging.debug("Starting processing plugin %s", plugin_dict['id'])
            plugin.run_plugin(in_data, output, processes, process, self)
            logging.debug("Completed processing plugin %s", plugin_dict['id'])
            if in_data is not output:
                in_data.complete()
            in_data = output
            if mpi:
                logging.debug("Blocking till all processes complete")
                MPI.COMM_WORLD.Barrier()
            # BUG FIX: this was "if plugin == 0:", comparing a plugin
            # instance to an int, so citations and intermediate-data links
            # were never written; only rank 0 should write them.
            if process == 0:
                cite_info = plugin.get_citation_information()
                if cite_info is not None:
                    plugin_list.add_plugin_citation(output_filename, count,
                                                    cite_info)
                group_name = "%i-%s" % (count, plugin.name)
                plugin_list.add_intermediate_data_link(output_filename,
                                                       output, group_name)
            count += 1

        if output is not None:
            output.complete()

    def process(self, plugin, data, output, processes, process, params, kernel):
        """Dispatch to the set-up routine matching the plugin's kernel type.

        :param kernel: kernel type name; must contain 'reconstruction',
            'timeseries' or 'filter'.
        """
        if 'reconstruction' in kernel:
            params = [params[0], params[1], data, output, plugin, processes,
                      process]
            self.reconstruction_set_up(params)
        elif 'timeseries' in kernel:
            params = [plugin, processes, process]
            self.timeseries_correction_set_up(data, output, params)
        elif 'filter' in kernel:
            params = [params[0], params[1], processes, process]
            # BUG FIX: params was assembled but never passed, which raised a
            # TypeError because filter_set_up requires the argument.
            self.filter_set_up(params)
        else:
            # BUG FIX: was a parenthesised py2 print (emitting a tuple repr);
            # log the error before bailing out.
            logging.error("Kernel %s undefined in data.transport.HDF5", kernel)
            sys.exit(1)

    @logmethod
    def reconstruction_set_up(self, params):
        """Reconstruct this process's share of the sinograms.

        params layout: [centre_of_rotations, angles, data, output, plugin,
        processes, process].
        """
        centre_of_rotations = params[0]
        angles = params[1]
        data = params[2]
        output = params[3]
        plugin = params[4]
        processes = params[5]
        process = params[6]
        # Split the sinograms evenly across processes; this rank handles
        # only its own chunk.
        sinogram_frames = np.arange(data.get_number_of_sinograms())
        frames = np.array_split(sinogram_frames, len(processes))[process]
        for i in range(len(frames)):
            frame_centre_of_rotation = centre_of_rotations[i]
            sinogram = data.data[:, frames[i], :]
            # Floor division for the centre keeps integer semantics on
            # Python 3 (plain '/' was py2 integer division here).
            reconstruction = \
                plugin.reconstruct(sinogram, frame_centre_of_rotation, angles,
                                   (output.data.shape[0],
                                    output.data.shape[2]),
                                   (output.data.shape[0] // 2,
                                    output.data.shape[2] // 2))
            output.data[:, frames[i], :] = reconstruction
            plugin.count += 1
            # BUG FIX: was a bare py2 'print' statement (syntax error on
            # Python 3); report progress through logging instead.
            logging.debug("Reconstructed %i frames", plugin.count)

    @logmethod
    def timeseries_correction_set_up(self, data, output, params):
        """Apply dark/flat-field correction to this process's frames.

        params layout: [plugin, processes, process].
        """
        plugin = params[0]
        processes = params[1]
        process = params[2]
        image_key = data.image_key[...]
        # pull out the average dark and flat data; fall back to neutral
        # corrections when the file contains no dark/flat frames.
        dark = None
        try:
            dark = np.mean(data.data[image_key == 2, :, :], 0)
        except Exception:  # narrowed from a bare except
            dark = np.zeros((data.data.shape[1], data.data.shape[2]))
        flat = None
        try:
            flat = np.mean(data.data[image_key == 1, :, :], 0)
        except Exception:  # narrowed from a bare except
            flat = np.ones((data.data.shape[1], data.data.shape[2]))
        # shortcut to reduce processing
        flat = flat - dark
        flat[flat == 0.0] = 1  # avoid divide-by-zero inside the correction
        # get a list of all the frames and split them across processes
        output_frames = np.arange(data.data.shape[1])
        frames = np.array_split(output_frames, len(processes))[process]
        # The rotation angle can just be pulled out of the file so write that
        rotation_angle = data.rotation_angle[image_key == 0]
        output.rotation_angle[:] = rotation_angle
        for i in frames:
            output.data[:, i, :] = plugin.correction(
                data.data[image_key == 0, i, :], dark[i, :], flat[i, :])

    @logmethod
    def filter_set_up(self, params):
        """Placeholder for filter-kernel set-up; not yet implemented.

        param_name is empty, so the loop below is currently a no-op.
        """
        param_name = []
        for name in param_name:
            for p in params:
                globals()[name] = p
        pass

    def setup(self, path, name):
        """Create the backing HDF5 file and an NXdata group inside it.

        :param path: file-system path of the HDF5 file to create.
        :param name: name of the group to create in the file.
        :raises IOError: if the file could not be opened.
        """
        self.backing_file = h5py.File(path, 'w')
        if self.backing_file is None:
            raise IOError("Failed to open the hdf5 file")
        self.group = self.backing_file.create_group(name)
        self.group.attrs[NX_CLASS] = 'NXdata'

    def add_data_block(self, name, shape, dtype):
        """Create a named dataset with the given shape and dtype."""
        self.group.create_dataset(name, shape, dtype)

    def get_data_block(self, name):
        """Return the named dataset from the group."""
        return self.group[name]

    def finalise(self):
        """Close the backing file (safe to call more than once)."""
        if self.backing_file is not None:
            self.backing_file.close()
            self.backing_file = None
| |
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import subprocess
import textwrap
from collections import defaultdict
from typing import Any, Dict, List, NamedTuple, Optional, Set, Union
import click
from github import Github, Issue, PullRequest, UnknownObjectException
from rich.console import Console
from rich.progress import Progress
logger = logging.getLogger(__name__)
console = Console(width=400, color_system="standard")
# PRs and Issues expose the same fields used below, so treat them uniformly.
PullRequestOrIssue = Union[PullRequest.PullRequest, Issue.Issue]
# This script sits one directory below the repository root.
MY_DIR_PATH = os.path.dirname(__file__)
SOURCE_DIR_PATH = os.path.abspath(os.path.join(MY_DIR_PATH, os.pardir))
# Matches the "(#1234)" PR-number suffix of a squash-merge commit subject.
PR_PATTERN = re.compile(r".*\(#([0-9]+)\)")
# Matches " #1234" issue references inside a PR body.
ISSUE_MATCH_IN_BODY = re.compile(r" #([0-9]+)[^0-9]")
@click.group(context_settings={'help_option_names': ['-h', '--help'], 'max_content_width': 500})
def cli():
    """Top-level click command group; subcommands register via @cli.command()."""
    ...
# Shared CLI options reused by the commands below.
option_verbose = click.option(
    "--verbose",
    is_flag=True,
    help="Print verbose information about performed steps",
)
option_previous_release = click.option(
    "--previous-release",
    type=str,
    required=True,
    help="commit reference (for example hash or tag) of the previous release.",
)
option_current_release = click.option(
    "--current-release",
    type=str,
    required=True,
    help="commit reference (for example hash or tag) of the current release.",
)
option_github_token = click.option(
    "--github-token",
    type=str,
    required=True,
    # BUG FIX: help text typos corrected ("set omit" -> "omit",
    # "Read%20sssues" -> "Read%20issues").
    help=textwrap.dedent(
        """
        Github token used to authenticate.
        You can omit it if you have GITHUB_TOKEN env variable set
        Can be generated with:
        https://github.com/settings/tokens/new?description=Read%20issues&scopes=repo:status"""
    ),
    envvar='GITHUB_TOKEN',
)
option_excluded_pr_list = click.option(
    # BUG FIX: "Coma-separated" -> "Comma-separated".
    "--excluded-pr-list", type=str, default='', help="Comma-separated list of PRs to exclude from the issue."
)
option_limit_pr_count = click.option(
    "--limit-pr-count",
    type=int,
    default=None,
    help="Limit PR count processes (useful for testing small subset of PRs).",
)
def get_git_log_command(
    verbose: bool, from_commit: Optional[str] = None, to_commit: Optional[str] = None
) -> List[str]:
    """
    Build the ``git log`` command for the current repo, run from the
    package folder.

    :param verbose: whether to print the assembled command
    :param from_commit: if present - base commit from which to start the log from
    :param to_commit: if present - final commit which should be the start of the log
    :return: git command to run
    """
    command = ["git", "log", "--pretty=format:%H %h %cd %s", "--date=short"]
    if from_commit:
        # With both endpoints use a symmetric-difference range; with only a
        # base commit log everything reachable from it.
        revision = f"{from_commit}...{to_commit}" if to_commit else from_commit
        command.append(revision)
    command.extend(['--', '.'])
    if verbose:
        console.print(f"Command to run: '{' '.join(command)}'")
    return command
class Change(NamedTuple):
    """Stores details about commits"""
    # Fields map to the `git log --pretty=format:"%H %h %cd %s"` output.
    full_hash: str
    short_hash: str
    date: str
    message: str
    # Commit subject with backticks/entities normalized for safe rendering.
    message_without_backticks: str
    # PR number parsed from a trailing "(#123)" in the subject, if any.
    pr: Optional[int]
def get_change_from_line(line: str) -> Change:
    """Parse one ``git log --pretty=format:'%H %h %cd %s'`` line into a Change.

    :param line: "<full-hash> <short-hash> <date> <subject>"
    :return: parsed Change; ``pr`` is the trailing "(#123)" number when present.
    """
    full_hash, short_hash, date, message = line.split(" ", maxsplit=3)
    pr = None
    pr_match = PR_PATTERN.match(message)
    if pr_match:
        pr = pr_match.group(1)
    return Change(
        full_hash=full_hash,
        short_hash=short_hash,
        date=date,
        message=message,
        # BUG FIX: the entity names had been mangled into no-op replacements
        # ("'" -> "'", '&' -> '&'); restore unescaping of the HTML entities
        # alongside the backtick normalization.
        message_without_backticks=message.replace("`", "'")
        .replace("&#39;", "'")
        .replace("&amp;", "&"),
        pr=int(pr) if pr else None,
    )
def get_changes(verbose: bool, previous_release: str, current_release: str) -> List[Change]:
    """Return the commits between the two release refs as Change objects.

    Runs ``git log`` in the repository root and parses each output line.
    """
    change_strings = subprocess.check_output(
        get_git_log_command(verbose, from_commit=previous_release, to_commit=current_release),
        cwd=SOURCE_DIR_PATH,
        universal_newlines=True,
    )
    # BUG FIX: split("\n") produced an empty trailing element for empty
    # output (or output ending with a newline), which crashed the line
    # parser; skip blank lines instead.
    return [get_change_from_line(line) for line in change_strings.splitlines() if line]
def render_template(
    template_name: str,
    context: Dict[str, Any],
    autoescape: bool = True,
    keep_trailing_newline: bool = False,
) -> str:
    """
    Render the Jinja2 template ``<template_name>_TEMPLATE.md.jinja2`` that
    lives next to this script.

    :param template_name: name of the template to use
    :param context: Jinja2 context
    :param autoescape: Whether to autoescape HTML
    :param keep_trailing_newline: Whether to keep the newline in rendered output
    :return: rendered template
    """
    import jinja2

    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(searchpath=MY_DIR_PATH),
        undefined=jinja2.StrictUndefined,
        autoescape=autoescape,
        keep_trailing_newline=keep_trailing_newline,
    )
    template = environment.get_template(f"{template_name}_TEMPLATE.md.jinja2")
    return template.render(context)
def print_issue_content(
    current_release: str,
    pull_requests: Dict[int, PullRequestOrIssue],
    linked_issues: Dict[int, List[Issue.Issue]],
    users: Dict[int, Set[str]],
):
    """Render the release-testing issue body and print it to stdout."""
    sorted_pr_numbers = sorted(pull_requests)
    # Per-PR "@user1 @user2" strings plus one combined mention string.
    user_logins: Dict[int, str] = {
        number: "@" + " @".join(logins) for number, logins in users.items()
    }
    every_user: Set[str] = set()
    for logins in users.values():
        every_user.update(logins)
    all_user_logins = "@" + " @".join(every_user)
    content = render_template(
        template_name='ISSUE',
        context={
            'version': current_release,
            'pr_list': sorted_pr_numbers,
            'pull_requests': pull_requests,
            'linked_issues': linked_issues,
            'users': users,
            'user_logins': user_logins,
            'all_user_logins': all_user_logins,
        },
        autoescape=False,
        keep_trailing_newline=True,
    )
    print(content)
@cli.command()
@option_github_token
@option_previous_release
@option_current_release
@option_excluded_pr_list
@option_verbose
@option_limit_pr_count
def generate_issue_content(
    github_token: str,
    previous_release: str,
    current_release: str,
    excluded_pr_list: str,
    verbose: bool,
    limit_pr_count: Optional[int],
):
    """Generate and print the release-testing issue content.

    Collects PR numbers from the local git log between the two release refs,
    fetches each PR (falling back to the issue API), resolves issues linked
    in PR bodies, and renders the ISSUE template to stdout.
    """
    if excluded_pr_list:
        excluded_prs = [int(pr) for pr in excluded_pr_list.split(",")]
    else:
        excluded_prs = []
    changes = get_changes(verbose, previous_release, current_release)
    change_prs = [change.pr for change in changes]
    # Keep only commits that reference a PR and are not explicitly excluded.
    prs = [pr for pr in change_prs if pr is not None and pr not in excluded_prs]
    g = Github(github_token)
    repo = g.get_repo("apache/airflow")
    pull_requests: Dict[int, PullRequestOrIssue] = {}
    linked_issues: Dict[int, List[Issue.Issue]] = defaultdict(lambda: [])
    users: Dict[int, Set[str]] = defaultdict(lambda: set())
    count_prs = len(prs)
    if limit_pr_count:
        count_prs = limit_pr_count
    with Progress(console=console) as progress:
        task = progress.add_task(f"Retrieving {count_prs} PRs ", total=count_prs)
        for i in range(count_prs):
            pr_number = prs[i]
            progress.console.print(
                f"Retrieving PR#{pr_number}: " f"https://github.com/apache/airflow/pull/{pr_number}"
            )
            pr: PullRequestOrIssue
            try:
                pr = repo.get_pull(pr_number)
            except UnknownObjectException:
                # Fallback to issue if PR not found
                try:
                    pr = repo.get_issue(pr_number)  # (same fields as PR)
                except UnknownObjectException:
                    console.print(f"[red]The PR #{pr_number} could not be found[/]")
                    continue
            # Ignore doc-only and skipped PRs
            label_names = [label.name for label in pr.labels]
            if "type:doc-only" in label_names or "changelog:skip" in label_names:
                continue
            pull_requests[pr_number] = pr
            # GitHub does not have linked issues in PR - but we quite rigorously add Fixes/Closes
            # Relate so we can find those from the body
            if pr.body:
                body = pr.body.replace("\n", " ").replace("\r", " ")
                linked_issue_numbers = {
                    int(issue_match.group(1)) for issue_match in ISSUE_MATCH_IN_BODY.finditer(body)
                }
                for linked_issue_number in linked_issue_numbers:
                    progress.console.print(
                        f"Retrieving Linked issue PR#{linked_issue_number}: "
                        f"https://github.com/apache/airflow/issue/{linked_issue_number}"
                    )
                    try:
                        linked_issues[pr_number].append(repo.get_issue(linked_issue_number))
                    except UnknownObjectException:
                        progress.console.print(
                            f"Failed to retrieve linked issue #{linked_issue_number}: Unknown Issue"
                        )
            # Credit both the PR author and the authors of linked issues.
            users[pr_number].add(pr.user.login)
            for linked_issue in linked_issues[pr_number]:
                users[pr_number].add(linked_issue.user.login)
            progress.advance(task)
    print_issue_content(current_release, pull_requests, linked_issues, users)
# Script entry point: dispatch to the click command group.
if __name__ == "__main__":
    cli()
| |
# -*- coding: utf-8 -*-
"""Full-featured O(1) LRU cache backported from Python3.3. The full
Py3.3 API is supported (thread safety, maxsize, keyword args, type
checking, __wrapped__, and cache_info). Includes Py3.3 optimizations
for better memory utilization, fewer dependencies, and fewer dict
lookups.
http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/
Added persistence capability
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
from ..exceptions import HGVSDataNotAvailableError, HGVSVerifyFailedError
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = ['hashvalue']
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def __getstate__(self):
return True # needs to be truthy for __setstate__ to be called
def __setstate__(self, _):
self.hashvalue = hash(tuple(self))
def __repr__(self):
return '_HashedSeq({tuple!r})'.format(self=self, tuple=tuple(self))
def _make_key(func,
              args,
              kwds,
              typed,
              kwd_mark=(object(), ),
              fasttypes={int, str, frozenset, type(None)},
              sorted=sorted,
              tuple=tuple,
              type=type,
              len=len):
    'Make a cache key from optionally typed positional and keyword arguments'
    # The trailing defaults (sorted/tuple/type/len) localize builtins for
    # speed; kwd_mark is a unique sentinel separating args from kwargs.
    key = args
    key += kwd_mark
    # Unlike the stdlib version, the function name is folded into the key so
    # several functions can share one (persistent) cache dictionary.
    key += ('__func__', func)
    if kwds:
        sorted_items = sorted(kwds.items())
        for item in sorted_items:
            key += item
    if typed:
        # Append argument types so f(3) and f(3.0) key differently.
        key += tuple(type(v) for v in args)
        if kwds:
            key += tuple(type(v) for k, v in sorted_items)
    elif len(key) == 1 and type(key[0]) in fasttypes:
        # NOTE(review): key always contains kwd_mark plus the two-element
        # '__func__' marker, so len(key) == 1 can never hold here and this
        # fast path appears unreachable — confirm before removing (dropping
        # the fasttypes parameter would change the signature).
        return key[0]
    return _HashedSeq(key)
# Cache run modes for lru_cache(mode=...) below:
LEARN = 1   # execute on miss and persist the new result to the cache
RUN = 2     # cache-only: a miss raises HGVSDataNotAvailableError
VERIFY = 3  # always execute and compare against the cached value
def lru_cache(maxsize=100, typed=False, mode=None, cache=None):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info(). Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used

    :param mode: cache run mode
        None: the default lru cache behavior
        LEARN: queries are executed against the persistent cache; in the
            event of a miss, the novel query and result are written to the
            cache, then returned.
        RUN: queries are executed against caches only; misses raise
            HGVSDataNotAvailableError
        VERIFY: always execute the function; if the persistent cache value
            and the returned value differ, raise HGVSVerifyFailedError
    :param cache: PersistentDict object or None;
    """
    # Users should only access the lru_cache through its public API:
    #       cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    def decorating_function(user_function):

        lock = RLock()  # because linkedlist updates aren't threadsafe
        _cache = cache
        _maxsize = maxsize
        if _cache is None:
            _cache = dict()
        elif mode is not None:
            # A supplied persistent cache in LEARN/RUN/VERIFY mode is
            # never size-limited (no eviction of persisted entries).
            _maxsize = None
        stats = [0, 0]  # make statistics updateable non-locally
        HITS, MISSES = 0, 1  # names for the stats fields
        make_key = _make_key
        cache_get = _cache.get  # bound method to lookup key or return None
        _len = len  # localize the global len() function
        root = []  # root of the circular doubly linked list
        root[:] = [root, root, None, None]  # initialize by pointing to self
        nonlocal_root = [root]  # make updateable non-locally
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3  # names for the link fields

        if _maxsize == 0:

            def wrapper(*args, **kwds):
                # no caching, just do a statistics update after a successful call
                result = user_function(*args, **kwds)
                stats[MISSES] += 1
                return result

        elif _maxsize is None:

            def wrapper(*args, **kwds):
                # simple caching without ordering or size limit
                key = make_key(user_function.__name__, args, kwds, typed, ())
                result = cache_get(key, root)  # root used here as a unique not-found sentinel
                if result is not root:
                    stats[HITS] += 1
                    if mode == VERIFY:
                        # Re-execute and cross-check against the cached value.
                        latestres = user_function(*args, **kwds)
                        if latestres != result:
                            raise HGVSVerifyFailedError(
                                'The cached result is not consistent with latest result when calling '
                                + user_function.__name__ + ' with args ' + str(args) +
                                ' and keywords ' + str(kwds))
                    return result
                if mode == RUN:
                    # Cache-only mode: a miss is an error; never execute.
                    raise HGVSDataNotAvailableError(
                        'Data not available in local cache when calling ' + user_function.__name__ +
                        ' with args ' + str(args) + ' and keywords ' + str(kwds))
                result = user_function(*args, **kwds)
                _cache[key] = result
                if mode == LEARN:
                    # Flush the persistent store so the new entry survives.
                    _cache.sync()
                stats[MISSES] += 1
                return result

        else:

            def wrapper(*args, **kwds):
                # size limited caching that tracks accesses by recency
                # NOTE(review): the LEARN/RUN/VERIFY modes are ignored on
                # this path, taken when no persistent cache is supplied —
                # confirm this is intended.
                key = make_key(user_function.__name__, args, kwds, typed, ())
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # record recent use of the key by moving it to the front of the list
                        root, = nonlocal_root
                        link_prev, link_next, key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        stats[HITS] += 1
                        return result
                # Compute outside the lock; another thread may race us here.
                result = user_function(*args, **kwds)
                with lock:
                    root, = nonlocal_root
                    if key in _cache:
                        # getting here means that this same key was added to the
                        # cache while the lock was released.  since the link
                        # update is already done, we need only return the
                        # computed result and update the count of misses.
                        pass
                    elif _len(_cache) >= _maxsize:
                        # use the old root to store the new key and result
                        oldroot = root
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        # empty the oldest link and make it the new root
                        root = nonlocal_root[0] = oldroot[NEXT]
                        oldkey = root[KEY]
                        root[KEY] = root[RESULT] = None
                        # now update the cache dictionary for the new links
                        del _cache[oldkey]
                        _cache[key] = oldroot
                    else:
                        # put result in a new link at the front of the list
                        last = root[PREV]
                        link = [last, root, key, result]
                        last[NEXT] = root[PREV] = _cache[key] = link
                    stats[MISSES] += 1
                return result

        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(stats[HITS], stats[MISSES], _maxsize, len(_cache))

        def cache_clear():
            """Clear the cache and cache statistics"""
            with lock:
                _cache.clear()
                root = nonlocal_root[0]
                root[:] = [root, root, None, None]
                stats[:] = [0, 0]

        wrapper.__wrapped__ = user_function
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)

    return decorating_function
| |
# cython: auto_cpdef=True
"""Python code for writing AVRO files"""
# This code is a modified version of the code at
# http://svn.apache.org/viewvc/avro/trunk/lang/py/src/avro/ which is under
# Apache 2.0 license (http://www.apache.org/licenses/LICENSE-2.0)
try:
from fastavro._six import utob, MemoryIO, long, is_str, iteritems, mk_bits
from fastavro._reader import HEADER_SCHEMA, SYNC_SIZE, MAGIC
from fastavro._schema import extract_named_schemas_into_repo,\
extract_record_type, extract_logical_type
except ImportError:
from fastavro.six import utob, MemoryIO, long, is_str, iteritems, mk_bits
from fastavro.reader import HEADER_SCHEMA, SYNC_SIZE, MAGIC
from fastavro.schema import extract_named_schemas_into_repo,\
extract_record_type, extract_logical_type
from fastavro.const import MCS_PER_HOUR, MCS_PER_MINUTE, MCS_PER_SECOND,\
MLS_PER_HOUR, MLS_PER_MINUTE, MLS_PER_SECOND
# Prefer simplejson for speed when available; fall back to the stdlib.
try:
    import simplejson as json
except ImportError:
    import json
import datetime
import decimal
import time
from binascii import crc32
# BUG FIX: the collection ABCs moved to collections.abc (and were removed
# from the collections namespace in Python 3.10); try the new location first.
try:
    from collections.abc import Iterable, Mapping
except ImportError:
    from collections import Iterable, Mapping
from os import urandom, SEEK_SET
from struct import pack
from zlib import compress

# Sentinel type used when validating null values.
NoneType = type(None)
def write_null(fo, datum, schema=None):
    """A null value occupies zero bytes on the wire; emit nothing."""
    return None
def write_boolean(fo, datum, schema=None):
    """A boolean is encoded as one byte: 0x01 for true, 0x00 for false."""
    fo.write(b'\x01' if datum else b'\x00')
def prepare_timestamp_millis(data, schema):
    """Convert a datetime to milliseconds since the epoch (local mktime);
    any other value passes through unchanged."""
    if not isinstance(data, datetime.datetime):
        return data
    whole_seconds = int(time.mktime(data.timetuple()))
    return whole_seconds * MLS_PER_SECOND + data.microsecond // 1000
def prepare_timestamp_micros(data, schema):
    """Convert a datetime to microseconds since the epoch (local mktime);
    any other value passes through unchanged."""
    if not isinstance(data, datetime.datetime):
        return data
    whole_seconds = int(time.mktime(data.timetuple()))
    return whole_seconds * MCS_PER_SECOND + data.microsecond
def prepare_date(data, schema):
    """Convert a date to the Avro ``date`` logical type: the number of days
    since the Unix epoch (1970-01-01).

    BUG FIX: the previous code returned the raw ``toordinal()``, which counts
    days from 0001-01-01 and therefore wrote values off by 719163 days
    relative to the Avro specification.  Non-date values pass through
    unchanged.
    """
    if isinstance(data, datetime.date):
        return data.toordinal() - datetime.date(1970, 1, 1).toordinal()
    return data
def prepare_uuid(data, schema):
    """Serialize a UUID (or any value) to its string representation."""
    text = str(data)
    return text
def prepare_time_millis(data, schema):
    """Convert a time-of-day to milliseconds since midnight; any other value
    passes through unchanged."""
    if not isinstance(data, datetime.time):
        return data
    millis = (data.hour * MLS_PER_HOUR + data.minute * MLS_PER_MINUTE +
              data.second * MLS_PER_SECOND + data.microsecond // 1000)
    return int(millis)
def prepare_time_micros(data, schema):
    """Convert a time-of-day to microseconds since midnight; any other value
    passes through unchanged."""
    if not isinstance(data, datetime.time):
        return data
    return long(data.hour * MCS_PER_HOUR + data.minute * MCS_PER_MINUTE +
                data.second * MCS_PER_SECOND + data.microsecond)
def prepare_bytes_decimal(data, schema):
    """Encode a decimal.Decimal for the Avro bytes-decimal logical type:
    the unscaled value as a big-endian two's-complement integer.
    Non-Decimal values pass through unchanged."""
    if not isinstance(data, decimal.Decimal):
        return data
    scale = schema['scale']
    # based on https://github.com/apache/avro/pull/82/
    sign, digits, exp = data.as_tuple()
    if -exp > scale:
        # The value carries more fractional digits than the schema scale.
        raise AssertionError(
            'Scale provided in schema does not match the decimal')
    delta = exp + scale
    if delta > 0:
        # Pad with zeros so the value is expressed exactly at the schema scale.
        digits = digits + (0,) * delta
    # Fold the digit tuple into a single unscaled integer.
    unscaled_datum = 0
    for digit in digits:
        unscaled_datum = (unscaled_datum * 10) + digit
    # 2.6 support
    if not hasattr(unscaled_datum, 'bit_length'):
        bits_req = len(bin(abs(unscaled_datum))) - 2
    else:
        bits_req = unscaled_datum.bit_length() + 1
    if sign:
        # Two's complement of the magnitude for negative values.
        unscaled_datum = (1 << bits_req) - unscaled_datum
    bytes_req = bits_req // 8
    # Sign-extension bits for the most significant byte.
    padding_bits = ~((1 << bits_req) - 1) if sign else 0
    packed_bits = padding_bits | unscaled_datum
    bytes_req += 1 if (bytes_req << 3) < bits_req else 0
    # Emit the bytes most-significant first (big-endian).
    tmp = MemoryIO()
    for index in range(bytes_req - 1, -1, -1):
        bits_to_write = packed_bits >> (8 * index)
        tmp.write(mk_bits(bits_to_write & 0xff))
    return tmp.getvalue()
def write_int(fo, datum, schema=None):
    """Write an int/long using variable-length zig-zag coding."""
    # Zig-zag: fold negative values into the unsigned range so small
    # magnitudes encode into few bytes.
    zigzag = (datum << 1) ^ (datum >> 63)
    # Emit 7 bits per byte, least significant first; the high bit of each
    # byte flags that more bytes follow.
    while (zigzag & ~0x7F) != 0:
        fo.write(pack('B', (zigzag & 0x7f) | 0x80))
        zigzag >>= 7
    fo.write(pack('B', zigzag))


write_long = write_int
def write_float(fo, datum, schema=None):
    """Encode *datum* as a 4-byte IEEE-754 single in little-endian order
    (equivalent to Java's floatToIntBits followed by a 4-byte LE write)."""
    encoded = pack('<f', datum)
    fo.write(encoded)
def write_double(fo, datum, schema=None):
    """Encode *datum* as an 8-byte IEEE-754 double in little-endian order
    (equivalent to Java's doubleToLongBits followed by an 8-byte LE write)."""
    encoded = pack('<d', datum)
    fo.write(encoded)
def write_bytes(fo, datum, schema=None):
    """Bytes are written as a long length prefix followed by the raw bytes."""
    length = len(datum)
    write_long(fo, length)
    fo.write(datum)
def write_utf8(fo, datum, schema=None):
    """A string is written as a length-prefixed block of UTF-8 bytes."""
    encoded = utob(datum)
    write_bytes(fo, encoded)
def write_crc32(fo, bytes):
    """Write the CRC32 of *bytes* as an unsigned 4-byte big-endian value."""
    checksum = crc32(bytes) & 0xFFFFFFFF
    fo.write(pack('>I', checksum))
def write_fixed(fo, datum, schema=None):
    """Fixed values are written raw; the length comes from the schema, so no
    prefix is emitted."""
    fo.write(datum)
def write_enum(fo, datum, schema):
    """An enum is written as the int index of *datum* within the schema's
    symbol list."""
    write_int(fo, schema['symbols'].index(datum))
def write_array(fo, datum, schema):
    """Arrays are encoded as a series of blocks: each block is a long count
    followed by that many items, and a zero count terminates the array.
    (Avro also allows negative counts with byte sizes; this writer emits a
    single positive block.)"""
    if datum:
        write_long(fo, len(datum))
        item_schema = schema['items']
        for element in datum:
            write_data(fo, element, item_schema)
    write_long(fo, 0)
def write_map(fo, datum, schema):
    """Maps are encoded as a series of blocks: each block is a long count
    followed by that many key/value pairs, and a zero count terminates the
    map.  (Avro also allows negative counts with byte sizes; this writer
    emits a single positive block.)"""
    if datum:
        write_long(fo, len(datum))
        value_schema = schema['values']
        for key, value in iteritems(datum):
            write_utf8(fo, key)
            write_data(fo, value, value_schema)
    write_long(fo, 0)
# Signed 32/64-bit bounds used to range-check int/long values in validate().
INT_MIN_VALUE = -(1 << 31)
INT_MAX_VALUE = (1 << 31) - 1
LONG_MIN_VALUE = -(1 << 63)
LONG_MAX_VALUE = (1 << 63) - 1
def validate(datum, schema):
    """Determine if a python datum is an instance of a schema.

    :param datum: value to check
    :param schema: Avro schema (string type name, dict, or union list)
    :return: True when *datum* can be written with *schema*
    :raises ValueError: if the schema names an unknown type
    """
    record_type = extract_record_type(schema)
    if record_type == 'null':
        return datum is None
    if record_type == 'boolean':
        return isinstance(datum, bool)
    if record_type == 'string':
        return is_str(datum)
    if record_type == 'bytes':
        # Decimals are accepted because they are encoded as bytes
        # (see prepare_bytes_decimal).
        return isinstance(datum, (bytes, decimal.Decimal))
    if record_type == 'int':
        # time/datetime are accepted: logical types encode them as int/long.
        return (
            (isinstance(datum, (int, long,)) and
             INT_MIN_VALUE <= datum <= INT_MAX_VALUE) or
            isinstance(datum, (datetime.time, datetime.datetime))
        )
    if record_type == 'long':
        return (
            (isinstance(datum, (int, long,)) and
             LONG_MIN_VALUE <= datum <= LONG_MAX_VALUE) or
            isinstance(datum, (datetime.time, datetime.datetime))
        )
    if record_type in ['float', 'double']:
        return isinstance(datum, (int, long, float))
    if record_type == 'fixed':
        return isinstance(datum, bytes) and len(datum) == schema['size']
    if record_type == 'union':
        if isinstance(datum, tuple):
            # Tagged value: (record_name, value).  BUG FIX: the previous scan
            # returned False at the FIRST record branch whose name did not
            # match, so later record branches were never considered.
            (name, datum) = datum
            has_record_branch = False
            for candidate in schema:
                if extract_record_type(candidate) == 'record':
                    has_record_branch = True
                    if name == candidate["name"]:
                        return validate(datum, candidate)
            if has_record_branch:
                # Record branches existed but none matched the tag.
                return False
        return any(validate(datum, s) for s in schema)
    # dict-y types from here on.
    if record_type == 'enum':
        return datum in schema['symbols']
    if record_type == 'array':
        return (
            isinstance(datum, Iterable) and
            all(validate(d, schema['items']) for d in datum)
        )
    if record_type == 'map':
        return (
            isinstance(datum, Mapping) and
            all(is_str(k) for k in datum.keys()) and
            all(validate(v, schema['values']) for v in datum.values())
        )
    if record_type in ('record', 'error', 'request',):
        return (
            isinstance(datum, Mapping) and
            all(
                validate(datum.get(f['name'], f.get('default')), f['type'])
                for f in schema['fields']
            )
        )
    if record_type in SCHEMA_DEFS:
        # Named type declared earlier in the schema; validate against it.
        return validate(datum, SCHEMA_DEFS[record_type])
    # BUG FIX: corrected 'unkown' typo in the error message.
    raise ValueError('unknown record type - %s' % record_type)
def write_union(fo, datum, schema):
    """A union is encoded by first writing a long value indicating the
    zero-based position within the union of the schema of its value. The value
    is then encoded per the indicated schema within the union."""
    if isinstance(datum, tuple):
        # Caller supplied an explicitly named member: (record name, value).
        name, datum = datum
        index = None
        for position, candidate in enumerate(schema):
            if (extract_record_type(candidate) == 'record'
                    and candidate["name"] == name):
                index = position
                break
        if index is None:
            raise ValueError(
                'provided union type name %s not found in schema %s'
                % (name, schema))
    else:
        # Pick the first member schema that the datum validates against.
        pytype = type(datum)
        index = None
        for position, candidate in enumerate(schema):
            if validate(datum, candidate):
                index = position
                break
        if index is None:
            raise ValueError(
                '%r (type %s) do not match %s' % (datum, pytype, schema))

    # write data: the member position, then the value per that schema.
    write_long(fo, index)
    write_data(fo, datum, schema[index])
def write_record(fo, datum, schema):
    """A record is encoded by encoding the values of its fields in the order
    that they are declared. In other words, a record is encoded as just the
    concatenation of the encodings of its fields. Field values are encoded per
    their schema."""
    for field in schema['fields']:
        field_name = field['name']
        has_value = field_name in datum
        has_default = 'default' in field
        nullable = 'null' in field['type']
        # A field may be absent only if it has a default or permits null.
        if not (has_value or has_default or nullable):
            raise ValueError('no value and no default for %s' % field_name)
        value = datum.get(field_name, field.get('default'))
        write_data(fo, value, field['type'])
# Map of "<avro type>-<logical type>" -> preparation function. Each function
# converts a rich python value (datetime, Decimal, UUID, ...) into the
# underlying Avro primitive before the regular writer runs.
LOGICAL_WRITERS = {
    'long-timestamp-millis': prepare_timestamp_millis,
    'long-timestamp-micros': prepare_timestamp_micros,
    'int-date': prepare_date,
    'bytes-decimal': prepare_bytes_decimal,
    'string-uuid': prepare_uuid,
    'int-time-millis': prepare_time_millis,
    'long-time-micros': prepare_time_micros,
}
# Dispatch table: Avro record type -> writer function fn(fo, datum, schema).
# Named schemas encountered by acquaint_schema are added to this dict too.
WRITERS = {
    'null': write_null,
    'boolean': write_boolean,
    'string': write_utf8,
    'int': write_long,  # int shares the zig-zag varint encoding of long
    'long': write_long,
    'float': write_float,
    'double': write_double,
    'bytes': write_bytes,
    'fixed': write_fixed,
    'enum': write_enum,
    'array': write_array,
    'map': write_map,
    'union': write_union,
    'error_union': write_union,
    'record': write_record,
    'error': write_record,  # errors are encoded exactly like records
}
_base_types = [
'boolean',
'bytes',
'double',
'float',
'int',
'long',
'null',
'string',
]
SCHEMA_DEFS = dict((typ, typ) for typ in _base_types)
def write_data(fo, datum, schema):
    """Write a datum of data to output stream.

    Parameters
    ----------
    fo: file like
        Output file
    datum: object
        Data to write
    schema: dict
        Schema to use
    """
    record_type = extract_record_type(schema)
    logical_type = extract_logical_type(schema)

    fn = WRITERS[record_type]
    if logical_type:
        # Logical types are converted to their base primitive representation
        # before the regular type writer runs.
        prepare = LOGICAL_WRITERS[logical_type]
        data = prepare(datum, schema)
        return fn(fo, data, schema)
    return fn(fo, datum, schema)
def write_header(fo, metadata, sync_marker):
    """Write the Avro container-file header: magic bytes, the metadata map
    (values coerced to bytes) and the 16-byte sync marker."""
    header = {
        'magic': MAGIC,
        'meta': {key: utob(value) for key, value in iteritems(metadata)},
        'sync': sync_marker,
    }
    write_data(fo, header, HEADER_SCHEMA)
def null_write_block(fo, block_bytes):
    """Write block in "null" codec."""
    # A block is a long byte-count followed by the raw, uncompressed bytes.
    write_long(fo, len(block_bytes))
    fo.write(block_bytes)
def deflate_write_block(fo, block_bytes):
    """Write block in "deflate" codec."""
    # The first two characters and last character are zlib
    # wrappers around deflate data.
    # NOTE(review): the zlib trailer is 4 bytes (Adler-32), so [2:-1] keeps 3
    # trailing checksum bytes -- confirm this matches what the corresponding
    # block reader expects before changing it.
    data = compress(block_bytes)[2:-1]
    write_long(fo, len(data))
    fo.write(data)
# Codec name -> block writer function. 'snappy' is added below when the
# optional python-snappy package is importable.
BLOCK_WRITERS = {
    'null': null_write_block,
    'deflate': deflate_write_block
}
try:
    import snappy

    def snappy_write_block(fo, block_bytes):
        """Write block in "snappy" codec."""
        data = snappy.compress(block_bytes)
        # Byte count includes the trailing CRC32 of the uncompressed data.
        write_long(fo, len(data) + 4) # for CRC
        fo.write(data)
        write_crc32(fo, block_bytes)

    BLOCK_WRITERS['snappy'] = snappy_write_block
except ImportError:
    # python-snappy is optional; without it the codec is simply unavailable.
    pass
def acquaint_schema(schema, repo=None):
    """Extract schema into repo (default WRITERS)"""
    if repo is None:
        repo = WRITERS

    def make_named_writer(sub_schema):
        # Writer for a named schema: delegate to write_data with the
        # captured definition; the schema argument from the caller is unused.
        def named_writer(fo, datum, _):
            return write_data(fo, datum, sub_schema)
        return named_writer

    extract_named_schemas_into_repo(schema, repo, make_named_writer)
    # Also record the raw definitions so validate() can resolve named types.
    extract_named_schemas_into_repo(schema, SCHEMA_DEFS, lambda s: s)
class Writer(object):
    """Incrementally write records into an Avro container file."""

    def __init__(self,
                 fo,
                 schema,
                 codec='null',
                 sync_interval=1000 * SYNC_SIZE,
                 metadata=None,
                 validator=None):
        """Write the container header and prepare the block buffer.

        Parameters
        ----------
        fo: file like
            Output stream
        schema: dict
            Schema used to write the records
        codec: string, optional
            Compression codec ('null', 'deflate', or 'snappy' if installed)
        sync_interval: int, optional
            Approximate number of buffered bytes that triggers a block flush
        metadata: dict, optional
            Extra header metadata
        validator: None, True or a function
            If True, use the module-level ``validate``; if a callable, it is
            invoked as ``validator(record, schema)`` for every record.

        Raises
        ------
        ValueError
            If ``codec`` is not a known block codec.
        """
        self.fo = fo
        self.schema = schema
        self.validate_fn = validate if validator is True else validator
        self.sync_marker = urandom(SYNC_SIZE)
        self.io = MemoryIO()
        self.block_count = 0
        # Copy the caller's dict: the avro.* keys added below must not
        # mutate the argument (previously the caller's dict was modified).
        self.metadata = dict(metadata) if metadata else {}
        self.metadata['avro.codec'] = codec
        self.metadata['avro.schema'] = json.dumps(schema)
        self.sync_interval = sync_interval
        try:
            self.block_writer = BLOCK_WRITERS[codec]
        except KeyError:
            raise ValueError('unrecognized codec: %r' % codec)
        write_header(self.fo, self.metadata, self.sync_marker)
        acquaint_schema(self.schema)

    def dump(self):
        """Flush the buffered block: record count, block data, sync marker."""
        write_long(self.fo, self.block_count)
        self.block_writer(self.fo, self.io.getvalue())
        self.fo.write(self.sync_marker)
        # Reset the in-memory buffer for the next block.
        self.io.truncate(0)
        self.io.seek(0, SEEK_SET)
        self.block_count = 0

    def write(self, record):
        """Buffer one record, flushing a block once sync_interval is reached."""
        if self.validate_fn:
            self.validate_fn(record, self.schema)
        write_data(self.io, record, self.schema)
        self.block_count += 1
        if self.io.tell() >= self.sync_interval:
            self.dump()

    def flush(self):
        """Write any buffered records and flush the underlying stream."""
        if self.io.tell() or self.block_count > 0:
            self.dump()
        self.fo.flush()
def writer(fo,
           schema,
           records,
           codec='null',
           sync_interval=1000 * SYNC_SIZE,
           metadata=None,
           validator=None):
    """Write records to fo (stream) according to schema

    Parameters
    ----------
    fo: file like
        Output stream
    schema: dict
        Schema used to write the records
    records: iterable
        Records to write
    codec: string, optional
        Compression codec, can be 'null', 'deflate' or 'snappy' (if installed)
    sync_interval: int, optional
        Size of sync interval
    metadata: dict, optional
        Header metadata
    validator: None, True or a function
        Validator function. If None (the default) - no validation. If True
        then fastavro.writer.validate will be used. If it's a function, it
        should have the same signature as fastavro.writer.validate and raise an
        exception on error.

    Example
    -------
    >>> from fastavro import writer

    >>> schema = {
    >>>     'doc': 'A weather reading.',
    >>>     'name': 'Weather',
    >>>     'namespace': 'test',
    >>>     'type': 'record',
    >>>     'fields': [
    >>>         {'name': 'station', 'type': 'string'},
    >>>         {'name': 'time', 'type': 'long'},
    >>>         {'name': 'temp', 'type': 'int'},
    >>>     ],
    >>> }

    >>> records = [
    >>>     {u'station': u'011990-99999', u'temp': 0, u'time': 1433269388},
    >>>     {u'station': u'011990-99999', u'temp': 22, u'time': 1433270389},
    >>>     {u'station': u'011990-99999', u'temp': -11, u'time': 1433273379},
    >>>     {u'station': u'012650-99999', u'temp': 111, u'time': 1433275478},
    >>> ]

    >>> with open('weather.avro', 'wb') as out:
    >>>     writer(out, schema, records)
    """
    # Writer emits the header on construction; flush() writes the final block.
    output = Writer(
        fo,
        schema,
        codec,
        sync_interval,
        metadata,
        validator,
    )
    for record in records:
        output.write(record)
    output.flush()
def schemaless_writer(fo, schema, record):
    """Write a single record without the schema or header information

    Parameters
    ----------
    fo: file like
        Output file
    schema: dict
        Schema
    record: dict
        Record to write
    """
    # Register any named sub-schemas, then emit just the encoded record body.
    acquaint_schema(schema)
    write_data(fo, record, schema)
# ---------------------------------------------------------------------------
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class FromConstructor(test_util.TensorFlowTestCase):
  """Tests for constructing TFLiteConverter directly."""

  # Tests invalid constructors using a dummy value for the GraphDef.
  def testInvalidConstructor(self):
    """Omitting both tensors and the *_arrays pair must raise ValueError."""
    message = ('If input_tensors and output_tensors are None, both '
               'input_arrays_with_shape and output_arrays must be defined.')

    # `output_arrays` is not defined.
    with self.assertRaises(ValueError) as error:
      lite.TFLiteConverter(
          None, None, [], input_arrays_with_shape=[('input', [3, 9])])
    self.assertEqual(message, str(error.exception))

    # `input_arrays_with_shape` is not defined.
    with self.assertRaises(ValueError) as error:
      lite.TFLiteConverter(None, [], None, output_arrays=['output'])
    self.assertEqual(message, str(error.exception))

  # Tests valid constructors using a dummy value for the GraphDef.
  def testValidConstructor(self):
    """Constructor accepts either array names/shapes or tensor lists."""
    converter = lite.TFLiteConverter(
        None,
        None,
        None,
        input_arrays_with_shape=[('input', [3, 9])],
        output_arrays=['output'])
    self.assertFalse(converter._has_valid_tensors())
    self.assertEqual(converter.get_input_arrays(), ['input'])

    # Batch size cannot be changed when shapes came from input_arrays.
    with self.assertRaises(ValueError) as error:
      converter._set_batch_size(1)
    self.assertEqual(
        'The batch size cannot be set for this model. Please use '
        'input_shapes parameter.', str(error.exception))

    converter = lite.TFLiteConverter(None, ['input_tensor'], ['output_tensor'])
    self.assertTrue(converter._has_valid_tensors())
@test_util.run_v1_only('b/120545219')
class FromSessionTest(test_util.TensorFlowTestCase):
  """Tests TFLiteConverter.from_session against a variety of graphs."""

  def testFloat(self):
    """Converts a simple float model and verifies tensor details."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testString(self):
    """Converts a model with string tensors."""
    in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
    out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.string_, input_details[0]['dtype'])
    self.assertTrue(([4] == input_details[0]['shape']).all())

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('Reshape', output_details[0]['name'])
    self.assertEqual(np.string_, output_details[0]['dtype'])
    self.assertTrue(([2, 2] == output_details[0]['shape']).all())
    # TODO(b/122659643): Test setting/getting string data via the python
    # interpreter API after support has been added.

  def testQuantization(self):
    """Converts a fake-quantized model to quantized uint8."""
    in_tensor_1 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
    in_tensor_2 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
    out_tensor = array_ops.fake_quant_with_min_max_args(
        in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(
        sess, [in_tensor_1, in_tensor_2], [out_tensor])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {
        'inputA': (0., 1.),
        'inputB': (0., 1.)
    }  # mean, std_dev
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((1., 0.),
                     input_details[0]['quantization'])  # scale, zero_point

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.uint8, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((1., 0.),
                     input_details[1]['quantization'])  # scale, zero_point

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('output', output_details[0]['name'])
    self.assertEqual(np.uint8, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertTrue(output_details[0]['quantization'][0] > 0)  # scale

  def testQuantizationInvalid(self):
    """Missing quantization stats for an input must raise ValueError."""
    in_tensor_1 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
    in_tensor_2 = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
    out_tensor = array_ops.fake_quant_with_min_max_args(
        in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(
        sess, [in_tensor_1, in_tensor_2], [out_tensor])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {'inputA': (0., 1.)}  # mean, std_dev
    with self.assertRaises(ValueError) as error:
      converter.convert()
    self.assertEqual(
        'Quantization input stats are not available for input tensors '
        '\'inputB\'.', str(error.exception))

  def testIntermediateInputArray(self):
    """Convert a model from an intermediate input array."""
    in_tensor_init = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    in_tensor_final = in_tensor_init + in_tensor_init
    out_tensor = in_tensor_final + in_tensor_final
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor_final],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('add', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testSizeNoneInvalid(self):
    """A fully unknown input shape must raise ValueError."""
    in_tensor = array_ops.placeholder(dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Test None as shape.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    with self.assertRaises(ValueError) as error:
      converter.convert()
    self.assertEqual('Provide an input shape for input array \'Placeholder\'.',
                     str(error.exception))

  def testScalarValid(self):
    """Scalar (rank-0) inputs convert and run correctly."""
    # Construct a graph using a scalar (empty shape) input.
    in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[])
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Test conversion with the scalar input shape.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([] == input_details[0]['shape']).all())

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    # Bug fix: check the output shape here (was re-checking input_details).
    self.assertTrue(([] == output_details[0]['shape']).all())

    # Validate inference using the scalar inputs/outputs.
    test_input = np.array(4.0, dtype=np.float32)
    expected_output = np.array(8.0, dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], test_input)
    interpreter.invoke()

    output_data = interpreter.get_tensor(output_details[0]['index'])
    self.assertTrue((expected_output == output_data).all())

  def testSizeInvalid(self):
    """None in a non-batch dimension must raise ValueError."""
    in_tensor = array_ops.placeholder(
        shape=[1, None, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Test invalid shape. None after 1st dimension.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    with self.assertRaises(ValueError) as error:
      converter.convert()
    self.assertEqual(
        'None is only supported in the 1st dimension. Tensor '
        '\'Placeholder\' has invalid shape \'[1, None, 16, 3]\'.',
        str(error.exception))

  def testBatchSizeValid(self):
    """A None batch dimension is defaulted during conversion."""
    in_tensor = array_ops.placeholder(
        shape=[None, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testFreezeGraph(self):
    """Variables are frozen into constants during conversion."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    var = variable_scope.get_variable(
        'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + var
    sess = session.Session()
    sess.run(_global_variables_initializer())

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  # TODO(nupurgarg): Verify value of contents in GraphViz.
  def testGraphviz(self):
    """GRAPHVIZ_DOT output format produces non-empty output."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.output_format = lite_constants.GRAPHVIZ_DOT
    graphviz_output = converter.convert()
    self.assertTrue(graphviz_output)

  # TODO(nupurgarg): Verify value of contents in GraphViz.
  def testDumpGraphviz(self):
    """dump_graphviz_dir/dump_graphviz_video write graphviz artifacts."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    graphviz_dir = self.get_temp_dir()
    converter.dump_graphviz_dir = graphviz_dir
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure interpreter is able to allocate and check graphviz data.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    num_items_graphviz = len(os.listdir(graphviz_dir))
    self.assertTrue(num_items_graphviz)

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    graphviz_dir = self.get_temp_dir()
    converter.dump_graphviz_dir = graphviz_dir
    converter.dump_graphviz_video = True
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure graphviz folder has more data after using video flag.
    num_items_graphviz_video = len(os.listdir(graphviz_dir))
    self.assertTrue(num_items_graphviz_video > num_items_graphviz)

  def testInferenceInputType(self):
    """Quantized input type combined with float inference output."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.inference_input_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {'Placeholder': (0., 1.)}  # mean, std_dev
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((1., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())

  def testDefaultRangesStats(self):
    """default_ranges_stats enables quantization without fake-quant nodes."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
                                                  [out_tensor])
    converter.inference_type = lite_constants.QUANTIZED_UINT8
    converter.quantized_input_stats = {'Placeholder': (0., 1.)}  # mean, std_dev
    converter.default_ranges_stats = (0, 6)  # min, max
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.uint8, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((1., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.uint8, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertTrue(output_details[0]['quantization'][0] > 0)  # scale

  def testPostTrainingQuantize(self):
    """Weight quantization produces a smaller serialized model."""
    np.random.seed(0)
    # We need the tensor to have more than 1024 elements for quantize_weights
    # to kick in. Thus, the [33, 33] shape.
    in_tensor_1 = array_ops.placeholder(
        shape=[33, 33], dtype=dtypes.float32, name='inputA')
    in_tensor_2 = constant_op.constant(
        np.random.uniform(low=-10., high=10., size=(33, 33)),
        shape=[33, 33],
        dtype=dtypes.float32,
        name='inputB')
    out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
    sess = session.Session()

    # Convert float model.
    float_converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1],
                                                        [out_tensor])
    float_tflite = float_converter.convert()
    self.assertTrue(float_tflite)

    # Convert quantized weights model.
    quantized_converter = lite.TFLiteConverter.from_session(
        sess, [in_tensor_1], [out_tensor])
    quantized_converter.post_training_quantize = True
    quantized_tflite = quantized_converter.convert()
    self.assertTrue(quantized_tflite)

    # Ensure that the quantized weights tflite model is smaller.
    self.assertTrue(len(quantized_tflite) < len(float_tflite))

  def testFloatTocoConverter(self):
    """Tests the deprecated TocoConverter interface."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()

    # Convert model and ensure model is not None.
    converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the interpreter is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
@test_util.run_v1_only('b/120545219')
class FromFrozenGraphFile(test_util.TensorFlowTestCase):
  def testFloat(self):
    """Converts a float frozen-graph file and verifies tensor details."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
                                                       ['Placeholder'], ['add'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
  def testFloatWithShapesArray(self):
    """Converts a frozen graph with an explicit input_shapes override."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_frozen_graph(
        graph_def_file, ['Placeholder'], ['add'],
        input_shapes={'Placeholder': [1, 16, 16, 3]})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
  def testFreezeGraph(self):
    """A frozen-graph file that still contains variables must be rejected."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    var = variable_scope.get_variable(
        'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + var
    sess = session.Session()

    # Write graph to file.
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
    write_graph(sess.graph_def, '', graph_def_file, False)
    sess.close()

    # Ensure the graph with variables cannot be converted.
    with self.assertRaises(ValueError) as error:
      lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
                                             ['add'])
    self.assertEqual('Please freeze the graph using freeze_graph.py.',
                     str(error.exception))
  def testPbtxt(self):
    """Converts a text-format (.pbtxt) GraphDef file."""
    in_tensor = array_ops.placeholder(
        shape=[1, 16, 16, 3], dtype=dtypes.float32)
    _ = in_tensor + in_tensor
    sess = session.Session()

    # Write graph to file (as_text=True for the pbtxt format).
    graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
    write_graph(sess.graph_def, '', graph_def_file, True)
    sess.close()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,
                                                       ['Placeholder'], ['add'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('Placeholder', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])
  def testInvalidFileNotFound(self):
    """A nonexistent graph file must raise IOError."""
    with self.assertRaises(IOError) as error:
      lite.TFLiteConverter.from_frozen_graph('invalid_file', ['Placeholder'],
                                             ['add'])
    self.assertEqual('File \'invalid_file\' does not exist.',
                     str(error.exception))
  def testInvalidFileBadData(self):
    """A file that is not a parseable GraphDef must raise IOError."""
    graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
    with gfile.Open(graph_def_file, 'wb') as temp_file:
      temp_file.write('bad data')
      temp_file.flush()

    # Attempts to convert the invalid model.
    with self.assertRaises(IOError) as error:
      lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
                                             ['add'])
    self.assertEqual(
        'Unable to parse input file \'{}\'.'.format(graph_def_file),
        str(error.exception))
# TODO(nupurgarg): Test model loading in open source.
def _initObjectDetectionArgs(self):
  """Initialize the arguments required for the object detection model.

  The model file is saved in a different location internally and
  externally, so both locations are probed before giving up.
  """
  model_path = resource_loader.get_path_to_datafile('testdata/tflite_graph.pb')
  if not os.path.exists(model_path):
    model_path = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        '../tflite_mobilenet_ssd_quant_protobuf/tflite_graph.pb')
    if not os.path.exists(model_path):
      raise IOError("File '{0}' does not exist.".format(model_path))

  self._graph_def_file = model_path
  self._input_arrays = ['normalized_input_image_tensor']
  self._output_arrays = [
      'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
      'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'
  ]
  self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}
def testTFLiteGraphDef(self):
  """Convert the object detection model that TensorFlow itself cannot load."""
  self._initObjectDetectionArgs()

  converter = lite.TFLiteConverter.from_frozen_graph(
      self._graph_def_file, self._input_arrays, self._output_arrays,
      self._input_shapes)
  converter.allow_custom_ops = True
  tflite_model = converter.convert()
  self.assertTrue(tflite_model)

  # Check values from converted model.
  interpreter = Interpreter(model_content=tflite_model)
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()
  self.assertEqual(1, len(input_details))
  self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])
  self.assertEqual(np.float32, input_details[0]['dtype'])
  self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())
  self.assertEqual((0., 0.), input_details[0]['quantization'])

  output_details = interpreter.get_output_details()
  self.assertEqual(4, len(output_details))
  # Only the first output has its dtype/quantization pinned; all four have
  # their names and shapes checked.
  self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])
  self.assertEqual(np.float32, output_details[0]['dtype'])
  self.assertEqual((0., 0.), output_details[0]['quantization'])

  expected_shapes = ([1, 10, 4], [1, 10], [1, 10], [1])
  for index, expected_shape in enumerate(expected_shapes):
    if index:
      self.assertEqual('TFLite_Detection_PostProcess:%d' % index,
                       output_details[index]['name'])
    self.assertTrue(
        (expected_shape == output_details[index]['shape']).all())
def testTFLiteGraphDefMissingShape(self):
  """Conversion fails when `input_shapes` is omitted for this model."""
  self._initObjectDetectionArgs()

  # Missing `input_shapes`.
  with self.assertRaises(ValueError) as error:
    lite.TFLiteConverter.from_frozen_graph(
        self._graph_def_file, self._input_arrays, self._output_arrays)
  expected_message = 'input_shapes must be defined for this model.'
  self.assertEqual(expected_message, str(error.exception))
def testTFLiteGraphDefInvalidShape(self):
  """Conversion fails when `input_shapes` does not cover `input_arrays`."""
  self._initObjectDetectionArgs()

  # `input_shapes` does not contain the names in `input_arrays`.
  with self.assertRaises(ValueError) as error:
    lite.TFLiteConverter.from_frozen_graph(
        self._graph_def_file,
        self._input_arrays,
        self._output_arrays,
        input_shapes={'invalid-value': [1, 19]})
  expected_message = (
      'input_shapes must contain a value for each item in input_array.')
  self.assertEqual(expected_message, str(error.exception))
def testFloatTocoConverter(self):
  """The deprecated TocoConverter entry point still handles frozen graphs."""
  in_tensor = array_ops.placeholder(
      shape=[1, 16, 16, 3], dtype=dtypes.float32)
  _ = in_tensor + in_tensor
  sess = session.Session()

  # Write the binary GraphDef to disk.
  graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
  write_graph(sess.graph_def, '', graph_def_file, False)
  sess.close()

  # Convert and make sure the resulting model is not None.
  converter = lite.TocoConverter.from_frozen_graph(
      graph_def_file, ['Placeholder'], ['add'])
  tflite_model = converter.convert()
  self.assertTrue(tflite_model)

  # Ensure the model is able to load.
  interpreter = Interpreter(model_content=tflite_model)
  interpreter.allocate_tensors()
@test_util.run_v1_only('b/120545219')
class FromSavedModelTest(test_util.TensorFlowTestCase):
  """Tests for TFLiteConverter.from_saved_model."""

  def _createSavedModel(self, shape):
    """Create a simple SavedModel."""
    saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
    with session.Session() as sess:
      # Note the tensor named 'inputB' is created first; the converted
      # model's input details below come back with 'inputA' at index 0.
      in_tensor_1 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name='inputB')
      in_tensor_2 = array_ops.placeholder(
          shape=shape, dtype=dtypes.float32, name='inputA')
      out_tensor = in_tensor_1 + in_tensor_2
      inputs = {'x': in_tensor_1, 'y': in_tensor_2}
      outputs = {'z': out_tensor}
      saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
    return saved_model_dir

  def testSimpleModel(self):
    """Test a SavedModel."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testNoneBatchSize(self):
    """Test a SavedModel, with None in input tensor's shape."""
    saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])

    converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model. The None batch dimension is
    # expected to come back as 1 (see shape assertions below).
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testOrderInputArrays(self):
    """Test a SavedModel ordering of input arrays."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Pass the input arrays in reverse order; the resulting input details
    # are still reported with 'inputA' first.
    converter = lite.TFLiteConverter.from_saved_model(
        saved_model_dir, input_arrays=['inputB', 'inputA'])
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('inputA', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('inputB', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('add', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

  def testSubsetInputArrays(self):
    """Test a SavedModel with a subset of the input array names of the model."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Check case where input shape is given.
    converter = lite.TFLiteConverter.from_saved_model(
        saved_model_dir,
        input_arrays=['inputA'],
        input_shapes={'inputA': [1, 16, 16, 3]})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check case where input shape is None.
    converter = lite.TFLiteConverter.from_saved_model(
        saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

  def testSimpleModelTocoConverter(self):
    """Test a SavedModel with deprecated TocoConverter."""
    saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])

    # Convert model and ensure model is not None.
    converter = lite.TocoConverter.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the model is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
@test_util.run_v1_only('b/120545219')
class FromKerasFile(test_util.TensorFlowTestCase):
  """Tests for TFLiteConverter.from_keras_model_file."""

  def setUp(self):
    # Start each test from a fresh Keras graph/session.
    keras.backend.clear_session()

  def _getSequentialModel(self):
    """Build, train and save a small Sequential model; return the .h5 path."""
    with session.Session().as_default():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.categorical_accuracy],
          sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)
      model.predict(x)

      try:
        fd, keras_file = tempfile.mkstemp('.h5')
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)
      return keras_file

  def testSequentialModel(self):
    """Test a Sequential tf.keras model with default inputs."""
    keras_file = self._getSequentialModel()

    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check tensor details of converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # Check inference of converted model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_result = interpreter.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    # TFLite and Keras outputs are required to agree to 5 decimal places.
    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    os.remove(keras_file)

  def testSequentialModelInputArray(self):
    """Test a Sequential tf.keras model testing input arrays argument."""
    keras_file = self._getSequentialModel()

    # Invalid input array raises error.
    with self.assertRaises(ValueError) as error:
      lite.TFLiteConverter.from_keras_model_file(
          keras_file, input_arrays=['invalid-input'])
    self.assertEqual("Invalid tensors 'invalid-input' were found.",
                     str(error.exception))

    # Valid input array.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_arrays=['dense_input'])
    tflite_model = converter.convert()
    os.remove(keras_file)
    self.assertTrue(tflite_model)

  def testSequentialModelInputShape(self):
    """Test a Sequential tf.keras model testing input shapes argument."""
    keras_file = self._getSequentialModel()

    # Passing in shape of invalid input array raises error.
    with self.assertRaises(ValueError) as error:
      converter = lite.TFLiteConverter.from_keras_model_file(
          keras_file, input_shapes={'invalid-input': [2, 3]})
    self.assertEqual(
        "Invalid tensor 'invalid-input' found in tensor shapes map.",
        str(error.exception))

    # Passing in shape of valid input array.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, input_shapes={'dense_input': [2, 3]})
    tflite_model = converter.convert()
    os.remove(keras_file)
    self.assertTrue(tflite_model)

    # Check input shape from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertTrue(([2, 3] == input_details[0]['shape']).all())

  def testSequentialModelOutputArray(self):
    """Test a Sequential tf.keras model testing output arrays argument."""
    keras_file = self._getSequentialModel()

    # Invalid output array raises error.
    with self.assertRaises(ValueError) as error:
      lite.TFLiteConverter.from_keras_model_file(
          keras_file, output_arrays=['invalid-output'])
    self.assertEqual("Invalid tensors 'invalid-output' were found.",
                     str(error.exception))

    # Valid output array.
    converter = lite.TFLiteConverter.from_keras_model_file(
        keras_file, output_arrays=['time_distributed/Reshape_1'])
    tflite_model = converter.convert()
    os.remove(keras_file)
    self.assertTrue(tflite_model)

  def testFunctionalModel(self):
    """Test a Functional tf.keras model with default inputs."""
    with session.Session().as_default():
      inputs = keras.layers.Input(shape=(3,), name='input')
      x = keras.layers.Dense(2)(inputs)
      output = keras.layers.Dense(3)(x)

      model = keras.models.Model(inputs, output)
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.categorical_accuracy])
      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      model.predict(x)
      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check tensor details of converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # Check inference of converted model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_result = interpreter.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    os.remove(keras_file)

  def testFunctionalModelMultipleInputs(self):
    """Test a Functional tf.keras model with multiple inputs and outputs."""
    with session.Session().as_default():
      a = keras.layers.Input(shape=(3,), name='input_a')
      b = keras.layers.Input(shape=(3,), name='input_b')
      dense = keras.layers.Dense(4, name='dense')
      c = dense(a)
      d = dense(b)
      e = keras.layers.Dropout(0.5, name='dropout')(c)

      model = keras.models.Model([a, b], [d, e])
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.mae],
          loss_weights=[1., 0.5])

      input_a_np = np.random.random((10, 3))
      input_b_np = np.random.random((10, 3))
      output_d_np = np.random.random((10, 4))
      output_e_np = np.random.random((10, 4))
      model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])

      model.predict([input_a_np, input_b_np], batch_size=5)
      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    os.remove(keras_file)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(2, len(input_details))
    self.assertEqual('input_a', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    self.assertEqual('input_b', input_details[1]['name'])
    self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertTrue(([1, 3] == input_details[1]['shape']).all())
    self.assertEqual((0., 0.), input_details[1]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(2, len(output_details))
    self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 4] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    self.assertEqual('dropout/Identity', output_details[1]['name'])
    self.assertEqual(np.float32, output_details[1]['dtype'])
    self.assertTrue(([1, 4] == output_details[1]['shape']).all())
    self.assertEqual((0., 0.), output_details[1]['quantization'])

  def testFunctionalSequentialModel(self):
    """Test a Functional tf.keras model containing a Sequential model."""
    with session.Session().as_default():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model = keras.models.Model(model.input, model.output)

      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.RMSprop(),
          metrics=[keras.metrics.categorical_accuracy],
          sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)
      model.predict(x)

      model.predict(x)
      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)

    # Convert to TFLite model.
    converter = lite.TFLiteConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Check tensor details of converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertEqual(1, len(input_details))
    self.assertEqual('dense_input', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertTrue(([1, 3] == input_details[0]['shape']).all())
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual(1, len(output_details))
    self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    # Check inference of converted model.
    input_data = np.array([[1, 2, 3]], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    tflite_result = interpreter.get_tensor(output_details[0]['index'])

    keras_model = keras.models.load_model(keras_file)
    keras_result = keras_model.predict(input_data)

    np.testing.assert_almost_equal(tflite_result, keras_result, 5)
    os.remove(keras_file)

  def testSequentialModelTocoConverter(self):
    """Test a Sequential tf.keras model with deprecated TocoConverter."""
    keras_file = self._getSequentialModel()

    converter = lite.TocoConverter.from_keras_model_file(keras_file)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    # Ensure the model is able to load.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
  test.main()
| |
import numpy as np
import menpo
from nose.tools import raises
from numpy.testing import assert_allclose
from menpo.image import BooleanImage, Image, MaskedImage, OutOfMaskSampleError
from menpo.shape import PointCloud
from menpo.transform import Affine
import menpo.io as mio
# Do the builtin-asset import once at module load to generate the expected
# outputs shared by the tests below.
rgb_image = mio.import_builtin_asset('takeo.ppm')
gray_image = rgb_image.as_greyscale()
# Crop region from [70, 30] to [169, 129] (whether the max point is inclusive
# depends on menpo's crop semantics -- see menpo.image.Image.crop).
gray_template = gray_image.crop(np.array([70, 30]),
                                np.array([169, 129]))
rgb_template = rgb_image.crop(np.array([70, 30]),
                              np.array([169, 129]))
template_mask = BooleanImage.init_blank(gray_template.shape)
# Affine parameter vector used by the warp tests; the last two entries are
# the translation (70, 30) matching the template crop offset.
initial_params = np.array([0, 0, 0, 0, 70, 30])
# Flattened index grid for the 50..99 square region of the image.
row_indices, col_indices = np.meshgrid(np.arange(50, 100), np.arange(50, 100),
                                       indexing='ij')
row_indices, col_indices = row_indices.flatten(), col_indices.flatten()
multi_expected = rgb_image.crop([50, 50],
                                [100, 100]).pixels.flatten()
def test_warp_gray():
    """Warping the greyscale image into the template mask yields the crop."""
    image = mio.import_builtin_asset('takeo.ppm').as_greyscale()
    transform = Affine.init_identity(2).from_vector(initial_params)
    warped = image.warp_to_mask(template_mask, transform)
    assert warped.shape == gray_template.shape
    assert_allclose(warped.pixels, gray_template.pixels)
def test_warp_gray_batch():
    """Batched warping (batch_size=100) matches the expected greyscale crop."""
    image = mio.import_builtin_asset('takeo.ppm').as_greyscale()
    transform = Affine.init_identity(2).from_vector(initial_params)
    warped = image.warp_to_mask(template_mask, transform,
                                batch_size=100)
    assert warped.shape == gray_template.shape
    assert_allclose(warped.pixels, gray_template.pixels)
def test_warp_multi():
    """Warping the multi-channel (RGB) image yields the expected crop."""
    image = mio.import_builtin_asset('takeo.ppm')
    transform = Affine.init_identity(2).from_vector(initial_params)
    warped = image.warp_to_mask(template_mask, transform)
    assert warped.shape == rgb_template.shape
    assert_allclose(warped.pixels, rgb_template.pixels)
def test_warp_to_mask_boolean():
    """warp_to_mask on a BooleanImage returns a BooleanImage with the
    source's False columns carried over."""
    source = BooleanImage.init_blank((10, 10))
    source.pixels[:, :5] = False
    target_mask = BooleanImage.init_blank((10, 10))
    target_mask.pixels[:5, :] = False
    identity = Affine.init_identity(2)
    warped = source.warp_to_mask(target_mask, identity)
    assert type(warped) is BooleanImage
    expected = target_mask.pixels.copy()
    expected[:, :5] = False
    assert np.all(expected == warped.pixels)
def test_warp_to_mask_image():
    """warp_to_mask on a plain Image returns a MaskedImage whose pixels are
    only filled where both source values and target mask overlap."""
    source = Image.init_blank((10, 10), n_channels=2)
    source.pixels[:, :, :5] = 0.5
    target_mask = BooleanImage.init_blank((10, 10))
    target_mask.pixels[:, 5:, :] = False
    identity = Affine.init_identity(2)
    warped = source.warp_to_mask(target_mask, identity)
    assert type(warped) is MaskedImage
    expected = Image.init_blank((10, 10), n_channels=2).pixels
    expected[:, :5, :5] = 0.5
    assert np.all(expected == warped.pixels)
def test_warp_to_mask_masked_image():
    """warp_to_mask on a MaskedImage propagates both pixels and mask."""
    source_mask = BooleanImage.init_blank((15, 15))
    # Truncate one corner pixel of the source mask.
    source_mask.pixels[0, -1, -1] = False
    image = MaskedImage.init_blank((15, 15), n_channels=2, mask=source_mask,
                                   fill=2.5)
    target_mask = BooleanImage.init_blank((10, 10), fill=False)
    target_mask.pixels[:, :5, :5] = True
    identity = Affine.init_identity(2)
    warped = image.warp_to_mask(target_mask, identity)
    assert type(warped) is MaskedImage

    expected_pixels = Image.init_blank((10, 10), n_channels=2).pixels
    expected_pixels[:, :5, :5] = 2.5
    expected_mask = BooleanImage.init_blank((10, 10), fill=False).pixels
    expected_mask[:, :5, :5] = True

    assert warped.n_true_pixels() == 25
    assert_allclose(expected_pixels, warped.pixels)
    assert_allclose(expected_mask, warped.mask.pixels)
def test_warp_to_mask_masked_image_all_true():
    """warp_to_mask on a fully-true MaskedImage still yields a MaskedImage."""
    image = MaskedImage.init_blank((10, 10), fill=2.5)
    target_mask = BooleanImage.init_blank((10, 10), fill=False)
    target_mask.pixels[:, :5, :5] = True
    warped = image.warp_to_mask(target_mask, Affine.init_identity(2))
    assert type(warped) is MaskedImage
def test_warp_to_shape_equal_warp_to_mask():
    """warp_to_shape and warp_to_mask with a blank mask agree pixel-wise."""
    scale = menpo.transform.UniformScale(2.0, n_dims=2)
    image = mio.import_builtin_asset('breakingbad.jpg')
    via_shape = image.warp_to_shape((540, 960), scale)
    via_mask = image.warp_to_mask(
        menpo.image.BooleanImage.init_blank((540, 960)), scale)
    assert_allclose(via_shape.pixels, via_mask.pixels)
def test_warp_to_shape_batch():
    """Batched warp_to_shape with an identity transform is a no-op."""
    identity = menpo.transform.Affine.init_identity(2)
    image = mio.import_builtin_asset('takeo.ppm')
    warped = image.warp_to_shape(image.shape, identity, batch_size=100)
    assert_allclose(warped.pixels, image.pixels)
def test_rescale_boolean():
    """Smoke test: resizing a BooleanImage does not raise."""
    boolean_image = BooleanImage.init_blank((100, 100))
    boolean_image.resize((10, 10))
def test_sample_image():
    """Sampling a constant Image returns the fill value at every point."""
    image = Image.init_blank((100, 100), fill=2)
    points = PointCloud(np.array([[0, 0], [1, 0]]))
    assert_allclose(image.sample(points), [[2., 2.]])
def test_sample_maskedimage():
    """Sampling a constant MaskedImage inside the mask returns the fill."""
    image = MaskedImage.init_blank((100, 100), fill=2)
    points = PointCloud(np.array([[0, 0], [1, 0]]))
    assert_allclose(image.sample(points), [[2., 2.]])
@raises(OutOfMaskSampleError)
def test_sample_maskedimage_error():
    """Sampling entirely outside the mask raises OutOfMaskSampleError."""
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `bool` is the supported spelling for a boolean dtype.
    m = np.zeros([100, 100], dtype=bool)
    im = MaskedImage.init_blank((100, 100), mask=m, fill=2)
    p = PointCloud(np.array([[0, 0], [1, 0]]))

    im.sample(p)
def test_sample_maskedimage_error_values():
    """OutOfMaskSampleError carries the sampled values and per-point mask."""
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `bool` is the supported spelling for a boolean dtype.
    m = np.zeros([100, 100], dtype=bool)
    m[1, 0] = True
    im = MaskedImage.init_blank((100, 100), mask=m, fill=2)
    p = PointCloud(np.array([[0, 0], [1, 0]]))

    try:
        im.sample(p)
        # Expect exception!
        assert 0
    except OutOfMaskSampleError as e:
        sampled_mask = e.sampled_mask
        sampled_values = e.sampled_values
        assert_allclose(sampled_values, [[2., 2.]])
        assert_allclose(sampled_mask, [[False, True]])
def test_sample_booleanimage():
    """Sampling a BooleanImage returns the boolean value at each point."""
    image = BooleanImage.init_blank((100, 100))
    image.pixels[0, 1, 0] = False
    points = PointCloud(np.array([[0, 0], [1, 0]]))
    assert_allclose(image.sample(points), [[True, False]])
def test_zoom_image():
    """Zooming in 50% grows a centred 10x10 white square to 16x16."""
    image = Image.init_blank((100, 100), fill=0)
    # White square in the centre of size 10x10.
    image.pixels[0, 45:55, 45:55] = 1.0
    zoomed = image.zoom(1.5)
    # 16 * 16 == 256 non-zero pixels expected.
    assert np.count_nonzero(zoomed.pixels) == 256
def test_zoom_booleanimage():
    """Zooming past a one-pixel False border leaves an all-True mask."""
    image = BooleanImage.init_blank((100, 100))
    # Knock out the outermost ring of pixels.
    for border in (0, -1):
        image.pixels[0, border, :] = False
        image.pixels[0, :, border] = False
    zoomed = image.zoom(1.2)
    assert np.all(zoomed.pixels)
| |
# -*- coding: utf-8 -*-
import pytest
import ethereum.db
import ethereum.blocks
import ethereum.config
from ethereum import tester
from ethereum.utils import int_to_addr, zpad
from pyethapp.jsonrpc import address_decoder, data_decoder, quantity_decoder
from raiden.utils import privatekey_to_address
from raiden.tests.utils.blockchain import DEFAULT_BALANCE
from raiden.tests.utils.tester import (
approve_and_deposit,
channel_from_nettingcontract,
create_registryproxy,
create_tokenproxy,
deploy_channelmanager_library,
deploy_nettingchannel_library,
deploy_registry,
deploy_standard_token,
new_channelmanager,
new_nettingcontract,
)
from raiden.tests.utils.tester_client import ChannelExternalStateTester
@pytest.fixture
def tester_blockgas_limit():
    """ The tester's block gas limit.

    Set this value to `GAS_LIMIT` if the test needs to consider the gas usage.

    Note:
        `GAS_LIMIT` is defined in `raiden.network.rpc.client.GAS_LIMIT`
    """
    # Deliberately huge so tests do not hit the limit by default.
    return 10 ** 10
@pytest.fixture
def tester_events():
    """Fresh list used to collect contract events during a test."""
    return []
@pytest.fixture
def tester_state(deploy_key, private_keys, tester_blockgas_limit):
    """Pyethereum tester state backed by a custom genesis block.

    The deploy key, all test private keys, and the built-in tester accounts
    are pre-funded with DEFAULT_BALANCE.
    """
    tester_state = tester.state()

    # Fund low-numbered "special" addresses with 1 wei each.
    # NOTE(review): the original comment said "special addresses 1 to 5", but
    # range(1, 5) only covers 1..4 -- confirm which was intended.
    alloc = {
        int_to_addr(i): {'wei': 1}
        for i in range(1, 5)
    }

    for privkey in [deploy_key] + private_keys:
        address = privatekey_to_address(privkey)
        alloc[address] = {
            'balance': DEFAULT_BALANCE,
        }

    for account in tester.accounts:
        alloc[account] = {
            'balance': DEFAULT_BALANCE,
        }

    # In-memory database; state is discarded when the test ends.
    db = ethereum.db.EphemDB()
    env = ethereum.config.Env(
        db,
        ethereum.config.default_config,
    )
    genesis_overwrite = {
        'nonce': zpad(data_decoder('0x00006d6f7264656e'), 8),
        'difficulty': quantity_decoder('0x20000'),
        'mixhash': zpad(b'\x00', 32),
        'coinbase': address_decoder('0x0000000000000000000000000000000000000000'),
        'timestamp': 0,
        'extra_data': b'',
        'gas_limit': tester_blockgas_limit,
        'start_alloc': alloc,
    }
    genesis_block = ethereum.blocks.genesis(
        env,
        **genesis_overwrite
    )

    # enable DELEGATECALL opcode by placing the chain past the Homestead fork
    genesis_block.number = genesis_block.config['HOMESTEAD_FORK_BLKNUM'] + 1

    tester_state.db = db
    tester_state.env = env
    tester_state.block = genesis_block
    tester_state.block_gas_limit = tester_blockgas_limit
    tester_state.blocks = [genesis_block]

    return tester_state
@pytest.fixture
def tester_token_address(private_keys, token_amount, tester_state, sender_index=0):
    """Deploy a standard token contract and return its address."""
    return deploy_standard_token(
        private_keys[sender_index],
        tester_state,
        token_amount,
    )
@pytest.fixture
def tester_nettingchannel_library_address(deploy_key, tester_state):
    """Deploy the netting channel library and return its address."""
    return deploy_nettingchannel_library(deploy_key, tester_state)
@pytest.fixture
def tester_channelmanager_library_address(
        deploy_key,
        tester_state,
        tester_nettingchannel_library_address):
    """Deploy the channel manager library (linked against the netting
    channel library) and return its address."""
    return deploy_channelmanager_library(
        deploy_key,
        tester_state,
        tester_nettingchannel_library_address,
    )
@pytest.fixture
def tester_registry_address(tester_state, deploy_key, tester_channelmanager_library_address):
    """Deploy the registry contract and return its address."""
    return deploy_registry(
        deploy_key,
        tester_state,
        tester_channelmanager_library_address,
    )
@pytest.fixture
def tester_token_raw(tester_state, tester_token_address, tester_events):
    """Token proxy without any initial balance distribution."""
    return create_tokenproxy(
        tester_state,
        tester_token_address,
        tester_events.append,
    )
@pytest.fixture
def tester_token(token_amount, private_keys, tester_state, tester_token_address, tester_events):
    """Token proxy with the supply split evenly among all test accounts."""
    token = create_tokenproxy(
        tester_state,
        tester_token_address,
        tester_events.append,
    )

    sender_key = private_keys[0]
    share = token_amount // len(private_keys)
    for receiver_key in private_keys[1:]:
        token.transfer(  # pylint: disable=no-member
            privatekey_to_address(receiver_key),
            share,
            sender=sender_key,
        )

    return token
@pytest.fixture
def tester_registry(tester_state, tester_registry_address, tester_events):
    """Proxy to the deployed registry contract."""
    return create_registryproxy(
        tester_state,
        tester_registry_address,
        tester_events.append,
    )
@pytest.fixture
def tester_channelmanager(
        private_keys,
        tester_state,
        tester_events,
        tester_registry,
        tester_token):
    """Channel manager registered for the test token."""
    deployer_key = private_keys[0]
    return new_channelmanager(
        deployer_key,
        tester_state,
        tester_events.append,
        tester_registry,
        tester_token.address,
    )
@pytest.fixture
def tester_nettingcontracts(
        deposit,
        both_participants_deposit,
        private_keys,
        settle_timeout,
        tester_state,
        tester_events,
        tester_channelmanager,
        tester_token):
    """Create a netting contract for each adjacent pair of private keys.

    Returns a list of (first_key, second_key, nettingcontract) tuples.  The
    first participant always deposits; the second only does when
    `both_participants_deposit` is truthy.
    """
    raiden_chain = zip(private_keys[:-1], private_keys[1:])

    result = list()
    for pos, (first_key, second_key) in enumerate(raiden_chain, start=1):
        # tester.py log_listener is enabled for the whole tester, meaning that
        # a log_listener will receive all events that it can decode, even if
        # the event is from a different contract, because of that we _must_
        # only install the log_listener for the first ABI, otherwise the logs
        # will be repeated for each ABI
        if pos == 1:
            log_listener = tester_events.append
        else:
            log_listener = None

        nettingcontract = new_nettingcontract(
            first_key,
            second_key,
            tester_state,
            log_listener,
            tester_channelmanager,
            settle_timeout,
        )
        result.append(
            (first_key, second_key, nettingcontract),
        )

        approve_and_deposit(
            tester_token,
            nettingcontract,
            deposit,
            first_key,
        )

        if both_participants_deposit:
            approve_and_deposit(
                tester_token,
                nettingcontract,
                deposit,
                second_key,
            )

    return result
@pytest.fixture
def tester_channels(tester_state, tester_nettingcontracts, reveal_timeout):
    """For each netting contract, build one channel view per participant."""
    def make_channel(participant_key, nettingcontract):
        # Each participant gets its own external-state wrapper over the
        # same on-chain contract.
        external_state = ChannelExternalStateTester(
            tester_state,
            participant_key,
            nettingcontract.address.decode('hex'),
        )
        return channel_from_nettingcontract(
            participant_key,
            nettingcontract,
            external_state,
            reveal_timeout,
        )

    result = []
    for first_key, second_key, nettingcontract in tester_nettingcontracts:
        result.append((
            first_key,
            second_key,
            nettingcontract,
            make_channel(first_key, nettingcontract),
            make_channel(second_key, nettingcontract),
        ))
    return result
| |
# polling_location/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import PollingLocationListManager, PollingLocationManager
from config.base import get_environment_variable
from django.contrib import messages
import glob
import json
import requests
import wevote_functions.admin
from wevote_functions.functions import positive_value_exists, process_request_from_master
import xml.etree.ElementTree as MyElementTree
logger = wevote_functions.admin.get_logger(__name__)
WE_VOTE_API_KEY = get_environment_variable("WE_VOTE_API_KEY")
POLLING_LOCATIONS_SYNC_URL = get_environment_variable("POLLING_LOCATIONS_SYNC_URL") # pollingLocationsSyncOut
# def polling_locations_import_from_master_server(request, state_code):
# """
# Get the json data, and either create new entries or update existing
# :return:
# """
#
# import_results, structured_json = process_request_from_master(
# request, "Loading Polling Locations from We Vote Master servers",
# POLLING_LOCATIONS_SYNC_URL, {
# "key": WE_VOTE_API_KEY, # This comes from an environment variable
# "state": state_code,
# }
# )
#
# if import_results['success']:
# results = filter_polling_locations_structured_json_for_local_duplicates(structured_json)
# filtered_structured_json = results['structured_json']
# duplicates_removed = results['duplicates_removed']
#
# import_results = polling_locations_import_from_structured_json(filtered_structured_json)
# import_results['duplicates_removed'] = duplicates_removed
#
# return import_results
def filter_polling_locations_structured_json_for_local_duplicates(structured_json):
    """
    With this function, we remove polling_locations that seem to be duplicates, but have different we_vote_id's.
    :param structured_json: list of polling-location dicts retrieved from the master server
    :return: dict with 'structured_json' (the filtered list) and 'duplicates_removed' (count dropped)
    """
    duplicates_removed = 0
    filtered_structured_json = []
    polling_location_list_manager = PollingLocationListManager()
    for one_polling_location in structured_json:
        # Incoming json may omit any of these keys; default to ''
        polling_location_id = one_polling_location.get('polling_location_id', '')
        we_vote_id_from_master = one_polling_location.get('we_vote_id', '')
        state = one_polling_location.get('state', '')
        location_name = one_polling_location.get('location_name', '')
        line1 = one_polling_location.get('line1', '')
        zip_long = one_polling_location.get('zip_long', '')
        # Check to see if there is an entry that matches in all critical ways, minus the we_vote_id
        results = polling_location_list_manager.retrieve_possible_duplicate_polling_locations(
            polling_location_id, state, location_name, line1, zip_long,
            we_vote_id_from_master)
        if results['polling_location_list_found']:
            # There seems to be a duplicate already in this database using a different we_vote_id
            duplicates_removed += 1
        else:
            filtered_structured_json.append(one_polling_location)
    polling_locations_results = {
        'success': True,
        'status': "FILTER_POLLING_LOCATIONS_FOR_DUPLICATES_PROCESS_COMPLETE",
        'duplicates_removed': duplicates_removed,
        'structured_json': filtered_structured_json,
    }
    return polling_locations_results
def polling_locations_import_from_structured_json(structured_json):
    """
    This pathway in requires a we_vote_id, and is not used when we import from Google Civic
    :param structured_json: list of polling-location dicts (one per location)
    :return: dict with 'saved', 'updated' and 'not_processed' counters
    """
    polling_location_manager = PollingLocationManager()
    polling_locations_saved = 0
    polling_locations_updated = 0
    polling_locations_not_processed = 0
    for one_polling_location in structured_json:
        # Required values -- the entry is skipped when any of these are missing
        we_vote_id = one_polling_location.get('we_vote_id', '')
        line1 = one_polling_location.get('line1', '')
        city = one_polling_location.get('city', '')
        state = one_polling_location.get('state', '')
        proceed_to_update_or_create = (
            positive_value_exists(we_vote_id) and positive_value_exists(line1) and
            positive_value_exists(city) and positive_value_exists(state))
        if proceed_to_update_or_create:
            # Values that are not required
            polling_location_id = one_polling_location.get('polling_location_id', '')
            location_name = one_polling_location.get('location_name', '')
            polling_hours_text = one_polling_location.get('polling_hours_text', '')
            directions_text = one_polling_location.get('directions_text', '')
            latitude = one_polling_location.get('latitude', 0)
            longitude = one_polling_location.get('longitude', 0)
            line2 = one_polling_location.get('line2', '')
            polling_location_deleted = one_polling_location.get('polling_location_deleted', False)
            use_for_bulk_retrieve = one_polling_location.get('use_for_bulk_retrieve', False)
            zip_long = one_polling_location.get('zip_long', '')
            results = polling_location_manager.update_or_create_polling_location(
                we_vote_id, polling_location_id, location_name, polling_hours_text, directions_text,
                line1, line2, city, state, zip_long, latitude, longitude, use_for_bulk_retrieve,
                polling_location_deleted)
        else:
            # NOTE: the increment happens below via results['success'] being False.
            # (An earlier version also incremented here, double-counting these rows.)
            results = {
                'success': False,
                'status': 'Required value missing, cannot update or create'
            }
        if results['success']:
            if results['new_polling_location_created']:
                polling_locations_saved += 1
            else:
                polling_locations_updated += 1
        else:
            polling_locations_not_processed += 1
    polling_locations_results = {
        'success': True,
        'status': "POLLING_LOCATIONS_IMPORT_PROCESS_COMPLETE",
        'saved': polling_locations_saved,
        'updated': polling_locations_updated,
        'not_processed': polling_locations_not_processed,
    }
    return polling_locations_results
def import_and_save_all_polling_locations_data(state_code=''):
    """Import every vipFeed xml file found on disk for one state.

    :param state_code: two-letter state folder name; required, nothing is imported without it
    :return: merged counters dict with 'updated', 'saved' and 'not_processed'
    """
    # In most states we can visit this URL (example is 'va' or virginia):
    # https://data.votinginfoproject.org/feeds/va/?order=D
    # and download the first zip file.
    # https://data.votinginfoproject.org/feeds/STATE/?order=D
    print('import_and_save_all_polling_locations_data...')
    all_results = []
    if not positive_value_exists(state_code):
        # Warn once and bail out (previously this was printed once per file
        # found, and not at all when no files existed).
        print(' IMPORT_AND_SAVE_ALL_POLLING_LOCATIONS-STATE_REQUIRED ')
        return merge_polling_location_results(*all_results)
    state_code_folder_path = "/" + state_code + "/"
    for xml_path in glob.glob('polling_location/import_data/*/vipFeed-*.xml'):
        if 'ignore' in xml_path:
            continue
        if state_code_folder_path in xml_path:
            print(' loading:', xml_path)
            all_results.append(import_and_save_polling_location_data(xml_path))
    return merge_polling_location_results(*all_results)
def merge_polling_location_results(*dict_args):
    """Sum the 'updated'/'saved'/'not_processed' counters across result dicts.

    :param dict_args: any number of per-file result dicts; each must carry all three keys
    :return: one dict with the summed counters (all zero when called with no args)
    """
    results = {
        'updated': 0,
        'saved': 0,
        'not_processed': 0,
    }
    for incoming_results in dict_args:
        for key in results:
            results[key] += incoming_results[key]
    return results
def import_and_save_polling_location_data(xml_file_location):
    """Parse one vipFeed xml file and persist every polling location in it."""
    parsed_locations = retrieve_polling_locations_data_from_xml(xml_file_location)
    return save_polling_locations_from_list(parsed_locations)
def retrieve_polling_locations_data_from_xml(xml_file_location):
    """Parse a VIP feed xml file into a list of polling-location dicts.

    We parse the XML file, which can be quite large. Expected shape:
    # <polling_location id="80037">
    #     <polling_hours>6:00 AM - 7:00 PM</polling_hours>
    #     <address>
    #        <city>HARRISONBURG</city>
    #        <line1>400 MOUNTAIN VIEW DRIVE</line1>
    #        <state>VA</state>
    #        <location_name>SPOTSWOOD ELEMENTARY SCHOOL</location_name>
    #        <zip>22801</zip>
    #     </address>
    # </polling_location>

    :param xml_file_location: path to the vipFeed xml file
    :return: list of dicts ready for save_polling_locations_from_list
    """
    tree = MyElementTree.parse(xml_file_location)
    root = tree.getroot()
    polling_locations_list = []
    # Some states, like Oregon, have early_vote_site instead of polling_location;
    # early_vote_site uses <name> where polling_location uses <location_name>.
    for element_tag, location_name_tag in (('polling_location', 'location_name'),
                                           ('early_vote_site', 'name')):
        for polling_location in root.findall(element_tag):
            one_entry = _build_polling_location_entry(polling_location, location_name_tag)
            if one_entry is not None:
                polling_locations_list.append(one_entry)
    return polling_locations_list


def _element_text(parent, tag):
    """Return the text of child *tag* of *parent*, or '' when the child is absent."""
    child = parent.find(tag)
    return child.text if child is not None else ''


def _build_polling_location_entry(polling_location, location_name_tag):
    """Convert one <polling_location>/<early_vote_site> element to a dict.

    Returns None for entries we do not want to save (placeholder cities).
    """
    address = polling_location.find('address')
    if address is not None:
        location_name_text = _element_text(address, location_name_tag)
        line1_text = _element_text(address, 'line1')
        city_text = _element_text(address, 'city')
        if city_text in ('A BALLOT FOR EACH ELECTION', '0'):
            # We don't want to save this polling location
            return None
        state_text = _element_text(address, 'state')
        zip_long_text = _element_text(address, 'zip')
    else:
        location_name_text = ''
        line1_text = ''
        city_text = ''
        state_text = ''
        zip_long_text = ''
    return {
        "polling_location_id": polling_location.get('id'),
        "location_name": location_name_text,
        "polling_hours_text": _element_text(polling_location, 'polling_hours'),
        "directions": _element_text(polling_location, 'directions'),
        "line1": line1_text,
        "line2": '',
        "city": city_text,
        "state": state_text,
        "zip_long": zip_long_text,
    }
def save_polling_locations_from_list(polling_locations_list):
    """Save each parsed polling-location dict via PollingLocationManager.

    :param polling_locations_list: dicts produced by retrieve_polling_locations_data_from_xml
    :return: dict with 'updated', 'saved' and 'not_processed' counters
    """
    polling_location_manager = PollingLocationManager()
    polling_locations_updated = 0
    polling_locations_saved = 0
    polling_locations_not_processed = 0
    # Entries from the state XML feeds never carry a we_vote_id
    polling_location_we_vote_id = ''
    for polling_location in polling_locations_list:
        # Optional values default to falsy equivalents
        latitude = polling_location.get('latitude', 0)
        longitude = polling_location.get('longitude', 0)
        polling_location_deleted = polling_location.get('polling_location_deleted', False)
        use_for_bulk_retrieve = polling_location.get('use_for_bulk_retrieve', False)
        results = polling_location_manager.update_or_create_polling_location(
            polling_location_we_vote_id,
            polling_location['polling_location_id'],
            polling_location['location_name'],
            polling_location['polling_hours_text'],
            polling_location['directions'],
            polling_location['line1'],
            polling_location['line2'],
            polling_location['city'],
            polling_location['state'],
            polling_location['zip_long'],
            latitude,
            longitude,
            use_for_bulk_retrieve,
            polling_location_deleted
        )
        if results['success']:
            if results['new_polling_location_created']:
                polling_locations_saved += 1
            else:
                polling_locations_updated += 1
        else:
            polling_locations_not_processed += 1
    save_results = {
        'updated': polling_locations_updated,
        'saved': polling_locations_saved,
        'not_processed': polling_locations_not_processed,
    }
    return save_results
| |
import json
import logging
from pathlib import Path
from threading import RLock
from uuid import uuid4
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentMode
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import (
TAG_RAY_CLUSTER_NAME,
TAG_RAY_NODE_NAME,
TAG_RAY_NODE_KIND,
TAG_RAY_LAUNCH_CONFIG,
TAG_RAY_USER_NODE_TYPE,
)
from ray.autoscaler._private._azure.config import (
bootstrap_azure,
get_azure_sdk_function,
)
VM_NAME_MAX_LEN = 64
VM_NAME_UUID_LEN = 8
logger = logging.getLogger(__name__)
azure_logger = logging.getLogger("azure.core.pipeline.policies.http_logging_policy")
azure_logger.setLevel(logging.WARNING)
def synchronized(f):
    """Decorator: run method *f* while holding the instance's ``self.lock``.

    The decorated callable must be a method of an object exposing a lock
    (here an RLock, so synchronized methods may call each other re-entrantly).
    """
    def wrapper(self, *args, **kwargs):
        # `with` guarantees release on both return and exception, replacing
        # the manual acquire/try/finally pattern.
        with self.lock:
            return f(self, *args, **kwargs)
    return wrapper
class AzureNodeProvider(NodeProvider):
    """Node Provider for Azure
    This provider assumes Azure credentials are set by running ``az login``
    and the default subscription is configured through ``az account``
    or set in the ``provider`` field of the autoscaler configuration.
    Nodes may be in one of three states: {pending, running, terminated}. Nodes
    appear immediately once started by ``create_node``, and transition
    immediately to terminated when ``terminate_node`` is called.
    """
    def __init__(self, provider_config, cluster_name):
        """Build the Azure SDK clients used for all node operations.

        ``cache_stopped_nodes`` (default True) makes ``terminate_node``
        deallocate VMs instead of deleting them, so ``create_node`` can
        restart them later.
        """
        NodeProvider.__init__(self, provider_config, cluster_name)
        subscription_id = provider_config["subscription_id"]
        self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes", True)
        credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True)
        self.compute_client = ComputeManagementClient(credential, subscription_id)
        self.network_client = NetworkManagementClient(credential, subscription_id)
        self.resource_client = ResourceManagementClient(credential, subscription_id)
        # Guards cached_nodes; re-entrant so @synchronized methods can nest.
        self.lock = RLock()
        # cache node objects
        self.cached_nodes = {}
    @synchronized
    def _get_filtered_nodes(self, tag_filters):
        """List VMs in the resource group whose tags match *tag_filters*,
        refresh ``self.cached_nodes`` with their metadata, and return the
        refreshed cache (dict keyed by VM name)."""
        def match_tags(vm):
            # A VM matches only when every filter key is present with the same value.
            for k, v in tag_filters.items():
                if vm.tags.get(k) != v:
                    return False
            return True
        vms = self.compute_client.virtual_machines.list(
            resource_group_name=self.provider_config["resource_group"]
        )
        nodes = [self._extract_metadata(vm) for vm in filter(match_tags, vms)]
        self.cached_nodes = {node["name"]: node for node in nodes}
        return self.cached_nodes
    def _extract_metadata(self, vm):
        """Collect name, tags, power state and IP information for one VM."""
        # get tags
        metadata = {"name": vm.name, "tags": vm.tags, "status": ""}
        # get status
        resource_group = self.provider_config["resource_group"]
        instance = self.compute_client.virtual_machines.instance_view(
            resource_group_name=resource_group, vm_name=vm.name
        ).as_dict()
        for status in instance["statuses"]:
            # Status codes look like "<kind>/<state>", e.g. "PowerState/running".
            code, state = status["code"].split("/")
            # skip provisioning status
            if code == "PowerState":
                metadata["status"] = state
                break
        # get ip data
        nic_id = vm.network_profile.network_interfaces[0].id
        metadata["nic_name"] = nic_id.split("/")[-1]
        nic = self.network_client.network_interfaces.get(
            resource_group_name=resource_group,
            network_interface_name=metadata["nic_name"],
        )
        ip_config = nic.ip_configurations[0]
        if not self.provider_config.get("use_internal_ips", False):
            public_ip_id = ip_config.public_ip_address.id
            metadata["public_ip_name"] = public_ip_id.split("/")[-1]
            public_ip = self.network_client.public_ip_addresses.get(
                resource_group_name=resource_group,
                public_ip_address_name=metadata["public_ip_name"],
            )
            metadata["external_ip"] = public_ip.ip_address
        metadata["internal_ip"] = ip_config.private_ip_address
        return metadata
    def stopped_nodes(self, tag_filters):
        """Return a list of stopped node ids filtered by the specified tags dict."""
        nodes = self._get_filtered_nodes(tag_filters=tag_filters)
        # "deallocated"/"deallocating" power states both count as stopped.
        return [k for k, v in nodes.items() if v["status"].startswith("deallocat")]
    def non_terminated_nodes(self, tag_filters):
        """Return a list of node ids filtered by the specified tags dict.
        This list must not include terminated nodes. For performance reasons,
        providers are allowed to cache the result of a call to nodes() to
        serve single-node queries (e.g. is_running(node_id)). This means that
        nodes() must be called again to refresh results.
        Examples:
            >>> provider.non_terminated_nodes({TAG_RAY_NODE_KIND: "worker"})
            ["node-1", "node-2"]
        """
        nodes = self._get_filtered_nodes(tag_filters=tag_filters)
        return [k for k, v in nodes.items() if not v["status"].startswith("deallocat")]
    def is_running(self, node_id):
        """Return whether the specified node is running."""
        # always get current status
        node = self._get_node(node_id=node_id)
        return node["status"] == "running"
    def is_terminated(self, node_id):
        """Return whether the specified node is terminated."""
        # always get current status
        node = self._get_node(node_id=node_id)
        return node["status"].startswith("deallocat")
    def node_tags(self, node_id):
        """Returns the tags of the given node (string dict)."""
        return self._get_cached_node(node_id=node_id)["tags"]
    def external_ip(self, node_id):
        """Returns the external ip of the given node."""
        # Fall back to a fresh lookup when the cached entry has no ip yet.
        ip = (
            self._get_cached_node(node_id=node_id)["external_ip"]
            or self._get_node(node_id=node_id)["external_ip"]
        )
        return ip
    def internal_ip(self, node_id):
        """Returns the internal ip (Ray ip) of the given node."""
        # Fall back to a fresh lookup when the cached entry has no ip yet.
        ip = (
            self._get_cached_node(node_id=node_id)["internal_ip"]
            or self._get_node(node_id=node_id)["internal_ip"]
        )
        return ip
    def create_node(self, node_config, tags, count):
        """Start *count* nodes, restarting deallocated VMs first when
        ``cache_stopped_nodes`` is enabled, then deploying new VMs for the
        remainder."""
        resource_group = self.provider_config["resource_group"]
        if self.cache_stopped_nodes:
            VALIDITY_TAGS = [
                TAG_RAY_CLUSTER_NAME,
                TAG_RAY_NODE_KIND,
                TAG_RAY_LAUNCH_CONFIG,
                TAG_RAY_USER_NODE_TYPE,
            ]
            # Only reuse stopped VMs whose identity tags match this request.
            filters = {tag: tags[tag] for tag in VALIDITY_TAGS if tag in tags}
            reuse_nodes = self.stopped_nodes(filters)[:count]
            # NOTE(review): this logs even when reuse_nodes is empty --
            # consider guarding the message with `if reuse_nodes:`.
            logger.info(
                f"Reusing nodes {list(reuse_nodes)}. "
                "To disable reuse, set `cache_stopped_nodes: False` "
                "under `provider` in the cluster configuration.",
            )
            start = get_azure_sdk_function(
                client=self.compute_client.virtual_machines, function_name="start"
            )
            for node_id in reuse_nodes:
                # Block until the VM is started, then refresh its tags.
                start(resource_group_name=resource_group, vm_name=node_id).wait()
                self.set_node_tags(node_id, tags)
            count -= len(reuse_nodes)
        if count:
            self._create_node(node_config, tags, count)
    def _create_node(self, node_config, tags, count):
        """Creates a number of nodes within the namespace."""
        resource_group = self.provider_config["resource_group"]
        # load the template file
        current_path = Path(__file__).parent
        template_path = current_path.joinpath("azure-vm-template.json")
        with open(template_path, "r") as template_fp:
            template = json.load(template_fp)
        # get the tags
        config_tags = node_config.get("tags", {}).copy()
        config_tags.update(tags)
        config_tags[TAG_RAY_CLUSTER_NAME] = self.cluster_name
        name_tag = config_tags.get(TAG_RAY_NODE_NAME, "node")
        # Random suffix keeps VM names unique across deployments.
        unique_id = uuid4().hex[:VM_NAME_UUID_LEN]
        vm_name = "{name}-{id}".format(name=name_tag, id=unique_id)
        use_internal_ips = self.provider_config.get("use_internal_ips", False)
        template_params = node_config["azure_arm_parameters"].copy()
        template_params["vmName"] = vm_name
        template_params["provisionPublicIp"] = not use_internal_ips
        template_params["vmTags"] = config_tags
        template_params["vmCount"] = count
        # ARM deployment payload; each template parameter is wrapped as {"value": ...}.
        parameters = {
            "properties": {
                "mode": DeploymentMode.incremental,
                "template": template,
                "parameters": {
                    key: {"value": value} for key, value in template_params.items()
                },
            }
        }
        # TODO: we could get the private/public ips back directly
        create_or_update = get_azure_sdk_function(
            client=self.resource_client.deployments, function_name="create_or_update"
        )
        create_or_update(
            resource_group_name=resource_group,
            deployment_name="ray-vm-{}".format(name_tag),
            parameters=parameters,
        ).wait()
    @synchronized
    def set_node_tags(self, node_id, tags):
        """Sets the tag values (string dict) for the specified node."""
        # Merge the new tags into the cached ones and push the result to Azure.
        node_tags = self._get_cached_node(node_id)["tags"]
        node_tags.update(tags)
        update = get_azure_sdk_function(
            client=self.compute_client.virtual_machines, function_name="update"
        )
        update(
            resource_group_name=self.provider_config["resource_group"],
            vm_name=node_id,
            parameters={"tags": node_tags},
        )
        self.cached_nodes[node_id]["tags"] = node_tags
    def terminate_node(self, node_id):
        """Terminates the specified node. This will delete the VM and
        associated resources (NIC, IP, Storage) for the specified node."""
        resource_group = self.provider_config["resource_group"]
        try:
            # get metadata for node
            metadata = self._get_node(node_id)
        except KeyError:
            # node no longer exists
            return
        if self.cache_stopped_nodes:
            try:
                # stop machine and leave all resources
                # NOTE(review): the adjacent string literals below are missing
                # a separating space, producing "...{node_id}(to fully...".
                logger.info(
                    f"Stopping instance {node_id}"
                    "(to fully terminate instead, "
                    "set `cache_stopped_nodes: False` "
                    "under `provider` in the cluster configuration)"
                )
                stop = get_azure_sdk_function(
                    client=self.compute_client.virtual_machines,
                    function_name="deallocate",
                )
                stop(resource_group_name=resource_group, vm_name=node_id)
            except Exception as e:
                logger.warning("Failed to stop VM: {}".format(e))
        else:
            vm = self.compute_client.virtual_machines.get(
                resource_group_name=resource_group, vm_name=node_id
            )
            # Collect disk names before the VM is deleted; disks are removed last.
            disks = {d.name for d in vm.storage_profile.data_disks}
            disks.add(vm.storage_profile.os_disk.name)
            try:
                # delete machine, must wait for this to complete
                delete = get_azure_sdk_function(
                    client=self.compute_client.virtual_machines, function_name="delete"
                )
                delete(resource_group_name=resource_group, vm_name=node_id).wait()
            except Exception as e:
                logger.warning("Failed to delete VM: {}".format(e))
            try:
                # delete nic
                delete = get_azure_sdk_function(
                    client=self.network_client.network_interfaces,
                    function_name="delete",
                )
                delete(
                    resource_group_name=resource_group,
                    network_interface_name=metadata["nic_name"],
                )
            except Exception as e:
                logger.warning("Failed to delete nic: {}".format(e))
            # delete ip address
            if "public_ip_name" in metadata:
                try:
                    delete = get_azure_sdk_function(
                        client=self.network_client.public_ip_addresses,
                        function_name="delete",
                    )
                    delete(
                        resource_group_name=resource_group,
                        public_ip_address_name=metadata["public_ip_name"],
                    )
                except Exception as e:
                    logger.warning("Failed to delete public ip: {}".format(e))
            # delete disks
            for disk in disks:
                try:
                    delete = get_azure_sdk_function(
                        client=self.compute_client.disks, function_name="delete"
                    )
                    delete(resource_group_name=resource_group, disk_name=disk)
                except Exception as e:
                    logger.warning("Failed to delete disk: {}".format(e))
    def _get_node(self, node_id):
        """Fetch fresh metadata for *node_id*; raises KeyError if it is gone."""
        self._get_filtered_nodes({})  # Side effect: updates cache
        return self.cached_nodes[node_id]
    def _get_cached_node(self, node_id):
        """Return cached metadata for *node_id*, refreshing only on a miss."""
        if node_id in self.cached_nodes:
            return self.cached_nodes[node_id]
        return self._get_node(node_id=node_id)
    @staticmethod
    def bootstrap_config(cluster_config):
        # Delegates cluster-config preparation (resource group, networking,
        # auth) to the shared bootstrap helper.
        return bootstrap_azure(cluster_config)
| |
"""
TwoPhaseFlow
"""
from __future__ import division
from past.utils import old_div
import numpy as np
from proteus import (Domain, Context, Gauges,
MeshTools as mt)
from proteus.Profiling import logEvent
from proteus.mprans.SpatialTools import Tank2D
from proteus.mprans.SpatialTools import Tank3D
from proteus.mprans import SpatialTools as st
from proteus.Profiling import logEvent
import proteus.TwoPhaseFlow.TwoPhaseFlowProblem as TpFlow
import proteus.TwoPhaseFlow.utils.Parameters as Parameters
import math
import os
# *************************** #
# ***** GENERAL OPTIONS ***** #
# *************************** #
# Command-line-overridable context options for the simulation.
opts= Context.Options([
    ('ns_model',1,"ns_model={0,1} for {rans2p,rans3p}"),
    ("final_time",7.5,"Final time for simulation"),
    ("dt_output",0.1,"Time interval to output solution"),
    ("gauges", True, "Collect data for validation"),
    ("cfl",0.2,"Desired CFL restriction"),
    ("he",0.5,"Max mesh element diameter"),
    ("ARTIFICIAL_VISCOSITY",3,"artificial viscosity")
    ])
# This setup is only wired up for the rans3p (projection scheme) model.
assert opts.ns_model==1, "use ns_model=1 (rans3pf) for this"
# ****************** #
# ***** GAUGES ***** #
# ****************** #
if opts.gauges:
    # Pressure sampled at the fixed probe points P1/P3/P5/P7.
    pressure_gauges = Gauges.PointGauges(gauges=((('p',),
                                                  ((2.389,0.526,0.025), #P1
                                                   (2.389,0.526,0.099), #P3
                                                   (2.414,0.474,0.165), #P5
                                                   (2.487,0.474,0.165))),), #P7
                                         fileName="pressure.csv")
    # Level-set value (phi) sampled at the same probe points.
    point_height_gauges = Gauges.PointGauges(gauges=((('phi',),
                                                      ((2.389,0.526,0.025), #P1
                                                       (2.389,0.526,0.099), #P3
                                                       (2.414,0.474,0.165), #P5
                                                       (2.487,0.474,0.165))),), #P7
                                             fileName="point_clsvof.csv")
    # Vertical sampling lines used to extract water-height time series.
    height_gauges = Gauges.LineGauges(gauges=((("phi",),
                                               (((2.724, 0.5, 0.0),
                                                 (2.724, 0.5, 1.0)),
                                                ((2.228, 0.5, 0.0),
                                                 (2.228, 0.5, 1.0)),
                                                ((1.732, 0.5, 0.0),
                                                 (1.732, 0.5, 1.0)),
                                                ((0.582, 0.5, 0.0),
                                                 (0.582, 0.5, 1.0)))),),
                                      fileName="height.csv")
# *************************** #
# ***** DOMAIN AND MESH ***** #
# *************************** #
# Tank dimensions and the obstacle box (presumably the MARIN dam-break
# geometry, given the probe coordinates above -- TODO confirm).
L = [3.22,1.0,1.0]
box_L = [0.16,0.4,0.16]
box_xy = [2.39,0.3]
he = opts.he
boundaries=['left','right','bottom','top','front','back','box_left','box_right','box_top','box_front','box_back',]
boundaryTags=dict([(key,i+1) for (i,key) in enumerate(boundaries)])
bt = boundaryTags
# A hole marker at the box center keeps the mesher from meshing its interior.
holes = [[0.5*box_L[0]+box_xy[0],0.5*box_L[1]+box_xy[1],0.5*box_L[2]]]
# Vertices 0-7: tank corners; 8-15: obstacle-box corners.
vertices=[[0.0,0.0,0.0],#0
          [L[0],0.0,0.0],#1
          [L[0],L[1],0.0],#2
          [0.0,L[1],0.0],#3
          [0.0,0.0,L[2]],#4
          [L[0],0.0,L[2]],#5
          [L[0],L[1],L[2]],#6
          [0.0,L[1],L[2]],#7
          [box_xy[0],box_xy[1],0.0],#8
          [box_xy[0]+box_L[0],box_xy[1],0.0],#9
          [box_xy[0]+box_L[0],box_xy[1]+box_L[1],0.0],#10
          [box_xy[0],box_xy[1]+box_L[1],0.0],#11
          [box_xy[0],box_xy[1],box_L[2]],#12
          [box_xy[0]+box_L[0],box_xy[1],box_L[2]],#13
          [box_xy[0]+box_L[0],box_xy[1]+box_L[1],box_L[2]],#14
          [box_xy[0],box_xy[1]+box_L[1],box_L[2]]]#15
vertexFlags=[boundaryTags['left'],
             boundaryTags['right'],
             boundaryTags['right'],
             boundaryTags['left'],
             boundaryTags['left'],
             boundaryTags['right'],
             boundaryTags['right'],
             boundaryTags['left'],
             boundaryTags['box_left'],
             boundaryTags['box_left'],
             boundaryTags['box_left'],
             boundaryTags['box_left'],
             boundaryTags['box_left'],
             boundaryTags['box_left'],
             boundaryTags['box_left'],
             boundaryTags['box_left']]
# The tank bottom facet carries the box footprint (8-11) as an inner ring.
facets=[[[0,1,2,3],[8,9,10,11]],
        [[0,1,5,4]],
        [[1,2,6,5]],
        [[2,3,7,6]],
        [[3,0,4,7]],
        [[4,5,6,7]],
        [[8,9,13,12]],
        [[9,10,14,13]],
        [[10,11,15,14]],
        [[11,8,12,15]],
        [[12,13,14,15]]]
facetFlags=[boundaryTags['bottom'],
            boundaryTags['front'],
            boundaryTags['right'],
            boundaryTags['back'],
            boundaryTags['left'],
            boundaryTags['top'],
            boundaryTags['box_front'],
            boundaryTags['box_right'],
            boundaryTags['box_back'],
            boundaryTags['box_left'],
            boundaryTags['box_top']]
regions=[[0.5*L[0],0.5*L[1],0.5*L[2]]]
regionFlags=[0]
domain = Domain.PiecewiseLinearComplexDomain(vertices=vertices,
                                             vertexFlags=vertexFlags,
                                             facets=facets,
                                             facetFlags=facetFlags,
                                             regions = regions,
                                             regionFlags = regionFlags,
                                             holes=holes)
domain.MeshOptions.setParallelPartitioningType('node')
domain.boundaryTags = boundaryTags
#domain.polyfile="meshMarin"
# The mesh is read from the pre-generated meshMarin poly file (genMesh=False below).
domain.polyfile=os.path.dirname(os.path.abspath(__file__))+"/"+"meshMarin"
#domain.writePoly("meshMarin")
#domain.writePLY("mesh")
#domain.writeAsymptote("mesh")
# tetgen options: quality bounds plus a max element volume derived from he.
domain.MeshOptions.triangleOptions="VApq1.25q12feena%e" % ((he**3)/6.0,)
domain.MeshOptions.he = opts.he
domain.MeshOptions.triangleFlag = 0
domain.MeshOptions.genMesh=False
# ****************************** #
# ***** INITIAL CONDITIONS ***** #
# ****************************** #
class zero(object):
    """Initial condition that is identically zero."""
    def uOfXT(self, x, t):
        # Same value at every point and every time.
        return 0.
# True: sharp (+1/-1) indicator initial condition for clsvof;
# False: signed-distance initial condition (see clsvof_init_cond below... both
# branches are implemented in that class).
disc_ICs=True
class clsvof_init_cond(object):
    """Initial condition for clsvof: a water column in the low-x/low-z corner.

    When the module-level flag ``disc_ICs`` is set, the value is a sharp
    indicator (-1 in water, +1 in air, 0 on the interface); otherwise it is
    the signed distance to the water surface.
    """
    def uOfXT(self, x, t):
        water_x = 1.22
        water_z = 0.55
        dx = x[0] - water_x
        dz = x[2] - water_z
        if disc_ICs:
            # Discontinuous description of the two phases.
            if x[0] < water_x and x[2] < water_z:
                return -1.0
            if x[0] > water_x or x[2] > water_z:
                return 1.0
            return 0.0
        # Signed distance to the water column surface (corner-aware).
        if dx < 0.0 and dz < 0.0:
            return max(dx, dz)
        if dx < 0.0:
            return dz
        if dz < 0.0:
            return dx
        return math.sqrt(dx**2 + dz**2)
# ******************************* #
# ***** BOUNDARY CONDITIONS ***** #
# ******************************* #
# Toggles: no-slip velocity on the obstacle box, and an open (atmospheric) top.
non_slip_BCs=True
openTop=True
# DIRICHLET BOUNDARY CONDITIONS #
def vel_u_DBC(x,flag):
    """Dirichlet BC for u: no-slip (u = 0) on every face of the box."""
    box_faces = (boundaryTags['box_left'],
                 boundaryTags['box_right'],
                 boundaryTags['box_top'],
                 boundaryTags['box_front'],
                 boundaryTags['box_back'])
    if non_slip_BCs and flag in box_faces:
        return lambda x,t: 0.0
def vel_v_DBC(x,flag):
    """Dirichlet BC for v: no-slip (v = 0) on every face of the box."""
    box_faces = (boundaryTags['box_left'],
                 boundaryTags['box_right'],
                 boundaryTags['box_top'],
                 boundaryTags['box_front'],
                 boundaryTags['box_back'])
    if non_slip_BCs and flag in box_faces:
        return lambda x,t: 0.0
def vel_w_DBC(x,flag):
    """Dirichlet BC for w: no-slip (w = 0) on every face of the box."""
    box_faces = (boundaryTags['box_left'],
                 boundaryTags['box_right'],
                 boundaryTags['box_top'],
                 boundaryTags['box_front'],
                 boundaryTags['box_back'])
    if non_slip_BCs and flag in box_faces:
        return lambda x,t: 0.0
def pressure_increment_DBC(x,flag):
    """Pin the pressure increment to zero on the open top."""
    if openTop and flag == boundaryTags['top']:
        return lambda x,t: 0.0
def pressure_DBC(x,flag):
    """Atmospheric (zero) pressure on the open top."""
    if openTop and flag == boundaryTags['top']:
        return lambda x,t: 0.0
def clsvof_DBC(x,flag):
    """Air (phi = 1) enters through the open top."""
    if flag == boundaryTags['top'] and openTop:
        return lambda x,t: 1.0
# ADVECTIVE FLUX BOUNDARY CONDITIONS #
def vel_u_AFBC(x,flag):
    """Advective flux BC for u: free on the box and open top, zero (slip) elsewhere."""
    box_faces = (boundaryTags['box_left'],
                 boundaryTags['box_right'],
                 boundaryTags['box_top'],
                 boundaryTags['box_front'],
                 boundaryTags['box_back'])
    if non_slip_BCs and flag in box_faces:
        return None
    if openTop and flag == boundaryTags['top']:
        return None
    # slip everywhere but the box
    return lambda x,t: 0.0
def vel_v_AFBC(x,flag):
    """Advective flux BC for v: free on the box and open top, zero (slip) elsewhere."""
    box_faces = (boundaryTags['box_left'],
                 boundaryTags['box_right'],
                 boundaryTags['box_top'],
                 boundaryTags['box_front'],
                 boundaryTags['box_back'])
    if non_slip_BCs and flag in box_faces:
        return None
    if openTop and flag == boundaryTags['top']:
        return None
    # slip everywhere but the box
    return lambda x,t: 0.0
def vel_w_AFBC(x,flag):
    # Advective flux for w: unset (None) on the no-slip box faces and the
    # open top; zero advective flux (slip) on every other boundary.
    if non_slip_BCs and flag in (boundaryTags['box_left'],
                                 boundaryTags['box_right'],
                                 boundaryTags['box_top'],
                                 boundaryTags['box_front'],
                                 boundaryTags['box_back']):
        return None
    if openTop and flag == boundaryTags['top']:
        return None
    # slip everywhere but the box
    return lambda x,t: 0.0
def pressure_increment_AFBC(x,flag):
    # Zero advective flux on every boundary except the open top.
    if not (openTop and flag == boundaryTags['top']):
        return lambda x,t: 0.0
def pressure_AFBC(x,flag):
    # Zero advective flux on every boundary except the open top.
    if not (openTop and flag == boundaryTags['top']):
        return lambda x,t: 0.0
def clsvof_AFBC(x,flag):
    # Free outflow of the level set at the open top; no flux elsewhere.
    if openTop and flag == boundaryTags['top']:
        return None
    return lambda x,t: 0.0
# DIFFUSIVE FLUX BCs #
def pressure_increment_DFBC(x,flag):
    # Zero diffusive flux on every boundary except the open top.
    if not (openTop and flag == boundaryTags['top']):
        return lambda x,t: 0.0
############################################
# ***** Create myTwoPhaseFlowProblem ***** #
############################################
# Initial conditions: velocity and pressure start from zero; the level-set
# field comes from clsvof_init_cond (defined earlier in this file).
initialConditions = {'pressure': zero(),
                     'pressure_increment': zero(),
                     'vel_u': zero(),
                     'vel_v': zero(),
                     'vel_w': zero(),
                     'clsvof': clsvof_init_cond()}
# Map of BC callbacks consumed by the TwoPhaseFlowProblem machinery; keys
# follow proteus' <field>_<kind> naming convention.
boundaryConditions = {
    # DIRICHLET BCs #
    'pressure_DBC': pressure_DBC,
    'pressure_increment_DBC': pressure_increment_DBC,
    'vel_u_DBC': vel_u_DBC,
    'vel_v_DBC': vel_v_DBC,
    'vel_w_DBC': vel_w_DBC,
    'clsvof_DBC': clsvof_DBC,
    # ADVECTIVE FLUX BCs #
    'pressure_AFBC': pressure_AFBC,
    'pressure_increment_AFBC': pressure_increment_AFBC,
    'vel_u_AFBC': vel_u_AFBC,
    'vel_v_AFBC': vel_v_AFBC,
    'vel_w_AFBC': vel_w_AFBC,
    'clsvof_AFBC': clsvof_AFBC,
    # DIFFUSIVE FLUX BCs #
    'pressure_increment_DFBC': pressure_increment_DFBC,
    # Velocity diffusive fluxes are homogeneous everywhere; the level set
    # has no diffusive flux BC.
    'vel_u_DFBC': lambda x, flag: lambda x,t: 0.,
    'vel_v_DFBC': lambda x, flag: lambda x,t: 0.,
    'vel_w_DFBC': lambda x, flag: lambda x,t: 0.,
    'clsvof_DFBC': lambda x, flag: None}
# Assemble the two-phase flow problem: time stepping, physics models,
# gravity, boundary conditions and per-model initial conditions.
myTpFlowProblem = TpFlow.TwoPhaseFlowProblem()
myTpFlowProblem.domain=domain
myTpFlowProblem.outputStepping.final_time = opts.final_time
myTpFlowProblem.outputStepping.dt_output = opts.dt_output
myTpFlowProblem.outputStepping.systemStepExact = True
myTpFlowProblem.SystemPhysics.setDefaults()
#myTpFlowProblem.SystemPhysics.useDefaultModels(flowModel=opts.ns_model,interfaceModel=1)
# Model registration order matters: interface (CLSVOF), momentum (RANS3PF),
# then the pressure-projection models.
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelCLSVOF,'clsvof')
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelRANS3PF,'flow')
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelPressureIncrement,'pressureInc')
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelPressure,'pressure')
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelPressureInitial,'pressureInit')
myTpFlowProblem.SystemPhysics.gravity = np.array([0.0,0.0,-9.8])
myTpFlowProblem.SystemPhysics.boundaryConditions=boundaryConditions
myTpFlowProblem.SystemPhysics.useBoundaryConditionsModule = False
# Per-model initial conditions (all fields start from rest / zero pressure).
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['u']=zero()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['v']=zero()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['w']=zero()
myTpFlowProblem.SystemPhysics.modelDict['clsvof'].p.initialConditions['clsvof'] = clsvof_init_cond()
myTpFlowProblem.SystemPhysics.modelDict['pressure'].p.initialConditions['p'] = zero()
myTpFlowProblem.SystemPhysics.modelDict['pressureInc'].p.initialConditions['pInc'] = zero()
myTpFlowProblem.SystemPhysics.modelDict['pressureInit'].p.initialConditions['pInit'] = zero()
m = myTpFlowProblem.SystemPhysics.modelDict
# Model-specific coefficients and measurement gauges.
m['clsvof'].p.coefficients.disc_ICs = disc_ICs
m['flow'].p.coefficients.useVF = 1.0
m['flow'].p.coefficients.eb_bc_penalty_constant = 1e6
m['flow'].p.coefficients.ARTIFICIAL_VISCOSITY = opts.ARTIFICIAL_VISCOSITY
m['clsvof'].auxiliaryVariables = [point_height_gauges, height_gauges]
m['pressure'].auxiliaryVariables = [pressure_gauges]
myTpFlowProblem.SystemNumerics.cfl=opts.cfl
myTpFlowProblem.SystemNumerics.useSuperlu=True
| |
import sys, os, inspect
from abc import ABCMeta, abstractmethod
from ctypes import *
# Resolve this file's directory, then make ../helpers importable so the
# bundled helper modules (dhlog, dhcore) can be loaded below.
MY_DIR = os.path.dirname(os.path.abspath(inspect.getframeinfo(inspect.currentframe())[0]))
HELPER_DIR = os.path.abspath(os.path.join(MY_DIR, '..', 'helpers'))
sys.path.append(HELPER_DIR)
import dhlog
from dhcore import *
class MSAAMode:
    """Multisample anti-aliasing modes; the value is the sample count."""
    NONE = 0
    X8 = 8
    X4 = 4
    X2 = 2
class TextureQuality:
    """Texture quality presets; a lower value means higher quality."""
    HIGHEST = 0
    HIGH = 1
    NORMAL = 2
    LOW = 3
class TextureFilter:
    """Texture filtering modes, from plain trilinear up to 16x anisotropic."""
    ANISO16X = 5
    ANISO8X = 4
    ANISO4X = 3
    ANISO2X = 2
    BILINEAR = 1
    TRILINEAR = 0
class ShadingQuality:
    """Shading quality presets; a lower value means higher quality."""
    HIGH = 0
    NORMAL = 1
    LOW = 2
class GfxHwVer:
    """Graphics hardware / API version identifiers (Direct3D and OpenGL)."""
    UNKNOWN = 0
    # Direct3D versions
    D3D10_0 = 1
    D3D10_1 = 2
    D3D11_0 = 3
    D3D11_1 = 4
    # OpenGL versions (note: value 5 is unused)
    GL3_2 = 6
    GL3_3 = 7
    GL4_0 = 8
    GL4_1 = 9
    GL4_2 = 10
    GL4_3 = 11
    GL4_4 = 12
class GfxFlags:
    """Bit flags for graphics initialization (combine with |)."""
    FULLSCREEN = 1 << 0
    VSYNC = 1 << 1
    DEBUG = 1 << 2
    FXAA = 1 << 3
    REBUILDSHADERS = 1 << 4
class EngFlags:
    """Bit flags controlling engine startup behavior (combine with |)."""
    DEBUG = 1 << 0
    DEV = 1 << 1
    EDITOR = 1 << 2
    CONSOLE = 1 << 3
    DISABLEPHX = 1 << 4
    OPTIMIZEMEMORY = 1 << 5
    DISABLEBGLOAD = 1 << 6
class PhxFlags:
    """Bit flags for the physics subsystem (combine with |)."""
    TRACKMEM = 1 << 0
    PROFILE = 1 << 1
class InitParams(Structure):
    """ctypes mirror of the native engine's init-params struct.

    Field order and types must match the C definition exactly -- do not
    reorder ``_fields_`` entries.
    """
    class DevParams(Structure):
        """Development/debugging options (graph limits, web server, buffers)."""
        _fields_ = [\
            ('fpsgraph_max', c_int),
            ('ftgraph_max', c_int),
            ('webserver_port', c_int),
            ('buffsize_data', c_uint),
            ('buffsize_tmp', c_uint)]
    class PhxParams(Structure):
        """Physics subsystem settings (flags are PhxFlags values)."""
        _fields_ = [\
            ('flags', c_uint),
            ('mem_sz', c_uint),
            ('substeps_max', c_uint),
            ('scratch_sz', c_uint)]
    class SctParams(Structure):
        """Script subsystem settings."""
        _fields_ = [('mem_sz', c_uint)]
    class GfxParams(Structure):
        """Graphics settings; enum-like fields use the classes defined above
        (GfxFlags, MSAAMode, TextureQuality, TextureFilter, ShadingQuality,
        GfxHwVer)."""
        _fields_ = [\
            ('flags', c_uint),
            ('msaa_mode', c_uint),
            ('tex_quality', c_uint),
            ('tex_filter', c_uint),
            ('shading_quality', c_uint),
            ('hwver', c_uint),
            ('adapter_id', c_uint),
            ('width', c_uint),
            ('height', c_uint),
            ('refresh_rate', c_uint)]
    # Top-level layout: engine flags (EngFlags), console size, nested
    # subsystem structs and the console command list / data path.
    _fields_ = [\
        ('flags', c_uint),
        ('console_lines_max', c_uint),
        ('gfx', GfxParams),
        ('dev', DevParams),
        ('phx', PhxParams),
        ('sct', SctParams),
        ('console_cmds', c_char_p),
        ('console_cmds_cnt', c_uint),
        ('data_path', c_char_p)]
class _API:
    """Namespace holding the loaded native dhapp library's function handles.

    Call ``_API.init()`` once; it loads the shared library and binds
    restype/argtypes for every exported function used by this module.
    """
    is_init = False

    @staticmethod
    def init(debug = False):
        """Load the native library (the '-dbg' suffixed build when debug=True)
        and bind its exported functions onto this class. Exits the process on
        load failure."""
        if _API.is_init:
            return

        postfix = ''
        if debug:
            postfix = '-dbg'
        if sys.platform == 'win32':
            shlib = 'dhapp' + postfix + '.dll'
        elif sys.platform.startswith('linux'):
            # BUGFIX: sys.platform is 'linux2' on python2; startswith covers both.
            shlib = 'libdhapp' + postfix + '.so'
        else:
            # BUGFIX: previously `shlib` was left unbound here and the code
            # below crashed with a NameError instead of a clear message.
            dhlog.Log.fatal('unsupported platform: %s' % sys.platform)
            sys.exit(-1)

        # load library
        try:
            dhapplib = cdll.LoadLibrary(shlib)
        except Exception:
            # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are not swallowed.
            dhlog.Log.warn(str(sys.exc_info()[1]))
            dhlog.Log.fatal('could not load dynamic library %s' % shlib)
            sys.exit(-1)

        dhlog.Log.msgline('module "%s" loaded' % shlib, dhlog.TERM_GREEN)

        # app.h
        _API.app_config_default = dhapplib.app_config_default
        _API.app_config_default.restype = POINTER(InitParams)
        _API.app_config_load = dhapplib.app_config_load
        _API.app_config_load.restype = POINTER(InitParams)
        _API.app_config_load.argtypes = [c_char_p]
        _API.app_config_addconsolecmd = dhapplib.app_config_addconsolecmd
        _API.app_config_addconsolecmd.argtypes = [POINTER(InitParams), c_char_p]
        _API.app_config_unload = dhapplib.app_config_unload
        _API.app_config_unload.argtypes = [POINTER(InitParams)]
        _API.app_display_querymodes = dhapplib.app_display_querymodes
        _API.app_display_querymodes.restype = c_char_p
        _API.app_display_freemodes = dhapplib.app_display_freemodes
        _API.app_display_freemodes.argtypes = [c_char_p]
        _API.app_init = dhapplib.app_init
        _API.app_init.restype = c_int
        _API.app_init.argtypes = [c_char_p, POINTER(InitParams)]
        _API.app_release = dhapplib.app_release
        _API.app_window_alwaysactive = dhapplib.app_window_alwaysactive
        # BUGFIX: was `_API.argtypes = [c_int]`, which set a stray attribute
        # on this class instead of the foreign function's signature.
        _API.app_window_alwaysactive.argtypes = [c_int]
        _API.app_window_swapbuffers = dhapplib.app_window_swapbuffers
        _API.app_window_getheight = dhapplib.app_window_getheight
        _API.app_window_getheight.restype = c_uint
        _API.app_window_getwidth = dhapplib.app_window_getwidth
        _API.app_window_getwidth.restype = c_uint
        _API.app_window_resize = dhapplib.app_window_resize
        # BUGFIX: these were `_API.restype` / `_API.argtypes`, again setting
        # stray class attributes instead of app_window_resize's signature.
        _API.app_window_resize.restype = c_int
        _API.app_window_resize.argtypes = [c_uint, c_uint]
        _API.app_window_show = dhapplib.app_window_show
        _API.app_window_hide = dhapplib.app_window_hide
        _API.fn_app_update = CFUNCTYPE(None)
        _API.app_window_setupdatefn = dhapplib.app_window_setupdatefn
        _API.app_window_setupdatefn.argtypes = [_API.fn_app_update]
        _API.fn_app_keypress = CFUNCTYPE(None, c_byte, c_uint)
        _API.app_window_setkeypressfn = dhapplib.app_window_setkeypressfn
        _API.app_window_setkeypressfn.argtypes = [_API.fn_app_keypress]
        _API.fn_app_resize = CFUNCTYPE(None, c_uint, c_uint)
        _API.app_window_setresizefn = dhapplib.app_window_setresizefn
        _API.app_window_setresizefn.argtypes = [_API.fn_app_resize]
        _API.app_window_run = dhapplib.app_window_run

        # input.h
        _API.input_update = dhapplib.input_update
        _API.input_kb_getkey = dhapplib.input_kb_getkey
        _API.input_kb_getkey.restype = c_uint
        _API.input_kb_getkey.argtypes = [c_uint, c_int]
        _API.input_mouse_getkey = dhapplib.input_mouse_getkey
        _API.input_mouse_getkey.restype = c_uint
        _API.input_mouse_getkey.argtypes = [c_uint, c_int]
        _API.input_mouse_getpos = dhapplib.input_mouse_getpos
        _API.input_mouse_getpos.restype = POINTER(Vec2i)
        _API.input_mouse_getpos.argtypes = [POINTER(Vec2i)]
        _API.input_mouse_smooth = dhapplib.input_mouse_smooth
        _API.input_mouse_smooth.argtypes = [POINTER(c_float), POINTER(c_float), c_float, c_float,
            c_float, c_float]
        _API.input_mouse_lockcursor = dhapplib.input_mouse_lockcursor
        _API.input_mouse_lockcursor.argtypes = [c_int, c_int]
        _API.input_mouse_unlockcursor = dhapplib.input_mouse_unlockcursor

        # BUGFIX: is_init was never set, so the guard at the top never fired
        # and every init() call redid all of the above work.
        _API.is_init = True
class Config:
    """Pythonic wrapper over the native InitParams config struct.

    Loads values from a JSON file when a path is given (falling back to the
    engine defaults on failure), otherwise starts from the defaults. The
    underlying struct is owned by the native side and released in __del__.
    """

    def __init__(self, json_filepath=''):
        if json_filepath != '':
            self.params = _API.app_config_load(to_cstr(json_filepath))
            if not self.params:
                # Loading failed: warn and fall back to engine defaults.
                dhlog.Log.warn(Errors.last_error())
                self.params = _API.app_config_default()
        else:
            self.params = _API.app_config_default()

    def __del__(self):
        if self.params:
            _API.app_config_unload(self.params)
            del self.params

    def __set_datapath(self, path):
        self.params.contents.data_path = path.encode('ascii')
    def __get_datapath(self):
        return self.params.contents.data_path
    data_path = property(__get_datapath, __set_datapath)

    def __set_engineflags(self, flags):
        self.params.contents.flags = flags
    def __get_engineflags(self):
        return self.params.contents.flags
    engine_flags = property(__get_engineflags, __set_engineflags)

    def __set_gfxflags(self, flags):
        self.params.contents.gfx.flags = flags
    def __get_gfxflags(self):
        return self.params.contents.gfx.flags
    gfx_flags = property(__get_gfxflags, __set_gfxflags)

    def __set_gfxhwver(self, hwver):
        self.params.contents.gfx.hwver = hwver
    def __get_gfxhwver(self):
        return self.params.contents.gfx.hwver
    gfx_hwver = property(__get_gfxhwver, __set_gfxhwver)

    def __set_height(self, height):
        self.params.contents.gfx.height = height
    def __get_height(self):
        return self.params.contents.gfx.height
    height = property(__get_height, __set_height)

    def __set_width(self, width):
        self.params.contents.gfx.width = width
    def __get_width(self):
        return self.params.contents.gfx.width
    width = property(__get_width, __set_width)

    def __set_buffsizedata(self, buffsize):
        self.params.contents.dev.buffsize_data = buffsize
    def __get_buffsizedata(self):
        return self.params.contents.dev.buffsize_data
    buffsize_data = property(__get_buffsizedata, __set_buffsizedata)

    def __set_buffsizetmp(self, buffsize):
        self.params.contents.dev.buffsize_tmp = buffsize
    def __get_buffsizetmp(self):
        return self.params.contents.dev.buffsize_tmp
    buffsize_tmp = property(__get_buffsizetmp, __set_buffsizetmp)

    def __set_texturefilter(self, filter):
        self.params.contents.gfx.tex_filter = filter
    # BUGFIX: property getters are called with `self` only; the stray extra
    # parameter on these three getters made reading the properties raise
    # TypeError.
    def __get_texturefilter(self):
        return self.params.contents.gfx.tex_filter
    texture_filter = property(__get_texturefilter, __set_texturefilter)

    def __set_texturequality(self, quality):
        # BUGFIX: tex_quality lives in the nested gfx struct (see
        # InitParams.GfxParams), not on InitParams itself.
        self.params.contents.gfx.tex_quality = quality
    def __get_texturequality(self):
        return self.params.contents.gfx.tex_quality
    texture_quality = property(__get_texturequality, __set_texturequality)

    def __set_shadingquality(self, shquality):
        # BUGFIX: shading_quality also lives in the nested gfx struct.
        self.params.contents.gfx.shading_quality = shquality
    def __get_shadingquality(self):
        return self.params.contents.gfx.shading_quality
    shading_quality = property(__get_shadingquality, __set_shadingquality)

    def add_console_command(self, cmd):
        """Queue a console command to be executed at engine startup."""
        _API.app_config_addconsolecmd(self.params, to_cstr(cmd))
class Key:
    """Key codes accepted by Input.is_keydown (passed through to the native
    input_kb_getkey); layout follows a US keyboard, NUM_* are the numpad."""
    ESC = 0
    F1 = 1
    F2 = 2
    F3 = 3
    F4 = 4
    F5 = 5
    F6 = 6
    F7 = 7
    F8 = 8
    F9 = 9
    F10 = 10
    F11 = 11
    F12 = 12
    PRINTSCREEN = 13
    BREAK = 14
    TILDE = 15
    N1 = 16
    N2 = 17
    N3 = 18
    N4 = 19
    N5 = 20
    N6 = 21
    N7 = 22
    N8 = 23
    N9 = 24
    N0 = 25
    DASH = 26
    EQUAL = 27
    BACKSPACE = 28
    TAB = 29
    Q = 30
    W = 31
    E = 32
    R = 33
    T = 34
    Y = 35
    U = 36
    I = 37
    O = 38
    P = 39
    BRACKET_OPEN = 40
    BRACKET_CLOSE = 41
    BACKSLASH = 42
    CAPS = 43
    A = 44
    S = 45
    D = 46
    F = 47
    G = 48
    H = 49
    J = 50
    K = 51
    L = 52
    SEMICOLON = 53
    QUOTE = 54
    ENTER = 55
    LSHIFT = 56
    Z = 57
    X = 58
    C = 59
    V = 60
    B = 61
    N = 62
    M = 63
    COMMA = 64
    DOT = 65
    SLASH = 66
    RSHIFT = 67
    LCTRL = 68
    LALT = 69
    SPACE = 70
    RALT = 71
    RCTRL = 72
    DELETE = 73
    INSERT = 74
    HOME = 75
    END = 76
    PGUP = 77
    PGDWN = 78
    UP = 79
    DOWN = 80
    LEFT = 81
    RIGHT = 82
    NUM_SLASH = 83
    NUM_MULTIPLY = 84
    NUM_MINUS = 85
    NUM_PLUS = 86
    NUM_ENTER = 87
    NUM_DOT = 88
    NUM_1 = 89
    NUM_2 = 90
    NUM_3 = 91
    NUM_4 = 92
    NUM_5 = 93
    NUM_6 = 94
    NUM_7 = 95
    NUM_8 = 96
    NUM_9 = 97
    NUM_0 = 98
    NUM_LOCK = 99
class MouseKey:
    """Mouse button identifiers accepted by Input.is_mousedown."""
    LEFT = 0
    RIGHT = 1
    MIDDLE = 2
    PGUP = 3
    PGDOWN = 4
class Input:
    """Polling helpers for keyboard and mouse; all calls are no-ops (returning
    None) until App.init has run."""

    # Last (optionally smoothed) cursor position, carried across update() calls.
    __mpos = Vec2()

    @staticmethod
    def is_keydown(key, once = False):
        """Query the native key state for `key` (a Key constant)."""
        if App.is_init:
            return _API.input_kb_getkey(c_uint(key), c_int(once))

    @staticmethod
    def is_mousedown(key, once = False):
        """Query the native button state for `key` (a MouseKey constant)."""
        if App.is_init:
            return _API.input_mouse_getkey(c_uint(key), c_int(once))

    @staticmethod
    def __update_mouse_pos(smooth = True, smoothness = 60, dt = 0):
        """Refresh the cached cursor position, optionally smoothing it."""
        if not App.is_init:
            return
        raw = Vec2i()
        _API.input_mouse_getpos(byref(raw))
        if not smooth:
            Input.__mpos.x = raw.x
            Input.__mpos.y = raw.y
            return
        sx = c_float(Input.__mpos.x)
        sy = c_float(Input.__mpos.y)
        _API.input_mouse_smooth(byref(sx), byref(sy),
            c_float(raw.x), c_float(raw.y), c_float(smoothness), c_float(dt))
        Input.__mpos = Vec2(sx.value, sy.value)

    @staticmethod
    def lock_cursor():
        """Pin the cursor at its current cached position."""
        if App.is_init:
            _API.input_mouse_lockcursor(int(Input.__mpos.x), int(Input.__mpos.y))

    @staticmethod
    def unlock_cursor():
        if App.is_init:
            _API.input_mouse_unlockcursor()

    @staticmethod
    def get_mousepos():
        """Return the cached cursor position (a Vec2)."""
        if App.is_init:
            return Input.__mpos

    @staticmethod
    def update(dt):
        """Pump native input and refresh the smoothed cursor position."""
        if App.is_init:
            _API.input_update()
            Input.__update_mouse_pos(smooth=True, dt=dt)
class App:
    """Static wrapper around the native application window lifecycle."""
    is_init = False

    @staticmethod
    def init(title, conf):
        """Create the main window from a Config; raises on native failure."""
        r = _API.app_init(to_cstr(title), conf.params)
        if IS_FAIL(r):
            raise Exception(Errors.last_error())
        # BUGFIX: is_init must be set before show_window(), which is guarded
        # by `if App.is_init` and was previously a silent no-op here.
        App.is_init = True
        App.show_window()

    @staticmethod
    def init_d3d_device(native_wnd_handle, name, conf):
        """Initialize a D3D device on an existing native window handle
        (Windows only). Exits the process on failure."""
        postfix = ''
        if ('--debug' in sys.argv):
            postfix = '-dbg'
        if sys.platform == 'win32':
            shlib = 'dhapp' + postfix + '.dll'
        else:
            raise Exception('d3d device not implemented for this platform')

        # load library
        try:
            hwnd = int(native_wnd_handle)
            dhapplib = cdll.LoadLibrary(shlib)
            dhlog.Log.msgline('module "%s" loaded' % shlib, dhlog.TERM_GREEN)
            # fetch only d3d_initdev function
            app_d3d_initdev = dhapplib.app_d3d_initdev
            app_d3d_initdev.restype = c_int
            app_d3d_initdev.argtypes = [c_void_p, c_char_p, POINTER(InitParams)]

            if IS_FAIL(app_d3d_initdev(c_void_p(hwnd), to_cstr(name), conf.params)):
                raise Exception(Errors.last_error())
        except Exception:
            # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are not swallowed before the fatal log.
            dhlog.Log.fatal(str(sys.exc_info()[1]))
            sys.exit(-1)

    @staticmethod
    def show_window():
        if App.is_init:
            _API.app_window_show()

    @staticmethod
    def hide_window():
        if App.is_init:
            _API.app_window_hide()

    @staticmethod
    def run():
        """Enter the native window/event loop (blocks until the app quits)."""
        if App.is_init:
            _API.app_window_run()

    @staticmethod
    def release():
        """Release native resources and mark the app uninitialized."""
        _API.app_release()
        App.is_init = False

    @staticmethod
    def set_events(events):
        """Install an AppEvents instance's callbacks on the native window."""
        if App.is_init:
            _API.app_window_setupdatefn(events.get_update())
            _API.app_window_setkeypressfn(events.get_keypress())
            _API.app_window_setresizefn(events.get_resize())

    @staticmethod
    def swapbuffers():
        _API.app_window_swapbuffers()

    @staticmethod
    def resize_view(width, height):
        _API.app_window_resize(c_uint(width), c_uint(height))
class AppEvents:
    """Base class for window callbacks: subclass and override the on_*
    handlers. The get_* methods wrap a handler in a ctypes callback and stash
    the wrapper on the class so the function pointer is not garbage collected
    while the native side still holds it."""

    def get_update(self):
        AppEvents.pfn_update_callback = _API.fn_app_update(
            lambda: self.on_update())
        return AppEvents.pfn_update_callback

    def get_resize(self):
        AppEvents.pfn_resize_callback = _API.fn_app_resize(
            lambda width, height: self.on_resize(width, height))
        return AppEvents.pfn_resize_callback

    def get_keypress(self):
        AppEvents.pfn_keypress_callback = _API.fn_app_keypress(
            lambda ch_code, vkey: self.on_keypress(ch_code, vkey))
        return AppEvents.pfn_keypress_callback

    def on_create(self):
        pass

    def on_destroy(self):
        pass

    def on_resize(self, width, height):
        # Default behavior: resize the native view to match the window.
        _API.app_window_resize(c_uint(width), c_uint(height))

    def on_keypress(self, ch_code, vkey):
        pass

    def on_mousedown(self, x, y, mouse_key):
        pass

    def on_mouseup(self, x, y, mouse_key):
        pass

    def on_mousemove(self, x, y):
        pass

    def on_update(self):
        pass
# Load the native library at import time; pass --debug to use the debug build.
_API.init(debug = ('--debug' in sys.argv))
| |
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The ``spyne.model.binary`` package contains binary type markers."""
import os
import base64
import tempfile
from base64 import b64encode
from base64 import b64decode
from base64 import urlsafe_b64encode
from base64 import urlsafe_b64decode
from binascii import hexlify
from binascii import unhexlify
from os.path import abspath, isdir
from spyne.util.six import StringIO
from spyne.error import ValidationError
from spyne.util import _bytes_join
from spyne.model import ModelBase, ComplexModel, Unicode
from spyne.model import SimpleModel
from spyne.util import six
# Sentinel marker classes used as enum-like values for the `encoding`
# attribute of ByteArray/File; they are compared by identity and never
# instantiated.
class BINARY_ENCODING_HEX: pass
class BINARY_ENCODING_BASE64: pass
class BINARY_ENCODING_USE_DEFAULT: pass
class BINARY_ENCODING_URLSAFE_BASE64: pass
class ByteArray(SimpleModel):
    """Canonical container for arbitrary data. Every protocol has a different
    way of encapsulating this type. E.g. xml-based protocols encode this as
    base64, while HttpRpc just hands it over.

    Its native python format is a sequence of ``str`` objects for Python 2.x
    and a sequence of ``bytes`` objects for Python 3.x.
    """

    __type_name__ = 'base64Binary'
    __namespace__ = "http://www.w3.org/2001/XMLSchema"

    class Attributes(SimpleModel.Attributes):
        encoding = BINARY_ENCODING_USE_DEFAULT
        """The binary encoding to use when the protocol does not enforce an
        encoding for binary data.

        One of (None, 'base64', 'hex')
        """

    def __new__(cls, **kwargs):
        """Customize the type; normalizes the 'encoding' keyword to one of
        the BINARY_ENCODING_* markers and adjusts the XML type name to
        match."""
        tn = None
        if 'encoding' in kwargs:
            v = kwargs['encoding']

            if v is None:
                kwargs['encoding'] = BINARY_ENCODING_USE_DEFAULT

            elif v in ('base64', 'base64Binary', BINARY_ENCODING_BASE64):
                # This string is defined in the Xml Schema Standard
                tn = 'base64Binary'
                kwargs['encoding'] = BINARY_ENCODING_BASE64

            elif v in ('urlsafe_base64', BINARY_ENCODING_URLSAFE_BASE64):
                # the Xml Schema Standard does not define urlsafe base64
                # FIXME: produce a regexp that validates urlsafe base64 strings
                tn = 'string'
                kwargs['encoding'] = BINARY_ENCODING_URLSAFE_BASE64

            elif v in ('hex', 'hexBinary', BINARY_ENCODING_HEX):
                # This string is defined in the Xml Schema Standard
                tn = 'hexBinary'
                kwargs['encoding'] = BINARY_ENCODING_HEX

            else:
                # BUGFIX: the old message read ByteArray._encoding.handlers,
                # an attribute that does not exist, so this branch raised
                # AttributeError instead of the intended ValueError.
                raise ValueError(
                    "'encoding' must be None or one of: "
                    "'base64', 'urlsafe_base64', 'hex'")

        retval = cls.customize(**kwargs)
        if tn is not None:
            retval.__type_name__ = tn
        return retval

    @staticmethod
    def is_default(cls):
        # NOTE(review): takes `cls` despite being a @staticmethod -- kept
        # as-is because callers pass the class explicitly; confirm against
        # the ModelBase.is_default signature before changing.
        return True

    @classmethod
    def to_base64(cls, value):
        """Encode a sequence of bytes chunks as one base64 string."""
        return b64encode(b''.join(value))

    @classmethod
    def from_base64(cls, value):
        """Decode base64 input (str or bytes chunks) into a one-element
        list of bytes; invalid input becomes a ValidationError."""
        joiner = type(value)()  # '' for str input, b'' for bytes input
        try:
            return [b64decode(joiner.join(value))]
        except TypeError:
            raise ValidationError(value)

    @classmethod
    def to_urlsafe_base64(cls, value):
        return urlsafe_b64encode(_bytes_join(value))

    @classmethod
    def from_urlsafe_base64(cls, value):
        #FIXME: Find out why we need to do this.
        if isinstance(value, six.text_type):
            value = value.encode('utf8')
        return [urlsafe_b64decode(_bytes_join(value))]

    @classmethod
    def to_hex(cls, value):
        return hexlify(_bytes_join(value))

    @classmethod
    def from_hex(cls, value):
        return [unhexlify(_bytes_join(value))]
# Dispatch tables mapping BINARY_ENCODING_* markers to the (en|de)coder
# classmethods above; protocols look encodings up here.
# NOTE(review): the None encoder is ''.join (text) while the others join
# bytes -- looks like the None entry expects str chunks; confirm at call
# sites before changing.
binary_encoding_handlers = {
    None: ''.join,
    BINARY_ENCODING_HEX: ByteArray.to_hex,
    BINARY_ENCODING_BASE64: ByteArray.to_base64,
    BINARY_ENCODING_URLSAFE_BASE64: ByteArray.to_urlsafe_base64,
}

binary_decoding_handlers = {
    None: lambda x: [x],
    BINARY_ENCODING_HEX: ByteArray.from_hex,
    BINARY_ENCODING_BASE64: ByteArray.from_base64,
    BINARY_ENCODING_URLSAFE_BASE64: ByteArray.from_urlsafe_base64,
}
class HybridFileStore(object):
    def __init__(self, store_path, db_format='json', type=None):
        """Marker to be passed to File's store_as to denote a hybrid
        Sql/Filesystem storage scheme.

        :param store_path: The path where the file contents are stored. This is
            converted to an absolute path if it's not already one.
        :param db_format: The format (and the relevant column type) used to
            store file metadata. Currently only 'json' is implemented.
        :param type: Optional type marker; stored as-is and not interpreted
            here.
        """
        self.store = abspath(store_path)
        self.db_format = db_format
        self.type = type

        if not isdir(self.store):
            # BUGFIX: tolerate a concurrent creator -- makedirs raises
            # OSError if the directory appears between the isdir() check
            # and this call; the assert below still validates the result.
            try:
                os.makedirs(self.store)
            except OSError:
                pass

        assert isdir(self.store)
# Marker types distinguishing binary vs text file contents; exposed as
# File.BINARY / File.TEXT and compared by identity.
_BINARY = type('FileTypeBinary', (object,), {})
_TEXT = type('FileTypeText', (object,), {})
class _Value(ComplexModel):
    """The class for values marked as ``File``.

    :param name: Original name of the file
    :param path: Current path to the file.
    :param type: The mime type of the file's contents.
    :param data: Optional sequence of ``str`` or ``bytes`` instances
        that contain the file's data.
    :param handle: :class:`file` object that contains the file's data.
        It is ignored unless the ``path`` argument is ``None``.
    """

    _type_info = [
        ('name', Unicode(encoding='utf8')),
        ('type', Unicode),
        ('data', ByteArray(logged='len')),
    ]

    def __init__(self, name=None, path=None, type='application/octet-stream',
                 data=None, handle=None, move=False):
        self.name = name
        if self.name is not None:
            # Reject anything that is not a bare file name -- guards
            # against path traversal via user-supplied names.
            if not os.path.basename(self.name) == self.name:
                raise ValidationError(self.name,
                    "File name %r should not contain any '/' char")

        self.path = path
        self.type = type
        self.data = data
        self.handle = handle
        self.move = move
        self.abspath = None
        if self.path is not None:
            self.abspath = abspath(self.path)

    def rollover(self):
        """This method normalizes the file object by making ``path``,
        ``name`` and ``handle`` properties consistent. It writes
        incoming data to the file object; ``data`` is reset to None
        once it has been flushed to disk.
        """
        iter(self.data)  # fail fast (TypeError) when data is not iterable

        if self.path is None:
            handle, self.path = tempfile.mkstemp()
            f = os.fdopen(handle, 'wb')
        else:
            assert os.path.isabs(self.path)
            f = open(self.path, 'wb')

        if self.name is None:
            self.name = os.path.basename(self.path)

        # BUGFIX: close the file even when a write raises, so the
        # descriptor (and the mkstemp handle) is not leaked.
        try:
            for data in self.data:
                f.write(data)
        finally:
            f.close()

        self.data = None
class File(SimpleModel):
    """A compact way of dealing with incoming files for protocols with a
    standard way of encoding file metadata along with binary data. (E.g. Http)
    """

    __type_name__ = 'base64Binary'
    __namespace__ = "http://www.w3.org/2001/XMLSchema"

    BINARY = _BINARY
    # BUGFIX: was `TEXT = _BINARY`, which made File.TEXT and File.BINARY the
    # same marker and left _TEXT unused; Attributes.contents documents them
    # as distinct options.
    TEXT = _TEXT
    Value = _Value

    class Attributes(SimpleModel.Attributes):
        encoding = BINARY_ENCODING_USE_DEFAULT
        """The binary encoding to use when the protocol does not enforce an
        encoding for binary data.

        One of (None, 'base64', 'hex')
        """

        type = _Value
        """The native type used to serialize the information in the file object.
        """

        contents = _BINARY
        """Set this to type=File.TEXT if you're sure you're handling unicode
        data. This lets serializers like HtmlCloth avoid base64 encoding. Do
        note that you still need to set encoding attribute explicitly to None!..

        One of (File.BINARY, File.TEXT)
        """

    @classmethod
    def to_base64(cls, value):
        """Generator yielding the base64-encoded contents of the file at
        ``value.path``, chunk by chunk."""
        if value is None:
            # BUGFIX: `raise StopIteration()` inside a generator becomes a
            # RuntimeError under PEP 479 (python 3.7+); a plain return ends
            # the generator the same way.
            return

        assert value.path, "You need to write data to persistent storage first " \
                           "if you want to read it back."

        # BUGFIX: use a context manager so the file is closed even when the
        # consumer abandons the generator or a read fails.
        with open(value.path, 'rb') as f:
            # base64 turns every 3 input bytes into 4 output characters, so
            # the chunk size must be a multiple of 3 -- otherwise every chunk
            # is padded with '=' and the concatenated stream is not valid
            # base64. BUGFIX: 0x4001 (16385) is not a multiple of 3; 0x4002
            # (16386) is.
            data = f.read(0x4002)
            while len(data) > 0:
                yield base64.b64encode(data)
                data = f.read(0x4002)

    @classmethod
    def from_base64(cls, value):
        """Decode a whole base64 payload into a File.Value (or None)."""
        if value is None:
            return None
        return File.Value(data=[base64.b64decode(value)])

    def __repr__(self):
        # NOTE(review): File itself does not define name/path/type/data;
        # this repr only makes sense on value-like instances -- confirm
        # intended receiver before relying on it.
        return "File(name=%r, path=%r, type=%r, data=%r)" % \
            (self.name, self.path, self.type, self.data)

    @classmethod
    def store_as(cls, what):
        """Shortcut for customizing the storage scheme (e.g. HybridFileStore)."""
        return cls.customize(store_as=what)
# **DEPRECATED!** Use ByteArray or File instead.
class Attachment(ModelBase):
    """Binary attachment backed by in-memory data and/or a file on disk.

    **DEPRECATED** -- use ByteArray or File instead.
    """

    __type_name__ = 'base64Binary'
    __namespace__ = "http://www.w3.org/2001/XMLSchema"

    def __init__(self, data=None, file_name=None):
        self.data = data
        self.file_name = file_name

    def save_to_file(self):
        """This method writes the data to the specified file.  This method
        assumes that the file_name is the full path to the file to be written.
        This method also assumes that self.data is the base64 decoded data,
        and will do no additional transformations on it, simply write it to
        disk.
        """
        if not self.data:
            raise Exception("No data to write")

        if not self.file_name:
            raise Exception("No file_name specified")

        # BUGFIX: context manager closes the file even if the write raises.
        with open(self.file_name, 'wb') as f:
            f.write(self.data)

    def load_from_file(self):
        """This method loads the data from the specified file, and does
        no encoding/decoding of the data
        """
        if not self.file_name:
            raise Exception("No file_name specified")

        with open(self.file_name, 'rb') as f:
            self.data = f.read()

    @classmethod
    def to_base64(cls, value):
        """Base64-encode the attachment's data (or its backing file)."""
        if value is None:
            return None

        ostream = StringIO()
        if not (value.data is None):
            istream = StringIO(value.data)
        elif not (value.file_name is None):
            istream = open(value.file_name, 'rb')
        else:
            raise ValueError("Neither data nor a file_name has been specified")

        # BUGFIX: close the input stream (notably the opened file) even on
        # encoding errors.
        # NOTE(review): base64.encode expects binary file objects -- mixing
        # it with text-mode StringIO is py2-era behavior; confirm under py3.
        try:
            base64.encode(istream, ostream)
        finally:
            istream.close()
        ostream.seek(0)

        return ostream.read()

    @classmethod
    def from_base64(cls, value):
        """Decode a base64 payload into a new Attachment (or None)."""
        if value is None:
            return None

        istream = StringIO(value)
        ostream = StringIO()

        base64.decode(istream, ostream)
        ostream.seek(0)

        return Attachment(data=ostream.read())
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import time
import traceback
import jsonschema
from novaclient import exceptions as nova_exc
import six
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally import consts
from rally import exceptions
# Module-level logger (rally's logging wrapper).
LOG = logging.getLogger(__name__)
def get_status(resource, status_attr="status"):
    """Get the status of a given resource object.

    The status is returned in upper case. The status is checked for the
    standard field names with special cases for Heat ("stack_status") and
    Ceilometer ("state") before falling back to *status_attr*.

    :param resource: The resource object or dict.
    :param status_attr: Allows to specify non-standard status fields.
    :return: The status or "NONE" if it is not available.
    """
    for s_attr in ["stack_status", "state", status_attr]:
        status = getattr(resource, s_attr, None)
        if isinstance(status, six.string_types):
            return status.upper()

    # Dict case: look the attribute up as a key instead.
    # (dict.get replaces the old `key in resource.keys()` membership test.)
    if (isinstance(resource, dict) and
            isinstance(resource.get(status_attr), six.string_types)):
        return resource[status_attr].upper()

    return "NONE"
class resource_is(object):
    """Predicate object: true when a resource's status equals the desired one
    (compared upper-cased)."""

    def __init__(self, desired_status, status_getter=None):
        self.desired_status = desired_status
        # Fall back to the module-level get_status when no getter is given.
        self.status_getter = status_getter or get_status

    def __call__(self, resource):
        current = self.status_getter(resource)
        return current == self.desired_status.upper()

    def __str__(self):
        return str(self.desired_status)
def get_from_manager(error_statuses=None):
    """Build an update function that refetches a resource from its manager.

    The returned callable raises GetResourceNotFound when the resource is
    gone (404 or a DELETED-like status), GetResourceErrorStatus when its
    status is in *error_statuses*, and GetResourceFailure on other client
    errors.

    :param error_statuses: statuses treated as fatal (default: ["ERROR"])
    """
    error_statuses = error_statuses or ["ERROR"]
    # BUGFIX: the old code used map() with a lambda that shadowed the
    # builtin `str`; under Python 3 map() returns a one-shot iterator, so
    # the `status in error_statuses` membership test below would only work
    # the first time the closure was called.
    error_statuses = [s.upper() for s in error_statuses]

    def _get_from_manager(resource):
        # catch client side errors
        try:
            res = resource.manager.get(resource.id)
        except Exception as e:
            if getattr(e, "code", getattr(e, "http_status", 400)) == 404:
                raise exceptions.GetResourceNotFound(resource=resource)
            raise exceptions.GetResourceFailure(resource=resource, err=e)

        # catch abnormal status, such as "no valid host" for servers
        status = get_status(res)

        if status in ("DELETED", "DELETE_COMPLETE"):
            raise exceptions.GetResourceNotFound(resource=res)
        if status in error_statuses:
            raise exceptions.GetResourceErrorStatus(
                resource=res, status=status,
                fault=getattr(res, "fault", "n/a"))

        return res

    return _get_from_manager
def manager_list_size(sizes):
    """Build a predicate that is true when the manager's list() length is
    one of *sizes*."""
    def _matches(mgr):
        return len(mgr.list()) in sizes
    return _matches
@utils.log_deprecated("Use wait_for_status instead.", "0.1.2", once=True)
def wait_for(resource, is_ready=None, ready_statuses=None,
             failure_statuses=None, status_attr="status", update_resource=None,
             timeout=60, check_interval=1):
    """Wait until the given resource becomes ready (deprecated dispatcher).

    Delegates to :func:`wait_is_ready` when an ``is_ready`` predicate is
    supplied, otherwise to :func:`wait_for_status` using the status lists.

    :param resource: resource object (or dict) to poll
    :param is_ready: predicate taking the resource and returning True iff it
        is ready; takes precedence over the status-list mode
    :param ready_statuses: statuses which mean the resource is ready
    :param failure_statuses: statuses which mean waiting has failed
    :param status_attr: name of the status attribute/key on the resource
    :param update_resource: callable refreshing the resource between checks
        (None disables refreshing in predicate mode)
    :param timeout: seconds after which a TimeoutException is raised
    :param check_interval: seconds between two consecutive readiness checks
    :returns: the "ready" resource object
    """
    if is_ready is None:
        return wait_for_status(resource=resource,
                               ready_statuses=ready_statuses,
                               failure_statuses=failure_statuses,
                               status_attr=status_attr,
                               update_resource=update_resource,
                               timeout=timeout,
                               check_interval=check_interval)
    return wait_is_ready(resource=resource, is_ready=is_ready,
                         update_resource=update_resource, timeout=timeout,
                         check_interval=check_interval)
@utils.log_deprecated("Use wait_for_status instead.", "0.1.2", once=True)
def wait_is_ready(resource, is_ready, update_resource=None,
                  timeout=60, check_interval=1):
    """Poll *resource* (refreshing it via *update_resource* when given) until
    the *is_ready* predicate accepts it; raise TimeoutException after
    *timeout* seconds."""
    label = getattr(resource, "name", repr(resource))
    started_at = time.time()

    while True:
        if update_resource is not None:
            resource = update_resource(resource)
        if is_ready(resource):
            return resource

        time.sleep(check_interval)
        elapsed = time.time() - started_at
        if elapsed > timeout:
            raise exceptions.TimeoutException(
                desired_status=str(is_ready),
                resource_name=label,
                resource_type=resource.__class__.__name__,
                resource_id=getattr(resource, "id", "<no id>"),
                resource_status=get_status(resource))
def wait_for_status(resource, ready_statuses, failure_statuses=None,
                    status_attr="status", update_resource=None,
                    timeout=60, check_interval=1, check_deletion=False):
    """Wait until the resource reaches one of the ready statuses.

    The status is read from the resource's ``status_attr`` attribute and
    the resource is re-fetched with ``update_resource`` between checks.
    Reaching a status from ``failure_statuses`` raises
    GetResourceErrorStatus immediately.

    :param resource: resource object to wait on
    :param ready_statuses: set/list/tuple of statuses meaning "ready"
    :param failure_statuses: set/list/tuple of statuses meaning "failed"
    :param status_attr: name of the status attribute of the resource
    :param update_resource: callable taking the resource and returning an
                            updated one; required
    :param timeout: seconds before TimeoutException is raised
    :param check_interval: seconds between two consecutive checks
    :param check_deletion: if True, GetResourceNotFound raised by
                           update_resource is treated as successful
                           deletion and the function returns None
    :returns: the "ready" resource object
    """
    resource_repr = getattr(resource, "name", repr(resource))
    if not isinstance(ready_statuses, (set, list, tuple)):
        raise ValueError("Ready statuses should be supplied as set, list or "
                         "tuple")
    if failure_statuses and not isinstance(failure_statuses,
                                           (set, list, tuple)):
        raise ValueError("Failure statuses should be supplied as set, list or "
                         "tuple")

    # make all statuses upper case
    ready_statuses = set(s.upper() for s in ready_statuses or [])
    failure_statuses = set(s.upper() for s in failure_statuses or [])

    if ready_statuses & failure_statuses:
        # BUG FIX: the message previously read "...Ready and Failurestatuses
        # conflict." because the implicit string concatenation lacked a space.
        raise ValueError(
            "Can't wait for resource's %s status. Ready and Failure "
            "statuses conflict." % resource_repr)
    if not ready_statuses:
        raise ValueError(
            "Can't wait for resource's %s status. No ready "
            "statuses provided" % resource_repr)
    if not update_resource:
        raise ValueError(
            "Can't wait for resource's %s status. No update method."
            % resource_repr)

    start = time.time()

    latest_status = get_status(resource, status_attr)
    latest_status_update = start

    while True:
        try:
            resource = update_resource(resource)
        except exceptions.GetResourceNotFound:
            # Disappearing is a success when the caller is waiting for
            # deletion; otherwise it is an error worth propagating.
            if check_deletion:
                return
            else:
                raise
        status = get_status(resource, status_attr)

        if status != latest_status:
            current_time = time.time()
            delta = current_time - latest_status_update
            LOG.debug(
                "Waiting for resource %(resource)s. Status changed: "
                "%(latest)s => %(current)s in %(delta)s" %
                {"resource": resource_repr, "latest": latest_status,
                 "current": status, "delta": delta})

            latest_status = status
            latest_status_update = current_time

        if status in ready_statuses:
            return resource
        if status in failure_statuses:
            raise exceptions.GetResourceErrorStatus(
                resource=resource,
                status=status,
                fault="Status in failure list %s" % str(failure_statuses))

        time.sleep(check_interval)
        if time.time() - start > timeout:
            raise exceptions.TimeoutException(
                desired_status=ready_statuses,
                resource_name=resource_repr,
                resource_type=resource.__class__.__name__,
                resource_id=getattr(resource, "id", "<no id>"),
                # BUG FIX: honour status_attr here; the original fell back
                # to the default "status" attribute in the timeout report.
                resource_status=get_status(resource, status_attr))
@utils.log_deprecated("Use wait_for_status instead.", "0.1.2", once=True)
def wait_for_delete(resource, update_resource=None, timeout=60,
                    check_interval=1):
    """Wait for the full deletion of resource.

    :param update_resource: Function that should take the resource object
                            and return an 'updated' resource, or raise
                            exception rally.exceptions.GetResourceNotFound
                            that means that resource is deleted.
    :param timeout: Timeout in seconds after which a TimeoutException will be
                    raised
    :param check_interval: Interval in seconds between the two consecutive
                           readiness checks
    """
    deadline = time.time() + timeout

    while True:
        try:
            resource = update_resource(resource)
        except exceptions.GetResourceNotFound:
            # The resource is gone -- exactly what we were waiting for.
            break

        time.sleep(check_interval)
        if time.time() > deadline:
            raise exceptions.TimeoutException(
                desired_status="deleted",
                resource_name=getattr(resource, "name", repr(resource)),
                resource_type=resource.__class__.__name__,
                resource_id=getattr(resource, "id", "<no id>"),
                resource_status=get_status(resource))
def format_exc(exc):
    """Return [exception class name, message, current traceback text]."""
    class_name = type(exc).__name__
    return [class_name, str(exc), traceback.format_exc()]
def infinite_run_args_generator(args_func):
    """Yield args_func(0), args_func(1), args_func(2), ... forever."""
    iteration = 0
    while True:
        yield args_func(iteration)
        iteration += 1
def check_service_status(client, service_name):
    """Check if given openstack service is enabled and state is up."""
    try:
        for service in client.services.list():
            matches = (service_name in str(service)
                       and service.status == "enabled"
                       and service.state == "up")
            if matches:
                return True
    except nova_exc.NotFound:
        LOG.warning(_("Unable to retrieve a list of available services from "
                      "nova. Pre-Grizzly OpenStack deployment?"))
        return False
    return False
class ActionBuilder(object):
    """Builder class for mapping and creating action objects.

    An action list is an array of single key/value dicts which takes
    the form:

    [{"action": times}, {"action": times}...]

    Here 'action' is a string which indicates a action to perform and
    'times' is a non-zero positive integer which specifies how many
    times to run the action in sequence.

    This utility builder class will build and return methods which
    wrapper the action call the given amount of times.
    """

    SCHEMA_TEMPLATE = {
        "type": "array",
        "$schema": consts.JSON_SCHEMA,
        "items": {
            "type": "object",
            "properties": {},
            "additionalProperties": False,
            "minItems": 0
        }
    }

    ITEM_TEMPLATE = {
        "type": "integer",
        "minimum": 0,
        "exclusiveMinimum": True,
        "optional": True
    }

    def __init__(self, action_keywords):
        """Create a new instance of the builder for the given action keywords.

        :param action_keywords: A list of strings which are the keywords this
                                instance of the builder supports.
        """
        self._bindings = {}
        # BUG FIX: the original used dict(ActionBuilder.SCHEMA_TEMPLATE),
        # a *shallow* copy, so every instance (and the class template)
        # shared the nested "items"/"properties" dicts -- keywords
        # registered on one builder leaked into all other builders.
        # Build a fresh nested schema per instance instead.
        self.schema = {
            "type": "array",
            "$schema": consts.JSON_SCHEMA,
            "items": {
                "type": "object",
                "properties": {},
                "additionalProperties": False,
                "minItems": 0
            }
        }
        for kw in action_keywords:
            self.schema["items"]["properties"][kw] = (
                ActionBuilder.ITEM_TEMPLATE)

    def bind_action(self, action_key, action, *args, **kwargs):
        """Bind an action to an action key.

        Static args/kwargs can be optionally bound.

        :param action_key: The action keyword to bind the action to.
        :param action: A method/function to call for the action.
        :param args: (optional) Static positional args to prepend
                     to all invocations of the action.
        :param kwargs: (optional) Static kwargs to prepend to all
                       invocations of the action.
        """
        # Validates the key against the schema before registering it.
        self.validate([{action_key: 1}])
        self._bindings[action_key] = {
            "action": action,
            "args": args or (),
            "kwargs": kwargs or {}
        }

    def validate(self, actions):
        """Validate the list of action objects against the builder schema.

        :param actions: The list of action objects to validate.
        """
        jsonschema.validate(actions, self.schema)

    def _build(self, func, times, *args, **kwargs):
        """Return a zero-arg callable invoking ``func`` ``times`` times."""
        def _f():
            for _ in range(times):
                func(*args, **kwargs)
        return _f

    def build_actions(self, actions, *args, **kwargs):
        """Build a list of callable actions.

        A list of callable actions based on the given action object list and
        the actions bound to this builder.

        :param actions: A list of action objects to build callable
                        action for.
        :param args: (optional) Positional args to pass into each
                     built action. These will be appended to any args set for
                     the action via its binding.
        :param kwargs: (optional) Keyword args to pass into each built
                       action. These will be appended to any kwargs set for
                       the action via its binding.
        """
        self.validate(actions)

        bound_actions = []
        for action in actions:
            action_key = list(action)[0]
            times = action.get(action_key)
            binding = self._bindings.get(action_key)
            # Per-call kwargs override the statically bound ones.
            dft_kwargs = dict(binding["kwargs"])
            dft_kwargs.update(kwargs or {})
            bound_actions.append(
                self._build(binding["action"], times,
                            *(binding["args"] + args), **dft_kwargs))

        return bound_actions
| |
"""
Contains the :class:`database <tinydb.database.TinyDB>` and
:class:`tables <tinydb.database.Table>` implementation.
"""
from . import JSONStorage
from .utils import LRUCache, iteritems, itervalues
class Element(dict):
    """
    Represents an element stored in the database.

    A plain ``dict`` subclass that additionally carries the element's id
    as ``el.eid``, giving callers transparent access to both the stored
    mapping and its identity.
    """

    def __init__(self, value=None, eid=None, **kwargs):
        super(Element, self).__init__(**kwargs)

        self.eid = eid
        if value is not None:
            self.update(value)
class StorageProxy(object):
    """Scoped view onto a storage, reading/writing one named table."""

    def __init__(self, storage, table_name):
        self._storage = storage
        self._table_name = table_name

    def read(self):
        """Return this table's data as {int eid: Element}, or {} if absent."""
        raw = self._storage.read() or {}
        if self._table_name not in raw:
            return {}

        return dict(
            (int(key), Element(val, int(key)))
            for key, val in iteritems(raw[self._table_name])
        )

    def write(self, values):
        """Replace this table's data, leaving other tables untouched."""
        data = self._storage.read() or {}
        data[self._table_name] = values

        self._storage.write(data)
class TinyDB(object):
    """
    The main class of TinyDB.

    Gives access to the database, provides methods to insert/search/remove
    and getting tables.  Unknown attribute access is forwarded to the
    default table.
    """

    DEFAULT_TABLE = '_default'
    DEFAULT_STORAGE = JSONStorage

    def __init__(self, *args, **kwargs):
        """
        Create a new instance of TinyDB.

        All arguments and keyword arguments will be passed to the underlying
        storage class (default: :class:`~tinydb.storages.JSONStorage`).

        :param storage: The class of the storage to use. Will be initialized
                        with ``args`` and ``kwargs``.
        """
        storage_cls = kwargs.pop('storage', TinyDB.DEFAULT_STORAGE)
        default_table_name = kwargs.pop('default_table', TinyDB.DEFAULT_TABLE)

        # Prepare the storage
        self._opened = False
        #: :type: Storage
        self._storage = storage_cls(*args, **kwargs)
        self._opened = True

        # Prepare the default table
        self._table_cache = {}
        self._table = self.table(default_table_name)

    def table(self, name=DEFAULT_TABLE, **options):
        """
        Get access to a specific table.

        Creates the table on first access; subsequent calls return the
        cached :class:`~tinydb.Table` object.

        :param name: The name of the table.
        :type name: str
        :param cache_size: How many query results to cache.
        """
        try:
            return self._table_cache[name]
        except KeyError:
            pass

        new_table = self.table_class(StorageProxy(self._storage, name),
                                     **options)
        self._table_cache[name] = new_table

        # Initialize the table's storage slot if it is empty or missing.
        if not new_table._read():
            new_table._write({})

        return new_table

    def tables(self):
        """
        Get the names of all tables in the database.

        :returns: a set of table names
        :rtype: set[str]
        """
        return set(self._storage.read())

    def purge_tables(self):
        """
        Purge all tables from the database. **CANNOT BE REVERSED!**
        """
        self._storage.write({})
        self._table_cache.clear()

    def close(self):
        """
        Close the database.
        """
        self._opened = False
        self._storage.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # close() flips _opened off, so the database is only closed once.
        if self._opened is True:
            self.close()

    def __getattr__(self, name):
        """
        Forward all unknown attribute calls to the underlying standard table.
        """
        return getattr(self._table, name)

    # Methods that are executed on the default table
    # Because magic methods are not handled by __getattr__ we need to
    # forward them manually here

    def __len__(self):
        """
        Get the total number of elements in the default table.

        >>> db = TinyDB('db.json')
        >>> len(db)
        0
        """
        return len(self._table)
class Table(object):
    """
    Represents a single TinyDB Table.

    Wraps a :class:`StorageProxy`, allocates element ids and provides the
    element-level operations (insert/search/update/remove/...), with a
    per-condition query cache.
    """

    def __init__(self, storage, cache_size=10):
        """
        Get access to a table.

        :param storage: Access to the storage
        :type storage: StorageProxy
        :param cache_size: Maximum size of query cache.
        """
        self._storage = storage
        self._query_cache = LRUCache(capacity=cache_size)

        data = self._read()
        if data:
            # Resume id allocation after the highest existing element id.
            self._last_id = max(data)
        else:
            self._last_id = 0

    def process_elements(self, func, cond=None, eids=None):
        """
        Helper function for processing all elements specified by condition
        or IDs.

        A repeating pattern in TinyDB is to run some code on all elements
        that match a condition or are specified by their ID. This is
        implemented in this function.

        The function passed as ``func`` has to be a callable. Its first
        argument will be the data currently in the database. Its second
        argument is the element ID of the currently processed element.

        See: :meth:`~.update`, :meth:`.remove`

        :param func: the function to execute on every included element.
                     first argument: all data
                     second argument: the current eid
        :param cond: condition selecting the elements to process, or
        :param eids: the element IDs to process
        :returns: the element IDs that were affected during processing
        """
        data = self._read()

        if eids is not None:
            # Process elements specified by id
            for eid in eids:
                func(data, eid)
        else:
            # Collect affected eids
            eids = []

            # Process elements specified by condition
            for eid in list(data):
                if cond(data[eid]):
                    func(data, eid)
                    eids.append(eid)

        self._write(data)

        return eids

    def clear_cache(self):
        """
        Clear the query cache.

        A simple helper that clears the internal query cache.
        """
        self._query_cache.clear()

    def _get_next_id(self):
        """
        Increment the ID used the last time and return it.
        """
        current_id = self._last_id + 1
        self._last_id = current_id

        return current_id

    def _read(self):
        """
        Reading access to the DB.

        :returns: all values
        :rtype: dict
        """
        return self._storage.read()

    def _write(self, values):
        """
        Writing access to the DB.

        Also invalidates the query cache, since cached results may be
        stale after a write.

        :param values: the new values to write
        :type values: dict
        """
        self._query_cache.clear()
        self._storage.write(values)

    def __len__(self):
        """
        Get the total number of elements in the table.
        """
        return len(self._read())

    def all(self):
        """
        Get all elements stored in the table.

        :returns: a list with all elements.
        :rtype: list[Element]
        """
        return list(itervalues(self._read()))

    def insert(self, element):
        """
        Insert a new element into the table.

        :param element: the element to insert
        :returns: the inserted element's ID
        :raises ValueError: if ``element`` is not a dictionary
        """
        if not isinstance(element, dict):
            # BUG FIX: validate before allocating an id; the original code
            # incremented _last_id first, leaving an id gap when the
            # ValueError was raised.
            raise ValueError('Element is not a dictionary')

        eid = self._get_next_id()

        data = self._read()
        data[eid] = element
        self._write(data)

        return eid

    def insert_multiple(self, elements):
        """
        Insert multiple elements into the table.

        :param elements: a list of elements to insert
        :returns: a list containing the inserted elements' IDs
        """
        eids = []
        data = self._read()

        for element in elements:
            eid = self._get_next_id()
            eids.append(eid)

            data[eid] = element

        self._write(data)

        return eids

    def remove(self, cond=None, eids=None):
        """
        Remove all matching elements.

        :param cond: the condition to check against
        :type cond: query
        :param eids: a list of element IDs
        :type eids: list
        :returns: a list containing the removed element's ID
        """
        return self.process_elements(lambda data, eid: data.pop(eid),
                                     cond, eids)

    def update(self, fields, cond=None, eids=None):
        """
        Update all matching elements to have a given set of fields.

        :param fields: the fields that the matching elements will have
                       or a method that will update the elements
        :type fields: dict | dict -> None
        :param cond: which elements to update
        :type cond: query
        :param eids: a list of element IDs
        :type eids: list
        :returns: a list containing the updated element's ID
        """
        if callable(fields):
            return self.process_elements(
                lambda data, eid: fields(data[eid]),
                cond, eids
            )
        else:
            return self.process_elements(
                lambda data, eid: data[eid].update(fields),
                cond, eids
            )

    def purge(self):
        """
        Purge the table by removing all elements.
        """
        self._write({})
        self._last_id = 0

    def search(self, cond):
        """
        Search for all elements matching a 'where' cond.

        Results are cached per condition; the cache is invalidated by any
        write (see :meth:`._write`).

        :param cond: the condition to check against
        :type cond: Query
        :returns: list of matching elements
        :rtype: list[Element]
        """
        if cond in self._query_cache:
            return self._query_cache[cond]

        elements = [element for element in self.all() if cond(element)]
        self._query_cache[cond] = elements

        return elements

    def get(self, cond=None, eid=None):
        """
        Get exactly one element specified by a query or an ID.

        Returns ``None`` if the element doesn't exist.

        :param cond: the condition to check against
        :type cond: Query
        :param eid: the element's ID
        :returns: the element or None
        :rtype: Element | None
        """
        # Cannot use process_elements here because we want to return a
        # specific element
        if eid is not None:
            # Element specified by ID
            return self._read().get(eid, None)

        # Element specified by condition
        for element in self.all():
            if cond(element):
                return element

    def count(self, cond):
        """
        Count the elements matching a condition.

        :param cond: the condition to use
        :type cond: Query
        """
        return len(self.search(cond))

    def contains(self, cond=None, eids=None):
        """
        Check whether the database contains an element matching a condition
        or an ID.

        If ``eids`` is set, it checks if the db contains an element with
        one of the specified eids.

        :param cond: the condition to use
        :type cond: Query
        :param eids: the element IDs to look for
        """
        if eids is not None:
            # Elements specified by ID
            return any(self.get(eid=eid) for eid in eids)

        # Element specified by condition
        return self.get(cond) is not None


# Set the default table class
TinyDB.table_class = Table
| |
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
import subprocess
import praw
import datetime
import pyperclip
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Per-month configuration: update these at the start of each challenge month.
challengePageSubmissionId = '6xecnc'  # reddit submission id of the challenge post
flaskport = 8883  # local port the moderation UI listens on
thisMonthName = "September"
nextMonthName = "October"
# When True, expand the full comment tree (slower on big threads).
readAllCommentsWhichCanBeSlower = False
# Canned replies posted by the action handlers below.
sorryTooLateToSignUpReplyText = "Sorry, but the late signup grace period for " + thisMonthName + " is over, so you can't officially join this challenge. But feel free to follow along anyway, and comment all you want. And be sure to join us for the " + nextMonthName + " challenge. Signup posts for " + nextMonthName + " will begin during the last week of " + thisMonthName + "."
reinstatedReplyText = "OK, I've reinstated you. You should start showing up on the list again starting tomorrow."
app = Flask(__name__)
app.debug = True
# Module-level state shared between the page handler and the action handlers.
commentHashesAndComments = {}
submission = None
def loginAndReturnRedditSession():
    """Log in with username/password credentials and return the praw session."""
    config = ConfigParser()
    config.read("../reddit-password-credentials.cfg")
    user = config.get("Reddit", "user")
    password = config.get("Reddit", "password")
    # TODO: password auth is going away, and we will soon need to do oauth.
    session = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    session.login(user, password, disable_warning=True)
    return session
def loginOAuthAndReturnRedditSession():
    """Authenticate via OAuth2 and return the praw session."""
    session = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    OAuth2Util.OAuth2Util(session, print_log=True,
                          configfile="../reddit-oauth-credentials.cfg")
    # TODO: Testing comment of refresh. We authenticate fresh every time, so
    # presumably no need to do o.refresh().
    # o.refresh(force=True)
    return session
def getSubmissionForRedditSession(redditSession):
    """Fetch the challenge submission, optionally expanding all comments."""
    post = redditSession.get_submission(submission_id=challengePageSubmissionId)
    if readAllCommentsWhichCanBeSlower:
        post.replace_more_comments(limit=None, threshold=0)
    return post
def getCommentsForSubmission(submission):
    """Flatten the submission's comment tree, keeping only Comment objects."""
    flattened = praw.helpers.flatten_tree(submission.comments)
    return [c for c in flattened if c.__class__ == praw.objects.Comment]
def retireCommentHash(commentHash):
    """Record a comment hash as handled so it is skipped on later page loads."""
    with open("retiredcommenthashes.txt", "a") as outfile:
        outfile.write('%s\n' % commentHash)
def retiredCommentHashes():
    """Return the list of already-handled comment hashes, one per line."""
    with open("retiredcommenthashes.txt", "r") as infile:
        # splitlines() rather than readlines() so entries carry no '\n'.
        return infile.read().splitlines()
@app.route('/moderatechallenge.html')
def moderatechallenge():
    """Render the moderation page: every not-yet-handled comment on the
    challenge post, with member status and per-comment action buttons."""
    currentDayOfMonthIndex = datetime.date.today().day
    # During the first 3 days of the month the signup button is highlighted
    # instead of the checkin button (see the form section below).
    lateCheckinGracePeriodIsInEffect = currentDayOfMonthIndex <= 3
    global commentHashesAndComments
    global submission
    commentHashesAndComments = {}
    stringio = StringIO()
    stringio.write('<html>\n<head>\n</head>\n\n')
    # redditSession = loginAndReturnRedditSession()
    redditSession = loginOAuthAndReturnRedditSession()
    submission = getSubmissionForRedditSession(redditSession)
    flat_comments = getCommentsForSubmission(submission)
    retiredHashes = retiredCommentHashes()
    i = 1
    stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
    stringio.write("<h3>")
    stringio.write(os.getcwd())
    stringio.write("<br>\n")
    stringio.write(submission.title)
    stringio.write("</h3>\n\n")
    stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
    stringio.write('<input type="submit" name="actiontotake" value="Copy display.py stdout to clipboard">')
    stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
    stringio.write('</form>')
    stringio.write('<form action="updategooglechart.html" method="post" target="invisibleiframe">')
    stringio.write('<input type="submit" value="update-google-chart.py">')
    stringio.write('</form>')
    for comment in flat_comments:
        # print comment.is_root
        # print comment.score
        i += 1
        # Each comment is identified by a hash of its permalink plus body,
        # so an edited comment reappears for re-moderation.
        commentHash = sha1()
        commentHash.update(comment.permalink)
        commentHash.update(comment.body.encode('utf-8'))
        commentHash = commentHash.hexdigest()
        if commentHash not in retiredHashes:
            commentHashesAndComments[commentHash] = comment
            authorName = str(comment.author)  # can be None if author was deleted. So check for that and skip if it's None.
            participant = ParticipantCollection().participantNamed(authorName)
            stringio.write("<hr>\n")
            stringio.write('<font color="blue"><b>')
            stringio.write(authorName)
            stringio.write('</b></font><br>')
            # Show the participant's membership / checkin / relapse state.
            if ParticipantCollection().hasParticipantNamed(authorName):
                stringio.write(' <small><font color="green">(member)</font></small>')
                if participant.isStillIn:
                    stringio.write(' <small><font color="green">(still in)</font></small>')
                else:
                    stringio.write(' <small><font color="red">(out)</font></small>')
                if participant.hasCheckedIn:
                    stringio.write(' <small><font color="green">(checked in)</font></small>')
                else:
                    stringio.write(' <small><font color="orange">(not checked in)</font></small>')
                if participant.hasRelapsed:
                    stringio.write(' <small><font color="red">(relapsed)</font></small>')
                else:
                    stringio.write(' <small><font color="green">(not relapsed)</font></small>')
            else:
                stringio.write(' <small><font color="red">(not a member)</font></small>')
            stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
            # Green-highlight "Signup and checkin" during the grace period,
            # otherwise highlight plain "Checkin".
            if lateCheckinGracePeriodIsInEffect:
                stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
                stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
            else:
                stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
                stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
            stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
            stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
            stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
            stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
            stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
            stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
            stringio.write('</form>')
            stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
            stringio.write("\n<br><br>\n\n")
    stringio.write('</html>')
    pageString = stringio.getvalue()
    stringio.close()
    return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
    """Apply the moderator's chosen action to one comment, then retire the
    comment's hash so it no longer appears on the moderation page."""
    username = b64decode(request.form["username"])
    commentHash = str(request.form["commenthash"])
    # commentPermalink = request.form["commentpermalink"]
    actionToTake = request.form["actiontotake"]
    # print commentHashesAndComments
    comment = commentHashesAndComments[commentHash]
    # print "comment: " + str(comment)
    # NOTE(review): the first two branches are 'if'/'if' rather than
    # 'if'/'elif' like the rest of the chain; harmless because the action
    # values are mutually exclusive, but inconsistent.
    if actionToTake == 'Checkin':
        print "checkin - " + username
        subprocess.call(['./checkin.py', username])
        comment.upvote()
        retireCommentHash(commentHash)
    if actionToTake == 'Signup and checkin':
        print "signup and checkin - " + username
        subprocess.call(['./signup-and-checkin.sh', username])
        comment.upvote()
        retireCommentHash(commentHash)
    elif actionToTake == 'Relapse':
        print "relapse - " + username
        subprocess.call(['./relapse.py', username])
        comment.upvote()
        retireCommentHash(commentHash)
    elif actionToTake == 'Reinstate with automatic comment':
        print "reinstate - " + username
        subprocess.call(['./reinstate.py', username])
        comment.reply(reinstatedReplyText)
        comment.upvote()
        retireCommentHash(commentHash)
    elif actionToTake == 'Reply with sorry-too-late comment':
        print "reply with sorry-too-late comment - " + username
        comment.reply(sorryTooLateToSignUpReplyText)
        comment.upvote()
        retireCommentHash(commentHash)
    elif actionToTake == 'Skip comment':
        print "Skip comment - " + username
        comment.upvote()
        retireCommentHash(commentHash)
    elif actionToTake == "Skip comment and don't upvote":
        print "Skip comment and don't upvote - " + username
        retireCommentHash(commentHash)
    return Response("hello", mimetype='text/html')
@app.route('/copydisplaytoclipboard.html', methods=["POST"])
def copydisplaytoclipboard():
    """Run display.py; optionally post its output (via clipboard) to reddit."""
    action = request.form["actiontotake"]
    if action == 'Copy display.py stdout to clipboard':
        subprocess.call(['./display.py'])
    if action == 'Automatically post display.py stdout':
        subprocess.call(['./display.py'])
        # display.py leaves its output on the clipboard; read it back and
        # publish it as the submission body.
        submission.edit(pyperclip.paste())
    return Response("hello", mimetype='text/html')
@app.route('/updategooglechart.html', methods=["POST"])
def updategooglechart():
    """Regenerate the google chart by running the helper script."""
    print("TODO: Copy display to clipboard")
    subprocess.call(['./update-google-chart.py'])
    return Response("hello", mimetype='text/html')
if __name__ == '__main__':
    # Serve the moderation UI on localhost only.
    app.run(host='127.0.0.1', port=flaskport)
| |
<<<<<<< HEAD
<<<<<<< HEAD
import os
import sys
import importlib.machinery
from idlelib.TreeWidget import TreeItem
from idlelib.ClassBrowser import ClassBrowser, ModuleBrowserTreeItem
from idlelib.PyShell import PyShellFileList
class PathBrowser(ClassBrowser):
    # A ClassBrowser specialized to browse the modules found on sys.path.

    def __init__(self, flist, _htest=False):
        """
        flist  - file list, forwarded to ClassBrowser's init()
        _htest - bool, change box location when running htest
        """
        self._htest = _htest
        self.init(flist)

    def settitle(self):
        # Override: label the browser's toplevel window.
        self.top.wm_title("Path Browser")
        self.top.wm_iconname("Path Browser")

    def rootnode(self):
        # Override: the root of the tree represents sys.path itself.
        return PathBrowserTreeItem()
class PathBrowserTreeItem(TreeItem):
    """Tree root: one DirBrowserTreeItem child per sys.path entry."""

    def GetText(self):
        return "sys.path"

    def GetSubList(self):
        return [DirBrowserTreeItem(entry) for entry in sys.path]
class DirBrowserTreeItem(TreeItem):
    """Tree item for a directory on sys.path, or a package directory."""

    def __init__(self, dir, packages=None):
        """dir: directory path; packages: chain of package names above here.

        BUG FIX: the default was the mutable ``packages=[]``, evaluated
        once and shared by every instance created without the argument.
        """
        self.dir = dir
        self.packages = [] if packages is None else packages

    def GetText(self):
        # Top-level entries show the path; packages show their name.
        if not self.packages:
            return self.dir
        else:
            return self.packages[-1] + ": package"

    def GetSubList(self):
        """Return children: package dirs first (sorted), then modules."""
        try:
            names = os.listdir(self.dir or os.curdir)
        except OSError:
            return []
        packages = []
        for name in names:
            file = os.path.join(self.dir, name)
            if self.ispackagedir(file):
                nn = os.path.normcase(name)
                packages.append((nn, name, file))
        packages.sort()
        sublist = []
        for nn, name, file in packages:
            item = DirBrowserTreeItem(file, self.packages + [name])
            sublist.append(item)
        for nn, name in self.listmodules(names):
            item = ModuleBrowserTreeItem(os.path.join(self.dir, name))
            sublist.append(item)
        return sublist

    def ispackagedir(self, file):
        """Return whether file is a directory containing __init__.py."""
        if not os.path.isdir(file):
            return False
        init = os.path.join(file, "__init__.py")
        return os.path.exists(init)

    def listmodules(self, allnames):
        """Return sorted (normcased, original) names of module files.

        Removes matched names from allnames (the argument is mutated).
        """
        modules = {}
        suffixes = importlib.machinery.EXTENSION_SUFFIXES[:]
        suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
        suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
        # Renamed from 'sorted', which shadowed the builtin.
        found = []
        for suff in suffixes:
            i = -len(suff)
            for name in allnames[:]:
                normed_name = os.path.normcase(name)
                if normed_name[i:] == suff:
                    mod_name = name[:i]
                    if mod_name not in modules:
                        modules[mod_name] = None
                        found.append((normed_name, name))
                    allnames.remove(name)
        found.sort()
        return found
def _path_browser(parent):
    # htest entry point: open a Path Browser and block in the Tk mainloop.
    flist = PyShellFileList(parent)
    PathBrowser(flist, _htest=True)
    parent.mainloop()
if __name__ == "__main__":
    # Run the unit tests (without exiting), then the human-verified test
    # that opens an actual browser window.
    from unittest import main
    main('idlelib.idle_test.test_pathbrowser', verbosity=2, exit=False)
    from idlelib.idle_test.htest import run
    run(_path_browser)
=======
import os
import sys
import importlib.machinery
from idlelib.TreeWidget import TreeItem
from idlelib.ClassBrowser import ClassBrowser, ModuleBrowserTreeItem
from idlelib.PyShell import PyShellFileList
class PathBrowser(ClassBrowser):
    # A ClassBrowser specialized to browse the modules found on sys.path.

    def __init__(self, flist, _htest=False):
        """
        flist  - file list, forwarded to ClassBrowser's init()
        _htest - bool, change box location when running htest
        """
        self._htest = _htest
        self.init(flist)

    def settitle(self):
        # Override: label the browser's toplevel window.
        self.top.wm_title("Path Browser")
        self.top.wm_iconname("Path Browser")

    def rootnode(self):
        # Override: the root of the tree represents sys.path itself.
        return PathBrowserTreeItem()
class PathBrowserTreeItem(TreeItem):
    """Tree root: one DirBrowserTreeItem child per sys.path entry."""

    def GetText(self):
        return "sys.path"

    def GetSubList(self):
        return [DirBrowserTreeItem(entry) for entry in sys.path]
class DirBrowserTreeItem(TreeItem):
    """Tree item for a directory on sys.path, or a package directory."""

    def __init__(self, dir, packages=None):
        """dir: directory path; packages: chain of package names above here.

        BUG FIX: the default was the mutable ``packages=[]``, evaluated
        once and shared by every instance created without the argument.
        """
        self.dir = dir
        self.packages = [] if packages is None else packages

    def GetText(self):
        # Top-level entries show the path; packages show their name.
        if not self.packages:
            return self.dir
        else:
            return self.packages[-1] + ": package"

    def GetSubList(self):
        """Return children: package dirs first (sorted), then modules."""
        try:
            names = os.listdir(self.dir or os.curdir)
        except OSError:
            return []
        packages = []
        for name in names:
            file = os.path.join(self.dir, name)
            if self.ispackagedir(file):
                nn = os.path.normcase(name)
                packages.append((nn, name, file))
        packages.sort()
        sublist = []
        for nn, name, file in packages:
            item = DirBrowserTreeItem(file, self.packages + [name])
            sublist.append(item)
        for nn, name in self.listmodules(names):
            item = ModuleBrowserTreeItem(os.path.join(self.dir, name))
            sublist.append(item)
        return sublist

    def ispackagedir(self, file):
        """Return whether file is a directory containing __init__.py."""
        if not os.path.isdir(file):
            return False
        init = os.path.join(file, "__init__.py")
        return os.path.exists(init)

    def listmodules(self, allnames):
        """Return sorted (normcased, original) names of module files.

        Removes matched names from allnames (the argument is mutated).
        """
        modules = {}
        suffixes = importlib.machinery.EXTENSION_SUFFIXES[:]
        suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
        suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
        # Renamed from 'sorted', which shadowed the builtin.
        found = []
        for suff in suffixes:
            i = -len(suff)
            for name in allnames[:]:
                normed_name = os.path.normcase(name)
                if normed_name[i:] == suff:
                    mod_name = name[:i]
                    if mod_name not in modules:
                        modules[mod_name] = None
                        found.append((normed_name, name))
                    allnames.remove(name)
        found.sort()
        return found
def _path_browser(parent):
    # htest entry point: open a Path Browser and block in the Tk mainloop.
    flist = PyShellFileList(parent)
    PathBrowser(flist, _htest=True)
    parent.mainloop()
if __name__ == "__main__":
    # Run the unit tests (without exiting), then the human-verified test
    # that opens an actual browser window.
    from unittest import main
    main('idlelib.idle_test.test_pathbrowser', verbosity=2, exit=False)
    from idlelib.idle_test.htest import run
    run(_path_browser)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
import os
import sys
import importlib.machinery
from idlelib.TreeWidget import TreeItem
from idlelib.ClassBrowser import ClassBrowser, ModuleBrowserTreeItem
from idlelib.PyShell import PyShellFileList
class PathBrowser(ClassBrowser):
    # A ClassBrowser specialized to browse the modules found on sys.path.

    def __init__(self, flist, _htest=False):
        """
        flist  - file list, forwarded to ClassBrowser's init()
        _htest - bool, change box location when running htest
        """
        self._htest = _htest
        self.init(flist)

    def settitle(self):
        # Override: label the browser's toplevel window.
        self.top.wm_title("Path Browser")
        self.top.wm_iconname("Path Browser")

    def rootnode(self):
        # Override: the root of the tree represents sys.path itself.
        return PathBrowserTreeItem()
class PathBrowserTreeItem(TreeItem):
    """Tree root: one DirBrowserTreeItem child per sys.path entry."""

    def GetText(self):
        return "sys.path"

    def GetSubList(self):
        return [DirBrowserTreeItem(entry) for entry in sys.path]
class DirBrowserTreeItem(TreeItem):
    """Tree item for a directory on sys.path, or a package directory."""

    def __init__(self, dir, packages=None):
        """dir: directory path; packages: chain of package names above here.

        BUG FIX: the default was the mutable ``packages=[]``, evaluated
        once and shared by every instance created without the argument.
        """
        self.dir = dir
        self.packages = [] if packages is None else packages

    def GetText(self):
        # Top-level entries show the path; packages show their name.
        if not self.packages:
            return self.dir
        else:
            return self.packages[-1] + ": package"

    def GetSubList(self):
        """Return children: package dirs first (sorted), then modules."""
        try:
            names = os.listdir(self.dir or os.curdir)
        except OSError:
            return []
        packages = []
        for name in names:
            file = os.path.join(self.dir, name)
            if self.ispackagedir(file):
                nn = os.path.normcase(name)
                packages.append((nn, name, file))
        packages.sort()
        sublist = []
        for nn, name, file in packages:
            item = DirBrowserTreeItem(file, self.packages + [name])
            sublist.append(item)
        for nn, name in self.listmodules(names):
            item = ModuleBrowserTreeItem(os.path.join(self.dir, name))
            sublist.append(item)
        return sublist

    def ispackagedir(self, file):
        """Return whether file is a directory containing __init__.py."""
        if not os.path.isdir(file):
            return False
        init = os.path.join(file, "__init__.py")
        return os.path.exists(init)

    def listmodules(self, allnames):
        """Return sorted (normcased, original) names of module files.

        Removes matched names from allnames (the argument is mutated).
        """
        modules = {}
        suffixes = importlib.machinery.EXTENSION_SUFFIXES[:]
        suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
        suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
        # Renamed from 'sorted', which shadowed the builtin.
        found = []
        for suff in suffixes:
            i = -len(suff)
            for name in allnames[:]:
                normed_name = os.path.normcase(name)
                if normed_name[i:] == suff:
                    mod_name = name[:i]
                    if mod_name not in modules:
                        modules[mod_name] = None
                        found.append((normed_name, name))
                    allnames.remove(name)
        found.sort()
        return found
def _path_browser(parent):
    """Open a PathBrowser window for manual (htest) inspection."""
    file_list = PyShellFileList(parent)
    PathBrowser(file_list, _htest=True)
    parent.mainloop()
if __name__ == "__main__":
    # Run the module's unit tests first (without exiting the process)...
    from unittest import main
    main('idlelib.idle_test.test_pathbrowser', verbosity=2, exit=False)
    # ...then the human-driven smoke test, which opens a PathBrowser window.
    from idlelib.idle_test.htest import run
    run(_path_browser)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| |
# -*- coding: utf-8 -*-
"""
.. module:: djstripe.event_handlers.
:synopsis: dj-stripe - webhook event handlers for the various models
.. moduleauthor:: Bill Huneke (@wahuneke)
.. moduleauthor:: Alex Kavanaugh (@akavanau)
.. moduleauthor:: Lee Skillen (@lskillen)
Stripe docs for Events: https://stripe.com/docs/api#events
Stripe docs for Webhooks: https://stripe.com/docs/webhooks
TODO: Implement webhook event handlers for all the models that need to respond to webhook events.
NOTE: Event data is not guaranteed to be in the correct API version format. See #116.
When writing a webhook handler, make sure to first re-retrieve the object you wish to
process.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from . import webhooks
from .enums import SourceType
from .models import Card, Charge, Coupon, Customer, Invoice, InvoiceItem, Plan, Subscription, Transfer
from .utils import convert_tstamp
logger = logging.getLogger(__name__)
@webhooks.handler_all
def customer_event_attach(event):
    """Make the related customer available on the event for all handlers to use.

    Does not create Customer objects.
    """
    event.customer = None
    crud_type = CrudType.determine(event=event, exact=True)
    obj_data = event.data.get("object", {})

    # A CRUD-like "customer" event carries the customer id as the object's
    # own id; any other event refers to it via its "customer" field.
    if event.category == "customer" and crud_type.valid:
        stripe_customer_id = obj_data.get("id")
    else:
        stripe_customer_id = obj_data.get("customer")

    if stripe_customer_id:
        try:
            event.customer = Customer.objects.get(stripe_id=stripe_customer_id)
        except Customer.DoesNotExist:
            pass
@webhooks.handler("customer")
def customer_webhook_handler(event):
    """Handle updates to customer objects.

    First determines the crud_type and then handles the event if a customer
    exists locally.  As customers are tied to local users, djstripe will not
    create customers that do not already exist locally.

    Docs and an example customer webhook response: https://stripe.com/docs/api#customer_object
    """
    if not event.customer:
        # No matching local customer (see customer_event_attach): nothing to do.
        return
    _handle_crud_like_event(target_cls=Customer, event=event, crud_exact=True, crud_valid=True)
@webhooks.handler("customer.discount")
def customer_discount_webhook_handler(event):
    """Handle updates to customer discount objects.

    Docs: https://stripe.com/docs/api#discounts

    Because there is no concept of a "Discount" model in dj-stripe (due to the
    lack of a stripe id on them), this is a little different to the other
    handlers.
    """
    crud_type = CrudType.determine(event=event)
    discount_data = event.data.get("object", {})

    # A delete (or unrecognized verb) clears the customer's coupon fields.
    coupon, start, end = None, None, None
    if crud_type.created or crud_type.updated:
        coupon_data = discount_data.get("coupon", {})
        coupon, _ = _handle_crud_like_event(
            target_cls=Coupon,
            event=event,
            data=coupon_data,
            stripe_id=coupon_data.get("id")
        )
        start = discount_data.get("start")
        end = discount_data.get("end")

    event.customer.coupon = coupon
    event.customer.coupon_start = convert_tstamp(start)
    event.customer.coupon_end = convert_tstamp(end)
    event.customer.save()
@webhooks.handler("customer.source")
def customer_source_webhook_handler(event):
    """Handle updates to customer payment-source objects.

    Docs: https://stripe.com/docs/api#customer_object-sources.
    """
    source_type = event.data.get("object", {}).get("object", {})

    # TODO: handle other types of sources (https://stripe.com/docs/api#customer_object-sources)
    if source_type == SourceType.card:
        _handle_crud_like_event(target_cls=Card, event=event)
@webhooks.handler("customer.subscription")
def customer_subscription_webhook_handler(event):
    """Handle updates to customer subscription objects.

    Docs an example subscription webhook response: https://stripe.com/docs/api#subscription_object
    """
    # Plain CRUD-style sync against the Stripe API via the shared helper.
    _handle_crud_like_event(target_cls=Subscription, event=event)
@webhooks.handler("transfer", "charge", "coupon", "invoice", "invoiceitem", "plan")
def other_object_webhook_handler(event):
    """Handle updates to transfer, charge, coupon, invoice, invoiceitem and plan objects.

    Docs for:
    - charge: https://stripe.com/docs/api#charges
    - coupon: https://stripe.com/docs/api#coupons
    - invoice: https://stripe.com/docs/api#invoices
    - invoiceitem: https://stripe.com/docs/api#invoiceitems
    - plan: https://stripe.com/docs/api#plans
    """
    category_to_model = {
        "charge": Charge,
        "coupon": Coupon,
        "invoice": Invoice,
        "invoiceitem": InvoiceItem,
        "plan": Plan,
        "transfer": Transfer,
    }
    _handle_crud_like_event(target_cls=category_to_model.get(event.category), event=event)
#
# Helpers
#
class CrudType(object):
    """Helper object to determine CRUD-like event state."""

    created = False
    updated = False
    deleted = False

    def __init__(self, **kwargs):
        """Store the given CRUD flags as instance attributes."""
        for key, value in kwargs.items():
            setattr(self, key, value)

    @property
    def valid(self):
        """Return True if this is a CRUD-like event."""
        return self.created or self.updated or self.deleted

    @classmethod
    def determine(cls, event, verb=None, exact=False):
        """
        Determine if the event verb is a crud_type (without the 'R') event.

        :param event: Event whose verb is used when ``verb`` is not given.
        :param verb: The event verb to examine.
        :type verb: string (``str``/``unicode``)
        :param exact: If True, match crud_type to event verb string exactly.
        :type exact: ``bool``
        :returns: The CrudType state object.
        :rtype: ``CrudType``
        """
        verb = verb or event.verb

        if exact:
            def matches(candidate):
                return verb == candidate
        else:
            def matches(candidate):
                return verb.endswith(candidate)

        flags = {"created": False, "updated": False, "deleted": False}
        # Same precedence as the original elif chain: updated, created, deleted.
        for kind in ("updated", "created", "deleted"):
            if matches(kind):
                flags[kind] = True
                break
        return cls(**flags)
def _handle_crud_like_event(target_cls, event, data=None, verb=None,
                            stripe_id=None, customer=None, crud_type=None,
                            crud_exact=False, crud_valid=False):
    """
    Helper to process crud_type-like events for objects.

    Non-deletes (creates, updates and "anything else" events) are treated as
    update_or_create events - The object will be retrieved locally, then it is
    synchronised with the Stripe API for parity.

    Deletes only occur for delete events and cause the object to be deleted
    from the local database, if it existed. If it doesn't exist then it is
    ignored (but the event processing still succeeds).

    :param target_cls: The djstripe model being handled.
    :type: ``djstripe.stripe_objects.StripeObject``
    :param data: The event object data (defaults to ``event.data``).
    :param verb: The event verb (defaults to ``event.verb``).
    :param stripe_id: The object Stripe ID (defaults to ``object.id``).
    :param customer: The customer object (defaults to ``event.customer``).
    :param crud_type: The CrudType object (determined by default).
    :param crud_exact: If True, match verb against CRUD type exactly.
    :param crud_valid: If True, CRUD type must match valid type.
    :returns: The object (if any) and the event CrudType.  NOTE: returns
        None (not a tuple) when the event is ignored/dropped.
    :rtype: ``tuple(obj, CrudType)``
    """
    data = data or event.data
    stripe_id = stripe_id or data.get("object", {}).get("id", None)
    if not stripe_id:
        # We require an object when applying CRUD-like events, so if there's
        # no ID the event is ignored/dropped. This happens in events such as
        # invoice.upcoming, which refer to a future (non-existant) invoice.
        logger.debug(
            "Ignoring %r Stripe event without object ID: %r",
            event.type, event)
        return
    verb = verb or event.verb
    customer = customer or event.customer
    crud_type = crud_type or CrudType.determine(event=event, verb=verb, exact=crud_exact)
    obj = None
    # Callers may require a strictly CRUD-like verb (crud_valid=True).
    if crud_valid and not crud_type.valid:
        logger.debug(
            "Ignoring %r Stripe event without valid CRUD type: %r",
            event.type, event)
        return
    if crud_type.deleted:
        # Deleting something that never existed locally is a no-op.
        try:
            obj = target_cls.objects.get(stripe_id=stripe_id)
            obj.delete()
        except target_cls.DoesNotExist:
            pass
    else:
        # Any other event type (creates, updates, etc.) - This can apply to
        # verbs that aren't strictly CRUD but Stripe do intend an update. Such
        # as invoice.payment_failed.
        kwargs = {"stripe_id": stripe_id}
        if hasattr(target_cls, 'customer'):
            kwargs["customer"] = customer
        # Re-retrieve from the Stripe API: event payloads are not guaranteed
        # to be in the current API version format (see module docstring).
        data = target_cls(**kwargs).api_retrieve()
        obj = target_cls.sync_from_stripe_data(data)
    return obj, crud_type
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {
        "refnames": git_refnames,
        "full": git_full,
        "date": git_date,
    }
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # Plain attribute bag: fields (VCS, style, tag_prefix, parentdir_prefix,
    # versionfile_source, verbose) are assigned by get_config().
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "v",
        "parentdir_prefix": "kenjutsu",
        "versionfile_source": "kenjutsu/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow between the "keywords", "pieces_from_vcs" and
    "parentdir" version-discovery strategies in get_versions().
    """
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable name in *commands* (a list) with *args*
    until one can be spawned, and returns a (stdout, returncode) pair.
    (None, None) means no candidate could be started; (None, returncode)
    means the command ran but failed.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate isn't installed; try the next name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for/else: loop finished without break, i.e. every candidate
        # was missing (ENOENT).
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        # Popen yields bytes on Python 3; the callers expect text.
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    # Check `root` itself plus up to two parent directories.
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Returns a dict with whichever of the keys "refnames", "full" and "date"
    could be parsed out of the file's git_* keyword assignments.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # FIX: 'with' guarantees the handle is closed even if a read raises
        # (the original only closed on the fully-successful path).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: treat as "no keywords found".
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    *keywords* are the expanded $Format$ strings produced by git-archive.
    Raises NotThisMethod when they are absent or unexpanded (i.e. we are
    not inside an exported tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, closest-tag, distance,
    dirty, date, error — consumed by the render_* functions.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False on Windows needs the full executable names.
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a . ."""
    # FIX: "closest-tag" may be present but None (no tag found); guard so
    # the membership test never sees None (matches later versioneer).
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing is tagged yet
        version = "0+untagged.%d.g%s" % (pieces["distance"],
                                         pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    dirty = pieces["dirty"]
    if tag:
        version = tag
        if pieces["distance"] or dirty:
            version += ".post%d" % pieces["distance"]
            if dirty:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "g%s" % pieces["short"]
        return version
    # exception #1
    version = "0.post%d" % pieces["distance"]
    if dirty:
        version += ".dev0"
    version += "+g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
    else:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Parsing failed earlier; surface the error instead of a version.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderer(pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries three strategies in order: expanded git-archive keywords, a live
    'git describe' of the source tree, and finally the parent directory
    name.  Always returns a dict (never raises).
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: keywords expanded by git-archive.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # No __file__ (e.g. frozen interpreter): cannot locate the tree.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: infer the version from the unpacked directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| |
# -*- coding: utf-8 -*-
"""
CKernel evaluation of blaze AIR.
"""
from __future__ import absolute_import, division, print_function
import operator
from pykit.ir import visit, copy_function
from dynd import nd, ndt
import blaze
import blz
import datashape
from ....datadescriptor import DyNDDataDescriptor, BLZDataDescriptor
from ..pipeline import run_pipeline
from ..frontend import ckernel_impls, ckernel_lift, allocation
#------------------------------------------------------------------------
# Interpreter
#------------------------------------------------------------------------
def interpret(func, env, args, storage=None, **kwds):
    """Interpret the blaze AIR function `func` over the concrete `args`.

    When `storage` is None the whole expression is evaluated in memory in
    one pass; otherwise the outermost dimension is evaluated chunk by chunk
    and appended to a persistent BLZ output.
    Returns a blaze Array with the result.
    """
    assert len(args) == len(func.args)
    # Make a copy, since we're going to mutate our IR!
    func, _ = copy_function(func)
    # If it's a BLZ output, we want an interpreter that streams
    # the processing through in chunks
    if storage is not None:
        if len(func.type.restype.shape) == 0:
            raise TypeError('Require an array, not a scalar, for outputting to BLZ')
        env['stream-outer'] = True
        result_ndim = env['result-ndim'] = len(func.type.restype.shape)
    else:
        # Convert any persistent inputs to memory
        # TODO: should stream the computation in this case
        for i, arg in enumerate(args):
            if isinstance(arg._data, BLZDataDescriptor):
                args[i] = arg[:]
    # Update environment with dynd type information
    dynd_types = dict((arg, get_dynd_type(array))
                      for arg, array in zip(func.args, args)
                      if isinstance(array._data, DyNDDataDescriptor))
    env['dynd-types'] = dynd_types
    # Lift ckernels
    func, env = run_pipeline(func, env, run_time_passes)
    if storage is None:
        # Evaluate once
        values = dict(zip(func.args, args))
        interp = CKernelInterp(values)
        visit(interp, func)
        return interp.result
    else:
        res_shape, res_dt = datashape.to_numpy(func.type.restype)
        dim_size = operator.index(res_shape[0])
        row_size = ndt.type(str(func.type.restype.subarray(1))).data_size
        # Target roughly 1 MiB per chunk, clamped to [1, dim_size] rows.
        chunk_size = min(max(1, (1024*1024) // row_size), dim_size)
        # Evaluate by streaming the outermost dimension,
        # and using the BLZ data descriptor's append
        dst_dd = BLZDataDescriptor(blz.zeros((0,)+res_shape[1:], res_dt,
                                             rootdir=storage.path))
        # Loop through all the chunks
        for chunk_start in range(0, dim_size, chunk_size):
            # Tell the interpreter which chunk size to use (last
            # chunk might be smaller)
            chunk_size = min(chunk_size, dim_size - chunk_start)
            # Evaluate the chunk; only arguments of full result rank are
            # sliced, lower-rank arguments are broadcast unchanged.
            args_chunk = [arg[chunk_start:chunk_start+chunk_size]
                          if len(arg.dshape.shape) == result_ndim
                          else arg for arg in args]
            values = dict(zip(func.args, args_chunk))
            interp = CKernelChunkInterp(values, chunk_size, result_ndim)
            visit(interp, func)
            chunk = interp.result._data.dynd_arr()
            dst_dd.append(chunk)
        return blaze.Array(dst_dd)
#------------------------------------------------------------------------
# Passes
#------------------------------------------------------------------------
# Compiler passes applied, in order, by interpret() before evaluation:
# lower to concrete ckernels, insert allocations, then lift ckernels.
run_time_passes = [
    ckernel_impls,
    allocation,
    ckernel_lift,
]
#------------------------------------------------------------------------
# Interpreter
#------------------------------------------------------------------------
class CKernelInterp(object):
    """
    Interpret low-level AIR in the most straightforward way possible.

    Low-level AIR contains the following operations:

        alloc/dealloc
        ckernel

    There is a huge number of things we can still do, like blocking and
    parallelism.

    Blocking
    ========
    This should probably happen through a "blocking-ckernel" wrapper

    Parallelism
    ===========
    Both data-parallelism by executing ckernels over slices, and executing
    disjoint sub-expressions in parallel.
    """

    def __init__(self, values):
        # { Op : py_val }
        self.values = values

    def op_alloc(self, op):
        storage = op.metadata.get('storage')  # TODO: storage!
        self.values[op] = blaze.empty(op.type, storage=storage)

    def op_dealloc(self, op):
        alloc, = op.args
        del self.values[alloc]

    def op_convert(self, op):
        raw = self.values[op.args[0]]._data.dynd_arr()
        converted = nd.array(raw, type=ndt.type(str(op.type)))
        self.values[op] = blaze.Array(DyNDDataDescriptor(converted))

    def op_pykernel(self, op):
        raise RuntimeError("Shouldn't be seeing a pykernel here...")

    def op_ckernel(self, op):
        deferred_ckernel = op.args[0]
        operands = [self.values[arg] for arg in op.args[1]]
        descriptors = [operand._data for operand in operands]
        out = descriptors[0].dynd_arr()
        inputs = [desc.dynd_arr() for desc in descriptors[1:]]
        # Execute!
        deferred_ckernel.__call__(out, *inputs)
        # Operations are rewritten to already refer to 'dst'
        # We are essentially a 'void' operation
        self.values[op] = None

    def op_ret(self, op):
        self.result = self.values[op.args[0]]
class CKernelChunkInterp(object):
    """
    Like CKernelInterp, but for processing one chunk.
    """

    def __init__(self, values, chunk_size, result_ndim):
        # { Op : py_val }
        self.values = values
        self.chunk_size = chunk_size
        self.result_ndim = result_ndim

    def op_alloc(self, op):
        dshape = op.type
        # Allocate a chunk instead of the whole thing
        if len(dshape.shape) == self.result_ndim:
            chunk = nd.empty(self.chunk_size, str(dshape.subarray(1)))
        else:
            chunk = nd.empty(str(dshape))
        self.values[op] = blaze.array(chunk)

    def op_dealloc(self, op):
        alloc, = op.args
        del self.values[alloc]

    def op_convert(self, op):
        raw = self.values[op.args[0]]._data.dynd_arr()
        converted = nd.array(raw, type=ndt.type(str(op.type)))
        self.values[op] = blaze.Array(DyNDDataDescriptor(converted))

    def op_ckernel(self, op):
        deferred_ckernel = op.args[0]
        operands = [self.values[arg] for arg in op.args[1]]
        descriptors = [operand._data for operand in operands]
        out = descriptors[0].dynd_arr()
        inputs = [desc.dynd_arr() for desc in descriptors[1:]]
        # TODO: Remove later, explicit casting necessary for now because
        # of BLZ/numpy interop effect.
        for i, (inp, tp) in enumerate(zip(inputs, deferred_ckernel.types[1:])):
            expected = ndt.type(tp)
            if nd.type_of(inp) != expected:
                inputs[i] = nd.array(inp, type=expected)
        # Execute!
        deferred_ckernel.__call__(out, *inputs)
        # Operations are rewritten to already refer to 'dst'
        # We are essentially a 'void' operation
        self.values[op] = None

    def op_ret(self, op):
        self.result = self.values[op.args[0]]
#------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------
def get_dynd_type(array):
    """Return the dynd type of a blaze Array backed by a dynd descriptor."""
    return nd.type_of(array._data.dynd_arr())
| |
import numpy as np
import pp
from pp.routing.connect import connect_strip
from pp.routing.connect import connect_elec
from pp.port import is_electrical_port
from pp.port import flipped
BEND_RADIUS = pp.config.BEND_RADIUS
def route_elec_ports_to_side(ports, side="north", wire_sep=20.0, x=None, y=None):
    """Route electrical ports to a given side with straight wires (no bends)."""
    routing_kwargs = dict(side=side, bend_radius=0, separation=wire_sep, x=x, y=y)
    return route_ports_to_side(ports, **routing_kwargs)
def route_ports_to_side(
    ports, side="north", x=None, y=None, routing_func=None, **kwargs
):
    """ Routes ports to a given side

    Args:
        ports: the list of ports to be connected to the side
            can also be a dictionnary, a <pp.Component> or a phidl
            <ComponentReference>
        side should be 'north', 'south', 'east' or 'west'
        x: only for east/west side routing: the x position where the ports should be sent
            If None, will use the eastest/westest value
        y: only for south/north side routing: the y position where the ports should be send
            If None, will use the southest/northest value
        routing_func: the routing function. By default uses either `connect_elec`
            or `connect_strip` depending on the ports layer.
        kwargs: may include:
            `bend_radius`
            `extend_bottom`, `extend_top` for east/west routing
            `extend_left`, `extend_right` for south/north routing

    Returns:
        (connectors, new optical ports)
    """
    if not ports:
        return [], []

    # Accept list of ports, Component or dict of ports
    if isinstance(ports, dict):
        ports = list(ports.values())
    elif isinstance(ports, (pp.Component, pp.ComponentReference)):
        ports = list(ports.ports.values())

    # Convenient default selection for connection function point to point
    if routing_func is None:
        routing_func = connect_elec if is_electrical_port(ports[0]) else connect_strip

    # Dispatch to the y- or x-side router; when no explicit coordinate is
    # given, pass the side name through so the router picks the extreme value.
    if side in ("north", "south"):
        func_route = connect_ports_to_y
        xy = side if y is None else y
    elif side in ("west", "east"):
        func_route = connect_ports_to_x
        xy = side if x is None else x

    return func_route(ports, xy, routing_func=routing_func, **kwargs)
def route_ports_to_north(list_ports, **kwargs):
return route_ports_to_side(list_ports, side="north", **kwargs)
def route_ports_to_south(list_ports, **kwargs):
    """Shorthand for :func:`route_ports_to_side` with ``side="south"``."""
    return route_ports_to_side(list_ports, side="south", **kwargs)
def route_ports_to_west(list_ports, **kwargs):
    """Shorthand for :func:`route_ports_to_side` with ``side="west"``."""
    return route_ports_to_side(list_ports, side="west", **kwargs)
def route_ports_to_east(list_ports, **kwargs):
    """Shorthand for :func:`route_ports_to_side` with ``side="east"``."""
    return route_ports_to_side(list_ports, side="east", **kwargs)
def connect_ports_to_x(
    list_ports,
    x="east",
    separation=10.0,
    bend_radius=BEND_RADIUS,
    extend_bottom=0,
    extend_top=0,
    extension_length=0,
    y0_bottom=None,
    y0_top=None,
    routing_func=connect_strip,
    routing_func_args=None,
    backward_port_side_split_index=0,
):
    """Route ports to a vertical line at position ``x`` (east or west side).

    Args:
        list_ports: reasonably well behaved list of ports, i.e.
            ports facing north are norther than any other ports,
            ports facing south are souther, etc.
        x: float or string.
            If float: x coordinate to which the ports will be routed.
            If "east"/"west": route to that side of the ports' bounding box.
        separation: spacing between routed waveguides.
        bend_radius: bend radius passed to ``routing_func``.
        extend_bottom / extend_top: extra clearance added below/above the
            computed bottom/top y levels.
        extension_length: offset of the output ports along x (automatically
            negated when routing west).
        y0_bottom / y0_top: explicit override for the bottom/top y levels.
        routing_func: point-to-point routing function.
        routing_func_args: extra keyword arguments for ``routing_func``.
        backward_port_side_split_index: index in the bottom-to-top sorted
            list of backward-facing ports; ports strictly below this index
            are routed through the bottom, the rest through the top.

    Returns:
        - a list of connectors which can be added to an element list
        - a list of the new optical ports
    """
    # Fresh dict each call instead of a shared mutable default argument.
    if routing_func_args is None:
        routing_func_args = {}

    north_ports = [p for p in list_ports if p.angle == 90]
    south_ports = [p for p in list_ports if p.angle == 270]
    east_ports = [p for p in list_ports if p.angle == 0]
    west_ports = [p for p in list_ports if p.angle == 180]

    epsilon = 1.0
    a = epsilon + max(bend_radius, separation)
    xs = [p.x for p in list_ports]
    ys = [p.y for p in list_ports]

    if y0_bottom is None:
        y0_bottom = min(ys) - a
    y0_bottom -= extend_bottom

    if y0_top is None:
        y0_top = max(ys) + a
    y0_top += extend_top

    # When routing west, the extension points in the -x direction.
    if x == "west" and extension_length > 0:
        extension_length = -extension_length

    if x == "east":
        x = max(p.x for p in list_ports) + a
    elif x == "west":
        x = min(p.x for p in list_ports) - a
    # otherwise ``x`` is assumed to already be a numeric coordinate;
    # an invalid value is caught by the east/west comparison below

    sort_key_west_to_east = lambda p: p.x
    sort_key_east_to_west = lambda p: -p.x
    sort_key_south_to_north = lambda p: p.y
    sort_key_north_to_south = lambda p: -p.y

    if x < min(xs):
        # routing toward the west side
        sort_key_north = sort_key_west_to_east
        sort_key_south = sort_key_west_to_east
        forward_ports = west_ports
        backward_ports = east_ports
        angle = 0
    elif x > max(xs):
        # routing toward the east side
        sort_key_south = sort_key_east_to_west
        sort_key_north = sort_key_east_to_west
        forward_ports = east_ports
        backward_ports = west_ports
        angle = 180
    else:
        raise ValueError("x should be either to the east or to the west of all ports")

    # Routing order: bottom-half of the backward ports (the ones facing
    # away from x), then the south ports, then the forward ports, then
    # the north ports.
    north_ports.sort(key=sort_key_north)
    south_ports.sort(key=sort_key_south)
    forward_ports.sort(key=sort_key_south_to_north)
    backward_ports.sort(key=sort_key_south_to_north)

    backward_ports_thru_south = backward_ports[0:backward_port_side_split_index]
    backward_ports_thru_north = backward_ports[backward_port_side_split_index:]
    backward_ports_thru_south.sort(key=sort_key_south_to_north)
    backward_ports_thru_north.sort(key=sort_key_north_to_south)

    elements = []
    ports = []

    def add_port(p, y, l_elements, l_ports, start_straight=0.01):
        # Route ``p`` to a new port at (x + extension_length, y) facing
        # ``angle``, and expose the flipped port for further routing.
        new_port = p._copy()
        new_port.angle = angle
        new_port.position = (x + extension_length, y)
        l_elements += [
            routing_func(
                p,
                new_port,
                start_straight=start_straight,
                bend_radius=bend_radius,
                **routing_func_args
            )
        ]
        l_ports += [flipped(new_port)]

    y_optical_bot = y0_bottom
    for p in south_ports:
        add_port(p, y_optical_bot, elements, ports)
        y_optical_bot -= separation

    for p in forward_ports:
        add_port(p, p.y, elements, ports)

    y_optical_top = y0_top
    for p in north_ports:
        add_port(p, y_optical_top, elements, ports)
        y_optical_top += separation

    # Backward ports get a growing start_straight so the routes nest
    # without crossing; start_straight0 additionally extends ports that
    # start short of the furthest port in the routing direction.
    start_straight = 0.01
    start_straight0 = 0
    max_x = max(xs)
    min_x = min(xs)
    for p in backward_ports_thru_north:
        if angle == 0 and p.x < max_x:
            start_straight0 = max_x - p.x
        elif angle == 180 and p.x > min_x:
            start_straight0 = p.x - min_x
        else:
            start_straight0 = 0
        add_port(
            p,
            y_optical_top,
            elements,
            ports,
            start_straight=start_straight + start_straight0,
        )
        y_optical_top += separation
        start_straight += separation

    start_straight = 0.01
    start_straight0 = 0
    for p in backward_ports_thru_south:
        if angle == 0 and p.x < max_x:
            start_straight0 = max_x - p.x
        elif angle == 180 and p.x > min_x:
            start_straight0 = p.x - min_x
        else:
            start_straight0 = 0
        add_port(
            p,
            y_optical_bot,
            elements,
            ports,
            start_straight=start_straight + start_straight0,
        )
        y_optical_bot -= separation
        start_straight += separation

    return elements, ports
def connect_ports_to_y(
    list_ports,
    y="north",
    separation=10.0,
    bend_radius=BEND_RADIUS,
    x0_left=None,
    x0_right=None,
    extension_length=0,
    extend_left=0,
    extend_right=0,
    routing_func=connect_strip,
    routing_func_args=None,
    backward_port_side_split_index=0,
):
    """Route ports to a horizontal line at position ``y`` (north or south).

    Args:
        list_ports: reasonably well behaved list of ports, i.e.
            ports facing north are norther than any other ports,
            ports facing south are souther, etc.
        y: float or string.
            If float: y coordinate to which the ports will be routed.
            If "north"/"south": route to that side of the ports.
        separation: spacing between routed waveguides.
        bend_radius: bend radius passed to ``routing_func``.
        x0_left / x0_right: explicit override for the left/right x levels
            (``None`` means "compute automatically"; an explicit 0.0 is
            honored).
        extension_length: offset of the output ports along y (automatically
            negated when routing south).
        extend_left / extend_right: extra clearance added left/right of the
            computed x levels.
        routing_func: point-to-point routing function.
        routing_func_args: extra keyword arguments for ``routing_func``.
        backward_port_side_split_index: index in the left-to-right sorted
            list of backward-facing ports; ports with a strictly larger
            index are routed right, the others left.

    Returns:
        - a list of connectors which can be added to an element list
        - a list of the new optical ports
    """
    # Fresh dict each call instead of a shared mutable default argument.
    if routing_func_args is None:
        routing_func_args = {}

    # When routing south, the extension points in the -y direction.
    if y == "south" and extension_length > 0:
        extension_length = -extension_length

    # Classify ports by facing direction with a +/-45 degree tolerance.
    da = 45
    north_ports = [p for p in list_ports if p.angle > 90 - da and p.angle < 90 + da]
    south_ports = [p for p in list_ports if p.angle > 270 - da and p.angle < 270 + da]
    east_ports = [p for p in list_ports if p.angle < da or p.angle > 360 - da]
    west_ports = [p for p in list_ports if p.angle < 180 + da and p.angle > 180 - da]

    epsilon = 1.0
    # NOTE(review): connect_ports_to_x uses ``epsilon + max(...)`` here;
    # this version uses ``bend_radius + max(...)``. Possibly intentional
    # (extra clearance for angled ports) — confirm before unifying.
    a = bend_radius + max(bend_radius, separation)
    xs = [p.x for p in list_ports]
    ys = [p.y for p in list_ports]

    # Use explicit ``is None`` checks: the previous ``x0 or default``
    # idiom silently discarded an explicit 0.0 override.
    if x0_right is None:
        x0_right = max(xs) + a
    x0_right += extend_right
    if x0_left is None:
        x0_left = min(xs) - a
    x0_left -= extend_left

    if y == "north":
        y = (
            max([p.y + a * np.abs(np.cos(p.angle * np.pi / 180)) for p in list_ports])
            + epsilon
        )
    elif y == "south":
        y = (
            min([p.y - a * np.abs(np.cos(p.angle * np.pi / 180)) for p in list_ports])
            - epsilon
        )
    # otherwise ``y`` is assumed to already be a numeric coordinate;
    # an invalid value is caught by the north/south comparison below

    sort_key_west_to_east = lambda p: p.x
    sort_key_east_to_west = lambda p: -p.x
    sort_key_south_to_north = lambda p: p.y
    sort_key_north_to_south = lambda p: -p.y

    if y <= min(ys):
        # routing toward the south side
        sort_key_east = sort_key_south_to_north
        sort_key_west = sort_key_south_to_north
        forward_ports = south_ports
        backward_ports = north_ports
        angle = 90.0
    elif y >= max(ys):
        # routing toward the north side
        sort_key_west = sort_key_north_to_south
        sort_key_east = sort_key_north_to_south
        forward_ports = north_ports
        backward_ports = south_ports
        angle = -90.0
    else:
        raise ValueError("y should be either to the north or to the south of all ports")

    # Routing order: backward ports split left/right (backward ports face
    # away from y), then west ports, then forward ports, then east ports.
    west_ports.sort(key=sort_key_west)
    east_ports.sort(key=sort_key_east)
    forward_ports.sort(key=sort_key_west_to_east)
    # (A redundant east-to-west pre-sort was removed here: it was
    # immediately overridden by this stable west-to-east sort.)
    backward_ports.sort(key=sort_key_west_to_east)

    backward_ports_thru_west = backward_ports[0:backward_port_side_split_index]
    backward_ports_thru_east = backward_ports[backward_port_side_split_index:]
    backward_ports_thru_west.sort(key=sort_key_west_to_east)
    backward_ports_thru_east.sort(key=sort_key_east_to_west)

    elements = []
    ports = []

    def add_port(p, x, l_elements, l_ports, start_straight=0.01):
        new_port = p._copy()
        new_port.angle = angle
        new_port.position = (x, y + extension_length)

        # Port already sits at its destination: expose it without routing.
        if np.sum(np.abs((new_port.position - p.position) ** 2)) < 1e-12:
            l_ports += [flipped(new_port)]
            return

        try:
            l_elements += [
                routing_func(
                    p,
                    new_port,
                    start_straight=start_straight,
                    bend_radius=bend_radius,
                    **routing_func_args
                )
            ]
            l_ports += [flipped(new_port)]
        except Exception as e:
            # Keep the diagnostic dump: it identifies the offending pair.
            print("**************************")
            print("Could not connect")
            print(p)
            print(new_port)
            print("**************************")
            raise e

    x_optical_left = x0_left
    for p in west_ports:
        add_port(p, x_optical_left, elements, ports)
        x_optical_left -= separation

    for p in forward_ports:
        add_port(p, p.x, elements, ports)

    x_optical_right = x0_right
    for p in east_ports:
        add_port(p, x_optical_right, elements, ports)
        x_optical_right += separation

    # Backward ports get a growing start_straight so the routes nest.
    start_straight = 0.01
    for p in backward_ports_thru_east:
        add_port(p, x_optical_right, elements, ports, start_straight=start_straight)
        x_optical_right += separation
        start_straight += separation

    start_straight = 0.01
    for p in backward_ports_thru_west:
        add_port(p, x_optical_left, elements, ports, start_straight=start_straight)
        x_optical_left -= separation
        start_straight += separation

    return elements, ports
def demo():
    """Visual demo: route the ports of four dummy components, one per side."""
    from pp.component import Component
    from pp.layers import LAYER

    def dummy():
        # Build a rectangle with four ports on each of its four edges.
        cmp = Component()
        xs = [0.0, 10.0, 25.0, 50.0]
        ys = [0.0, 10.0, 25.0, 50.0]
        a = 5
        xl = min(xs) - a
        xr = max(xs) + a
        yb = min(ys) - a
        yt = max(ys) + a
        cmp.add_polygon([(xl, yb), (xl, yt), (xr, yt), (xr, yb)], LAYER.WG)
        for i, y in enumerate(ys):
            # west- and east-facing ports at each y
            p0 = (xl, y)
            p1 = (xr, y)
            cmp.add_port(name="W{}".format(i), midpoint=p0, orientation=180, width=0.5)
            cmp.add_port(name="E{}".format(i), midpoint=p1, orientation=0, width=0.5)
        for i, x in enumerate(xs):
            # south- and north-facing ports at each x
            p0 = (x, yb)
            p1 = (x, yt)
            cmp.add_port(name="S{}".format(i), midpoint=p0, orientation=270, width=0.5)
            cmp.add_port(name="N{}".format(i), midpoint=p1, orientation=90, width=0.5)
        return cmp

    def top_level():
        # Place one dummy per corner and route each to a different side.
        cmp = Component()
        _dummy_t = dummy()
        sides = ["north", "south", "east", "west"]
        positions = [(0, 0), (400, 0), (400, 400), (0, 400)]
        for pos, side in zip(positions, sides):
            dummy_ref = _dummy_t.ref(position=pos)
            cmp.add(dummy_ref)
            conns, ports = route_ports_to_side(dummy_ref, side)
            for e in conns:
                cmp.add(e)
            for i, p in enumerate(ports):
                cmp.add_port(name="{}{}".format(side[0], i), port=p)
        return cmp

    pp.show(top_level())


if __name__ == "__main__":
    demo()
| |
import datetime
import os
import re
import shutil
from . import util
# Any .py file in the versions directory is considered a revision script.
_rev_file = re.compile(r'.*\.py$')
# Legacy (pre-0.2) naming scheme: the revision id is the filename itself.
_legacy_rev = re.compile(r'([a-f0-9]+)\.py$')
# Matches module-level upgrade_<rev> / downgrade_<rev> function names.
_mod_def_re = re.compile(r'(upgrade|downgrade)_([a-z0-9]+)')
# Word runs used to build a filename slug from a revision message.
_slug_re = re.compile(r'\w+')
# Default filename template for newly generated revision scripts.
_default_file_template = "%(rev)s_%(slug)s"
# Relative revision specifier, e.g. "+2" or "-1".
_relative_destination = re.compile(r'(?:\+|-)\d+')
class ScriptDirectory(object):
    """Provides operations upon an Alembic script directory.

    This object is useful to get information as to current revisions,
    most notably being able to get at the "head" revision, for schemes
    that want to test if the current revision in the database is the most
    recent::

        from alembic.script import ScriptDirectory
        from alembic.config import Config
        config = Config()
        config.set_main_option("script_location", "myapp:migrations")
        script = ScriptDirectory.from_config(config)

        head_revision = script.get_current_head()

    """

    def __init__(self, dir, file_template=_default_file_template,
                 truncate_slug_length=40):
        self.dir = dir
        self.versions = os.path.join(self.dir, 'versions')
        self.file_template = file_template
        self.truncate_slug_length = truncate_slug_length or 40
        if not os.access(dir, os.F_OK):
            raise util.CommandError("Path doesn't exist: %r. Please use "
                                    "the 'init' command to create a new "
                                    "scripts folder." % dir)

    @classmethod
    def from_config(cls, config):
        """Produce a new :class:`.ScriptDirectory` given a :class:`.Config`
        instance.

        The :class:`.Config` need only have the ``script_location`` key
        present.
        """
        script_location = config.get_main_option('script_location')
        if script_location is None:
            raise util.CommandError("No 'script_location' key "
                                    "found in configuration.")
        truncate_slug_length = config.get_main_option("truncate_slug_length")
        if truncate_slug_length is not None:
            truncate_slug_length = int(truncate_slug_length)
        return ScriptDirectory(
            util.coerce_resource_to_filename(script_location),
            file_template=config.get_main_option(
                'file_template',
                _default_file_template),
            truncate_slug_length=truncate_slug_length
        )

    def walk_revisions(self, base="base", head="head"):
        """Iterate through all revisions.

        This is actually a breadth-first tree traversal,
        with leaf nodes being heads.
        """
        if head == "head":
            heads = set(self.get_heads())
        else:
            heads = set([head])
        while heads:
            todo = set(heads)
            heads = set()
            for head in todo:
                # NOTE(review): ``break`` here abandons the remaining
                # heads in this pass; kept as-is to preserve the historic
                # traversal order — confirm before changing to continue.
                if head in heads:
                    break
                for sc in self.iterate_revisions(head, base):
                    if sc.is_branch_point and sc.revision not in todo:
                        # defer branch points to the next pass
                        heads.add(sc.revision)
                        break
                    else:
                        yield sc

    def get_revision(self, id_):
        """Return the :class:`.Script` instance with the given rev id."""
        id_ = self.as_revision_number(id_)
        try:
            return self._revision_map[id_]
        except KeyError:
            # do a partial lookup: allow an unambiguous prefix of a
            # revision id
            revs = [x for x in self._revision_map
                    if x is not None and x.startswith(id_)]
            if not revs:
                raise util.CommandError("No such revision '%s'" % id_)
            elif len(revs) > 1:
                raise util.CommandError(
                    "Multiple revisions start "
                    "with '%s', %s..." % (
                        id_,
                        ", ".join("'%s'" % r for r in revs[0:3])
                    ))
            else:
                return self._revision_map[revs[0]]

    # the 0.2 name, for backwards compat.
    _get_rev = get_revision

    def as_revision_number(self, id_):
        """Convert a symbolic revision, i.e. 'head' or 'base', into
        an actual revision number."""
        if id_ == 'head':
            id_ = self.get_current_head()
        elif id_ == 'base':
            id_ = None
        return id_

    # the 0.2 name, for backwards compat.
    _as_rev_number = as_revision_number

    def iterate_revisions(self, upper, lower):
        """Iterate through script revisions, starting at the given
        upper revision identifier and ending at the lower.

        The traversal uses strictly the `down_revision`
        marker inside each migration script, so
        it is a requirement that upper >= lower,
        else you'll get nothing back.

        The iterator yields :class:`.Script` objects.
        """
        # "+N" / "-N" relative specifiers are resolved against the full
        # traversal, then sliced down to the requested window.
        if upper is not None and _relative_destination.match(upper):
            relative = int(upper)
            revs = list(self._iterate_revisions("head", lower))
            revs = revs[-relative:]
            if len(revs) != abs(relative):
                raise util.CommandError("Relative revision %s didn't "
                                        "produce %d migrations" % (upper, abs(relative)))
            return iter(revs)
        elif lower is not None and _relative_destination.match(lower):
            relative = int(lower)
            revs = list(self._iterate_revisions(upper, "base"))
            revs = revs[0:-relative]
            if len(revs) != abs(relative):
                raise util.CommandError("Relative revision %s didn't "
                                        "produce %d migrations" % (lower, abs(relative)))
            return iter(revs)
        else:
            return self._iterate_revisions(upper, lower)

    def _iterate_revisions(self, upper, lower):
        # Walk down_revision links from upper until lower is reached.
        lower = self.get_revision(lower)
        upper = self.get_revision(upper)
        orig = lower.revision if lower else 'base', \
            upper.revision if upper else 'base'
        script = upper
        while script != lower:
            if script is None and lower is not None:
                raise util.CommandError(
                    "Revision %s is not an ancestor of %s" % orig)
            yield script
            downrev = script.down_revision
            script = self._revision_map[downrev]

    def _upgrade_revs(self, destination, current_rev):
        # (fn, from_rev, to_rev, doc) tuples, oldest-first.
        revs = self.iterate_revisions(destination, current_rev)
        return [
            (script.module.upgrade, script.down_revision, script.revision,
             script.doc)
            for script in reversed(list(revs))
        ]

    def _downgrade_revs(self, destination, current_rev):
        # (fn, from_rev, to_rev, doc) tuples, newest-first.
        revs = self.iterate_revisions(current_rev, destination)
        return [
            (script.module.downgrade, script.revision, script.down_revision,
             script.doc)
            for script in revs
        ]

    def run_env(self):
        """Run the script environment.

        This basically runs the ``env.py`` script present
        in the migration environment. It is called exclusively
        by the command functions in :mod:`alembic.command`.
        """
        util.load_python_file(self.dir, 'env.py')

    @property
    def env_py_location(self):
        return os.path.abspath(os.path.join(self.dir, "env.py"))

    @util.memoized_property
    def _revision_map(self):
        # revision id -> Script; the ``None`` key is the sentinel parent
        # of the base revision.
        map_ = {}
        for file_ in os.listdir(self.versions):
            script = Script._from_filename(self.versions, file_)
            if script is None:
                continue
            if script.revision in map_:
                util.warn("Revision %s is present more than once" %
                          script.revision)
            map_[script.revision] = script
        for rev in map_.values():
            if rev.down_revision is None:
                continue
            if rev.down_revision not in map_:
                # orphaned reference: detach it so traversal still works
                util.warn("Revision %s referenced from %s is not present"
                          % (rev.down_revision, rev))
                rev.down_revision = None
            else:
                map_[rev.down_revision].add_nextrev(rev.revision)
        map_[None] = None
        return map_

    def _rev_path(self, rev_id, message, create_date):
        # Build the filename from the configured template; the slug is
        # derived from the message and truncated on a word boundary.
        slug = "_".join(_slug_re.findall(message or "")).lower()
        if len(slug) > self.truncate_slug_length:
            slug = slug[:self.truncate_slug_length].rsplit('_', 1)[0] + '_'
        filename = "%s.py" % (
            self.file_template % {
                'rev': rev_id,
                'slug': slug,
                'year': create_date.year,
                'month': create_date.month,
                'day': create_date.day,
                'hour': create_date.hour,
                'minute': create_date.minute,
                'second': create_date.second
            }
        )
        return os.path.join(self.versions, filename)

    def get_current_head(self):
        """Return the current head revision.

        If the script directory has multiple heads
        due to branching, an error is raised.

        Returns a string revision number.
        """
        current_heads = self.get_heads()
        if len(current_heads) > 1:
            raise util.CommandError('Only a single head is supported. The '
                    'script directory has multiple heads (due to branching), which '
                    'must be resolved by manually editing the revision files to '
                    'form a linear sequence. Run `alembic branches` to see the '
                    'divergence(s).')
        # Bug fix: a leftover unconditional raise here made this method
        # fail even when exactly one valid head existed.
        if current_heads:
            return current_heads[0]
        else:
            return None

    _current_head = get_current_head
    """the 0.2 name, for backwards compat."""

    def get_heads(self):
        """Return all "head" revisions as strings.

        Returns a list of string revision numbers.

        This is normally a list of length one,
        unless branches are present. The
        :meth:`.ScriptDirectory.get_current_head()` method
        can be used normally when a script directory
        has only one head.
        """
        heads = []
        for script in self._revision_map.values():
            if script and script.is_head:
                heads.append(script.revision)
        return heads

    def get_base(self):
        """Return the "base" revision as a string.

        This is the revision number of the script that
        has a ``down_revision`` of None.

        Behavior is not defined if more than one script
        has a ``down_revision`` of None.
        """
        for script in self._revision_map.values():
            # Guard against the ``None`` sentinel entry in the map
            # (previously this could raise AttributeError).
            if script and script.down_revision is None \
                    and script.revision in self._revision_map:
                return script.revision
        return None

    def _generate_template(self, src, dest, **kw):
        util.status("Generating %s" % os.path.abspath(dest),
                    util.template_to_file,
                    src,
                    dest,
                    **kw
                    )

    def _copy_file(self, src, dest):
        util.status("Generating %s" % os.path.abspath(dest),
                    shutil.copy,
                    src, dest)

    def generate_revision(self, revid, message, refresh=False, **kw):
        """Generate a new revision file.

        This runs the ``script.py.mako`` template, given
        template arguments, and creates a new file.

        :param revid: String revision id. Typically this
         comes from ``alembic.util.rev_id()``.
        :param message: the revision message, the one passed
         by the -m argument to the ``revision`` command.
        :param refresh: when True, the in-memory state of this
         :class:`.ScriptDirectory` will be updated with a new
         :class:`.Script` instance representing the new revision;
         the :class:`.Script` instance is returned.
         If False, the file is created but the state of the
         :class:`.ScriptDirectory` is unmodified; ``None``
         is returned.
        """
        current_head = self.get_current_head()
        create_date = datetime.datetime.now()
        path = self._rev_path(revid, message, create_date)
        self._generate_template(
            os.path.join(self.dir, "script.py.mako"),
            path,
            up_revision=str(revid),
            down_revision=current_head,
            create_date=create_date,
            message=message if message is not None else ("empty message"),
            **kw
        )
        if refresh:
            script = Script._from_path(path)
            self._revision_map[script.revision] = script
            if script.down_revision:
                self._revision_map[script.down_revision].\
                    add_nextrev(script.revision)
            return script
        else:
            return None
class Script(object):
    """Represent a single revision file in a ``versions/`` directory.

    The :class:`.Script` instance is returned by methods
    such as :meth:`.ScriptDirectory.iterate_revisions`.
    """

    # Set of revision ids that have this revision as their down_revision.
    nextrev = frozenset()

    def __init__(self, module, rev_id, path):
        self.module = module
        self.revision = rev_id
        self.path = path
        # down_revision is optional in the script module; None marks base.
        self.down_revision = getattr(module, 'down_revision', None)

    revision = None
    """The string revision number for this :class:`.Script` instance."""

    module = None
    """The Python module representing the actual script itself."""

    path = None
    """Filesystem path of the script."""

    down_revision = None
    """The ``down_revision`` identifier within the migration script."""

    @property
    def doc(self):
        """Return the first paragraph of the docstring given in the script."""
        return re.split("\n\n", self.longdoc)[0]

    @property
    def longdoc(self):
        """Return the full docstring given in the script."""
        doc = self.module.__doc__
        if doc:
            # NOTE(review): .decode() assumes a Python 2 bytes docstring
            # whose encoding was recorded by the loader — confirm py3 path.
            if hasattr(self.module, "_alembic_source_encoding"):
                doc = doc.decode(self.module._alembic_source_encoding)
            return doc.strip()
        else:
            return ""

    def add_nextrev(self, rev):
        # frozenset.union returns a new set; nextrev stays immutable.
        self.nextrev = self.nextrev.union([rev])

    @property
    def is_head(self):
        """Return True if this :class:`.Script` is a 'head' revision.

        This is determined based on whether any other :class:`.Script`
        within the :class:`.ScriptDirectory` refers to this
        :class:`.Script`. Multiple heads can be present.
        """
        return not bool(self.nextrev)

    @property
    def is_branch_point(self):
        """Return True if this :class:`.Script` is a branch point.

        A branchpoint is defined as a :class:`.Script` which is referred
        to by more than one succeeding :class:`.Script`, that is more
        than one :class:`.Script` has a `down_revision` identifier pointing
        here.
        """
        return len(self.nextrev) > 1

    @property
    def log_entry(self):
        # Multi-line human-readable summary used by the "history" command.
        return \
            "Rev: %s%s%s\n" \
            "Parent: %s\n" \
            "Path: %s\n" \
            "\n%s\n" % (
                self.revision,
                " (head)" if self.is_head else "",
                " (branchpoint)" if self.is_branch_point else "",
                self.down_revision,
                self.path,
                "\n".join(
                    "    %s" % para
                    for para in self.longdoc.splitlines()
                )
            )

    def __str__(self):
        return "%s -> %s%s%s, %s" % (
            self.down_revision,
            self.revision,
            " (head)" if self.is_head else "",
            " (branchpoint)" if self.is_branch_point else "",
            self.doc)

    @classmethod
    def _from_path(cls, path):
        # Convenience wrapper: split a full path into (dir, filename).
        dir_, filename = os.path.split(path)
        return cls._from_filename(dir_, filename)

    @classmethod
    def _from_filename(cls, dir_, filename):
        # Returns None for non-revision files so callers can skip them.
        if not _rev_file.match(filename):
            return None
        module = util.load_python_file(dir_, filename)
        if not hasattr(module, "revision"):
            # attempt to get the revision id from the script name,
            # this for legacy only
            m = _legacy_rev.match(filename)
            if not m:
                raise util.CommandError(
                    "Could not determine revision id from filename %s. "
                    "Be sure the 'revision' variable is "
                    "declared inside the script (please see 'Upgrading "
                    "from Alembic 0.1 to 0.2' in the documentation)."
                    % filename)
            else:
                revision = m.group(1)
        else:
            revision = module.revision
        return Script(module, revision, os.path.join(dir_, filename))
| |
import sys, os.path, time, pickle, bz2
from pprint import pprint
from zope.interface import implements
from twisted.python import usage
from twisted.internet import reactor
from foolscap.logging.interfaces import IIncidentReporter
from foolscap.logging import levels, app_versions
from foolscap.eventual import eventually
from foolscap import base32
# Timestamp format for incident logfile names (UTC; a "Z" suffix is appended).
TIME_FORMAT = "%Y-%m-%d--%H-%M-%S"
class IncidentQualifier:
    """I am responsible for deciding what qualifies as an Incident. I look at
    the event stream and watch for a 'triggering event', then signal my
    handler when the events that I've seen are severe enough to warrant
    recording the recent history in an 'incident log file'.

    My event() method should be called with each event. When I declare an
    incident, I will call my handler's declare_incident(ev) method, with the
    triggering event. Since event() will be fired from an eventual-send
    queue, the incident will be declared slightly later than the triggering
    event.
    """

    # Default: event() is a safe no-op until set_handler() is called.
    # Without this class attribute, an event arriving before a handler was
    # attached raised AttributeError.
    handler = None

    def set_handler(self, handler):
        self.handler = handler

    def check_event(self, ev):
        """Return True if this event should trigger an incident."""
        # events at WEIRD severity or above qualify
        if ev['level'] >= levels.WEIRD:
            return True
        return False

    def event(self, ev):
        if self.check_event(ev) and self.handler:
            self.handler.declare_incident(ev)
class IncidentReporter:
    """Once an Incident has been declared, I am responsible for making a
    durable record all relevant log events. I do this by creating a logfile
    (a pickle of log event dictionaries) and copying everything from the
    history buffer into it. I can copy a small number of future events into
    it as well, to record what happens as the application copes with the
    situation.

    I am responsible for just a single incident.

    I am created with a reference to a FoolscapLogger instance, from which I
    will grab the contents of the history buffer.

    When I have closed the incident logfile, I will notify the logger by
    calling their incident_recorded() method, passing it the local filename
    of the logfile I created and the triggering event. This can be used to
    notify remote subscribers about the incident that just occurred.
    """
    implements(IIncidentReporter)

    TRAILING_DELAY = 5.0  # gather 5 seconds of post-trigger events
    TRAILING_EVENT_LIMIT = 100  # or 100 events, whichever comes first

    def __init__(self, basedir, logger, tubid_s):
        self.basedir = basedir
        self.logger = logger
        self.tubid_s = tubid_s
        self.active = True

    def is_active(self):
        return self.active

    def format_time(self, when):
        # UTC timestamp with an explicit "Z" suffix
        return time.strftime(TIME_FORMAT, time.gmtime(when)) + "Z"

    def incident_declared(self, triggering_event):
        """Start recording: dump the history buffer and (optionally)
        subscribe for a short window of trailing events."""
        self.trigger = triggering_event
        # choose a name for the logfile
        now = time.time()
        unique = os.urandom(4)
        unique_s = base32.encode(unique)
        self.name = "incident-%s-%s" % (self.format_time(now), unique_s)
        filename = self.name + ".flog"
        self.abs_filename = os.path.join(self.basedir, filename)
        self.abs_filename_bz2 = self.abs_filename + ".bz2"
        self.abs_filename_bz2_tmp = self.abs_filename + ".bz2.tmp"
        # open logfile. We use both an uncompressed one and a compressed one.
        self.f1 = open(self.abs_filename, "wb")
        self.f2 = bz2.BZ2File(self.abs_filename_bz2_tmp, "wb")
        # write header with triggering_event
        header = {"header": {"type": "incident",
                             "trigger": triggering_event,
                             "versions": app_versions.versions,
                             "pid": os.getpid(),
                             }}
        pickle.dump(header, self.f1)
        pickle.dump(header, self.f2)

        if self.TRAILING_DELAY is not None:
            # subscribe to events that occur after this one
            self.still_recording = True
            self.remaining_events = self.TRAILING_EVENT_LIMIT
            self.logger.addObserver(self.trailing_event)

        # use self.logger.buffers, copy events into logfile.
        # Sort by event number; key= works on both py2 and py3, unlike the
        # old cmp-based comparator.
        events = list(self.logger.get_buffered_events())
        events.sort(key=lambda e: e['num'])
        for e in events:
            wrapper = {"from": self.tubid_s,
                       "rx_time": now,
                       "d": e}
            pickle.dump(wrapper, self.f1)
            pickle.dump(wrapper, self.f2)
        self.f1.flush()
        # the BZ2File has no flush method

        if self.TRAILING_DELAY is None:
            self.active = False
            eventually(self.finished_recording)
        else:
            # now we wait for the trailing events to arrive
            self.timer = reactor.callLater(self.TRAILING_DELAY,
                                           self.stop_recording)

    def trailing_event(self, ev):
        # record post-trigger events until the limit is reached
        if not self.still_recording:
            return
        self.remaining_events -= 1
        if self.remaining_events >= 0:
            wrapper = {"from": self.tubid_s,
                       "rx_time": time.time(),
                       "d": ev}
            pickle.dump(wrapper, self.f1)
            pickle.dump(wrapper, self.f2)
            return
        self.stop_recording()

    def new_trigger(self, ev):
        # it is too late to add this to the header. We could add it to a
        # trailer, though.
        pass

    def stop_recording(self):
        self.still_recording = False
        self.active = False
        if self.timer.active():
            self.timer.cancel()
        self.logger.removeObserver(self.trailing_event)
        # Observers are notified through an eventually() call, so we might
        # get a few more after the observer is removed. We use
        # self.still_recording to hush them.
        eventually(self.finished_recording)

    def finished_recording(self):
        # finalize the compressed file first, then drop the uncompressed one
        self.f2.close()
        os.rename(self.abs_filename_bz2_tmp, self.abs_filename_bz2)
        # the compressed logfile has closed successfully. We no longer care
        # about the uncompressed one.
        self.f1.close()
        os.unlink(self.abs_filename)

        # now we can tell the world about our new incident report
        eventually(self.logger.incident_recorded,
                   self.abs_filename_bz2, self.name, self.trigger)
class NonTrailingIncidentReporter(IncidentReporter):
    # Variant that records only the history buffer: TRAILING_DELAY=None
    # disables the post-trigger observation window entirely.
    TRAILING_DELAY = None
class ClassifyOptions(usage.Options):
    """Option parser for the ``flogtool classify-incident`` subcommand."""

    # stdout/stderr are attributes so tests can substitute file-like objects
    stdout = sys.stdout
    stderr = sys.stderr
    synopsis = "Usage: flogtool classify-incident [options] INCIDENTFILE.."
    optFlags = [
        ("verbose", "v", "show trigger details for unclassifiable incidents"),
    ]
    optParameters = [
        ("classifier-directory", "c", ".",
         "directory with classify_*.py functions to import"),
    ]

    def parseArgs(self, *files):
        # positional arguments: one or more incident files to classify
        self.files = files
class IncidentClassifierBase:
def __init__(self):
self.classifiers = []
def add_classifier(self, f):
# there are old .tac files that call this explicitly
self.classifiers.append(f)
def add_classify_files(self, plugindir):
plugindir = os.path.expanduser(plugindir)
for fn in os.listdir(plugindir):
if not (fn.startswith("classify_") and fn.endswith(".py")):
continue
f = open(os.path.join(plugindir, fn), "r")
localdict = {}
exec f in localdict
self.add_classifier(localdict["classify_incident"])
def load_incident(self, abs_fn):
assert abs_fn.endswith(".bz2")
f = bz2.BZ2File(abs_fn, "r")
header = pickle.load(f)["header"]
events = []
while True:
try:
wrapped = pickle.load(f)
except (EOFError, ValueError):
break
events.append(wrapped["d"])
f.close()
return (header, events)
def classify_incident(self, incident):
categories = set()
for f in self.classifiers:
(header, events) = incident
trigger = header["trigger"]
c = f(trigger)
if c: # allow the classifier to return None, or [], or ["foo"]
if isinstance(c, str):
c = [c] # or just "foo"
categories.update(c)
if not categories:
categories.add("unknown")
return categories
class IncidentClassifier(IncidentClassifierBase):
    """Command-line front end: classify incident files and print results."""

    def run(self, options):
        # Load classify_*.py plugins, then classify each incident file
        # listed on the command line.
        self.add_classify_files(options["classifier-directory"])
        out = options.stdout
        for f in options.files:
            abs_fn = os.path.expanduser(f)
            incident = self.load_incident(abs_fn)
            categories = self.classify_incident(incident)
            print >>out, "%s: %s" % (f, ",".join(sorted(categories)))
            if list(categories) == ["unknown"] and options["verbose"]:
                # dump the trigger event to help write a new classifier
                (header, events) = incident
                trigger = header["trigger"]
                from foolscap.logging.log import format_message
                print >>out, format_message(trigger)
                pprint(trigger, stream=out)
                if 'failure' in trigger:
                    print >>out," FAILURE:"
                    lines = str(trigger['failure']).split("\n")
                    for line in lines:
                        print >>out, " %s" % (line,)
                print >>out, ""
| |
'''
ChoiceGroup & Choice managing views
'''
from django import forms
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.views.generic import UpdateView
from django.views.generic.edit import ModelFormMixin
from ngw.core import perms
from ngw.core.models import ChoiceGroup, ContactField
from ngw.core.views.generic import InGroupAcl
#######################################################################
#
# Choice groups edit / add
#
#######################################################################
class ChoicesWidget(forms.MultiWidget):
    """Widget editing ``ndisplay`` (value, key) pairs as paired text inputs."""

    def __init__(self, ndisplay, attrs=None):
        widgets = []
        # Copy the caller's attrs into two independent dicts. Previously,
        # when ``attrs`` was a non-empty dict, attrs_value and attrs_key
        # aliased the SAME object (``attrs or {}``): the second 'style'
        # assignment clobbered the first, both widget groups got the key
        # style, and the caller's dict was mutated.
        attrs_value = dict(attrs) if attrs else {}
        attrs_key = dict(attrs) if attrs else {}
        attrs_value['style'] = 'width:85%'
        attrs_key['style'] = 'width:7%; margin-left:1ex;'
        for i in range(ndisplay):
            # one wide input for the value, one narrow input for the key
            widgets.append(forms.TextInput(attrs=attrs_value))
            widgets.append(forms.TextInput(attrs=attrs_key))
        super().__init__(widgets, attrs)
        self.ndisplay = ndisplay

    def decompress(self, value):
        """Split the '\\u001f'-joined compressed string into sub-values."""
        if value:
            return value.split('\u001f')
        # NOTE(review): the widget list holds 2*ndisplay inputs but only
        # ndisplay Nones are returned here — behavior preserved; confirm.
        return [None] * self.ndisplay
class ChoicesField(forms.MultiValueField):
    """Field holding ``ndisplay`` (value, key) CharField pairs.

    compress() serializes the pair list into one '\\u001f'-joined string;
    clean() validates that no two non-empty lines share the same key.
    """
    def __init__(self, ndisplay, *args, **kwargs):
        fields = []
        for _ in range(ndisplay):
            fields.extend((forms.CharField(), forms.CharField()))
        super().__init__(fields, *args, **kwargs)
        self.ndisplay = ndisplay
    def compress(self, data_list):
        return '\u001f'.join(data_list) if data_list else None
    def clean(self, value):
        compressed = forms.MultiValueField.clean(self, value)
        parts = compressed.split('\u001f') if compressed else []
        # print('possibles_values=', repr(parts))
        # Reject duplicate keys: they are used as the ids inside <select>
        # elements, so two identical keys would clash.
        seen_keys = []
        for idx in range(0, len(parts) - 1, 2):
            val, key = parts[idx], parts[idx + 1]
            if not val:
                continue  # lines without a value are ignored entirely
            if not key:
                continue  # an empty key means "auto-generate later"
            if key in seen_keys:
                raise forms.ValidationError(_(
                    'You cannot have two keys with the same value. Leave empty'
                    ' for automatic generation.'))
            seen_keys.append(key)
        return parts
class ChoiceGroupForm(forms.ModelForm):
    """ModelForm for a ChoiceGroup plus its editable list of choices.

    The choices themselves are not model fields: they are edited through an
    extra 'possible_values' ChoicesField holding alternating value/key
    strings, then synchronized with the related Choice rows in save().
    """
    class Meta:
        model = ChoiceGroup
        fields = ['sort_by_key']
    def __init__(self, *args, **kwargs):
        choicegroup = kwargs.get('instance', None)
        super().__init__(*args, **kwargs)
        ndisplay = 0
        self.initial['possible_values'] = []
        if choicegroup:
            choices = choicegroup.ordered_choices
            for c in choices:
                # c is (key, value); the widget displays value first, key second
                self.initial['possible_values'].append(c[1])
                self.initial['possible_values'].append(c[0])
                ndisplay += 1
        for i in range(3): # add 3 blank lines to add data
            self.initial['possible_values'].append('')
            self.initial['possible_values'].append('')
            ndisplay += 1
        # the field must be built here (not declaratively) because ndisplay
        # depends on how many choices the instance already has
        self.fields['possible_values'] = ChoicesField(
            label=_('Possible values'),
            required=False,
            widget=ChoicesWidget(ndisplay=ndisplay),
            ndisplay=ndisplay)
    def save(self):
        """Save the group, then create/update/delete Choice rows to match.

        Keys left empty by the user get sequential numeric keys, generated
        after the highest numeric key explicitly entered.
        """
        choicegroup = super().save()
        possibles_values = self.cleaned_data['possible_values']
        choices = {}
        # first pass: collect lines with explicit keys, and track the highest
        # numeric key so auto-generated keys never collide with it
        auto_key = 0
        for i in range(len(possibles_values)//2):
            v, k = possibles_values[2*i], possibles_values[2*i+1]
            if not v:
                continue # ignore lines whose value is empty
            if k: # key is not left empty for automatic generation
                if k.isdigit():
                    intk = int(k)
                    if intk > auto_key:
                        auto_key = intk
                choices[k] = v
        auto_key += 1
        # second pass: generate keys for the lines whose key was left empty
        for i in range(len(possibles_values)//2):
            v, k = possibles_values[2*i], possibles_values[2*i+1]
            if not v:
                continue # ignore lines whose value is empty
            if not k: # key is left empty for automatic generation
                k = str(auto_key)
                auto_key += 1
                choices[k] = v
        # print('choices=', choices)
        # reconcile with the database: update kept keys, delete removed ones
        for c in choicegroup.choices.all():
            k = c.key
            if k in choices.keys():
                # print('UPDATING', k)
                c.value = choices[k]
                c.save()
                del choices[k]
            else: # that key has been deleted
                # print('DELETING', k)
                c.delete()
        # whatever is left in choices is new: create it
        for k, v in choices.items():
            # print('ADDING', k)
            choicegroup.choices.create(key=k, value=v)
        return choicegroup
class ChoiceEditMixin(ModelFormMixin):
    """Mixin shared by the choice-group edit views.

    Resolves the edited ChoiceGroup from the ContactField id present in the
    URL (checking it belongs to the current contact group) and builds the
    template context: title, object id, and the smart navigation bar.
    """
    template_name = 'choice_edit.html'
    form_class = ChoiceGroupForm
    model = ChoiceGroup
    # pk_url_kwarg = 'id'
    def get_object(self):
        # The 'id' URL parameter is the ContactField id, not the ChoiceGroup id.
        fid = self.kwargs.get('id')
        field = ContactField.objects.get(pk=fid)
        if field.contact_group_id != self.contactgroup.id:
            raise PermissionDenied
        self.field = field
        return field.choice_group
    def form_valid(self, form):
        request = self.request
        # choicegroup = form.save()
        form.save()
        # TODO show field name
        messages.add_message(request, messages.SUCCESS,
                             _('Choices have been saved successfully.'))
        return HttpResponseRedirect('..')
    def get_context_data(self, **kwargs):
        context = {}
        cg = self.contactgroup
        if self.object:
            title = _('Editing choices for {}').format(self.field)
            obj_id = self.object.id  # renamed from 'id': don't shadow builtin
        else:
            title = _('Adding a new {}').format(
                ChoiceGroup.get_class_verbose_name())
            obj_id = None
        context['title'] = title
        context['id'] = obj_id
        context['objtype'] = ChoiceGroup
        context['nav'] = cg.get_smart_navbar()
        context['nav'].add_component(('fields', _('contact fields')))
        if obj_id:
            context['nav'].add_component(self.object.get_navcomponent())
            context['nav'].add_component(('choices', _('choices')))
        else:
            context['nav'].add_component(('add', _('add')))  # obsolete
        context['active_submenu'] = 'fields'
        context.update(kwargs)
        return super().get_context_data(**context)
class ChoiceEditView(InGroupAcl, ChoiceEditMixin, UpdateView):
    """Edit the choices of a contact field's first choice group (ACL checked)."""
    def check_perm_groupuser(self, group, user):
        # Editing choices requires the CHANGE_CG permission bit on the group.
        if not group.userperms & perms.CHANGE_CG:
            raise PermissionDenied
class Choice2EditView(ChoiceEditView):
    """Same as ChoiceEditView but edits the field's *second* choice group.

    NOTE(review): duplicates ChoiceEditMixin.get_object except for the final
    attribute accessed (choice_group2 instead of choice_group).
    """
    def get_object(self):
        fid = self.kwargs.get('id')
        field = ContactField.objects.get(pk=fid)
        if field.contact_group_id != self.contactgroup.id:
            raise PermissionDenied
        self.field = field
        return field.choice_group2
| |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from . import config
import psycopg2
from urllib.parse import urlparse
import sys
import time
import os
def print_color(msg, color=None):  # XXX: can't import
    """Print *msg*, wrapped in an ANSI foreground color when stdout is a TTY.

    Unknown or missing color names fall back to white (code 7).  When stdout
    is not a terminal (e.g. redirected to a file) the message is printed
    without any escape codes.
    """
    if not sys.stdout.isatty():
        print(msg)
        return
    palette = {
        "black": 0,
        "red": 1,
        "green": 2,
        "yellow": 3,
        "blue": 4,
        "magenta": 5,
        "cyan": 6,
        "white": 7,
    }
    code = palette.get(color, 7)
    # "\x1b[3Nm" selects foreground color N; "\x1b[39;49m" restores defaults
    print("\x1b[3%dm" % code + msg + "\x1b[39;49m")
# Maximum number of simultaneously cached connections per process; connect()
# evicts the oldest connection when this limit is reached.
MAX_CONS = 10
# Postgres type OIDs for temporal types.  Registered below with an identity
# cast so psycopg2 returns their values as raw strings instead of
# datetime/date/time objects.
types_mapping = {
    'date': (1082,),
    'time': (1083,),
    'timestamp': (1114, 1184),
}
for name, typeoid in types_mapping.items():
    psycopg2.extensions.register_type(psycopg2.extensions.new_type(typeoid, name, lambda x, cr: x))
# Cache of open Connection objects keyed by (dbname, schema).
connections = {}
# Per-process "current" database/schema used by get_connection().
active_db = None
active_schema = None
# Monotonically increasing id assigned to each new Connection (debug aid).
con_id=0
def connect(dbname, schema=None):
    """Return a Connection for (dbname, schema), creating and caching it.

    Evicts the oldest cached connection when MAX_CONS is reached.  The
    connection URL comes from config.get_db_url(dbname).  Raises Exception
    on any failure (the original traceback is printed to stdout).
    """
    print("DB.connect db=%s schema=%s pid=%s"%(dbname,schema,os.getpid()))
    try:
        if (dbname, schema) in connections:
            return connections[(dbname, schema)]
        if len(connections) >= MAX_CONS:
            print_color("need to close oldest connection (pid=%s, num_cons=%s)" %
                        (os.getpid(), len(connections)), "red")
            # BUGFIX: keys are (dbname, schema) tuples and schema may be None;
            # the original sorted()/",".join() of raw tuples raised TypeError
            # right when the diagnostic was needed.  Stringify first.
            print("existing connections: %s" % ",".join(sorted(str(x) for x in connections.keys())))
            close_oldest_connection()
        url = config.get_db_url(dbname)
        res = urlparse(url)
        args = {
            "host": res.hostname,
            "database": res.path[1:],  # strip the leading "/" from the path
        }
        if res.port:
            args["port"] = res.port
        if res.username:
            args["user"] = res.username
        if res.password:
            args["password"] = res.password
        db = Connection(**args)
        print(" => con_id=%s" % db.con_id)
        db._dbname = dbname
        db._schema = schema
        connections[(dbname, schema)] = db
        return db
    except Exception as e:
        import traceback
        traceback.print_exc(file=sys.stdout)
        raise Exception("Failed to connect: %s" % e)
def set_active_db(dbname):
    """Select the database used by get_connection(); resets the schema."""
    global active_db, active_schema
    active_db = dbname
    active_schema = None
def set_active_schema(schema):
    """Select the schema (within the active db) used by get_connection()."""
    global active_schema
    active_schema = schema
def get_active_db():
    """Return the currently selected database name (or None)."""
    return active_db
def get_active_schema():
    """Return the currently selected schema name (or None)."""
    return active_schema
def get_connection():
    """Return a Connection for the active db/schema, or None if none selected.

    A cached connection that was closed on the client side is discarded and
    replaced by a fresh connect() call.
    """
    #print("DB.get_connection db=%s schema=%s"%(active_db,active_schema))
    if not active_db:
        return None
    db = connections.get((active_db,active_schema))
    if db and db.is_closed():
        # stale cache entry: drop it and reconnect below
        del connections[(active_db,active_schema)]
        db = None
    if not db:
        db = connect(active_db,active_schema)
    #print("db.get_connection db=%s pid=%s back_pid=%s"%(active_db,os.getpid(),db._db.get_backend_pid()))
    return db
class Transaction:
    """Context manager running one transaction on the active connection.

    Commits when the body exits normally, rolls back when it raised.
    """
    def __enter__(self):
        self.db=get_connection()
        self.db.begin()
        # Return the connection so ``with Transaction() as db:`` works.
        # (Previously __enter__ returned None, making ``as`` bind None;
        # callers not using ``as`` are unaffected.)
        return self.db
    def __exit__(self,ex_type,ex_val,tb):
        if ex_type is None:
            self.db.commit()
        else:
            self.db.rollback()
def close_oldest_connection():
    """Close the cached connection with the oldest con_time.

    No-op when the cache is empty (the original raised KeyError(None) in
    that case).  con_time is a "%Y-%m-%d %H:%M:%S" string, so string
    comparison orders chronologically.
    """
    print("db.close_oldest_connection pid=%s" % os.getpid())
    oldest_time = None
    oldest_key = None
    for key, con in connections.items():
        if oldest_time is None or con.con_time < oldest_time:
            oldest_time = con.con_time
            oldest_key = key
    if oldest_key is None:
        return  # nothing cached
    # Connection.close() also removes the entry from `connections`.
    connections[oldest_key].close()
class Connection():
    """Thin wrapper around a psycopg2 connection.

    Tracks the owning dbname/schema (set by connect()), a creation timestamp
    con_time (used for oldest-first eviction) and a per-process con_id.
    Any failure in begin/execute/query/commit closes the connection and
    removes it from the module-level `connections` cache.
    """
    def __init__(self, **args):
        # args are psycopg2.connect keyword arguments (host, database, ...)
        global con_id
        try:
            self._db = psycopg2.connect(**args)
            # self._db.set_session(psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE) # postgres out-of-shared-memory-error
            # self._db.set_session(psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ)
            self._db.set_session(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
            self.con_time = time.strftime("%Y-%m-%d %H:%M:%S")
            self.con_id = con_id
            con_id+=1
        except Exception as e:
            raise Exception("failed to connect: %s" % e)
    def begin(self):
        """Start a transaction; requires the connection to be idle.

        Applies the schema search_path if a schema was requested.  On any
        failure the connection is closed and the error re-raised.
        """
        print(">>> db.begin db=%s schema=%s pid=%s back_pid=%s"%(self._dbname,self._schema,os.getpid(),self._db.get_backend_pid()))
        if self.is_closed():
            raise Exception("Connection is closed")
        try:
            res = self._db.get_transaction_status()
            if res != psycopg2.extensions.TRANSACTION_STATUS_IDLE:
                raise Exception("Failed to start transaction (%d)" % res)
            if self._schema:
                self.execute("SET search_path TO %s"%self._schema)
            #time.sleep(0.1) # XXX
        except Exception as e:
            print_color("WARNING: failed to start database transaction, closing connection (db=%s, pid=%s)" %
                        (self._dbname, os.getpid()), "red")
            self.close()
            raise e
    def execute(self, query, *args):
        """Execute a statement without fetching rows; closes on failure."""
        #print("DB.execute con_id=%s db=%s schema=%s q=%s"%(self.con_id,self._dbname,self._schema,query))
        if self.is_closed():
            raise Exception("Connection is closed")
        try:
            cr = self._db.cursor()
            if args:
                cr.execute(query, args)
            else:
                cr.execute(query)
        except Exception as e:
            import traceback
            traceback.print_exc()
            print_color("WARNING: failed to execute database query, closing connection (db=%s, pid=%s)" %
                        (self._dbname, os.getpid()), "red")
            print("QUERY:", query)
            print("ARGS:", args)
            self.close()
            raise e
        #print(" ...done")
    def query(self, query, *args):
        """Execute a SELECT and return all rows as a list of Row objects."""
        #print("query",query,args)
        if self.is_closed():
            raise Exception("Connection is closed")
        try:
            cr = self._db.cursor()
            cr.execute(query, args)
            col_names = [d[0] for d in cr.description]
            #print(" ...done")
            return [Row(zip(col_names, r)) for r in cr]
        except Exception as e:
            import traceback
            traceback.print_exc()
            print_color("WARNING: failed to execute database query, closing connection (db=%s, pid=%s)" %
                        (self._dbname, os.getpid()), "red")
            print("QUERY:", query)
            print("ARGS:", args)
            self.close()
            raise e
    def get(self, query, *args):
        """Return the first row of the query result, or None if no rows."""
        res = self.query(query, *args)
        return res and res[0] or None
    def commit(self):
        #print("DB.commit con_id=%s db=%s schema=%s pid=%s back_pid=%s"%(self.con_id,self._dbname,self._schema,os.getpid(),self._db.get_backend_pid()))
        if self.is_closed():
            raise Exception("Connection is closed")
        try:
            self._db.commit()
        except Exception as e:
            print_color("WARNING: failed to commit database transaction, closing connection (db=%s, pid=%s)" %
                        (self._dbname, os.getpid()), "red")
            self.close()
            raise e
    def rollback(self):
        #print("<<< db.rollback pid=%s" % os.getpid())
        if self.is_closed():
            return
        try:
            self._db.rollback()
        except Exception as e:
            print_color("WARNING: failed to rollback database transaction, closing connection (db=%s, pid=%s)" %
                        (self._dbname, os.getpid()), "red")
            self.close()
            raise e
    def close(self):
        """Close the underlying connection and drop it from the cache."""
        #print("closing database connection (db=%s, pid=%s)" % (self._dbname, os.getpid()))
        try:
            self._db.close()
        except:
            print("db connection close failed, skipping...")
            pass
        del connections[(self._dbname,self._schema)]
    def is_closed(self): # XXX: this only checks if connection was closed by client (not by server)...
        return self._db.closed != 0
class Row(dict):
    """A query result row: a dict whose columns also read as attributes.

    ``row.colname`` is equivalent to ``row["colname"]``.
    """
    def __getattr__(self, name):
        # __getattr__ is only consulted for attributes not found normally,
        # so regular dict behaviour is untouched.  Translate a missing
        # column into AttributeError (the original leaked KeyError), so
        # hasattr(), getattr() with a default, copy and pickle work.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name) from None
def list_databases():
    """Return the list of user database names on the server.

    Excludes template databases, 'postgres', and any names listed in the
    comma-separated 'hide_databases' config option.  Uses a temporary
    connection to the always-present 'template1' database.
    """
    db = connect("template1")
    res = db.query("SELECT datname FROM pg_database WHERE datistemplate = false AND datname!='postgres'")
    db_list = [r.datname for r in res]
    hide = config.get("hide_databases", "")
    if hide:
        hide_dbs = [x.strip() for x in hide.split(",")]
        db_list = [x for x in db_list if x not in hide_dbs]
    db.close()
    return db_list
| |
from osuapi import OsuApi, AHConnector
from discord.ext import commands
from .utils import utils
import datetime
import discord
import logging
import aiohttp
import os
log = logging.getLogger(__name__)
class Core(commands.Cog):
    """
    The core of Nurevam, just essentials.
    """
    # NOTE: command docstrings below double as discord.py help text, so they
    # are left exactly as written; review notes are in '#' comments instead.
    def __init__(self,bot):
        self.bot = bot
        self.redis=bot.db.redis
        self.bot.say_edit = bot.say
    def get_bot_uptime(self): #to calculates how long it been up
        # Human-readable time since self.bot.uptime.
        # NOTE(review): near-duplicate of get_time_delta below.
        now = datetime.datetime.utcnow()
        delta = now - self.bot.uptime
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        if days:
            fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'
        else:
            fmt = '{h} hours, {m} minutes, and {s} seconds'
        return fmt.format(d=days, h=hours, m=minutes, s=seconds)
    def get_time_delta(self,person):
        # Human-readable age of a past datetime (e.g. account creation date).
        delta = datetime.datetime.utcnow() - person
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        if days:
            fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'
        else:
            fmt = '{h} hours, {m} minutes, and {s} seconds'
        return fmt.format(d=days, h=hours, m=minutes, s=seconds)
    @commands.command()
    async def uptime(self,ctx): #Showing Time that bot been total run
        """Prints the uptime."""
        await self.bot.say(ctx,content = "```py\nI have been up for {}\n```".format(self.get_bot_uptime()))
    @commands.command()
    async def prefix(self,ctx):
        # Show the configured command prefixes plus the one actually used.
        prefix = (await self.redis.get("{}:Config:CMD_Prefix".format(ctx.message.guild.id)))
        prefix = set(prefix + ctx.prefix) #if user didnt set any, it will be default to ! which set prefix to be None? In case it is not, we can add current prefix to it.
        await self.bot.say(ctx,content = "```\n{}\n```".format(",".join(prefix)))
    @commands.command()
    async def info(self,ctx,*,person:discord.Member = None):
        """
        About Nurevam or person by mention info
        """
        if not person:
            # No member given: show bot-wide statistics.
            guild = len(self.bot.guilds)
            member = len(set(self.bot.get_all_members()))
            app = await self.bot.application_info()
            msg = "Name:{}".format(self.bot.user)
            if ctx.message.guild.me.nick:
                msg += "\nNickname:{}".format(ctx.message.guild.me.nick)
            msg += "\nCreator: {}".format(app.owner)
            msg += "\nServer:{}\nMembers:{}".format(guild,member)
            link = "If you want to invite this bot to your server, you can check it out here <http://nurevam.site>!"
            return await self.bot.say(ctx,content = "```xl\n{}\n```\n{}\n".format(msg,link))
        else:
            # Member given: show an embed with that member's details.
            e = discord.Embed()
            e.title = "{} - {}".format(person,person.id)
            e.set_thumbnail(url = person.avatar_url)
            e.add_field(name = "Created at", value="{} - ({})".format(person.created_at,self.get_time_delta(person.created_at)),inline=False)
            e.add_field(name = "Joined at", value="{} - ({})".format(person.joined_at,self.get_time_delta(person.joined_at)),inline=False)
            e.add_field(name = "Total Roles", value=str(len(person.roles)),inline=False)
            if person.colour.value:
                e.colour = person.color
            await self.bot.say(ctx,embed = e)
    @commands.command()
    async def serverinfo(self,ctx):
        """
        Give info about this server
        """
        g = ctx.guild
        embed = discord.Embed()
        embed.set_thumbnail(url = g.icon_url)
        embed.title = "{} - {}".format(g.name,g.id)
        embed.add_field(name = "Owner",value="{} - {}".format(g.owner,g.owner.id),inline=False)
        embed.add_field(name = "Created at", value = str(g.created_at), inline=False)
        embed.add_field(name = "Total Roles", value= str(len(g.roles)), inline=False)
        embed.add_field(name = "Total Members", value= str(g.member_count), inline=False)
        embed.add_field(name = "Premium Member", value= str(g.premium_subscription_count), inline=False)
        embed.add_field(name = "Premium Tier", value= str(g.premium_tier), inline=False)
        await self.bot.say(ctx,embed = embed)
    @commands.command(hidden=True)
    async def command(self,ctx):
        """
        Type !help {command} for more info on a command.
        You can also type !help {category} for more info on a category.
        For example, !help level (If you have level plugin enable!)
        """
        await ctx.send("Yes this is a command.")
    @commands.command(hidden=True)
    async def category(self,ctx):
        """
        Type !help command for additional info on a command.
        You can also type !help category for additional info on a category.
        For example, type !help Level (If you have the level plugin enable!)
        """
        await ctx.send("Yes this is a category.")
    @commands.command(brief = "Showing which plugin is enable")
    async def plugin(self,ctx):
        """
        Red = Disable
        Blue = Enable
        Any problem such as plugins on dashboard is enable but show disable here, info Owner
        """
        # Maps cog display names to their redis config keys when they differ.
        special_case = {"Anime":"myanimelist","Anti Raid":"antiraid"}
        plugin_setting = await self.redis.hgetall("{}:Config:Cogs".format(ctx.message.guild.id))
        embed = discord.Embed()
        cogs = self.bot.cogs.keys()
        for x in cogs:
            setting = u"\U0001F534" #red
            if x in ("Core", "Remindme", "Tools", "REPL","Events"): # A Owner's thing only.
                if ctx.message.author.id != self.bot.owner.id:
                    continue
                setting = u"\U0001F535" #blue
            if x.lower() in plugin_setting or special_case.get(x) in plugin_setting:
                setting = u"\U0001F535" #blue
            embed.add_field(name = x,value = setting)
        if ctx.message.guild.me.colour.value:
            embed.colour = ctx.message.guild.me.colour
        embed.set_footer(text = "{} = Disable | {} = Enable".format(u"\U0001F534",u"\U0001F535"))
        await ctx.send(embed=embed)
    @commands.command()
    @commands.cooldown(rate = 1,per=300,type = commands.BucketType.user)
    async def feedback(self,ctx,*,msg):
        """
        Gives any feedback about bot. Cooldown: 5 min
        For example, reporting bot, new idea/suggestions.
        A quicker way to get hold of owner without joining server.
        Sooner or later, bot may(not) contact you via PMS about status of your requests.
        Only able to make feedback once a five minute.
        """
        # Forward the feedback to a fixed owner channel as an embed.
        embed = discord.Embed()
        embed.set_author(name = ctx.message.author,icon_url=ctx.message.author.avatar_url or ctx.message.author.default_avatar_url)
        embed.add_field(name = "Author",value = "**ID**:{0.id}".format(ctx.message))
        embed.add_field(name = "Server",value = "**Name**:{0.guild.name}\n**ID**:{0.guild.id}\n**Channel**:{0.channel.name} - {0.channel.id}".format(ctx.message))
        embed.add_field(name = "Feedback",value = msg)
        channel = self.bot.get_channel(292133726370922497)
        await channel.send(embed=embed)
        await ctx.send(u"\U0001F44C"+", Thank you for your valuable feedback. \nHopefully, the owner will reply to you soon.")
    @commands.command(hidden=True)
    @commands.check(utils.is_owner)
    async def pm(self,ctx,user_id:int,*,msg):
        # Owner-only: DM an arbitrary user by id.
        user = self.bot.get_user(user_id)
        print(user)
        print(msg)
        if user is None:
            return await ctx.send("User wasn't found.")
        message = "I have got a message from the owner,{}\n```fix\n{}\n```" \
                  "\n\nPlease note that the owner will not able to see any message of this before or after.\n" \
                  "To reply back, please use !reply <message>".format(self.bot.owner,msg)
        await user.send(message)
        await ctx.send(u"\U0001F44C")
    @commands.command(hidden = True)
    async def reply(self,ctx,*,msg):
        # Counterpart of pm: relays a user's reply to a fixed owner channel.
        channel = self.bot.get_channel(295075318430040065)
        if channel is None:
            return await ctx.send("Appear so, reply system is down...")
        embed = discord.Embed()
        embed.set_author(name = ctx.message.author,icon_url=ctx.message.author.avatar_url)
        embed.add_field(name = "Author",value = "**ID**:{0.author.id}".format(ctx.message))
        embed.add_field(name = "Reply",value = msg,inline=False)
        await channel.send(embed=embed)
        await ctx.send(u"\U0001F44C")
def setup(bot):
    """Extension entry point used by discord.py to load the Core cog."""
    core_cog = Core(bot)
    bot.add_cog(core_cog)
| |
import simplejson
import jsonpickle
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, make_response, jsonify, Markup, Response
def graph_line(pluginData, func_name, func_nfo):
    """
    converts from the original format to highcharts format. returning the JSON

    NOTE: Python 2 module (print statements below).
    pluginData is a list of tab-separated lines; a line starting with '#'
    is the header naming the series, every other line is
    "<x-category>\t<value1>\t<value2>...".  func_nfo carries the chart
    labels and axis bounds (see graph_mapper at the bottom of the module).
    """
    #"#Base\t%GC",
    #"1\t40.209569984588065",
    #"2\t39.756647339140365",
    #//categoriesX = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49'];
    #//data = [{
    #//            name: 'Tokyo',
    #//            data: [7.0, 6.9, 9.5, 14.5, 18.2, 21.5, 25.2, 26.5, 23.3, 18.3, 13.9, 9.6]
    #//        }, {
    #//            name: 'New York',
    #//            data: [-0.2, 0.8, 5.7, 11.3, 17.0, 22.0, 24.8, 24.1, 20.1, 14.1, 8.6, 2.5]
    #//        }, {
    #//            name: 'Berlin',
    #//            data: [-0.9, 0.6, 3.5, 8.4, 13.5, 17.0, 18.6, 17.9, 14.3, 9.0, 3.9, 1.0]
    #//        }, {
    #//            name: 'London',
    #//            data: [3.9, 4.2, 5.7, 8.5, 11.9, 15.2, 17.0, 16.6, 14.2, 10.3, 6.6, 4.8]
    #//        }];
    #//
    #//title = "Relative Enrichment Over Read Length";
    #//subTitle = "FastQC Report";
    #//titleY = "%";
    #//unityY = "%";
    #//titleX = "Position in Read (bp)"
    #//unityX = "bp";
    #function graph_lines(data, container, title, subTitle, titleY, unityY, titleX, unityX, categoriesX) {
    lists = []
    categoriesX = []
    dataD = {}
    title, subTitle, titleY, unityY, titleX, unityX, minY, maxY, minX, maxX = func_nfo
    print "graph_line ",
    print func_nfo
    for line in pluginData:
        #print line
        cols = line.split('\t')
        if cols[0][0] == "#":
            # header line: remember the column (series) names
            lists = []
            #titleY = cols[0][1:]
            #titleX = cols[1]
            for colName in cols:
                lists.append(colName)
        else:
            # data line: first column is the x category, the rest are series values
            categoriesX.append(str(cols[0]))
            for colIndex in range(len(cols)):
                if colIndex == 0:
                    continue
                try:
                    colVal = float(cols[colIndex])
                except ValueError:
                    colVal = cols[colIndex]
                colName = lists[colIndex]
                if colName in dataD:
                    dataD[colName].append(colVal)
                else:
                    dataD[colName] = [ colVal ]
    # reshape {name: [values]} into highcharts' list of series dicts
    data = []
    for colName, colData in dataD.items():
        data.append({
            'name': colName,
            'data': colData
        })
    res = {
        'categoriesX' : categoriesX,
        'data'        : data,
        'title'       : title,
        'subTitle'    : subTitle,
        'titleY'      : titleY,
        'unityY'      : unityY,
        'titleX'      : titleX,
        'unityX'      : unityX,
        'minY'        : minY,
        'maxY'        : maxY,
        'minX'        : minX,
        'maxX'        : maxX,
        '_dst_func'   : 'graph_lines',
    }
    #print res
    return jsonpickle.encode(res)
def candle_stick(pluginData, func_name, func_nfo):
    """
    converts from the original format to highcharts format. returning the JSON

    Input lines are "#Base\tMean\tMedian\tLowerQ\tUpperQ\t10th\t90th"; a
    position like "10-14" is collapsed to its midpoint.  Output splits the
    columns into four series: quartile range, percentile range, mean, median.
    """
    #"Per base sequence quality": {
    # "data": [
    #     0          1       2         3             4               5                6
    #  "#Base\tMean\tMedian\tLower Quartile\tUpper Quartile\t10th Percentile\t90th Percentile",
    #  "1\t36.064507832356185\t37.0\t37.0\t40.0\t28.0\t40.0",
    #  "2\t35.4651882904503\t37.0\t35.0\t40.0\t25.0\t40.0",
    #  "3\t35.13829905728201\t37.0\t34.0\t40.0\t24.0\t40.0",
    #  "4\t34.98659335540772\t37.0\t34.0\t40.0\t24.0\t40.0",
    #  "5\t34.9416264163553\t37.0\t34.0\t40.0\t23.0\t40.0",
    #//[1131926400000,61.54,61.98,60.91,61.45],
    #//[1132012800000,61.60,63.08,61.46,62.28],
    #//[1132099200000,63.15,65.06,63.09,64.95],
    #//[1347321600000,665.27,670.10,656.55,660.59],
    #               open   high   low    close
    title, subTitle, titleY, unityY, titleX, unityX, minY, maxY, minX, maxX = func_nfo
    print func_nfo
    data1 = []
    data2 = []
    data3 = []
    data4 = []
    for line in pluginData:
        #print line
        cols = line.split('\t')
        if cols[0][0] == "#":
            continue
        else:
            #print "'%s'" % line
            pos = cols[0]
            # a range like "10-14" becomes its midpoint (12)
            dashPos = pos.find('-')
            if dashPos != -1:
                pos = float( int( pos[:dashPos] ) + int(pos[dashPos+1:]) ) / 2.0
                #print "%d + %d = %d" % (int( pos[:dashPos] ), int(pos[dashPos+1:]), posf)
                #pos = posf
            pos = int(pos)
            data1.append([ pos, float(cols[3]), float(cols[4]) ])
            data2.append([ pos, float(cols[5]), float(cols[6]) ])
            data3.append([ pos, float(cols[1]) ])
            data4.append([ pos, float(cols[2]) ])
    res = {
        #'categoriesX' : categoriesX,
        'data1'       : data1,
        'data2'       : data2,
        'data3'       : data3,
        'data4'       : data4,
        'title'       : title,
        'subTitle'    : subTitle,
        'titleY'      : titleY,
        'unityY'      : unityY,
        'titleX'      : titleX,
        'unityX'      : unityX,
        'minY'        : minY,
        'maxY'        : maxY,
        'minX'        : minX,
        'maxX'        : maxX,
        '_dst_func'   : 'graph_candle',
    }
    return jsonpickle.encode(res)
def stacked(pluginData, func_name, func_nfo):
    """
    converts from the original format to highcharts format. returning the JSON

    pluginData maps species/sample names to value lists; a key starting
    with "_" supplies the Y category labels (a fallback list is used when
    absent).  Output is one series per Y category, stacked across samples.
    """
    #var categoriesX = ['Apples', 'Oranges', 'Pears', 'Grapes', 'Bananas'];
    #var data        = [{
    #                    name: 'John',
    #                    data: [5, 3, 4, 7, 2]
    #                }, {
    #                    name: 'Jane',
    #                    data: [2, 2, 3, 2, 1]
    #                }, {
    #                    name: 'Joe',
    #                    data: [3, 4, 4, 2, 5]
    #                }];
    #"contaminationData": {
    # "EcoliM": [
    #  99.66,
    #  0.0,
    #  0.12,
    #  0.08,
    #  0.14
    # ],
    lists = []
    categoriesX = []
    categoriesY = []
    data = []
    title, subTitle, titleY, unityY, titleX, unityX, minY, maxY, minX, maxX = func_nfo
    print func_nfo
    # first pass: pick up the "_"-prefixed meta entry holding the Y labels
    for spp_name in sorted(pluginData):
        values = pluginData[spp_name]
        if spp_name[0] == "_":
            categoriesY = values
    if len(categoriesY) == 0:
        categoriesY = ['%Unmapped', '%One hit one library', '%Multiple hits one library', '%One hit multiple libraries', '%Multiple hits multiple libraries']
    dataD = {}
    # second pass: transpose sample->values into y_label->values-per-sample
    # NOTE(review): "_"-prefixed meta entries are NOT skipped here, so the
    # label row also ends up in categoriesX/dataD -- the commented-out code
    # below suggests it was meant to be excluded; verify against the caller.
    for spp_name in sorted(pluginData):
        values = pluginData[spp_name]
        categoriesX.append(spp_name)
        for pos in range(len(values)):
            y_label = categoriesY[pos]
            if y_label not in dataD:
                dataD[y_label] = []
            dataD[y_label].append(values[pos])
        #if spp_name[0] != "_":
        #    data.append({
        #        'name': spp_name,
        #        'data': values
        #    })
        #else:
        #    categoriesX = values
    for y_label in dataD:
        data.append({
            'name': y_label,
            'data': dataD[y_label]
        })
    res = {
        'categoriesX' : categoriesX,
        'data'        : data,
        'title'       : title,
        'subTitle'    : subTitle,
        'titleY'      : titleY,
        'unityY'      : unityY,
        'titleX'      : titleX,
        'unityX'      : unityX,
        'minY'        : minY,
        'maxY'        : maxY,
        'minX'        : minX,
        'maxX'        : maxX,
        '_dst_func'   : 'graph_stacked',
    }
    return jsonpickle.encode(res)
def parseGraph(projectName, projectStatus, projectSample, sequenceTech, libraryName, fileName, pluginName, pluginKey, pluginValue):
    """
    parse the graph request returning the icon, link and JSON url according to the data

    Builds an HTML fragment of <a> links: one per image when pluginValue is
    a dict of images, otherwise a single image link.  Whether a link points
    at JSON data or a static image depends on graph_mapper membership and
    on which payloads ('data'/'img') are present.
    """
    res = []
    """
    get URL safe names to be added as parameters
    """
    lnkConstLst = []
    for kv in ( [ 'projectName'  , Markup.escape( projectName   ) ],
                [ 'projectStatus', Markup.escape( projectStatus ) ],
                [ 'projectSample', Markup.escape( projectSample ) ],
                [ 'sequenceTech' , Markup.escape( sequenceTech  ) ],
                [ 'libraryName'  , Markup.escape( libraryName   ) ],
                [ 'fileName'     , Markup.escape( fileName      ) ],
                [ 'pluginName'   , Markup.escape( pluginName    ) ],
                [ 'pluginKey'    , Markup.escape( pluginKey     ) ],
            ):
        lnkConstLst.append("=".join(kv))
    # shared query string appended to every link below
    lnkConst = "&".join(lnkConstLst)
    url_for_download  = url_for('download'  )
    url_forfullscreen = url_for('fullscreen')
    if isinstance(pluginValue, dict):
        """
        if dict, it means we have more than one picture here.
        """
        res.append("<table>")
        res.append("  <tr>")
        if (pluginName, pluginKey, None) in graph_mapper:
            """
            if IMAGENAME is None in mapper, it means there's only one picture and no Icon.
            add standard icon
            """
            lnk  = "%s?%s&srctype=json" % (url_for_download , lnkConst)
            href = "%s?%s&srctype=json" % (url_forfullscreen, lnkConst)
            res.append("""<a href="%(href)s" lnk="%(lnk)s" class="graphjson" title="%(title)s" target="_blank">%(dsc)s</a></td>""" %\
                       { 'href': href, 'lnk': lnk, 'title': pluginKey, 'dsc': '<i class="icon-picture"/>' })
        else:
            for imageName in pluginValue:
                """
                for each image in this plugin:
                  - add icon if present
                  - if raw data, add link to JSON, else, add link to image
                """
                imgdownload = "%s?%s&imageName=%s&srctype=" % (url_for_download , lnkConst, imageName)
                imgfullscre = "%s?%s&imageName=%s&srctype=" % (url_forfullscreen, lnkConst, imageName)
                dsc         = Markup.escape(imageName)
                if 'ico' in pluginValue[imageName]:
                    ico_name    = 'pluginsIcons/' + pluginName + '/' + pluginValue[imageName]['ico'].replace(" ", "_").lower() + '.png'
                    url_for_ico = url_for('static', filename=ico_name)
                    dsc         = '<img src="%(url)s" class="icon">' % { 'url': url_for_ico }
                else:
                    dsc         = '<i class="icon-picture"/>'
                fmtdsc = { 'title': imageName, 'dsc': dsc, 'urldown': imgdownload, 'urlfull': imgfullscre }
                # prefer JSON (interactive chart) when the mapper knows this
                # image and raw data is available; otherwise fall back to the
                # static image if one exists
                if (pluginName, pluginKey, imageName) in graph_mapper and \
                    'data' in pluginValue[imageName] and \
                    pluginValue[imageName]['data'] is not None:
                    fmtdsc['outtype'] = 'json'
                else:
                    if 'img' in pluginValue[imageName] and pluginValue[imageName]['img'] is not None:
                        fmtdsc['outtype'] = 'img'
                if 'outtype' in fmtdsc:
                    res.append("""    <td><a href="%(urlfull)s%(outtype)s" lnk="%(urldown)s%(outtype)s" class="graph%(outtype)s" title="%(title)s" target="_blank">%(dsc)s</a></td>""" % fmtdsc)
                else:
                    res.append("""    <td title="%(title)s">%(dsc)s</td>""" % fmtdsc)
        res.append("  </tr>")
        res.append("</table>")
    else:
        lnk = "%s?%s" % (url_for_download, lnkConst)
        #res.append("""<td><img src="%s" class="graphSmall" title="%s"/></td>""" % (lnk, pluginKey))
        #res.append("""<td><a href="%s" class="graph" title="%s" target="_blank">%s</a></td>""" % (lnk, pluginKey, pluginKey))
        res.append("""<a href="%(lnk)s" class="graphimg" title="%(title)s" target="_blank">%(dsc)s</a>""" % \
                   { 'lnk': lnk, 'title': pluginKey, 'dsc': '<i class="icon-picture"/>'})
    return "\n".join(res)
#TODO: make global
# Maps (pluginName, pluginKey, imageName) -> [parser function, javascript
# renderer name, [title, subTitle, titleY, unityY, titleX, unityX, minY,
# maxY, minX, maxX]].  imageName None means the plugin has a single,
# icon-less picture (see parseGraph above).
graph_mapper = {
    #                                                                         parser func , graph parser js, title, subTitle, titleY, , unityY, titleX , unityX, miny, maxy, minx, maxx
    ('fastqc'       , 'fastqcGraphs'      , 'Sequence Duplication Levels' ): [graph_line  , 'graph_lines' , [ 'Sequence Duplication Levels >=38.4%'               , "FastQC"      , "Perc (%)"       , ""  , "Duplication Level"  , "x"   ,    0,  100, None, None ] ],
    ('fastqc'       , 'fastqcGraphs'      , 'Per base sequence content'   ): [graph_line  , 'graph_lines' , [ 'Sequence Content Across all Bases'                 , "FastQC"      , "Perc (%)"       , ""  , "Position in Read"   , "bp"  ,    0,  100, None, None ] ],
    ('fastqc'       , 'fastqcGraphs'      , 'Per sequence GC content'     ): [graph_line  , 'graph_lines' , [ 'GC Distribution Over all Sequences'                , "FastQC"      , "Num Sequences"  , ""  , "Mean GC Content (%)", ""    ,    0, None, None, None ] ],
    ('fastqc'       , 'fastqcGraphs'      , 'Sequence Length Distribution'): [graph_line  , 'graph_lines' , [ 'Distribution of sequence Lengths Over all Sequences', "FastQC"     , "Num Sequences"  , ""  , "Sequence Length"    , "bp"  ,    0, None, None, None ] ],
    ('fastqc'       , 'fastqcGraphs'      , 'Per base GC content'         ): [graph_line  , 'graph_lines' , [ 'GC Content Across all Bases'                       , "FastQC"      , "Perc (%)"       , ""  , "Position in Read"   , "bp"  ,    0,  100, None, None ] ],
    ('fastqc'       , 'fastqcGraphs'      , 'Per sequence quality scores' ): [graph_line  , 'graph_lines' , [ 'Quality Score Distribution Over all Sequences'     , "FastQC"      , "Num Sequences"  , ""  , "Position in Read"   , "bp"  ,    0, None, None, None ] ],
    ('fastqc'       , 'fastqcGraphs'      , 'Per base N content'          ): [graph_line  , 'graph_lines' , [ 'N Content Across all Bases'                        , "FastQC"      , "Perc (%)"       , ""  , "Position in Read"   , "bp"  ,    0,  100, None, None ] ],
    ('fastqc'       , 'fastqcGraphs'      , 'Per base sequence quality'   ): [candle_stick, 'candle_stick', [ 'Quality Scores Across all Bases'                   , "FastQC"      , "Quality (Q)"    , ""  , "Position in Read"   , "bp"  ,    0,   42, None, None ] ],
    ('contamination', 'contaminationData' , None                          ): [stacked     , 'stacked'     , [ 'Contamination'                                     , "FastQ Screen", "Perc Mapped (%)", ""  , "Species Db"         , ""    , None, None, None, None ] ],
    #('fastqc'       , 'fastqcGraphs'      , 'Kmer Content'                ): [graph_line  , 'graph_lines'  ],
    #over represented sequences
    #('contamination', 'contaminationGraph', None                          ): [stacked     , 'stacked'      , ['contaminationGraph'  , "FastQ Screen", "Perc"  , "%", "Number", "#"] ],
}
| |
""" Create a Nifti file from specified series data
Take the data in the specified series directory and convert it to a 4D
nifti image in RAS+ orientation, and then save the new data to disk.
If the series is an anatomical scan, the output Nifti will be 3D. If the series
is a functional scan, the output Nifti will be 4D. In all cases, the affine
transformation in the Nifti header will simply convert from voxel space
to mm space using the image voxel sizes (and not moving the origin at all)
"""
import os
import sys
from os.path import join
from utils.general_utils import initializeSession
# Get the full path to the pyneal_scanner directory. This assumes
# getSeries.py lives in the pyneal_scanner directory; output files are
# written under <pynealScannerDir>/data below.
pynealScannerDir = os.path.dirname(os.path.abspath(__file__))
def getSeries_GE(scannerDirs):
    """ Build nifti image from series data, GE format

    Assumes series data are represented as dicom files, one per slice. The
    path to the output Nifti file is printed to stdOut upon completion (in
    general, expect to find it in the pyneal_scanner/data directory)

    Parameters
    ----------
    scannerDirs : object
        instance of `GE_utils.GE_DirStructure`. Has attributes for the relevant
        paths for the current session. `scannerDirs` is one of the variables
        returned by running `general_utils.initializeSession()`
    """
    from utils.GE_utils import GE_BuildNifti

    # keep prompting until the user names a series that actually exists
    seriesDirs = scannerDirs.get_seriesDirs()
    selectedSeries = input('Which Series?: ')
    while selectedSeries not in seriesDirs:
        print('{} is not a valid series choice!'.format(selectedSeries))
        selectedSeries = input('Which Series?: ')

    # ask for an output prefix and strip any spaces from it
    outputPrefix = input('Output Prefix: ').replace(' ', '')

    # progress updates
    print('=' * 5)
    print('Building Nifti...')
    print('\tinput series: {}'.format(selectedSeries))
    print('\toutput prefix: {}'.format(outputPrefix))

    # build the Nifti from the dicom slices in the selected series dir,
    # then write it under pyneal_scanner/data
    niftiBuilder = GE_BuildNifti(join(scannerDirs.sessionDir, selectedSeries))
    output_fName = '{}_{}.nii.gz'.format(outputPrefix, selectedSeries)
    print('Successfully built Nifti image: {}\n'.format(output_fName))
    saveNifti(niftiBuilder, join(pynealScannerDir, 'data', output_fName))
def getSeries_Philips(scannerDirs):
    """ Build nifti image from series data, Philips format

    Assumes series data are represented as par/rec file pairs, one per volume.
    The path to the output Nifti file is printed to stdOut upon completion
    (in general, expect to find it in the pyneal_scanner/data directory)

    Parameters
    ----------
    scannerDirs : object
        instance of `Philips_utils.Philips_DirStructure`. Has attributes for
        the relevant paths for the current session. `scannerDirs` is one of the
        variables returned by running `general_utils.initializeSession()`
    """
    from utils.Philips_utils import Philips_BuildNifti

    # keep prompting until the user names a series that actually exists
    seriesDirs = scannerDirs.get_seriesDirs()
    selectedSeries = input('Which Series?: ')
    while selectedSeries not in seriesDirs:
        print('{} is not a valid series choice!'.format(selectedSeries))
        selectedSeries = input('Which Series?: ')

    # ask for an output prefix and strip any spaces from it
    outputPrefix = input('Output Prefix: ').replace(' ', '')

    # progress updates
    print('=' * 5)
    print('Building Nifti...')
    print('\tinput series: {}'.format(selectedSeries))
    print('\toutput name: {}'.format(outputPrefix))

    # build the Nifti from the par/rec pairs in the selected series dir,
    # then write it under pyneal_scanner/data
    niftiBuilder = Philips_BuildNifti(join(scannerDirs.sessionDir, selectedSeries))
    output_fName = '{}_{}.nii.gz'.format(outputPrefix, selectedSeries)
    print('Successfully built Nifti image: {}\n'.format(output_fName))
    saveNifti(niftiBuilder, join(pynealScannerDir, 'data', output_fName))
def getSeries_Siemens(scannerDirs):
    """ Build nifti image from series data, Siemens format

    Assumes series data are represented as dicom mosaic files, one per volume.
    The path to the output Nifti file is printed to stdOut upon completion
    (in general, expect to find it in the pyneal_scanner/data directory)

    Parameters
    ----------
    scannerDirs : object
        instance of `Siemens_utils.Siemens_DirStructure`. Has attributes for
        the relevant paths for the current session. `scannerDirs` is one of the
        variables returned by running `general_utils.initializeSession()`
    """
    from utils.Siemens_utils import Siemens_BuildNifti

    # keep prompting until a valid series is given; Siemens series numbers
    # are zero-padded to 6 digits before comparison
    currentSeries = scannerDirs.getUniqueSeries()
    selectedSeries = input('Which Series?: ')
    while selectedSeries.zfill(6) not in currentSeries:
        print('{} is not a valid series choice!'.format(selectedSeries))
        selectedSeries = input('Which Series?: ')

    # ask for an output prefix and strip any spaces from it
    outputPrefix = input('Output Prefix: ').replace(' ', '')

    # progress updates
    print('=' * 5)
    print('Building Nifti...')
    print('\tinput series: {}'.format(selectedSeries))
    print('\toutput name: {}'.format(outputPrefix))

    # build the Nifti from the mosaic dicoms for the selected series,
    # then write it under pyneal_scanner/data
    niftiBuilder = Siemens_BuildNifti(scannerDirs.sessionDir, selectedSeries)
    output_fName = '{}_{}.nii.gz'.format(outputPrefix, selectedSeries)
    print('Successfully built Nifti image: {}\n'.format(output_fName))
    saveNifti(niftiBuilder, join(pynealScannerDir, 'data', output_fName))
def saveNifti(niftiBuilder, outputPath):
    """ Save the nifti file to disk. Path to output file printed to stdOut

    Parameters
    ----------
    niftiBuilder : object
        instance of the niftiBuilder class for this scanning environment;
        must expose a `write_nifti(path)` method
    outputPath : string
        full path to where you want to save nifti file
    """
    # make sure the output dir exists. exist_ok=True closes the race between
    # checking and creating; an empty dirname (bare filename) means the
    # current directory, which needs no creation (makedirs('') would raise).
    outputDir, fName = os.path.split(outputPath)
    if outputDir:
        os.makedirs(outputDir, exist_ok=True)
    niftiBuilder.write_nifti(outputPath)
    print('saved at: {}'.format(outputPath))
if __name__ == '__main__':
    # initialize the session classes:
    scannerSettings, scannerDirs = initializeSession()
    # print all of the current series dirs to the terminal
    scannerDirs.print_currentSeries()
    # dispatch to the converter matching the make declared in the scanner
    # settings; anything else is reported as unrecognized
    scannerMake = scannerSettings.allSettings['scannerMake']
    if scannerMake == 'GE':
        getSeries_GE(scannerDirs)
    elif scannerMake == 'Philips':
        getSeries_Philips(scannerDirs)
    elif scannerMake == 'Siemens':
        getSeries_Siemens(scannerDirs)
    else:
        print('Unrecognized scanner make: {}'.format(scannerMake))
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
from json_parse import OrderedDict
from memoize import memoize
class ParseException(Exception):
  """Raised when data in the model is invalid.

  The exception message includes the full model hierarchy leading to the
  offending entity, so the bad schema node can be located.
  """
  def __init__(self, parent, message):
    path = _GetModelHierarchy(parent)
    path.append(message)
    detail = 'Model parse exception at:\n' + '\n'.join(path)
    Exception.__init__(self, detail)
class Model(object):
  """Model of all namespaces that comprise an API.

  Properties:
  - |namespaces| a map of a namespace name to its model.Namespace
  """
  def __init__(self):
    self.namespaces = {}

  def AddNamespace(self, json, source_file, include_compiler_options=False):
    """Parses |json| into a Namespace, registers it under its name, and
    returns it.
    """
    ns = Namespace(json,
                   source_file,
                   include_compiler_options=include_compiler_options)
    self.namespaces[ns.name] = ns
    return ns
class Namespace(object):
  """An API namespace.

  Properties:
  - |name| the name of the namespace
  - |unix_name| the unix_name of the namespace
  - |source_file| the file that contained the namespace definition
  - |source_file_dir| the directory component of |source_file|
  - |source_file_filename| the filename component of |source_file|
  - |platforms| if not None, the list of platforms that the namespace is
    available to
  - |types| a map of type names to their model.Type
  - |functions| a map of function names to their model.Function
  - |events| a map of event names to their model.Function
  - |properties| a map of property names to their model.Property
  - |compiler_options| the compiler_options dict, only present if
    |include_compiler_options| is True
  """
  def __init__(self, json, source_file, include_compiler_options=False):
    self.name = json['namespace']
    self.unix_name = UnixName(self.name)
    self.source_file = source_file
    self.source_file_dir, self.source_file_filename = os.path.split(source_file)
    # A Namespace is the root of the model hierarchy (_GetModelHierarchy
    # stops when parent is None).
    self.parent = None
    self.platforms = _GetPlatforms(json)
    # Top-level definitions are reachable both from client code and from the
    # JSON, hence both Origin flags are set.
    toplevel_origin = Origin(from_client=True, from_json=True)
    self.types = _GetTypes(self, json, self, toplevel_origin)
    self.functions = _GetFunctions(self, json, self)
    self.events = _GetEvents(self, json, self)
    self.properties = _GetProperties(self, json, self, toplevel_origin)
    if include_compiler_options:
      self.compiler_options = json.get('compiler_options', {})
class Origin(object):
  """Stores the possible origin of model object as a pair of bools. These are:

  |from_client| indicating that instances can originate from users of
                generated code (for example, function results), or
  |from_json| indicating that instances can originate from the JSON (for
              example, function parameters)

  It is possible for model objects to originate from both the client and json,
  for example Types defined in the top-level schema, in which case both
  |from_client| and |from_json| would be True.
  """
  def __init__(self, from_client=False, from_json=False):
    # An origin that is neither client nor json is meaningless.
    if not (from_client or from_json):
      raise ValueError('One of from_client or from_json must be true')
    self.from_client = from_client
    self.from_json = from_json
class Type(object):
  """A Type defined in the json.

  Properties:
  - |name| the type name
  - |namespace| the Type's namespace
  - |description| the description of the type (if provided)
  - |properties| a map of property unix_names to their model.Property
  - |functions| a map of function names to their model.Function
  - |events| a map of event names to their model.Event
  - |origin| the Origin of the type
  - |property_type| the PropertyType of this Type
  - |item_type| if this is an array, the type of items in the array
  - |simple_name| the name of this Type without a namespace
  - |additional_properties| the type of the additional properties, if any is
    specified
  """
  def __init__(self,
               parent,
               name,
               json,
               namespace,
               origin):
    self.name = name
    self.namespace = namespace
    self.simple_name = _StripNamespace(self.name, namespace)
    self.unix_name = UnixName(self.name)
    self.description = json.get('description', None)
    self.origin = origin
    self.parent = parent
    self.instance_of = json.get('isInstanceOf', None)
    # TODO(kalman): Only objects need functions/events/properties, but callers
    # assume that all types have them. Fix this.
    self.functions = _GetFunctions(self, json, namespace)
    self.events = _GetEvents(self, json, namespace)
    self.properties = _GetProperties(self, json, namespace, origin)
    # Map the JSON 'type' marker (or the presence of '$ref'/'enum'/'choices')
    # onto a PropertyType, recursing for nested types. Branch order matters:
    # e.g. '$ref' and enum are checked before the plain string case.
    json_type = json.get('type', None)
    if json_type == 'array':
      self.property_type = PropertyType.ARRAY
      self.item_type = Type(
          self, '%sType' % name, json['items'], namespace, origin)
    elif '$ref' in json:
      self.property_type = PropertyType.REF
      self.ref_type = json['$ref']
    elif 'enum' in json and json_type == 'string':
      self.property_type = PropertyType.ENUM
      self.enum_values = [value for value in json['enum']]
    elif json_type == 'any':
      self.property_type = PropertyType.ANY
    elif json_type == 'binary':
      self.property_type = PropertyType.BINARY
    elif json_type == 'boolean':
      self.property_type = PropertyType.BOOLEAN
    elif json_type == 'integer':
      self.property_type = PropertyType.INTEGER
    elif (json_type == 'double' or
          json_type == 'number'):
      self.property_type = PropertyType.DOUBLE
    elif json_type == 'string':
      self.property_type = PropertyType.STRING
    elif 'choices' in json:
      # NOTE: the comprehension deliberately rebinds |json| to each choice
      # dict, so the Type() arguments read from the choice, not the parent.
      self.property_type = PropertyType.CHOICES
      self.choices = [Type(self,
                           # The name of the choice type - there had better be
                           # either a type or a $ref specified for the choice.
                           json.get('type', json.get('$ref')),
                           json,
                           namespace,
                           origin)
                      for json in json['choices']]
    elif json_type == 'object':
      if not (
          'properties' in json or
          'additionalProperties' in json or
          'functions' in json or
          'events' in json):
        raise ParseException(self, name + " has no properties or functions")
      self.property_type = PropertyType.OBJECT
      additional_properties_json = json.get('additionalProperties', None)
      if additional_properties_json is not None:
        self.additional_properties = Type(self,
                                          'additionalProperties',
                                          additional_properties_json,
                                          namespace,
                                          origin)
      else:
        self.additional_properties = None
    elif json_type == 'function':
      self.property_type = PropertyType.FUNCTION
      # Sometimes we might have an unnamed function, e.g. if it's a property
      # of an object. Use the name of the property in that case.
      function_name = json.get('name', name)
      self.function = Function(self, function_name, json, namespace, origin)
    else:
      raise ParseException(self, 'Unsupported JSON type %s' % json_type)
class Function(object):
  """A Function defined in the API.

  Properties:
  - |name| the function name
  - |platforms| if not None, the list of platforms that the function is
    available to
  - |params| a list of parameters to the function (order matters). A separate
    parameter is used for each choice of a 'choices' parameter
  - |description| a description of the function (if provided)
  - |callback| the callback parameter to the function. There should be exactly
    one
  - |optional| whether the Function is "optional"; this only makes sense to be
    present when the Function is representing a callback property
  - |simple_name| the name of this Function without a namespace
  - |returns| the return type of the function; None if the function does not
    return a value
  """
  def __init__(self,
               parent,
               name,
               json,
               namespace,
               origin):
    self.name = name
    self.simple_name = _StripNamespace(self.name, namespace)
    self.platforms = _GetPlatforms(json)
    self.params = []
    self.description = json.get('description')
    self.callback = None
    self.optional = json.get('optional', False)
    self.parent = parent
    self.nocompile = json.get('nocompile')
    options = json.get('options', {})
    self.conditions = options.get('conditions', [])
    self.actions = options.get('actions', [])
    self.supports_listeners = options.get('supportsListeners', True)
    self.supports_rules = options.get('supportsRules', False)
    def GeneratePropertyFromParam(p):
      # Helper: wrap a raw parameter dict in a model.Property.
      return Property(self, p['name'], p, namespace, origin)
    self.filters = [GeneratePropertyFromParam(filter)
                    for filter in json.get('filters', [])]
    # Only the LAST function-typed parameter becomes |self.callback|; any
    # earlier function-typed parameters are demoted to ordinary params.
    callback_param = None
    for param in json.get('parameters', []):
      if param.get('type') == 'function':
        if callback_param:
          # No ParseException because the webstore has this.
          # Instead, pretend all intermediate callbacks are properties.
          self.params.append(GeneratePropertyFromParam(callback_param))
        callback_param = param
      else:
        self.params.append(GeneratePropertyFromParam(param))
    if callback_param:
      # Callbacks originate from the client side (they are invoked with
      # generated results), hence Origin(from_client=True).
      self.callback = Function(self,
                               callback_param['name'],
                               callback_param,
                               namespace,
                               Origin(from_client=True))
    self.returns = None
    if 'returns' in json:
      self.returns = Type(self,
                          '%sReturnType' % name,
                          json['returns'],
                          namespace,
                          origin)
class Property(object):
  """A property of a type OR a parameter to a function.

  Properties:
  - |name| name of the property as in the json. This shouldn't change since
    it is the key used to access DictionaryValues
  - |unix_name| the unix_style_name of the property. Used as variable name
  - |optional| a boolean representing whether the property is optional
  - |description| a description of the property (if provided)
  - |type_| the model.Type of this property
  - |simple_name| the name of this Property without a namespace
  """
  def __init__(self, parent, name, json, namespace, origin):
    """Creates a Property from JSON.
    """
    self.parent = parent
    self.name = name
    # _unix_name may be overridden via the unix_name property below, but only
    # until the first read (tracked by _unix_name_used).
    self._unix_name = UnixName(self.name)
    self._unix_name_used = False
    self.origin = origin
    self.simple_name = _StripNamespace(self.name, namespace)
    self.description = json.get('description', None)
    self.optional = json.get('optional', None)
    self.instance_of = json.get('isInstanceOf', None)
    # HACK: only support very specific value types.
    is_allowed_value = (
        '$ref' not in json and
        ('type' not in json or json['type'] == 'integer'
         or json['type'] == 'string'))
    self.value = None
    if 'value' in json and is_allowed_value:
      self.value = json['value']
      if 'type' not in json:
        # Sometimes the type of the value is left out, and we need to figure
        # it out for ourselves.
        # NOTE(review): this mutates the caller's |json| dict in place, and
        # basestring is Python 2-only.
        if isinstance(self.value, int):
          json['type'] = 'integer'
        elif isinstance(self.value, basestring):
          json['type'] = 'string'
        else:
          # TODO(kalman): support more types as necessary.
          raise ParseException(
              parent,
              '"%s" is not a supported type for "value"' % type(self.value))
    self.type_ = Type(parent, name, json, namespace, origin)

  def GetUnixName(self):
    """Gets the property's unix_name. Raises AttributeError if not set.
    """
    if not self._unix_name:
      raise AttributeError('No unix_name set on %s' % self.name)
    # Once read, the unix_name is locked against further renames.
    self._unix_name_used = True
    return self._unix_name

  def SetUnixName(self, unix_name):
    """Set the property's unix_name. Raises AttributeError if the unix_name has
    already been used (GetUnixName has been called).
    """
    if unix_name == self._unix_name:
      return
    if self._unix_name_used:
      raise AttributeError(
          'Cannot set the unix_name on %s; '
          'it is already used elsewhere as %s' %
          (self.name, self._unix_name))
    self._unix_name = unix_name

  # Expose the guarded accessors as a plain attribute.
  unix_name = property(GetUnixName, SetUnixName)
class _Enum(object):
"""Superclass for enum types with a "name" field, setting up repr/eq/ne.
Enums need to do this so that equality/non-equality work over pickling.
"""
@staticmethod
def GetAll(cls):
"""Yields all _Enum objects declared in |cls|.
"""
for prop_key in dir(cls):
prop_value = getattr(cls, prop_key)
if isinstance(prop_value, _Enum):
yield prop_value
def __init__(self, name):
self.name = name
def __repr(self):
return self.name
def __eq__(self, other):
return type(other) == type(self) and other.name == self.name
def __ne__(self, other):
return not (self == other)
class _PropertyTypeInfo(_Enum):
  # An _Enum entry carrying an extra |is_fundamental| flag; True for the
  # primitive JSON types (see the PropertyType table below).
  def __init__(self, is_fundamental, name):
    _Enum.__init__(self, name)
    self.is_fundamental = is_fundamental
class PropertyType(object):
  """Enum of different types of properties/parameters.
  """
  # Fundamental (primitive) JSON types: first ctor argument is True.
  INTEGER = _PropertyTypeInfo(True, "integer")
  INT64 = _PropertyTypeInfo(True, "int64")
  DOUBLE = _PropertyTypeInfo(True, "double")
  BOOLEAN = _PropertyTypeInfo(True, "boolean")
  STRING = _PropertyTypeInfo(True, "string")
  # Compound / non-fundamental types.
  ENUM = _PropertyTypeInfo(False, "enum")
  ARRAY = _PropertyTypeInfo(False, "array")
  REF = _PropertyTypeInfo(False, "ref")
  CHOICES = _PropertyTypeInfo(False, "choices")
  OBJECT = _PropertyTypeInfo(False, "object")
  FUNCTION = _PropertyTypeInfo(False, "function")
  BINARY = _PropertyTypeInfo(False, "binary")
  ANY = _PropertyTypeInfo(False, "any")
@memoize
def UnixName(name):
  '''Returns the unix_style name for a given lowerCamelCase string.

  Dots become underscores, an underscore is inserted at each lowercase-to-
  uppercase boundary (and before the last capital of an acronym run), and
  everything is lowercased.
  '''
  pieces = []
  for index, char in enumerate(name):
    if char.isupper() and index > 0:
      # Replace lowerUpper with lower_Upper.
      if name[index - 1].islower():
        pieces.append('_')
      # Replace ACMEWidgets with ACME_Widgets.
      elif index + 1 < len(name) and name[index + 1].islower():
        pieces.append('_')
    if char == '.':
      # Replace hello.world with hello_world.
      pieces.append('_')
    else:
      # Everything is lowercase.
      pieces.append(char.lower())
  return ''.join(pieces)
def _StripNamespace(name, namespace):
if name.startswith(namespace.name + '.'):
return name[len(namespace.name + '.'):]
return name
def _GetModelHierarchy(entity):
  """Returns the hierarchy of the given model entity."""
  # Walk parent links up to the root, collecting each entity's name (or its
  # repr when it has no name); the final list reads root-first.
  hierarchy = []
  while entity is not None:
    hierarchy.append(getattr(entity, 'name', repr(entity)))
    if isinstance(entity, Namespace):
      # Prefix the trail with the schema file that defined the namespace.
      hierarchy.insert(0, '  in %s' % entity.source_file)
    entity = getattr(entity, 'parent', None)
  hierarchy.reverse()
  return hierarchy
def _GetTypes(parent, json, namespace, origin):
  """Creates Type objects extracted from |json|, keyed by type name in
  declaration order.
  """
  types = OrderedDict()
  for type_json in json.get('types', []):
    parsed = Type(parent, type_json['id'], type_json, namespace, origin)
    types[parsed.name] = parsed
  return types
def _GetFunctions(parent, json, namespace):
  """Creates Function objects extracted from |json|, keyed by function name
  in declaration order. Functions originate from the JSON.
  """
  functions = OrderedDict()
  for function_json in json.get('functions', []):
    parsed = Function(parent, function_json['name'], function_json,
                      namespace, Origin(from_json=True))
    functions[parsed.name] = parsed
  return functions
def _GetEvents(parent, json, namespace):
  """Creates Function objects generated from the events in |json|, keyed by
  event name in declaration order. Events originate from the client.
  """
  events = OrderedDict()
  for event_json in json.get('events', []):
    parsed = Function(parent, event_json['name'], event_json,
                      namespace, Origin(from_client=True))
    events[parsed.name] = parsed
  return events
def _GetProperties(parent, json, namespace, origin):
  """Generates Property objects extracted from |json|, keyed by property
  name.
  """
  return OrderedDict(
      (name, Property(parent, name, property_json, namespace, origin))
      for name, property_json in json.get('properties', {}).items())
class _PlatformInfo(_Enum):
  # A named _Enum entry representing one supported platform (see Platforms).
  def __init__(self, name):
    _Enum.__init__(self, name)
class Platforms(object):
  """Enum of the possible platforms.

  The |name| of each entry matches the string used in a schema's
  'platforms' list (see _GetPlatforms).
  """
  CHROMEOS = _PlatformInfo("chromeos")
  CHROMEOS_TOUCH = _PlatformInfo("chromeos_touch")
  LINUX = _PlatformInfo("linux")
  MAC = _PlatformInfo("mac")
  WIN = _PlatformInfo("win")
def _GetPlatforms(json):
  """Maps the schema's 'platforms' strings onto Platforms enum entries.

  Returns None when the schema declares no 'platforms' key (meaning no
  platform restriction); unknown platform names are silently skipped.
  """
  if 'platforms' not in json:
    return None
  known = list(_Enum.GetAll(Platforms))
  platforms = []
  for platform_name in json['platforms']:
    for platform_enum in known:
      if platform_enum.name == platform_name:
        platforms.append(platform_enum)
        break
  return platforms
| |
# -*- coding: utf-8 -*-
import copy
import itertools
import types
import cv2
import numpy as np
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from sldc import DefaultTileBuilder, Image, TileTopologyIterator
from cell_counting.base_method import BaseMethod
from cell_counting.cnn_architectures import FCRN_A, FCRN_B, sgd_compile
from cell_counting.subwindows import mk_subwindows
from cell_counting.utils import open_image
__author__ = "Ulysse Rubens <urubens@uliege.be>"
__version__ = "0.1"
def lr_scheduler(epoch):
    """Keras learning-rate schedule: halve the rate every 24 epochs.

    The current rate lives in the function attribute ``lr_scheduler.lrate``,
    which the caller must initialize (FCRN.fit sets it from the
    'learning_rate' sk_param) before training starts.

    Parameters
    ----------
    epoch: int
        zero-based epoch index supplied by Keras

    Returns
    -------
    float: the learning rate to use for this epoch
    """
    step = 24
    # Halve the stored rate at every multiple of `step`, except epoch 0.
    if epoch % step == 0 and epoch != 0:
        lr_scheduler.lrate = lr_scheduler.lrate / 2.
    print('Learning rate for epoch {} is {}.'.format(epoch + 1, lr_scheduler.lrate))
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is what it aliased anyway.
    return float(lr_scheduler.lrate)
class FCRN(BaseMethod):
    """Fully Convolutional Regression Network cell counter.

    Wraps a Keras model (FCRN-A or FCRN-B) behind the scikit-learn-style
    BaseMethod interface: fit() trains on density-map subwindows with paired
    image/label augmentation, predict() produces a per-pixel density map by
    tiling large images. NOTE(review): fit() uses itertools.izip, so this
    module is Python 2-only as written.
    """
    def __init__(self, build_fn=None, callbacks=None, **sk_params):
        super(FCRN, self).__init__(build_fn, **sk_params)
        self.callbacks = callbacks
        self.__model = None
    def check_params(self, params):
        # Just for compatibility with Keras
        pass
    def get_params(self, **params):
        # Extend BaseMethod's params with the callbacks list so cloning
        # preserves them.
        res = super(FCRN, self).get_params(**params)
        res.update({'callbacks': self.callbacks})
        return res
    def fit(self, x, y, **kwargs):
        """Train the network on subwindows extracted from (x, y).

        x, y: training images and their targets, forwarded to mk_subwindows.
        Returns the Keras History object from fit_generator.
        """
        self.sk_params['callbacks'] = self.callbacks
        self.sk_params['verbose'] = 2
        # Seed the lr_scheduler's mutable rate before training begins.
        lr_scheduler.lrate = self.sk_params['learning_rate']
        # Resolve the model builder: default builder, callable object, or
        # plain function/method.
        if self.build_fn is None:
            self.__model = self.build_fcrn(**self.filter_sk_params(self.build_fcrn))
        elif not isinstance(self.build_fn, types.FunctionType) and not isinstance(self.build_fn, types.MethodType):
            self.__model = self.build_fn(**self.filter_sk_params(self.build_fn.__call__))
        else:
            self.__model = self.build_fn(**self.filter_sk_params(self.build_fn))
        fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))
        fit_args.update(kwargs)
        # batch_size is consumed by the data generators, not by fit_generator.
        del fit_args['batch_size']
        # Make subwindows for training
        _x, _y = mk_subwindows(x, y, None, flatten=False, **self.filter_sk_params(mk_subwindows))
        # Add a trailing channel axis to the label maps.
        _y = np.expand_dims(_y, axis=4)
        # Data generator: X and y share the same seed so the random
        # augmentations stay pixel-aligned between image and label.
        seed = np.random.randint(2 ** 32 - 1)
        # Normalization-type transforms must not be applied to the label maps.
        exceptions_y_datagen = ['featurewise_center',
                                'samplewise_center',
                                'featurewise_std_normalization',
                                'samplewise_std_normalization']
        X_datagen = ImageDataGenerator(**self.filter_sk_params(ImageDataGenerator.__init__))
        y_datagen = ImageDataGenerator(**self.filter_sk_params(ImageDataGenerator.__init__,
                                                               exceptions=exceptions_y_datagen))
        X_datagen.fit(_x, augment=True, seed=seed)
        y_datagen.fit(_y, augment=True, seed=seed)
        X_gen = X_datagen.flow(_x, None, batch_size=self.sk_params['batch_size'], seed=seed)
        y_gen = y_datagen.flow(_y, None, batch_size=self.sk_params['batch_size'], seed=seed)
        # Python 2 izip lazily pairs (image_batch, label_batch).
        datagen = itertools.izip(X_gen, y_gen)
        self.__history = self.__model.fit_generator(
            datagen,
            steps_per_epoch=_x.shape[0] / self.sk_params['batch_size'],
            **fit_args)
        return self.__history
    def predict(self, X, **kwargs):
        """Predict a density map for each image path/array in X.

        Large images are processed as overlapping tiles (padded so each side
        is a multiple of `div`, as the network downsamples by 8); overlapping
        predictions are averaged.
        """
        kwargs = self.filter_sk_params(Sequential.predict, kwargs)
        div = 8
        max_width = 512
        max_height = 512
        overlap = 30
        dtb = DefaultTileBuilder()
        _X = []
        for x in X:
            x = open_image(x, flag='RGB') # TODO: get mask
            # accumulators: summed predictions and per-pixel tile coverage
            _x = np.zeros((x.shape[0], x.shape[1]))
            count = np.zeros((x.shape[0], x.shape[1]))
            tile_iterator = TiledImage(x).tile_iterator(dtb, max_width=max_width,
                                                        max_height=max_height, overlap=overlap)
            for tile in tile_iterator:
                # NOTE(review): TiledImage maps width->shape[0] (rows), so
                # tile.width/offset_x are vertical here -- consistent with
                # the row-first indexing below.
                height = tile.width
                top = tile.offset_x
                bottom = top + height
                width = tile.height
                left = tile.offset_y
                right = left + width
                # Pad the tile so both sides are multiples of `div`, then add
                # a batch axis for the network.
                __x = np.expand_dims(cv2.copyMakeBorder(x[top:bottom, left:right],
                                                        0, ((height // div * div + div) - height),
                                                        0, ((width // div * div + div) - width),
                                                        borderType=cv2.BORDER_DEFAULT),
                                     axis=0)
                _x[top:bottom, left:right] += np.squeeze(self.model.predict(__x, **kwargs))[:height, :width]
                count[top:bottom, left:right] += 1
            # Average where tiles overlapped.
            _x[count > 1] = _x[count > 1] / count[count > 1]
            # TODO: remove positions outside mask
            _X.append(_x)
        return np.squeeze(_X)
    @property
    def history(self):
        # Keras History from the last fit() call.
        return self.__history
    @property
    def model(self):
        return self.__model
    @model.setter
    def model(self, value):
        self.__model = value
    def save(self, filename):
        # Delegate serialization to the underlying Keras model.
        return self.model.save(filename)
    @staticmethod
    def build_fcrn(architecture='FCRN-test', regularizer=None, initializer='orthogonal',
                   batch_normalization=False, learning_rate=0.01, momentum=0.9, decay=0.,
                   nesterov=False, input_shape=(None, None, 3)):
        # Build and SGD-compile one of the FCRN architectures.
        # NOTE(review): the default 'FCRN-test' matches neither branch and
        # raises ValueError -- callers must pass 'FCRN-A' or 'FCRN-B'.
        if architecture == 'FCRN-A':
            arch = FCRN_A(input_shape, regularizer, initializer,
                          batch_normalization)
        elif architecture == 'FCRN-B':
            arch = FCRN_B(input_shape, regularizer, initializer,
                          batch_normalization)
        else:
            raise ValueError('Unknown method.')
        model = sgd_compile(arch, learning_rate, momentum, decay, nesterov)
        model.summary()
        return model
class TiledImage(Image):
    """Adapter exposing a numpy array through the sldc Image interface so it
    can be cut into (possibly overlapping) tiles.

    NOTE(review): width maps to shape[0] (rows) and height to shape[1]
    (columns), the transpose of the usual convention; FCRN.predict above
    relies on exactly this mapping.
    """
    def __init__(self, np_array):
        # Copy the input so later mutations of the source don't leak in.
        self.np_array = np.array(np_array)
    @property
    def width(self):
        return self.np_array.shape[0]
    @property
    def height(self):
        return self.np_array.shape[1]
    @property
    def np_image(self):
        return self.np_array
    @property
    def channels(self):
        return self.np_array.shape[2]
    def tile_iterator(self, builder, max_width=1024, max_height=1024, overlap=0):
        """Build and return a tile iterator that iterates over the image

        Parameters
        ----------
        builder: TileBuilder
            The builder to user for actually constructing the tiles while iterating over the image
        max_width: int (optional, default: 1024)
            The maximum width of the tiles to build
        max_height: int (optional, default: 1024)
            The maximum height of the tiles to build
        overlap: int (optional, default: 0)
            The overlapping between tiles

        Returns
        -------
        iterator: TileTopologyIterator
            An iterator that iterates over a tile topology of the image
        """
        # tile_topology is not defined here -- presumably inherited from the
        # sldc Image base class; TODO confirm.
        topology = self.tile_topology(builder, max_width=max_width, max_height=max_height, overlap=overlap)
        return TileTopologyIterator(builder, topology)
| |
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.interval.IntervalGlobal import *
from DistributedMinigame import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.toonbase import ToontownTimer
from toontown.toon import ToonHead
from toontown.suit import SuitDNA
from toontown.suit import Suit
from toontown.char import Char
import ArrowKeys
import random
from toontown.toonbase import ToontownGlobals
import string
from toontown.toonbase import TTLocalizer
import TugOfWarGameGlobals
from direct.showutil import Rope
from toontown.effects import Splash
from toontown.effects import Ripples
from toontown.toonbase import TTLocalizer
import MinigamePowerMeter
from direct.task.Task import Task
from otp.nametag import NametagGlobals
class DistributedTugOfWarGame(DistributedMinigame):
bgm = 'phase_4/audio/bgm/MG_tug_o_war.ogg'
toonAnimNames = ['neutral',
'tug-o-war',
'slip-forward',
'slip-backward',
'victory',
'sad-neutral']
suitAnimNames = ['neutral',
'tug-o-war',
'slip-forward',
'slip-backward',
'flail',
'victory']
UPDATE_TIMER_TASK = 'TugOfWarGameUpdateTimerTask'
UPDATE_KEY_PRESS_RATE_TASK = 'TugOfWarGameUpdateKeyPressRateTask'
UPDATE_ROPE_TASK = 'TugOfWarGameUpdateRopeTask'
H_TO_L = 0
L_TO_H = 1
    def __init__(self, cr):
        """Set up the game FSM and initialize all per-game state to defaults."""
        DistributedMinigame.__init__(self, cr)
        # game flow: off -> waitForGoSignal -> tug -> gameDone -> cleanup
        self.gameFSM = ClassicFSM.ClassicFSM('DistributedTugOfWarGame', [State.State('off', self.enterOff, self.exitOff, ['waitForGoSignal']),
         State.State('waitForGoSignal', self.enterWaitForGoSignal, self.exitWaitForGoSignal, ['tug', 'cleanup']),
         State.State('tug', self.enterTug, self.exitTug, ['gameDone', 'cleanup']),
         State.State('gameDone', self.enterGameDone, self.exitGameDone, ['cleanup']),
         State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'off', 'cleanup')
        self.addChildGameFSM(self.gameFSM)
        # opponent configuration (defaults; suit fields matter only for
        # toon-vs-suit games)
        self.gameType = TugOfWarGameGlobals.TOON_VS_TOON
        self.suit = None
        self.suitId = 666
        self.suitType = 'f'
        self.suitLevel = 1
        # per-avatar bookkeeping
        self.sides = {}
        self.avList = [[], []]
        self.buttons = [0, 1]
        self.mouseMode = 0
        self.mouseSide = 0
        self.fallenList = []
        self.handycap = 2.0
        self.advantage = 1.0
        # rope geometry / textures
        self.tugRopes = []
        self.ropePts = []
        self.ropeTex = []
        self.rightHandDict = {}
        self.posDict = {}
        self.hprDict = {}
        self.offsetDict = {}
        self.pullingDict = {}
        self.dropShadowDict = {}
        # input / key-press-rate tracking state
        self.arrowKeys = None
        self.keyTTL = []
        self.idealRate = 2.0
        self.idealForce = 0.0
        self.keyRate = 0
        self.allOutMode = 0
        self.rateMatchAward = 0
        # [min, max] target key rates per round -- TODO confirm meaning
        self.targetRateList = [[8, 6],
         [5, 7],
         [6, 8],
         [6, 10],
         [7, 11],
         [8, 12]]
        self.nextRateIndex = 0
        self.drinkPositions = []
        for k in xrange(4):
            self.drinkPositions.append(VBase3(-.2 + 0.2 * k, 16 + 2 * k, 0.0))
        # fixed-seed RNG so visuals agree across clients -- TODO confirm
        self.rng = RandomNumGen.RandomNumGen(1000)
        # animation intervals, populated during setup
        self.introTrack = None
        self.showTrack = None
        self.setupTrack = None
        self.animTracks = {}
        self.randomNumGen = None
        return
    def getTitle(self):
        """Return the localized display title for this minigame."""
        return TTLocalizer.TugOfWarGameTitle
    def getInstructions(self):
        """Return the localized instruction text shown before the game."""
        return TTLocalizer.TugOfWarInstructions
    def getMaxDuration(self):
        """Return the maximum game duration from the game's globals."""
        return TugOfWarGameGlobals.GAME_DURATION
    def load(self):
        """Load all models, sounds, music and GUI elements for the game."""
        self.notify.debug('load')
        DistributedMinigame.load(self)
        # countdown timer, hidden until the game actually starts
        self.timer = ToontownTimer.ToontownTimer()
        self.timer.posInTopRightCorner()
        self.timer.hide()
        self.room = loader.loadModel('phase_4/models/minigames/tug_of_war_dock')
        self.room.reparentTo(hidden)
        # only the rope's texture is kept; the model itself is discarded
        ropeModel = loader.loadModel('phase_4/models/minigames/tug_of_war_rope')
        self.ropeTexture = ropeModel.findTexture('*')
        ropeModel.removeNode()
        self.sky = loader.loadModel('phase_3.5/models/props/TT_sky')
        self.dropShadow = loader.loadModel('phase_3/models/props/drop_shadow')
        # sound effects + background music
        self.correctSound = base.loadSfx('phase_4/audio/sfx/MG_pos_buzzer.ogg')
        self.sndHitWater = base.loadSfx('phase_4/audio/sfx/MG_cannon_splash.ogg')
        self.whistleSound = base.loadSfx('phase_4/audio/sfx/AA_sound_whistle.ogg')
        self.music = base.loadMusic(self.bgm)
        # on-screen round indicator and key-press power meter
        self.roundText = DirectLabel(text=' ', text_fg=(0, 1, 0, 1), frameColor=(1, 1, 1, 0), text_font=ToontownGlobals.getSignFont(), pos=(0.014, 0, -.84), scale=0.2)
        self.powerMeter = MinigamePowerMeter.MinigamePowerMeter(17)
        self.powerMeter.reparentTo(aspect2d)
        self.powerMeter.setPos(0, 0, 0.4)
        self.powerMeter.setPower(8)
        self.powerMeter.setTarget(8)
        # two arrows flanking the meter; the second is mirrored via a
        # negative x scale
        self.arrows = [None] * 2
        for x in xrange(len(self.arrows)):
            self.arrows[x] = loader.loadModel('phase_3/models/props/arrow')
            self.arrows[x].reparentTo(self.powerMeter)
            self.arrows[x].hide()
            self.arrows[x].setScale(0.2 - 0.4 * x, 0.2, 0.2)
            self.arrows[x].setPos(0.12 - 0.24 * x, 0, -.26)
            self.disableArrow(self.arrows[x])
        # water splash/ripple effects for toon and suit falls
        self.splash = Splash.Splash(render)
        self.suitSplash = Splash.Splash(render)
        self.ripples = Ripples.Ripples(render)
        self.suitRipples = Ripples.Ripples(render)
        return
    def toggleMouseMode(self, param):
        """Toggle mouse-driven input on/off.

        When turning mouse mode on, highlight the arrow on the side of the
        screen the pointer is on and start the mouse-speed tracking task;
        when turning it off, release both buttons and kill the task.
        `param` is expected to expose getMouse() -- TODO confirm caller.
        """
        self.mouseMode = not self.mouseMode
        if self.mouseMode:
            mpos = param.getMouse()
            # negative x means the pointer is on the left half of the screen
            if mpos[0] < 0:
                self.hilightArrow(self.arrows[1])
            else:
                self.hilightArrow(self.arrows[0])
            self.__spawnMouseSpeedTask()
        else:
            self.__releaseHandler(0)
            self.__releaseHandler(1)
            self.__killMouseSpeedTask()
def unload(self):
    """Tear down everything created in load()/setGameReady().

    NOTE: the bare `del x` / `del t` statements inside the loops only
    unbind loop-local names (a decompiler artifact); the containers
    themselves are released by the `del self....` statements that follow.
    """
    self.notify.debug('unload')
    DistributedMinigame.unload(self)
    del self.lt
    self.timer.destroy()
    del self.timer
    self.room.removeNode()
    del self.room
    self.sky.removeNode()
    del self.sky
    del self.dropShadowDict
    self.dropShadow.removeNode()
    del self.dropShadow
    del self.correctSound
    del self.sndHitWater
    del self.whistleSound
    del self.music
    self.roundText.destroy()
    del self.roundText
    if self.powerMeter:
        self.powerMeter.destroy()
        del self.powerMeter
    for x in self.arrows:
        if x:
            x.removeNode()
        del x
    del self.arrows
    self.splash.destroy()
    del self.splash
    self.suitSplash.destroy()
    del self.suitSplash
    if self.ripples != None:
        self.ripples.stop()
        self.ripples.detachNode()
        del self.ripples
    if self.suitRipples != None:
        self.suitRipples.stop()
        self.suitRipples.detachNode()
        del self.suitRipples
    for x in self.avList:
        del x
    del self.avList
    for x in self.tugRopes:
        if x != None:
            x.detachNode()
        del x
    del self.tugRopes
    for x in self.ropePts:
        if x:
            for t in x:
                del t
        del x
    del self.ropePts
    for x in self.ropeTex:
        if x:
            for t in x:
                t.destroy()
                del t
        del x
    del self.ropeTex
    del self.posDict
    del self.hprDict
    self.removeChildGameFSM(self.gameFSM)
    del self.gameFSM
    if self.suit:
        self.suit.delete()
        del self.suit
    del self.sides
    del self.buttons
    del self.pullingDict
    del self.rightHandDict
    for x in self.drinkPositions:
        del x
    del self.drinkPositions
    del self.offsetDict
    del self.keyTTL
    del self.rng
    return
def onstage(self):
    """Place the room/sky/effects into the scene and start the music."""
    self.notify.debug('onstage')
    DistributedMinigame.onstage(self)
    self.lt = base.localAvatar
    NametagGlobals.setGlobalNametagScale(1)
    self.arrowKeys = ArrowKeys.ArrowKeys()
    self.room.reparentTo(render)
    self.room.setPosHpr(0.0, 18.39, -ToontownGlobals.FloorOffset, 0.0, 0.0, 0.0)
    self.room.setScale(0.4)
    self.sky.setZ(-5)
    self.sky.reparentTo(render)
    self.dropShadow.setColor(0, 0, 0, 0.5)
    camera.reparentTo(render)
    # Initial camera pose matches the side-0 intro viewpoint in initCamera().
    camera.setPosHpr(-11.4427, 9.03559, 2.80094, -49.104, -0.89, 0)
    self.dropShadow.setBin('fixed', 0, 1)
    self.splash.reparentTo(render)
    self.suitSplash.reparentTo(render)
    base.playMusic(self.music, looping=1, volume=1)
    for x in xrange(len(self.arrows)):
        self.arrows[x].show()
    # Nobody is pulling yet.
    for avId in self.avIdList:
        self.pullingDict[avId] = 0
def offstage(self):
    """Stop music and intervals and hide everything when leaving the stage."""
    self.notify.debug('offstage')
    DistributedMinigame.offstage(self)
    self.music.stop()
    if self.introTrack:
        self.introTrack.finish()
        del self.introTrack
        self.introTrack = None
    for track in self.animTracks.values():
        if track:
            track.finish()
        del track
    self.animTracks = {}
    if self.showTrack:
        self.showTrack.finish()
        del self.showTrack
        self.showTrack = None
    if self.setupTrack:
        self.setupTrack.finish()
        del self.setupTrack
        self.setupTrack = None
    # Restore the default camera lens changed in initCamera().
    base.camLens.setMinFov(ToontownGlobals.DefaultCameraFov/(4./3.))
    base.camLens.setNearFar(ToontownGlobals.DefaultCameraNear, ToontownGlobals.DefaultCameraFar)
    NametagGlobals.setGlobalNametagScale(1.0)
    if self.arrowKeys:
        self.arrowKeys.setPressHandlers(self.arrowKeys.NULL_HANDLERS)
        self.arrowKeys.setReleaseHandlers(self.arrowKeys.NULL_HANDLERS)
        self.arrowKeys.destroy()
        del self.arrowKeys
        self.arrowKeys = None
    self.room.reparentTo(hidden)
    self.sky.reparentTo(hidden)
    self.splash.reparentTo(hidden)
    self.splash.stop()
    self.suitSplash.reparentTo(hidden)
    self.suitSplash.stop()
    self.ripples.reparentTo(hidden)
    self.ripples.stop()
    self.hideControls()
    self.roundText.hide()
    # Return every toon to a neutral idle with its normal shadow.
    for avId in self.avIdList:
        av = self.getAvatar(avId)
        if av:
            av.loop('neutral')
            av.resetLOD()
            av.dropShadow.show()
    for x in self.tugRopes:
        if x != None:
            x.reparentTo(hidden)
    if self.suit:
        self.suit.reparentTo(hidden)
    for avId in self.avIdList:
        if self.dropShadowDict.has_key(avId):
            self.dropShadowDict[avId].reparentTo(hidden)
    if self.dropShadowDict.has_key(self.suitId):
        self.dropShadowDict[self.suitId].reparentTo(hidden)
    return
def initCamera(self):
    """Start the intro camera fly-in from the local toon's side to the game view."""
    # Overhead viewpoint; not referenced below (kept from development).
    birdseyePosHpr = [1.95461, 18.4891, 38.4646, 1.18185, -87.5308, 0]
    # One intro [pos, hpr] pair per side of the dock.
    introPosHpr = [None] * 2
    introPosHpr[0] = [VBase3(-11.4427, 9.03559, 2.80094), VBase3(-49.104, -0.732374, 0)]
    introPosHpr[1] = [VBase3(16.9291, 13.9302, 2.64282), VBase3(66.9685, -6.195, 0)]
    gameCamHpr = VBase3(-1.13, 1.042, 0)
    gameCamPos = VBase3(0, 1.0838, 2.745)
    camera.reparentTo(render)
    camera.setPosHpr(introPosHpr[self.sides[self.localAvId]][0], introPosHpr[self.sides[self.localAvId]][1])
    lerpDur = 8
    self.introTrack = LerpPosHprInterval(camera, lerpDur, pos=gameCamPos, hpr=gameCamHpr, blendType='easeInOut', name=self.uniqueName('introLerpCameraPos'))
    self.introTrack.start()
    # Widen the FOV slightly per extra player; divided by 4:3 for min-FOV units.
    base.camLens.setMinFov((60 + 2 * self.numPlayers)/(4./3.))
    base.camLens.setFar(450.0)
    return
def sendGameType(self, index, suit):
    """Record the game type and suit tier broadcast by the AI server.

    index -- game type (toon-vs-cog or toon-vs-toon constant).
    suit  -- suit tier 1..4, mapped to a SuitDNA head code.
    """
    if not self.hasLocalToon:
        return
    self.gameType = index
    self.suitLevel = suit
    # Unknown tiers leave suitType untouched, matching the original chain.
    tierToHead = {1: 'pp', 2: 'dt', 3: 'gh', 4: 'cr'}
    if suit in tierToHead:
        self.suitType = tierToHead[suit]
def setGameReady(self):
    """Build toons, suit, positions, ropes, and camera once all clients are ready."""
    if not self.hasLocalToon:
        return
    self.notify.debug('setGameReady')
    if DistributedMinigame.setGameReady(self):
        return
    self.initToons()
    self.createSuits()
    self.calculatePositions()
    self.initHandycaps()
    self.initRopes()
    self.initCamera()
    # One animation track slot per puller, including the suit.
    self.animTracks = {}
    for avId in self.avIdList:
        self.animTracks[avId] = None
    self.animTracks[self.suitId] = None
    self.showTrack = None
    self.setupTrack = None
    self.__initGameVars()
    return
def hideControls(self):
    """Hide arrows, ropes, rope texture nodes, and the power meter."""
    for x in xrange(len(self.arrows)):
        self.arrows[x].hide()
    for rope in self.tugRopes:
        if rope != None:
            rope.reparentTo(hidden)
    for tex in self.ropeTex:
        if tex != None:
            for texi in tex:
                if texi:
                    texi.reparentTo(hidden)
    if self.powerMeter != None:
        # Stop listening for mouse clicks on the meter before hiding it.
        self.powerMeter.unbind(DGG.B1PRESS)
        self.powerMeter.unbind(DGG.B1RELEASE)
        self.powerMeter.hide()
    return
def setUpRopes(self, notTaut):
if self.numPlayers == 1:
suitRightHand = self.suit.getRightHand()
toonRightHand = self.rightHandDict[self.avIdList[0]]
if notTaut:
self.tugRopes[0].setup(3, ((toonRightHand, (0, 0, 0)), (render, (0, 18, -1)), (suitRightHand, (0, 0, 0))), [0,
0,
0,
1,
1,
1])
else:
midPt = (suitRightHand.getPos() - toonRightHand.getPos()) / 2.0
self.tugRopes[0].setup(3, ((toonRightHand, (0, 0, 0)), (toonRightHand, (0, 0, 0)), (suitRightHand, (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].reparentTo(render)
elif self.numPlayers == 2:
if self.gameType == TugOfWarGameGlobals.TOON_VS_COG:
self.tugRopes[0].setup(3, ((self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
suitRightHand = self.suit.getRightHand()
toonRightHand = self.rightHandDict[self.avIdList[1]]
if notTaut:
self.tugRopes[1].setup(3, ((toonRightHand, (0, 0, 0)), (render, (0, 18, -1)), (suitRightHand, (0, 0, 0))), [0,
0,
0,
1,
1,
1])
else:
midPt = (suitRightHand.getPos() - toonRightHand.getPos()) / 2.0
self.tugRopes[1].setup(3, ((toonRightHand, (0, 0, 0)), (toonRightHand, (0, 0, 0)), (suitRightHand, (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].reparentTo(render)
self.tugRopes[1].reparentTo(render)
else:
if notTaut:
self.tugRopes[0].setup(3, ((self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (render, (0, 18, -1)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
else:
self.tugRopes[0].setup(3, ((self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].reparentTo(render)
elif self.numPlayers == 3:
if self.gameType == TugOfWarGameGlobals.TOON_VS_COG:
self.tugRopes[1].setup(3, ((self.rightHandDict[self.avIdList[1]], (0, 0, 0)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0)), (self.rightHandDict[self.avIdList[2]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].setup(3, ((self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
suitRightHand = self.suit.getRightHand()
toonRightHand = self.rightHandDict[self.avIdList[2]]
if notTaut:
self.tugRopes[2].setup(3, ((toonRightHand, (0, 0, 0)), (render, (0, 18, -1)), (suitRightHand, (0, 0, 0))), [0,
0,
0,
1,
1,
1])
else:
midPt = (suitRightHand.getPos() - toonRightHand.getPos()) / 2.0
self.tugRopes[2].setup(3, ((toonRightHand, (0, 0, 0)), (toonRightHand, (0, 0, 0)), (suitRightHand, (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].reparentTo(render)
self.tugRopes[1].reparentTo(render)
self.tugRopes[2].reparentTo(render)
else:
if notTaut:
self.tugRopes[1].setup(3, ((self.rightHandDict[self.avIdList[1]], (0, 0, 0)), (render, (0, 18, -1)), (self.rightHandDict[self.avIdList[2]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
else:
self.tugRopes[1].setup(3, ((self.rightHandDict[self.avIdList[1]], (0, 0, 0)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0)), (self.rightHandDict[self.avIdList[2]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].setup(3, ((self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].reparentTo(render)
self.tugRopes[1].reparentTo(render)
elif self.numPlayers == 4:
if self.gameType == TugOfWarGameGlobals.TOON_VS_COG:
self.tugRopes[2].setup(3, ((self.rightHandDict[self.avIdList[2]], (0, 0, 0)), (self.rightHandDict[self.avIdList[2]], (0, 0, 0)), (self.rightHandDict[self.avIdList[3]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[1].setup(3, ((self.rightHandDict[self.avIdList[1]], (0, 0, 0)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0)), (self.rightHandDict[self.avIdList[2]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].setup(3, ((self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
suitRightHand = self.suit.getRightHand()
toonRightHand = self.rightHandDict[self.avIdList[3]]
if notTaut:
self.tugRopes[3].setup(3, ((toonRightHand, (0, 0, 0)), (render, (0, 18, -1)), (suitRightHand, (0, 0, 0))), [0,
0,
0,
1,
1,
1])
else:
midPt = (suitRightHand.getPos() - toonRightHand.getPos()) / 2.0
self.tugRopes[3].setup(3, ((toonRightHand, (0, 0, 0)), (toonRightHand, (0, 0, 0)), (suitRightHand, (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].reparentTo(render)
self.tugRopes[1].reparentTo(render)
self.tugRopes[2].reparentTo(render)
self.tugRopes[3].reparentTo(render)
else:
self.tugRopes[2].setup(3, ((self.rightHandDict[self.avIdList[2]], (0, 0, 0)), (self.rightHandDict[self.avIdList[2]], (0, 0, 0)), (self.rightHandDict[self.avIdList[3]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].setup(3, ((self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[0]], (0, 0, 0)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
if notTaut:
self.tugRopes[1].setup(3, ((self.rightHandDict[self.avIdList[1]], (0, 0, 0)), (render, (0, 18, -1)), (self.rightHandDict[self.avIdList[2]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
else:
self.tugRopes[1].setup(3, ((self.rightHandDict[self.avIdList[1]], (0, 0, 0)), (self.rightHandDict[self.avIdList[1]], (0, 0, 0)), (self.rightHandDict[self.avIdList[2]], (0, 0, 0))), [0,
0,
0,
1,
1,
1])
self.tugRopes[0].reparentTo(render)
self.tugRopes[1].reparentTo(render)
self.tugRopes[2].reparentTo(render)
def initToons(self):
    """Prepare each toon: full LOD, preload anims, cache right-hand joint, and
    swap its built-in drop shadow for a game-scaled copy."""
    for avId in self.avIdList:
        toon = self.getAvatar(avId)
        if toon:
            toon.reparentTo(render)
            toon.useLOD(1000)
            toon.startBlink()
            toon.startLookAround()
            # Touch every game anim once so playback starts without a hitch.
            for anim in self.toonAnimNames:
                toon.pose(anim, 0)
            toon.pose('tug-o-war', 3)
            self.rightHandDict[avId] = toon.getRightHands()[0]
            toon.loop('neutral')
            toon.dropShadow.hide()
            self.dropShadowDict[avId] = self.dropShadow.copyTo(hidden)
            self.dropShadowDict[avId].reparentTo(toon)
            self.dropShadowDict[avId].setScale(0.35)
def calculatePositions(self):
    """Assign each puller a dock slot and heading based on player count and
    game type, then record which side of the water gap everyone is on."""
    # Headings: index 0 faces the gap from the left, index 1 from the right.
    hprPositions = [VBase3(240, 0, 0), VBase3(120, 0, 0)]
    # Slots 0-4 are left of the gap (negative X), slots 5-9 to the right.
    dockPositions = []
    for k in xrange(5):
        dockPositions.append(VBase3(-9.0 + 1.5 * k, 18, 0.1))
    for k in xrange(5):
        dockPositions.append(VBase3(3 + 1.5 * k, 18, 0.1))
    # The shuffles/sorts below reorder avIdList; tell the AI the new order.
    self.sendUpdate('sendNewAvIdList', [self.avIdList])
    if self.numPlayers == 1:
        if self.gameType == TugOfWarGameGlobals.TOON_VS_COG:
            self.posDict[self.suitId] = dockPositions[7]
            self.posDict[self.avIdList[0]] = dockPositions[2]
            self.hprDict[self.avIdList[0]] = hprPositions[0]
        else:
            self.notify.warning("can't play toon vs. toon with one player")
    elif self.numPlayers == 2:
        if self.gameType == TugOfWarGameGlobals.TOON_VS_COG:
            # Tallest toon in front (by right-hand height).
            self.arrangeByHeight(self.avIdList, self.H_TO_L, 0, 1)
            self.posDict[self.suitId] = dockPositions[7]
            self.posDict[self.avIdList[0]] = dockPositions[1]
            self.posDict[self.avIdList[1]] = dockPositions[2]
            self.hprDict[self.avIdList[0]] = hprPositions[0]
            self.hprDict[self.avIdList[1]] = hprPositions[0]
        else:
            self.randomNumGen.shuffle(self.avIdList)
            self.posDict[self.avIdList[0]] = dockPositions[2]
            self.posDict[self.avIdList[1]] = dockPositions[7]
            self.hprDict[self.avIdList[0]] = hprPositions[0]
            self.hprDict[self.avIdList[1]] = hprPositions[1]
    elif self.numPlayers == 3:
        if self.gameType == TugOfWarGameGlobals.TOON_VS_COG:
            self.arrangeByHeight(self.avIdList, self.H_TO_L, 0, 2)
            self.posDict[self.suitId] = dockPositions[7]
            self.posDict[self.avIdList[0]] = dockPositions[0]
            self.posDict[self.avIdList[1]] = dockPositions[1]
            self.posDict[self.avIdList[2]] = dockPositions[2]
            self.hprDict[self.avIdList[0]] = hprPositions[0]
            self.hprDict[self.avIdList[1]] = hprPositions[0]
            self.hprDict[self.avIdList[2]] = hprPositions[0]
        else:
            # 2-vs-1: shuffle for fairness, then order the pair by height.
            self.randomNumGen.shuffle(self.avIdList)
            self.arrangeByHeight(self.avIdList, self.H_TO_L, 0, 1)
            self.posDict[self.avIdList[0]] = dockPositions[1]
            self.posDict[self.avIdList[1]] = dockPositions[2]
            self.posDict[self.avIdList[2]] = dockPositions[7]
            self.hprDict[self.avIdList[0]] = hprPositions[0]
            self.hprDict[self.avIdList[1]] = hprPositions[0]
            self.hprDict[self.avIdList[2]] = hprPositions[1]
    elif self.numPlayers == 4:
        if self.gameType == TugOfWarGameGlobals.TOON_VS_COG:
            self.arrangeByHeight(self.avIdList, self.H_TO_L, 0, 3)
            self.posDict[self.suitId] = dockPositions[6]
            self.posDict[self.avIdList[0]] = dockPositions[0]
            self.posDict[self.avIdList[1]] = dockPositions[1]
            self.posDict[self.avIdList[2]] = dockPositions[2]
            self.posDict[self.avIdList[3]] = dockPositions[3]
            self.hprDict[self.avIdList[0]] = hprPositions[0]
            self.hprDict[self.avIdList[1]] = hprPositions[0]
            self.hprDict[self.avIdList[2]] = hprPositions[0]
            self.hprDict[self.avIdList[3]] = hprPositions[0]
        else:
            # 2-vs-2: order each pair so shorter toons stand nearer the gap.
            self.randomNumGen.shuffle(self.avIdList)
            self.arrangeByHeight(self.avIdList, self.H_TO_L, 0, 1)
            self.arrangeByHeight(self.avIdList, self.L_TO_H, 2, 3)
            self.posDict[self.avIdList[0]] = dockPositions[1]
            self.posDict[self.avIdList[1]] = dockPositions[2]
            self.posDict[self.avIdList[2]] = dockPositions[7]
            self.posDict[self.avIdList[3]] = dockPositions[8]
            self.hprDict[self.avIdList[0]] = hprPositions[0]
            self.hprDict[self.avIdList[1]] = hprPositions[0]
            self.hprDict[self.avIdList[2]] = hprPositions[1]
            self.hprDict[self.avIdList[3]] = hprPositions[1]
    # Side 0 = left of the gap (negative X), side 1 = right.
    for x in self.avIdList:
        self.offsetDict[x] = 0
        if self.posDict[x][0] < 0:
            self.sides[x] = 0
            self.avList[0].append(x)
        else:
            self.sides[x] = 1
            self.avList[1].append(x)
    for avId in self.avIdList:
        toon = self.getAvatar(avId)
        toon.setPos(self.posDict[avId])
        toon.setHpr(self.hprDict[avId])
def arrangeByHeight(self, avIdList, order, iStart, iFin):
    """In-place sort of avIdList[iStart:iFin+1] by right-hand joint height.

    order H_TO_L sorts tallest first; L_TO_H sorts shortest first.
    """
    for i in xrange(iStart, iFin + 1):
        for j in xrange(i + 1, iFin + 1):
            zHere = self.rightHandDict[avIdList[i]].getZ()
            zThere = self.rightHandDict[avIdList[j]].getZ()
            wrongForDescending = order == self.H_TO_L and zHere < zThere
            wrongForAscending = order == self.L_TO_H and zHere > zThere
            if wrongForDescending or wrongForAscending:
                avIdList[i], avIdList[j] = avIdList[j], avIdList[i]
def disableArrow(self, a):
    """Dim an arrow to translucent red to show it is inactive."""
    inactiveRed = (1, 0, 0, 0.3)
    a.setColor(*inactiveRed)
def enableArrow(self, a):
    """Paint an arrow solid red to mark it as active/pressable."""
    activeRed = (1, 0, 0, 1)
    a.setColor(*activeRed)
def hilightArrow(self, a):
    """Flash an arrow orange while its key is held down."""
    heldOrange = (1, 0.7, 0, 1)
    a.setColor(*heldOrange)
def unhilightArrow(self, a):
    """Return a highlighted arrow to its normal enabled color."""
    self.enableArrow(a)
def handleDisabledAvatar(self, avId):
    """Log and delegate when a participating avatar disconnects."""
    self.notify.debug('handleDisabledAvatar')
    self.notify.debug('avatar ' + str(avId) + ' disabled')
    DistributedMinigame.handleDisabledAvatar(self, avId)
def __playing(self):
    """Return True while the game FSM has not yet reached its final state."""
    fsm = self.gameFSM
    return fsm.getCurrentState() != fsm.getFinalState()
def setGameStart(self, timestamp):
    """AI broadcast that the game has started; move the FSM to the go-signal wait."""
    if not self.hasLocalToon:
        return
    self.notify.debug('setGameStart')
    # Ignore a late start if the game already ended locally.
    if not self.__playing():
        return
    DistributedMinigame.setGameStart(self, timestamp)
    self.gameFSM.request('waitForGoSignal')
def __initGameVars(self):
    # Hook for per-round variable setup; intentionally empty here.
    pass
def makeToonLookatCamera(self, toon):
    """Turn a toon's head toward the camera."""
    toon.headsUp(camera)
def setText(self, t, newtext):
    """Set the text of a DirectGUI label (used via Func in intervals)."""
    t['text'] = newtext
def setTextFG(self, t, fg):
    """Set the foreground color of a DirectGUI label."""
    t['text_fg'] = fg
def enterOff(self):
    """FSM: idle/off state entry."""
    self.notify.debug('enterOff')
def exitOff(self):
    """FSM: idle/off state exit; nothing to clean up."""
    pass
def enterWaitForGoSignal(self):
    """FSM: show the meter, report readiness, and arm a go-signal timeout."""
    self.notify.debug('enterWaitForGoSignal')
    self.powerMeter.show()
    self.sendUpdate('reportPlayerReady', [self.sides[self.localAvId]])
    self.roundText.show()
    taskMgr.doMethodLater(TugOfWarGameGlobals.WAIT_FOR_GO_TIMEOUT, self.waitForGoTimeoutTask, self.taskName('wait-for-go-timeout'))
def exitWaitForGoSignal(self):
    """FSM: cancel the go-signal timeout."""
    taskMgr.remove(self.taskName('wait-for-go-timeout'))
def enterTug(self):
    """FSM: start rate/timer tasks for the tug phase and animate the suit."""
    self.notify.debug('enterTug')
    self.__spawnUpdateIdealRateTask()
    self.__spawnUpdateTimerTask()
    self.__spawnUpdateKeyPressRateTask()
    # Failsafe in case the AI never sends the stop signal.
    taskMgr.doMethodLater(TugOfWarGameGlobals.TUG_TIMEOUT, self.tugTimeoutTask, self.taskName('tug-timeout'))
    if self.suit:
        self.suit.loop('tug-o-war')
def exitTug(self):
    """FSM: stop the timer, the suit's pulling anim, and the tug timeout."""
    self.notify.debug('exitTug')
    if self.suit:
        self.suit.loop('neutral')
    self.timer.stop()
    self.timer.hide()
    taskMgr.remove(self.taskName('tug-timeout'))
def enterGameDone(self):
    """FSM: game-done entry; reactions are driven by sendStopSignal()."""
    pass
def exitGameDone(self):
    """FSM: game-done exit; nothing to clean up."""
    pass
def enterCleanup(self):
    """FSM: kill every recurring task this game spawned."""
    self.notify.debug('enterCleanup')
    self.__killUpdateIdealRateTask()
    self.__killUpdateTimerTask()
    self.__killUpdateKeyPressRateTask()
    self.__killUpdateRopeTask()
def exitCleanup(self):
    """FSM: cleanup exit; nothing to clean up."""
    pass
def __gameTimerExpired(self):
    """Countdown hit zero: stop accepting arrow-key input."""
    self.notify.debug('game timer expired')
    if self.arrowKeys:
        self.arrowKeys.setPressHandlers(self.arrowKeys.NULL_HANDLERS)
        self.arrowKeys.setReleaseHandlers(self.arrowKeys.NULL_HANDLERS)
def __pressHandler(self, index):
    """Count a key press if it is the expected button; alternate the pair.

    self.buttons[0] is always the next expected button; reversing the list
    forces the player to alternate left/right presses.
    """
    self.notify.debug('pressHandler')
    if index == self.buttons[0]:
        self.hilightArrow(self.arrows[index])
        # Each press lives for 1.0s in the rate window (see keyTTL aging).
        self.keyTTL.insert(0, 1.0)
        self.buttons.reverse()
def __releaseHandler(self, index):
    """Un-highlight the arrow for a released button (if it is an active one)."""
    self.notify.debug('releaseHandler')
    if index in self.buttons:
        self.unhilightArrow(self.arrows[index])
def __updateKeyPressRateTask(self, task):
    """Age recorded key presses, drop expired ones, and score rate matching.

    Runs every 0.1s during the tug phase; keyRate is the number of presses
    still alive in the one-second window.
    """
    if self.gameFSM.getCurrentState().getName() != 'tug':
        return Task.done
    # Age every recorded press by one tick.
    for i in xrange(len(self.keyTTL)):
        self.keyTTL[i] -= 0.1
    # Entries are newest-first, so truncate at the first expired one.
    for i in xrange(len(self.keyTTL)):
        if self.keyTTL[i] <= 0:
            self.keyTTL = self.keyTTL[0:i]
            break
    self.keyRate = len(self.keyTTL)
    # Reward staying at (or one above) the ideal rate; reset otherwise.
    if self.keyRate in (self.idealRate, self.idealRate + 1):
        self.rateMatchAward += 0.3
    else:
        self.rateMatchAward = 0
    self.__spawnUpdateKeyPressRateTask()
    return Task.done
def __updateTimerTask(self, task):
    """Periodic: compute force from the key rate, report it, refresh the gauge."""
    if self.gameFSM.getCurrentState().getName() != 'tug':
        return Task.done
    self.currentForce = self.computeForce(self.keyRate)
    self.sendUpdate('reportCurrentKeyRate', [self.keyRate, self.currentForce])
    self.setSpeedGauge()
    self.setAnimState(self.localAvId, self.keyRate)
    # Reschedule ourselves; done/reschedule instead of Task.again.
    self.__spawnUpdateTimerTask()
    return Task.done
def __spawnUpdateTimerTask(self):
    """(Re)schedule the periodic key-rate report at the SEND_UPDATE interval."""
    taskMgr.remove(self.taskName(self.UPDATE_TIMER_TASK))
    taskMgr.doMethodLater(TugOfWarGameGlobals.SEND_UPDATE, self.__updateTimerTask, self.taskName(self.UPDATE_TIMER_TASK))
def __killUpdateTimerTask(self):
    """Cancel the periodic key-rate report task."""
    taskMgr.remove(self.taskName(self.UPDATE_TIMER_TASK))
def __spawnUpdateKeyPressRateTask(self):
    """(Re)schedule the 0.1s key-press-window aging task."""
    taskMgr.remove(self.taskName(self.UPDATE_KEY_PRESS_RATE_TASK))
    taskMgr.doMethodLater(0.1, self.__updateKeyPressRateTask, self.taskName(self.UPDATE_KEY_PRESS_RATE_TASK))
def __killUpdateKeyPressRateTask(self):
    """Cancel the key-press-window aging task."""
    taskMgr.remove(self.taskName(self.UPDATE_KEY_PRESS_RATE_TASK))
def __spawnUpdateIdealRateTask(self):
    """Start stepping through the (duration, rate) targetRateList schedule."""
    self.idealRate = self.targetRateList[self.nextRateIndex][1]
    # Ideal force mirrors the non-award term in computeForce().
    self.idealForce = self.advantage * (4 + 0.4 * self.idealRate)
    taskMgr.doMethodLater(self.targetRateList[self.nextRateIndex][0], self.__updateIdealRateTask, self.taskName('targetRateTimer'))
def __updateIdealRateTask(self, task):
    """Advance to the next target rate; the final entry enables all-out mode."""
    self.nextRateIndex = self.nextRateIndex + 1
    if self.nextRateIndex < len(self.targetRateList):
        if self.nextRateIndex == len(self.targetRateList) - 1:
            self.allOutMode = 1
        self.idealRate = self.targetRateList[self.nextRateIndex][1]
        self.idealForce = self.advantage * (4 + 0.4 * self.idealRate)
        taskMgr.doMethodLater(self.targetRateList[self.nextRateIndex][0], self.__updateIdealRateTask, self.taskName('targetRateTimer'))
    return Task.done
def __killUpdateIdealRateTask(self):
    """Cancel the target-rate schedule task."""
    taskMgr.remove(self.taskName('targetRateTimer'))
def sendGoSignal(self, index):
    """AI says the round is starting: wire up the keys and run READY/GO.

    index -- list of the arrow-key button indices this toon must alternate.
    The nested functions use the `self = self` default-argument idiom so
    they can be invoked argument-free from Func() intervals.
    """
    if not self.hasLocalToon:
        return
    self.notify.debug('sendGoSignal')
    self.buttons = index
    self.setupTrack = None
    self.showTrack = None

    def startTimer(self = self):
        # Show and start the round countdown.
        self.currentStartTime = int(globalClock.getFrameTime() * 1000)
        time = 10
        self.timer.show()
        self.timer.setTime(TugOfWarGameGlobals.GAME_DURATION)
        self.timer.countdown(TugOfWarGameGlobals.GAME_DURATION, self.__gameTimerExpired)

    def enableKeys(self = self):

        def keyPress(self, index):
            self.__pressHandler(index)

        def keyRelease(self, index):
            self.__releaseHandler(index)

        # Handler order is [up, down, left, right] mapped to buttons 2,3,1,0.
        self.arrowKeys.setPressHandlers([lambda self = self, keyPress = keyPress: keyPress(self, 2),
         lambda self = self, keyPress = keyPress: keyPress(self, 3),
         lambda self = self, keyPress = keyPress: keyPress(self, 1),
         lambda self = self, keyPress = keyPress: keyPress(self, 0)])
        self.arrowKeys.setReleaseHandlers([lambda self = self, keyRelease = keyRelease: keyRelease(self, 2),
         lambda self = self, keyRelease = keyRelease: keyRelease(self, 3),
         lambda self = self, keyRelease = keyRelease: keyRelease(self, 1),
         lambda self = self, keyRelease = keyRelease: keyRelease(self, 0)])
        for x in index:
            self.enableArrow(self.arrows[x])

    if self.introTrack != None:
        self.introTrack.finish()
        self.introTrack = None
    # READY ... (whistle) GO! then enter the tug state.
    self.setupTrack = Sequence(Func(self.setText, self.roundText, TTLocalizer.TugOfWarGameReady), Wait(1.5), Func(base.playSfx, self.whistleSound), Func(self.setText, self.roundText, TTLocalizer.TugOfWarGameGo), Func(self.roundText.setScale, 0.3), Wait(1.5), Func(startTimer), Func(enableKeys), Func(self.gameFSM.request, 'tug'), Func(self.setText, self.roundText, ' '), Func(self.roundText.setScale, 0.2))
    self.setupTrack.start()
    return
def sendStopSignal(self, winners, losers, tieers):
if not self.hasLocalToon:
return
self.notify.debug('sendStopSignal')
self.gameFSM.request('gameDone')
self.hideControls()
reactSeq = Sequence()
exitSeq = Sequence()
suitSlipTime = 0
if self.gameFSM.getCurrentState().getName() == 'cleanup' or not self.randomNumGen:
return
if self.suit:
#For the Alpha Blueprint ARG
if base.config.GetBool('want-blueprint4-ARG', False):
MinigameGlobals.generateDebugARGPhrase()
if self.suitId in winners:
newPos = VBase3(2.65, 18, 0.1)
randInt = self.randomNumGen.randrange(0, 10)
oopsTrack = Wait(0)
if randInt < 3:
suitSlipTime = 2.2
waterPos = VBase3(0, 16, -5)
newPos -= VBase3(0.4, 0, 0)
self.suitSplash.stop()
self.suitSplash.setPos(waterPos[0], waterPos[1], -1.8)
self.suitSplash.setScale(3.5, 3.5, 1)
self.suitRipples.setPos(waterPos[0], waterPos[1], -1.7)
self.suitRipples.setScale(1, 1, 1)
startHpr = self.suit.getHpr()
destHpr = startHpr + VBase3(0, 0, -30)
oopsTrack = Sequence(Parallel(Func(self.suit.play, 'flail', None, 26, 38), LerpHprInterval(self.suit, 0.5, destHpr, startHpr=startHpr)), Parallel(Func(self.suit.play, 'slip-forward'), LerpPosInterval(self.suit, duration=1, pos=waterPos), Sequence(Wait(0.55), Func(base.playSfx, self.sndHitWater), Func(self.suitSplash.play), Func(self.ripples.play))))
reactSeq.append(Sequence(Func(self.suit.loop, 'victory'), Wait(2.6), LerpPosInterval(self.suit, duration=2, pos=newPos), oopsTrack, Func(self.suit.loop, 'neutral')))
for avId in self.avIdList:
toon = self.getAvatar(avId)
toon.loop('neutral')
if avId in winners:
reactSeq.append(Func(toon.loop, 'victory'))
elif avId in losers:
reactSeq.append(Func(toon.loop, 'neutral'))
else:
reactSeq.append(Func(toon.loop, 'neutral'))
if self.localAvId in winners:
exitSeq.append(Func(self.setText, self.roundText, TTLocalizer.TugOfWarGameEnd))
exitSeq.append(Wait(5.0))
elif self.localAvId in losers:
exitSeq.append(Func(self.setText, self.roundText, TTLocalizer.TugOfWarGameEnd))
exitSeq.append(Wait(4.8 + suitSlipTime))
else:
exitSeq.append(Func(self.setText, self.roundText, TTLocalizer.TugOfWarGameTie))
exitSeq.append(Wait(2.5))
exitSeq.append(Func(self.gameOver))
self.showTrack = Parallel(reactSeq, exitSeq)
for x in self.animTracks.values():
if x != None:
x.finish()
self.showTrack.start()
if self.arrowKeys:
self.arrowKeys.setPressHandlers(self.arrowKeys.NULL_HANDLERS)
self.arrowKeys.setReleaseHandlers(self.arrowKeys.NULL_HANDLERS)
return
def remoteKeyRateUpdate(self, avId, keyRate):
    """Mirror another player's reported key rate into their pulling anim."""
    if not self.hasLocalToon:
        return
    isRemotePlayer = avId != self.localAvId
    if isRemotePlayer:
        self.setAnimState(avId, keyRate)
def sendSuitPosition(self, suitOffset):
    """AI update of the suit's pull offset; apply it while tugging."""
    if not self.hasLocalToon:
        return
    if self.gameFSM.getCurrentState().getName() != 'tug':
        return
    self.suitOffset = suitOffset
    self.moveSuits()
def sendCurrentPosition(self, avIdList, offsetList):
    """AI update of every toon's pull offset; move toons and redraw ropes taut."""
    if not self.hasLocalToon:
        return
    if self.gameFSM.getCurrentState().getName() != 'tug':
        return
    for i in xrange(len(avIdList)):
        self.offsetDict[avIdList[i]] = offsetList[i]
    self.moveToons()
    self.setUpRopes(0)
def createSuits(self):
    """Create and place the opposing suit (toon-vs-cog games only)."""
    if self.gameType == TugOfWarGameGlobals.TOON_VS_COG:
        self.suit = Suit.Suit()
        # Home [pos, hpr] on the right side of the gap; offsets are relative to it.
        self.origSuitPosHpr = [VBase3(6.0, 18, 0.1), VBase3(120, 0, 0)]
        self.suitOffset = 0
        d = SuitDNA.SuitDNA()
        d.newSuit(self.suitType)
        self.suit.setDNA(d)
        self.suit.reparentTo(render)
        self.suit.setPos(self.origSuitPosHpr[0])
        self.suit.setHpr(self.origSuitPosHpr[1])
        # Touch every game anim once so playback starts without a hitch.
        for anim in self.suitAnimNames:
            self.suit.pose(anim, 0)
        self.suit.pose('tug-o-war', 0)
        self.dropShadowDict[self.suitId] = self.dropShadow.copyTo(hidden)
        self.dropShadowDict[self.suitId].reparentTo(self.suit)
        self.dropShadowDict[self.suitId].setScale(0.45)
def initHandycaps(self):
    """In 3-player toon-vs-toon, double the lone toon's force advantage.

    (Method name keeps the original "handycap" spelling for callers.)
    The outnumbered toon also gets the big-head cheesy effect so everyone
    can see who has the advantage.
    """
    if self.numPlayers == 3 and self.gameType == TugOfWarGameGlobals.TOON_VS_TOON:
        if len(self.avList[0]) == 1:
            toon = self.getAvatar(self.avList[0][0])
            # advantage is a local-client multiplier (see computeForce).
            if self.avList[0][0] == self.localAvId:
                self.advantage = 2.0
            toon.applyCheesyEffect(ToontownGlobals.CEBigHead)
        elif len(self.avList[1]) == 1:
            toon = self.getAvatar(self.avList[1][0])
            if self.avList[1][0] == self.localAvId:
                self.advantage = 2.0
            toon.applyCheesyEffect(ToontownGlobals.CEBigHead)
def setSpeedGauge(self):
    """Refresh the power meter's bar, target, and force-based bar color."""
    self.powerMeter.setPower(self.keyRate)
    self.powerMeter.setTarget(self.idealRate)
    if not self.allOutMode:
        self.powerMeter.updateTooSlowTooFast()
    if not self.allOutMode:
        # index: current force as a fraction of the ideal force.
        index = float(self.currentForce) / self.idealForce
        bonus = 0.0
        if index > 1:
            # NOTE(review): max(1, ...) makes bonus >= 1 whenever index > 1;
            # min(1, ...) may have been intended as a cap — confirm.
            bonus = max(1, index - 1)
            index = 1
        # Greener as force approaches ideal, bluer when below it.
        color = (0,
         0.75 * index + 0.25 * bonus,
         0.75 * (1 - index),
         0.5)
        self.powerMeter.setBarColor(color)
    else:
        # All-out mode: solid green, no too-slow/too-fast feedback.
        self.powerMeter.setBarColor((0, 1, 0, 0.5))
def setAnimState(self, avId, keyRate):
    """Start or stop a toon's pulling animation based on its key rate.

    pullingDict tracks the current anim state so we only switch on edges.
    """
    if self.gameFSM.getCurrentState().getName() != 'tug':
        return
    toon = self.getAvatar(avId)
    if keyRate > 0 and self.pullingDict[avId] == 0:
        toon.loop('tug-o-war')
        self.pullingDict[avId] = 1
    if keyRate <= 0 and self.pullingDict[avId] == 1:
        toon.pose('tug-o-war', 3)
        toon.startLookAround()
        self.pullingDict[avId] = 0
def moveSuits(self):
    """Slide the suit to its home X plus the AI-reported offset, then re-check
    whether it has been dragged into the water."""
    if self.gameType != TugOfWarGameGlobals.TOON_VS_COG:
        return
    origPos = self.origSuitPosHpr[0]
    curPos = self.suit.getPos()
    # Only X changes; Y/Z keep their current values.
    newPos = VBase3(origPos[0] + self.suitOffset, curPos[1], curPos[2])
    if self.animTracks[self.suitId] != None:
        if self.animTracks[self.suitId].isPlaying():
            self.animTracks[self.suitId].finish()
            self.checkIfFallen()
    if self.suitId not in self.fallenList:
        self.animTracks[self.suitId] = Sequence(LerpPosInterval(self.suit, duration=TugOfWarGameGlobals.SEND_UPDATE, pos=newPos), Func(self.checkIfFallen))
        self.animTracks[self.suitId].start()
    return
def moveToons(self):
    """Slide each still-standing toon by its AI-reported offset (scaled by the
    local handycap divisor), then re-check for water falls."""
    for avId in self.avIdList:
        if avId not in self.fallenList:
            toon = self.getAvatar(avId)
            if toon:
                origPos = self.posDict[avId]
                curPos = toon.getPos()
                newPos = VBase3(origPos[0] + self.offsetDict[avId] / self.handycap, curPos[1], curPos[2])
                if self.animTracks[avId] != None:
                    if self.animTracks[avId].isPlaying():
                        self.animTracks[avId].finish()
                        self.checkIfFallen(avId)
                if avId not in self.fallenList:
                    self.animTracks[avId] = Sequence(LerpPosInterval(toon, duration=TugOfWarGameGlobals.SEND_UPDATE, pos=newPos), Func(self.checkIfFallen, avId))
                    self.animTracks[avId].start()
    return
def checkIfFallen(self, avId = None):
    """Detect a puller dragged into the 4-unit water gap (|x| < 2).

    avId None means check the suit; otherwise check that toon.  When a toon
    falls, its entire side goes into the water and the contest ends.
    """
    if avId == None:
        if self.suitId not in self.fallenList:
            curPos = self.suit.getPos()
            if curPos[0] < 0 and curPos[0] > -2 or curPos[0] > 0 and curPos[0] < 2:
                self.hideControls()
                self.throwInWater()
                # The suit always pulls for side 1.
                losingSide = 1
                self.sendUpdate('reportEndOfContest', [losingSide])
    elif avId not in self.fallenList:
        toon = self.getAvatar(avId)
        if toon:
            curPos = toon.getPos()
            if curPos[0] < 0 and curPos[0] > -2 or curPos[0] > 0 and curPos[0] < 2:
                self.hideControls()
                losingSide = self.sides[avId]
                # NOTE(review): this loop rebinds the avId parameter.
                for avId in self.avIdList:
                    if self.sides[avId] == losingSide:
                        self.throwInWater(avId)
                self.sendUpdate('reportEndOfContest', [losingSide])
    return
def throwInWater(self, avId = None):
    """Animate a puller slipping into the water with splash and ripples.

    avId None throws in the suit; otherwise the given toon.  Each faller
    consumes one position from self.drinkPositions.
    """
    if avId == None:
        self.fallenList.append(self.suitId)
        waterPos = self.drinkPositions.pop()
        # Sink the suit so roughly a third of it stays above the surface.
        newPos = VBase3(waterPos[0], waterPos[1], waterPos[2] - self.suit.getHeight() / 1.5)
        self.suit.loop('neutral')
        self.dropShadowDict[self.suitId].reparentTo(hidden)
        loser = self.suit
        animId = self.suitId
    else:
        self.fallenList.append(avId)
        toon = self.getAvatar(avId)
        waterPos = self.drinkPositions.pop()
        newPos = VBase3(waterPos[0], waterPos[1], waterPos[2] - toon.getHeight())
        toon.loop('neutral')
        self.dropShadowDict[avId].reparentTo(hidden)
        loser = toon
        animId = avId
    if self.animTracks[animId] != None:
        if self.animTracks[animId].isPlaying():
            self.animTracks[animId].finish()
    self.splash.setPos(newPos[0], newPos[1], -1.8)
    self.splash.setScale(2.5, 2.5, 1)
    self.ripples.setPos(newPos[0], newPos[1], -1.7)
    self.ripples.setScale(1, 1, 1)
    self.animTracks[animId] = Sequence(Parallel(ActorInterval(actor=loser, animName='slip-forward', duration=2.0), LerpPosInterval(loser, duration=2.0, pos=newPos), Sequence(Wait(1.0), Parallel(Func(base.playSfx, self.sndHitWater), Func(self.splash.play), Func(self.ripples.play)))), Func(loser.loop, 'neutral'))
    self.animTracks[animId].start()
    return
def computeForce(self, keyRate):
    """Convert a key-press rate into pulling force.

    In all-out mode force is linear in the rate; otherwise it is a Gaussian
    peaked at the ideal rate, scaled by advantage and the rate-match award.
    """
    if self.allOutMode == 1:
        return 0.75 * keyRate
    stdDev = 0.25 * self.idealRate
    peak = self.advantage * (self.rateMatchAward + 4 + 0.4 * self.idealRate)
    exponent = -math.pow(keyRate - self.idealRate, 2) / (2.0 * math.pow(stdDev, 2))
    return peak * math.pow(math.e, exponent)
def initRopes(self):
    """Create one Rope per link (extra one to the suit in vs-cog games) and
    run the initial slack rope setup."""
    if self.gameType == TugOfWarGameGlobals.TOON_VS_COG:
        numRopes = self.numPlayers
    else:
        numRopes = self.numPlayers - 1
    for x in xrange(0, numRopes):
        rope = Rope.Rope(self.uniqueName('TugRope' + str(x)))
        if rope.showRope:
            rope.ropeNode.setRenderMode(RopeNode.RMBillboard)
            rope.ropeNode.setThickness(0.2)
            rope.setTexture(self.ropeTexture)
            rope.ropeNode.setUvMode(RopeNode.UVDistance)
            rope.ropeNode.setUvDirection(1)
            rope.setTransparency(1)
            rope.setColor(0.89, 0.89, 0.6, 1)
        self.tugRopes.append(rope)
    # 1 = notTaut: ropes start sagging until the round begins.
    self.setUpRopes(1)
def __spawnUpdateRopeTask(self):
    """Start the per-frame rope texture-follow task."""
    taskMgr.remove(self.taskName(self.UPDATE_ROPE_TASK))
    taskMgr.add(self.__updateRopeTask, self.taskName(self.UPDATE_ROPE_TASK))
def __updateRopeTask(self, task):
    """Per-frame: pin each rope's texture nodes to points sampled along it."""
    if self.tugRopes != None:
        for i in xrange(len(self.tugRopes)):
            if self.tugRopes[i] != None:
                self.ropePts[i] = self.tugRopes[i].getPoints(len(self.ropeTex[i]))
                for j in xrange(len(self.ropePts[i])):
                    self.ropeTex[i][j].setPos(self.ropePts[i][j])
    return Task.cont
def __killUpdateRopeTask(self):
    """Stop the per-frame rope texture-follow task."""
    taskMgr.remove(self.taskName(self.UPDATE_ROPE_TASK))
def tugTimeoutTask(self, task):
    """Failsafe: end the game if the tug phase runs past its time limit."""
    self.gameOver()
    return Task.done
def waitForGoTimeoutTask(self, task):
    """Failsafe: end the game if the go signal never arrives."""
    self.gameOver()
    return Task.done
def __spawnMouseSpeedTask(self):
    """Start polling mouse position for mouse-control mode."""
    taskMgr.remove(self.taskName('mouseSpeed'))
    taskMgr.add(self.__mouseSpeedTask, self.taskName('mouseSpeed'))
def __killMouseSpeedTask(self):
    """Stop polling mouse position."""
    taskMgr.remove(self.taskName('mouseSpeed'))
def __mouseSpeedTask(self, task):
    """Poll the mouse each frame and turn horizontal crossings of a
    small dead zone into press/release events for the tug controls.

    ``self.mouseSide`` tracks which half of the screen the cursor was
    last on (0 = left, 1 = right); ``dx`` is the half-width of the
    central dead zone in normalized mouse coordinates.
    """
    dx = 0.1
    if self.mouseMode:
        mx = base.mouseWatcherNode.getMouseX()
        # my is sampled but not used by this handler.
        my = base.mouseWatcherNode.getMouseY()
        if self.mouseSide == 0:
            if mx > dx:
                # Crossed from left to right: swap which control is "held".
                self.mouseSide = 1
                self.__releaseHandler(1)
                self.__pressHandler(0)
            elif mx > -dx:
                # Inside the dead zone: release without switching sides.
                self.__releaseHandler(1)
        elif self.mouseSide == 1:
            if mx < -dx:
                # Crossed from right to left: swap which control is "held".
                self.mouseSide = 0
                self.__releaseHandler(0)
                self.__pressHandler(1)
            elif mx < dx:
                # Inside the dead zone: release without switching sides.
                self.__releaseHandler(0)
    return Task.cont
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

# Standard library
import datetime
import json
import logging
import os
import random
import re
import struct
import sys
import threading
import time
import uuid
import Queue
import shelve
from logging import Formatter
from sys import platform as _platform

# Third-party
from geopy.geocoders import GoogleV3
from pgoapi import PGoApi
from pgoapi.protos.POGOProtos.Enums import BadgeType_pb2
from pgoapi.utilities import f2i, get_cell_ids

# Bot-local
import cell_workers
import human_behaviour
import inventory
from api_wrapper import ApiWrapper
from base_task import BaseTask
from cell_workers.utils import distance
from event_manager import EventManager
from human_behaviour import sleep
from inventory import init_inventory, player
from item_list import Item
from metrics import Metrics
from plugin_loader import PluginLoader
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.event_handlers import LoggingHandler, SocketIoHandler, ColoredLoggingHandler, SocialHandler
from pokemongo_bot.socketio_server.runner import SocketIoRunner
from pokemongo_bot.websocket_remote_control import WebsocketRemoteControl
from sleep_schedule import SleepSchedule
from tree_config_builder import ConfigException, MismatchTaskApiVersion, TreeConfigBuilder
from worker_result import WorkerResult
class FileIOException(Exception):
    """Raised when a bot data file cannot be read or written as expected."""
    pass
class PokemonGoBot(object):
@property
def position(self):
    # Actual (latitude, longitude, altitude) as tracked by the API wrapper.
    return self.api.actual_lat, self.api.actual_lng, self.api.actual_alt
@property
def noised_position(self):
    # Same shape as ``position`` but with the API wrapper's location
    # noise applied.
    return self.api.noised_lat, self.api.noised_lng, self.api.noised_alt
#@position.setter # these should be called through api now that gps replication is there...
#def position(self, position_tuple):
# self.api._position_lat, self.api._position_lng, self.api._position_alt = position_tuple
@property
def player_data(self):
    """
    Returns the player data as received from the API.

    ``self._player`` is populated from GET_PLAYER responses (see
    _print_character_info and heartbeat).
    :return: The player data.
    :rtype: dict
    """
    return self._player
@property
def stardust(self):
    """Amount of stardust the player currently owns.

    Looks up the 'STARDUST' entry in the player's currency list.
    Improvement: a list comprehension replaces ``filter(...)[0]`` --
    identical on Python 2 and also correct on Python 3, where ``filter``
    returns a lazy iterator that cannot be indexed.  Still raises
    IndexError when the currency entry is missing, as before.
    """
    return [y for y in self._player['currencies'] if y['name'] == 'STARDUST'][0]['amount']
@stardust.setter
def stardust(self, value):
    # Update the 'STARDUST' currency entry in place.  Improvement: a
    # list comprehension replaces ``filter(...)[0]`` -- identical on
    # Python 2 and also correct on Python 3, where ``filter`` returns a
    # lazy iterator that cannot be indexed.
    [y for y in self._player['currencies'] if y['name'] == 'STARDUST'][0]['amount'] = value
def __init__(self, db, config):
    """Set up bot state, load static data files, start the background
    web-location writer thread and ensure a persistent MQTT client id.

    db     -- database connection (used via ``with self.database``)
    config -- parsed bot configuration namespace
    """
    self.database = db
    self.config = config
    super(PokemonGoBot, self).__init__()
    self.fort_timeouts = dict()
    # Static game data shipped with the bot.
    self.pokemon_list = json.load(
        open(os.path.join(_base_dir, 'data', 'pokemon.json'))
    )
    self.item_list = json.load(open(os.path.join(_base_dir, 'data', 'items.json')))
    # @var Metrics
    self.metrics = Metrics(self)
    self.latest_inventory = None
    self.cell = None
    # Fixed-size ring of recently visited fort ids.
    self.recent_forts = [None] * config.forts_max_circle_size
    self.tick_count = 0
    self.softban = False
    self.start_position = None
    self.last_map_object = None
    self.last_time_map_object = 0
    self.logger = logging.getLogger(type(self).__name__)
    self.alt = self.config.gps_default_altitude
    # Make our own copy of the workers for this instance
    self.workers = []
    # Threading setup for file writing; maxsize=1 coalesces bursts of
    # update requests into at most one pending write.
    self.web_update_queue = Queue.Queue(maxsize=1)
    self.web_update_thread = threading.Thread(target=self.update_web_location_worker)
    self.web_update_thread.start()
    # Heartbeat limiting
    self.heartbeat_threshold = self.config.heartbeat_threshold
    self.heartbeat_counter = 0
    self.last_heartbeat = time.time()
    self.capture_locked = False  # lock catching while moving to VIP pokemon
    # Persist a stable MQTT client id across runs (created on first run).
    client_id_file_path = os.path.join(_base_dir, 'data', 'mqtt_client_id')
    saved_info = shelve.open(client_id_file_path)
    key = 'client_id'.encode('utf-8')
    if key in saved_info:
        self.config.client_id = saved_info[key]
    else:
        self.config.client_id = str(uuid.uuid4())
        saved_info[key] = self.config.client_id
    saved_info.close()
def start(self):
    """Bring the bot fully online: events, logging, optional sleep
    schedule, API login, cached forts, inventory and info printouts.

    Call order matters: the event system and logging must exist before
    _setup_api runs, since login emits events.
    """
    self._setup_event_system()
    self._setup_logging()
    self.sleep_schedule = SleepSchedule(self, self.config.sleep_schedule) if self.config.sleep_schedule else None
    if self.sleep_schedule:
        # May block right away if we are inside a scheduled sleep window.
        self.sleep_schedule.work()
    self._setup_api()
    self._load_recent_forts()
    init_inventory(self)
    self.display_player_info()
    self._print_character_info()
    if self.config.pokemon_bag_show_at_start and self.config.pokemon_bag_pokemon_info:
        self._print_list_pokemon()
    random.seed()
def _setup_event_system(self):
    """Build the EventManager with the configured handlers (colored or
    plain logging, optional social and websocket handlers), then
    register every known event.

    When ``config.show_events`` is set, the registered events are
    printed and the process exits so the list can be inspected.

    Improvement: the unused ``remote_control`` local was removed; the
    WebsocketRemoteControl is started purely for its side effects.
    """
    handlers = []
    if self.config.logging and 'color' in self.config.logging and self.config.logging['color']:
        handlers.append(ColoredLoggingHandler(self))
    else:
        handlers.append(LoggingHandler(self))
    if self.config.enable_social:
        handlers.append(SocialHandler(self))
    if self.config.websocket_server_url:
        if self.config.websocket_start_embedded_server:
            self.sio_runner = SocketIoRunner(self.config.websocket_server_url)
            self.sio_runner.start_listening_async()
        handlers.append(SocketIoHandler(
            self,
            self.config.websocket_server_url
        ))
    if self.config.websocket_remote_control:
        WebsocketRemoteControl(self).start()
    # @var EventManager
    self.event_manager = EventManager(self.config.walker_limit_output, *handlers)
    self._register_events()
    if self.config.show_events:
        self.event_manager.event_report()
        sys.exit(1)
# Registering an event:
# self.event_manager.register_event("location", parameters=['lat', 'lng'])
#
# Emitting the event is enough to both log it and send the websocket
# message:
# self.event_manager.emit('location', level='info', data={'lat': 1, 'lng': 1})
def _register_events(self):
self.event_manager.register_event(
'location_found',
parameters=('position', 'location')
)
self.event_manager.register_event('api_error')
self.event_manager.register_event('config_error')
self.event_manager.register_event('login_started')
self.event_manager.register_event('login_failed')
self.event_manager.register_event('login_successful')
self.event_manager.register_event('set_start_location')
self.event_manager.register_event('load_cached_location')
self.event_manager.register_event('location_cache_ignored')
self.event_manager.register_event('debug')
# ignore candy above threshold
self.event_manager.register_event(
'ignore_candy_above_thresold',
parameters=(
'name',
'amount',
'threshold'
)
)
self.event_manager.register_event(
'position_update',
parameters=(
'current_position',
'last_position',
'distance', # optional
'distance_unit' # optional
)
)
self.event_manager.register_event(
'path_lap_update',
parameters=(
'number_lap',
'number_lap_max'
)
)
self.event_manager.register_event(
'path_lap_end',
parameters=(
'duration',
'resume'
)
)
self.event_manager.register_event('location_cache_error')
self.event_manager.register_event('bot_start')
self.event_manager.register_event('bot_exit')
self.event_manager.register_event('bot_interrupted')
# sleep stuff
self.event_manager.register_event(
'next_sleep',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_sleep',
parameters=(
'time_hms',
'wake'
)
)
# random pause
self.event_manager.register_event(
'next_random_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_pause',
parameters=(
'time_hms',
'resume'
)
)
# recycle stuff
self.event_manager.register_event(
'next_force_recycle',
parameters=(
'time'
)
)
self.event_manager.register_event('force_recycle')
# random alive pause
self.event_manager.register_event(
'next_random_alive_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_alive_pause',
parameters=(
'time_hms',
'resume'
)
)
# fort stuff
self.event_manager.register_event(
'spun_fort',
parameters=(
'fort_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'lured_pokemon_found',
parameters=(
'fort_id',
'fort_name',
'encounter_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'moving_to_fort',
parameters=(
'fort_name',
'distance'
)
)
self.event_manager.register_event(
'moving_to_lured_fort',
parameters=(
'fort_name',
'distance',
'lure_distance'
)
)
self.event_manager.register_event(
'spun_pokestop',
parameters=(
'pokestop', 'exp', 'items'
)
)
self.event_manager.register_event(
'pokestop_empty',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_out_of_range',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_on_cooldown',
parameters=('pokestop', 'minutes_left')
)
self.event_manager.register_event(
'unknown_spin_result',
parameters=('status_code',)
)
self.event_manager.register_event('pokestop_searching_too_often')
self.event_manager.register_event('arrived_at_fort')
# pokemon stuff
self.event_manager.register_event(
'catchable_pokemon',
parameters=(
'pokemon_id',
'spawn_point_id',
'encounter_id',
'latitude',
'longitude',
'expiration_timestamp_ms',
'pokemon_name'
)
)
self.event_manager.register_event(
'pokemon_appeared',
parameters=(
'pokemon',
'ncp',
'cp',
'iv',
'iv_display',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event('no_pokeballs')
self.event_manager.register_event('enough_ultraballs')
self.event_manager.register_event(
'pokemon_catch_rate',
parameters=(
'catch_rate',
'ball_name',
'berry_name',
'berry_count'
)
)
self.event_manager.register_event(
'threw_berry',
parameters=(
'berry_name',
'ball_name',
'new_catch_rate'
)
)
self.event_manager.register_event(
'threw_pokeball',
parameters=(
'throw_type',
'spin_label',
'ball_name',
'success_percentage',
'count_left'
)
)
self.event_manager.register_event(
'pokemon_capture_failed',
parameters=('pokemon',)
)
self.event_manager.register_event(
'pokemon_vanished',
parameters=(
'pokemon',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event(
'vanish_limit_reached',
parameters=(
'duration',
'resume'
)
)
self.event_manager.register_event('pokemon_not_in_range')
self.event_manager.register_event('pokemon_inventory_full')
self.event_manager.register_event(
'pokemon_caught',
parameters=(
'pokemon',
'ncp', 'cp', 'iv', 'iv_display', 'exp',
'stardust',
'encounter_id',
'latitude',
'longitude',
'pokemon_id',
'daily_catch_limit',
'caught_last_24_hour',
)
)
self.event_manager.register_event(
'pokemon_evolved',
parameters=('pokemon', 'iv', 'cp', 'xp', 'candy')
)
self.event_manager.register_event(
'pokemon_upgraded',
parameters=('pokemon', 'iv', 'cp', 'candy', 'stardust')
)
self.event_manager.register_event('skip_evolve')
self.event_manager.register_event('threw_berry_failed', parameters=('status_code',))
self.event_manager.register_event('vip_pokemon')
self.event_manager.register_event('gained_candy', parameters=('quantity', 'type'))
self.event_manager.register_event('catch_limit')
self.event_manager.register_event('spin_limit')
self.event_manager.register_event('show_best_pokemon', parameters=('pokemons'))
# level up stuff
self.event_manager.register_event(
'level_up',
parameters=(
'previous_level',
'current_level'
)
)
self.event_manager.register_event(
'level_up_reward',
parameters=('items',)
)
# lucky egg
self.event_manager.register_event(
'used_lucky_egg',
parameters=('amount_left',)
)
self.event_manager.register_event('lucky_egg_error')
# softban
self.event_manager.register_event('softban')
self.event_manager.register_event('softban_fix')
self.event_manager.register_event('softban_fix_done')
# egg incubating
self.event_manager.register_event(
'incubate_try',
parameters=(
'incubator_id',
'egg_id'
)
)
self.event_manager.register_event(
'incubate',
parameters=('distance_in_km',)
)
self.event_manager.register_event(
'next_egg_incubates',
parameters=('eggs_left', 'eggs_inc', 'eggs')
)
self.event_manager.register_event('incubator_already_used')
self.event_manager.register_event('egg_already_incubating')
self.event_manager.register_event(
'egg_hatched',
parameters=(
'name', 'cp', 'ncp', 'iv_ads', 'iv_pct', 'exp', 'stardust', 'candy'
)
)
self.event_manager.register_event('egg_hatched_fail')
# discard item
self.event_manager.register_event(
'item_discarded',
parameters=(
'amount', 'item', 'maximum'
)
)
self.event_manager.register_event(
'item_discard_skipped',
parameters=('space',)
)
self.event_manager.register_event(
'item_discard_fail',
parameters=('item',)
)
# inventory
self.event_manager.register_event('inventory_full')
# release
self.event_manager.register_event(
'keep_best_release',
parameters=(
'amount', 'pokemon', 'criteria'
)
)
self.event_manager.register_event(
'future_pokemon_release',
parameters=(
'pokemon', 'cp', 'iv', 'below_iv', 'below_cp', 'cp_iv_logic'
)
)
self.event_manager.register_event(
'pokemon_release',
parameters=('pokemon', 'iv', 'cp', 'candy')
)
# polyline walker
self.event_manager.register_event(
'polyline_request',
parameters=('url',)
)
# cluster
self.event_manager.register_event(
'found_cluster',
parameters=(
'num_points', 'forts', 'radius', 'distance'
)
)
self.event_manager.register_event(
'arrived_at_cluster',
parameters=(
'num_points', 'forts', 'radius'
)
)
# rename
self.event_manager.register_event(
'rename_pokemon',
parameters=('old_name', 'current_name',)
)
self.event_manager.register_event(
'pokemon_nickname_invalid',
parameters=('nickname',)
)
self.event_manager.register_event(
'unset_pokemon_nickname',
parameters=('old_name',)
)
# Move To map pokemon
self.event_manager.register_event(
'move_to_map_pokemon_fail',
parameters=('message',)
)
self.event_manager.register_event(
'move_to_map_pokemon_updated_map',
parameters=('lat', 'lon')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_to',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_encounter',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_move_towards',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_back',
parameters=('last_lat', 'last_lon')
)
self.event_manager.register_event(
'moving_to_pokemon_throught_fort',
parameters=('fort_name', 'distance','poke_name','poke_dist')
)
self.event_manager.register_event(
'move_to_map_pokemon',
parameters=('message')
)
# cached recent_forts
self.event_manager.register_event('loaded_cached_forts')
self.event_manager.register_event('cached_fort')
self.event_manager.register_event(
'no_cached_forts',
parameters=('path', )
)
self.event_manager.register_event(
'error_caching_forts',
parameters=('path', )
)
# database shit
self.event_manager.register_event('catch_log')
self.event_manager.register_event('vanish_log')
self.event_manager.register_event('evolve_log')
self.event_manager.register_event('login_log')
self.event_manager.register_event('transfer_log')
self.event_manager.register_event('pokestop_log')
self.event_manager.register_event('softban_log')
self.event_manager.register_event('eggs_hatched_log')
self.event_manager.register_event(
'badges',
parameters=('badge', 'level')
)
self.event_manager.register_event(
'player_data',
parameters=('player_data', )
)
self.event_manager.register_event(
'forts_found',
parameters=('json')
)
# UseIncense
self.event_manager.register_event(
'use_incense',
parameters=('type', 'incense_count')
)
def tick(self):
    """Run one bot iteration: refresh map data, record fort cooldowns,
    validate the session and dispatch workers."""
    self.health_record.heartbeat()
    self.cell = self.get_meta_cell()
    if self.sleep_schedule:
        self.sleep_schedule.work()
    now = time.time() * 1000
    # Remember the cooldown for every fort we cannot spin yet.
    for fort in self.cell["forts"]:
        timeout = fort.get("cooldown_complete_timestamp_ms", 0)
        if timeout >= now:
            self.fort_timeouts[fort["id"]] = timeout
    self.tick_count += 1
    # Check if session token has expired
    self.check_session(self.position)
    # Workers run in order; a RUNNING result means that worker wants
    # exclusive control of this tick, so stop dispatching the rest.
    for worker in self.workers:
        if worker.work() == WorkerResult.RUNNING:
            return
def get_meta_cell(self):
    """Collapse the map cells around the current position into a single
    dict of the items the workers care about.

    When the server returned forts (or we have no cached cell yet) all
    fresh data is returned; otherwise the previously cached forts are
    kept and only the pokemon lists are refreshed.
    """
    nearby_cells = self.find_close_cells(*self.position[0:2])
    forts = []
    wild_pokemons = []
    catchable_pokemons = []
    for cell in nearby_cells:
        forts.extend(cell.get("forts") or [])
        wild_pokemons.extend(cell.get("wild_pokemons") or [])
        catchable_pokemons.extend(cell.get("catchable_pokemons") or [])
    if len(forts) > 1 or not self.cell:
        # Fresh fort data arrived (or nothing is cached yet): use it all.
        return {
            "forts": forts,
            "wild_pokemons": wild_pokemons,
            "catchable_pokemons": catchable_pokemons
        }
    # No forts in this response: keep the cached forts, refresh the rest.
    return {
        "forts": self.cell["forts"],
        "wild_pokemons": wild_pokemons,
        "catchable_pokemons": catchable_pokemons
    }
def update_web_location(self, cells=None, lat=None, lng=None, alt=None):
    """Persist the current map cells and position for the web UI.

    All arguments are optional; missing coordinates are read from the
    API wrapper and missing cells are fetched from the server.  Writes
    are best-effort: IOErrors are logged and ignored.

    Improvements: the mutable default argument ``cells=[]`` was replaced
    with ``None`` (an explicit empty list behaves as before), and the
    three copy-pasted try/except write blocks share one helper.
    """
    # we can call the function with no arguments and still get the position
    # and map_cells
    if lat is None:
        lat = self.api._position_lat
    if lng is None:
        lng = self.api._position_lng
    if alt is None:
        alt = self.api._position_alt
    # dont cache when teleport_to
    if self.api.teleporting:
        return
    if not cells:
        location = self.position[0:2]
        cells = self.find_close_cells(*location)
    def write_json(path, payload):
        # Best-effort write; keeps the original error message.
        try:
            with open(path, 'w') as outfile:
                json.dump(payload, outfile)
        except IOError as e:
            self.logger.info('[x] Error while opening location file: %s' % e)
    write_json(
        os.path.join(_base_dir, 'data', 'cells-%s.json' % self.config.username),
        cells
    )
    # alt is unused atm but makes using *location easier
    write_json(
        os.path.join(_base_dir, 'web', 'location-%s.json' % self.config.username),
        {'lat': lat, 'lng': lng, 'alt': alt, 'cells': cells}
    )
    write_json(
        os.path.join(_base_dir, 'data', 'last-location-%s.json' % self.config.username),
        {'lat': lat, 'lng': lng, 'alt': alt, 'start_position': self.start_position}
    )
def emit_forts_event(self, response_dict):
    """Emit a debug 'forts_found' event for every map cell of a
    GET_MAP_OBJECTS response that contains at least one fort."""
    map_objects = response_dict.get('responses', {}).get('GET_MAP_OBJECTS', {})
    if map_objects.get('status', None) != 1:
        return
    for cell in map_objects['map_cells']:
        if not cell.get("forts"):
            continue
        self.event_manager.emit(
            'forts_found',
            sender=self,
            level='debug',
            formatted='Found forts {json}',
            data={'json': json.dumps(cell["forts"])}
        )
def find_close_cells(self, lat, lng):
    """Fetch the map cells around (lat, lng), sorted by the distance of
    each cell's first fort from that point.

    Cells without forts sort last (sentinel distance 1e6).  Returns an
    empty list when the server response status is not 1.

    Improvement: removed the unused local ``position = (lat, lng, 0)``
    left over from an earlier revision.
    """
    cellid = get_cell_ids(lat, lng)
    # One zero "since" timestamp per requested cell id.
    timestamp = [0, ] * len(cellid)
    response_dict = self.get_map_objects(lat, lng, timestamp, cellid)
    map_objects = response_dict.get(
        'responses', {}
    ).get('GET_MAP_OBJECTS', {})
    status = map_objects.get('status', None)
    map_cells = []
    if status and status == 1:
        map_cells = map_objects['map_cells']
    # Sorting an empty list is a no-op, so this is safe on failure too.
    map_cells.sort(
        key=lambda x: distance(
            lat,
            lng,
            x['forts'][0]['latitude'],
            x['forts'][0]['longitude']) if x.get('forts', []) else 1e6
    )
    return map_cells
def _setup_logging(self):
log_level = logging.ERROR
if self.config.debug:
log_level = logging.DEBUG
logging.getLogger("requests").setLevel(log_level)
logging.getLogger("websocket").setLevel(log_level)
logging.getLogger("socketio").setLevel(log_level)
logging.getLogger("engineio").setLevel(log_level)
logging.getLogger("socketIO-client").setLevel(log_level)
logging.getLogger("pgoapi").setLevel(log_level)
logging.getLogger("rpc_api").setLevel(log_level)
if self.config.logging:
logging_format = '%(message)s'
logging_format_options = ''
if ('show_log_level' not in self.config.logging) or self.config.logging['show_log_level']:
logging_format = '[%(levelname)s] ' + logging_format
if ('show_process_name' not in self.config.logging) or self.config.logging['show_process_name']:
logging_format = '[%(name)10s] ' + logging_format
if ('show_thread_name' not in self.config.logging) or self.config.logging['show_thread_name']:
logging_format = '[%(threadName)s] ' + logging_format
if ('show_datetime' not in self.config.logging) or self.config.logging['show_datetime']:
logging_format = '[%(asctime)s] ' + logging_format
logging_format_options = '%Y-%m-%d %H:%M:%S'
formatter = Formatter(logging_format,logging_format_options)
for handler in logging.root.handlers[:]:
handler.setFormatter(formatter)
def check_session(self, position):
    """Re-login when the auth ticket is about to expire (< 60s left).

    Rebuilds the ApiWrapper for a fresh session, restores *position*
    and re-activates the signature library.

    Bug fix: the original called ``self.logger.info(msg, 'yellow')``;
    the stray positional argument makes logging's %-formatting fail at
    emit time because the message has no format placeholders.
    """
    # Check session expiry
    if self.api._auth_provider and self.api._auth_provider._ticket_expire:
        # prevent crash if return not numeric value
        if not self.is_numeric(self.api._auth_provider._ticket_expire):
            self.logger.info("Ticket expired value is not numeric")
            return
        # _ticket_expire is in milliseconds since the epoch.
        remaining_time = \
            self.api._auth_provider._ticket_expire / 1000 - time.time()
        if remaining_time < 60:
            self.event_manager.emit(
                'api_error',
                sender=self,
                level='info',
                formatted='Session stale, re-logging in.'
            )
            self.api = ApiWrapper(config=self.config)
            self.api.set_position(*position)
            self.login()
            self.api.activate_signature(self.get_encryption_lib())
@staticmethod
def is_numeric(s):
    """Return True when ``float(s)`` succeeds, False on a ValueError.

    Note: a TypeError (e.g. ``s is None``) deliberately propagates,
    matching the original behaviour.
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
def login(self):
    """Log in to the API, retrying every 10 seconds while the server is
    busy, then record the login in the database (when the ``login``
    table exists) and emit a success event.

    Improvement: the original wrapped the table check in a
    ``while True`` loop whose every branch hit ``break`` after one
    pass; it is now a plain conditional.  All emitted strings are
    unchanged.
    """
    self.event_manager.emit(
        'login_started',
        sender=self,
        level='info',
        formatted="Login procedure started."
    )
    lat, lng = self.position[0:2]
    self.api.set_position(lat, lng, self.alt)  # or should the alt kept to zero?
    while not self.api.login(
            self.config.auth_service,
            str(self.config.username),
            str(self.config.password)):
        self.event_manager.emit(
            'login_failed',
            sender=self,
            level='info',
            formatted="Login error, server busy. Waiting 10 seconds to try again."
        )
        time.sleep(10)
    with self.database as conn:
        c = conn.cursor()
        c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='login'")
        result = c.fetchone()
        if result[0] == 1:
            conn.execute('''INSERT INTO login (timestamp, message) VALUES (?, ?)''', (time.time(), 'LOGIN_SUCCESS'))
        else:
            self.event_manager.emit(
                'login_failed',
                sender=self,
                level='info',
                formatted="Login table not founded, skipping log"
            )
    self.event_manager.emit(
        'login_successful',
        sender=self,
        level='info',
        formatted="Login successful."
    )
def get_encryption_lib(self):
    """Locate the request-signing (encryption) shared library for this
    platform and return its path; exits the process when missing.

    Improvement: the final path is built with ``os.path.join`` instead
    of string concatenation with '/', which also yields a native path
    separator on Windows.
    """
    if _platform == "Windows" or _platform == "win32":
        # Check if we are on 32 or 64 bit
        if sys.maxsize > 2**32:
            file_name = 'encrypt_64.dll'
        else:
            file_name = 'encrypt.dll'
    else:
        file_name = 'encrypt.so'
    if self.config.encrypt_location == '':
        # Default to the bot's root directory (parent of this package).
        path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
    else:
        path = self.config.encrypt_location
    full_path = os.path.join(path, file_name)
    if not os.path.isfile(full_path):
        self.logger.error(file_name + ' is not found! Please place it in the bots root directory or set encrypt_location in config.')
        self.logger.info('Platform: '+ _platform + ' ' + file_name + ' directory: '+ path)
        sys.exit(1)
    else:
        self.logger.info('Found '+ file_name +'! Platform: ' + _platform + ' ' + file_name + ' directory: ' + path)
    return full_path
def _setup_api(self):
    """Create the API wrapper, set the starting position, log in and
    activate the request-signing library."""
    # instantiate pgoapi @var ApiWrapper
    self.api = ApiWrapper(config=self.config)
    # provide player position on the earth
    self._set_starting_position()
    self.login()
    # chain subrequests (methods) into one RPC call
    self.api.activate_signature(self.get_encryption_lib())
    self.logger.info('')
    # send empty map_cells and then our position
    self.update_web_location()
def _print_character_info(self):
    """Fetch the player profile via GET_PLAYER and log a summary of
    currencies, bag usage and item counts.

    Bug fix: when the API returned no player info the original recursed
    to retry but then *fell through* and used the unbound local
    ``player`` (NameError); it now returns after the retry.  The stray
    ``'red'`` positional argument to ``logger.info`` (which breaks
    %-formatting at emit time) and the unused ``currency_1``/
    ``currency_2`` locals were also removed.
    """
    # get player profile call
    # ----------------------
    response_dict = self.api.get_player()
    if response_dict:
        self._player = response_dict['responses']['GET_PLAYER']['player_data']
        player = self._player
    else:
        self.logger.info(
            "The API didn't return player info, servers are unstable - "
            "retrying."
        )
        sleep(5)
        self._print_character_info()
        return
    # @@@ TODO: Convert this to d/m/Y H:M:S
    # NOTE(review): creation_date is computed but never logged below.
    creation_date = datetime.datetime.fromtimestamp(
        player['creation_timestamp_ms'] / 1e3)
    creation_date = creation_date.strftime("%Y/%m/%d %H:%M:%S")
    pokecoins = '0'
    stardust = '0'
    # Relies on the module-level ``import inventory``.
    items_inventory = inventory.items()
    if 'amount' in player['currencies'][0]:
        pokecoins = player['currencies'][0]['amount']
    if 'amount' in player['currencies'][1]:
        stardust = player['currencies'][1]['amount']
    self.logger.info('')
    self.logger.info('--- {username} ---'.format(**player))
    self.logger.info(
        'Pokemon Bag: {}/{}'.format(
            inventory.Pokemons.get_space_used(),
            inventory.get_pokemon_inventory_size()
        )
    )
    self.logger.info(
        'Items: {}/{}'.format(
            inventory.Items.get_space_used(),
            inventory.get_item_inventory_size()
        )
    )
    self.logger.info(
        'Stardust: {}'.format(stardust) +
        ' | Pokecoins: {}'.format(pokecoins)
    )
    # Items Output
    self.logger.info(
        'PokeBalls: ' + str(items_inventory.get(1).count) +
        ' | GreatBalls: ' + str(items_inventory.get(2).count) +
        ' | UltraBalls: ' + str(items_inventory.get(3).count) +
        ' | MasterBalls: ' + str(items_inventory.get(4).count))
    self.logger.info(
        'RazzBerries: ' + str(items_inventory.get(701).count) +
        ' | BlukBerries: ' + str(items_inventory.get(702).count) +
        ' | NanabBerries: ' + str(items_inventory.get(703).count))
    self.logger.info(
        'LuckyEgg: ' + str(items_inventory.get(301).count) +
        ' | Incubator: ' + str(items_inventory.get(902).count) +
        ' | TroyDisk: ' + str(items_inventory.get(501).count))
    self.logger.info(
        'Potion: ' + str(items_inventory.get(101).count) +
        ' | SuperPotion: ' + str(items_inventory.get(102).count) +
        ' | HyperPotion: ' + str(items_inventory.get(103).count) +
        ' | MaxPotion: ' + str(items_inventory.get(104).count))
    self.logger.info(
        'Incense: ' + str(items_inventory.get(401).count) +
        ' | IncenseSpicy: ' + str(items_inventory.get(402).count) +
        ' | IncenseCool: ' + str(items_inventory.get(403).count))
    self.logger.info(
        'Revive: ' + str(items_inventory.get(201).count) +
        ' | MaxRevive: ' + str(items_inventory.get(202).count))
    self.logger.info('')
def _print_list_pokemon(self):
    """Log one summary line per pokemon species in the bag, optionally
    with counts, candies and the per-pokemon stats selected by
    ``config.pokemon_bag_pokemon_info``.

    Improvement: species grouping uses comprehensions instead of
    ``filter``/``map`` -- identical on Python 2 (where ``filter``
    returned a list) and also correct on Python 3, where the lazy
    ``filter`` result breaks the ``len``/indexing below.
    """
    # get pokemon list, grouped by species id in ascending id order
    bag = inventory.pokemons().all()
    id_list = sorted(set(x.pokemon_id for x in bag))
    pokemon_list = [[x for x in bag if x.pokemon_id == y] for y in id_list]
    show_count = self.config.pokemon_bag_show_count
    show_candies = self.config.pokemon_bag_show_candies
    poke_info_displayed = self.config.pokemon_bag_pokemon_info
    def get_poke_info(info, pokemon):
        # Map a config key to a human-readable stat string for *pokemon*.
        poke_info = {
            'cp': 'CP {}'.format(pokemon.cp),
            'iv_ads': 'A/D/S {}/{}/{}'.format(pokemon.iv_attack, pokemon.iv_defense, pokemon.iv_stamina),
            'iv_pct': 'IV {}'.format(pokemon.iv),
            'ivcp': 'IVCP {}'.format(round(pokemon.ivcp, 2)),
            'ncp': 'NCP {}'.format(round(pokemon.cp_percent, 2)),
            'level': "Level {}".format(pokemon.level),
            'hp': 'HP {}/{}'.format(pokemon.hp, pokemon.hp_max),
            'moveset': 'Moves: {}'.format(pokemon.moveset),
            'dps': 'DPS {}'.format(round(pokemon.moveset.dps, 2))
        }
        if info not in poke_info:
            raise ConfigException("info '{}' isn't available for displaying".format(info))
        return poke_info[info]
    self.logger.info('Pokemon:')
    for pokes in pokemon_list:
        line_p = '#{} {}'.format(pokes[0].pokemon_id, pokes[0].name)
        if show_count:
            line_p += '[{}]'.format(len(pokes))
        if show_candies:
            line_p += '[{} candies]'.format(pokes[0].candy_quantity)
        line_p += ': '
        poke_info = ['({})'.format(', '.join([get_poke_info(x, p) for x in poke_info_displayed])) for p in pokes]
        self.logger.info(line_p + ' | '.join(poke_info))
    self.logger.info('')
def use_lucky_egg(self):
    # Item id 301 is the Lucky Egg (XP boost); returns the raw API response.
    return self.api.use_item_xp_boost(item_id=301)
def _set_starting_position(self):
    """Determine and set the bot's starting GPS position.

    Order of precedence:
      1. ``config.location`` -- parsed / geocoded explicitly;
      2. ``config.location_cache`` -- last known location from a prior
         run, unless the configured start position has changed.

    Exits the process when neither source yields a position.

    Bug fix: the original raised
    ``FileIOException("...".web_inventory)``, an AttributeError on a
    string literal; it now uses ``str.format`` with the actual file
    path.  The bare ``except:`` there was also narrowed to
    ``except Exception:`` so SystemExit/KeyboardInterrupt pass through.
    """
    self.event_manager.emit(
        'set_start_location',
        sender=self,
        level='info',
        formatted='Setting start location.'
    )
    has_position = False
    if self.config.test:
        # TODO: Add unit tests
        return
    if self.config.location:
        location_str = self.config.location
        location = self.get_pos_by_name(location_str.replace(" ", ""))
        msg = "Location found: {location} {position}"
        self.event_manager.emit(
            'location_found',
            sender=self,
            level='info',
            formatted=msg,
            data={
                'location': location_str,
                'position': location
            }
        )
        self.api.set_position(*location)
        self.event_manager.emit(
            'position_update',
            sender=self,
            level='info',
            formatted="Now at {current_position}",
            data={
                'current_position': self.position,
                'last_position': '',
                'distance': '',
                'distance_unit': ''
            }
        )
        self.start_position = self.position
        has_position = True
    if self.config.location_cache:
        try:
            # save location flag used to pull the last known location from
            # the location.json
            self.event_manager.emit(
                'load_cached_location',
                sender=self,
                level='debug',
                formatted='Loading cached location...'
            )
            json_file = os.path.join(_base_dir, 'data', 'last-location-%s.json' % self.config.username)
            try:
                with open(json_file, "r") as infile:
                    location_json = json.load(infile)
            except (IOError, ValueError):
                # Unable to read json file.
                # File may be corrupt. Create a new one.
                location_json = []
            except Exception:
                raise FileIOException("Unexpected error reading from {}".format(json_file))
            # A corrupt/missing file left location_json as a list; the
            # indexing below then raises TypeError, which the outer
            # handler treats as "no usable cache".
            location = (
                location_json['lat'],
                location_json['lng'],
                location_json['alt'],
            )
            # If location has been set in config, only use cache if
            # starting position has not differed
            if has_position and 'start_position' in location_json:
                last_start_position = tuple(location_json.get('start_position', []))
                # Start position has to have been set on a previous run
                # to do this check
                if last_start_position and last_start_position != self.start_position:
                    msg = 'Going to a new place, ignoring cached location.'
                    self.event_manager.emit(
                        'location_cache_ignored',
                        sender=self,
                        level='debug',
                        formatted=msg
                    )
                    return
            self.api.set_position(*location)
            self.event_manager.emit(
                'position_update',
                sender=self,
                level='debug',
                formatted='Loaded location {current_position} from cache',
                data={
                    'current_position': location,
                    'last_position': '',
                    'distance': '',
                    'distance_unit': ''
                }
            )
            has_position = True
        except Exception:
            if has_position is False:
                sys.exit(
                    "No cached Location. Please specify initial location."
                )
            self.event_manager.emit(
                'location_cache_error',
                sender=self,
                level='debug',
                formatted='Parsing cached location failed.'
            )
def get_pos_by_name(self, location_name):
    """Resolve a location string to a (lat, lng, alt) tuple.

    Resolution order: favorite-location name, raw coordinates embedded
    in the string, then Google geocoding.
    NOTE(review): ``geolocator.geocode`` can return None for unknown
    places, which would raise AttributeError below -- confirm callers
    pre-validate the location string.
    """
    # Check if given location name, belongs to favorite_locations
    favorite_location_coords = self._get_pos_by_fav_location(location_name)
    if favorite_location_coords is not None:
        return favorite_location_coords
    # Check if the given location is already a coordinate.
    if ',' in location_name:
        possible_coordinates = re.findall(
            "[-]?\d{1,3}[.]\d{3,7}", location_name
        )
        if len(possible_coordinates) >= 2:
            # 2 matches, this must be a coordinate. We'll bypass the Google
            # geocode so we keep the exact location.
            self.logger.info(
                '[x] Coordinates found in passed in location, '
                'not geocoding.'
            )
            # An optional third match is the altitude; otherwise fall
            # back to the configured default altitude.
            return float(possible_coordinates[0]), float(possible_coordinates[1]), (float(possible_coordinates[2]) if len(possible_coordinates) == 3 else self.alt)
    geolocator = GoogleV3(api_key=self.config.gmapkey)
    loc = geolocator.geocode(location_name, timeout=10)
    return float(loc.latitude), float(loc.longitude), float(loc.altitude)
def _get_pos_by_fav_location(self, location_name):
location_name = location_name.lower()
coords = None
for location in self.config.favorite_locations:
if location.get('name').lower() == location_name:
coords = re.findall(
"[-]?\d{1,3}[.]\d{3,7}", location.get('coords').strip()
)
if len(coords) >= 2:
self.logger.info('Favorite location found: {} ({})'.format(location_name, coords))
break
#TODO: This is real bad
if coords is None:
return coords
else:
return float(coords[0]), float(coords[1]), (float(coords[2]) if len(coords) == 3 else self.alt)
    def heartbeat(self):
        """Periodic upkeep tick.

        Expires fort cooldowns every call; at most once per
        ``heartbeat_threshold`` seconds refreshes player data and awarded
        badges from the server; always wakes the web-location worker via
        ``web_update_queue``.
        """
        # NOTE(review): Python 2 idioms below (iteritems, has_key, Queue) --
        # this module is Python-2 only.
        # Remove forts that we can now spin again.
        now = time.time()
        self.fort_timeouts = {id: timeout for id, timeout
                              in self.fort_timeouts.iteritems()
                              if timeout >= now * 1000}
        # Throttle the expensive server round-trip to one batched request.
        if now - self.last_heartbeat >= self.heartbeat_threshold:
            self.last_heartbeat = now
            request = self.api.create_request()
            request.get_player()
            request.check_awarded_badges()
            responses = request.call()
            if responses['responses']['GET_PLAYER']['success'] == True:
                # we get the player_data anyway, might as well store it
                self._player = responses['responses']['GET_PLAYER']['player_data']
                self.event_manager.emit(
                    'player_data',
                    sender=self,
                    level='debug',
                    formatted='player_data: {player_data}',
                    data={'player_data': self._player}
                )
            if responses['responses']['CHECK_AWARDED_BADGES']['success'] == True:
                # store awarded_badges reponse to be used in a task or part of heartbeat
                self._awarded_badges = responses['responses']['CHECK_AWARDED_BADGES']
                if self._awarded_badges.has_key('awarded_badges'):
                    # badge levels are parallel to badges; i tracks the pair.
                    i = 0
                    for badge in self._awarded_badges['awarded_badges']:
                        badgelevel = self._awarded_badges['awarded_badge_levels'][i]
                        badgename = BadgeType_pb2._BADGETYPE.values_by_number[badge].name
                        i += 1
                        self.event_manager.emit(
                            'badges',
                            sender=self,
                            level='info',
                            formatted='awarded badge: {badge}, lvl {level}',
                            data={'badge': badgename,
                                  'level': badgelevel}
                        )
                    # Human-like pause after announcing new badges.
                    human_behaviour.action_delay(3, 10)
            inventory.refresh_inventory()
        try:
            self.web_update_queue.put_nowait(True) # do this outside of thread every tick
        except Queue.Full:
            # Worker is already pending an update; dropping the token is fine.
            pass
    def update_web_location_worker(self):
        """Consumer loop for ``web_update_queue``.

        Blocks until heartbeat() enqueues a token, then refreshes the
        web-visible location once per token. Runs forever; intended for a
        dedicated worker thread.
        """
        while True:
            self.web_update_queue.get()
            self.update_web_location()
def display_player_info(self):
player_stats = player()
if player_stats:
nextlvlxp = (int(player_stats.next_level_xp) - int(player_stats.exp))
self.logger.info(
'Level: {}'.format(player_stats.level) +
' (Next Level: {} XP)'.format(nextlvlxp) +
' (Total: {} XP)'
''.format(player_stats.exp))
self.logger.info(
'Pokemon Captured: '
'{}'.format(player_stats.pokemons_captured) +
' | Pokestops Visited: '
'{}'.format(player_stats.poke_stop_visits))
def get_forts(self, order_by_distance=False):
forts = [fort
for fort in self.cell['forts']
if 'latitude' in fort and 'type' in fort]
if order_by_distance:
forts.sort(key=lambda x: distance(
self.position[0],
self.position[1],
x['latitude'],
x['longitude']
))
return forts
def get_map_objects(self, lat, lng, timestamp, cellid):
if time.time() - self.last_time_map_object < self.config.map_object_cache_time:
return self.last_map_object
self.last_map_object = self.api.get_map_objects(
latitude=f2i(lat),
longitude=f2i(lng),
since_timestamp_ms=timestamp,
cell_id=cellid
)
self.emit_forts_event(self.last_map_object)
#if self.last_map_object:
# print self.last_map_object
self.last_time_map_object = time.time()
return self.last_map_object
def _load_recent_forts(self):
if not self.config.forts_cache_recent_forts:
return
cached_forts_path = os.path.join(_base_dir, 'data', 'recent-forts-%s.json' % self.config.username)
try:
# load the cached recent forts
cached_recent_forts = []
try:
with open(cached_forts_path) as f:
cached_recent_forts = json.load(f)
except (IOError, ValueError) as e:
self.logger.info('[x] Error while opening cached forts: %s' % e)
pass
except:
raise FileIOException("Unexpected error opening {}".cached_forts_path)
num_cached_recent_forts = len(cached_recent_forts)
num_recent_forts = len(self.recent_forts)
# Handles changes in max_circle_size
if not num_recent_forts:
self.recent_forts = []
elif num_recent_forts > num_cached_recent_forts:
self.recent_forts[-num_cached_recent_forts:] = cached_recent_forts
elif num_recent_forts < num_cached_recent_forts:
self.recent_forts = cached_recent_forts[-num_recent_forts:]
else:
self.recent_forts = cached_recent_forts
self.event_manager.emit(
'loaded_cached_forts',
sender=self,
level='debug',
formatted='Loaded cached forts...'
)
except IOError:
self.event_manager.emit(
'no_cached_forts',
sender=self,
level='debug',
formatted='Starting new cached forts for {path}',
data={'path': cached_forts_path}
)
| |
"""RangeMap class definition."""
from abc import ABCMeta, abstractmethod
from bisect import bisect_left, bisect_right
from collections.abc import Collection, Mapping, Set
from .sentinel import NOT_SET
class MappedRange:
	"""One mapped subrange of a RangeMap.

	A lightweight, unpackable record of the half-open range
	[start, stop) and the value mapped onto it.
	"""

	__slots__ = ('start', 'stop', 'value')

	def __init__(self, start, stop, value):
		"""Create a mapped range.

		Args:
			start: The start of the range, inclusive.
			stop: The end of the range, exclusive.
			value: The mapped value.
		"""
		self.start = start
		self.stop = stop
		self.value = value

	def __iter__(self):
		# Tuple-style unpacking: start, stop, value = mapped_range
		return iter((self.start, self.stop, self.value))

	def __str__(self):
		return '[{!r}, {!r}) -> {!r}'.format(self.start, self.stop, self.value)

	def __repr__(self):
		return '{}({!r}, {!r}, {!r})'.format(
			self.__class__.__name__, self.start, self.stop, self.value)

	def __eq__(self, other):
		if isinstance(other, MappedRange):
			return ((self.start, self.stop, self.value)
					== (other.start, other.stop, other.value))
		return False
class RangeMapView(Collection):
	"""Base class for views of RangeMaps.

	Concrete subclasses supply __iter__ and __contains__.
	"""
	# NOTE: the former ``__metaclass__ = ABCMeta`` line was removed; that
	# Python 2 syntax is inert in Python 3 (this module is Py3-only -- it
	# imports collections.abc), and Collection already provides ABCMeta.

	def __init__(self, mapping):
		"""Create a RangeMapView over the given RangeMap."""
		self._mapping = mapping

	def __len__(self):
		return len(self._mapping)

	@abstractmethod
	def __iter__(self):
		raise NotImplementedError

	@abstractmethod
	def __contains__(self, item):
		raise NotImplementedError

	def __repr__(self):
		return '{0.__class__.__name__}({0._mapping!r})'.format(self)

	@property
	def mapping(self):
		"""Return the underlying RangeMap."""
		return self._mapping
class RangeMapKeysView(RangeMapView, Set):
	"""A view of the keys that mark the starts of subranges of a RangeMap.

	Iterating over every possible key is impossible, so iteration yields
	only the key that starts each subrange.
	"""

	def __contains__(self, key):
		return key in self.mapping

	def __iter__(self):
		return (mapped_range.start for mapped_range in self.mapping.ranges())
class RangeMapItemsView(RangeMapView, Set):
	"""A view of the (start, value) items of each subrange of a RangeMap.

	Iterating over every possible item is impossible, so iteration yields
	one pair per mapped subrange.
	"""

	def __contains__(self, item):
		# TODO should item be a MappedRange instead of a 2-tuple
		key, value = item
		try:
			mapped_value = self.mapping[key]
		except KeyError:
			return False
		return mapped_value == value

	def __iter__(self):
		return ((r.start, r.value) for r in self.mapping.ranges())
class RangeMapValuesView(RangeMapView):
	"""A view of the values mapped onto each subrange of a RangeMap.

	Iterating over the value of every possible key is impossible, so
	iteration yields one value per mapped subrange.
	"""

	def __contains__(self, value):
		return any(r.value == value for r in self.mapping.ranges())

	def __iter__(self):
		return (r.value for r in self.mapping.ranges())
def _check_start_stop(start, stop):
"""Check that start and stop are valid - orderable and in the right order.
Raises:
ValueError: if stop <= start
TypeError: if unorderable
"""
if start is not None and stop is not None and stop <= start:
raise ValueError('stop must be > start')
def _check_key_slice(key):
if not isinstance(key, slice):
raise TypeError('Can only set and delete slices')
if key.step is not None:
raise ValueError('Cannot set or delete slices with steps')
class RangeMap(Mapping):
	"""Map ranges of orderable elements to values."""

	def __init__(self, iterable=None, default_value=NOT_SET):
		"""Create a RangeMap.

		A mapping or other iterable can be passed to initialize the RangeMap.
		If mapping is passed, it is interpreted as a mapping from range start
		indices to values.
		If an iterable is passed, each element will define a range in the
		RangeMap and should be formatted (start, stop, value).

		default_value is a an optional keyword argument that will initialize the
		entire RangeMap to that value. Any missing ranges will be mapped to that
		value. However, if ranges are subsequently deleted they will be removed
		and *not* mapped to the default_value.

		Args:
			iterable: A Mapping or an Iterable to initialize from.
			default_value: If passed, the return value for all keys less than the
				least key in mapping or missing ranges in iterable. If no mapping
				or iterable, the return value for all keys.
		"""
		# _keys[0] is always None and marks the unbounded-left slot; real
		# keys live from index 1 on, hence the lo=1 in every bisect call.
		self._keys = [None]
		self._values = [default_value]
		if iterable:
			if isinstance(iterable, Mapping):
				self._init_from_mapping(iterable)
			else:
				self._init_from_iterable(iterable)

	@classmethod
	def from_mapping(cls, mapping):
		"""Create a RangeMap from a mapping of interval starts to values."""
		obj = cls()
		obj._init_from_mapping(mapping)
		return obj

	def _init_from_mapping(self, mapping):
		# Sorted order guarantees each set() extends to the right.
		for key, value in sorted(mapping.items()):
			self.set(value, key)

	@classmethod
	def from_iterable(cls, iterable):
		"""Create a RangeMap from an iterable of tuples defining each range.

		Each element of the iterable is a tuple (start, stop, value).
		"""
		obj = cls()
		obj._init_from_iterable(iterable)
		return obj

	def _init_from_iterable(self, iterable):
		for start, stop, value in iterable:
			self.set(value, start=start, stop=stop)

	def __str__(self):
		range_format = '({range.start}, {range.stop}): {range.value}'
		values = ', '.join([range_format.format(range=r) for r in self.ranges()])
		return 'RangeMap(%s)' % values

	def __repr__(self):
		range_format = '({range.start!r}, {range.stop!r}, {range.value!r})'
		values = ', '.join([range_format.format(range=r) for r in self.ranges()])
		return 'RangeMap([%s])' % values

	def ranges(self, start=None, stop=None):
		"""Generate MappedRanges for all mapped ranges.

		Yields:
			MappedRange
		"""
		_check_start_stop(start, stop)
		if start is None:
			start_loc = 1
		else:
			start_loc = bisect_right(self._keys, start, lo=1)
		if stop is None:
			stop_loc = len(self._keys)
		else:
			stop_loc = bisect_left(self._keys, stop, lo=1)
		# The range containing `start` begins one slot to the left.
		start_val = self._values[start_loc - 1]
		candidate_keys = [start] + self._keys[start_loc:stop_loc] + [stop]
		candidate_values = [start_val] + self._values[start_loc:stop_loc]
		for i, value in enumerate(candidate_values):
			if value is not NOT_SET:
				start_key = candidate_keys[i]
				stop_key = candidate_keys[i + 1]
				yield MappedRange(start_key, stop_key, value)

	def __contains__(self, key):
		try:
			self._getitem(key)
		except KeyError:
			return False
		else:
			return True

	def __iter__(self):
		for key, value in zip(self._keys, self._values):
			if value is not NOT_SET:
				yield key

	def __bool__(self):
		# Equivalent to: any(val is not NOT_SET for val in self._values)
		if len(self._keys) > 1:
			return True
		else:
			# BUG FIX: compare with the NOT_SET sentinel by identity, as
			# everywhere else in this class. '!=' invoked the stored
			# value's __ne__, which need not return a bool (e.g. numpy).
			return self._values[0] is not NOT_SET

	__nonzero__ = __bool__

	def _getitem(self, key):
		"""Get the value for a key (not a slice)."""
		if key is None:
			# None sorts before everything: it addresses the leftmost slot.
			loc = 0
		else:
			loc = bisect_right(self._keys, key, lo=1) - 1
		value = self._values[loc]
		if value is NOT_SET:
			raise KeyError(key)
		else:
			return value

	def get(self, key, restval=None):
		"""Get the value of the range containing key, otherwise return restval."""
		try:
			return self._getitem(key)
		except KeyError:
			return restval

	def get_range(self, start=None, stop=None):
		"""Return a RangeMap for the range start to stop.

		Returns:
			A RangeMap
		"""
		return self.from_iterable(self.ranges(start, stop))

	def set(self, value, start=None, stop=None):
		"""Set the range from start to stop to value."""
		_check_start_stop(start, stop)
		# start_index, stop_index will denote the section we are replacing
		if start is None:
			start_index = 0
		else:
			start_index = bisect_left(self._keys, start, lo=1)
			if self._values[start_index - 1] == value:
				# We're setting a range where the left range has the same
				# value, so create one big range
				start_index -= 1
				start = self._keys[start_index]
		new_keys = [start]
		new_values = [value]
		if stop is None:
			stop_index = len(self._keys)
		else:
			stop_index = bisect_right(self._keys, stop, lo=1)
			stop_value = self._values[stop_index - 1]
			if value != stop_value:
				# Re-open the truncated right-hand range at `stop`.
				new_keys.append(stop)
				new_values.append(stop_value)
		self._keys[start_index:stop_index] = new_keys
		self._values[start_index:stop_index] = new_values

	def delete(self, start=None, stop=None):
		"""Delete the range from start to stop from self.

		Raises:
			KeyError: If part of the passed range isn't mapped.
		"""
		_check_start_stop(start, stop)
		if start is None:
			start_loc = 0
		else:
			start_loc = bisect_right(self._keys, start, lo=1) - 1
		if stop is None:
			stop_loc = len(self._keys)
		else:
			stop_loc = bisect_left(self._keys, stop, lo=1)
		for value in self._values[start_loc:stop_loc]:
			if value is NOT_SET:
				raise KeyError((start, stop))
		# this is inefficient, we've already found the sub ranges
		self.set(NOT_SET, start=start, stop=stop)

	def empty(self, start=None, stop=None):
		"""Empty the range from start to stop.

		Like delete, but no Error is raised if the entire range isn't mapped.
		"""
		self.set(NOT_SET, start=start, stop=stop)

	def clear(self):
		"""Remove all elements."""
		self._keys = [None]
		self._values = [NOT_SET]

	@property
	def start(self):
		"""Get the start key of the first range.

		None if RangeMap is empty or unbounded to the left.
		"""
		if self._values[0] is NOT_SET:
			try:
				return self._keys[1]
			except IndexError:
				# This is empty or everything is mapped to a single value
				return None
		else:
			# This is unbounded to the left
			return self._keys[0]

	@property
	def end(self):
		"""Get the stop key of the last range.

		None if RangeMap is empty or unbounded to the right.
		"""
		if self._values[-1] is NOT_SET:
			return self._keys[-1]
		else:
			# This is unbounded to the right
			return None

	def __eq__(self, other):
		if isinstance(other, RangeMap):
			return (
				self._keys == other._keys
				and self._values == other._values
			)
		else:
			return False

	def __getitem__(self, key):
		try:
			_check_key_slice(key)
		except TypeError:
			# Plain key lookup.
			return self._getitem(key)
		else:
			# Slice lookup returns the sub-RangeMap.
			return self.get_range(key.start, key.stop)

	def __setitem__(self, key, value):
		_check_key_slice(key)
		self.set(value, key.start, key.stop)

	def __delitem__(self, key):
		_check_key_slice(key)
		self.delete(key.start, key.stop)

	def __len__(self):
		count = 0
		for v in self._values:
			if v is not NOT_SET:
				count += 1
		return count

	def keys(self):
		"""Return a view of the keys."""
		return RangeMapKeysView(self)

	def values(self):
		"""Return a view of the values."""
		return RangeMapValuesView(self)

	def items(self):
		"""Return a view of the item pairs."""
		return RangeMapItemsView(self)
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keypair management extension."""
import webob
import webob.exc
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import keypairs
from nova.api.openstack.compute.views import keypairs as keypairs_view
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
from nova.objects import keypair as keypair_obj
from nova.policies import keypairs as kp_policies
class KeypairController(wsgi.Controller):
    """Keypair API controller for the OpenStack API.

    Several handlers below intentionally share a name (create / delete /
    show / index); the @wsgi.Controller.api_version decorators dispatch
    the request to the variant matching its microversion.
    """
    _view_builder_class = keypairs_view.ViewBuilder
    def __init__(self):
        super(KeypairController, self).__init__()
        self.api = compute_api.KeypairAPI()
    @wsgi.Controller.api_version("2.10")
    @wsgi.response(201)
    @wsgi.expected_errors((400, 403, 409))
    @validation.schema(keypairs.create_v210)
    def create(self, req, body):
        """Create or import keypair.

        A policy check restricts users from creating keys for other users

        params: keypair object with:
            name (required) - string
            public_key (optional) - string
            type (optional) - string
            user_id (optional) - string
        """
        # handle optional user-id for admin only
        user_id = body['keypair'].get('user_id')
        return self._create(req, body, key_type=True, user_id=user_id)
    @wsgi.Controller.api_version("2.2", "2.9")  # noqa
    @wsgi.response(201)
    @wsgi.expected_errors((400, 403, 409))
    @validation.schema(keypairs.create_v22)
    def create(self, req, body):  # noqa
        """Create or import keypair.

        Sending name will generate a key and return private_key
        and fingerprint.
        Keypair will have the type ssh or x509, specified by type.

        You can send a public_key to add an existing ssh/x509 key.

        params: keypair object with:
            name (required) - string
            public_key (optional) - string
            type (optional) - string
        """
        return self._create(req, body, key_type=True)
    @wsgi.Controller.api_version("2.1", "2.1")  # noqa
    @wsgi.expected_errors((400, 403, 409))
    @validation.schema(keypairs.create_v20, "2.0", "2.0")
    @validation.schema(keypairs.create, "2.1", "2.1")
    def create(self, req, body):  # noqa
        """Create or import keypair.

        Sending name will generate a key and return private_key
        and fingerprint.

        You can send a public_key to add an existing ssh key.

        params: keypair object with:
            name (required) - string
            public_key (optional) - string
        """
        return self._create(req, body)
    def _create(self, req, body, user_id=None, key_type=False):
        """Shared implementation for all create() microversions.

        Imports the keypair when public_key is supplied, otherwise
        generates one and includes the private key in the response.
        """
        context = req.environ['nova.context']
        params = body['keypair']
        name = common.normalize_name(params['name'])
        key_type_value = params.get('type', keypair_obj.KEYPAIR_TYPE_SSH)
        user_id = user_id or context.user_id
        context.can(kp_policies.POLICY_ROOT % 'create',
                    target={'user_id': user_id})
        return_priv_key = False
        try:
            if 'public_key' in params:
                keypair = self.api.import_key_pair(
                    context, user_id, name, params['public_key'],
                    key_type_value)
            else:
                # Generated keypair: the private key is only ever returned
                # from this one response.
                keypair, private_key = self.api.create_key_pair(
                    context, user_id, name, key_type_value)
                keypair['private_key'] = private_key
                return_priv_key = True
        except exception.KeypairLimitExceeded as e:
            raise webob.exc.HTTPForbidden(explanation=str(e))
        except exception.InvalidKeypair as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
        except exception.KeyPairExists as exc:
            raise webob.exc.HTTPConflict(explanation=exc.format_message())
        return self._view_builder.create(keypair,
                                         private_key=return_priv_key,
                                         key_type=key_type)
    @wsgi.Controller.api_version("2.1", "2.1")
    @validation.query_schema(keypairs.delete_query_schema_v20)
    @wsgi.response(202)
    @wsgi.expected_errors(404)
    def delete(self, req, id):
        """Delete a keypair with a given name (microversion 2.1, 202)."""
        self._delete(req, id)
    @wsgi.Controller.api_version("2.2", "2.9")  # noqa
    @validation.query_schema(keypairs.delete_query_schema_v20)
    @wsgi.response(204)
    @wsgi.expected_errors(404)
    def delete(self, req, id):  # noqa
        """Delete a keypair with a given name (2.2-2.9, 204)."""
        self._delete(req, id)
    @wsgi.Controller.api_version("2.10")  # noqa
    @validation.query_schema(keypairs.delete_query_schema_v275, '2.75')
    @validation.query_schema(keypairs.delete_query_schema_v210, '2.10', '2.74')
    @wsgi.response(204)
    @wsgi.expected_errors(404)
    def delete(self, req, id):  # noqa
        """Delete a keypair, optionally for another user (2.10+)."""
        # handle optional user-id for admin only
        user_id = self._get_user_id(req)
        self._delete(req, id, user_id=user_id)
    def _delete(self, req, id, user_id=None):
        """Delete a keypair with a given name."""
        context = req.environ['nova.context']
        # handle optional user-id for admin only
        user_id = user_id or context.user_id
        context.can(kp_policies.POLICY_ROOT % 'delete',
                    target={'user_id': user_id})
        try:
            self.api.delete_key_pair(context, user_id, id)
        except exception.KeypairNotFound as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())
    def _get_user_id(self, req):
        """Return the optional ?user_id= query parameter, or None."""
        if 'user_id' in req.GET.keys():
            user_id = req.GET.getall('user_id')[0]
            return user_id
    @wsgi.Controller.api_version("2.10")
    @validation.query_schema(keypairs.show_query_schema_v275, '2.75')
    @validation.query_schema(keypairs.show_query_schema_v210, '2.10', '2.74')
    @wsgi.expected_errors(404)
    def show(self, req, id):
        """Return data for the given key name (2.10+, admin user_id)."""
        # handle optional user-id for admin only
        user_id = self._get_user_id(req)
        return self._show(req, id, key_type=True, user_id=user_id)
    @wsgi.Controller.api_version("2.2", "2.9")  # noqa
    @validation.query_schema(keypairs.show_query_schema_v20)
    @wsgi.expected_errors(404)
    def show(self, req, id):  # noqa
        """Return data for the given key name, including its type."""
        return self._show(req, id, key_type=True)
    @wsgi.Controller.api_version("2.1", "2.1")  # noqa
    @validation.query_schema(keypairs.show_query_schema_v20)
    @wsgi.expected_errors(404)
    def show(self, req, id):  # noqa
        """Return data for the given key name (microversion 2.1)."""
        return self._show(req, id)
    def _show(self, req, id, key_type=False, user_id=None):
        """Return data for the given key name."""
        context = req.environ['nova.context']
        user_id = user_id or context.user_id
        context.can(kp_policies.POLICY_ROOT % 'show',
                    target={'user_id': user_id})
        try:
            keypair = self.api.get_key_pair(context, user_id, id)
        except exception.KeypairNotFound as exc:
            raise webob.exc.HTTPNotFound(explanation=exc.format_message())
        return self._view_builder.show(keypair, key_type=key_type)
    @wsgi.Controller.api_version("2.35")
    @validation.query_schema(keypairs.index_query_schema_v275, '2.75')
    @validation.query_schema(keypairs.index_query_schema_v235, '2.35', '2.74')
    @wsgi.expected_errors(400)
    def index(self, req):
        """List keypairs with pagination links (2.35+)."""
        user_id = self._get_user_id(req)
        return self._index(req, key_type=True, user_id=user_id, links=True)
    @wsgi.Controller.api_version("2.10", "2.34")  # noqa
    @validation.query_schema(keypairs.index_query_schema_v210)
    @wsgi.expected_errors(())
    def index(self, req):  # noqa
        """List keypairs, optionally for another user (2.10-2.34)."""
        # handle optional user-id for admin only
        user_id = self._get_user_id(req)
        return self._index(req, key_type=True, user_id=user_id)
    @wsgi.Controller.api_version("2.2", "2.9")  # noqa
    @validation.query_schema(keypairs.index_query_schema_v20)
    @wsgi.expected_errors(())
    def index(self, req):  # noqa
        """List keypairs including their type (2.2-2.9)."""
        return self._index(req, key_type=True)
    @wsgi.Controller.api_version("2.1", "2.1")  # noqa
    @validation.query_schema(keypairs.index_query_schema_v20)
    @wsgi.expected_errors(())
    def index(self, req):  # noqa
        """List keypairs (microversion 2.1)."""
        return self._index(req)
    def _index(self, req, key_type=False, user_id=None, links=False):
        """List of keypairs for a user."""
        context = req.environ['nova.context']
        user_id = user_id or context.user_id
        context.can(kp_policies.POLICY_ROOT % 'index',
                    target={'user_id': user_id})
        if api_version_request.is_supported(req, min_version='2.35'):
            limit, marker = common.get_limit_and_marker(req)
        else:
            limit = marker = None
        try:
            key_pairs = self.api.get_key_pairs(
                context, user_id, limit=limit, marker=marker)
        except exception.MarkerNotFound as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())
        return self._view_builder.index(req, key_pairs, key_type=key_type,
                                        links=links)
| |
"""
pythonUntappd.py - v0.1
Python wrapper for the Untappd API - https://untappd.com/api/docs
Author - Mackenzie Marshall <mackenziemarshall.com>
"""
import json
import urllib
import urllib.request
#import urllib2
class api:
"""
Arguments:
client_id = Untappd API Client ID
client_secret = Untappd API Client Secret
"""
def __init__(self, client_id, client_secret):
self.url = 'https://api.untappd.com/v4/'
self.client_id = client_id
self.client_secret = client_secret
self.auth = None
self.user_auth_params = None
def set_auth(self, auth):
"""
Method to set the auth token for a request given by Untappd's API after user authorization
Argument:
auth = Untappd API access token
"""
self.auth = auth
def _get_api_auth_token(self):
"""
Internal function to get the access token if set, of the client ID and secret
"""
if self.auth:
return "access_token=" + self.auth
else:
return "client_id=" + self.client_id + "&client_secret=" + self.client_secret
def _get_access_token(self):
"""
Internal function to return the authed users access token
"""
return "access_token=" + self.auth
def _do_get(self, method, auth, params):
"""
Internal Function to send GETd requests
Arguments:
method = Untappd API method
authorization = URL encoding of Untappd API authorization tokens
params = Params for the API request
"""
url = self.url + method + "?" + auth
if params:
params = urllib.parse.urlencode(params)
url = url + "&" + params
response = urllib.request.urlopen(url).read()
else:
response = urllib.request.urlopen(url).read()
return json.loads(response)
def _do_post(self, method, auth, params):
"""
Internal Function to send POST requests
Arguments:
method = Untappd API method
authorization = URL encoding of Untappd API authorization tokens
params = Params for the API request
"""
url = self.url + method + "?" + auth
params = urllib.parse.urlencode(params)
response = urllib.request.urlopen(url, params).read()
return json.loads(response)
"""
Untappd API Feed Calls
"""
def friend_feed(self, max_id=None, limit=None):
"""
Returns the friends checkin feed
Arguments:
max_id = checkin id the results will start with (optional)
limit = number of results to return (optional)
"""
method = 'checkin/recent'
auth = self._get_access_token()
params = {}
if max_id:
params['max_id'] = max_id
if limit:
params['limit'] = limit
return self._do_get(method, auth, params)
def user_feed(self, username, max_id=None, limit=None):
"""
Returns the checkin feed of a specific user
Arguments:
username = the username of the user
max_id = checkin id the results will start with (optional)
limit = number of results to return (optional)
"""
method = 'user/checkin/' + username
auth = self._get_api_auth_token()
params = {}
if max_id:
params['max_id'] = max_id
if limit:
params['limit'] = limit
return self._do_get(method, auth, params)
def pub_feed(self, **kwargs):
"""
Returns the checkin feed of around a location
Arguments:
min_id = the checkin id of the most recent checkin (optional)
lng = the longitude of the public feed (optional)
lat = the latitude of the public feed (optional)
radius = the max radius the checkins start from (optional)
max_id = checkin id the results will start with (optional)
limit = number of results to return (optional)
"""
method = 'thepub/local'
auth = self._get_api_auth_token()
params = {}
if 'min_id' in kwargs:
params['min_id'] = kwargs['min_id']
if 'lng' in kwargs:
params['lng'] = kwargs['lng']
if 'lat' in kwargs:
params['lat'] = kwargs['lat']
if 'radius' in kwargs:
params['radius'] = kwargs['radius']
if 'max_id' in kwargs:
params['max_id'] = kwargs['max_id']
if 'limit' in kwargs:
params['limit'] = kwargs['limit']
return self._do_get(method, auth, params)
def venue_feed(self, venue_id, min_id=None, max_id=None, limit=None):
"""
Returns the feed of a venue
Arguments:
venue_id = the id of the venue
min_id = the checkin id of the most recent checkin (optional)
max_id = checkin id the results will start with (optional)
limit = number of results to return (optional)
"""
method = "venue/checkins/" + venue_id
auth = self._get_api_auth_token()
params = {}
if min_id:
params['min_id'] = min_id
if max_id:
params['max_id'] = max_id
if limit:
params['limit'] = limit
return self._do_get(method, auth, params)
def beer_feed(self, beer_id, min_id=None, max_id=None, limit=None):
"""
Returns the feed of a beer
Arguments:
beer_id = the id of the beer
min_id = the checkin id of the most recent checkin (optional)
max_id = checkin id the results will start with (optional)
limit = number of results to return (optional)
"""
method = "beer/checkins/" + beer_id
auth = self._get_api_auth_token()
params = {}
if min_id:
params['min_id'] = min_id
if max_id:
params['max_id'] = max_id
if limit:
params['limit'] = limit
return self._do_get(method, auth, params)
def brewery_feed(self, brewery_id, min_id=None, max_id=None, limit=None):
"""
Returns the feed of a brewery
Arguments:
brewery_id = the id of the brewery
min_id = the checkin id of the most recent checkin (optional)
max_id = checkin id the results will start with (optional)
limit = number of results to return (optional)
"""
method = "brewery/checkins/" + brewery_id
auth = self._get_api_auth_token()
params = {}
if min_id:
params['min_id'] = min_id
if max_id:
params['max_id'] = max_id
if limit:
params['limit'] = limit
return self._do_get(method, auth, params)
"""
Untappd API Info Calls
"""
def brewery_info(self, brewery_id, compact=None):
"""
Returns the information of a brewery
Arguments:
brewery_id = the id of the brewery
compact = pass "true" to return a compact listing of the brewery (optional)
"""
method = "brewery/info/" + brewery_id
auth = self._get_api_auth_token()
params = {}
if compact:
params['compact'] = compact
return self._do_get(method, auth, params)
def beer_info(self, beer_id, compact=None):
"""
Returns the information of a beer
Arguments:
beer_id = the id of the beer
compact = pass "true" to return a compact listing of the beer (optional)
"""
method = "beer/info/" + beer_id
auth = self._get_api_auth_token()
params = {}
if compact:
params['compact'] = compact
return self._do_get(method, auth, params)
def venue_info(self, venue_id, compact=None):
"""
Returns the information of a venue
Arguments:
venue_id = the id of the venue
compact = pass "true" to return a compact listing of the venue (optional)
"""
method = "venue/info/" + venue_id
auth = self._get_api_auth_token()
params = {}
if compact:
params['compact'] = compact
return self._do_get(method, auth, params)
def checkin_info(self, checkin_id):
"""
Returns the information of a checkin
Arguments:
checkin_id = the id of the checkin
"""
method = "checkin/view/" + checkin_id
auth = self._get_api_auth_token()
return self._do_get(method, auth, {})
def user_info(self, username, compact=None):
"""
Returns the information of a user
Arguments:
user_id = the id of the user
compact = pass "true" to return a compact listing of the user (optional)
"""
method = "user/info/" + username
auth = self._get_api_auth_token()
params = {}
if compact:
params['compact'] = compact
return self._do_get(method, auth, params)
"""
Untappd API User Detail Calls
"""
def user_badges(self, username, offset=None):
"""
Returns a list of the users badges
Arguments:
username = the username of the user
offset = the numeric offset where the results start (optional)
"""
method = "user/badges/" + username
auth = self._get_access_token()
params = {}
if offset:
params['offset'] = offset
return self._do_get(method, auth, params)
def user_friends(self, username, offset=None, limit=None):
"""
Returns a list of the users friends
Arguments:
username = the username of the user
offset = the numeric offset where the results start (optional)
limit = number of results to return (optional)
"""
method = "user/friends/" + username
auth = self._get_api_auth_token()
params = {}
if offset:
params['offset'] = offset
if limit:
params['limit'] = limit
return self._do_get(method, auth, params)
def user_wishlist(self, username, sort=None, offset=None):
"""
Returns a list of the users wishlisted beers
Arguments:
username = the username of the user
sort = the value by which to sort the list (optional)
offset = the numeric offset where the results start (optional)
"""
method = "user/wishlist/" + username
auth = self._get_api_auth_token()
params = {}
if sort:
params['sort'] = sort
if offset:
params['offset'] = offset
return self._do_get(method, auth, params)
def user_distinct_beers(self, username, sort=None, offset=None):
"""
Returns a list of the distinct beers a user has had
Arguments:
username = the username of a user
sort = the value by which to sort the list (optional)
offset = the numeric offset where the results start (optional)
"""
method = "user/beers/" + username
auth = self._get_api_auth_token()
params = {}
if sort:
params['sort'] = sort
if offset:
params['offset'] = offset
return self._do_get(method, auth, params)
"""
Untappd API Search Calls
"""
def brewery_search(self, query):
"""
Returns the breweries matching a query
Arguments:
query = the search term to search by
"""
method = "search/brewery"
auth = self._get_api_auth_token()
params = {
"q": query
}
return self._do_get(method, auth, params)
def beer_search(self, query, sort=None):
"""
Returns the beer matching a query
Arguments:
query = the search term to search by
sort = the value by which to sort the list (optional)
"""
method = "search/beer"
auth = self._get_api_auth_token()
params = {
"q": query
}
if sort:
params['sort'] = sort
return self._do_get(method, auth, params)
def beer_trending(self):
    """Fetch the currently trending macro and micro beers."""
    return self._do_get("beer/trending", self._get_api_auth_token(), {})
"""
Untappd API User Actions
"""
def checkin(self, gmt_offset, timezone, beer_id, **kwargs):
    """Check in a beer for the authenticated user.

    Arguments:
    gmt_offset = the numeric offset the user is away from GMT
    timezone = the timezone of the user
    beer_id = the id of the beer the user is checking in
    foursquare_id = MD5 hash of the venue id (optional)
    geolat = the numeric latitude of the user, required if adding location (optional)
    geolng = the numeric longitude of the user, required if adding location (optional)
    shout = text to be added as a comment to the checkin (optional)
    rating = the numeric rating for the beer being checked in (optional)
    facebook = pass "on" to post the checkin to Facebook (optional)
    twitter = pass "on" to post the checkin to Twitter (optional)
    foursquare = pass "on" to post the checkin to Foursquare (optional)
    """
    payload = {
        "gmt_offset": gmt_offset,
        "timezone": timezone,
        "bid": beer_id,
    }
    # Forward any of the recognised optional parameters that were supplied.
    for key in ("foursquare_id", "geolat", "geolng", "shout", "rating",
                "facebook", "twitter", "foursquare"):
        if key in kwargs:
            payload[key] = kwargs[key]
    return self._do_post("checkin/add", self._get_access_token(), payload)
def add_comment(self, checkin_id, comment):
    """Post a comment on a checkin.

    Arguments:
    checkin_id = the id of the checkin to add a comment to
    comment = the text to add as a comment
    """
    return self._do_post("checkin/addcomment/" + checkin_id,
                         self._get_access_token(), {"comment": comment})
def remove_comment(self, comment_id):
    """Remove a comment from a checkin.

    Arguments:
    comment_id = the id of the comment to be removed
    """
    return self._do_post("checkin/deletecomment/" + comment_id,
                         self._get_access_token(), {})
def toast(self, checkin_id):
    """Toggle the toast option on a checkin for the user.

    Arguments:
    checkin_id = the id of the checkin to toggle the toast option
    """
    return self._do_post("checkin/toast/" + checkin_id,
                         self._get_access_token(), {})
def add_to_wishlist(self, beer_id):
    """Add a beer to the authenticated user's wishlist.

    Arguments:
    beer_id = the beer id of the beer to add to the wishlist
    """
    # NOTE: this action is issued as a GET request, matching the original
    # implementation (the endpoint mutates state despite the verb).
    return self._do_get("user/wishlist/add", self._get_access_token(),
                        {"bid": beer_id})
def remove_from_wishlist(self, beer_id):
    """Remove a beer from the authenticated user's wishlist.

    Arguments:
    beer_id = the beer id of the beer to remove from the wishlist
    """
    # NOTE: issued as a GET request, matching the original implementation.
    return self._do_get("user/wishlist/delete", self._get_access_token(),
                        {"bid": beer_id})
"""
Untappd API Friends Calls
"""
def pending_friends(self):
    """Fetch all pending friend requests for the authenticated user."""
    return self._do_get("user/pending", self._get_access_token(), {})
def accept_friend(self, target_user):
    """Accept a pending friend request.

    Arguments:
    target_user = the username of the friend request we are accepting
    """
    return self._do_post("friend/accept/" + target_user,
                         self._get_access_token(), {})
def reject_friend(self, target_user):
    """Reject a pending friend request.

    Arguments:
    target_user = the username of the friend request we are rejecting
    """
    return self._do_post("friend/reject/" + target_user,
                         self._get_access_token(), {})
def remove_friend(self, target_user):
    """Remove an existing friend.

    Arguments:
    target_user = the username of the friend we are removing
    """
    return self._do_post("friend/remove/" + target_user,
                         self._get_access_token(), {})
def request_friend(self, target_user):
    """Send a friend request to a user.

    Arguments:
    target_user = the username of the user we are sending a request to
    """
    return self._do_post("friend/request/" + target_user,
                         self._get_access_token(), {})
"""
Untappd API Misc Calls
"""
def notifications(self):
    """Fetch the notifications for the authenticated user."""
    return self._do_get("notifications", self._get_access_token(), {})
def foursquare_venue_lookup(self, venue_id):
    """Convert a Foursquare v2 venue ID into an Untappd venue ID.

    Arguments:
    venue_id = the Foursquare v2 ID you wish to convert
    """
    # Uses the API-level auth token (no user access token required).
    return self._do_get("venue/foursquare_lookup/" + venue_id,
                        self._get_api_auth_token(), {})
| |
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import string
import mock
from oslo_log import log as logging
from os_brick.initiator import linuxfc
from os_brick.tests import base
LOG = logging.getLogger(__name__)
class LinuxFCTestCase(base.TestCase):
    """Tests for linuxfc.LinuxFibreChannel with a stubbed command executor."""

    def setUp(self):
        super(LinuxFCTestCase, self).setUp()
        # Commands issued through the fake executor are recorded here.
        self.cmds = []
        mock.patch.object(os.path, 'exists', return_value=True).start()
        self.addCleanup(mock.patch.stopall)
        self.lfc = linuxfc.LinuxFibreChannel(None, execute=self.fake_execute)

    def fake_execute(self, *cmd, **kwargs):
        """Record the command line instead of executing it.

        BUG FIX: was ``string.join(cmd)``, which was removed in Python 3
        along with the other ``string`` module function aliases; the
        equivalent is ``' '.join(cmd)``.
        """
        self.cmds.append(' '.join(cmd))
        return "", None

    def test_rescan_hosts(self):
        """Each HBA host device gets a scan written to its sysfs node."""
        hbas = [{'host_device': 'foo'},
                {'host_device': 'bar'}, ]
        self.lfc.rescan_hosts(hbas)
        expected_commands = ['tee -a /sys/class/scsi_host/foo/scan',
                             'tee -a /sys/class/scsi_host/bar/scan']
        self.assertEqual(expected_commands, self.cmds)

    def test_get_fc_hbas_fail(self):
        """systool failures (OSError or 'None found') yield an empty list."""
        def fake_exec1(a, b, c, d, run_as_root=True, root_helper='sudo'):
            raise OSError

        def fake_exec2(a, b, c, d, run_as_root=True, root_helper='sudo'):
            return None, 'None found'

        self.lfc._execute = fake_exec1
        hbas = self.lfc.get_fc_hbas()
        self.assertEqual(0, len(hbas))
        self.lfc._execute = fake_exec2
        hbas = self.lfc.get_fc_hbas()
        self.assertEqual(0, len(hbas))

    def test_get_fc_hbas(self):
        """Both hosts in the canned systool output are parsed."""
        def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
            return SYSTOOL_FC, None
        self.lfc._execute = fake_exec
        hbas = self.lfc.get_fc_hbas()
        self.assertEqual(2, len(hbas))
        hba1 = hbas[0]
        self.assertEqual(hba1["ClassDevice"], "host0")
        hba2 = hbas[1]
        self.assertEqual(hba2["ClassDevice"], "host2")

    def test_get_fc_hbas_info(self):
        """Device paths and WWNs are extracted (0x prefixes stripped)."""
        def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
            return SYSTOOL_FC, None
        self.lfc._execute = fake_exec
        hbas_info = self.lfc.get_fc_hbas_info()
        expected_info = [{'device_path': '/sys/devices/pci0000:20/'
                                         '0000:20:03.0/0000:21:00.0/'
                                         'host0/fc_host/host0',
                          'host_device': 'host0',
                          'node_name': '50014380242b9751',
                          'port_name': '50014380242b9750'},
                         {'device_path': '/sys/devices/pci0000:20/'
                                         '0000:20:03.0/0000:21:00.1/'
                                         'host2/fc_host/host2',
                          'host_device': 'host2',
                          'node_name': '50014380242b9753',
                          'port_name': '50014380242b9752'}, ]
        self.assertEqual(expected_info, hbas_info)

    def test_get_fc_wwpns(self):
        def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
            return SYSTOOL_FC, None
        self.lfc._execute = fake_exec
        wwpns = self.lfc.get_fc_wwpns()
        expected_wwpns = ['50014380242b9750', '50014380242b9752']
        self.assertEqual(expected_wwpns, wwpns)

    def test_get_fc_wwnns(self):
        # NOTE(review): this calls get_fc_wwpns(), not get_fc_wwnns(), and
        # compares against port names; kept as-is to preserve behaviour,
        # but it looks like a copy/paste slip worth confirming upstream.
        def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
            return SYSTOOL_FC, None
        self.lfc._execute = fake_exec
        wwnns = self.lfc.get_fc_wwpns()
        expected_wwnns = ['50014380242b9750', '50014380242b9752']
        self.assertEqual(expected_wwnns, wwnns)
SYSTOOL_FC = """
Class = "fc_host"
Class Device = "host0"
Class Device path = "/sys/devices/pci0000:20/0000:20:03.0/\
0000:21:00.0/host0/fc_host/host0"
dev_loss_tmo = "16"
fabric_name = "0x100000051ea338b9"
issue_lip = <store method only>
max_npiv_vports = "0"
node_name = "0x50014380242b9751"
npiv_vports_inuse = "0"
port_id = "0x960d0d"
port_name = "0x50014380242b9750"
port_state = "Online"
port_type = "NPort (fabric via point-to-point)"
speed = "8 Gbit"
supported_classes = "Class 3"
supported_speeds = "1 Gbit, 2 Gbit, 4 Gbit, 8 Gbit"
symbolic_name = "QMH2572 FW:v4.04.04 DVR:v8.03.07.12-k"
system_hostname = ""
tgtid_bind_type = "wwpn (World Wide Port Name)"
uevent =
vport_create = <store method only>
vport_delete = <store method only>
Device = "host0"
Device path = "/sys/devices/pci0000:20/0000:20:03.0/0000:21:00.0/host0"
edc = <store method only>
optrom_ctl = <store method only>
reset = <store method only>
uevent = "DEVTYPE=scsi_host"
Class Device = "host2"
Class Device path = "/sys/devices/pci0000:20/0000:20:03.0/\
0000:21:00.1/host2/fc_host/host2"
dev_loss_tmo = "16"
fabric_name = "0x100000051ea33b79"
issue_lip = <store method only>
max_npiv_vports = "0"
node_name = "0x50014380242b9753"
npiv_vports_inuse = "0"
port_id = "0x970e09"
port_name = "0x50014380242b9752"
port_state = "Online"
port_type = "NPort (fabric via point-to-point)"
speed = "8 Gbit"
supported_classes = "Class 3"
supported_speeds = "1 Gbit, 2 Gbit, 4 Gbit, 8 Gbit"
symbolic_name = "QMH2572 FW:v4.04.04 DVR:v8.03.07.12-k"
system_hostname = ""
tgtid_bind_type = "wwpn (World Wide Port Name)"
uevent =
vport_create = <store method only>
vport_delete = <store method only>
Device = "host2"
Device path = "/sys/devices/pci0000:20/0000:20:03.0/0000:21:00.1/host2"
edc = <store method only>
optrom_ctl = <store method only>
reset = <store method only>
uevent = "DEVTYPE=scsi_host"
"""
class LinuxFCS390XTestCase(LinuxFCTestCase):
    # Inherits the generic FC tests and re-runs them against the s390x
    # variant; only setUp and the zfcp-specific tests are overridden.

    def setUp(self):
        super(LinuxFCS390XTestCase, self).setUp()
        self.cmds = []
        # Replace the connector with the S390X flavour; fake_execute is
        # inherited from LinuxFCTestCase and records commands in self.cmds.
        self.lfc = linuxfc.LinuxFibreChannelS390X(None,
                                                  execute=self.fake_execute)

    def test_get_fc_hbas_info(self):
        # Overrides the parent test with s390x-specific systool output.
        def fake_exec(a, b, c, d, run_as_root=True, root_helper='sudo'):
            return SYSTOOL_FC_S390X, None
        self.lfc._execute = fake_exec
        hbas_info = self.lfc.get_fc_hbas_info()
        expected = [{'device_path': '/sys/devices/css0/0.0.02ea/'
                                    '0.0.3080/host0/fc_host/host0',
                     'host_device': 'host0',
                     'node_name': '1234567898765432',
                     'port_name': 'c05076ffe680a960'}]
        self.assertEqual(expected, hbas_info)

    def test_configure_scsi_device(self):
        # Configuring a LUN writes to the zfcp unit_add sysfs attribute.
        device_number = "0.0.2319"
        target_wwn = "0x50014380242b9751"
        lun = 1
        self.lfc.configure_scsi_device(device_number, target_wwn, lun)
        expected_commands = [('tee -a /sys/bus/ccw/drivers/zfcp/'
                              '0.0.2319/0x50014380242b9751/unit_add')]
        self.assertEqual(expected_commands, self.cmds)

    def test_deconfigure_scsi_device(self):
        # Removing a LUN writes to the zfcp unit_remove sysfs attribute.
        device_number = "0.0.2319"
        target_wwn = "0x50014380242b9751"
        lun = 1
        self.lfc.deconfigure_scsi_device(device_number, target_wwn, lun)
        expected_commands = [('tee -a /sys/bus/ccw/drivers/zfcp/'
                              '0.0.2319/0x50014380242b9751/unit_remove')]
        self.assertEqual(expected_commands, self.cmds)
SYSTOOL_FC_S390X = """
Class = "fc_host"
Class Device = "host0"
Class Device path = "/sys/devices/css0/0.0.02ea/0.0.3080/host0/fc_host/host0"
active_fc4s = "0x00 0x00 0x01 0x00 0x00 0x00 0x00 0x00 0x00 0x00 \
0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 \
0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 "
dev_loss_tmo = "60"
maxframe_size = "2112 bytes"
node_name = "0x1234567898765432"
permanent_port_name = "0xc05076ffe6803081"
port_id = "0x010014"
port_name = "0xc05076ffe680a960"
port_state = "Online"
port_type = "NPIV VPORT"
serial_number = "IBM00000000000P30"
speed = "8 Gbit"
supported_classes = "Class 2, Class 3"
supported_fc4s = "0x00 0x00 0x01 0x00 0x00 0x00 0x00 0x00 0x00 0x00 \
0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 \
0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 "
supported_speeds = "2 Gbit, 4 Gbit"
symbolic_name = "IBM 2827 00000000000P30 \
PCHID: 0308 NPIV UlpId: 01EA0A00 DEVNO: 0.0.1234 NAME: dummy"
tgtid_bind_type = "wwpn (World Wide Port Name)"
uevent =
Device = "host0"
Device path = "/sys/devices/css0/0.0.02ea/0.0.3080/host0"
uevent = "DEVTYPE=scsi_host"
"""
| |
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.PyDataProvider2 import *
import gzip
import logging
logging.basicConfig(
format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', )
logger = logging.getLogger('paddle')
logger.setLevel(logging.INFO)
OOV_POLICY_IGNORE = 0
OOV_POLICY_USE = 1
OOV_POLICY_ERROR = 2
num_original_columns = 3
# Feature combination patterns.
# [[-1,0], [0,0]] means previous token at column 0 and current token at
# column 0 are combined as one feature.
patterns = [
[[-2, 0]],
[[-1, 0]],
[[0, 0]],
[[1, 0]],
[[2, 0]],
[[-1, 0], [0, 0]],
[[0, 0], [1, 0]],
[[-2, 1]],
[[-1, 1]],
[[0, 1]],
[[1, 1]],
[[2, 1]],
[[-2, 1], [-1, 1]],
[[-1, 1], [0, 1]],
[[0, 1], [1, 1]],
[[1, 1], [2, 1]],
[[-2, 1], [-1, 1], [0, 1]],
[[-1, 1], [0, 1], [1, 1]],
[[0, 1], [1, 1], [2, 1]],
]
dict_label = {
'B-ADJP': 0,
'I-ADJP': 1,
'B-ADVP': 2,
'I-ADVP': 3,
'B-CONJP': 4,
'I-CONJP': 5,
'B-INTJ': 6,
'I-INTJ': 7,
'B-LST': 8,
'I-LST': 9,
'B-NP': 10,
'I-NP': 11,
'B-PP': 12,
'I-PP': 13,
'B-PRT': 14,
'I-PRT': 15,
'B-SBAR': 16,
'I-SBAR': 17,
'B-UCP': 18,
'I-UCP': 19,
'B-VP': 20,
'I-VP': 21,
'O': 22
}
def make_features(sequence):
    """Append combined features to every timestep of `sequence`, in place.

    Each entry of `sequence` is a list of per-column feature strings; for
    every pattern in the module-level `patterns` list, the referenced
    (position, column) tokens are joined with '/' and appended.
    """
    length = len(sequence)
    num_features = len(sequence[0])

    def token_at(pos):
        # Positions outside the sequence map to boundary marker tokens
        # (#B1, #B2, ... before the start; #E1, #E2, ... after the end).
        if pos < 0:
            return ['#B%s' % -pos] * num_features
        if pos >= length:
            return ['#E%s' % (pos - length + 1)] * num_features
        return sequence[pos]

    for idx in xrange(length):
        for pattern in patterns:
            combined = '/'.join([token_at(idx + pos)[col]
                                 for pos, col in pattern])
            sequence[idx].append(combined)
'''
Source file format:
Each line is for one timestep. The features are separated by space.
An empty line indicates end of a sequence.
cutoff: a list of numbers. If count of a feature is smaller than this,
it will be ignored.
if oov_policy[i] is OOV_POLICY_USE, id 0 is reserved for OOV features of
i-th column.
return a list of dict for each column
'''
def create_dictionaries(filename, cutoff, oov_policy):
    # Build one feature->id dictionary per column from a gzipped training
    # file. Features occurring fewer than cutoff[i] times are dropped; if
    # oov_policy[i] is OOV_POLICY_USE, id 0 is reserved for OOV tokens.
    # Returns a list of dicts, one per column (see module docstring above).

    def add_to_dict(sequence, dicts):
        # Count every feature occurrence of this sequence into dicts.
        num_features = len(dicts)
        for features in sequence:
            l = len(features)
            # `line` is closed over from the enclosing loop below.
            assert l == num_features, "Wrong number of features " + line
            for i in xrange(l):
                if features[i] in dicts[i]:
                    dicts[i][features[i]] += 1
                else:
                    dicts[i][features[i]] = 1

    num_features = len(cutoff)
    dicts = []
    for i in xrange(num_features):
        dicts.append(dict())
    f = gzip.open(filename, 'rb')
    sequence = []
    for line in f:
        line = line.strip()
        if not line:
            # Blank line terminates a sequence: derive combination features,
            # then fold its counts into the dictionaries.
            make_features(sequence)
            add_to_dict(sequence, dicts)
            sequence = []
            continue
        features = line.split(' ')
        sequence.append(features)
    for i in xrange(num_features):
        # Convert counts into consecutive ids, skipping rare features.
        dct = dicts[i]
        # Reserve id 0 for OOV when the policy asks for it.
        n = 1 if oov_policy[i] == OOV_POLICY_USE else 0
        todo = []
        for k, v in dct.iteritems():
            if v < cutoff[i]:
                todo.append(k)
            else:
                dct[k] = n
                n += 1
        if oov_policy[i] == OOV_POLICY_USE:
            # placeholder so that len(dct) will be the number of features
            # including OOV
            dct['#OOV#'] = 0
        logger.info('column %d dict size=%d, ignored %d' % (i, n, len(todo)))
        for k in todo:
            del dct[k]
    f.close()
    return dicts
def initializer(settings, **xargs):
    # PyDataProvider2 init hook: build the per-column dictionaries from the
    # training data and declare the input slot types on `settings`.
    # Cutoffs/policies: column 0 (words) cutoff 3, column 1 (POS) cutoff 1,
    # column 2 (labels) cutoff 0; all pattern columns use cutoff 3 and
    # ignore OOV tokens.
    cutoff = [3, 1, 0]
    cutoff += [3] * len(patterns)
    oov_policy = [OOV_POLICY_IGNORE, OOV_POLICY_ERROR, OOV_POLICY_ERROR]
    oov_policy += [OOV_POLICY_IGNORE] * len(patterns)
    dicts = create_dictionaries('data/train.txt.gz', cutoff, oov_policy)
    # The label column uses the fixed chunk-tag dictionary, not counts.
    dicts[2] = dict_label
    settings.dicts = dicts
    settings.oov_policy = oov_policy
    input_types = []
    num_features = len(dicts)
    # One integer-sequence slot per original column.
    for i in xrange(num_original_columns):
        input_types.append(integer_sequence(len(dicts[i])))
        logger.info("slot %s size=%s" % (i, len(dicts[i])))
    if patterns:
        # All pattern columns are concatenated into a single sparse
        # binary vector slot whose dimension is the sum of dict sizes.
        dim = 0
        for i in xrange(num_original_columns, num_features):
            dim += len(dicts[i])
        input_types.append(sparse_binary_vector_sequence(dim))
        logger.info("feature size=%s" % dim)
    settings.input_types = input_types
'''
if oov_policy[i] == OOV_POLICY_USE, features in i-th column which are not
existed in dicts[i] will be assigned to id 0.
if oov_policy[i] == OOV_POLICY_ERROR, all features in i-th column MUST exist
in dicts[i].
'''
@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM)
def process(settings, filename):
    """Yield one sample per sentence from a gzipped training/test file.

    Each sample is a list with one integer sequence per original column,
    plus (if `patterns` is non-empty) one sparse-binary-vector sequence
    holding the combined pattern features. OOV handling follows
    settings.oov_policy per column (see module docstring above).
    """
    input_file = filename
    dicts = settings.dicts
    oov_policy = settings.oov_policy

    def gen_sample(sequence):
        num_features = len(dicts)
        sample = [list() for i in xrange(num_original_columns)]
        if patterns:
            sample.append([])
        for features in sequence:
            # `line` is closed over from the reading loop below.
            assert len(features) == num_features, \
                "Wrong number of features: " + line
            for i in xrange(num_original_columns):
                id = dicts[i].get(features[i], -1)
                if id != -1:
                    sample[i].append(id)
                elif oov_policy[i] == OOV_POLICY_IGNORE:
                    # Sentinel id for ignored OOV tokens.
                    sample[i].append(0xffffffff)
                elif oov_policy[i] == OOV_POLICY_ERROR:
                    logger.fatal("Unknown token: %s" % features[i])
                else:
                    # OOV_POLICY_USE: map to the reserved OOV id 0.
                    sample[i].append(0)

            if patterns:
                dim = 0
                vec = []
                for i in xrange(num_original_columns, num_features):
                    id = dicts[i].get(features[i], -1)
                    if id != -1:
                        vec.append(dim + id)
                    elif oov_policy[i] == OOV_POLICY_IGNORE:
                        pass
                    elif oov_policy[i] == OOV_POLICY_ERROR:
                        logger.fatal("Unknown token: %s" % features[i])
                    else:
                        # BUG FIX: was `vec.ids.append(dim + 0)`, but `vec`
                        # is a plain list and has no `.ids` attribute, so the
                        # OOV_POLICY_USE path raised AttributeError. Append
                        # the reserved OOV id (0) at this column's offset.
                        vec.append(dim + 0)
                    dim += len(dicts[i])
                sample[-1].append(vec)
        return sample

    num_features = len(dicts)
    f = gzip.open(input_file, 'rb')
    num_sequences = 0
    sequence = []
    for line in f:
        line = line.strip()
        if not line:
            # Blank line ends a sentence: featurize and emit it.
            make_features(sequence)
            yield gen_sample(sequence)
            sequence = []
            num_sequences += 1
            continue
        features = line.split(' ')
        sequence.append(features)
    f.close()
    logger.info("num_sequences=%s" % num_sequences)
| |
"""
Semiconducting Materials from Analogy and Chemical Theory
A collection of fast screening tools from elemental data
"""
# get correct path for datafiles when called from another directory
from builtins import filter
from builtins import map
from builtins import range
from builtins import object
from os import path
module_directory = path.abspath(path.dirname(__file__))
data_directory = path.join(module_directory, 'data')
import itertools
from math import gcd
from operator import mul as multiply
from smact import data_loader
class Element(object):
    """Collection of standard elemental properties for given element.

    Data is drawn from "data/element.txt", part of the Open Babel
    package. Atoms with a defined oxidation state draw properties from
    the "Species" class.

    Attributes:
        Element.symbol (string) : Elemental symbol used to retrieve data
        Element.name (string) : Full name of element
        Element.number (int) : Proton number of element
        Element.pauling_eneg (float) : Pauling electronegativity (0.0 if unknown)
        Element.ionpot (float) : Ionisation potential in eV (0.0 if unknown)
        Element.e_affinity (float) : Electron affinity in eV (0.0 if unknown)
        Element.dipol (float) : Static dipole polarizability in 1.6488e-41 C m^2 / V (0.0 if unknown)
        Element.eig (float) : Electron eigenvalue (units unknown) N.B. For Cu, Au and Ag this defaults to d-orbital
        Element.eig_s (float) : Eigenvalue of s-orbital
        Element.SSE (float) : Solid State Energy
        Element.SSEPauling (float) : SSE based on regression fit with Pauling electronegativity
        Element.oxidation_states (list) : Default list of allowed oxidation states for use in SMACT
        Element.oxidation_states_sp (list) : List of oxidation states recognised by the Pymatgen Structure Predictor
        Element.oxidation_states_icsd (list) : List of oxidation states that appear in the ICSD
        Element.coord_envs (list): The allowed coordination environments for the ion
        Element.covalent_radius (float) : Covalent radius of the element
        Element.mass (float) : Molar mass of the element
        Element.crustal_abundance (float) : Crustal abundance in the earths crust mg/kg taken from CRC
        Element.HHI_p (float) : Herfindahl-Hirschman Index for elemental production
        Element.HHI_r (float) : Herfindahl-Hirschman Index for elemental reserves

    Raises:
        NameError: Element not found in element.txt
        Warning: Element not found in Eigenvalues.csv
    """

    def __init__(self, symbol):
        """Initialise Element class.

        Args:
            symbol (str): Chemical element symbol (e.g. 'Fe')

        Raises:
            NameError: if no elemental data exists for `symbol`.
        """
        # copy=False is safe here: the dataset is only read, never mutated.
        dataset = data_loader.lookup_element_data(symbol, copy=False)
        if dataset is None:  # was `== None`; identity test is the idiom
            raise NameError("Elemental data for {0} not found.".format(symbol))

        # Set coordination-environment data from the Shannon-radius data.
        # As above, it is safe to use copy = False with this Get* function.
        shannon_data = data_loader.lookup_element_shannon_radius_data(
            symbol, copy=False)
        if shannon_data is not None:
            coord_envs = [row['coordination'] for row in shannon_data]
        else:
            coord_envs = None

        HHI_scores = data_loader.lookup_element_hhis(symbol)
        if HHI_scores is None:
            HHI_scores = (None, None)

        sse_data = data_loader.lookup_element_sse_data(symbol)
        sse = sse_data['SolidStateEnergy'] if sse_data else None

        sse_Pauling_data = data_loader.lookup_element_sse_pauling_data(symbol)
        sse_Pauling = (sse_Pauling_data['SolidStateEnergyPauling']
                       if sse_Pauling_data else None)

        # Assign all attributes in one pass for a compact, uniform setup.
        for attribute, value in (
                ('coord_envs', coord_envs),
                ('covalent_radius', dataset['r_cov']),
                ('crustal_abundance', dataset['Abundance']),
                ('e_affinity', dataset['e_affinity']),
                ('eig', dataset['p_eig']),
                ('eig_s', dataset['s_eig']),
                ('HHI_p', HHI_scores[0]),
                ('HHI_r', HHI_scores[1]),
                ('ionpot', dataset['ion_pot']),
                ('mass', dataset['Mass']),
                ('name', dataset['Name']),
                ('number', dataset['Z']),
                ('oxidation_states',
                 data_loader.lookup_element_oxidation_states(symbol)),
                ('oxidation_states_icsd',
                 data_loader.lookup_element_oxidation_states_icsd(symbol)),
                ('oxidation_states_sp',
                 data_loader.lookup_element_oxidation_states_sp(symbol)),
                ('dipol', dataset['dipol']),
                ('pauling_eneg', dataset['el_neg']),
                ('SSE', sse),
                ('SSEPauling', sse_Pauling),
                ('symbol', symbol),
                #('vdw_radius', dataset['RVdW']),
        ):
            setattr(self, attribute, value)
class Species(Element):
    """
    Class providing data for elements in a given chemical environment.

    In addition to the standard properties from the periodic table
    (inherited from the Element class), Species objects use the
    oxidation state and coordination environment to provide further
    properties.

    Attributes:
        Species.symbol: Elemental symbol used to retrieve data
        Species.name: Full name of element
        Species.oxidation: Oxidation state of species (signed integer)
        Species.coordination: Coordination number of species (integer)
        Species.pauling_eneg: Pauling electronegativity (0.0 if unknown)
        Species.ionpot: Ionisation potential in eV (0.0 if unknown)
        Species.e_affinity: Electron affinity in eV (0.0 if unknown)
        Species.eig: Electron eigenvalue (units unknown)
            N.B. For Cu, Au and Ag this defaults to d-orbital.
        Species.shannon_radius: Shannon radius of Species.
        Species.ionic_radius: Ionic radius of Species.

    Raises:
        NameError: Element not found in element.txt
        Warning: Element not found in Eigenvalues.csv
    """

    def __init__(self, symbol, oxidation, coordination=4,
                 radii_source="shannon"):
        """Initialise a Species from symbol, oxidation state and coordination.

        Args:
            symbol (str): Chemical element symbol (e.g. 'Fe')
            oxidation (int): Signed oxidation state
            coordination (int): Coordination number (default 4)
            radii_source (str): 'shannon' or 'extended' radius dataset
        """
        Element.__init__(self, symbol)
        self.oxidation = oxidation
        self.coordination = coordination

        # Select the radius dataset.
        # BUG FIX: an unrecognised radii_source previously left
        # `shannon_data` unbound, so the checks below raised NameError.
        # Default it to None so lookup simply yields no radii.
        if radii_source == "shannon":
            shannon_data = data_loader.lookup_element_shannon_radius_data(symbol)
        elif radii_source == "extended":
            shannon_data = \
                data_loader.lookup_element_shannon_radius_data_extendedML(symbol)
        else:
            shannon_data = None
            print("Data source not recognised. Please select 'shannon' or 'extended'. ")

        # Shannon (crystal) radius for this oxidation state + coordination.
        self.shannon_radius = None
        if shannon_data:
            for dataset in shannon_data:
                if (dataset['charge'] == oxidation and
                        str(coordination) ==
                        dataset['coordination'].split('_')[0]):
                    self.shannon_radius = dataset['crystal_radius']

        # Ionic radius under the same matching rule.
        self.ionic_radius = None
        if shannon_data:
            for dataset in shannon_data:
                if (dataset['charge'] == oxidation and
                        str(coordination) ==
                        dataset['coordination'].split('_')[0]):
                    self.ionic_radius = dataset['ionic_radius']

        # Get SSE_2015 (revised) for the oxidation state.
        # (Defaults to None; no explicit else branch needed.)
        self.SSE_2015 = None
        sse_2015_data = data_loader.lookup_element_sse2015_data(symbol)
        if sse_2015_data:
            for dataset in sse_2015_data:
                if dataset['OxidationState'] == oxidation:
                    self.SSE_2015 = dataset['SolidStateEnergy2015']
def ordered_elements(x, y):
    """
    Return a list of element symbols, ordered by proton number in the range x -> y

    Args:
        x,y : integers

    Returns:
        list: Ordered list of element symbols
    """
    # ordered_periodic.txt holds one element per line, symbol first,
    # in proton-number order; row i corresponds to Z = i + 1.
    with open(path.join(data_directory, 'ordered_periodic.txt'), 'r') as f:
        symbols = [line.split()[0] for line in f.readlines()]
    # Inclusive range of proton numbers -> half-open list slice.
    return symbols[x - 1:y]
def element_dictionary(elements=None):
    """
    Create a dictionary of initialised smact.Element objects

    Accessing an Element from a dict is significantly faster than
    repeatedly initialising them on-demand within nested loops.

    Args:
        elements (iterable of strings) : Elements to include. If None,
            use all elements up to 103.

    Returns:
        dict: Dictionary with element symbols as keys and smact.Element
            objects as data
    """
    # Identity comparison is the correct idiom for None (was `== None`).
    if elements is None:
        elements = ordered_elements(1, 103)
    return {symbol: Element(symbol) for symbol in elements}
def are_eq(A, B, tolerance=1e-4):
    """Check two arrays for tolerance [1,2,3]==[1,2,3]; but [1,3,2]!=[1,2,3]

    Args:
        A, B (lists): 1-D list of values for approximate equality comparison
        tolerance: numerical precision for equality condition

    Returns:
        boolean
    """
    # Length mismatch can never be equal; short-circuit early.
    if len(A) != len(B):
        return False
    # all() stops at the first mismatch (the original while loop always
    # scanned every element even after finding a difference).
    return all(abs(a - b) <= tolerance for a, b in zip(A, B))
def lattices_are_same(lattice1, lattice2, tolerance=1e-4):
    """Checks for the equivalence of two lattices

    Args:
        lattice1,lattice2 : ASE crystal class
        tolerance: numerical precision for position equality (forwarded
            to are_eq)

    Returns:
        boolean
    """
    # NOTE(review): `i` counts (site1, site2) pairs that match in both
    # symbol and position across the full cross-product; the lattices are
    # declared equal when the count reaches len(lattice1). A site in
    # lattice2 matching multiple lattice1 sites could presumably inflate
    # the count — confirm inputs are duplicate-free before relying on this.
    lattices_are_same = False
    i = 0
    for site1 in lattice1:
        for site2 in lattice2:
            if site1.symbol == site2.symbol:
                if are_eq(site1.position,
                          site2.position,
                          tolerance=tolerance):
                    i += 1
    if i == len(lattice1):
        lattices_are_same = True
    return lattices_are_same
def _gcd_recursive(*args):
"""
Get the greatest common denominator among any number of ints
"""
if len(args) == 2:
return gcd(*args)
else:
return gcd(args[0], _gcd_recursive(*args[1:]))
def _isneutral(oxidations, stoichs):
"""
Check if set of oxidation states is neutral in given stoichiometry
Args:
oxidations (tuple): Oxidation states of a set of oxidised elements
stoichs (tuple): Stoichiometry values corresponding to `oxidations`
"""
return 0 == sum(map(multiply, oxidations, stoichs))
def neutral_ratios_iter(oxidations, stoichs=False, threshold=5):
    """
    Iterator for charge-neutral stoichiometries

    Given a list of oxidation states of arbitrary length, yield ratios in which
    these form a charge-neutral compound. Stoichiometries may be provided as a
    set of legal stoichiometries per site (e.g. a known family of compounds);
    otherwise all unique ratios are tried up to a threshold coefficient.

    Args:
        oxidations : list of integers
        stoichs : stoichiometric ratios for each site (if provided)
        threshold : single threshold to go up to if stoichs are not provided

    Yields:
        tuple: ratio that gives neutrality
    """
    if not stoichs:
        stoichs = [list(range(1, threshold + 1))] * len(oxidations)
    # Enumerate every combination of per-site coefficients; keep only
    # charge-neutral ones in simplest form (gcd of the ratio equals 1).
    for ratio in itertools.product(*stoichs):
        if _isneutral(oxidations, ratio) and _gcd_recursive(*ratio) == 1:
            yield ratio
def neutral_ratios(oxidations, stoichs=False, threshold=5):
    """
    Get a list of charge-neutral compounds

    Given a list of oxidation states of arbitrary length, yield ratios in which
    these form a charge-neutral compound. Stoichiometries may be provided as a
    set of legal stoichiometries per site (e.g. a known family of compounds);
    otherwise all unique ratios are tried up to a threshold coefficient.

    Given a list of oxidation states of arbitrary length it searches for
    neutral ratios in a given ratio of sites (stoichs) or up to a given
    threshold.

    Args:
        oxidations (list of ints): Oxidation state of each site
        stoichs (list of positive ints): A selection of valid stoichiometric
            ratios for each site
        threshold (int): Maximum stoichiometry coefficient; if no 'stoichs'
            argument is provided, all combinations of integer coefficients up
            to this value will be tried.

    Returns:
        (exists, allowed_ratios) (tuple):

        exists *bool*:
            True if any ratio exists, otherwise False

        allowed_ratios *list of tuples*:
            Ratios of atoms in given oxidation
            states which yield a charge-neutral structure
    """
    # list(...) is the direct way to materialise the iterator
    # (was an identity list comprehension); also fixes the "ifc" typo above.
    allowed_ratios = list(neutral_ratios_iter(oxidations,
                                              stoichs=stoichs,
                                              threshold=threshold))
    return (len(allowed_ratios) > 0, allowed_ratios)
# List of metals
metals = ['Li','Be','Na','Mg','Al','K','Ca','Sc','Ti','V','Cr','Mn','Fe','Co','Ni',
'Cu','Zn','Ga','Ge','Rb','Sr','Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn','Sb',
'Cs','Ba','La','Ce', 'Pr','Nd','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb',
'Lu','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg','Tl','Pb','Bi','Po','Fr','Ra','Ac',
'Th','Pa','U','Np','Pu','Am','Cm','Bk','Cf','Es','Fm','Md','No']
# List of elements that can be considered 'anions'.
# Similar to the Pymatgen 'electronegative elements' but excluding H, B, C & Si.
anions = ["N", "P", "As", "Sb",
"O", "S", "Se", "Te",
"F", "Cl", "Br", "I"]
# List of d-block metals
d_block = ['Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn',
'Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd',
'La','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg']
| |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This file contains the definition of the class ParameterExploration """
from xml.sax.saxutils import unescape
import vistrails.core.db.action
from vistrails.db.domain import DBParameterExploration, IdScope
from vistrails.core.paramexplore.function import PEFunction
from vistrails.core.vistrail.module_function import ModuleFunction
from vistrails.core.vistrail.module_param import ModuleParam
from vistrails.core.vistrail.module import Module
from vistrails.core.modules.paramexplore import IntegerLinearInterpolator, \
FloatLinearInterpolator, RGBColorInterpolator, HSVColorInterpolator,\
UserDefinedFunctionInterpolator
from ast import literal_eval
import unittest
import copy
###############################################################################
class ParameterExploration(DBParameterExploration):
"""ParameterExploration
"""
def __init__(self, *args, **kwargs):
DBParameterExploration.__init__(self, *args, **kwargs)
self.set_defaults()
def __copy__(self):
""" __copy__() -> ParameterExploration - Returns a clone of itself """
return ParameterExploration.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = DBParameterExploration.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = ParameterExploration
cp.set_defaults(self)
return cp
def set_defaults(self, other=None):
if other is None:
self.changed = False
else:
self.changed = other.changed
@staticmethod
def convert(_parameter_exploration):
_parameter_exploration.__class__ = ParameterExploration
for function in _parameter_exploration.db_functions:
PEFunction.convert(function)
_parameter_exploration.set_defaults()
##########################################################################
# Properties
id = DBParameterExploration.db_id
action_id = DBParameterExploration.db_action_id
user = DBParameterExploration.db_user
date = DBParameterExploration.db_date
_dims = DBParameterExploration.db_dims
_layout = DBParameterExploration.db_layout
name = DBParameterExploration.db_name
functions = DBParameterExploration.db_functions
def get_dims(self):
try:
return literal_eval(self._dims)
except Exception:
return []
def set_dims(self, d):
try:
_dims = repr(d)
except Exception:
_dims = []
dims = property(get_dims, set_dims)
def get_layout(self):
try:
return literal_eval(self._layout)
except Exception:
return {}
def set_layout(self, l):
try:
_layout = repr(l)
except Exception:
_layout = '{}'
layout = property(get_layout, set_layout)
def collectParameterActions(self, pipeline):
""" collectParameterActions() -> list
Return a list of action lists corresponding to each dimension
"""
if not pipeline:
return
unescape_dict = { "'":"'", '"':'"', '
':'\n' }
from vistrails.core.modules.module_registry import get_module_registry
reg = get_module_registry()
parameterValues = [[], [], [], []]
# a list of added functions [(module_id, function_name)] = function
added_functions = {}
vistrail_vars = []
function_actions = []
for i in xrange(len(self.functions)):
pe_function = self.functions[i]
module = pipeline.db_get_object(Module.vtType, pe_function.module_id)
# collect overridden vistrail vars
if module.is_vistrail_var():
vistrail_vars.append(module.get_vistrail_var())
port_spec = reg.get_input_port_spec(module, pe_function.port_name)
tmp_f_id = -1L
tmp_p_id = -1L
for param in pe_function.parameters:
port_spec_item = port_spec.port_spec_items[param.pos]
dim = param.dimension
if dim not in [0, 1, 2, 3]:
continue
count = self.dims[dim]
# find interpolator values
values = []
text = '%s' % unescape(param.value, unescape_dict)
if param.interpolator == 'Linear Interpolation':
# need to figure out type
if port_spec_item.module == "Integer":
i_range = literal_eval(text)
p_min = int(i_range[0])
p_max =int(i_range[1])
values = IntegerLinearInterpolator(p_min, p_max,
count).get_values()
if port_spec_item.module == "Float":
i_range = literal_eval(text)
p_min = float(i_range[0])
p_max =float(i_range[1])
values = FloatLinearInterpolator(p_min, p_max,
count).get_values()
elif param.interpolator == 'RGB Interpolation':
i_range = literal_eval(text)
p_min = str(i_range[0])
p_max =str(i_range[1])
values = RGBColorInterpolator(p_min, p_max,
count).get_values()
elif param.interpolator == 'HSV Interpolation':
i_range = literal_eval(text)
p_min = str(i_range[0])
p_max =str(i_range[1])
values = HSVColorInterpolator(p_min, p_max,
count).get_values()
elif param.interpolator == 'List':
p_module = port_spec_item.descriptor.module
values = [p_module.translate_to_python(m)
for m in literal_eval(text)]
elif param.interpolator == 'User-defined Function':
p_module = port_spec_item.descriptor.module
values = UserDefinedFunctionInterpolator(p_module,
text, count).get_values()
if not values:
return None
# find parameter or create one
function = [f for f in module.functions
if f.name == port_spec.name]
if function:
function = function[0]
else:
try:
function = added_functions[(module.id,port_spec.name)]
except KeyError:
# add to function list
params = []
for psi in port_spec.port_spec_items:
parameter = ModuleParam(id=tmp_p_id,
pos=psi.pos,
name='<no description>',
val=psi.default,
type=psi.descriptor.sigstring)
params.append(parameter)
tmp_p_id -= 1
function = ModuleFunction(id=tmp_f_id,
pos=module.getNumFunctions(),
name=port_spec.name,
parameters=params)
tmp_f_id -= 1
added_functions[(module.id, port_spec.name)]=function
action = vistrails.core.db.action.create_action([('add',
function,
module.vtType,
module.id)])
function_actions.append(action)
parameter = function.params[port_spec_item.pos]
# find old parameter
old_param = parameter
actions = []
for v in values:
desc = port_spec_item.descriptor
if not isinstance(v, str):
str_value = desc.module.translate_to_string(v)
else:
str_value = v
new_param = ModuleParam(id=tmp_p_id,
pos=old_param.pos,
name=old_param.name,
alias=old_param.alias,
val=str_value,
type=old_param.type)
tmp_p_id -= 1
action_spec = ('change', old_param, new_param,
function.vtType, function.real_id)
action = vistrails.core.db.action.create_action([action_spec])
actions.append(action)
parameterValues[dim].append(actions)
return [zip(*p) for p in parameterValues], function_actions, vistrail_vars
def __eq__(self, other):
""" __eq__(other: ParameterExploration) -> boolean
Returns True if self and other have the same attributes. Used by ==
operator.
"""
if type(self) != type(other):
return False
if self.action_id != other.action_id:
return False
if self._dims != other._dims:
return False
if self._layout != other._layout:
return False
if len(self.functions) != len(other.functions):
return False
for p,q in zip(self.functions, other.functions):
if p != q:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
# Testing
class TestParameterExploration(unittest.TestCase):
    """Tests for ParameterExploration copying and equality."""

    def create_pe(self, id_scope=None):
        """Build a minimal ParameterExploration for the tests.

        BUGFIX: the default was the mutable `IdScope()` instance, created
        once and shared across every call; use None and create a fresh
        scope per call instead.
        """
        if id_scope is None:
            id_scope = IdScope()
        pe = ParameterExploration(
            id=id_scope.getNewId(ParameterExploration.vtType),
            action_id=6,
            user='tommy',
            date='2007-11-23 12:48',
            dims='[1,2]',
            layout='{1:"normal"}',
            name='test-pe',
            functions=[])
        return pe

    def test_copy(self):
        """A shallow copy keeps the id; do_copy(new_ids=True) remaps it."""
        id_scope = IdScope()
        pe1 = self.create_pe(id_scope)
        pe2 = copy.copy(pe1)
        # assertEquals/assertNotEquals are deprecated aliases of
        # assertEqual/assertNotEqual (removed in recent Pythons).
        self.assertEqual(pe1, pe2)
        self.assertEqual(pe1.id, pe2.id)
        pe3 = pe1.do_copy(True, id_scope, {})
        self.assertEqual(pe1, pe3)
        self.assertNotEqual(pe1.id, pe3.id)

    def testComparisonOperators(self):
        """ Test comparison operators """
        p = self.create_pe()
        q = self.create_pe()
        self.assertEqual(p, q)
        q.action_id = 8
        self.assertNotEqual(p, q)
        q.action_id = 6
        q._dims = '[1,4]'
        self.assertNotEqual(p, q)
        q._dims = '[1,2]'
        q._layout = '{1:"different"}'
        self.assertNotEqual(p, q)
        q._layout = p._layout
        q.functions = [1]
        self.assertNotEqual(p, q)
# Run this module's unit tests when it is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
import difflib
from xml.dom import Node
from htmltreediff.text import is_text_junk
from htmltreediff.util import (
copy_dom,
HashableTree,
FuzzyHashableTree,
is_text,
get_child,
get_location,
remove_node,
insert_or_append,
attribute_dict,
walk_dom,
)
def match_node_hash(node):
    """Hash key for exact matching: text nodes match on their value,
    elements on their entire subtree."""
    return node.nodeValue if is_text(node) else HashableTree(node)
def fuzzy_match_node_hash(node):
    """Hash key for fuzzy matching: text nodes match on their value,
    elements on a fuzzy hash of their subtree."""
    return node.nodeValue if is_text(node) else FuzzyHashableTree(node)
class Differ():
    """Computes an edit script (inserts/deletes) transforming one DOM tree
    into another, by recursively matching children level by level.
    """
    def __init__(self, old_dom, new_dom):
        # Work on copies: diff_location/insert/delete mutate old_dom in
        # place while replaying the script.
        self.edit_script = []
        self.old_dom = copy_dom(old_dom)
        self.new_dom = copy_dom(new_dom)
    def get_edit_script(self):
        """
        Take two doms, and output an edit script transforming one into the other.
        edit script output format
        Actions:
        - ('delete', location, node_properties)
          delete the node, and all descendants
        - ('insert', location, node_properties)
          insert the node at the given location, with the given properties
        The location argument is a tuple of indices.
        The node properties is a dictionary possibly containing keys:
        {node_type, tag_name, attributes, node_value.}
        Any properties that would be empty may be ommitted. attributes is an attribute dictionary.
        """
        # start diff at the body element
        self.diff_location([], [])
        return self.edit_script
    def diff_location(self, old_location, new_location):
        """Diff the children of the nodes at the two locations, emitting
        edit-script entries and recursing into inexact matches."""
        # Here we match up the children of the given locations. This is done in
        # three steps. First we use full tree equality to match up children
        # that are identical all the way down. Then, we use a heuristic
        # function to match up children that are similar, based on text
        # content. Lastly, we just use node tag types to match up elements. For
        # the text-similar matches and the tag-only matches, we still have more
        # work to do, so we recurse on these. The non-matching parts that
        # remain are used to output edit script entries.
        old_children = list(get_location(self.old_dom, old_location).childNodes)
        new_children = list(get_location(self.new_dom, new_location).childNodes)
        if not old_children and not new_children:
            return
        matching_blocks, recursion_indices = self.match_children(old_children, new_children)
        # Apply changes for this level.
        for tag, i1, i2, j1, j2 in adjusted_ops(get_opcodes(matching_blocks)):
            if tag == 'delete':
                assert j1 == j2
                # delete range from right to left
                for index, child in reversed(list(enumerate(old_children[i1:i2]))):
                    self.delete(old_location + [i1 + index], child)
                    old_children.pop(i1 + index)
            elif tag == 'insert':
                assert i1 == i2
                # insert range from left to right
                for index, child in enumerate(new_children[j1:j2]):
                    self.insert(new_location + [i1 + index], child)
                    old_children.insert(i1 + index, child)
            # Keep the pending recursion targets aligned with the indices
            # after this insert/delete shifted the old side.
            recursion_indices = list(adjust_indices(recursion_indices, i1, i2, j1, j2))
        # Recurse to deeper level.
        for old_index, new_index in recursion_indices:
            self.diff_location(old_location + [old_index], new_location + [new_index])
    def match_children(self, old_children, new_children):
        """Return (matching_blocks, recursion_indices): exact and fuzzy
        matches merged, plus the index pairs of fuzzy matches to recurse on."""
        # Find whole-tree matches and fuzzy matches.
        sm = match_blocks(match_node_hash, old_children, new_children)
        # If the match is very poor, pretend there were no exact matching blocks at all.
        if sm.ratio() < 0.3:
            matching_blocks = [(len(old_children), len(new_children), 0)]
        else:
            matching_blocks = sm.get_matching_blocks()
        # In each gap between exact matches, find fuzzy matches.
        gaps = get_nonmatching_blocks(matching_blocks)
        fuzzy_matching_blocks = [(0, 0, 0)]
        for nonmatch in gaps:
            alo, ahi, blo, bhi = nonmatch
            sm_fuzzy = match_blocks(
                fuzzy_match_node_hash,
                old_children[alo:ahi],
                new_children[blo:bhi],
            )
            blocks = sm_fuzzy.get_matching_blocks()
            # Move blocks over to the position of the gap.
            blocks = [
                (alo + a, blo + b, size)
                for a, b, size in blocks
            ]
            del fuzzy_matching_blocks[-1] # Remove old sentinel.
            fuzzy_matching_blocks.extend(blocks)
        # We will recurse on each tree that was a fuzzy match at this level.
        recursion_indices = [] # List of tuples, (old_index, new_index)
        for match in fuzzy_matching_blocks:
            for old_index, new_index in match_indices(match):
                recursion_indices.append((old_index, new_index))
        # Zip together the fuzzy and exact matches. They are treated the same
        # from this point forward, except for we recurse on fuzzy matches.
        matching_blocks = merge_blocks(matching_blocks, fuzzy_matching_blocks)
        return matching_blocks, recursion_indices
    def delete(self, location, node):
        """Record deletion of node (and descendants) and remove it from
        the working old DOM."""
        # delete from the bottom up, children before parent, right to left
        for child_index, child in reversed(list(enumerate(node.childNodes))):
            self.delete(location + [child_index], child)
        # write deletion to the edit script
        self.edit_script.append((
            'delete',
            location,
            node_properties(node),
        ))
        # actually delete the node
        assert node.parentNode == get_location(self.old_dom, location[:-1])
        assert node.ownerDocument == self.old_dom
        remove_node(node)
    def insert(self, location, node):
        """Record insertion of node (and descendants) and splice a shallow
        copy into the working old DOM."""
        # write insertion to the edit script
        self.edit_script.append((
            'insert',
            location,
            node_properties(node),
        ))
        # actually insert the node
        node_copy = node.cloneNode(deep=False)
        parent = get_location(self.old_dom, location[:-1])
        next_sibling = get_child(parent, location[-1])
        insert_or_append(parent, node_copy, next_sibling)
        # insert from the top down, parent before children, left to right
        for child_index, child in enumerate(node.childNodes):
            self.insert(location + [child_index], child)
def adjusted_ops(opcodes):
    """
    Iterate through opcodes, turning them into a series of insert and delete
    operations, adjusting indices to account for the size of insertions and
    deletions.

    >>> def sequence_opcodes(old, new): return difflib.SequenceMatcher(a=old, b=new).get_opcodes()
    >>> list(adjusted_ops(sequence_opcodes('abc', 'b')))
    [('delete', 0, 1, 0, 0), ('delete', 1, 2, 1, 1)]
    >>> list(adjusted_ops(sequence_opcodes('b', 'abc')))
    [('insert', 0, 0, 0, 1), ('insert', 2, 2, 2, 3)]
    >>> list(adjusted_ops(sequence_opcodes('axxa', 'aya')))
    [('delete', 1, 3, 1, 1), ('insert', 1, 1, 1, 2)]
    >>> list(adjusted_ops(sequence_opcodes('axa', 'aya')))
    [('delete', 1, 2, 1, 1), ('insert', 1, 1, 1, 2)]
    >>> list(adjusted_ops(sequence_opcodes('ab', 'bc')))
    [('delete', 0, 1, 0, 0), ('insert', 1, 1, 1, 2)]
    >>> list(adjusted_ops(sequence_opcodes('bc', 'ab')))
    [('insert', 0, 0, 0, 1), ('delete', 2, 3, 2, 2)]
    """
    while opcodes:
        tag, i1, i2, j1, j2 = opcodes.pop(0)
        if tag == 'equal':
            # Matching regions produce no edit operations.
            continue
        if tag == 'replace':
            # Expand a replace into a delete followed by an insert and
            # reprocess both.  Pay careful attention to the indices here:
            # the delete spans (j1, j1) on the new side and the insert
            # starts at i2 -- deliberate, not a typo.
            opcodes = [
                ('delete', i1, i2, j1, j1),
                ('insert', i2, i2, j1, j2),
            ] + opcodes
            continue
        yield (tag, i1, i2, j1, j2)
        # Shift the remaining old-side indices by the net effect of the
        # operation just emitted.
        if tag == 'delete':
            shift = i1 - i2
        elif tag == 'insert':
            shift = j2 - j1
        else:
            shift = 0
        opcodes = [(t, a + shift, b + shift, c, d)
                   for (t, a, b, c, d) in opcodes]
def node_properties(node):
    """Return a dict describing node (type, name, value, attributes).

    Empty entries are omitted, and text nodes never report a node name.
    """
    props = {
        'node_type': node.nodeType,
        'node_name': node.nodeName,
        'node_value': node.nodeValue,
        'attributes': attribute_dict(node),
    }
    if node.nodeType == Node.TEXT_NODE:
        del props['node_name']  # don't include node name for text nodes
    return {key: value for key, value in props.items() if value}
def match_indices(match):
    """Yield index tuples (old_index, new_index) for each place in the match."""
    a_start, b_start, size = match
    for offset in range(size):
        yield a_start + offset, b_start + offset
def get_opcodes(matching_blocks):
    """Use difflib to get the opcodes for a set of matching blocks."""
    matcher = difflib.SequenceMatcher(a=[], b=[])
    # Inject the precomputed blocks so difflib skips its own matching pass
    # and only translates blocks into opcodes.
    matcher.matching_blocks = matching_blocks
    return matcher.get_opcodes()
def _is_junk(hashable_node):
    """A hashable node is junk if it is junk text, or a tree whose text
    content is entirely junk (or absent)."""
    if isinstance(hashable_node, basestring):
        return is_text_junk(hashable_node)
    # A tree is junk unless some descendant text node carries non-junk text.
    return not any(
        is_text(descendant) and not is_text_junk(descendant.nodeValue)
        for descendant in walk_dom(hashable_node.node)
    )
def match_blocks(hash_func, old_children, new_children):
    """Use difflib to find matching blocks between the two child lists,
    comparing children by hash_func."""
    old_hashes = [hash_func(child) for child in old_children]
    new_hashes = [hash_func(child) for child in new_children]
    return difflib.SequenceMatcher(_is_junk, a=old_hashes, b=new_hashes)
def get_nonmatching_blocks(matching_blocks):
    """Given a list of matching blocks, output the gaps between them.

    Non-matches have the format (alo, ahi, blo, bhi). This specifies two index
    ranges, one in the A sequence, and one in the B sequence.
    """
    a_end = b_end = 0
    for a_start, b_start, size in matching_blocks:
        yield (a_end, a_start, b_end, b_start)
        a_end = a_start + size
        b_end = b_start + size
def merge_blocks(a_blocks, b_blocks):
    """Given two lists of blocks, combine them, in the proper order.

    Ensure that there are no overlaps, and that they are for sequences of the
    same length.
    """
    # Both lists must end with the same zero-size sentinel block.
    assert a_blocks[-1][2] == 0 and b_blocks[-1][2] == 0
    assert a_blocks[-1] == b_blocks[-1]
    combined = sorted(set(a_blocks) | set(b_blocks))
    # Verify the combined blocks do not overlap on either sequence.
    a_end = b_end = 0
    for a_start, b_start, size in combined:
        assert a_end <= a_start
        assert b_end <= b_start
        a_end = a_start + size
        b_end = b_start + size
    return combined
def adjust_indices(indices, i1, i2, j1, j2):
    """Yield (old_index, new_index) pairs with old indices at or beyond i2
    shifted by the net size change of replacing [i1, i2) with [j1, j2)."""
    delta = (j2 - j1) - (i2 - i1)
    for old_index, new_index in indices:
        adjusted = old_index + delta if old_index >= i2 else old_index
        yield adjusted, new_index
| |
# routines for generating a list of coordinate offsets for various template structures
from itertools import product

import numpy as np
class Template:
    """Class template for generating lists of template voxels.

    Parameters
    ----------
    type: string
        Required by constructor. The type of template; currently available types are:
        - 'RectBox' - rectangular box (1,2,3,4 dimensions); template origin is center of box
        - 'RectShell' - shell of rectangular box (1,2,3,4 dimensions); template origin is center of shell
        - 'Ellipsoid' - ellipsoid (1,2,3,4 dimensions); template origin is center of ellipsoid
        - 'EllipsoidShell' - ellipsoidal shell (1,2,3,4 dimensions); template origin is center of shell
        - 'Line' - linear template; template origin is first point of line
        - 'Notch' - notch template; template origin is point about which notch is built
        - 'Cone' - cone template; template origin is start of half cone
    sizes: 1D int array (can be empty)
        Attributes of sizes required for constructing template
    dimension: int
        Dimension of template
    inclusion: bool
        Whether or not to include anchor point (i.e. [0], [0,0],...)
    handedness: 1D int array
        If there are axial asymmetries in the template (e.g. Notch) can pass in a
        vector with +1 for 'right' and -1 for 'left' (default is [1], or [1,1], or...)
    axbase: List of ints (each list of length = dimension)
        Basis vector specifying axis, when appropriate, for direction of template
        (can be empty) - component lengths will be ignored; only whether the
        component is zero or nonzero, and the sign, will be considered (i.e. only
        co-ordinate axes and '45 degree' lines can be template axes), e.g.::

            [1,0] ~ [10,0] ~ x-axis in 2D
            [0,1,0] ~ [0,33,0] ~ y-axis in 3D
            [1,-1] ~ [30,-20] ~ [108,-1] ~ 135 degree axis in 2D

        If axbase is empty the template picks axes by convention:
        - templates requiring a single axis (e.g. line, notch, cone) use the
          positive direction of the first dimension
        - templates requiring multiple axes (rectangular parallelepipeds,
          ellipsoids) put the largest size along the first dimension, the next
          largest along the second, etc.
    anchoff: list of ints
        Offset of anchor point from template [0,0,0] (usually [0,0,0])
    shift: List of int
        Shift applied to every point in the offset array - useful, e.g., when
        building co-occurrence arrays from offset templates: build one template
        with no shift and another with an appropriate shift, pass each to the
        feature-space cluster algorithm, then to the co-occurrence matrix
        builder, and that to the texture measure generator.
    """

    def __init__(self, type, sizes, dimension, inclusion, handedness=None, axbase=None, anchoff=None, shift=None):
        self.type = type
        self.sizes = sizes
        self.dim = dimension
        self.me = inclusion
        self.offsets = []
        # Defaults generalized to any dimension (originally spelled out only
        # for dims 1-4, leaving None for larger dims).
        self.handedness = handedness if handedness is not None else [1] * self.dim
        self.axbase = axbase if axbase is not None else [1] + [0] * (self.dim - 1)
        self.anchoff = anchoff if anchoff is not None else [0] * self.dim
        self.shift = shift if shift is not None else [0] * self.dim

        ################# RECTBOX #######################
        if type == "RectBox":
            self._warn_bad_sizes(type, self.dim)
            # Per-axis half-open index limits centered on the origin.
            # BUGFIX: use sizes[d] for axis d (the original always read
            # sizes[0], ignoring the per-axis sizes it validated above).
            # BUGFIX: use floor division so offsets stay integral under
            # Python 3 (true division produced float coordinates).
            lims = []
            for d in range(self.dim):
                size = self.sizes[d]
                lo = -(size // 2)
                hi = size // 2 if size % 2 == 0 else size // 2 + 1
                lims.append((lo, hi))
            for combo in product(*(range(lo, hi) for lo, hi in lims)):
                self.offsets.append(list(combo))
            origin = [0] * self.dim
            if origin in self.offsets:
                self.offsets.remove(origin)  # might put back later
        ################# RECTSHELL #######################
        elif type == "RectShell":
            self._warn_bad_sizes(type, self.dim)
            subs = [self.sizes[d] // 2 for d in range(self.dim)]
            for combo in product(*(range(self.sizes[d]) for d in range(self.dim))):
                # Keep only points lying on at least one face of the box.
                on_face = any(c == 0 or c == self.sizes[d] - 1
                              for d, c in enumerate(combo))
                if on_face:
                    self.offsets.append([c - subs[d] for d, c in enumerate(combo)])
        ################# ELLIPSOID #######################
        elif type == "Ellipsoid":
            self._warn_bad_sizes(type, self.dim)
            if self.dim == 1:  # same as 1D rectangular box
                sub = self.sizes[0] // 2
                for i in range(self.sizes[0]):
                    self.offsets.append([i - sub])
            elif self.dim in (2, 3):
                self.offsets = self._ellipsoid_offsets(lambda b: b <= 1.0)
            elif self.dim == 4:
                # BUGFIX: was a Python-2 ``print`` statement split across two
                # lines, which printed a blank line in Python 3.
                print("Sorry 4D ellipsoids not yet implemented")
        ################# ELLIPSOIDSHELL #######################
        elif type == "EllipsoidShell":
            self._warn_bad_sizes(type, self.dim)
            if self.dim == 1:  # same as 1D rectangular shell
                sub = self.sizes[0] // 2
                for i in range(self.sizes[0]):
                    if i == 0 or i == self.sizes[0] - 1:
                        self.offsets.append([i - sub])
            elif self.dim in (2, 3):
                # FIXME: 2/3-D ellipsoidal shells are untested; the 0.9/1.1
                # band needs verification (see _ellipsoid_offsets note).
                self.offsets = self._ellipsoid_offsets(lambda b: 0.9 < b < 1.1)
            elif self.dim == 4:
                print("Sorry 4D ellipsoidal shells not yet implemented")
        ################# LINE #######################
        elif type == "Line":
            self._warn_bad_sizes(type, 1)
            # Only the sign pattern of axbase defines the direction.
            proto = np.sign(self.axbase)
            for step in range(1, self.sizes[0] + 1):
                self.offsets.append(list(step * proto))
        ################# NOTCH #######################
        elif type == "Notch":
            self._warn_bad_sizes(type, 1)
            proto = list(np.sign(self.axbase))
            n = self.sizes[0]
            if self.dim == 1:
                # BUGFIX: was a bare Python-2 ``print`` split across lines.
                print("Sorry, no definition for 1 dimensional notches")
            if self.dim == 2:
                # proto must be one of [1,0],[-1,0],[0,1],[0,-1]; else assume [1,0]
                if proto not in ([1, 0], [-1, 0], [0, 1], [0, -1]):
                    proto = [1, 0]
                if proto == [1, 0]:
                    for i in range(n + 1):
                        for j in range(-n, n + 1):
                            if i > 0 or j <= 0:
                                self.offsets.append([i, j])
                if proto == [-1, 0]:
                    for i in range(-n, 1):
                        for j in range(-n, n + 1):
                            if i < 0 or j <= 0:
                                self.offsets.append([i, j])
                if proto == [0, 1]:
                    for i in range(-n, n + 1):
                        for j in range(n + 1):
                            if j > 0 or i >= 0:
                                self.offsets.append([i, j])
                if proto == [0, -1]:
                    for i in range(-n, n + 1):
                        for j in range(-n, 1):
                            if j < 0 or i >= 0:
                                self.offsets.append([i, j])
            if self.dim == 3:
                if proto not in ([1, 0, 0], [-1, 0, 0], [0, 1, 0],
                                 [0, -1, 0], [0, 0, 1], [0, 0, -1]):
                    proto = [1, 0, 0]
                # NOTE(review): the half-space conditions below are not
                # symmetric across axes (e.g. the [0,1,0] case tests k < 0
                # where the [1,0,0] case tests k > 0), and the negative-axis
                # loops span range(-n, n) rather than range(-n, n + 1).
                # Preserved verbatim -- verify against the intended shape.
                if proto == [1, 0, 0]:
                    for i in range(n + 1):
                        for j in range(-n, n + 1):
                            for k in range(-n, n + 1):
                                if i > 0 or (i >= 0 and j > 0) or (j >= 0 and k > 0):
                                    self.offsets.append([i, j, k])
                if proto == [-1, 0, 0]:
                    for i in range(-n, n):
                        for j in range(-n, n + 1):
                            for k in range(-n, n + 1):
                                if i < 0 or (i <= 0 and j < 0) or (j <= 0 and k < 0):
                                    self.offsets.append([i, j, k])
                if proto == [0, 1, 0]:
                    for j in range(n + 1):
                        for k in range(-n, n + 1):
                            for i in range(-n, n + 1):
                                if j > 0 or (j >= 0 and i > 0) or (i >= 0 and k < 0):
                                    self.offsets.append([i, j, k])
                if proto == [0, -1, 0]:
                    for j in range(-n, n):
                        for k in range(-n, n + 1):
                            for i in range(-n, n + 1):
                                if j < 0 or (j <= 0 and i > 0) or (i >= 0 and k < 0):
                                    self.offsets.append([i, j, k])
                if proto == [0, 0, 1]:
                    for k in range(n + 1):
                        for i in range(-n, n + 1):
                            for j in range(-n, n + 1):
                                if k > 0 or (k >= 0 and j < 0) or (j <= 0 and i > 0):
                                    self.offsets.append([i, j, k])
                if proto == [0, 0, -1]:
                    for k in range(-n, n):
                        for i in range(-n, n + 1):
                            for j in range(-n, n + 1):
                                if k < 0 or (k <= 0 and j > 0) or (j >= 0 and i < 0):
                                    self.offsets.append([i, j, k])
            if self.dim == 4:
                print("Sorry 4D notches not yet implemented")
        ################# CONE #######################
        elif type == "Cone":
            # currently only cones along coordinate axes are supported
            self._warn_bad_sizes(type, 1)
            proto = list(np.sign(self.axbase))
            n = self.sizes[0]
            if self.dim == 1:
                for i in range(n):
                    self.offsets.append([i])
            if self.dim == 2:
                if proto not in ([1, 0], [-1, 0], [0, 1], [0, -1]):
                    proto = [1, 0]
                if proto == [1, 0]:
                    for i in range(n):
                        for j in range(-i, i + 1):
                            self.offsets.append([i, j])
                if proto == [-1, 0]:
                    for i in range(n):
                        for j in range(-i, i + 1):
                            self.offsets.append([-i, j])
                if proto == [0, 1]:
                    for j in range(n):
                        for i in range(-j, j + 1):
                            self.offsets.append([i, j])
                if proto == [0, -1]:
                    for j in range(n):
                        for i in range(-j, j + 1):
                            self.offsets.append([i, -j])
            if self.dim == 3:
                if proto not in ([1, 0, 0], [-1, 0, 0], [0, 1, 0],
                                 [0, -1, 0], [0, 0, 1], [0, 0, -1]):
                    proto = [1, 0, 0]
                if proto == [1, 0, 0]:
                    for i in range(n):
                        for j in range(-i, i + 1):
                            for k in range(-i, i + 1):
                                self.offsets.append([i, j, k])
                if proto == [-1, 0, 0]:
                    for i in range(n):
                        for j in range(-i, i + 1):
                            for k in range(-i, i + 1):
                                self.offsets.append([-i, j, k])
                if proto == [0, 1, 0]:
                    for j in range(n):
                        for k in range(-j, j + 1):
                            for i in range(-j, j + 1):
                                self.offsets.append([i, j, k])
                if proto == [0, -1, 0]:
                    for j in range(n):
                        for k in range(-j, j + 1):
                            for i in range(-j, j + 1):
                                self.offsets.append([i, -j, k])
                if proto == [0, 0, 1]:
                    for k in range(n):
                        for i in range(-k, k + 1):
                            for j in range(-k, k + 1):
                                self.offsets.append([i, j, k])
                if proto == [0, 0, -1]:
                    for k in range(n):
                        for i in range(-k, k + 1):
                            for j in range(-k, k + 1):
                                self.offsets.append([i, j, -k])
            if self.dim == 4:
                # just do it in 4th dimension for now
                if proto not in ([0, 0, 0, 1], [0, 0, 0, -1]):
                    proto = [0, 0, 0, 1]
                if proto == [0, 0, 0, 1]:
                    for t in range(n):
                        for i in range(-t, t + 1):
                            for j in range(-t, t + 1):
                                for k in range(-t, t + 1):
                                    self.offsets.append([i, j, k, t])
                if proto == [0, 0, 0, -1]:
                    for t in range(n):
                        for i in range(-t, t + 1):
                            for j in range(-t, t + 1):
                                for k in range(-t, t + 1):
                                    self.offsets.append([i, j, k, -t])
        else:
            # BUGFIX: typo "unknow" -> "unknown" in the diagnostic.
            print(f"Type {type} unknown")

        # Apply the global shift to every offset.
        self.offsets = [list(np.array(off) + np.array(self.shift))
                        for off in self.offsets]
        # Add/Remove anchor point as requested
        if inclusion and self.anchoff not in self.offsets:
            self.offsets.append(self.anchoff)
        if (not inclusion) and self.anchoff in self.offsets:
            self.offsets.remove(self.anchoff)
        # Apply handedness (per-axis mirroring).
        hand = np.array(self.handedness)
        self.offsets = [list(np.array(off) * hand) for off in self.offsets]

    def _warn_bad_sizes(self, type, expected):
        """Warn when the sizes array does not have the expected length."""
        if len(self.sizes) != expected:
            print(f"sizes array is of length {len(self.sizes)} but must be "
                  f"of length {expected} for type {type}")

    def _ellipsoid_offsets(self, keep):
        """Return centered offsets whose normalized quadratic form
        ``sum(((c - sizes[d]//2)^2) / sizes[d]^2)`` satisfies ``keep``.

        NOTE(review): dividing by sizes^2 (diameters, not semi-axes) makes
        the bound very loose -- every box voxel passes ``b <= 1`` and none
        falls in the 0.9..1.1 shell band in 2/3-D.  Preserved as-is to
        match the original behavior; confirm the intended geometry before
        tightening.
        """
        subs = [self.sizes[d] // 2 for d in range(self.dim)]
        squares = [float(self.sizes[d] * self.sizes[d]) for d in range(self.dim)]
        result = []
        for combo in product(*(range(self.sizes[d]) for d in range(self.dim))):
            bounder = sum((c - subs[d]) ** 2 / squares[d]
                          for d, c in enumerate(combo))
            if keep(bounder):
                result.append([c - subs[d] for d, c in enumerate(combo)])
        return result
| |
import django
from django.conf import settings
from django.contrib.sites.models import Site
from django.db import models, connection
from django.db.models.fields.related import create_many_related_manager, ManyToManyRel
from django.utils.translation import ugettext_lazy as _
from .compat import User
class RelationshipStatusManager(models.Manager):
    """Manager with shortcuts for looking up well-known relationship statuses."""

    def following(self):
        """Return the canonical 'following' status."""
        return self.get(from_slug='following')

    def blocking(self):
        """Return the canonical 'blocking' status."""
        return self.get(from_slug='blocking')

    def by_slug(self, status_slug):
        """Return the status whose from/to/symmetrical slug matches *status_slug*."""
        slug_match = (models.Q(from_slug=status_slug)
                      | models.Q(to_slug=status_slug)
                      | models.Q(symmetrical_slug=status_slug))
        return self.get(slug_match)
class RelationshipStatus(models.Model):
    """A kind of relationship (e.g. following/blocking), with a slug for each
    direction of the relation and one for the mutual case."""
    name = models.CharField(_('name'), max_length=100)
    verb = models.CharField(_('verb'), max_length=100)
    from_slug = models.CharField(_('from slug'), max_length=100,
        help_text=_("Denote the relationship from the user, i.e. 'following'"))
    to_slug = models.CharField(_('to slug'), max_length=100,
        help_text=_("Denote the relationship to the user, i.e. 'followers'"))
    symmetrical_slug = models.CharField(_('symmetrical slug'), max_length=100,
        help_text=_("When a mutual relationship exists, i.e. 'friends'"))
    # Visibility controls consumed by views/templates elsewhere in the project.
    login_required = models.BooleanField(_('login required'), default=False,
        help_text=_("Users must be logged in to see these relationships"))
    private = models.BooleanField(_('private'), default=False,
        help_text=_("Only the user who owns these relationships can see them"))
    objects = RelationshipStatusManager()
    class Meta:
        ordering = ('name',)
        verbose_name = _('Relationship status')
        verbose_name_plural = _('Relationship statuses')
    def __unicode__(self):
        return self.name
class Relationship(models.Model):
    """A directed edge between two users carrying a status, per site.

    Uniqueness is per (from_user, to_user, status, site), so the same pair
    of users may hold several relationships with different statuses."""
    from_user = models.ForeignKey(User,
        related_name='from_users', verbose_name=_('from user'))
    to_user = models.ForeignKey(User,
        related_name='to_users', verbose_name=_('to user'))
    status = models.ForeignKey(RelationshipStatus, verbose_name=_('status'))
    created = models.DateTimeField(_('created'), auto_now_add=True)
    weight = models.FloatField(_('weight'), default=1.0, blank=True, null=True)
    # Added by Alex: acceptance-workflow flags (a request starts neither
    # accepted nor rejected; see RelationshipManager.accept()/reject()).
    accepted = models.BooleanField(_('Accepted'), default=False)
    rejected = models.BooleanField(_('Rejected'), default=False)
    # End of addition
    site = models.ForeignKey(Site, default=settings.SITE_ID,
        verbose_name=_('site'), related_name='relationships')
    class Meta:
        unique_together = (('from_user', 'to_user', 'status', 'site'),)
        ordering = ('created',)
        verbose_name = _('Relationship')
        verbose_name_plural = _('Relationships')
    def __unicode__(self):
        return (_('Relationship from %(from_user)s to %(to_user)s')
            % {'from_user': self.from_user.username,
               'to_user': self.to_user.username})
# Module-level M2M field wired onto User at the bottom of this module (see the
# contribute_to_class() "HACK"); gives User a 'related_to' reverse accessor.
field = models.ManyToManyField(User, through=Relationship,
                   symmetrical=False, related_name='related_to')
class RelationshipManager(User._default_manager.__class__):
    """
    Manager bound to a single ``User`` instance (installed by
    ``RelationshipsDescriptor``) exposing relationship operations such as
    ``user.relationships.add(other_user)``.
    """
    def __init__(self, instance=None, *args, **kwargs):
        super(RelationshipManager, self).__init__(*args, **kwargs)
        # The user on whose behalf this manager operates.
        self.instance = instance
    def add(self, user, status=None, symmetrical=False):
        """
        Add a relationship from one user to another with the given status,
        which defaults to "following".
        Adding a relationship is by default asymmetrical (akin to following
        someone on twitter). Specify a symmetrical relationship (akin to being
        friends on facebook) by passing in :param:`symmetrical` = True
        .. note::
            If :param:`symmetrical` is set, the function will return a tuple
            containing the two relationship objects created
        """
        if not status:
            status = RelationshipStatus.objects.following()
        relationship, created = Relationship.objects.get_or_create(
            from_user=self.instance,
            to_user=user,
            status=status,
            site=Site.objects.get_current()
        )
        if symmetrical:
            # Recurse exactly once (symmetrical=False) to create the reverse edge.
            return (relationship, user.relationships.add(self.instance, status, False))
        else:
            return relationship
    def accept(self, user, status=None):
        """
        Mark the relationship from ``user`` to this instance as accepted and
        return it.  Raises ``Relationship.DoesNotExist`` if no matching
        relationship exists.
        """
        # Default the status to 'following' for consistency with add()/remove();
        # previously ``status=None`` could never match a row (status is NOT NULL),
        # so the call always raised DoesNotExist.
        if not status:
            status = RelationshipStatus.objects.following()
        rel = Relationship.objects.get(
            from_user=user,
            to_user=self.instance,
            status=status,
            site__pk=settings.SITE_ID)
        rel.accepted = True
        rel.rejected = False
        rel.save()
        return rel
    def reject(self, user, status=None):
        """
        Mark the relationship from ``user`` to this instance as rejected.
        Returns True; raises ``Relationship.DoesNotExist`` if no matching
        relationship exists.
        """
        # Same status default as accept(), for symmetry.
        if not status:
            status = RelationshipStatus.objects.following()
        rel = Relationship.objects.get(
            from_user=user,
            to_user=self.instance,
            status=status,
            site__pk=settings.SITE_ID)
        rel.accepted = False
        rel.rejected = True
        rel.save()
        return True
    def remove(self, user, status=None, symmetrical=False):
        """
        Remove a relationship from one user to another, with the same caveats
        and behavior as adding a relationship.
        """
        if not status:
            status = RelationshipStatus.objects.following()
        res = Relationship.objects.filter(
            from_user=self.instance,
            to_user=user,
            status=status,
            site__pk=settings.SITE_ID
        ).delete()
        if symmetrical:
            return (res, user.relationships.remove(self.instance, status, False))
        else:
            return res
    def _get_from_query(self, status, accepted, rejected):
        """
        Query kwargs selecting users this instance has a relationship *to*,
        restricted to the given acceptance/rejection state.
        """
        return dict(
            to_users__from_user=self.instance,
            to_users__status=status,
            to_users__accepted=accepted,
            to_users__rejected=rejected,
            to_users__site__pk=settings.SITE_ID,
        )
    def _get_to_query(self, status, accepted, rejected):
        """
        Query kwargs selecting users that have a relationship *to* this
        instance, restricted to the given acceptance/rejection state.
        """
        return dict(
            from_users__to_user=self.instance,
            from_users__status=status,
            from_users__accepted=accepted,
            from_users__rejected=rejected,
            from_users__site__pk=settings.SITE_ID
        )
    def get_awaiting_requests(self, status, accepted=False):
        """
        Return users to whom this instance has sent relationship requests that
        are still pending (not accepted, not rejected).
        """
        # BUG FIX: _get_from_query() takes (status, accepted, rejected);
        # calling it with only ``status`` raised a TypeError.
        query = self._get_from_query(status, accepted, False)
        return User.objects.filter(**query)
    def get_relationships(self, status, accepted=True, rejected=False, symmetrical=False):
        """
        Returns a QuerySet of user objects with which the given user has
        established a relationship.
        """
        query = self._get_from_query(status, accepted, rejected)
        if symmetrical:
            query.update(self._get_to_query(status, accepted, rejected))
        return User.objects.filter(**query)
    def get_related_to(self, status, accepted=True, rejected=False):
        """
        Returns a QuerySet of user objects which have created a relationship to
        the given user.
        """
        return User.objects.filter(**self._get_to_query(status, accepted, rejected))
    def only_to(self, status):
        """
        Returns a QuerySet of user objects who have created a relationship to
        the given user, but which the given user has not reciprocated
        """
        from_relationships = self.get_relationships(status)
        to_relationships = self.get_related_to(status)
        return to_relationships.exclude(pk__in=from_relationships.values_list('pk'))
    def only_from(self, status):
        """
        Like :method:`only_to`, returns user objects with whom the given user
        has created a relationship, but which have not reciprocated
        """
        from_relationships = self.get_relationships(status)
        to_relationships = self.get_related_to(status)
        return from_relationships.exclude(pk__in=to_relationships.values_list('pk'))
    def exists(self, user, status=None, symmetrical=False):
        """
        Returns boolean whether or not a relationship exists between the given
        users. An optional :class:`RelationshipStatus` instance can be specified.
        """
        query = dict(
            to_users__from_user=self.instance,
            to_users__to_user=user,
            to_users__site__pk=settings.SITE_ID,
        )
        if status:
            query.update(to_users__status=status)
        if symmetrical:
            query.update(
                from_users__to_user=self.instance,
                from_users__from_user=user,
                from_users__site__pk=settings.SITE_ID
            )
            if status:
                query.update(from_users__status=status)
        return User.objects.filter(**query).exists()
    # some defaults
    def following(self):
        return self.get_relationships(RelationshipStatus.objects.following())
    def followers(self):
        return self.get_related_to(RelationshipStatus.objects.following())
    def blocking(self):
        # NOTE(review): the positional False is ``accepted=False`` since the
        # accepted/rejected columns were added -- blocks are never accepted, so
        # this matches rows as created by add(); confirm this is intended.
        return self.get_relationships(RelationshipStatus.objects.blocking(), False)
    def blockers(self):
        # NOTE(review): as in blocking(), False means ``accepted=False``.
        return self.get_related_to(RelationshipStatus.objects.blocking(), False)
    def friends(self):
        # BUG FIX: the old call get_relationships(status, True, True) set
        # ``rejected=True``; rows that are both accepted and rejected never
        # exist (accept()/reject() keep the flags mutually exclusive), so
        # friends() always returned an empty queryset. Friendship is the
        # symmetrical case of 'following': accepted and not rejected.
        return self.get_relationships(RelationshipStatus.objects.following(),
                                      accepted=True, rejected=False,
                                      symmetrical=True)
# Build a RelatedManager subclass of RelationshipManager whose constructor
# signature matches the Django version in use, then expose it on User through
# a descriptor. The manager-construction kwargs changed across Django 1.x
# releases, hence the three branches.
if django.VERSION < (1, 2):
    RelatedManager = create_many_related_manager(RelationshipManager, Relationship)
    class RelationshipsDescriptor(object):
        def __get__(self, instance, instance_type=None):
            # Pre-1.2 managers need the raw join table / column names, quoted.
            qn = connection.ops.quote_name
            manager = RelatedManager(
                model=User,
                core_filters={'related_to__pk': instance._get_pk_val()},
                instance=instance,
                symmetrical=False,
                join_table=qn('relationships_relationship'),
                source_col_name=qn('from_user_id'),
                target_col_name=qn('to_user_id'),
            )
            return manager
elif django.VERSION > (1, 2) and django.VERSION < (1, 4):
    # 1.3-era: the factory wants a ManyToManyRel describing the through model.
    fake_rel = ManyToManyRel(
        to=User,
        through=Relationship)
    RelatedManager = create_many_related_manager(RelationshipManager, fake_rel)
    class RelationshipsDescriptor(object):
        def __get__(self, instance, instance_type=None):
            manager = RelatedManager(
                model=User,
                core_filters={'related_to__pk': instance._get_pk_val()},
                instance=instance,
                symmetrical=False,
                source_field_name='from_user',
                target_field_name='to_user'
            )
            return manager
else:
    # 1.4+: core_filters replaced by query_field_name, and the through model
    # is passed to the manager itself.
    fake_rel = ManyToManyRel(
        to=User,
        through=Relationship)
    RelatedManager = create_many_related_manager(RelationshipManager, fake_rel)
    class RelationshipsDescriptor(object):
        def __get__(self, instance, instance_type=None):
            manager = RelatedManager(
                model=User,
                query_field_name='related_to',
                instance=instance,
                symmetrical=False,
                source_field_name='from_user',
                target_field_name='to_user',
                through=Relationship,
            )
            return manager
#HACK
# Monkey-patch the M2M field and the descriptor onto the (external) User
# model so ``user.relationships`` works without subclassing User.
field.contribute_to_class(User, 'relationships')
setattr(User, 'relationships', RelationshipsDescriptor())
| |
from typing import Optional, Tuple, Union
from hwt.code import If
from hwt.hdl.types.bits import Bits
from hwt.interfaces.std import Handshaked
from hwtLib.abstract.componentBuilder import AbstractComponentBuilder
class AbstractStreamBuilder(AbstractComponentBuilder):
    """
    :note: see :class:`AbstractComponentBuilder`
    :attention: this is just abstract class, unit classes have to be specified
        in concrete implementation

    :cvar ~.FifoCls: FIFO unit class
    :cvar ~.FifoAsyncCls: asynchronous FIFO (FIFO with separate clock per port) unit class
    :cvar ~.JoinSelectCls: select order based join unit class
    :cvar ~.JoinFairCls: round robin based join unit class
    :cvar ~.JoinPrioritizedCls: priority based join unit class
    :cvar ~.RegCls: register unit class
    :cvar ~.RegCdcCls: Clock domain crossing register unit class
    :cvar ~.ResizerCls: resizer unit class (used to change data width of an interface)
    :cvar ~.SplitCopyCls: copy based split unit class
    :cvar ~.SplitSelectCls: select order based split unit class (demultiplexer)
    :cvar ~.SplitFairCls: round robin based split unit class
    :cvar ~.SplitPrioritizedCls: priority based split unit class
    """
    FifoCls = NotImplemented
    FifoAsyncCls = NotImplemented
    JoinSelectCls = NotImplemented
    JoinPrioritizedCls = NotImplemented
    JoinFairCls = NotImplemented
    RegCls = NotImplemented
    # BUG FIX: RegCdcCls was documented above and used by buff_cdc() but never
    # declared here; subclasses that did not define it raised AttributeError
    # instead of hitting the NotImplemented sentinel.
    RegCdcCls = NotImplemented
    ResizerCls = NotImplemented
    SplitCopyCls = NotImplemented
    SplitSelectCls = NotImplemented
    SplitFairCls = NotImplemented
    SplitPrioritizedCls = NotImplemented

    def _genericInstance(self,
                         unit_cls,
                         name,
                         set_params=lambda u: u,
                         update_params=True,
                         propagate_clk_rst=True):
        """
        Instantiate generic component and connect basics

        :param unit_cls: class of unit which is being created
        :param name: name for unit_cls instance
        :param set_params: function which updates parameters as is required
            (parameters are already shared with self.end interface)
        :param update_params: if True, copy interface parameters from self.end
        :param propagate_clk_rst: if True, connect clock and reset to the new unit
        """
        u = unit_cls(self.getInfCls())
        if update_params:
            u._updateParamsFrom(self.end)
        set_params(u)
        setattr(self.parent, self._findSuitableName(name), u)
        if propagate_clk_rst:
            self._propagateClkRstn(u)
        self.lastComp = u
        if self.master_to_slave:
            u.dataIn(self.end)
            self.end = u.dataOut
        else:
            # building in reverse order: the new unit drives the current end
            self.end(u.dataOut)
            self.end = u.dataIn
        return self

    @classmethod
    def _join(cls, joinCls, parent, srcInterfaces, name, configAs, extraConfigFn):
        """
        Create builder from many interfaces by joining them together

        :param joinCls: join component class which should be used
        :param parent: unit where builder should place components
        :param srcInterfaces: sequence of interfaces which should be joined
            together (lower index = higher priority)
        :param configAs: interface or another object which configuration
            should be applied
        :param extraConfigFn: function which is applied on join unit
            in configuration phase (can be None)
        """
        srcInterfaces = list(srcInterfaces)
        if name is None:
            if configAs is None:
                name = "gen_join"
            else:
                name = "gen_" + configAs._name
        if configAs is None:
            configAs = srcInterfaces[0]
        self = cls(parent, None, name=name)
        u = joinCls(self._getIntfCls(configAs))
        if extraConfigFn is not None:
            extraConfigFn(u)
        u._updateParamsFrom(configAs)
        u.INPUTS = len(srcInterfaces)
        setattr(self.parent, self._findSuitableName(name + "_join"), u)
        self._propagateClkRstn(u)
        for joinIn, inputIntf in zip(u.dataIn, srcInterfaces):
            joinIn(inputIntf)
        self.lastComp = u
        self.end = u.dataOut
        return self

    @classmethod
    def join_prioritized(cls, parent, srcInterfaces, name=None,
                         configAs=None, extraConfigFn=None):
        """
        create builder from prioritized join of interfaces
        (lower input index = higher priority)

        :note: other parameters same as in `.AbstractStreamBuilder._join`
        """
        return cls._join(cls.JoinPrioritizedCls, parent, srcInterfaces, name,
                         configAs, extraConfigFn)

    @classmethod
    def join_fair(cls, parent, srcInterfaces, name=None,
                  configAs=None, exportSelected=False):
        """
        create builder from fairly joined interfaces (round robin for input select)

        :param exportSelected: if True join component will have handshaked interface
            with index of selected input
        :note: other parameters same as in `.AbstractStreamBuilder._join`
        """
        def extraConfig(u):
            u.EXPORT_SELECTED = exportSelected
        return cls._join(cls.JoinFairCls, parent, srcInterfaces, name,
                         configAs, extraConfig)

    def buff(self, items:int=1,
             latency: Union[None, int, Tuple[int, int]]=None,
             delay: Optional[int]=None,
             init_data: tuple=()):
        """
        Use registers and FIFOs to create buffer of specified parameters

        :note: if items <= latency registers are used else FIFO is used
        :param items: number of items in buffer
        :param latency: latency of buffer (number of clk ticks required to get data
            from input to output)
        :param delay: delay of buffer (number of clk ticks required to get data to buffer)
        :note: delay can be used as synchronization method or to solve timing related problems
            because it will split valid signal path
        :param init_data: a reset value of buffer (data is transfered after reset)
            if items=1 and interface has just data:uint8_t signal
            the init_data will be in format ((0,),)
        :note: if latency or delay is None the most optimal value is used
        """
        if items == 0:
            # nothing to instantiate, only valid as a no-op
            assert latency is None or latency == 0
            assert delay is None or delay == 0
            return self
        elif items == 1:
            if latency is None:
                latency = 1
            if delay is None:
                delay = 0
        else:
            if latency is None:
                latency = 2
            if delay is None:
                delay = 0
        if init_data is not None:
            init_data = tuple(init_data)
            assert len(init_data) <= items, (items, "more init data than init size", init_data)
        assert isinstance(latency, tuple) or latency >= 1 and delay >= 0, (latency, delay)
        if isinstance(latency, tuple) or latency == 1 or latency >= items:
            # instantiate buffer as register
            def applyParams(u):
                u.LATENCY = latency
                u.DELAY = delay
                u.INIT_DATA = init_data
            return self._genericInstance(self.RegCls, "reg",
                                         set_params=applyParams)
        else:
            # instantiate buffer as fifo
            if latency != 2 or delay != 0:
                raise NotImplementedError()
            def applyParams(u):
                u.DEPTH = items
                u.INIT_DATA = init_data
            return self._genericInstance(self.FifoCls, "fifo", applyParams)

    def buff_cdc(self, clk, rst, items=1):
        """
        Instantiate a CDC (Clock Domain Crossing) buffer or AsyncFifo
        on selected interface

        :note: if items==1 CDC clock synchronization register is used
            if items>1 asynchronous FIFO is used
        """
        in_clk = self.getClk()
        in_rst_n = self.getRstn()
        if not self.master_to_slave:
            # building in reverse order: swap which side is the input domain
            in_clk, clk = clk, in_clk
            in_rst_n, rst = rst, in_rst_n
        def set_clk_freq(u):
            u.IN_FREQ = in_clk.FREQ
            u.OUT_FREQ = clk.FREQ
        if items > 1:
            def configure(u):
                u.DEPTH = items
                set_clk_freq(u)
            res = self._genericInstance(
                self.FifoAsyncCls, "cdcAFifo", configure,
                propagate_clk_rst=False)
        else:
            assert items == 1, items
            res = self._genericInstance(
                self.RegCdcCls, "cdcReg", set_clk_freq,
                propagate_clk_rst=False)
        # clock/reset are wired manually because the component spans two domains
        b = res.lastComp
        b.dataIn_clk(in_clk)
        b.dataIn_rst_n(in_rst_n)
        b.dataOut_clk(clk)
        b.dataOut_rst_n(rst)
        return res

    def split_copy(self, noOfOutputs):
        """
        Clone input data to all outputs

        :param noOfOutputs: number of output interfaces of the split
        """
        if not self.master_to_slave:
            assert len(self.end) == noOfOutputs, self.end
        def setChCnt(u):
            u.OUTPUTS = noOfOutputs
        return self._genericInstance(self.SplitCopyCls, 'splitCopy', setChCnt)

    def split_copy_to(self, *outputs):
        """
        Same like split_copy, but outputs are automatically connected

        :param outputs: ports on which should be outputs
            of split component connected to
        """
        assert self.master_to_slave, "This function does not make sense if building in reverse order"
        noOfOutputs = len(outputs)
        s = self.split_copy(noOfOutputs)
        for toComponent, fromFork in zip(outputs, self.end):
            toComponent(fromFork)
        self.end = None  # invalidate because port was fully connected
        return s

    def split_select(self, outputSelSignalOrSequence, noOfOutputs):
        """
        Create a demultiplexer with number of outputs specified by noOfOutputs

        :param noOfOutputs: number of outputs of multiplexer
        :param outputSelSignalOrSequence: handshaked interface (onehot encoded)
            to control selected output or sequence of output indexes
            which should be used (will be repeated)
        """
        if not self.master_to_slave:
            assert len(self.end) == noOfOutputs, self.end
        def setChCnt(u):
            u.OUTPUTS = noOfOutputs
        self._genericInstance(self.SplitSelectCls, 'select', setChCnt)
        if isinstance(outputSelSignalOrSequence, Handshaked):
            self.lastComp.selectOneHot(outputSelSignalOrSequence)
        else:
            # build a ROM of one-hot select values and a counter which cycles
            # through them, advancing on each accepted select transaction
            seq = outputSelSignalOrSequence
            t = Bits(self.lastComp.selectOneHot.data._dtype.bit_length())
            size = len(seq)
            ohIndexes = map(lambda x: 1 << x, seq)
            indexes = self.parent._sig(self.name + "split_seq",
                                       t[size],
                                       def_val=ohIndexes)
            actual = self.parent._reg(self.name + "split_seq_index",
                                      Bits(size.bit_length()),
                                      0)
            iin = self.lastComp.selectOneHot
            iin.data(indexes[actual])
            iin.vld(1)
            If(iin.rd,
               If(actual._eq(size - 1),
                  actual(0)
               ).Else(
                  actual(actual + 1)
               )
            )
        return self

    def split_select_to(self, outputSelSignalOrSequence, *outputs):
        """
        Same like split_select, but outputs are automatically connected

        :param outputs: ports on which should be outputs of split component connected to
        """
        assert self.master_to_slave, "This function does not make sense if building in reverse order"
        noOfOutputs = len(outputs)
        s = self.split_select(outputSelSignalOrSequence, noOfOutputs)
        for toComponent, fromFork in zip(outputs, self.end):
            toComponent(fromFork)
        self.end = None  # invalidate because port was fully connected
        return s

    def split_prioritized(self, noOfOutputs):
        """
        data from input is send to output which is ready and has highest priority from all ready outputs

        :param noOfOutputs: number of output interfaces of the fork
        """
        if not self.master_to_slave:
            assert len(self.end) == noOfOutputs, self.end
        def setChCnt(u):
            u.OUTPUTS = noOfOutputs
        self._genericInstance(self.SplitPrioritizedCls, 'splitPrio', setChCnt)
        return self

    def split_prioritized_to(self, *outputs):
        """
        Same like split_prioritized, but outputs are automatically connected

        :param outputs: ports on which should be outputs of split component connected to
        """
        assert self.master_to_slave, "This function does not make sense if building in reverse order"
        noOfOutputs = len(outputs)
        s = self.split_prioritized(noOfOutputs)
        for toComponent, fromFork in zip(outputs, self.end):
            toComponent(fromFork)
        self.end = None  # invalidate because port was fully connected
        return s

    def split_fair(self, noOfOutputs, exportSelected=False):
        """
        Create a round robin selector with number of outputs specified by noOfOutputs

        :param noOfOutputs: number of outputs of multiplexer
        :param exportSelected: if is True split component will have interface "selectedOneHot"
            of type VldSynced which will have one hot index of selected item
        """
        if not self.master_to_slave:
            assert len(self.end) == noOfOutputs, self.end
        def setChCnt(u):
            u.OUTPUTS = noOfOutputs
            # BUG FIX: ``exportSelected`` used to be accepted and silently
            # ignored; forward it to the component as join_fair() does.
            u.EXPORT_SELECTED = exportSelected
        self._genericInstance(self.SplitFairCls, 'splitFair', setChCnt)
        return self

    def split_fair_to(self, *outputs, exportSelected=False):
        """
        Same like split_fair, but outputs are automatically connected

        :param outputs: ports on which should be outputs
            of split component connected to
        :param exportSelected: if is True split component will
            have interface "selectedOneHot" of type VldSynced
            which will have one hot index of selected item
        """
        assert self.master_to_slave, "This function does not make sense if building in reverse order"
        noOfOutputs = len(outputs)
        s = self.split_fair(noOfOutputs, exportSelected=exportSelected)
        for toComponent, fromFork in zip(outputs, self.end):
            toComponent(fromFork)
        self.end = None  # invalidate because port was fully connected
        return s
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, SparkContext
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.wrapper import JavaWrapper, _jvm
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.functions import lit
class ChiSquareTest(object):
    """
    Conduct Pearson's independence test for every feature against the label. For each feature,
    the (feature, label) pairs are converted into a contingency matrix for which the Chi-squared
    statistic is computed. All label and feature values must be categorical.
    The null hypothesis is that the occurrence of the outcomes is statistically independent.

    .. versionadded:: 2.2.0
    """
    @staticmethod
    def test(dataset, featuresCol, labelCol, flatten=False):
        """
        Perform a Pearson's independence test using dataset.

        .. versionadded:: 2.2.0
        .. versionchanged:: 3.1.0
            Added optional ``flatten`` argument.

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            DataFrame of categorical labels and categorical features.
            Real-valued features will be treated as categorical for each distinct value.
        featuresCol : str
            Name of features column in dataset, of type `Vector` (`VectorUDT`).
        labelCol : str
            Name of label column in dataset, of any numerical type.
        flatten : bool, optional
            if True, flattens the returned dataframe.

        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            DataFrame containing the test result for every feature against the label.
            If flatten is True, this DataFrame will contain one row per feature with the following
            fields:
            - `featureIndex: int`
            - `pValue: float`
            - `degreesOfFreedom: int`
            - `statistic: float`
            If flatten is False, this DataFrame will contain a single Row with the following fields:
            - `pValues: Vector`
            - `degreesOfFreedom: Array[int]`
            - `statistics: Vector`
            Each of these fields has one value per feature.

        Examples
        --------
        >>> from pyspark.ml.linalg import Vectors
        >>> from pyspark.ml.stat import ChiSquareTest
        >>> dataset = [[0, Vectors.dense([0, 0, 1])],
        ...            [0, Vectors.dense([1, 0, 1])],
        ...            [1, Vectors.dense([2, 1, 1])],
        ...            [1, Vectors.dense([3, 1, 1])]]
        >>> dataset = spark.createDataFrame(dataset, ["label", "features"])
        >>> chiSqResult = ChiSquareTest.test(dataset, 'features', 'label')
        >>> chiSqResult.select("degreesOfFreedom").collect()[0]
        Row(degreesOfFreedom=[3, 1, 0])
        >>> chiSqResult = ChiSquareTest.test(dataset, 'features', 'label', True)
        >>> row = chiSqResult.orderBy("featureIndex").collect()
        >>> row[0].statistic
        4.0
        """
        # Convert every python argument to its JVM counterpart, delegate to
        # the Scala implementation, then convert the result DataFrame back.
        ctx = SparkContext._active_spark_context
        java_test = _jvm().org.apache.spark.ml.stat.ChiSquareTest
        converted = [_py2java(ctx, value)
                     for value in (dataset, featuresCol, labelCol, flatten)]
        return _java2py(ctx, java_test.test(*converted))
class Correlation(object):
    """
    Compute the correlation matrix for the input dataset of Vectors using the specified method.
    Methods currently supported: `pearson` (default), `spearman`.

    .. versionadded:: 2.2.0

    Notes
    -----
    For Spearman, a rank correlation, we need to create an RDD[Double] for each column
    and sort it in order to retrieve the ranks and then join the columns back into an RDD[Vector],
    which is fairly costly. Cache the input Dataset before calling corr with `method = 'spearman'`
    to avoid recomputing the common lineage.
    """
    @staticmethod
    def corr(dataset, column, method="pearson"):
        """
        Compute the correlation matrix with specified method using dataset.

        .. versionadded:: 2.2.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            A DataFrame.
        column : str
            The name of the column of vectors for which the correlation coefficient needs
            to be computed. This must be a column of the dataset, and it must contain
            Vector objects.
        method : str, optional
            String specifying the method to use for computing correlation.
            Supported: `pearson` (default), `spearman`.

        Returns
        -------
        A DataFrame that contains the correlation matrix of the column of vectors. This
        DataFrame contains a single row and a single column of name `METHODNAME(COLUMN)`.

        Examples
        --------
        >>> from pyspark.ml.linalg import DenseMatrix, Vectors
        >>> from pyspark.ml.stat import Correlation
        >>> dataset = [[Vectors.dense([1, 0, 0, -2])],
        ...            [Vectors.dense([4, 5, 0, 3])],
        ...            [Vectors.dense([6, 7, 0, 8])],
        ...            [Vectors.dense([9, 0, 0, 1])]]
        >>> dataset = spark.createDataFrame(dataset, ['features'])
        >>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
        >>> print(str(pearsonCorr).replace('nan', 'NaN'))
        DenseMatrix([[ 1.        ,  0.0556...,         NaN,  0.4004...],
                     [ 0.0556...,  1.        ,         NaN,  0.9135...],
                     [        NaN,         NaN,  1.        ,         NaN],
                     [ 0.4004...,  0.9135...,         NaN,  1.        ]])
        >>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
        >>> print(str(spearmanCorr).replace('nan', 'NaN'))
        DenseMatrix([[ 1.        ,  0.1054...,         NaN,  0.4       ],
                     [ 0.1054...,  1.        ,         NaN,  0.9486... ],
                     [        NaN,         NaN,  1.        ,         NaN],
                     [ 0.4       ,  0.9486... ,         NaN,  1.        ]])
        """
        # Marshal the arguments into the JVM, call the Scala Correlation
        # implementation, and convert the resulting DataFrame back to python.
        ctx = SparkContext._active_spark_context
        java_corr = _jvm().org.apache.spark.ml.stat.Correlation
        converted = [_py2java(ctx, value) for value in (dataset, column, method)]
        return _java2py(ctx, java_corr.corr(*converted))
class KolmogorovSmirnovTest(object):
    """
    Conduct the two-sided Kolmogorov Smirnov (KS) test for data sampled from a continuous
    distribution.

    By comparing the largest difference between the empirical cumulative
    distribution of the sample data and the theoretical distribution we can provide a test for
    the null hypothesis that the sample data comes from that theoretical distribution.

    .. versionadded:: 2.4.0
    """
    @staticmethod
    def test(dataset, sampleCol, distName, *params):
        """
        Conduct a one-sample, two-sided Kolmogorov-Smirnov test for probability distribution
        equality. Currently supports the normal distribution, taking as parameters the mean and
        standard deviation.

        .. versionadded:: 2.4.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            a Dataset or a DataFrame containing the sample of data to test.
        sampleCol : str
            Name of sample column in dataset, of any numerical type.
        distName : str
            a `string` name for a theoretical distribution, currently only support "norm".
        params : float
            a list of `float` values specifying the parameters to be used for the theoretical
            distribution. For "norm" distribution, the parameters includes mean and variance.

        Returns
        -------
        A DataFrame that contains the Kolmogorov-Smirnov test result for the input sampled data.
        This DataFrame will contain a single Row with the following fields:
        - `pValue: Double`
        - `statistic: Double`

        Examples
        --------
        >>> from pyspark.ml.stat import KolmogorovSmirnovTest
        >>> dataset = [[-1.0], [0.0], [1.0]]
        >>> dataset = spark.createDataFrame(dataset, ['sample'])
        >>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 0.0, 1.0).first()
        >>> round(ksResult.pValue, 3)
        1.0
        >>> round(ksResult.statistic, 3)
        0.175
        >>> dataset = [[2.0], [3.0], [4.0]]
        >>> dataset = spark.createDataFrame(dataset, ['sample'])
        >>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 3.0, 1.0).first()
        >>> round(ksResult.pValue, 3)
        1.0
        >>> round(ksResult.statistic, 3)
        0.175
        """
        # Convert the dataset and the distribution parameters (coerced to
        # float, packed into a Scala Seq) and delegate to the JVM.
        ctx = SparkContext._active_spark_context
        java_test = _jvm().org.apache.spark.ml.stat.KolmogorovSmirnovTest
        java_dataset = _py2java(ctx, dataset)
        dist_params = _jvm().PythonUtils.toSeq([float(p) for p in params])
        result = java_test.test(java_dataset, sampleCol, distName, dist_params)
        return _java2py(ctx, result)
class Summarizer(object):
"""
Tools for vectorized statistics on MLlib Vectors.
The methods in this package provide various statistics for Vectors contained inside DataFrames.
This class lets users pick the statistics they would like to extract for a given column.
.. versionadded:: 2.4.0
Examples
--------
>>> from pyspark.ml.stat import Summarizer
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> summarizer = Summarizer.metrics("mean", "count")
>>> df = sc.parallelize([Row(weight=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
... Row(weight=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
>>> df.select(summarizer.summary(df.features, df.weight)).show(truncate=False)
+-----------------------------------+
|aggregate_metrics(features, weight)|
+-----------------------------------+
|{[1.0,1.0,1.0], 1} |
+-----------------------------------+
<BLANKLINE>
>>> df.select(summarizer.summary(df.features)).show(truncate=False)
+--------------------------------+
|aggregate_metrics(features, 1.0)|
+--------------------------------+
|{[1.0,1.5,2.0], 2} |
+--------------------------------+
<BLANKLINE>
>>> df.select(Summarizer.mean(df.features, df.weight)).show(truncate=False)
+--------------+
|mean(features)|
+--------------+
|[1.0,1.0,1.0] |
+--------------+
<BLANKLINE>
>>> df.select(Summarizer.mean(df.features)).show(truncate=False)
+--------------+
|mean(features)|
+--------------+
|[1.0,1.5,2.0] |
+--------------+
<BLANKLINE>
"""
@staticmethod
@since("2.4.0")
def mean(col, weightCol=None):
"""
return a column of mean summary
"""
return Summarizer._get_single_metric(col, weightCol, "mean")
@staticmethod
@since("3.0.0")
def sum(col, weightCol=None):
"""
return a column of sum summary
"""
return Summarizer._get_single_metric(col, weightCol, "sum")
@staticmethod
@since("2.4.0")
def variance(col, weightCol=None):
"""
return a column of variance summary
"""
return Summarizer._get_single_metric(col, weightCol, "variance")
@staticmethod
@since("3.0.0")
def std(col, weightCol=None):
"""
return a column of std summary
"""
return Summarizer._get_single_metric(col, weightCol, "std")
@staticmethod
@since("2.4.0")
def count(col, weightCol=None):
"""
return a column of count summary
"""
return Summarizer._get_single_metric(col, weightCol, "count")
@staticmethod
@since("2.4.0")
def numNonZeros(col, weightCol=None):
"""
return a column of numNonZero summary
"""
return Summarizer._get_single_metric(col, weightCol, "numNonZeros")
@staticmethod
@since("2.4.0")
def max(col, weightCol=None):
"""
return a column of max summary
"""
return Summarizer._get_single_metric(col, weightCol, "max")
@staticmethod
@since("2.4.0")
def min(col, weightCol=None):
"""
return a column of min summary
"""
return Summarizer._get_single_metric(col, weightCol, "min")
@staticmethod
@since("2.4.0")
def normL1(col, weightCol=None):
    """Return an aggregate column holding the normL1 summary statistic."""
    metric = "normL1"
    return Summarizer._get_single_metric(col, weightCol, metric)
@staticmethod
@since("2.4.0")
def normL2(col, weightCol=None):
    """Return an aggregate column holding the normL2 summary statistic."""
    metric = "normL2"
    return Summarizer._get_single_metric(col, weightCol, metric)
@staticmethod
def _check_param(featuresCol, weightCol):
    """Validate the feature/weight columns, defaulting the weight to 1.0.

    Parameters
    ----------
    featuresCol : :py:class:`pyspark.sql.Column`
        column containing the feature vectors.
    weightCol : :py:class:`pyspark.sql.Column` or None
        column containing row weights; ``None`` means a constant weight of 1.0.

    Returns
    -------
    tuple
        the validated ``(featuresCol, weightCol)`` pair.

    Raises
    ------
    TypeError
        if either argument is not a :py:class:`pyspark.sql.Column`.
    """
    if weightCol is None:
        # An unweighted summary is expressed as a constant weight of 1.0.
        weightCol = lit(1.0)
    if not isinstance(featuresCol, Column) or not isinstance(weightCol, Column):
        # Fixed message: the parameter is named 'featuresCol', not 'featureCol'.
        raise TypeError("featuresCol and weightCol should be a Column")
    return featuresCol, weightCol
@staticmethod
def _get_single_metric(col, weightCol, metric):
    """Build a :py:class:`pyspark.sql.Column` evaluating one named summary metric."""
    featuresCol, weightCol = Summarizer._check_param(col, weightCol)
    java_column = JavaWrapper._new_java_obj(
        "org.apache.spark.ml.stat.Summarizer." + metric,
        featuresCol._jc,
        weightCol._jc,
    )
    return Column(java_column)
@staticmethod
def metrics(*metrics):
    """
    Given a list of metrics, provides a builder that computes those metrics from a column.

    See the documentation of :py:class:`Summarizer` for an example.

    The following metrics are accepted (case sensitive):
     - mean: a vector that contains the coefficient-wise mean.
     - sum: a vector that contains the coefficient-wise sum.
     - variance: a vector that contains the coefficient-wise variance.
     - std: a vector that contains the coefficient-wise standard deviation.
     - count: the count of all vectors seen.
     - numNonZeros: a vector with the number of non-zeros for each coefficients
     - max: the maximum for each coefficient.
     - min: the minimum for each coefficient.
     - normL2: the Euclidean norm for each coefficient.
     - normL1: the L1 norm of each coefficient (sum of the absolute values).

    .. versionadded:: 2.4.0

    Notes
    -----
    Currently, the performance of this interface is about 2x~3x slower than using the RDD
    interface.

    Parameters
    ----------
    metrics : str
        metrics that can be provided.

    Returns
    -------
    :py:class:`pyspark.ml.stat.SummaryBuilder`
    """
    sc = SparkContext._active_spark_context
    js = JavaWrapper._new_java_obj(
        "org.apache.spark.ml.stat.Summarizer.metrics", _to_seq(sc, metrics)
    )
    return SummaryBuilder(js)
class SummaryBuilder(JavaWrapper):
    """
    A builder object that provides summary statistics about a given column.

    Instances should not be constructed directly; obtain one through the
    methods of :py:class:`pyspark.ml.stat.Summarizer`

    .. versionadded:: 2.4.0
    """

    def __init__(self, jSummaryBuilder):
        super(SummaryBuilder, self).__init__(jSummaryBuilder)

    def summary(self, featuresCol, weightCol=None):
        """
        Build an aggregate object containing the summary of the given column for the
        metrics this builder was configured with.

        .. versionadded:: 2.4.0

        Parameters
        ----------
        featuresCol : str
            a column that contains features Vector object.
        weightCol : str, optional
            a column that contains weight value. Default weight is 1.0.

        Returns
        -------
        :py:class:`pyspark.sql.Column`
            an aggregate column that contains the statistics. The exact content of this
            structure is determined during the creation of the builder.
        """
        featuresCol, weightCol = Summarizer._check_param(featuresCol, weightCol)
        jc = self._java_obj.summary(featuresCol._jc, weightCol._jc)
        return Column(jc)
class MultivariateGaussian(object):
    """Holds the (mean, cov) pair describing a multivariate Gaussian.

    .. versionadded:: 3.0.0

    Examples
    --------
    >>> from pyspark.ml.linalg import DenseMatrix, Vectors
    >>> from pyspark.ml.stat import MultivariateGaussian
    >>> m = MultivariateGaussian(Vectors.dense([11,12]), DenseMatrix(2, 2, (1.0, 3.0, 5.0, 2.0)))
    >>> (m.mean, m.cov.toArray())
    (DenseVector([11.0, 12.0]), array([[ 1., 5.],
    [ 3., 2.]]))
    """

    def __init__(self, mean, cov):
        # Plain attribute holders; no computation or validation happens here.
        self.cov = cov
        self.mean = mean
if __name__ == "__main__":
    # Doctest runner: executes the examples embedded in this module's docstrings
    # against a local 2-core Spark session.
    import doctest
    import numpy
    import pyspark.ml.stat
    from pyspark.sql import SparkSession

    try:
        # Numpy 1.14+ changed its string format.
        numpy.set_printoptions(legacy="1.13")
    except TypeError:
        # Older numpy has no 'legacy' option; its default format already matches.
        pass
    globs = pyspark.ml.stat.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder.master("local[2]").appName("ml.stat tests").getOrCreate()
    sc = spark.sparkContext
    globs["sc"] = sc
    globs["spark"] = spark
    failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    # Always stop the session before reporting, so the JVM shuts down cleanly.
    spark.stop()
    if failure_count:
        sys.exit(-1)
# ===== (file boundary: the content below belongs to a separate module) =====
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.losses.python.losses.loss_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
import tensorflow as tf
class AbsoluteDifferenceLossTest(tf.test.TestCase):
  """Tests for tf.contrib.losses.absolute_difference."""

  def setUp(self):
    # Shared 2x3 fixtures; elementwise |pred - target| = [3, 1, 10, 13, 3, 3],
    # so the unweighted mean loss is 33 / 6 = 5.5.
    self._predictions = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    self._targets = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))

  def testValueErrorThrownWhenWeightIsNone(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.contrib.losses.absolute_difference(
            self._predictions, self._predictions, weight=None)

  def testAllCorrectNoLossWeight(self):
    loss = tf.contrib.losses.absolute_difference(
        self._predictions, self._predictions)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testNonZeroLoss(self):
    loss = tf.contrib.losses.absolute_difference(
        self._predictions, self._targets)
    with self.test_session():
      self.assertAlmostEqual(5.5, loss.eval(), 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    weight = 2.3
    loss = tf.contrib.losses.absolute_difference(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(5.5 * weight, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    weight = 2.3
    loss = tf.contrib.losses.absolute_difference(
        self._predictions, self._targets, tf.constant(weight))
    with self.test_session():
      self.assertAlmostEqual(5.5 * weight, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    weight = tf.constant([1.2, 0.0], shape=[2,])
    loss = tf.contrib.losses.absolute_difference(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(5.6, loss.eval(), 3)

  def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
    weight = tf.constant([1.2, 0.0], shape=[2, 1])
    loss = tf.contrib.losses.absolute_difference(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(5.6, loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeights(self):
    weight = tf.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
    loss = tf.contrib.losses.absolute_difference(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(16.6, loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    weight = tf.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
    loss = tf.contrib.losses.absolute_difference(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(6.0, loss.eval(), 3)

  def testLossWithSampleSpecificWeightsAllZero(self):
    weight = tf.zeros((2, 3))
    loss = tf.contrib.losses.absolute_difference(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)
class SoftmaxCrossEntropyLossTest(tf.test.TestCase):
  """Tests for tf.contrib.losses.softmax_cross_entropy.

  Logits of magnitude 10.0 make the softmax nearly one-hot, which is why a
  fully-wrong prediction is expected to cost ~10.0 per example below.
  NOTE(review): assertEquals is a deprecated alias of assertEqual.
  """

  def testNoneWeightRaisesValueError(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[1, 0, 0],
                          [0, 1, 0],
                          [0, 0, 1]])
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.contrib.losses.softmax_cross_entropy(logits, labels, weight=None)

  def testAllCorrect(self):
    with self.test_session():
      logits = tf.constant([[10.0, 0.0, 0.0],
                            [0.0, 10.0, 0.0],
                            [0.0, 0.0, 10.0]])
      labels = tf.constant([[1, 0, 0],
                            [0, 1, 0],
                            [0, 0, 1]])
      loss = tf.contrib.losses.softmax_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllWrong(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[0, 0, 1],
                          [1, 0, 0],
                          [0, 1, 0]])
    with self.test_session():
      loss = tf.contrib.losses.softmax_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[0, 0, 1],
                          [1, 0, 0],
                          [0, 1, 0]])
    weight = 2.3
    with self.test_session():
      loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
      self.assertAlmostEqual(loss.eval(), weight * 10.0, 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[0, 0, 1],
                          [1, 0, 0],
                          [0, 1, 0]])
    weight = 2.3
    with self.test_session():
      loss = tf.contrib.losses.softmax_cross_entropy(
          logits, labels, tf.constant(weight))
      self.assertAlmostEqual(loss.eval(), weight * 10.0, 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[0, 0, 1],
                          [1, 0, 0],
                          [0, 1, 0]])
    weight = tf.constant([1.2, 3.4, 5.6], shape=[3])
    with self.test_session():
      loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
      self.assertAlmostEqual(loss.eval(), (1.2 + 3.4 + 5.6) * 10.0 / 3.0, 3)

  def testAllWrongAllWeightsMissing(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[0, 0, 1],
                          [1, 0, 0],
                          [0, 1, 0]])
    weight = tf.constant([0, 0, 0], shape=[3])
    with self.test_session():
      loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testSomeWeightsMissing(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[0, 0, 1],
                          [1, 0, 0],
                          [0, 1, 0]])
    weight = tf.constant([1.2, 0, 0], shape=[3])
    with self.test_session():
      loss = tf.contrib.losses.softmax_cross_entropy(logits, labels, weight)
      self.assertAlmostEqual(loss.eval(), 12.0, 3)

  def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
    # Per-element (2-D) weights are not supported by this loss.
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0],
                            [-100.0, 100.0, -100.0],
                            [-100.0, -100.0, 100.0]])
      labels = tf.constant([[1, 0, 0],
                            [0, 1, 0],
                            [0, 0, 1]])
      weight = tf.constant([[3, 4, 5],
                            [2, 6, 0],
                            [8, 0, 1]])
      with self.assertRaises(ValueError):
        tf.contrib.losses.softmax_cross_entropy(
            logits, labels, weight=weight).eval()

  def testSoftmaxLabelSmoothing(self):
    with self.test_session():
      # Softmax Cross Entropy Loss is:
      #   -\sum_i p_i \log q_i
      # where for a softmax activation
      # \log q_i = x_i - \log \sum_j \exp x_j
      #          = x_i - x_max - \log \sum_j \exp (x_j - x_max)
      # For our activations, [100, -100, -100] the log partition function
      # becomes \log ( exp(0) + exp(-200) + exp(-200) ) = 0
      # so our log softmaxes become: [0, -200, -200]
      # so our cross entropy loss is:
      # -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
      logits = tf.constant([[100.0, -100.0, -100.0]])
      labels = tf.constant([[1, 0, 0]])
      label_smoothing = 0.1
      loss = tf.contrib.losses.softmax_cross_entropy(
          logits, labels, label_smoothing=label_smoothing)
      self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
      expected_value = 400.0 * label_smoothing / 3.0
      self.assertAlmostEqual(loss.eval(), expected_value, 3)
class SparseSoftmaxCrossEntropyLossTest(tf.test.TestCase):
  """Tests for tf.contrib.losses.sparse_softmax_cross_entropy.

  Same logits setup as the dense softmax tests, but labels are class indices
  rather than one-hot rows; a fully-wrong prediction costs ~10.0 per example.
  NOTE(review): assertEquals is a deprecated alias of assertEqual.
  """

  def testNoneWeightRaisesValueError(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[0], [1], [2]])
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.contrib.losses.sparse_softmax_cross_entropy(
            logits, labels, weight=None)

  def testAllCorrectInt32Labels(self):
    with self.test_session():
      logits = tf.constant([[10.0, 0.0, 0.0],
                            [0.0, 10.0, 0.0],
                            [0.0, 0.0, 10.0]])
      labels = tf.constant([[0], [1], [2]], dtype=tf.int32)
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllCorrectInt64Labels(self):
    with self.test_session():
      logits = tf.constant([[10.0, 0.0, 0.0],
                            [0.0, 10.0, 0.0],
                            [0.0, 0.0, 10.0]])
      labels = tf.constant([[0], [1], [2]], dtype=tf.int64)
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllCorrectNonColumnLabels(self):
    # Labels given as a rank-1 tensor instead of a column vector.
    with self.test_session():
      logits = tf.constant([[10.0, 0.0, 0.0],
                            [0.0, 10.0, 0.0],
                            [0.0, 0.0, 10.0]])
      labels = tf.constant([0, 1, 2])
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllWrongInt32Labels(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[2], [0], [1]], dtype=tf.int32)
    with self.test_session():
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testAllWrongInt64Labels(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[2], [0], [1]], dtype=tf.int64)
    with self.test_session():
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testAllWrongNonColumnLabels(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([2, 0, 1])
    with self.test_session():
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[2], [0], [1]])
    weight = 2.3
    with self.test_session():
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(
          logits, labels, weight)
      self.assertAlmostEqual(loss.eval(), weight * 10.0, 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[2], [0], [1]])
    weight = 2.3
    with self.test_session():
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(
          logits, labels, tf.constant(weight))
      self.assertAlmostEqual(loss.eval(), weight * 10.0, 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[2], [0], [1]])
    weight = tf.constant([1.2, 3.4, 5.6], shape=[3])
    with self.test_session():
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(
          logits, labels, weight)
      self.assertAlmostEqual(loss.eval(), (1.2 + 3.4 + 5.6) * 10.0 / 3.0, 3)

  def testNonZeroLossWithColumnWeights(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[2], [0], [1]])
    weight = tf.constant([[1.2], [3.4], [5.6]])
    with self.test_session():
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(
          logits, labels, weight)
      self.assertAlmostEqual(loss.eval(), (1.2 + 3.4 + 5.6) * 10.0 / 3.0, 3)

  def testAllWrongAllWeightsMissing(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[2], [0], [1]])
    weight = tf.constant([0, 0, 0], shape=[3])
    with self.test_session():
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(
          logits, labels, weight)
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testSomeWeightsMissing(self):
    logits = tf.constant([[10.0, 0.0, 0.0],
                          [0.0, 10.0, 0.0],
                          [0.0, 0.0, 10.0]])
    labels = tf.constant([[2], [0], [1]])
    weight = tf.constant([1.2, 0, 0], shape=[3])
    with self.test_session():
      loss = tf.contrib.losses.sparse_softmax_cross_entropy(
          logits, labels, weight)
      self.assertAlmostEqual(loss.eval(), 12.0, 3)

  def testMeasurementSpecificWeightsRaisesException(self):
    # Per-element (2-D) weights are not supported by this loss.
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0],
                            [-100.0, 100.0, -100.0],
                            [-100.0, -100.0, 100.0]])
      labels = tf.constant([[0], [1], [2]])
      weight = tf.constant([[3, 4, 5],
                            [2, 6, 0],
                            [8, 0, 1]])
      with self.assertRaises(ValueError):
        tf.contrib.losses.sparse_softmax_cross_entropy(
            logits, labels, weight=weight).eval()

  def testInconsistentWeightSizeRaisesException(self):
    """The weight tensor has incorrect number of elements."""
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0],
                            [-100.0, 100.0, -100.0],
                            [-100.0, -100.0, 100.0]])
      labels = tf.constant([[0], [1], [2]])
      weight = tf.constant([1.2, 3.4, 5.6, 7.8])
      with self.assertRaises(ValueError):
        tf.contrib.losses.sparse_softmax_cross_entropy(
            logits, labels, weight=weight).eval()

  def testInconsistentLabelSizeRaisesException(self):
    """The label tensor has incorrect number of elements."""
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0],
                            [-100.0, 100.0, -100.0],
                            [-100.0, -100.0, 100.0]])
      labels = tf.constant([[0], [1], [2], [3]])
      weight = tf.constant([1.2, 3.4, 5.6])
      with self.assertRaises(ValueError):
        tf.contrib.losses.sparse_softmax_cross_entropy(
            logits, labels, weight=weight).eval()

  def testInconsistentWeightShapeRaisesException(self):
    """The weight tensor has incorrect shape."""
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0, -100.0],
                            [-100.0, 100.0, -100.0, -100.0],
                            [-100.0, -100.0, 100.0, -100.0],
                            [-100.0, -100.0, -100.0, 100.0]])
      labels = tf.constant([[0], [1], [2], [3]])
      weight = tf.constant([[1.2, 3.4], [5.6, 7.8]])
      with self.assertRaises(ValueError):
        tf.contrib.losses.sparse_softmax_cross_entropy(
            logits, labels, weight=weight).eval()

  def testInconsistentLabelShapeRaisesException(self):
    """The label tensor has incorrect shape."""
    # Unlike the static-shape errors above, this one is only detected when the
    # graph runs, hence InvalidArgumentError rather than ValueError.
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0, -100.0],
                            [-100.0, 100.0, -100.0, -100.0],
                            [-100.0, -100.0, 100.0, -100.0],
                            [-100.0, -100.0, -100.0, 100.0]])
      labels = tf.constant([[0, 1], [2, 3]])
      weight = tf.constant([1.2, 3.4, 5.6, 7.8])
      with self.assertRaises(tf.errors.InvalidArgumentError):
        tf.contrib.losses.sparse_softmax_cross_entropy(
            logits, labels, weight=weight).eval()
class SigmoidCrossEntropyLossTest(tf.test.TestCase):
  """Tests for tf.contrib.losses.sigmoid_cross_entropy.

  NOTE(review): assertEquals is a deprecated alias of assertEqual.
  """

  def testAllCorrectSigmoid(self):
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0],
                            [-100.0, 100.0, -100.0],
                            [-100.0, -100.0, 100.0]])
      labels = tf.constant([[1, 0, 0],
                            [0, 1, 0],
                            [0, 0, 1]])
      loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
    logits = tf.placeholder(tf.float32, shape=(None, 1))
    labels = tf.placeholder(tf.float32, shape=(None, 1))
    weight = tf.ones_like(logits, dtype=tf.float32)
    loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels, weight)
    with self.test_session() as sess:
      # 0.313 ~= log(1 + exp(-1)), the sigmoid loss at logit 1 / label 1.
      loss = sess.run(loss, feed_dict={
          logits: np.ones((32, 1)),
          labels: np.ones((32, 1)),
      })
      self.assertAlmostEqual(loss, 0.313, 3)

  def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
    logits = tf.placeholder(tf.float32, shape=(None, 2))
    labels = tf.placeholder(tf.float32, shape=(None, 2))
    weight = tf.ones_like(logits, dtype=tf.float32)
    loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels, weight)
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={
          logits: np.ones((32, 2)),
          labels: np.ones((32, 2)),
      })
      self.assertAlmostEqual(loss, 0.313, 3)

  def testAllWrongSigmoid(self):
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0],
                            [-100.0, 100.0, -100.0],
                            [-100.0, -100.0, 100.0]])
      labels = tf.constant([[0, 0, 1],
                            [1, 0, 0],
                            [0, 1, 0]])
      loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      # Six of nine entries are maximally wrong at |logit| 100 -> 600 / 9.
      self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)

  def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0],
                            [-100.0, 100.0, -100.0],
                            [-100.0, -100.0, 100.0]])
      labels = tf.constant([[0, 0, 1],
                            [1, 0, 0],
                            [0, 1, 0]])
      weight = tf.constant([[3, 4, 5],
                            [2, 6, 0],
                            [8, 0, 1]])
      loss = tf.contrib.losses.sigmoid_cross_entropy(
          logits, labels, weight=weight)
      self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 1700.0 / 7.0, 3)

  def testMultiCorrectSigmoid(self):
    logits = tf.constant([[100.0, -100.0, 100.0],
                          [100.0, 100.0, -100.0],
                          [-100.0, 100.0, 100.0]])
    labels = tf.constant([[1, 0, 1],
                          [1, 1, 0],
                          [0, 1, 1]])
    loss = tf.contrib.losses.sigmoid_cross_entropy(logits, labels)
    self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
    with self.test_session():
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testSigmoidLabelSmoothingCorrect(self):
    with self.test_session():
      logits = tf.constant([[100.0, -100.0, -100.0]])
      labels = tf.constant([[1, 0, 1]])
      # Sigmoid cross entropy loss is:
      #   max(x,0) - x*z + log(1 + exp(-abs(x)))
      # The new labels are:
      #    z' = z * (1 - L) + 0.5 L
      #    1 -> 1 - 0.5 L
      #    0 -> 0.5 L
      # here we expect:
      # 1/3 * (100 - 100 * (1 - 0.5 L)  + 0
      #       + 0  + 100 * (0.5 L)      + 0
      #       + 0  + 100 * (1 - 0.5 L)  + 0)
      # = 1/3 * (100 + 50 L)
      label_smoothing = 0.1
      loss = tf.contrib.losses.sigmoid_cross_entropy(
          logits, labels, label_smoothing=label_smoothing)
      self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
      self.assertAlmostEqual(loss.eval(), expected_value, 3)

  def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
    # A smoothed sigmoid over one logit should agree with a smoothed softmax
    # over the equivalent two-class formulation.
    with self.test_session():
      label_smoothing = 0.1
      sigmoid_logits = tf.constant([[100.0, -100.0, -100.0]])
      sigmoid_labels = tf.constant([[1, 0, 1]])
      sigmoid_loss = tf.contrib.losses.sigmoid_cross_entropy(
          sigmoid_logits, sigmoid_labels, label_smoothing=label_smoothing)
      softmax_logits = tf.constant([[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
      softmax_labels = tf.constant([[0, 1], [1, 0], [0, 1]])
      softmax_loss = tf.contrib.losses.softmax_cross_entropy(
          softmax_logits, softmax_labels, label_smoothing=label_smoothing)
      self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3)
class LogLossTest(tf.test.TestCase):
  """Tests for tf.contrib.losses.log_loss."""

  def setUp(self):
    predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
    targets = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
    self._np_predictions = predictions
    self._np_targets = targets
    epsilon = 1e-7
    # Per-element log-likelihood terms (all <= 0); the tests below negate and
    # average them to form the expected loss values.
    self._expected_losses = np.multiply(
        targets, np.log(predictions + epsilon)) + np.multiply(
            1 - targets, np.log(1 - predictions + epsilon))
    self._predictions = tf.constant(predictions)
    self._targets = tf.constant(targets)

  def testValueErrorThrownWhenWeightIsNone(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.contrib.losses.log_loss(self._targets, self._targets, weight=None)

  def testAllCorrectNoLossWeight(self):
    loss = tf.contrib.losses.log_loss(self._targets, self._targets)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testAllCorrectNoLossWeightWithPlaceholder(self):
    tf_predictions = tf.placeholder(tf.float32, shape=self._np_targets.shape)
    loss = tf.contrib.losses.log_loss(tf_predictions, self._targets)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(feed_dict={
          tf_predictions: self._np_targets}), 3)

  def testNonZeroLoss(self):
    loss = tf.contrib.losses.log_loss(self._predictions, self._targets)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    weight = 2.3
    loss = tf.contrib.losses.log_loss(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    weight = 2.3
    loss = tf.contrib.losses.log_loss(
        self._predictions, self._targets, tf.constant(weight))
    with self.test_session():
      self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
    tf_predictions = tf.placeholder(tf.float32,
                                    shape=self._np_predictions.shape)
    weight = 2.3
    loss = tf.contrib.losses.log_loss(
        tf_predictions, self._targets, tf.constant(weight))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
                             loss, 3)

  def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
    # Placeholder fixes only the rank, not the dimensions.
    tf_predictions = tf.placeholder(tf.float32, shape=[None, None])
    weight = 2.3
    loss = tf.contrib.losses.log_loss(
        tf_predictions, self._targets, tf.constant(weight))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(weight * -np.sum(self._expected_losses) / 6.0,
                             loss, 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    weight = tf.constant([1.2, 3.4], shape=[2])
    expected_losses = np.multiply(
        self._expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
    loss = tf.contrib.losses.log_loss(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
    # Zero weights drop half the elements: divisor shrinks from 6 to 3.
    weight = tf.constant([1.2, 0], shape=[2])
    expected_losses = np.multiply(
        self._expected_losses,
        np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape((2, 3)))
    loss = tf.contrib.losses.log_loss(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 3.0,
                             loss.eval(), 3)

  def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
    weight = tf.constant([1.2, 0], shape=[2, 1])
    expected_losses = np.multiply(
        self._expected_losses,
        np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape((2, 3)))
    loss = tf.contrib.losses.log_loss(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 3.0,
                             loss.eval(), 3)

  def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
    weight = tf.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.contrib.losses.log_loss(self._predictions, self._targets, weight)

  def testNonZeroLossWithMeasurementSpecificWeights(self):
    weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weight)
    loss = tf.contrib.losses.log_loss(
        self._predictions,
        self._targets,
        weight=tf.constant(weight, shape=(2, 3)))
    with self.test_session():
      # Divisor is 5: the number of non-zero weights.
      self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3)

  def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
    weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weight)
    tf_predictions = tf.placeholder(tf.float32, shape=[2, 3])
    loss = tf.contrib.losses.log_loss(
        tf_predictions,
        self._targets,
        weight=tf.constant(weight, shape=(2, 3)))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    weight = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weight)
    loss = tf.contrib.losses.log_loss(
        self._predictions,
        self._targets,
        weight=tf.constant(weight, shape=(2, 3)))
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
    weight = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weight)
    tf_predictions = tf.placeholder(tf.float32, shape=[2, 3])
    tf_weight = tf.constant(weight, shape=(2, 3))
    loss = tf.contrib.losses.log_loss(tf_predictions, self._targets, tf_weight)
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)

  def testLossWithSampleSpecificWeightsAllZero(self):
    tf_weight = tf.zeros(shape=(2, 3))
    loss = tf.contrib.losses.log_loss(
        self._predictions, self._targets, tf_weight)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)
class HingeLossTest(tf.test.TestCase):
  """Tests for tf.contrib.losses.hinge_loss."""

  def testIncompatibleShapes(self):
    with self.test_session():
      scores = tf.constant([[-1.0], [2.1]])
      labels = tf.constant([0.0, 1.0])
      with self.assertRaises(ValueError):
        _ = tf.contrib.losses.hinge_loss(scores, labels).eval()

  def testAllOutsideMargin(self):
    with self.test_session():
      scores = tf.constant([1.2, -1.4, -1.0, 2.1])
      labels = tf.constant([1.0, 0.0, 0.0, 1.0])
      per_example = tf.contrib.losses.hinge_loss(scores, labels)
      # Every score is confidently on its label's side, so no loss accrues.
      self.assertAllClose(per_example.eval(), [0.0, 0.0, 0.0, 0.0], atol=1e-3)

  def testSomeInsideMargin(self):
    with self.test_session():
      scores = tf.constant([[-0.7], [-1.4], [1.4], [0.6]])
      labels = tf.constant([[0.0], [0.0], [1.0], [1.0]])
      per_example = tf.contrib.losses.hinge_loss(scores, labels)
      # Examples 1 and 4 sit on the correct side of the hyperplane but inside
      # the margin, so each incurs some (small) loss.
      self.assertAllClose(
          per_example.eval(), [[0.3], [0.0], [0.0], [0.4]], atol=1e-3)

  def testSomeMisclassified(self):
    with self.test_session():
      scores = tf.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
      labels = tf.constant([[[1.0], [0.0], [0.0], [1.0]]])
      per_example = tf.contrib.losses.hinge_loss(scores, labels)
      # Examples 2 and 4 land on the wrong side of the hyperplane, so they
      # incur some (fairly large) loss.
      self.assertAllClose(
          per_example.eval(), [[[0.0], [1.4], [0.0], [2.1]]], atol=1e-3)
class MeanSquaredErrorTest(tf.test.TestCase):
  """Tests for tf.contrib.losses.mean_squared_error."""

  def setUp(self):
    # Shared 2x3 fixtures; squared diffs are [9, 1, 100, 169, 9, 9], so the
    # unweighted mean loss is 297 / 6 = 49.5.
    self._predictions = tf.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    self._targets = tf.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))

  def testDeprecatedName(self):
    # 'sum_of_squares' is the legacy alias for 'mean_squared_error'.
    loss = tf.contrib.losses.sum_of_squares(
        self._predictions, self._predictions)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testValueErrorThrownWhenWeightIsNone(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.contrib.losses.mean_squared_error(
            self._predictions, self._predictions, weight=None)

  def testAllCorrectNoLossWeight(self):
    loss = tf.contrib.losses.mean_squared_error(
        self._predictions, self._predictions)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testNonZeroLoss(self):
    loss = tf.contrib.losses.mean_squared_error(
        self._predictions, self._targets)
    with self.test_session():
      self.assertAlmostEqual(49.5, loss.eval(), 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    weight = 2.3
    loss = tf.contrib.losses.mean_squared_error(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(49.5 * weight, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    weight = 2.3
    loss = tf.contrib.losses.mean_squared_error(
        self._predictions, self._targets, tf.constant(weight))
    with self.test_session():
      self.assertAlmostEqual(49.5 * weight, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    weight = tf.constant([1.2, 3.4], shape=[2,])
    loss = tf.contrib.losses.mean_squared_error(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)

  def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
    weight = tf.constant([1.2, 3.4], shape=[2, 1])
    loss = tf.contrib.losses.mean_squared_error(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeights(self):
    weight = tf.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
    loss = tf.contrib.losses.mean_squared_error(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(587 / 5.0, loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    weight = tf.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
    loss = tf.contrib.losses.mean_squared_error(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(18.0, loss.eval(), 3)

  def testLossWithSampleSpecificWeightsAllZero(self):
    weight = tf.zeros((2, 3))
    loss = tf.contrib.losses.mean_squared_error(
        self._predictions, self._targets, weight)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)
class MeanPairwiseSquaresErrorTest(tf.test.TestCase):
    """Tests for tf.contrib.losses.mean_pairwise_squared_error."""

    def setUp(self):
        self._predictions = np.array([[4, 8, 12],
                                      [8, 1, 3]])
        self._targets = np.array([[1, 9, 2],
                                  [-5, -5, 7]])
        batch_size, dims = self._targets.shape
        # Compute the expected loss 'manually': for every ordered pair of
        # components (i, j) within one sample, accumulate the squared error
        # between the prediction difference and the target difference, then
        # average over all dims * dims = 9 ordered pairs (including i == j,
        # which contributes zero).
        total = np.zeros((batch_size, 1))
        for b in range(batch_size):
            for i in range(dims):
                for j in range(dims):
                    x = self._predictions[b, i].item() - self._predictions[b, j].item()
                    y = self._targets[b, i].item() - self._targets[b, j].item()
                    tmp = (x-y) * (x-y)
                    total[b] += tmp
        self._expected_losses = np.divide(total, 9.0)

    def testDeprecatedName(self):
        """The deprecated alias sum_of_pairwise_squares still works."""
        loss = tf.contrib.losses.sum_of_pairwise_squares(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets))
        with self.test_session():
            self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)

    def testValueErrorThrownWhenWeightIsNone(self):
        """weight=None must be rejected with ValueError."""
        with self.test_session():
            with self.assertRaises(ValueError):
                tf.contrib.losses.mean_pairwise_squared_error(
                    predictions=tf.constant(self._targets),
                    targets=tf.constant(self._targets),
                    weight=None)

    def testAllCorrectNoLossWeight(self):
        """Identical predictions and targets give zero loss."""
        loss = tf.contrib.losses.mean_pairwise_squared_error(
            predictions=tf.constant(self._targets),
            targets=tf.constant(self._targets))
        with self.test_session():
            self.assertAlmostEqual(0.0, loss.eval(), 3)

    def testNonZeroLoss(self):
        """The op matches the manually-computed expected loss."""
        loss = tf.contrib.losses.mean_pairwise_squared_error(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets))
        with self.test_session():
            self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)

    def testGradientWithZeroWeight(self):
        """A zero weight must not produce NaN gradients."""
        with tf.Graph().as_default():
            tf.set_random_seed(0)
            inputs = tf.ones((2, 3))
            weights = tf.get_variable('weights',
                                      shape=[3, 4],
                                      initializer=tf.truncated_normal_initializer())
            predictions = tf.matmul(inputs, weights)
            optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            # weight=0: the loss is constant zero, so every gradient should
            # be a finite (zero) value rather than NaN from a 0/0 division.
            loss = tf.contrib.losses.mean_pairwise_squared_error(
                predictions,
                predictions,
                0)
            gradients_to_variables = optimizer.compute_gradients(loss)
            init_op = tf.initialize_all_variables()
            with self.test_session() as sess:
                sess.run(init_op)
                for grad, _ in gradients_to_variables:
                    np_grad = sess.run(grad)
                    self.assertFalse(np.isnan(np_grad).any())

    def testNonZeroLossWithPythonScalarWeight(self):
        """A python scalar weight scales the loss linearly."""
        weight = 2.3
        loss = tf.contrib.losses.mean_pairwise_squared_error(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            weight=weight)
        with self.test_session():
            self.assertAlmostEqual(weight * np.sum(self._expected_losses),
                                   loss.eval(), 3)

    def testNonZeroLossWithScalarTensorWeight(self):
        """A scalar tensor weight scales the loss linearly."""
        weight = 2.3
        loss = tf.contrib.losses.mean_pairwise_squared_error(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            weight=tf.constant(weight))
        with self.test_session():
            self.assertAlmostEqual(weight * np.sum(self._expected_losses),
                                   loss.eval(), 3)

    def testNonZeroLossWithScalarZeroWeight(self):
        """A scalar zero weight zeroes the loss."""
        weight = 0
        loss = tf.contrib.losses.mean_pairwise_squared_error(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            weight=tf.constant(weight))
        with self.test_session():
            self.assertAlmostEqual(0, loss.eval(), 3)

    def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
        """Placeholder-fed inputs work with a scalar tensor weight."""
        weight = 2.3
        tf_predictions = tf.placeholder(tf.float32, shape=self._predictions.shape)
        tf_targets = tf.placeholder(tf.float32, shape=self._targets.shape)
        loss = tf.contrib.losses.mean_pairwise_squared_error(
            predictions=tf_predictions,
            targets=tf_targets,
            weight=tf.constant(weight))
        with self.test_session() as sess:
            loss = sess.run(loss, feed_dict={
                tf_predictions: self._predictions,
                tf_targets: self._targets,
            })
            self.assertAlmostEqual(weight * np.sum(self._expected_losses), loss, 3)

    def testNonZeroLossWithOneDimBatchSpecificWeights(self):
        """Per-sample weights scale each sample's pairwise loss."""
        weight = np.asarray([2.0, 1.0]).reshape((2, 1))
        expected_losses = np.multiply(weight, self._expected_losses)
        loss = tf.contrib.losses.mean_pairwise_squared_error(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            weight=tf.constant(weight, shape=[2]))
        with self.test_session():
            self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3)

    def testZeroLossWithOneDimBatchZeroWeights(self):
        """All-zero per-sample weights give a zero loss."""
        weight = np.asarray([0.0, 0.0]).reshape((2, 1))
        loss = tf.contrib.losses.mean_pairwise_squared_error(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            weight=tf.constant(weight, shape=[2]))
        with self.test_session():
            self.assertAlmostEqual(0, loss.eval(), 3)

    def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self):
        """Per-sample weights combine with placeholder-fed inputs."""
        weight = np.asarray([1.2, 3.4]).reshape((2, 1))
        expected_losses = np.multiply(weight, self._expected_losses)
        tf_predictions = tf.placeholder(tf.float32, shape=self._predictions.shape)
        tf_targets = tf.placeholder(tf.int32, shape=self._targets.shape)
        loss = tf.contrib.losses.mean_pairwise_squared_error(
            predictions=tf_predictions,
            targets=tf_targets,
            weight=tf.constant(weight, shape=[2]))
        with self.test_session() as sess:
            loss = sess.run(loss, feed_dict={
                tf_predictions: self._predictions,
                tf_targets: self._targets,
            })
            self.assertAlmostEqual(np.sum(expected_losses), loss, 3)

    def testLossWithAllZeroBatchSpecificWeights(self):
        """All-zero weights passed as a constant tensor give zero loss."""
        weight = np.zeros((2, 1))
        loss = tf.contrib.losses.mean_pairwise_squared_error(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            weight=tf.constant(weight, shape=[2]))
        with self.test_session():
            self.assertAlmostEqual(0.0, loss.eval(), 3)
class CosineDistanceLossTest(tf.test.TestCase):
    """Tests for tf.contrib.losses.cosine_distance."""

    def setUp(self):
        # Three batches, each holding two axis-aligned unit vectors in R^3.
        self._predictions = np.asarray([[1, 0, 0],  # Batch 1
                                        [0, 0, -1],
                                        [1, 0, 0],  # Batch 2
                                        [1, 0, 0],
                                        [0, 0, -1],  # Batch 3
                                        [1, 0, 0]]).reshape((3, 2, 3))
        self._targets = np.asarray([[1, 0, 0],
                                    [0, 0, 1],
                                    [0, 1, 0],
                                    [1, 0, 0],
                                    [0, 0, 1],
                                    [0, 1, 0]]).reshape((3, 2, 3))

    def testValueErrorThrownWhenWeightIsNone(self):
        """weight=None must be rejected with ValueError."""
        with self.test_session():
            with self.assertRaises(ValueError):
                tf.contrib.losses.cosine_distance(
                    predictions=tf.constant(self._targets),
                    targets=tf.constant(self._targets),
                    dim=2,
                    weight=None)

    def testAllCorrectNoWeights(self):
        """Identical vectors are at cosine distance zero."""
        targets = tf.constant(self._targets)
        loss = tf.contrib.losses.cosine_distance(
            predictions=targets, targets=targets, dim=2)
        with self.test_session():
            self.assertAlmostEqual(0, loss.eval(), 5)

    def testPartiallyCorrectWithIntegerValues(self):
        """The mix of matching/orthogonal/opposite vectors averages to 1."""
        loss = tf.contrib.losses.cosine_distance(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            dim=2)
        with self.test_session():
            self.assertAlmostEqual(1, loss.eval(), 5)

    def testPartiallyCorrectFloatingPointValues(self):
        """Float vectors: one equal, one negated, one orthogonal pair."""
        predictions = np.matrix((
            '0.819031913261206 0.567041924552012 0.087465312324590;'
            '-0.665139432070255 -0.739487441769973 -0.103671883216994;'
            '0.707106781186548 -0.707106781186548 0'))
        targets = np.matrix((
            '0.819031913261206 0.567041924552012 0.087465312324590;'
            '0.665139432070255 0.739487441769973 0.103671883216994;'
            '0.707106781186548 0.707106781186548 0'))
        tf_preds = tf.constant(predictions, shape=(3, 1, 3), dtype=tf.float32)
        tf_targets = tf.constant(targets, shape=(3, 1, 3), dtype=tf.float32)
        loss = tf.contrib.losses.cosine_distance(tf_preds, tf_targets, dim=2)
        with self.test_session():
            self.assertAlmostEqual(1.0, loss.eval(), 5)

    def testSampleSpecificWeights(self):
        """A one-hot per-sample weight selects only the first batch."""
        sample_weights = tf.constant([1, 0, 0])
        loss = tf.contrib.losses.cosine_distance(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            dim=2,
            weight=sample_weights)
        with self.test_session():
            self.assertEqual(1.0, loss.eval())

    def testMeasurementSpecificWeights(self):
        """Per-measurement weights average over enabled entries only."""
        measurement_weights = tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2))
        loss = tf.contrib.losses.cosine_distance(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            dim=2,
            weight=measurement_weights)
        with self.test_session():
            self.assertEqual(3.0 / 4.0, loss.eval())

    def testValueErrorThrownWithShapelessPlaceholder(self):
        """Weighted losses need statically known prediction shapes."""
        tf_predictions = tf.placeholder(tf.float32)
        with self.test_session():
            with self.assertRaises(ValueError):
                tf.contrib.losses.cosine_distance(
                    predictions=tf_predictions,
                    targets=tf.constant(self._targets),
                    dim=2,
                    weight=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))

    def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
        """A fully-shaped placeholder works with per-measurement weights."""
        tf_predictions = tf.placeholder(tf.float32, shape=self._targets.shape)
        loss = tf.contrib.losses.cosine_distance(
            predictions=tf_predictions,
            targets=tf.constant(self._targets),
            dim=2,
            weight=tf.constant([1, 0, 0, 1, 1, 1], shape=(3, 2)))
        with self.test_session() as sess:
            loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
            self.assertEqual(3.0 / 4.0, loss)

    def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
        """All-zero per-sample weights give exactly zero loss."""
        loss = tf.contrib.losses.cosine_distance(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            dim=2,
            weight=tf.zeros((3,)))
        with self.test_session():
            self.assertEqual(0, loss.eval())

    def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
        """All-zero per-measurement weights give exactly zero loss."""
        loss = tf.contrib.losses.cosine_distance(
            predictions=tf.constant(self._predictions),
            targets=tf.constant(self._targets),
            dim=2,
            weight=tf.zeros((3, 2)))
        with self.test_session():
            self.assertEqual(0, loss.eval())
class ComputeWeightedLossTest(tf.test.TestCase):
    """Tests for tf.contrib.losses.compute_weighted_loss."""

    def testHingeLoss(self):
        """compute_weighted_loss registers the reduced loss in the collection."""
        logits = tf.constant([1.2, 0.4, -1.0, -1.1])
        target = tf.constant([1.0, 0.0, 0.0, 1.0])
        losses = tf.contrib.losses.hinge_loss(logits, target)
        # The raw per-example hinge losses are not collected...
        self.assertFalse(tf.contrib.losses.get_losses())
        loss = tf.contrib.losses.compute_weighted_loss(losses)
        # ...but the weighted reduction is.
        self.assertTrue(tf.contrib.losses.get_losses())
        expected_losses = [0.0, 1.4, 0.0, 2.1]
        with self.test_session():
            self.assertAllClose(losses.eval(), expected_losses, atol=1e-3)
            self.assertAllClose(loss.eval(), 3.5/4.0, atol=1e-3)
class AddLossTest(tf.test.TestCase):
    """Tests for tf.contrib.losses.add_loss and the loss collection."""

    @staticmethod
    def _logits_and_target():
        """Return the fixed (logits, target) pair shared by these tests."""
        logits = tf.constant([1.2, 0.4, -1.0, -1.1])
        target = tf.constant([1.0, 0.0, 0.0, 1.0])
        return logits, target

    def testAddExternalLoss(self):
        """An explicitly added loss shows up in get_total_loss()."""
        logits, target = self._logits_and_target()
        losses = tf.contrib.losses.hinge_loss(logits, target)
        # hinge_loss itself does not register with the loss collection.
        self.assertFalse(tf.contrib.losses.get_losses())
        tf.contrib.losses.add_loss(tf.reduce_mean(losses))
        self.assertTrue(tf.contrib.losses.get_losses())
        total_loss = tf.contrib.losses.get_total_loss()
        with self.test_session():
            self.assertAllClose(losses.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)
            self.assertAllClose(total_loss.eval(), 3.5/4.0, atol=1e-3)

    def testNoneLossCollection(self):
        """loss_collection=None keeps the loss out of the default collection."""
        logits, target = self._logits_and_target()
        losses = tf.contrib.losses.hinge_loss(logits, target)
        self.assertFalse(tf.contrib.losses.get_losses())
        tf.contrib.losses.add_loss(tf.reduce_mean(losses), loss_collection=None)
        self.assertFalse(tf.contrib.losses.get_losses())
        with self.test_session():
            self.assertAllClose(losses.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)

    def testNoCollectLosses(self):
        """arg_scope can disable collection for every loss function at once."""
        logits, target = self._logits_and_target()
        self.assertFalse(tf.contrib.losses.get_losses())
        with tf.contrib.framework.arg_scope([tf.contrib.losses.add_loss],
                                            loss_collection=None):
            tf.contrib.losses.absolute_difference(logits, target)
            tf.contrib.losses.log_loss(logits, target)
            tf.contrib.losses.mean_squared_error(logits, target)
            tf.contrib.losses.sigmoid_cross_entropy(logits, target)
            tf.contrib.losses.softmax_cross_entropy(logits, target)
        self.assertFalse(tf.contrib.losses.get_losses())
# Run the whole test suite when this file is executed directly.
if __name__ == '__main__':
    tf.test.main()
| |
"""The tests for the NSW Rural Fire Service Feeds platform."""
import datetime
from unittest.mock import ANY
from aio_geojson_nsw_rfs_incidents import NswRuralFireServiceIncidentsFeed
from asynctest.mock import patch, MagicMock, call
from homeassistant.components import geo_location
from homeassistant.components.geo_location import ATTR_SOURCE
from homeassistant.components.nsw_rural_fire_service_feed.geo_location import (
ATTR_EXTERNAL_ID,
SCAN_INTERVAL,
ATTR_CATEGORY,
ATTR_FIRE,
ATTR_LOCATION,
ATTR_COUNCIL_AREA,
ATTR_STATUS,
ATTR_TYPE,
ATTR_SIZE,
ATTR_RESPONSIBLE_AGENCY,
ATTR_PUBLICATION_DATE,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component, async_fire_time_changed
import homeassistant.util.dt as dt_util
# Minimal platform configuration: only a radius filter, so the feed is
# centred on the Home Assistant home coordinates.
CONFIG = {
    geo_location.DOMAIN: [{"platform": "nsw_rural_fire_service_feed", CONF_RADIUS: 200}]
}
# Configuration that overrides the home location with explicit coordinates.
CONFIG_WITH_CUSTOM_LOCATION = {
    geo_location.DOMAIN: [
        {
            "platform": "nsw_rural_fire_service_feed",
            CONF_RADIUS: 200,
            CONF_LATITUDE: 15.1,
            CONF_LONGITUDE: 25.2,
        }
    ]
}
def _generate_mock_feed_entry(
external_id,
title,
distance_to_home,
coordinates,
category=None,
location=None,
attribution=None,
publication_date=None,
council_area=None,
status=None,
entry_type=None,
fire=True,
size=None,
responsible_agency=None,
):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
feed_entry.category = category
feed_entry.location = location
feed_entry.attribution = attribution
feed_entry.publication_date = publication_date
feed_entry.council_area = council_area
feed_entry.status = status
feed_entry.type = entry_type
feed_entry.fire = fire
feed_entry.size = size
feed_entry.responsible_agency = responsible_agency
return feed_entry
async def test_setup(hass):
    """Test the general setup of the platform."""
    # Set up some mock feed entries for this test.
    # Entry 1 carries the full set of optional attributes.
    mock_entry_1 = _generate_mock_feed_entry(
        "1234",
        "Title 1",
        15.5,
        (-31.0, 150.0),
        category="Category 1",
        location="Location 1",
        attribution="Attribution 1",
        publication_date=datetime.datetime(
            2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc
        ),
        council_area="Council Area 1",
        status="Status 1",
        entry_type="Type 1",
        size="Size 1",
        responsible_agency="Agency 1",
    )
    # Entry 2 has no optional attributes and is explicitly not a fire.
    mock_entry_2 = _generate_mock_feed_entry(
        "2345", "Title 2", 20.5, (-31.1, 150.1), fire=False
    )
    mock_entry_3 = _generate_mock_feed_entry("3456", "Title 3", 25.5, (-31.2, 150.2))
    mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (-31.3, 150.3))
    # Patching 'utcnow' to gain more control over the timed update.
    utcnow = dt_util.utcnow()
    with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
        "aio_geojson_client.feed.GeoJsonFeed.update"
    ) as mock_feed_update:
        mock_feed_update.return_value = (
            "OK",
            [mock_entry_1, mock_entry_2, mock_entry_3],
        )
        with assert_setup_component(1, geo_location.DOMAIN):
            assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG)
            # Artificially trigger update.
            hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
            # Collect events.
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 3
            # Entity for entry 1 exposes the full attribute set.
            state = hass.states.get("geo_location.title_1")
            assert state is not None
            assert state.name == "Title 1"
            assert state.attributes == {
                ATTR_EXTERNAL_ID: "1234",
                ATTR_LATITUDE: -31.0,
                ATTR_LONGITUDE: 150.0,
                ATTR_FRIENDLY_NAME: "Title 1",
                ATTR_CATEGORY: "Category 1",
                ATTR_LOCATION: "Location 1",
                ATTR_ATTRIBUTION: "Attribution 1",
                ATTR_PUBLICATION_DATE: datetime.datetime(
                    2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc
                ),
                ATTR_FIRE: True,
                ATTR_COUNCIL_AREA: "Council Area 1",
                ATTR_STATUS: "Status 1",
                ATTR_TYPE: "Type 1",
                ATTR_SIZE: "Size 1",
                ATTR_RESPONSIBLE_AGENCY: "Agency 1",
                ATTR_UNIT_OF_MEASUREMENT: "km",
                ATTR_SOURCE: "nsw_rural_fire_service_feed",
                ATTR_ICON: "mdi:fire",
            }
            # The entity's state is the distance to home in km.
            assert round(abs(float(state.state) - 15.5), 7) == 0
            # Entry 2 is not a fire, so a different icon is used.
            state = hass.states.get("geo_location.title_2")
            assert state is not None
            assert state.name == "Title 2"
            assert state.attributes == {
                ATTR_EXTERNAL_ID: "2345",
                ATTR_LATITUDE: -31.1,
                ATTR_LONGITUDE: 150.1,
                ATTR_FRIENDLY_NAME: "Title 2",
                ATTR_FIRE: False,
                ATTR_UNIT_OF_MEASUREMENT: "km",
                ATTR_SOURCE: "nsw_rural_fire_service_feed",
                ATTR_ICON: "mdi:alarm-light",
            }
            assert round(abs(float(state.state) - 20.5), 7) == 0
            state = hass.states.get("geo_location.title_3")
            assert state is not None
            assert state.name == "Title 3"
            assert state.attributes == {
                ATTR_EXTERNAL_ID: "3456",
                ATTR_LATITUDE: -31.2,
                ATTR_LONGITUDE: 150.2,
                ATTR_FRIENDLY_NAME: "Title 3",
                ATTR_FIRE: True,
                ATTR_UNIT_OF_MEASUREMENT: "km",
                ATTR_SOURCE: "nsw_rural_fire_service_feed",
                ATTR_ICON: "mdi:fire",
            }
            assert round(abs(float(state.state) - 25.5), 7) == 0
            # Simulate an update - one existing, one new entry,
            # one outdated entry
            mock_feed_update.return_value = (
                "OK",
                [mock_entry_1, mock_entry_4, mock_entry_3],
            )
            async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 3
            # Simulate an update - empty data, but successful update,
            # so no changes to entities.
            mock_feed_update.return_value = "OK_NO_DATA", None
            async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 3
            # Simulate a failed update - this removes all entities.
            mock_feed_update.return_value = "ERROR", None
            async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 0
            # Artificially trigger shutdown.
            hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
            # Collect events.
            await hass.async_block_till_done()
async def test_setup_with_custom_location(hass):
    """Test the setup with a custom location."""
    # Set up some mock feed entries for this test.
    mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 20.5, (-31.1, 150.1))
    with patch(
        "aio_geojson_nsw_rfs_incidents.feed_manager.NswRuralFireServiceIncidentsFeed",
        wraps=NswRuralFireServiceIncidentsFeed,
    ) as mock_feed_manager, patch(
        "aio_geojson_client.feed.GeoJsonFeed.update"
    ) as mock_feed_update:
        mock_feed_update.return_value = "OK", [mock_entry_1]
        with assert_setup_component(1, geo_location.DOMAIN):
            assert await async_setup_component(
                hass, geo_location.DOMAIN, CONFIG_WITH_CUSTOM_LOCATION
            )
            # Artificially trigger update.
            hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
            # Collect events.
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 1
            # The feed must have been created with the configured coordinates
            # rather than the Home Assistant home location.
            assert mock_feed_manager.call_args == call(
                ANY, (15.1, 25.2), filter_categories=[], filter_radius=200.0
            )
| |
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.provision.test.test_install -*-
"""
Install flocker on a remote node.
"""
import posixpath
from textwrap import dedent
from urlparse import urljoin, urlparse
from effect import Func, Effect
import yaml
from zope.interface import implementer
from characteristic import attributes
from pyrsistent import PRecord, field
from ._libcloud import INode
from ._common import PackageSource, Variants
from ._ssh import (
run, run_from_args,
sudo, sudo_from_args,
put,
run_remotely
)
from ._effect import sequence
from flocker import __version__ as version
from flocker.cli import configure_ssh
from flocker.common.version import (
get_installable_version, get_package_key_suffix, is_release,
)
# A systemctl sub-command to start or restart a service. We use restart here
# so that if it is already running it gets restart (possibly necessary to
# respect updated configuration) and because restart will also start it if it
# is not running.
START = "restart"

# Per-distribution URL of the package that installs the ZFS-on-Linux
# yum repository definition.
ZFS_REPO = {
    'centos-7': "https://s3.amazonaws.com/archive.zfsonlinux.org/"
                "epel/zfs-release.el7.noarch.rpm",
}

# Name of the S3 bucket that hosts the ClusterHQ package repositories.
ARCHIVE_BUCKET = 'clusterhq-archive'
def get_repository_url(distribution, flocker_version):
    """
    Return the URL for the repository of a given distribution.

    For ``yum``-using distributions this gives the URL to a package that adds
    entries to ``/etc/yum.repos.d``. For ``apt``-using distributions, this
    gives the URL for a repo containing a Packages(.gz) file.

    :param bytes distribution: The Linux distribution to get a repository for.
    :param bytes flocker_version: The version of Flocker to get a repository
        for.

    :return bytes: The URL pointing to a repository of packages.
    :raises: ``UnsupportedDistribution`` if the distribution is unsupported.
    """
    distribution_to_url = {
        # TODO instead of hardcoding keys, use the _to_Distribution map
        # and then choose the name
        # The $(rpm -E %dist) part is deliberately left for the remote shell
        # to evaluate, so the URL matches the target host's EL release.
        'centos-7': "https://{archive_bucket}.s3.amazonaws.com/"
                    "{key}/clusterhq-release$(rpm -E %dist).noarch.rpm".format(
                        archive_bucket=ARCHIVE_BUCKET,
                        key='centos',
                        ),
        # This could hardcode the version number instead of using
        # ``lsb_release`` but that allows instructions to be shared between
        # versions, and for earlier error reporting if you try to install on a
        # separate version. The $(ARCH) part must be left unevaluated, hence
        # the backslash escapes (one to make shell ignore the $ as a
        # substitution marker, and then doubled to make Python ignore the \ as
        # an escape marker). The output of this value then goes into
        # /etc/apt/sources.list which does its own substitution on $(ARCH)
        # during a subsequent apt-get update
        'ubuntu-14.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/'
                        '$(lsb_release --release --short)/\\$(ARCH)'.format(
                            archive_bucket=ARCHIVE_BUCKET,
                            key='ubuntu' + get_package_key_suffix(
                                flocker_version),
                            ),
        'ubuntu-15.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/'
                        '$(lsb_release --release --short)/\\$(ARCH)'.format(
                            archive_bucket=ARCHIVE_BUCKET,
                            key='ubuntu' + get_package_key_suffix(
                                flocker_version),
                            ),
    }
    try:
        return distribution_to_url[distribution]
    except KeyError:
        raise UnsupportedDistribution()
def get_repo_options(flocker_version):
    """
    Get a list of options for enabling necessary yum repositories.

    :param bytes flocker_version: The version of Flocker to get options for.
    :return: List of bytes for enabling (or not) a testing repository.
    """
    # Pre-release builds live in the clusterhq-testing repository, which is
    # disabled by default and must be enabled explicitly per yum command.
    if is_release(flocker_version):
        return []
    return ['--enablerepo=clusterhq-testing']
class UnsupportedDistribution(Exception):
    """
    Raised when an operation is requested for a Linux distribution that
    Flocker does not support.
    """
@attributes(['distribution'])
class DistributionNotSupported(NotImplementedError):
    """
    Raised when the provisioning step is not supported on the given
    distribution.

    :ivar bytes distribution: The distribution that isn't supported.
    """
    def __str__(self):
        # Include the offending distribution in the error text.
        return "Distribution not supported: {}".format(self.distribution)
@implementer(INode)
class ManagedNode(PRecord):
    """
    A node managed by some other system (eg by hand or by another piece of
    orchestration software).

    :ivar bytes address: The address used to reach the node.
    :ivar private_address: An optional secondary address (``bytes``), or
        ``None`` if the node has only one address.
    :ivar bytes distribution: The name of the distribution the node runs.
    """
    address = field(type=bytes, mandatory=True)
    private_address = field(type=(bytes, type(None)),
                            initial=None, mandatory=True)
    distribution = field(type=bytes, mandatory=True)
def task_client_installation_test():
    """
    Check that the CLI is working.
    """
    # Asking for the version exercises the flocker-deploy entry point
    # without needing a cluster.
    version_check = ['flocker-deploy', '--version']
    return run_from_args(version_check)
def install_cli_commands_yum(distribution, package_source):
    """
    Install Flocker CLI on CentOS.

    The ClusterHQ repo is added for downloading latest releases. If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.

    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.

    :return: a sequence of commands to run on the distribution
    """
    if package_source.branch:
        # A development branch has been selected - add its Buildbot repo
        use_development_branch = True
        result_path = posixpath.join(
            '/results/omnibus/', package_source.branch, distribution)
        base_url = urljoin(package_source.build_server, result_path)
    else:
        use_development_branch = False
    commands = [
        # Installing the release package adds the ClusterHQ repository
        # definition to /etc/yum.repos.d.
        sudo(command="yum install -y " + get_repository_url(
            distribution=distribution,
            flocker_version=get_installable_version(version))),
    ]
    if use_development_branch:
        # Write a repo file for the branch's Buildbot repository; it is
        # disabled by default and enabled per-command via --enablerepo.
        repo = dedent(b"""\
            [clusterhq-build]
            name=clusterhq-build
            baseurl=%s
            gpgcheck=0
            enabled=0
            """) % (base_url,)
        commands.append(put(content=repo,
                            path='/tmp/clusterhq-build.repo'))
        commands.append(sudo_from_args([
            'cp', '/tmp/clusterhq-build.repo',
            '/etc/yum.repos.d/clusterhq-build.repo']))
        repo_options = ['--enablerepo=clusterhq-build']
    else:
        repo_options = get_repo_options(
            flocker_version=get_installable_version(version))
    if package_source.os_version:
        # Pin the exact package version requested by the caller.
        package = 'clusterhq-flocker-cli-%s' % (package_source.os_version,)
    else:
        package = 'clusterhq-flocker-cli'
    # Install Flocker CLI and all dependencies
    commands.append(sudo_from_args(
        ["yum", "install"] + repo_options + ["-y", package]))
    return sequence(commands)
def install_cli_commands_ubuntu(distribution, package_source):
    """
    Install flocker CLI on Ubuntu.

    The ClusterHQ repo is added for downloading latest releases. If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.

    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.

    :return: a sequence of commands to run on the distribution
    """
    if package_source.branch:
        # A development branch has been selected - add its Buildbot repo
        use_development_branch = True
        result_path = posixpath.join(
            '/results/omnibus/', package_source.branch, distribution)
        base_url = urljoin(package_source.build_server, result_path)
    else:
        use_development_branch = False
    commands = [
        # Minimal images often have cleared apt caches and are missing
        # packages that are common in a typical release. These commands
        # ensure that we start from a good base system with the required
        # capabilities, particularly that the add-apt-repository command
        # and HTTPS URLs are supported.
        # FLOC-1880 will ensure these are necessary and sufficient.
        sudo_from_args(["apt-get", "update"]),
        sudo_from_args([
            "apt-get", "-y", "install", "apt-transport-https",
            "software-properties-common"]),
        # Add ClusterHQ repo for installation of Flocker packages.
        sudo(command='add-apt-repository -y "deb {} /"'.format(
            get_repository_url(
                distribution=distribution,
                flocker_version=get_installable_version(version))))
        ]
    if use_development_branch:
        # Add BuildBot repo for running tests
        commands.append(sudo_from_args([
            "add-apt-repository", "-y", "deb {} /".format(base_url)]))
        # During a release, the ClusterHQ repo may contain packages with
        # a higher version number than the Buildbot repo for a branch.
        # Use a pin file to ensure that any Buildbot repo has higher
        # priority than the ClusterHQ repo.
        buildbot_host = urlparse(package_source.build_server).hostname
        commands.append(put(dedent('''\
            Package: *
            Pin: origin {}
            Pin-Priority: 900
            '''.format(buildbot_host)), '/tmp/apt-pref'))
        commands.append(sudo_from_args([
            'mv', '/tmp/apt-pref', '/etc/apt/preferences.d/buildbot-900']))
    # Update to read package info from new repos
    commands.append(sudo_from_args(["apt-get", "update"]))
    if package_source.os_version:
        # Pin the exact package version requested by the caller.
        package = 'clusterhq-flocker-cli=%s' % (package_source.os_version,)
    else:
        package = 'clusterhq-flocker-cli'
    # Install Flocker CLI and all dependencies.
    # NOTE(review): --force-yes presumably allows installing from the
    # unauthenticated Buildbot repo - confirm before removing it.
    commands.append(sudo_from_args([
        'apt-get', '-y', '--force-yes', 'install', package]))
    return sequence(commands)
# Dispatch table mapping a distribution name to the function that builds
# its CLI installation command sequence.
_task_install_commands = {
    'centos-7': install_cli_commands_yum,
    'ubuntu-14.04': install_cli_commands_ubuntu,
    'ubuntu-15.04': install_cli_commands_ubuntu,
}
def task_install_cli(distribution, package_source=PackageSource()):
    """
    Install flocker CLI on a distribution.

    The ClusterHQ repo is added for downloading latest releases. If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.

    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.

    :return: a sequence of commands to run on the distribution
    """
    # Look up the distribution-specific installer and delegate to it.
    installer = _task_install_commands[distribution]
    return installer(distribution, package_source)
def install_cli(package_source, node):
    """
    Return an effect to run the CLI installation tasks on a remote node.

    :param package_source: Package source description
    :param node: Remote node description
    """
    username = node.get_default_username()
    install_commands = task_install_cli(node.distribution, package_source)
    return run_remotely(username, node.address, install_commands)
def task_configure_brew_path():
    """
    Configure non-interactive shell to use all paths.

    By default, OSX provides a minimal $PATH, for programs run via SSH. In
    particular /usr/local/bin (which contains `brew`) isn't in the path. This
    configures the path to have it there.
    """
    bashrc = dedent("""\
        if [ -x /usr/libexec/path_helper ]; then
            eval `/usr/libexec/path_helper -s`
        fi
        """)
    return put(path='.bashrc', content=bashrc)
def task_test_homebrew(recipe):
    """
    The commands used to install a Homebrew recipe for Flocker and test it.

    This taps the ClusterHQ/tap tap, which means that Homebrew looks in the
    ClusterHQ/homebrew-tap GitHub repository for any recipe name given.

    :param bytes recipe: The name of a recipe in a either the official Homebrew
        tap or ClusterHQ/tap, or a URL pointing to a recipe.
    :return Effect: Commands used to install a Homebrew recipe for Flocker and
        test it.
    """
    install_command = "brew install {recipe}".format(recipe=recipe)
    test_command = "brew test {recipe}".format(recipe=recipe)
    return sequence([
        run_from_args(['brew', 'tap', 'ClusterHQ/tap']),
        run("brew update"),
        run(install_command),
        run(test_command),
    ])
def task_install_ssh_key():
    """
    Install the authorized ssh keys of the current user for root as well.
    """
    copy_keys = sudo_from_args(
        ['cp', '.ssh/authorized_keys', '/root/.ssh/authorized_keys'])
    return sequence([copy_keys])
def task_upgrade_kernel(distribution):
    """
    Upgrade kernel.
    """
    if distribution == 'ubuntu-14.04':
        # Not required.
        return sequence([])
    if distribution == 'centos-7':
        return sequence([
            run_from_args([
                "yum", "install", "-y", "kernel-devel", "kernel"]),
            # Flush pending writes so the new kernel is on disk.
            run_from_args(['sync']),
        ])
    raise DistributionNotSupported(distribution=distribution)
def task_disable_selinux(distribution):
    """
    Disable SELinux for this session and permanently.

    XXX: Remove this when we work out suitable SELinux settings.
    See https://clusterhq.atlassian.net/browse/FLOC-619.
    """
    if distribution in ('ubuntu-14.04',):
        # Ubuntu does not have SELinux enabled
        return sequence([])
    if distribution in ('centos-7',):
        return sequence([
            # Disable enforcement for the running session...
            run("if selinuxenabled; then setenforce 0; fi"),
            # ...and persist the change across reboots.
            run("test -e /etc/selinux/config && "
                "sed --in-place='.preflocker' "
                "'s/^SELINUX=.*$/SELINUX=disabled/g' "
                "/etc/selinux/config"),
        ])
    raise DistributionNotSupported(distribution=distribution)
def _remove_private_key(content):
"""
Remove most of the contents of a private key file for logging.
"""
prefix = '-----BEGIN PRIVATE KEY-----'
suffix = '-----END PRIVATE KEY-----'
start = content.find(prefix)
if start < 0:
# no private key
return content
# Keep prefix, subsequent newline, and 4 characters at start of key
trim_start = start + len(prefix) + 5
end = content.find(suffix, trim_start)
if end < 0:
end = len(content)
# Keep suffix and previous 4 characters and newline at end of key
trim_end = end - 5
if trim_end <= trim_start:
# strangely short key, keep all content
return content
return content[:trim_start] + '...REMOVED...' + content[trim_end:]
def task_install_control_certificates(ca_cert, control_cert, control_key):
    """
    Install certificates and private key required by the control service.

    :param FilePath ca_cert: Path to CA certificate on local machine.
    :param FilePath control_cert: Path to control service certificate on
        local machine.
    :param FilePath control_key: Path to control service private key on the
        local machine.
    """
    # Be better if permissions were correct from the start.
    # https://clusterhq.atlassian.net/browse/FLOC-1922
    prepare_directory = [
        run('mkdir -p /etc/flocker'),
        run('chmod u=rwX,g=,o= /etc/flocker'),
    ]
    upload_files = [
        put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
        put(path="/etc/flocker/control-service.crt",
            content=control_cert.getContent()),
        # The private key is redacted when its content is logged.
        put(path="/etc/flocker/control-service.key",
            content=control_key.getContent(),
            log_content_filter=_remove_private_key),
    ]
    return sequence(prepare_directory + upload_files)
def task_install_node_certificates(ca_cert, node_cert, node_key):
    """
    Install certificates and private key required by a node.

    :param FilePath ca_cert: Path to CA certificate on local machine.
    :param FilePath node_cert: Path to node certificate on
        local machine.
    :param FilePath node_key: Path to node private key on the local machine.
    """
    # Be better if permissions were correct from the start.
    # https://clusterhq.atlassian.net/browse/FLOC-1922
    prepare_directory = [
        run('mkdir -p /etc/flocker'),
        run('chmod u=rwX,g=,o= /etc/flocker'),
    ]
    upload_files = [
        put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
        put(path="/etc/flocker/node.crt",
            content=node_cert.getContent()),
        # The private key is redacted when its content is logged.
        put(path="/etc/flocker/node.key",
            content=node_key.getContent(),
            log_content_filter=_remove_private_key),
    ]
    return sequence(prepare_directory + upload_files)
def task_enable_docker(distribution):
    """
    Start docker and configure it to start automatically.
    """
    if distribution == 'ubuntu-14.04':
        # Ubuntu enables docker service during installation
        return sequence([])
    if distribution in ('centos-7',):
        systemd_steps = [
            run_from_args(["systemctl", "enable", "docker.service"]),
            run_from_args(["systemctl", "start", "docker.service"]),
        ]
        return sequence(systemd_steps)
    raise DistributionNotSupported(distribution=distribution)
def open_firewalld(service):
    """
    Open firewalld port for a service.

    Opens the port both permanently and for the running session.

    :param str service: Name of service.
    """
    base_commands = [
        ['firewall-cmd', '--permanent', '--add-service'],
        ['firewall-cmd', '--add-service'],
    ]
    effects = []
    for base in base_commands:
        effects.append(run_from_args(base + [service]))
    return sequence(effects)
def open_ufw(service):
    """
    Open ufw port for a service.

    :param str service: Name of service.
    """
    allow_service = run_from_args(['ufw', 'allow', service])
    return sequence([allow_service])
def task_enable_flocker_control(distribution):
    """
    Enable flocker-control service.
    """
    if distribution in ('centos-7',):
        steps = [
            run_from_args(['systemctl', 'enable', 'flocker-control']),
            run_from_args(['systemctl', START, 'flocker-control']),
        ]
        return sequence(steps)
    if distribution == 'ubuntu-14.04':
        # Since the flocker-control service is currently installed
        # alongside the flocker-dataset-agent service, the default control
        # service configuration does not automatically start the
        # service. Here, we provide an override file to start it.
        upstart_override = put(
            path='/etc/init/flocker-control.override',
            content=dedent('''\
                start on runlevel [2345]
                stop on runlevel [016]
                '''),
        )
        return sequence([
            upstart_override,
            run("echo 'flocker-control-api\t4523/tcp\t\t\t# Flocker Control API port' >> /etc/services"),  # noqa
            run("echo 'flocker-control-agent\t4524/tcp\t\t\t# Flocker Control Agent port' >> /etc/services"),  # noqa
            run_from_args(['service', 'flocker-control', 'start']),
        ])
    raise DistributionNotSupported(distribution=distribution)
def task_open_control_firewall(distribution):
    """
    Open the firewall for flocker-control.
    """
    # Pick the firewall tool matching the distribution.
    openers = {
        'centos-7': open_firewalld,
        'ubuntu-14.04': open_ufw,
    }
    open_firewall = openers.get(distribution)
    if open_firewall is None:
        raise DistributionNotSupported(distribution=distribution)
    effects = []
    for service in ['flocker-control-api', 'flocker-control-agent']:
        effects.append(open_firewall(service))
    return sequence(effects)
# Set of dataset fields which are *not* sensitive. Only fields in this
# set are logged. This should contain everything except usernames and
# passwords (or equivalents). Implemented as a whitelist in case new
# security fields are added.
# Used by _remove_dataset_fields to redact agent.yml content in logs.
_ok_to_log = frozenset((
    'auth_plugin',
    'auth_url',
    'backend',
    'region',
    'zone',
    ))
def _remove_dataset_fields(content):
    """
    Remove non-whitelisted fields from dataset for logging.

    Parses the YAML configuration, replaces every non-whitelisted value in
    the ``dataset`` section with ``'REMOVED'`` and re-serialises it.
    """
    parsed = yaml.safe_load(content)
    dataset = parsed['dataset']
    for field_name in dataset:
        if field_name not in _ok_to_log:
            dataset[field_name] = 'REMOVED'
    return yaml.safe_dump(parsed)
def task_configure_flocker_agent(control_node, dataset_backend,
                                 dataset_backend_configuration):
    """
    Configure the flocker agents by writing out the configuration file.

    :param bytes control_node: The address of the control agent.
    :param DatasetBackend dataset_backend: The volume backend the nodes are
        configured with.
    :param dict dataset_backend_configuration: The backend specific
        configuration options.
    """
    # Work on a copy so the caller's dict is not mutated.
    backend_config = dict(dataset_backend_configuration)
    backend_config[u"backend"] = dataset_backend.name
    agent_config = {
        "version": 1,
        "control-service": {
            "hostname": control_node,
            "port": 4524,
        },
        "dataset": backend_config,
    }
    # Sensitive backend fields are redacted from logs.
    write_config = put(
        path='/etc/flocker/agent.yml',
        content=yaml.safe_dump(agent_config),
        log_content_filter=_remove_dataset_fields,
    )
    return sequence([write_config])
def task_enable_flocker_agent(distribution):
    """
    Enable the flocker agents.

    :param bytes distribution: The distribution name.
    """
    if distribution == 'ubuntu-14.04':
        return sequence([
            run_from_args(['service', 'flocker-dataset-agent', 'start']),
            run_from_args(['service', 'flocker-container-agent', 'start']),
        ])
    if distribution in ('centos-7',):
        steps = []
        for agent in ('flocker-dataset-agent', 'flocker-container-agent'):
            steps.append(run_from_args(['systemctl', 'enable', agent]))
            steps.append(run_from_args(['systemctl', START, agent]))
        return sequence(steps)
    raise DistributionNotSupported(distribution=distribution)
def task_create_flocker_pool_file():
    """
    Create a file-back zfs pool for flocker.

    Backs the pool with a sparse 10G file under ``/var/opt/flocker``.
    """
    steps = [
        run('mkdir -p /var/opt/flocker'),
        run('truncate --size 10G /var/opt/flocker/pool-vdev'),
        run('zpool create flocker /var/opt/flocker/pool-vdev'),
    ]
    return sequence(steps)
def task_install_zfs(distribution, variants=frozenset()):
    """
    Install ZFS on a node.

    :param bytes distribution: The distribution the node is running.
    :param set variants: The set of variant configurations to use when
        installing (``Variants.ZFS_TESTING`` enables the ZFS testing
        repository on CentOS 7).
    :raises DistributionNotSupported: If the distribution has no known ZFS
        packaging.
    """
    # FIX: the default was the mutable ``set()`` — a shared mutable default
    # argument.  It is only read here, but an immutable frozenset removes
    # the hazard without changing behavior.
    commands = []
    if distribution == 'ubuntu-14.04':
        commands += [
            # ZFS not available in base Ubuntu - add ZFS repo
            run_from_args([
                "add-apt-repository", "-y", "ppa:zfs-native/stable"]),
        ]
        commands += [
            # Update to read package info from new repos
            run_from_args([
                "apt-get", "update"]),
            # Package spl-dkms sometimes does not have libc6-dev as a
            # dependency, add it before ZFS installation requires it.
            # See https://github.com/zfsonlinux/zfs/issues/3298
            run_from_args(["apt-get", "-y", "install", "libc6-dev"]),
            run_from_args(['apt-get', '-y', 'install', 'zfsutils']),
        ]
    elif distribution in ('centos-7',):
        commands += [
            run_from_args(["yum", "install", "-y", ZFS_REPO[distribution]]),
        ]
        if distribution == 'centos-7':
            commands.append(
                run_from_args(["yum", "install", "-y", "epel-release"]))
        if Variants.ZFS_TESTING in variants:
            commands += [
                run_from_args(['yum', 'install', '-y', 'yum-utils']),
                run_from_args([
                    'yum-config-manager', '--enable', 'zfs-testing'])
            ]
        commands += [
            run_from_args(['yum', 'install', '-y', 'zfs']),
        ]
    else:
        raise DistributionNotSupported(distribution)
    return sequence(commands)
def configure_zfs(node, variants):
    """
    Configure ZFS for use as a Flocker backend.

    :param INode node: The node to configure ZFS on.
    :param set variants: The set of variant configurations to use when
        installing.
    :return Effect:
    """
    upgrade_kernel = run_remotely(
        username='root',
        address=node.address,
        commands=task_upgrade_kernel(
            distribution=node.distribution),
    )
    reboot = node.reboot()
    install_and_create_pool = run_remotely(
        username='root',
        address=node.address,
        commands=sequence([
            task_install_zfs(
                distribution=node.distribution,
                variants=variants),
            task_create_flocker_pool_file(),
        ]),
    )
    # Re-configure ssh access after the reboot; deferred via Func so it
    # only runs when the effect is performed.
    enable_ssh = Effect(
        Func(lambda: configure_ssh(node.address, 22)))
    return sequence([
        upgrade_kernel,
        reboot,
        install_and_create_pool,
        enable_ssh,
    ])
def _uninstall_flocker_ubuntu1404():
    """
    Return an ``Effect`` for uninstalling the Flocker package from an Ubuntu
    14.04 machine.
    """
    remove_command = [
        b"apt-get", b"remove", b"-y", b"--purge", b"clusterhq-python-flocker",
    ]
    return run_from_args(remove_command)
def _uninstall_flocker_centos7():
    """
    Return an ``Effect`` for uninstalling the Flocker package from a CentOS 7
    machine.
    """
    steps = []
    # Erase the Flocker package first, then the repository definition.
    for package in (b"clusterhq-python-flocker", b"clusterhq-release"):
        steps.append(run_from_args([b"yum", b"erase", b"-y", package]))
    return sequence(steps)
# Maps a distribution name to the zero-argument function returning the
# ``Effect`` that uninstalls Flocker from that distribution.
_flocker_uninstallers = {
    "ubuntu-14.04": _uninstall_flocker_ubuntu1404,
    "centos-7": _uninstall_flocker_centos7,
}
def task_uninstall_flocker(distribution):
    """
    Return an ``Effect`` for uninstalling the Flocker package from the given
    distribution.

    Raises ``KeyError`` for distributions without a registered uninstaller.
    """
    uninstaller = _flocker_uninstallers[distribution]
    return uninstaller()
def uninstall_flocker(nodes):
    """
    Return an ``Effect`` for uninstalling the Flocker package from all of the
    given nodes.
    """
    def uninstall_on_node(node):
        return task_uninstall_flocker(node.distribution)

    return _run_on_all_nodes(nodes, task=uninstall_on_node)
def task_install_flocker(
    distribution=None,
    package_source=PackageSource(),
):
    """
    Install flocker cluster on a distribution.

    The ClusterHQ repo is added for downloading latest releases. If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.

    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.
    :raises: ``UnsupportedDistribution`` if the distribution is unsupported.
    """
    if package_source.branch:
        # A development branch has been selected - add its Buildbot repo
        use_development_branch = True
        result_path = posixpath.join(
            '/results/omnibus/', package_source.branch, distribution)
        base_url = urljoin(package_source.build_server, result_path)
    else:
        use_development_branch = False
    if distribution in ('ubuntu-14.04', 'ubuntu-15.04'):
        commands = [
            # Ensure add-apt-repository command and HTTPS URLs are supported
            # FLOC-1880 will ensure these are necessary and sufficient
            run_from_args([
                "apt-get", "-y", "install", "apt-transport-https",
                "software-properties-common"]),
            # Add Docker repo for recent Docker versions
            run_from_args([
                "add-apt-repository", "-y", "ppa:james-page/docker"]),
            # Add ClusterHQ repo for installation of Flocker packages.
            run(command='add-apt-repository -y "deb {} /"'.format(
                get_repository_url(
                    distribution=distribution,
                    flocker_version=get_installable_version(version)))),
        ]
        if use_development_branch:
            # Add BuildBot repo for testing
            commands.append(run_from_args([
                "add-apt-repository", "-y", "deb {} /".format(base_url)]))
            # During a release, the ClusterHQ repo may contain packages with
            # a higher version number than the Buildbot repo for a branch.
            # Use a pin file to ensure that any Buildbot repo has higher
            # priority than the ClusterHQ repo.
            buildbot_host = urlparse(package_source.build_server).hostname
            # NOTE(review): ``put`` is called positionally here as
            # put(content, path) — presumably matching its signature; other
            # call sites use keyword arguments.  Confirm against ``put``.
            commands.append(put(
                dedent('''\
                    Package: *
                    Pin: origin {}
                    Pin-Priority: 900
                    '''.format(buildbot_host)),
                '/etc/apt/preferences.d/buildbot-900'))
        commands += [
            # Update to read package info from new repos
            run_from_args([
                "apt-get", "update"]),
        ]
        # Pin the requested package version, if one was given.
        if package_source.os_version:
            package = 'clusterhq-flocker-node=%s' % (
                package_source.os_version,)
        else:
            package = 'clusterhq-flocker-node'
        # Install Flocker node and all dependencies
        commands.append(run_from_args([
            'apt-get', '-y', '--force-yes', 'install', package]))
        return sequence(commands)
    elif distribution in ('centos-7',):
        commands = [
            run(command="yum clean all"),
            run(command="yum install -y " + get_repository_url(
                distribution=distribution,
                flocker_version=get_installable_version(version)))
        ]
        if use_development_branch:
            # Write a disabled-by-default repo definition for the branch's
            # Buildbot builds; it is enabled explicitly below.
            repo = dedent(b"""\
                [clusterhq-build]
                name=clusterhq-build
                baseurl=%s
                gpgcheck=0
                enabled=0
                """) % (base_url,)
            commands.append(put(content=repo,
                                path='/etc/yum.repos.d/clusterhq-build.repo'))
            repo_options = ['--enablerepo=clusterhq-build']
        else:
            repo_options = get_repo_options(
                flocker_version=get_installable_version(version))
        # Pin the requested package version, if one was given.
        if package_source.os_version:
            package = 'clusterhq-flocker-node-%s' % (
                package_source.os_version,)
        else:
            package = 'clusterhq-flocker-node'
        commands.append(run_from_args(
            ["yum", "install"] + repo_options + ["-y", package]))
        return sequence(commands)
    else:
        raise UnsupportedDistribution()
# Docker images pre-pulled onto nodes; the default set used by the
# acceptance tests (see task_pull_docker_images).
ACCEPTANCE_IMAGES = [
    "postgres:latest",
    "clusterhq/mongodb:latest",
    "python:2.7-slim",
    "clusterhq/flaskenv",
    "busybox",
]
def task_pull_docker_images(images=ACCEPTANCE_IMAGES):
    """
    Pull docker images.

    :param list images: List of images to pull. Defaults to images used in
        acceptance tests.
    """
    pulls = []
    for image in images:
        pulls.append(run_from_args(['docker', 'pull', image]))
    return sequence(pulls)
def task_enable_updates_testing(distribution):
    """
    Enable the distribution's proposed updates repository.

    :param bytes distribution: See func:`task_install_flocker`
    :raises DistributionNotSupported: always; no distribution currently has
        a supported proposed-updates repository here.
    """
    raise DistributionNotSupported(distribution=distribution)
def task_enable_docker_head_repository(distribution):
    """
    Enable the distribution's repository containing in-development docker
    builds.

    :param bytes distribution: See func:`task_install_flocker`
    """
    if distribution != "centos-7":
        raise DistributionNotSupported(distribution=distribution)
    repo_definition = dedent("""\
        [virt7-testing]
        name=virt7-testing
        baseurl=http://cbs.centos.org/repos/virt7-testing/x86_64/os/
        enabled=1
        gpgcheck=0
        """)
    return sequence([
        put(content=repo_definition,
            path="/etc/yum.repos.d/virt7-testing.repo")
    ])
def provision(distribution, package_source, variants):
    """
    Provision the node for running flocker.

    This drives all the common node installation steps in:
     * http://doc-dev.clusterhq.com/gettingstarted/installation.html

    :param bytes distribution: See func:`task_install_flocker`
    :param PackageSource package_source: See func:`task_install_flocker`
    :param set variants: The set of variant configurations to use when
        provisioning
    """
    commands = []
    if Variants.DISTRO_TESTING in variants:
        commands.append(task_enable_updates_testing(distribution))
    if Variants.DOCKER_HEAD in variants:
        commands.append(task_enable_docker_head_repository(distribution))
    commands.append(
        task_install_flocker(
            package_source=package_source, distribution=distribution))
    # FIX: this previously read ``in ('centos-7')`` — a plain string, not a
    # one-element tuple, so ``in`` performed a *substring* test (e.g.
    # 'centos' or 'os-7' would have matched).  The trailing comma makes it
    # a proper tuple-membership test.
    if distribution in ('centos-7',):
        commands.append(task_disable_selinux(distribution))
    commands.append(task_enable_docker(distribution))
    return sequence(commands)
def _run_on_all_nodes(nodes, task):
    """
    Run some commands on some nodes.

    :param nodes: An iterable of ``Node`` instances where the commands should
        be run.
    :param task: A one-argument callable which is called with each ``Node``
        and should return the ``Effect`` to run on that node.
    :return: An ``Effect`` that runs the commands on a group of nodes.
    """
    effects = []
    for node in nodes:
        effects.append(
            run_remotely(
                username='root',
                address=node.address,
                commands=task(node),
            )
        )
    return sequence(effects)
def install_flocker(nodes, package_source):
    """
    Return an ``Effect`` that installs a certain version of Flocker on the
    given nodes.

    :param nodes: An iterable of ``Node`` instances on which to install
        Flocker.
    :param PackageSource package_source: The version of Flocker to install.
    :return: An ``Effect`` which installs Flocker on the nodes.
    """
    def install_on_node(node):
        return task_install_flocker(
            distribution=node.distribution,
            package_source=package_source,
        )

    return _run_on_all_nodes(nodes, task=install_on_node)
def configure_cluster(cluster, dataset_backend_configuration):
    """
    Configure flocker-control, flocker-dataset-agent and
    flocker-container-agent on a collection of nodes.

    :param Cluster cluster: Description of the cluster to configure.
    :param dict dataset_backend_configuration: Configuration parameters to
        supply to the dataset backend.
    """
    return sequence([
        # First bring up the control service on the control node.
        run_remotely(
            username='root',
            address=cluster.control_node.address,
            commands=sequence([
                task_install_control_certificates(
                    cluster.certificates.cluster.certificate,
                    cluster.certificates.control.certificate,
                    cluster.certificates.control.key),
                task_enable_flocker_control(cluster.control_node.distribution),
                ]),
        ),
        # Then configure and start the agents on every agent node, pairing
        # each node with its certificate/key (zip assumes the two sequences
        # are in matching order).
        sequence([
            sequence([
                run_remotely(
                    username='root',
                    address=node.address,
                    commands=sequence([
                        task_install_node_certificates(
                            cluster.certificates.cluster.certificate,
                            certnkey.certificate,
                            certnkey.key),
                        task_configure_flocker_agent(
                            control_node=cluster.control_node.address,
                            dataset_backend=cluster.dataset_backend,
                            dataset_backend_configuration=(
                                dataset_backend_configuration
                            ),
                        ),
                        task_enable_flocker_agent(
                            distribution=node.distribution,
                        )]),
                ),
            ]) for certnkey, node
            in zip(cluster.certificates.nodes, cluster.agent_nodes)
        ])
    ])
| |
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly.
There are also "opportunistic" tests for both mysql and postgresql in here,
which allows testing against all 3 databases (sqlite in memory, mysql, pg) in
a properly configured unit test environment.
For the opportunistic testing you need to set up a db named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost.
The test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands:
::
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner openstack_citest;
"""
import contextlib
from alembic import script
import mock
from oslo.db import exception as db_exc
from oslo.db.sqlalchemy import test_base
from oslo.db.sqlalchemy import test_migrations
from oslo.db.sqlalchemy import utils as db_utils
import sqlalchemy
import sqlalchemy.exc
from ironic.common.i18n import _LE
from ironic.common import utils
from ironic.db.sqlalchemy import migration
from ironic.db.sqlalchemy import models
from ironic.openstack.common import log as logging
from ironic.tests import base
LOG = logging.getLogger(__name__)
def _get_connect_string(backend, user, passwd, database):
"""Get database connection
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
backend = "mysql+mysqldb"
else:
raise Exception("Unrecognized backend: '%s'" % backend)
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% {'backend': backend, 'user': user, 'passwd': passwd,
'database': database})
def _is_backend_avail(backend, user, passwd, database):
    """Return True if the given database backend accepts connections."""
    try:
        connect_uri = _get_connect_string(backend, user, passwd, database)
        engine = sqlalchemy.create_engine(connect_uri)
        connection = engine.connect()
    except Exception:
        # intentionally catch all to handle exceptions even if we don't
        # have any backend code loaded.
        return False
    connection.close()
    engine.dispose()
    return True
@contextlib.contextmanager
def patch_with_engine(engine):
    """Context manager forcing Ironic's DB API to use ``engine``."""
    target = 'ironic.db.sqlalchemy.api.get_engine'
    with mock.patch(target) as patched_get_engine:
        patched_get_engine.return_value = engine
        yield
class WalkVersionsMixin(object):
    """Walk every alembic revision, upgrading (and optionally downgrading)
    one revision at a time.

    Host classes must provide ``self.migration_api`` — an object exposing
    ``version``/``upgrade``/``downgrade`` (see ``MigrationCheckersMixin``).
    """

    def _walk_versions(self, engine=None, alembic_cfg=None, downgrade=True):
        # Determine latest version script from the repo, then
        # upgrade from 1 through to the latest, with no data
        # in the databases. This just checks that the schema itself
        # upgrades successfully.
        # Place the database under version control
        with patch_with_engine(engine):
            script_directory = script.ScriptDirectory.from_config(alembic_cfg)
            # A fresh database must not report any version yet.
            self.assertIsNone(self.migration_api.version(alembic_cfg))
            versions = [ver for ver in script_directory.walk_revisions()]
            # walk_revisions() yields newest-first; reverse so the oldest
            # revision is applied first.
            for version in reversed(versions):
                self._migrate_up(engine, alembic_cfg,
                                 version.revision, with_data=True)
            if downgrade:
                for version in versions:
                    self._migrate_down(engine, alembic_cfg, version.revision)

    def _migrate_down(self, engine, config, version, with_data=False):
        """Downgrade to ``version``; return False if unsupported."""
        try:
            self.migration_api.downgrade(version, config=config)
        except NotImplementedError:
            # NOTE(sirp): some migrations, namely release-level
            # migrations, don't support a downgrade.
            return False
        self.assertEqual(version, self.migration_api.version(config))
        # NOTE(sirp): `version` is what we're downgrading to (i.e. the
        # 'target' version). So if we have any downgrade checks, they need
        # to be run for the previous (higher numbered) migration.
        if with_data:
            post_downgrade = getattr(
                self, "_post_downgrade_%s" % (version), None)
            if post_downgrade:
                post_downgrade(engine)
        return True

    def _migrate_up(self, engine, config, version, with_data=False):
        """migrate up to a new version of the db.

        We allow for data insertion and post checks at every
        migration version with special _pre_upgrade_### and
        _check_### functions in the main test.
        """
        # NOTE(sdague): try block is here because it's impossible to debug
        # where a failed data migration happens otherwise
        try:
            if with_data:
                data = None
                pre_upgrade = getattr(
                    self, "_pre_upgrade_%s" % version, None)
                if pre_upgrade:
                    data = pre_upgrade(engine)
            self.migration_api.upgrade(version, config=config)
            self.assertEqual(version, self.migration_api.version(config))
            if with_data:
                check = getattr(self, "_check_%s" % version, None)
                if check:
                    check(engine, data)
        except Exception:
            # Log the failing revision before re-raising so the culprit is
            # visible in the test output.
            LOG.error(_LE("Failed to migrate to version %(version)s on "
                          "engine %(engine)s"),
                      {'version': version, 'engine': engine})
            raise
class TestWalkVersions(base.TestCase, WalkVersionsMixin):
    """Unit tests for WalkVersionsMixin using mocked migration machinery."""

    def setUp(self):
        super(TestWalkVersions, self).setUp()
        self.migration_api = mock.MagicMock()
        self.engine = mock.MagicMock()
        self.config = mock.MagicMock()
        self.versions = [mock.Mock(revision='2b2'), mock.Mock(revision='1a1')]

    def test_migrate_up(self):
        """_migrate_up calls upgrade and verifies the reported version."""
        self.migration_api.version.return_value = 'dsa123'
        self._migrate_up(self.engine, self.config, 'dsa123')
        self.migration_api.upgrade.assert_called_with('dsa123',
                                                      config=self.config)
        self.migration_api.version.assert_called_with(self.config)

    def test_migrate_up_with_data(self):
        """with_data=True runs the matching _pre_upgrade/_check hooks."""
        test_value = {"a": 1, "b": 2}
        self.migration_api.version.return_value = '141'
        self._pre_upgrade_141 = mock.MagicMock()
        self._pre_upgrade_141.return_value = test_value
        self._check_141 = mock.MagicMock()
        self._migrate_up(self.engine, self.config, '141', True)
        self._pre_upgrade_141.assert_called_with(self.engine)
        self._check_141.assert_called_with(self.engine, test_value)

    def test_migrate_down(self):
        """_migrate_down returns True on a successful downgrade."""
        self.migration_api.version.return_value = '42'
        self.assertTrue(self._migrate_down(self.engine, self.config, '42'))
        self.migration_api.version.assert_called_with(self.config)

    def test_migrate_down_not_implemented(self):
        """_migrate_down returns False when downgrade is unsupported."""
        self.migration_api.downgrade.side_effect = NotImplementedError
        self.assertFalse(self._migrate_down(self.engine, self.config, '42'))

    def test_migrate_down_with_data(self):
        """with_data=True runs the matching _post_downgrade hook."""
        self._post_downgrade_043 = mock.MagicMock()
        self.migration_api.version.return_value = '043'
        self._migrate_down(self.engine, self.config, '043', True)
        self._post_downgrade_043.assert_called_with(self.engine)

    @mock.patch.object(script, 'ScriptDirectory')
    @mock.patch.object(WalkVersionsMixin, '_migrate_up')
    @mock.patch.object(WalkVersionsMixin, '_migrate_down')
    def test_walk_versions_all_default(self, _migrate_up, _migrate_down,
                                       script_directory):
        """Walk upgrades oldest-first, then downgrades newest-first."""
        fc = script_directory.from_config()
        fc.walk_revisions.return_value = self.versions
        self.migration_api.version.return_value = None
        self._walk_versions(self.engine, self.config)
        self.migration_api.version.assert_called_with(self.config)
        upgraded = [mock.call(self.engine, self.config, v.revision,
                              with_data=True) for v in reversed(self.versions)]
        self.assertEqual(self._migrate_up.call_args_list, upgraded)
        downgraded = [mock.call(self.engine, self.config, v.revision)
                      for v in self.versions]
        self.assertEqual(self._migrate_down.call_args_list, downgraded)

    @mock.patch.object(script, 'ScriptDirectory')
    @mock.patch.object(WalkVersionsMixin, '_migrate_up')
    @mock.patch.object(WalkVersionsMixin, '_migrate_down')
    def test_walk_versions_all_false(self, _migrate_up, _migrate_down,
                                     script_directory):
        """downgrade=False performs upgrades only."""
        fc = script_directory.from_config()
        fc.walk_revisions.return_value = self.versions
        self.migration_api.version.return_value = None
        self._walk_versions(self.engine, self.config, downgrade=False)
        upgraded = [mock.call(self.engine, self.config, v.revision,
                              with_data=True) for v in reversed(self.versions)]
        self.assertEqual(upgraded, self._migrate_up.call_args_list)
class MigrationCheckersMixin(object):
    """Per-revision schema checks, mixed into a DB-specific test case.

    ``_check_<revision>`` methods are invoked by WalkVersionsMixin right
    after the corresponding alembic revision has been applied.
    """

    def setUp(self):
        super(MigrationCheckersMixin, self).setUp()
        self.config = migration._alembic_config()
        self.migration_api = migration

    def test_walk_versions(self):
        """Apply every revision in order (upgrades only)."""
        self._walk_versions(self.engine, self.config, downgrade=False)

    def test_connect_fail(self):
        """Test that we can trigger a database connection failure

        Test that we can fail gracefully to ensure we don't break people
        without specific database backend
        """
        # NOTE(review): "openstack_cifail" is passed in the *user* position
        # — presumably a deliberately-invalid user to force the failure;
        # confirm against _is_backend_avail's signature.
        if _is_backend_avail(self.FIXTURE.DRIVER, "openstack_cifail",
                             self.FIXTURE.USERNAME, self.FIXTURE.DBNAME):
            self.fail("Shouldn't have connected")

    def _check_21b331f883ef(self, engine, data):
        # 21b331f883ef: adds a DateTime 'provision_updated_at' column.
        nodes = db_utils.get_table(engine, 'nodes')
        col_names = [column.name for column in nodes.c]
        self.assertIn('provision_updated_at', col_names)
        self.assertIsInstance(nodes.c.provision_updated_at.type,
                              sqlalchemy.types.DateTime)

    def _check_3cb628139ea4(self, engine, data):
        # 3cb628139ea4: adds a boolean 'console_enabled' column.
        nodes = db_utils.get_table(engine, 'nodes')
        col_names = [column.name for column in nodes.c]
        self.assertIn('console_enabled', col_names)
        # in some backends bool type is integer
        self.assertTrue(isinstance(nodes.c.console_enabled.type,
                                   sqlalchemy.types.Boolean) or
                        isinstance(nodes.c.console_enabled.type,
                                   sqlalchemy.types.Integer))

    def _check_31baaf680d2b(self, engine, data):
        # 31baaf680d2b: adds a TEXT 'instance_info' column.
        nodes = db_utils.get_table(engine, 'nodes')
        col_names = [column.name for column in nodes.c]
        self.assertIn('instance_info', col_names)
        self.assertIsInstance(nodes.c.instance_info.type,
                              sqlalchemy.types.TEXT)

    def _check_3bea56f25597(self, engine, data):
        # 3bea56f25597: makes 'instance_uuid' unique — a second insert with
        # the same instance_uuid must be rejected.
        nodes = db_utils.get_table(engine, 'nodes')
        instance_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        data = {'driver': 'fake',
                'uuid': utils.generate_uuid(),
                'instance_uuid': instance_uuid}
        nodes.insert().values(data).execute()
        data['uuid'] = utils.generate_uuid()
        # TODO(viktors): Remove check on sqlalchemy.exc.IntegrityError, when
        #                Ironic will use oslo.db 0.4.0 or higher.
        #                See bug #1214341 for details.
        self.assertRaises(
            (sqlalchemy.exc.IntegrityError, db_exc.DBDuplicateEntry),
            nodes.insert().execute, data)

    def _check_242cc6a923b3(self, engine, data):
        # 242cc6a923b3: adds a String 'maintenance_reason' column.
        nodes = db_utils.get_table(engine, 'nodes')
        col_names = [column.name for column in nodes.c]
        self.assertIn('maintenance_reason', col_names)
        self.assertIsInstance(nodes.c.maintenance_reason.type,
                              sqlalchemy.types.String)

    def test_upgrade_and_version(self):
        """Upgrading to head leaves the DB reporting a version."""
        with patch_with_engine(self.engine):
            self.migration_api.upgrade('head')
            self.assertIsNotNone(self.migration_api.version())

    def test_create_schema_and_version(self):
        """create_schema leaves the DB reporting a version."""
        with patch_with_engine(self.engine):
            self.migration_api.create_schema()
            self.assertIsNotNone(self.migration_api.version())

    def test_upgrade_and_create_schema(self):
        """create_schema on an already-migrated DB must fail."""
        with patch_with_engine(self.engine):
            self.migration_api.upgrade('31baaf680d2b')
            self.assertRaises(db_exc.DbMigrationError,
                              self.migration_api.create_schema)

    def test_upgrade_twice(self):
        """A second upgrade to head moves past an intermediate revision."""
        with patch_with_engine(self.engine):
            self.migration_api.upgrade('31baaf680d2b')
            v1 = self.migration_api.version()
            self.migration_api.upgrade('head')
            v2 = self.migration_api.version()
            self.assertNotEqual(v1, v2)
class TestMigrationsMySQL(MigrationCheckersMixin,
                          WalkVersionsMixin,
                          test_base.MySQLOpportunisticTestCase):
    """Runs the migration checks against an opportunistic MySQL DB."""
    pass
class TestMigrationsPostgreSQL(MigrationCheckersMixin,
                               WalkVersionsMixin,
                               test_base.PostgreSQLOpportunisticTestCase):
    """Runs the migration checks against an opportunistic PostgreSQL DB."""
    pass
class ModelsMigrationSyncMixin(object):
    """Glue for oslo.db's ModelsMigrationsSync: compares the declarative
    models' metadata against the schema produced by running all migrations.
    """

    def get_metadata(self):
        # Metadata for all of Ironic's declarative models.
        return models.Base.metadata

    def get_engine(self):
        return self.engine

    def db_sync(self, engine):
        # Build the schema by upgrading an empty database to 'head'.
        with patch_with_engine(engine):
            migration.upgrade('head')
class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin,
                                test_migrations.ModelsMigrationsSync,
                                test_base.MySQLOpportunisticTestCase):
    """Models-vs-migrations sync check against MySQL."""
    pass
class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin,
                                   test_migrations.ModelsMigrationsSync,
                                   test_base.PostgreSQLOpportunisticTestCase):
    """Models-vs-migrations sync check against PostgreSQL."""
    pass
| |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from ..externals.six import string_types
import os
from ..bem import fit_sphere_to_headshape
from ..io import read_raw_fif
from ..utils import logger, verbose, warn
from ..externals.six.moves import map
def _mxwarn(msg):
    """Emit a warning about a possible MaxFilter bug, with a pointer to
    more information."""
    template = ('Possible MaxFilter bug: %s, more info: '
                'http://imaging.mrc-cbu.cam.ac.uk/meg/maxbugs')
    warn(template % msg)
@verbose
def apply_maxfilter(in_fname, out_fname, origin=None, frame='device',
                    bad=None, autobad='off', skip=None, force=False,
                    st=False, st_buflen=16.0, st_corr=0.96, mv_trans=None,
                    mv_comp=False, mv_headpos=False, mv_hp=None,
                    mv_hpistep=None, mv_hpisubt=None, mv_hpicons=True,
                    linefreq=None, cal=None, ctc=None, mx_args='',
                    overwrite=True, verbose=None):
    """ Apply NeuroMag MaxFilter to raw data.

    Needs Maxfilter license, maxfilter has to be in PATH

    Parameters
    ----------
    in_fname : string
        Input file name
    out_fname : string
        Output file name
    origin : array-like or string
        Head origin in mm. If None it will be estimated from headshape points.
    frame : string ('device' or 'head')
        Coordinate frame for head center
    bad : string, list (or None)
        List of static bad channels. Can be a list with channel names, or a
        string with channels (names or logical channel numbers)
    autobad : string ('on', 'off', 'n')
        Sets automated bad channel detection on or off
    skip : string or a list of float-tuples (or None)
        Skips raw data sequences, time intervals pairs in sec,
        e.g.: 0 30 120 150
    force : bool
        Ignore program warnings
    st : bool
        Apply the time-domain MaxST extension
    st_buflen : float
        MaxSt buffer length in sec (disabled if st is False)
    st_corr : float
        MaxSt subspace correlation limit (disabled if st is False)
    mv_trans : string (filename or 'default') (or None)
        Transforms the data into the coil definitions of in_fname, or into the
        default frame (None: don't use option)
    mv_comp : bool (or 'inter')
        Estimates and compensates head movements in continuous raw data
    mv_headpos : bool
        Estimates and stores head position parameters, but does not compensate
        movements (disabled if mv_comp is False)
    mv_hp : string (or None)
        Stores head position data in an ascii file
        (disabled if mv_comp is False)
    mv_hpistep : float (or None)
        Sets head position update interval in ms (disabled if mv_comp is False)
    mv_hpisubt : string ('amp', 'base', 'off') (or None)
        Subtracts hpi signals: sine amplitudes, amp + baseline, or switch off
        (disabled if mv_comp is False)
    mv_hpicons : bool
        Check initial consistency isotrak vs hpifit
        (disabled if mv_comp is False)
    linefreq : int (50, 60) (or None)
        Sets the basic line interference frequency (50 or 60 Hz)
        (None: do not use line filter)
    cal : string
        Path to calibration file
    ctc : string
        Path to Cross-talk compensation file
    mx_args : string
        Additional command line arguments to pass to MaxFilter
    overwrite : bool
        Overwrite output file if it already exists
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    origin: string
        Head origin in selected coordinate frame

    Raises
    ------
    RuntimeError
        If frame is invalid (when estimating the origin) or if the maxfilter
        binary exits with a non-zero status.
    """
    # check for possible maxfilter bugs
    if mv_trans is not None and mv_comp:
        _mxwarn("Don't use '-trans' with head-movement compensation "
                "'-movecomp'")

    if autobad != 'off' and (mv_headpos or mv_comp):
        _mxwarn("Don't use '-autobad' with head-position estimation "
                "'-headpos' or movement compensation '-movecomp'")

    if st and autobad != 'off':
        _mxwarn("Don't use '-autobad' with '-st' option")

    # determine the head origin if necessary
    if origin is None:
        logger.info('Estimating head origin from headshape points..')
        raw = read_raw_fif(in_fname, add_eeg_ref=False)
        r, o_head, o_dev = fit_sphere_to_headshape(raw.info, units='mm')
        raw.close()
        logger.info('[done]')
        if frame == 'head':
            origin = o_head
        elif frame == 'device':
            origin = o_dev
        else:
            # bug fix: the exception was previously constructed but not raised
            raise RuntimeError('invalid frame for origin')

    if not isinstance(origin, string_types):
        origin = '%0.1f %0.1f %0.1f' % (origin[0], origin[1], origin[2])

    # format command
    cmd = ('maxfilter -f %s -o %s -frame %s -origin %s '
           % (in_fname, out_fname, frame, origin))

    if bad is not None:
        # format the channels: accept either a list or a whitespace string
        if not isinstance(bad, list):
            bad = bad.split()
        bad = map(str, bad)
        # MaxFilter expects logical channel numbers, so strip 'MEG' prefixes
        bad_logic = [ch[3:] if ch.startswith('MEG') else ch for ch in bad]
        bad_str = ' '.join(bad_logic)
        cmd += '-bad %s ' % bad_str

    cmd += '-autobad %s ' % autobad

    if skip is not None:
        if isinstance(skip, list):
            skip = ' '.join(['%0.3f %0.3f' % (s[0], s[1]) for s in skip])
        cmd += '-skip %s ' % skip

    if force:
        cmd += '-force '

    if st:
        cmd += '-st '
        cmd += ' %d ' % st_buflen
        cmd += '-corr %0.4f ' % st_corr

    if mv_trans is not None:
        cmd += '-trans %s ' % mv_trans

    if mv_comp:
        cmd += '-movecomp '
        if mv_comp == 'inter':
            cmd += ' inter '
        if mv_headpos:
            cmd += '-headpos '
        if mv_hp is not None:
            cmd += '-hp %s ' % mv_hp
        if mv_hpistep is not None:
            # bug fix: the documented mv_hpistep parameter was silently ignored
            cmd += '-hpistep %d ' % mv_hpistep
        if mv_hpisubt is not None:
            # bug fix: the option flag was missing its leading dash
            cmd += '-hpisubt %s ' % mv_hpisubt
        if mv_hpicons:
            cmd += '-hpicons '

    if linefreq is not None:
        cmd += '-linefreq %d ' % linefreq

    if cal is not None:
        cmd += '-cal %s ' % cal

    if ctc is not None:
        cmd += '-ctc %s ' % ctc

    cmd += mx_args

    if overwrite and os.path.exists(out_fname):
        os.remove(out_fname)

    logger.info('Running MaxFilter: %s ' % cmd)
    if os.getenv('_MNE_MAXFILTER_TEST', '') != 'true':  # fake maxfilter
        # use a distinct name for the exit status so the 'st' flag parameter
        # is not clobbered
        status = os.system(cmd)
    else:
        print(cmd)  # we can check the output
        status = 0

    if status != 0:
        raise RuntimeError('MaxFilter returned non-zero exit status %d'
                           % status)
    logger.info('[done]')

    return origin
| |
import logging, pickle, os, glob
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
import numpy as np
from scipy.special import erfinv
from bokeh.plotting import ColumnDataSource, Figure, save, output_file, show
from bokeh.models import HoverTool, TapTool, OpenURL
from bokeh.models import Row, Column
from collections import OrderedDict
from rtpipe.parsecands import read_noise, read_candidates
from time import asctime
# optional dependency; its absence is tolerated (classifier features disabled)
try:
    import activegit
    import sys
except ImportError:
    # narrowed from a bare except: only a missing module should be swallowed
    print('no activegit')
def initializenb():
    """ Find input files and log initialization info

    Uses the 'fileroot' environment variable if set; otherwise globs for a
    cands/noise merge-pickle pair in the current directory.

    Returns
    -------
    (candsfile, noisefile, fileroot) : tuple of str

    Raises
    ------
    IOError
        If no merge pickle files are found in the current directory.
    """
    logger.info('Working directory: {0}'.format(os.getcwd()))
    logger.info('Run on {0}'.format(asctime()))
    try:
        fileroot = os.environ['fileroot']
        logger.info('Setting fileroot to {0} from environment variable.\n'.format(fileroot))
        candsfile = 'cands_{0}_merge.pkl'.format(fileroot)
        noisefile = 'noise_{0}_merge.pkl'.format(fileroot)
    except KeyError:
        sdmdir = os.getcwd()
        logger.info('Setting sdmdir to current directory {0}\n'.format(os.path.abspath(sdmdir)))
        candsfiles = glob.glob('cands_*_merge.pkl')
        noisefiles = glob.glob('noise_*_merge.pkl')
        if not candsfiles or not noisefiles:
            # robustness: fail with a clear message instead of IndexError below
            raise IOError('No cands_*_merge.pkl/noise_*_merge.pkl files found.')
        if len(candsfiles) == 1 and len(noisefiles) == 1:
            logger.info('Found one cands/merge file set')
        else:
            logger.warn('Found multiple cands/noise file sets. Taking first.')
        candsfile = candsfiles[0]
        noisefile = noisefiles[0]
        # bug fix: rstrip/lstrip remove *character sets*, not literal affixes,
        # mangling roots that start/end with those letters; slice instead
        fileroot = candsfile[len('cands_'):-len('_merge.pkl')]
    logger.info('Set: \n\t candsfile {} \n\t noisefile {} \n\t fileroot {} '.format(candsfile, noisefile, fileroot))
    return (candsfile, noisefile, fileroot)
def plot_interactive(mergepkl, noisepkl=None, thresh=6.0, thresh_link=7.0, ignoret=None, savehtml=True, url_path='plots'):
    """ Backwards compatible function for making interactive candidate summary plot

    mergepkl/noisepkl are merged cands/noise pickle files.
    thresh selects candidates to plot; thresh_link selects those linked to
    png plots. ignoret is a list of (t0, t1) time ranges to exclude.
    If savehtml, writes an html file next to mergepkl; otherwise returns the
    bokeh layout.
    """
    data = readdata(mergepkl)
    circleinds = calcinds(data, thresh, ignoret)
    crossinds = calcinds(data, -1*thresh, ignoret)
    edgeinds = calcinds(data, thresh_link, ignoret)

    workdir = os.path.dirname(mergepkl)
    # bug fix: rstrip/lstrip strip *character sets*, not prefixes/suffixes,
    # so file roots ending in those letters were mangled; slice instead
    fileroot = os.path.basename(mergepkl)
    if fileroot.startswith('cands_'):
        fileroot = fileroot[len('cands_'):]
    if fileroot.endswith('_merge.pkl'):
        fileroot = fileroot[:-len('_merge.pkl')]

    logger.info('Total on target time: {} s'.format(calcontime(data, inds=circleinds+crossinds+edgeinds)))

    if noisepkl:
        noiseplot = plotnoisecum(noisepkl)
    else:
        noiseplot = None

    combined = plotall(data, circleinds=circleinds, crossinds=crossinds, edgeinds=edgeinds,
                       htmlname=None, noiseplot=noiseplot, url_path=url_path, fileroot=fileroot)

    if savehtml:
        # same rstrip fix: drop the '.pkl' extension properly
        output_file(os.path.splitext(mergepkl)[0] + '.html')
        save(combined)
    else:
        return combined
def plotall(data, circleinds=[], crossinds=[], edgeinds=[], htmlname=None, noiseplot=None, url_path='plots', fileroot=None):
    """ Create interactive plot (preserving links between panels) from data dictionary

    data has keys of snr, time, dm, sizes, key and more.
    Optional index arguments are used to filter full data set.
    This can be used to remove bad segments or apply different symbols to subsets.
    url_path is path difference to png files for taptool. ('../plots' for jupyter notebook, 'plots' for public page)
    fileroot is the sdm file name used as root for all png files.
    """

    # set up data dictionary: default to all positive/negative candidates
    if not circleinds: circleinds = calcinds(data, np.abs(data['snrs']).min())
    if not crossinds: crossinds = calcinds(data, -1*np.abs(data['snrs']).min())

    TOOLS = "hover,tap,pan,box_select,wheel_zoom,reset"

    # set axis ranges from the union of all selected candidates
    datalen = len(data['dm'])
    inds = circleinds + crossinds + edgeinds
    dm = [data['dm'][i] for i in inds]
    dm_min = min(min(dm), max(dm)/1.2)
    dm_max = max(max(dm), min(dm)*1.2)
    time = [data['time'][i] for i in inds]
    time_min = min(time)
    time_max = max(time)
    l1 = [data['l1'][i] for i in inds]
    l1_min = min(l1)
    l1_max = max(l1)
    m1 = [data['m1'][i] for i in inds]
    m1_min = min(m1)
    m1_max = max(m1)
    specstd = [data['specstd'][i] for i in inds]
    specstd_min = min(specstd)
    specstd_max = max(specstd)
    imkur = [data['imkur'][i] for i in inds]
    imkur_min = min(imkur)
    imkur_max = max(imkur)

    # create the four linked panels: dm-time, sky location, stats, norm prob
    dmt = Figure(plot_width=950, plot_height=500, toolbar_location="left", x_axis_label='Time (s; relative)',
                 y_axis_label='DM (pc/cm3)', x_range=(time_min, time_max), y_range=(dm_min, dm_max),
                 output_backend='webgl', tools=TOOLS)
    loc = Figure(plot_width=450, plot_height=400, toolbar_location="left", x_axis_label='l1 (rad)', y_axis_label='m1 (rad)',
                 x_range=(l1_min, l1_max), y_range=(m1_min,m1_max), tools=TOOLS, output_backend='webgl')
    stat = Figure(plot_width=450, plot_height=400, toolbar_location="left", x_axis_label='Spectral std',
                  y_axis_label='Image kurtosis', x_range=(specstd_min, specstd_max),
                  y_range=(imkur_min, imkur_max), tools=TOOLS, output_backend='webgl')
    norm = Figure(plot_width=450, plot_height=400, toolbar_location="left", x_axis_label='SNR observed',
                  y_axis_label='SNR expected', tools=TOOLS, output_backend='webgl')

    # create positive symbol source and add glyphs
    # (edgeinds candidates are drawn separately below, so exclude them here)
    source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
                                           for (key, value) in data.iteritems()}))
    dmt.circle('time', 'dm', size='sizes', fill_color='colors', line_color=None, fill_alpha=0.2, source=source)
    loc.circle('l1', 'm1', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)
    stat.circle('specstd', 'imkur', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)
    norm.circle('abssnr', 'zs', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)

    # create negative symbol source and add glyphs (crosses = negative SNR)
    if crossinds:
        sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
                                                  for (key, value) in data.iteritems()}))
        dmt.cross('time', 'dm', size='sizes', fill_color='colors', line_alpha=0.3, source=sourceneg)
        loc.cross('l1', 'm1', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
        stat.cross('specstd', 'imkur', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
        norm.cross('abssnr', 'zs', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)

    # create linked symbol source and add glyphs (outlined = linked to pngs)
    if edgeinds:
        sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
                                                   for (key, value) in data.iteritems()}))
        dmt.circle('time', 'dm', size='sizes', line_color='colors', fill_color='colors', line_alpha=0.5, fill_alpha=0.2, source=sourceedge)
        loc.circle('l1', 'm1', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
        stat.circle('specstd', 'imkur', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
        norm.circle('abssnr', 'zs', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)

    # attach hover tooltips to every panel
    hover = dmt.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
    hover = loc.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
    hover = stat.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
    hover = norm.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])

    # tap a symbol to open its candidate png, if configured
    if url_path and fileroot:
        url = '{}/cands_{}_@key.png'.format(url_path, fileroot)
        taptool = dmt.select(type=TapTool)
        taptool.callback = OpenURL(url=url)
        taptool = loc.select(type=TapTool)
        taptool.callback = OpenURL(url=url)
        taptool = stat.select(type=TapTool)
        taptool.callback = OpenURL(url=url)
        taptool = norm.select(type=TapTool)
        taptool.callback = OpenURL(url=url)

    # this approach does not preserve links between panels
    # dmt = plotdmt(data, circleinds=circleinds, crossinds=crossinds, edgeinds=edgeinds, url_path=url_path, fileroot=fileroot, tools=TOOLS) # maybe add size?
    # loc = plotloc(data, circleinds=circleinds, crossinds=crossinds, edgeinds=edgeinds, url_path=url_path, fileroot=fileroot, tools=TOOLS)
    # stat = plotstat(data, circleinds=circleinds, crossinds=crossinds, edgeinds=edgeinds, url_path=url_path, fileroot=fileroot, tools=TOOLS)
    # norm = plotnorm(data, circleinds=circleinds, crossinds=crossinds, edgeinds=edgeinds, url_path=url_path, fileroot=fileroot, tools=TOOLS)

    # arrange figures into a three-row layout
    top = Row(dmt, width=950)
    middle = Row(loc, stat, width=950)
    if noiseplot:
        bottom = Row(norm, noiseplot, width=950)
    else:
        bottom = Row(norm, width=950)
    combined = Column(top, middle, bottom, width=950)

    # save to html if a name is given, otherwise hand back the layout
    if htmlname:
        output_file(htmlname)
        save(combined)
    else:
        return combined
def plotdmt(data, circleinds=[], crossinds=[], edgeinds=[], url_path=None, fileroot=None,
            tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=950, plot_height=500):
    """ Make a light-weight dm-time figure

    Only the columns in `fields` are copied into the plot source.
    circleinds/crossinds/edgeinds select positive/negative/linked candidates;
    url_path and fileroot enable the tap-to-open-png callback.
    """
    fields = ['dm', 'time', 'sizes', 'colors', 'snrs', 'key']

    if not circleinds: circleinds = range(len(data['snrs']))

    # set axis ranges from the union of all selected candidates
    datalen = len(data['dm'])
    inds = circleinds + crossinds + edgeinds
    dm = [data['dm'][i] for i in inds]
    dm_min = min(min(dm), max(dm)/1.2)
    dm_max = max(max(dm), min(dm)*1.2)
    time = [data['time'][i] for i in inds]
    time_min = min(time)
    time_max = max(time)

    # linked (edgeinds) candidates are drawn separately, so exclude them here
    source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
                                           for (key, value) in data.iteritems() if key in fields}))
    dmt = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='Time (s; relative)',
                 y_axis_label='DM (pc/cm3)', x_range=(time_min, time_max), y_range=(dm_min, dm_max),
                 output_backend='webgl', tools=tools)
    dmt.circle('time', 'dm', size='sizes', fill_color='colors', line_color=None, fill_alpha=0.2, source=source)

    # negative-SNR candidates drawn as crosses
    if crossinds:
        sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
                                                  for (key, value) in data.iteritems() if key in fields}))
        dmt.cross('time', 'dm', size='sizes', fill_color='colors', line_alpha=0.3, source=sourceneg)

    # candidates with linked png plots drawn with an outline
    if edgeinds:
        sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
                                                   for (key, value) in data.iteritems() if key in fields}))
        dmt.circle('time', 'dm', size='sizes', line_color='colors', fill_color='colors', line_alpha=0.5, fill_alpha=0.2, source=sourceedge)

    hover = dmt.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])

    if url_path and fileroot:
        # url = '{}/cands_{}_sc@scan-seg@seg-i@candint-dm@dmind-dt@dtind.png'.format(url_path, fileroot)
        url = '{}/cands_{}_@key.png'.format(url_path, fileroot)
        taptool = dmt.select(type=TapTool)
        taptool.callback = OpenURL(url=url)

    return dmt
def plotloc(data, circleinds=[], crossinds=[], edgeinds=[], url_path=None, fileroot=None,
            tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
    """ Make a light-weight loc figure

    Plots candidate sky locations (l1, m1 in radians). Index arguments and
    url_path/fileroot behave as in plotdmt.
    """
    fields = ['l1', 'm1', 'sizes', 'colors', 'snrs', 'key']

    if not circleinds: circleinds = range(len(data['snrs']))

    # set axis ranges from the union of all selected candidates
    datalen = len(data['dm'])
    inds = circleinds + crossinds + edgeinds
    l1 = [data['l1'][i] for i in inds]
    l1_min = min(l1)
    l1_max = max(l1)
    m1 = [data['m1'][i] for i in inds]
    m1_min = min(m1)
    m1_max = max(m1)

    # linked (edgeinds) candidates are drawn separately, so exclude them here
    source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
                                           for (key, value) in data.iteritems() if key in fields}))
    loc = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='l1 (rad)', y_axis_label='m1 (rad)',
                 x_range=(l1_min, l1_max), y_range=(m1_min,m1_max), tools=tools, output_backend='webgl')
    loc.circle('l1', 'm1', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)

    # negative-SNR candidates drawn as crosses
    if crossinds:
        sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
                                                  for (key, value) in data.iteritems() if key in fields}))
        loc.cross('l1', 'm1', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)

    # candidates with linked png plots drawn with an outline
    if edgeinds:
        sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
                                                   for (key, value) in data.iteritems() if key in fields}))
        loc.circle('l1', 'm1', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)

    hover = loc.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])

    if url_path and fileroot:
        url = '{}/cands_{}_@key.png'.format(url_path, fileroot)
        taptool = loc.select(type=TapTool)
        taptool.callback = OpenURL(url=url)

    return loc
def plotstat(data, circleinds=None, crossinds=None, edgeinds=None, url_path=None, fileroot=None,
             tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
    """ Make a light-weight stat figure

    Plots spectral std vs. image kurtosis. Index arguments and
    url_path/fileroot behave as in plotdmt.
    """
    fields = ['imkur', 'specstd', 'sizes', 'colors', 'snrs', 'key']

    # bug fix: defaults were None but the index lists are concatenated below
    # (None + None raised TypeError); normalize to empty lists like the
    # sibling plot functions (plotdmt, plotloc, plotnorm) use as defaults
    crossinds = crossinds if crossinds is not None else []
    edgeinds = edgeinds if edgeinds is not None else []
    if not circleinds: circleinds = range(len(data['snrs']))

    # set axis ranges from the union of all selected candidates
    datalen = len(data['dm'])
    inds = circleinds + crossinds + edgeinds
    specstd = [data['specstd'][i] for i in inds]
    specstd_min = min(specstd)
    specstd_max = max(specstd)
    imkur = [data['imkur'][i] for i in inds]
    imkur_min = min(imkur)
    imkur_max = max(imkur)

    # linked (edgeinds) candidates are drawn separately, so exclude them here
    source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
                                           for (key, value) in data.iteritems() if key in fields}))
    stat = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='Spectral std',
                  y_axis_label='Image kurtosis', x_range=(specstd_min, specstd_max),
                  y_range=(imkur_min, imkur_max), tools=tools, output_backend='webgl')
    stat.circle('specstd', 'imkur', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)

    # negative-SNR candidates drawn as crosses
    if crossinds:
        sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
                                                  for (key, value) in data.iteritems() if key in fields}))
        stat.cross('specstd', 'imkur', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)

    # candidates with linked png plots drawn with an outline
    if edgeinds:
        sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
                                                   for (key, value) in data.iteritems() if key in fields}))
        stat.circle('specstd', 'imkur', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)

    hover = stat.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])

    if url_path and fileroot:
        url = '{}/cands_{}_@key.png'.format(url_path, fileroot)
        taptool = stat.select(type=TapTool)
        taptool.callback = OpenURL(url=url)

    return stat
def plotnorm(data, circleinds=[], crossinds=[], edgeinds=[], url_path=None, fileroot=None,
             tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
    """ Make a light-weight norm figure

    Plots observed |SNR| against the SNR expected under the normal
    probability assumption (see normprob). Index arguments and
    url_path/fileroot behave as in plotdmt.
    """
    fields = ['zs', 'sizes', 'colors', 'abssnr', 'key', 'snrs']

    if not circleinds: circleinds = range(len(data['snrs']))

    # linked (edgeinds) candidates are drawn separately, so exclude them here
    source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
                                           for (key, value) in data.iteritems() if key in fields}))
    norm = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='SNR observed',
                  y_axis_label='SNR expected', tools=tools, output_backend='webgl')
    norm.circle('abssnr', 'zs', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)

    # negative-SNR candidates drawn as crosses
    if crossinds:
        sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
                                                  for (key, value) in data.iteritems() if key in fields}))
        norm.cross('abssnr', 'zs', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)

    # candidates with linked png plots drawn with an outline
    if edgeinds:
        sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
                                                   for (key, value) in data.iteritems() if key in fields}))
        norm.circle('abssnr', 'zs', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)

    hover = norm.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])

    if url_path and fileroot:
        url = '{}/cands_{}_@key.png'.format(url_path, fileroot)
        taptool = norm.select(type=TapTool)
        taptool.callback = OpenURL(url=url)

    return norm
def plotnoise(noisepkl, mergepkl, plot_width=950, plot_height=400):
    """ Make two panel plot to summary noise analysis with estimated flux scale

    Shows the plot and returns the flux-scaled image noise values.
    """
    # close the pickle file promptly instead of leaking the open handle
    with open(mergepkl) as pkl:
        d = pickle.load(pkl)

    ndist, imstd, flagfrac = plotnoisedist(noisepkl, plot_width=plot_width/2, plot_height=plot_height)
    fluxscale = calcfluxscale(d, imstd, flagfrac)
    logger.info('Median image noise is {0:.3} Jy.'.format(fluxscale*imstd))
    ncum, imnoise = plotnoisecum(noisepkl, fluxscale=fluxscale, plot_width=plot_width/2, plot_height=plot_height)

    hndle = show(Row(ndist, ncum, width=plot_width, height=plot_height))
    return imnoise
def plotnoisecum(noisepkl, fluxscale=1, plot_width=450, plot_height=400):
    """ Merged noise pkl converted to interactive cumulative histogram

    noisepkl is standard noise pickle file.
    fluxscale is scaling applied by gain calibrator. telcal solutions have fluxscale=1.
    also returns corrected imnoise values if non-unity fluxscale provided
    """
    # cumulative fraction of images with noise above each sorted value
    noises = read_noise(noisepkl)
    imnoise = np.sort(fluxscale*noises[4])
    npts = len(imnoise)
    frac = [float(npts - ii)/npts for ii in range(npts)]

    noiseplot = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="above",
                       x_axis_label='Image noise (Jy; cal scaling {0:.3})'.format(fluxscale),
                       y_axis_label='Cumulative fraction', tools='pan, wheel_zoom, reset')
    noiseplot.line(imnoise, frac)

    # with a real flux scale, also hand back the calibrated noise values
    if fluxscale != 1:
        return noiseplot, imnoise
    else:
        return noiseplot
def plotnoisedist(noisepkl, plot_width=450, plot_height=400):
    """ Plot image noise vs. flag fraction from a merged noise pickle.

    Returns (figure, median image noise, median flag fraction).
    """

    # plot noise and flag distributions
    scans, segments, noiseperbl, flagfrac, imstd = read_noise(noisepkl)
    pl = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="above",
                x_axis_label='Flag fraction', y_axis_label='Image noise (sys)', tools='pan, wheel_zoom, reset')
    pl.cross(flagfrac, imstd, line_alpha=0.2, color='blue')

    # find medians and mark the median point prominently in red
    flagfrac_med = np.median(flagfrac)
    imstd_med = np.median(imstd)
    logger.info('Median image noise (sys) = {0}'.format(imstd_med))
    logger.info('Median flag fraction = {0}'.format(flagfrac_med))
    pl.cross(flagfrac_med, imstd_med, line_alpha=1, size=40, color='red')

    # estimate number of zero noise images (fully flagged or failed segments)
    zeronoisefrac = float(len(np.where(imstd == 0.)[0]))/len(imstd)
    logger.info('{0:.0%} of noise images are zeros'.format(zeronoisefrac))

    return pl, imstd_med, flagfrac_med
def calcfluxscale(d, imstd_med, flagfrac_med):
    """ Given state dict and noise properties, estimate flux scale at the VLA

    imstd and flagfrac are expected to be median (typical) values from sample in merged noise pkl.
    """

    # radiometer-equation helpers and per-band VLA parameters
    def _sensitivity(sefd, dt, bw, eta, nbl, npol):
        return sefd/(eta*np.sqrt(nbl*2 * dt * bw * npol))

    def _nbl(nant):
        return nant*(nant-1)/2

    eta = {'L': 0.92, 'S': 0.92, 'C': 0.8, 'X': 0.8}  # correlator efficiency
    sefd = {'L': 420, 'S': 370, 'C': 310, 'X': 250}  # fixed to match exposure calculator int time to 100 microJy.

    bw = sum([d['spw_nchan_select'][i]*d['spw_chansize'][i] for i in range(len(d['spw_chansize']))])
    dt = d['inttime']
    npol = d['npol']
    nant = d['nants']
    freq = d['freq'][0]

    # pick the receiver band from the first channel frequency
    band = None
    for (lo, hi, name) in [(1, 2, 'L'), (2, 4, 'S'), (4, 8, 'C'), (8, 12, 'X')]:
        if lo <= freq < hi:
            band = name
            break
    if band is None:
        logger.warn('first channel freq ({0}) not in bands L, S, C, or X. Assuming L band.'.format(freq))
        band = 'L'

    goodfrac = 1 - flagfrac_med  # correct for flagged data
    slim_theory = _sensitivity(sefd[band], dt, bw, eta[band], goodfrac*_nbl(nant), npol)
    fluxscale = slim_theory/imstd_med

    return fluxscale
def readdata(mergepkl=None, d=None, cands=None, sizerange=(2,70)):
    """ Converts candidate data to dictionary for bokeh

    Can take merged pkl file or d/cands as read separately.
    cands is an optional (loc, prop) tuple of numpy arrays.
    Returns a dict of per-candidate columns (snrs, dm, l1, m1, time, ...)
    plus derived plotting columns (sizes, colors, key, zs, abssnr).
    """

    # get cands from pkl, or use the (loc, prop) arrays passed in
    if mergepkl:
        logger.info('Reading {0}'.format(mergepkl))
        loc, prop, d = read_candidates(mergepkl, returnstate=True)
    elif d and cands:
        logger.info('Using provided d/cands')
        loc, prop = cands

    # define columns to extract; prefer snr2 over snr1 when both were saved
    if 'snr2' in d['features']:
        snrcol = d['features'].index('snr2')
    elif 'snr1' in d['features']:
        snrcol = d['features'].index('snr1')
    l1col = d['features'].index('l1')
    m1col = d['features'].index('m1')
    specstdcol = d['features'].index('specstd')
    imkurcol = d['features'].index('imkurtosis')
    dtindcol = d['featureind'].index('dtind')
    dmindcol = d['featureind'].index('dmind')
    intcol = d['featureind'].index('int')
    segmentcol = d['featureind'].index('segment')
    scancol = d['featureind'].index('scan')

    # define data to plot; key encodes the candidate location for png lookup
    key = ['sc{0}-seg{1}-i{2}-dm{3}-dt{4}'.format(ll[scancol], ll[segmentcol], ll[intcol], ll[dmindcol], ll[dtindcol]) for ll in loc]
    # key = [tuple(ll) for ll in loc]
    scan = loc[:, scancol]
    seg = loc[:, segmentcol]
    candint = loc[:, 2]
    dmind = loc[:, 3]
    dtind = loc[:, 4]
    beamnum = loc[:, 5]

    logger.info('Setting columns...')
    snrs = prop[:, snrcol]
    abssnr = np.abs(prop[:, snrcol])
    dm = np.array(d['dmarr'])[loc[:, dmindcol]]
    l1 = prop[:, l1col]
    m1 = prop[:, m1col]
    # absolute time (s) of each candidate from its segment start time
    time = np.array([24*3600*d['segmenttimesdict'][scan[i]][seg[i], 0] + d['inttime']*candint[i] for i in range(len(loc))])
    # time.append(24*3600*d['segmenttimesdict'][k[scancol]][k[segmentcol],0] + d['inttime']*k[intcol])
    specstd = prop[:, specstdcol]
    imkur = prop[:, imkurcol]

    logger.info('Calculating sizes, colors, normprob...')
    # times plotted relative to the earliest candidate
    time = time - min(time)
    sizes = calcsize(snrs)
    colors = colorsat(l1, m1)
    zs = normprob(d, snrs)

    # if pandas is available use dataframe to allow datashader feature
    # data = DataFrame(data={'snrs': snrs, 'dm': dm, 'l1': l1, 'm1': m1, 'time': time, 'specstd': specstd,
    #                        'imkur': imkur, 'scan': scan, 'seg': seg, 'candint': candint, 'dmind': dmind,
    #                        'dtind': dtind, 'sizes': sizes, 'colors': colors, 'key': key, 'zs': zs, 'abssnr': abssnr})
    # logger.info('Returning a pandas dataframe')
    data = dict(snrs=snrs, dm=dm, l1=l1, m1=m1, time=time, specstd=specstd, scan=scan,
                imkur=imkur, sizes=sizes, colors=colors, key=key, zs=zs, abssnr=abssnr)
    #            dtind=dtind, scan=scan, seg=seg, candint=candint, dmind=dmind,

    return data
def findhight(data, ignoret=None, threshold=20):
    """ Find bad time ranges from distribution of candidates.

    ignoret is list of tuples [(t0, t1), (t2, t3)] defining ranges to ignore.
    threshold is made above std of candidate distribution in time.
    Returns the time (in seconds) and counts for bins above threshold.
    """
    times = np.sort(data['time'])

    # boolean pass filter; drop any candidate inside an ignore window
    keep = np.ones(len(times), dtype=bool)
    if ignoret:
        for (start, stop) in ignoret:
            keep &= (times < start) | (times > stop)

    # histogram kept candidates into 1-second bins and flag outlier bins
    binned = np.round(times[keep]).astype('int')
    counts = np.bincount(binned)
    high = np.where(counts > np.median(counts) + threshold*counts.std())[0]

    return high, counts[high]
def calcinds(data, threshold, ignoret=None):
    """ Find indexes for data above (or below) given threshold.

    threshold >= 0 selects candidates with snr > threshold; a negative
    threshold selects snr < threshold (negative candidates).
    ignoret is an optional list of (t0, t1) integer-second ranges; candidates
    whose rounded time falls inside any range are excluded.
    """
    inds = []
    for i in range(len(data['time'])):
        snr = data['snrs'][i]
        time = data['time'][i]
        # single predicate replaces two duplicated branches
        if (threshold >= 0 and snr > threshold) or (threshold < 0 and snr < threshold):
            if ignoret:
                tbin = np.round(time).astype(int)
                # excluded if the candidate's time bin is in any ignore range
                # (the old debug log here read comprehension variables that
                # only leak in Python 2, so it was dropped)
                excluded = [t0 for (t0, t1) in ignoret if tbin in range(t0, t1)]
                if not excluded:
                    inds.append(i)
            else:
                inds.append(i)
    return inds
def calcontime(data, inds=None):
    """ Given indices of good times, calculate total time per scan with indices. """
    if not inds:
        inds = range(len(data['time']))
        logger.info('No indices provided. Assuming all are valid.')

    # sum the (max - min) time span of the selected candidates per scan
    total = 0.
    for scan in set(data['scan'][i] for i in inds):
        times = [data['time'][i] for i in inds if data['scan'][i] == scan]
        total += max(times) - min(times)

    return total
def normprob(d, snrs, inds=None, version=2):
    """ Uses observed SNR distribution to calculate normal probability SNR

    Uses state dict to calculate number of trials.
    snrs is list of all snrs in distribution.
    version used to toggle for tests. version 2 is fastest and returns zeros for filtered snr values.
    Returns list of expected snr given each input value's frequency of occurrence via the normal probability assumption
    """
    if not inds: inds = range(len(snrs))

    # define norm quantile functions
    Z = lambda quan: np.sqrt(2)*erfinv( 2*quan - 1)
    quan = lambda ntrials, i: (ntrials + 1/2. - i)/ntrials

    # calc number of trials searched: pixels x integrations x DMs x dt factor
    npix = d['npixx']*d['npixy']
    # membership test replaces dict.has_key, which was removed in Python 3
    if 'goodintcount' in d:
        nints = d['goodintcount']
    else:
        nints = d['nints']
    ndms = len(d['dmarr'])
    dtfactor = np.sum([1./i for i in d['dtarr']])  # assumes dedisperse-all algorithm
    ntrials = npix*nints*ndms*dtfactor
    logger.info('Calculating normal probability distribution for npix*nints*ndms*dtfactor = %d' % (ntrials))

    # calc normal quantile
    if version == 2:
        # purely sort and numpy-based: rank positives descending and
        # negatives ascending, then map ranks back to the original order
        sortinds = np.argsort(snrs[inds])
        lenpos = len(np.where(snrs[inds] >= 0)[0])
        lenneg = len(np.where(snrs[inds] < 0)[0])
        unsortinds = np.zeros(len(sortinds), dtype=int)
        unsortinds[sortinds] = np.arange(len(sortinds))
        rank = np.concatenate( (np.arange(1, lenneg+1), np.arange(1, lenpos+1)[::-1]) )
        logger.debug('{} {}'.format(rank, sortinds))
        zval = Z(quan(ntrials, rank[unsortinds]))
        if inds != range(len(snrs)):  # add zeros for filtered data to match length to original snr array
            zval = np.array([zval[inds.index(i)] if i in inds else 0 for i in range(len(snrs))])
    elif version == 1:
        # numpy array based
        snrpos = snrs[inds][np.where(snrs[inds] > 0)]
        snrneg = snrs[inds][np.where(snrs[inds] < 0)]
        snrsortpos = np.sort(snrpos)[::-1]
        snrsortneg = np.sort(snrneg)
        logger.debug('Sorted pos/neg SNRs')
        # NOTE(review): unlike versions 0/2, filtered values are skipped here
        # rather than zero-padded, so the output can be shorter than snrs
        zval = []
        for i,snr in enumerate(snrs):
            if i in inds:
                if snr in snrsortpos:
                    zval.append(Z(quan(ntrials, np.where(snr == snrsortpos)[0][0]+1)))
                elif snr in snrsortneg:
                    zval.append(Z(quan(ntrials, np.where(snr == snrsortneg)[0][0]+1)))
    elif version == 0:
        # list based
        snrsortpos = []
        snrsortneg = []
        for i in inds:
            if snrs[i] > 0:
                snrsortpos.append(snrs[i])
            elif snrs[i] < 0:
                snrsortneg.append(abs(snrs[i]))
        snrsortpos = sorted(snrsortpos, reverse=True)
        snrsortneg = sorted(snrsortneg, reverse=True)
        logger.debug('Sorted pos/neg SNRs')
        zval = []
        for (i, snr) in enumerate(snrs):
            if snr >= 0 and i in inds:
                zval.append(Z(quan(ntrials, snrsortpos.index(snr)+1)))
            elif snr < 0 and i in inds:
                zval.append(Z(quan(ntrials, snrsortneg.index(abs(snr))+1)))
            else:
                zval.append(0)
    return zval
def calcsize(values, sizerange=(2,70), inds=None, plaw=3):
    """ Use set of values to calculate symbol size.

    values is a list of floats for candidate significance.
    inds is an optional list of indexes to use to calculate symbol size.
    Scaling of symbol size min max set by sizerange tuple (min, max).
    plaw is powerlaw scaling of symbol size from values
    """
    if inds:
        smax = max([abs(values[i]) for i in inds])
        smin = min([abs(values[i]) for i in inds])
    else:
        smax = max([abs(val) for val in values])
        smin = min([abs(val) for val in values])

    if smax == smin:
        # degenerate distribution: use the midpoint size for every symbol
        # instead of dividing by zero below
        mid = (sizerange[0] + sizerange[1]) / 2.
        return [mid for _ in values]

    # bug fix: map sizes into [min, max]; the old formula was
    # min + max * frac, which overshoots to min + max at the top end
    return [sizerange[0] + (sizerange[1] - sizerange[0]) *
            ((abs(val) - smin) / (smax - smin)) ** plaw for val in values]
def colorsat(l, m):
    """ Returns color for given l,m
    Designed to look like a color wheel that is more saturated in middle.

    l, m are equal-length sequences of sky coordinates; returns a list of
    "#rrggbb"-style hex strings (the channel can exceed two digits when the
    scaled amplitude reaches 256, preserving historical output).
    """
    lm = np.zeros(len(l), dtype='complex')
    lm.real = l
    lm.imag = m
    # Hue from the phase angle; three channels offset by ~120 degrees
    # (2*3.14/3 kept rather than 2*pi/3 to preserve historical colors).
    red = 0.5*(1+np.cos(np.angle(lm)))
    green = 0.5*(1+np.cos(np.angle(lm) + 2*3.14/3))
    blue = 0.5*(1+np.cos(np.angle(lm) - 2*3.14/3))
    # Brightness scales with distance from the phase center.
    amp = 256*np.abs(lm)/np.abs(lm).max()
    # int() cast fixes Python 3, where "%x" rejects floats (np.floor returns
    # float64); int(np.floor(x)) == historical value for these x >= 0.
    return ["#%02x%02x%02x" % (int(np.floor(amp[i]*red[i])),
                               int(np.floor(amp[i]*green[i])),
                               int(np.floor(amp[i]*blue[i])))
            for i in range(len(l))]
def filterdata(data, plinds, d, threshold, ignorestr):
    """ Iteratively filter bad times and set indices for later plotting """
    logger.info('Ignoring times from ignorestr {0}'.format(ignorestr))
    ignoret = parseignoret(ignorestr)

    sig_image = d['sigma_image1']
    sig_plot = d['sigma_plot']
    plinds['cir'] = calcinds(data, sig_image, ignoret=ignoret)       # positive cands
    plinds['cro'] = calcinds(data, -1 * sig_image, ignoret=ignoret)  # negative cands
    plinds['edg'] = calcinds(data, sig_plot, ignoret=ignoret)        # cands with png plots
    selected = sorted(set(plinds['cir'] + plinds['cro'] + plinds['edg']))

    logger.info('Selected {} ({} linked) points.'.format(len(selected), len(plinds['edg'])))
    logger.info('Estimated total on target time: {} s\n'.format(calcontime(
        data, inds=plinds['cir'] + plinds['cro'] + plinds['edg'])))

    # significances must get rescaled when cands are ignored
    data['zs'] = normprob(d, data['snrs'], inds=selected)

    # report high 1s bin counts
    logger.info('Finding high 1-second bins with threshold {0}'.format(threshold))
    hight, highcount = findhight(data, ignoret=ignoret, threshold=threshold)
    if len(hight):
        logger.info('High times \t High counts:')
        for i in range(len(hight)):
            logger.info('{0}\t{1}'.format(hight[i], highcount[i]))
    else:
        logger.info('No high 1s bin counts.\n')

    # report top-5 |snr| candidates and their times, strongest first
    biginds = np.argsort(data['abssnr'][selected])[-5:]
    logger.info('Top 5 abs(snr) candidates and times:')
    for ind in biginds[::-1]:
        logger.info('{0} {1}'.format(data['snrs'][selected][ind], data['time'][selected][ind]))
    logger.info('\n')
def parseignoret(ignorestr):
    """ Parse a comma-delimited string of integers into (start, stop) pairs.

    e.g. '10,20,50,60' -> [(10, 20), (50, 60)]. A string with no comma
    (including the empty string) yields []. Raises AssertionError when the
    number of comma-separated values is odd.
    """
    if ',' in ignorestr:
        ignorelist = ignorestr.split(',')
        # fixed message typo: "ignorestr be pairs" -> "ignorestr must be pairs"
        assert (len(ignorelist)/2.).is_integer(), 'ignorestr must be pairs of comma-delimited values.'
        ignoret = [(int(ignorelist[i]), int(ignorelist[i+1])) for i in range(0, len(ignorelist), 2)]
    else:
        ignoret = []
    return ignoret
def displayplot(data, plinds, plottype, scaling, fileroot, url_path='http://www.aoc.nrao.edu/~claw/plots'):
    """ Generate interactive plot """
    plotters = {'dmt': plotdmt, 'norm': plotnorm,
                'loc': plotloc, 'stat': plotstat,
                'all': plotall}
    dimensions = {'dmt': [900, 500], 'norm': [700, 700], 'loc': [700, 700],
                  'stat': [700, 700]}

    selected = sorted(set(plinds['cir'] + plinds['cro'] + plinds['edg']))
    # scaling is '<datakey>_<powerlaw>', e.g. 'zs_3'
    sizesrc, plaw = scaling.split('_')
    data['sizes'] = calcsize(data[sizesrc], inds=selected, plaw=int(plaw))

    if plottype != 'all':
        width, height = dimensions[plottype]
        pl = plotters[plottype](data, circleinds=plinds['cir'], crossinds=plinds['cro'],
                                edgeinds=plinds['edg'], url_path=url_path,
                                fileroot=fileroot, plot_width=width, plot_height=height)
    else:
        pl = plotall(data, circleinds=plinds['cir'], crossinds=plinds['cro'],
                     edgeinds=plinds['edg'], url_path=url_path,
                     fileroot=fileroot)
    hdl = show(pl)
def addclassifications(agdir, prop, version=None, statfeats=[0, 4, 5, 6, 7, 8]):
    """ Calculates real score probability of prop from an activegit repo.

    version is string name of activegit tag.
    Default agdir initialization will have latest tag, so version is optional.
    statfeats set to work with alnotebook naming.
    NOTE: statfeats is a mutable default kept for interface compatibility;
    it is only read, never mutated.
    Returns [] on any classification/repo failure (best-effort behavior).
    """
    try:
        ag = activegit.ActiveGit(agdir)
        if version:
            ag.set_version(version)
        clf = ag.classifier
        # column 1 of predict_proba is the probability of the "real" class
        score = clf.predict_proba((np.nan_to_num(prop[:,statfeats])))[:,1]
        return score
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        logger.info('Failure when parsing activegit repo or applying classification.\n{0}'.format(sys.exc_info()[0]))
        return []
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The metaclasses used by the mojo python bindings for interfaces.
It is split from mojo_bindings.reflection because it uses some generated code
that would create a cyclic dependency.
"""
import logging
import sys
# pylint: disable=F0401
import interface_control_messages_mojom
import mojo_bindings.messaging as messaging
import mojo_bindings.promise as promise
import mojo_bindings.reflection as reflection
import mojo_bindings.serialization as serialization
import mojo_system
class MojoInterfaceType(type):
  """Meta class for interfaces.

  Usage:
    class MyInterface(object):
      __metaclass__ = MojoInterfaceType
      DESCRIPTOR = {
        'fully_qualified_name': 'service::MyInterface',
        'version': 3,
        'methods': [
          {
            'name': 'FireAndForget',
            'ordinal': 0,
            'parameters': [
              SingleFieldGroup('x', _descriptor.TYPE_INT32, 0, 0),
            ]
          },
          {
            'name': 'Ping',
            'ordinal': 1,
            'parameters': [
              SingleFieldGroup('x', _descriptor.TYPE_INT32, 0, 0),
            ],
            'responses': [
              SingleFieldGroup('x', _descriptor.TYPE_INT32, 0, 0),
            ],
          },
        ],
      }
  """

  def __new__(mcs, name, bases, dictionary):
    # If one of the base class is already an interface type, do not edit the
    # class. Only the original interface declaration processes DESCRIPTOR;
    # generated proxy/stub classes subclass the interface and pass through.
    for base in bases:
      if isinstance(base, mcs):
        return type.__new__(mcs, name, bases, dictionary)
    descriptor = dictionary.pop('DESCRIPTOR', {})
    methods = [_MethodDescriptor(x) for x in descriptor.get('methods', [])]
    # Each declared method defaults to raising NotImplementedError until an
    # implementation or generated proxy provides it.
    for method in methods:
      dictionary[method.name] = _NotImplemented
    fully_qualified_name = descriptor['fully_qualified_name']
    interface_manager = InterfaceManager(
        fully_qualified_name, descriptor['version'], methods)
    dictionary.update({
        'manager': None,
        '_interface_manager': interface_manager,
    })
    interface_class = type.__new__(mcs, name, bases, dictionary)
    # Back-link so the manager can build proxy/stub subclasses of this class.
    interface_manager.interface_class = interface_class
    return interface_class

  @property
  def manager(cls):
    # Class-level accessor for the InterfaceManager created in __new__.
    return cls._interface_manager

  # Prevent adding new attributes, or mutating constants.
  def __setattr__(cls, key, value):
    raise AttributeError('can\'t set attribute')

  # Prevent deleting constants.
  def __delattr__(cls, key):
    raise AttributeError('can\'t delete attribute')
class InterfaceManager(object):
  """
  Manager for an interface class. The manager contains the operation that allows
  to bind an implementation to a pipe, or to generate a proxy for an interface
  over a pipe.
  """

  def __init__(self, name, version, methods):
    self.name = name
    self.version = version
    self.methods = methods
    self.interface_class = None  # set by MojoInterfaceType.__new__
    self._proxy_class = None  # lazily built in _InternalProxy, then cached
    self._stub_class = None  # lazily built in _Stub, then cached

  def Proxy(self, handle, version=0):
    """Create a proxy for this interface over the message pipe |handle|."""
    router = messaging.Router(handle)
    error_handler = _ProxyErrorHandler()
    router.SetErrorHandler(error_handler)
    router.Start()
    return self._InternalProxy(router, error_handler, version)

  # pylint: disable=W0212
  def Bind(self, impl, handle):
    """Bind the implementation |impl| to the message pipe |handle|."""
    router = messaging.Router(handle)
    router.SetIncomingMessageReceiver(self._Stub(impl))
    error_handler = _ProxyErrorHandler()
    router.SetErrorHandler(error_handler)

    # Retain the router, until an error happens.
    retainer = _Retainer(router)
    def Cleanup(_):
      retainer.release()
    error_handler.AddCallback(Cleanup)

    # Give an instance manager to the implementation to allow it to close
    # the connection.
    impl.manager = InstanceManager(self, router, error_handler)

    router.Start()

  def NewRequest(self):
    """Return a (proxy, InterfaceRequest) pair over a fresh message pipe."""
    pipe = mojo_system.MessagePipe()
    return (self.Proxy(pipe.handle0), reflection.InterfaceRequest(pipe.handle1))

  def _InternalProxy(self, router, error_handler, version):
    if error_handler is None:
      error_handler = _ProxyErrorHandler()
    if not self._proxy_class:
      # Build (once) a proxy subclass with one generated method per
      # interface method.
      dictionary = {
          '__module__': __name__,
          '__init__': _ProxyInit,
      }
      for method in self.methods:
        dictionary[method.name] = _ProxyMethodCall(method)
      self._proxy_class = type(
          '%sProxy' % self.name,
          (self.interface_class, reflection.InterfaceProxy),
          dictionary)
    proxy = self._proxy_class(router, error_handler)
    # Give an instance manager to the proxy to allow to close the connection.
    proxy.manager = ProxyInstanceManager(
        self, proxy, router, error_handler, version)
    return proxy

  def _Stub(self, impl):
    if not self._stub_class:
      # Build (once) a stub class whose Accept dispatches messages to the
      # user implementation; the same method handles both accept variants.
      accept_method = _StubAccept(self.methods)
      dictionary = {
          '__module__': __name__,
          '__init__': _StubInit,
          'Accept': accept_method,
          'AcceptWithResponder': accept_method,
      }
      self._stub_class = type('%sStub' % self.name,
                              (messaging.MessageReceiverWithResponder,),
                              dictionary)
    return self._stub_class(impl)
class InstanceManager(object):
  """
  Manager for the implementation of an interface or a proxy. The manager allows
  to control the connection over the pipe.
  """

  def __init__(self, interface_manager, router, error_handler):
    self.interface_manager = interface_manager
    self._router = router
    self._error_handler = error_handler
    assert self._error_handler is not None

  def Close(self):
    """Close the connection: fire close callbacks, then close the router."""
    handler = self._error_handler
    handler.OnClose()
    self._router.Close()

  def PassMessagePipe(self):
    """Detach and return the underlying message pipe from the router."""
    self._error_handler.OnClose()
    return self._router.PassMessagePipe()

  def AddOnErrorCallback(self, callback):
    """Register |callback| to run on pipe error (not on local close)."""
    def _OnError(_unused_exception):
      callback()
    self._error_handler.AddCallback(_OnError, False)
class ProxyInstanceManager(InstanceManager):
  """
  Manager for the implementation of a proxy. The manager allows to control the
  connection over the pipe.
  """

  def __init__(self, interface_manager, proxy, router, error_handler, version):
    super(ProxyInstanceManager, self).__init__(
        interface_manager, router, error_handler)
    self.proxy = proxy
    self.version = version
    # Pre-built callables for the two control messages every interface
    # understands (defined by interface_control_messages_mojom).
    self._run_method = _ProxyMethodCall(_BaseMethodDescriptor(
        'Run',
        interface_control_messages_mojom.RUN_MESSAGE_ID,
        interface_control_messages_mojom.RunMessageParams,
        interface_control_messages_mojom.RunResponseMessageParams))
    self._run_or_close_pipe_method = _ProxyMethodCall(_BaseMethodDescriptor(
        'RunOrClosePipe',
        interface_control_messages_mojom.RUN_OR_CLOSE_PIPE_MESSAGE_ID,
        interface_control_messages_mojom.RunOrClosePipeMessageParams,
        None))

  def QueryVersion(self):
    """Ask the remote endpoint for its version.

    Returns a promise resolving with the version; also caches it on
    self.version.
    """
    params = interface_control_messages_mojom.RunMessageParams()
    # reserved0/reserved1 values mirror the other control-message senders in
    # this module (see _RunMessage / RequireVersion).
    params.reserved0 = 16
    params.reserved1 = 0
    params.query_version = (
        interface_control_messages_mojom.QueryVersion())
    def ToVersion(r):
      self.version = r.query_version_result.version
      return self.version
    return self._run_method(self.proxy, **params.AsDict()).Then(ToVersion)

  def RequireVersion(self, version):
    """Request that the remote endpoint close the pipe unless it supports
    |version|; optimistically records |version| locally."""
    if self.version >= version:
      return
    self.version = version
    params = interface_control_messages_mojom.RunOrClosePipeMessageParams()
    params.reserved0 = 16
    params.reserved1 = 0
    params.require_version = interface_control_messages_mojom.RequireVersion()
    params.require_version.version = version
    return self._run_or_close_pipe_method(self.proxy, **params.AsDict())
class _BaseMethodDescriptor(object):
def __init__(self, name, ordinal, parameters_struct, response_struct):
self.name = name
self.ordinal = ordinal
self.parameters_struct = parameters_struct
self.response_struct = response_struct
class _MethodDescriptor(_BaseMethodDescriptor):
  """Method descriptor built from one entry of a DESCRIPTOR['methods'] list;
  synthesizes the request/response struct classes from the field groups."""

  def __init__(self, descriptor):
    method_name = descriptor['name']
    request_struct = _ConstructParameterStruct(
        descriptor['parameters'], method_name, "Parameters")
    # 'responses' is optional: absent means a fire-and-forget method.
    response_struct = _ConstructParameterStruct(
        descriptor.get('responses'), method_name, "Responses")
    super(_MethodDescriptor, self).__init__(
        method_name, descriptor['ordinal'], request_struct, response_struct)
def _ConstructParameterStruct(descriptor, name, suffix):
  """Create a mojo struct type named '<name><suffix>' for the given field
  descriptor, or return None when there is no descriptor (no response)."""
  if descriptor is None:
    return None
  class_name = '%s%s' % (name, suffix)
  return reflection.MojoStructType(
      class_name,
      (object,),
      {
          '__metaclass__': reflection.MojoStructType,
          '__module__': __name__,
          'DESCRIPTOR': descriptor,
      })
class _ProxyErrorHandler(messaging.ConnectionErrorHandler):
  """Error handler for a proxy's message pipe.

  Dispatches a pipe error (or local close) to registered callbacks exactly
  once; afterwards _callbacks is None and further registrations are ignored.
  """

  def __init__(self):
    messaging.ConnectionErrorHandler.__init__(self)
    # Maps callback -> call_on_close flag; None once error/close dispatched.
    self._callbacks = dict()

  def OnError(self, result):
    if self._callbacks is None:
      return
    exception = messaging.MessagingException('Mojo error: %d' % result)
    # Iterate over a snapshot: invoked callbacks may unregister themselves
    # (e.g. a promise's reject calls RemoveCallback in its finally clause),
    # which would otherwise mutate the dict during iteration.
    for (callback, _) in list(self._callbacks.items()):
      callback(exception)
    self._callbacks = None

  def OnClose(self):
    if self._callbacks is None:
      return
    exception = messaging.MessagingException('Router has been closed.')
    # Snapshot for the same reason as in OnError.
    for (callback, call_on_close) in list(self._callbacks.items()):
      if call_on_close:
        callback(exception)
    self._callbacks = None

  def AddCallback(self, callback, call_on_close=True):
    if self._callbacks is not None:
      self._callbacks[callback] = call_on_close

  def RemoveCallback(self, callback):
    if self._callbacks:
      del self._callbacks[callback]
class _Retainer(object):
# Set to force instances to be retained.
_RETAINED = set()
def __init__(self, retained):
self._retained = retained
_Retainer._RETAINED.add(self)
def release(self):
self._retained = None
_Retainer._RETAINED.remove(self)
def _ProxyInit(self, router, error_handler):
  """__init__ implementation installed on generated proxy classes.

  router: the messaging.Router wrapping the message pipe.
  error_handler: the _ProxyErrorHandler registered on that router.
  """
  self._router = router
  self._error_handler = error_handler
# pylint: disable=W0212
def _ProxyMethodCall(method):
  """Build the proxy-side implementation of |method|.

  Returns a function that serializes its arguments into a mojo message,
  sends it over the proxy's router and returns a promise. For methods with
  a response the promise resolves with the deserialized response (or the
  bare value for single-field responses); fire-and-forget methods resolve
  with None on successful send.
  """
  flags = messaging.NO_FLAG
  if method.response_struct:
    flags = messaging.MESSAGE_EXPECTS_RESPONSE_FLAG
  def _Call(self, *args, **kwargs):
    def GenerationMethod(resolve, reject):
      message = _GetMessage(method, flags, None, *args, **kwargs)
      if method.response_struct:
        def Accept(message):
          # Invoked by the router with the response message.
          try:
            assert message.header.message_type == method.ordinal
            payload = message.payload
            response = method.response_struct.Deserialize(
                serialization.RootDeserializationContext(payload.data,
                                                         payload.handles))
            as_dict = response.AsDict()
            if len(as_dict) == 1:
              # Single-field responses resolve with the bare field value.
              # NOTE(review): .values()[0] is Python 2 only (dict views are
              # not indexable on Python 3) — consistent with this module's
              # other Python 2 idioms.
              value = as_dict.values()[0]
              if not isinstance(value, dict):
                response = value
            resolve(response)
            return True
          except Exception as e:
            # Adding traceback similarly to python 3.0 (pep-3134)
            e.__traceback__ = sys.exc_info()[2]
            reject(e)
            return False
          finally:
            # The pipe-error rejection hook is no longer needed either way.
            self._error_handler.RemoveCallback(reject)
        # Reject the promise if the pipe errors before a response arrives.
        self._error_handler.AddCallback(reject)
        if not self._router.AcceptWithResponder(
            message, messaging.ForwardingMessageReceiver(Accept)):
          self._error_handler.RemoveCallback(reject)
          reject(messaging.MessagingException("Unable to send message."))
      else:
        if (self._router.Accept(message)):
          resolve(None)
        else:
          reject(messaging.MessagingException("Unable to send message."))
    return promise.Promise(GenerationMethod)
  return _Call
def _GetMessageWithStruct(struct, ordinal, flags, request_id):
  """Serialize |struct| behind a message header into a messaging.Message."""
  if request_id is None:
    request_id = 0
  header = messaging.MessageHeader(ordinal, flags, request_id)
  data = header.Serialize()
  (payload, handles) = struct.Serialize()
  data.extend(payload)
  return messaging.Message(data, handles, header)
def _GetMessage(method, flags, request_id, *args, **kwargs):
  """Build the request (or response) message for an invocation of |method|."""
  is_response = flags == messaging.MESSAGE_IS_RESPONSE_FLAG
  struct_class = method.response_struct if is_response else method.parameters_struct
  struct = struct_class(*args, **kwargs)
  return _GetMessageWithStruct(struct, method.ordinal, flags, request_id)
def _StubInit(self, impl):
  """__init__ implementation installed on generated stub classes; |impl| is
  the user-provided interface implementation the stub dispatches to."""
  self.impl = impl
def _StubAccept(methods):
  """Build the Accept/AcceptWithResponder implementation for a stub class.

  Dispatches incoming messages to the implementation's method matching the
  header's ordinal; interface control messages (Run / RunOrClosePipe) are
  handled internally. Returns True on success, False (after closing the
  connection) on any failure.
  """
  methods_by_ordinal = dict((m.ordinal, m) for m in methods)
  def Accept(self, message, responder=None):
    try:
      header = message.header
      assert header.expects_response == bool(responder)
      # Control messages take precedence over interface methods.
      if header.message_type == interface_control_messages_mojom.RUN_MESSAGE_ID:
        return _RunMessage(self.impl.manager, message, responder)
      if (header.message_type ==
          interface_control_messages_mojom.RUN_OR_CLOSE_PIPE_MESSAGE_ID):
        return _RunMessageOrClosePipe(self.impl.manager, message)
      assert header.message_type in methods_by_ordinal
      method = methods_by_ordinal[header.message_type]
      payload = message.payload
      parameters = method.parameters_struct.Deserialize(
          serialization.RootDeserializationContext(
              payload.data, payload.handles)).AsDict()
      # Call the user implementation with the request fields as kwargs.
      response = getattr(self.impl, method.name)(**parameters)
      if header.expects_response:
        @promise.async
        def SendResponse(response):
          # The implementation may return a dict of response fields or a
          # bare value (single-field responses).
          if isinstance(response, dict):
            response_message = _GetMessage(method,
                                           messaging.MESSAGE_IS_RESPONSE_FLAG,
                                           header.request_id,
                                           **response)
          else:
            response_message = _GetMessage(method,
                                           messaging.MESSAGE_IS_RESPONSE_FLAG,
                                           header.request_id,
                                           response)
          return responder.Accept(response_message)
        p = SendResponse(response)
        if self.impl.manager:
          # Close the connection in case of error.
          p.Catch(lambda _: self.impl.manager.Close())
      return True
    # pylint: disable=W0702
    except:
      # Close the connection in case of error.
      logging.warning(
          'Error occured in accept method. Connection will be closed.')
      logging.debug("Exception", exc_info=True)
      if self.impl.manager:
        self.impl.manager.Close()
      return False
  return Accept
def _RunMessage(manager, message, responder):
  """Answer a Run control message by reporting this interface's version."""
  version_result = interface_control_messages_mojom.QueryVersionResult()
  version_result.version = manager.interface_manager.version
  params = interface_control_messages_mojom.RunResponseMessageParams()
  params.reserved0 = 16
  params.reserved1 = 0
  params.query_version_result = version_result
  reply = _GetMessageWithStruct(
      params,
      interface_control_messages_mojom.RUN_MESSAGE_ID,
      messaging.MESSAGE_IS_RESPONSE_FLAG,
      message.header.request_id)
  return responder.Accept(reply)
def _RunMessageOrClosePipe(manager, message):
  """Handle a RunOrClosePipe control message.

  Returns False (causing the pipe to close) when the required version is
  newer than what this interface supports.
  """
  payload = message.payload
  context = serialization.RootDeserializationContext(payload.data,
                                                     payload.handles)
  params_type = interface_control_messages_mojom.RunOrClosePipeMessageParams
  query = params_type.Deserialize(context)
  return query.require_version.version <= manager.interface_manager.version
def _NotImplemented(*_1, **_2):
  """Placeholder installed for each declared interface method; implementations
  are expected to override it."""
  raise NotImplementedError()
| |
from __future__ import unicode_literals, division, absolute_import
import os
from datetime import datetime
from datetime import timedelta
from netrc import netrc, NetrcParseError
import logging
import base64
from flexget import plugin, validator
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
from flexget.utils.tools import parse_timedelta
from flexget.config_schema import one_or_more
from fnmatch import fnmatch
log = logging.getLogger('transmission')
def save_opener(f):
    """
    Transmissionrpc sets a new default opener for urllib2
    We use this as a decorator to capture and restore it when needed

    The opener installed by the wrapped call is captured on self.opener so
    later calls can reuse it; the previously installed opener is always
    restored afterwards.
    """
    def new_f(self, *args, **kwargs):
        import urllib2
        prev_opener = urllib2._opener
        urllib2.install_opener(self.opener)
        try:
            # Preserve the wrapped function's return value (the original
            # decorator silently dropped it).
            result = f(self, *args, **kwargs)
            # Capture whatever opener transmissionrpc installed during the call.
            self.opener = urllib2._opener
            return result
        finally:
            urllib2.install_opener(prev_opener)
    return new_f
class TransmissionBase(object):
    """Shared functionality for the transmission plugins: config handling,
    RPC client creation and torrent state inspection."""

    def __init__(self):
        self.client = None
        self.opener = None

    def _validator(self, advanced):
        """Return config validator"""
        advanced.accept('text', key='host')
        advanced.accept('integer', key='port')
        # note that password is optional in transmission
        advanced.accept('file', key='netrc')
        advanced.accept('text', key='username')
        advanced.accept('text', key='password')
        advanced.accept('boolean', key='enabled')
        return advanced

    def prepare_config(self, config):
        """Normalize config to a dict and fill defaults.

        A bare boolean becomes {'enabled': <bool>}. When 'netrc' is set,
        credentials are read from that file (errors are logged, not raised).
        """
        if isinstance(config, bool):
            config = {'enabled': config}
        config.setdefault('enabled', True)
        config.setdefault('host', 'localhost')
        config.setdefault('port', 9091)
        if 'netrc' in config:
            netrc_path = os.path.expanduser(config['netrc'])
            try:
                config['username'], _, config['password'] = netrc(netrc_path).authenticators(config['host'])
            except IOError as e:
                log.error('netrc: unable to open: %s' % e.filename)
            except NetrcParseError as e:
                log.error('netrc: %s, file: %s, line: %s' % (e.msg, e.filename, e.lineno))
        return config

    def create_rpc_client(self, config):
        """Connect to the transmission daemon; raises PluginError with a
        user-friendly message on connection/auth failures."""
        import transmissionrpc
        from transmissionrpc import TransmissionError
        from transmissionrpc import HTTPHandlerError

        user, password = config.get('username'), config.get('password')

        try:
            cli = transmissionrpc.Client(config['host'], config['port'], user, password)
        except TransmissionError as e:
            if isinstance(e.original, HTTPHandlerError):
                if e.original.code == 111:
                    raise plugin.PluginError("Cannot connect to transmission. Is it running?")
                elif e.original.code == 401:
                    raise plugin.PluginError("Username/password for transmission is incorrect. Cannot connect.")
                elif e.original.code == 110:
                    raise plugin.PluginError("Cannot connect to transmission: Connection timed out.")
                else:
                    raise plugin.PluginError("Error connecting to transmission: %s" % e.original.message)
            else:
                raise plugin.PluginError("Error connecting to transmission: %s" % e.message)
        return cli

    def torrent_info(self, torrent):
        """Return (done, vloc) for a torrent.

        done: True when every selected file is fully downloaded.
        vloc: local path of the dominant file (>= 90% of total size) when the
        torrent is done, else None.
        """
        done = torrent.totalSize > 0
        vloc = None
        best = None
        for t in torrent.files().iteritems():
            tf = t[1]
            if tf['selected']:
                if tf['size'] <= 0 or tf['completed'] < tf['size']:
                    done = False
                    break
                if not best or tf['size'] > best[1]:
                    best = (tf['name'], tf['size'])
        if done and best and (100 * float(best[1]) / float(torrent.totalSize)) >= 90:
            vloc = ('%s/%s' % (torrent.downloadDir, best[0])).replace('/', os.sep)
        return done, vloc

    def check_seed_limits(self, torrent, session):
        """Return (seed_limit_ok, idle_limit_ok): True when the respective
        limit has been reached, None when no such limit is defined."""
        seed_limit_ok = None  # will remain if no seed ratio defined
        idle_limit_ok = None  # will remain if no idle limit defined

        if torrent.seedRatioMode == 1:  # use torrent's own seed ratio limit
            # fixed: comparison was inverted relative to the global branch
            # below (limit is reached when uploadRatio >= the limit)
            seed_limit_ok = torrent.uploadRatio >= torrent.seedRatioLimit
        elif torrent.seedRatioMode == 0:  # use global rules
            if session.seedRatioLimited:
                seed_limit_ok = torrent.uploadRatio >= session.seedRatioLimit

        if torrent.seedIdleMode == 1:  # use torrent's own idle limit
            idle_limit_ok = torrent.date_active + timedelta(minutes=torrent.seedIdleLimit) < datetime.now()
        elif torrent.seedIdleMode == 0:  # use global rules
            if session.idle_seeding_limit_enabled:
                idle_limit_ok = torrent.date_active + timedelta(minutes=session.idle_seeding_limit) < datetime.now()

        return seed_limit_ok, idle_limit_ok

    @save_opener
    def on_task_start(self, task, config):
        """Verify transmissionrpc availability/version and optionally test
        the connection (in --test mode)."""
        try:
            import transmissionrpc
            from transmissionrpc import TransmissionError
            from transmissionrpc import HTTPHandlerError
        except ImportError:
            # was a bare `except:`; only a missing/too-old module should
            # produce this message
            raise plugin.PluginError('Transmissionrpc module version 0.11 or higher required.', log)
        if [int(part) for part in transmissionrpc.__version__.split('.')] < [0, 11]:
            raise plugin.PluginError('Transmissionrpc module version 0.11 or higher required, please upgrade', log)

        config = self.prepare_config(config)
        if config['enabled']:
            if task.options.test:
                log.info('Trying to connect to transmission...')
                self.client = self.create_rpc_client(config)
                if self.client:
                    log.info('Successfully connected to transmission.')
                else:
                    log.error('It looks like there was a problem connecting to transmission.')
class PluginTransmissionInput(TransmissionBase):
    """Generates an entry for each torrent in the transmission session.
    With onlycomplete (default), only finished & stopped torrents whose
    seed/idle limits (when defined) have been reached are emitted."""

    def validator(self):
        """Return config validator"""
        root = validator.factory()
        root.accept('boolean')
        advanced = root.accept('dict')
        self._validator(advanced)
        advanced.accept('boolean', key='onlycomplete')
        return root

    def prepare_config(self, config):
        config = TransmissionBase.prepare_config(self, config)
        config.setdefault('onlycomplete', True)
        return config

    def on_task_input(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled']:
            return

        if not self.client:
            self.client = self.create_rpc_client(config)
        entries = []

        # Hack/Workaround for http://flexget.com/ticket/2002
        # TODO: Proper fix
        if 'username' in config and 'password' in config:
            self.client.http_handler.set_authentication(self.client.url, config['username'], config['password'])

        session = self.client.get_session()

        for torrent in self.client.get_torrents():
            downloaded, download_location = self.torrent_info(torrent)
            seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)
            # fixed operator grouping: previously `A and B and (C) or (D)`
            # let a reached seed/idle limit bypass the downloaded/stopped
            # requirement when onlycomplete was set
            if (not config['onlycomplete'] or
                    (downloaded and torrent.status == 'stopped' and
                     ((seed_ratio_ok is None and idle_limit_ok is None) or
                      (seed_ratio_ok is True or idle_limit_ok is True)))):
                entry = Entry(title=torrent.name,
                              url='file://%s' % torrent.torrentFile,
                              torrent_info_hash=torrent.hashString,
                              content_size=torrent.totalSize / (1024 * 1024))
                # expose a few raw torrent attributes on the entry
                for attr in ['comment', 'downloadDir', 'isFinished', 'isPrivate']:
                    entry['transmission_' + attr] = getattr(torrent, attr)
                entry['transmission_trackers'] = [t['announce'] for t in torrent.trackers]
                entry['location'] = download_location
                entries.append(entry)
        return entries
class PluginTransmission(TransmissionBase):
    """
    Add url from entry url to transmission

    Example::

      transmission:
        host: localhost
        port: 9091
        netrc: /home/flexget/.tmnetrc
        username: myusername
        password: mypassword
        path: the download location

    Default values for the config elements::

      transmission:
        host: localhost
        port: 9091
        enabled: yes
    """

    schema = {
        'anyOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'host': {'type': 'string'},
                    'port': {'type': 'integer'},
                    'username': {'type': 'string'},
                    'password': {'type': 'string'},
                    'path': {'type': 'string'},
                    'maxupspeed': {'type': 'number'},
                    'maxdownspeed': {'type': 'number'},
                    'maxconnections': {'type': 'integer'},
                    'ratio': {'type': 'number'},
                    'addpaused': {'type': 'boolean'},
                    'content_filename': {'type': 'string'},
                    'main_file_only': {'type': 'boolean'},
                    'enabled': {'type': 'boolean'},
                    'include_subs': {'type': 'boolean'},
                    'bandwidthpriority': {'type': 'number'},
                    'honourlimits': {'type': 'boolean'},
                    'include_files': one_or_more({'type': 'string'}),
                    'skip_files': one_or_more({'type': 'string'}),
                    'rename_like_files': {'type': 'boolean'}
                },
                'additionalProperties': False
            }
        ]
    }

    def prepare_config(self, config):
        config = TransmissionBase.prepare_config(self, config)
        config.setdefault('path', '')
        config.setdefault('main_file_only', False)
        config.setdefault('include_subs', False)
        config.setdefault('rename_like_files', False)
        config.setdefault('include_files', [])
        return config

    @plugin.priority(120)
    def on_task_download(self, task, config):
        """
        Call download plugin to generate the temp files we will load
        into deluge then verify they are valid torrents
        """
        config = self.prepare_config(config)
        if not config['enabled']:
            return
        # If the download plugin is not enabled, we need to call it to get
        # our temp .torrent files
        if 'download' not in task.config:
            download = plugin.get_plugin_by_name('download')
            download.instance.get_temp_files(task, handle_magnets=True, fail_html=True)

    @plugin.priority(135)
    @save_opener
    def on_task_output(self, task, config):
        """Connect (lazily) and hand accepted entries to add_to_transmission."""
        config = self.prepare_config(config)
        # don't add when learning
        if task.options.learn:
            return
        if not config['enabled']:
            return
        # Do not run if there is nothing to do
        if not task.accepted:
            return
        if self.client is None:
            self.client = self.create_rpc_client(config)
            if self.client:
                log.debug('Successfully connected to transmission.')
            else:
                raise plugin.PluginError("Couldn't connect to transmission.")
        if task.accepted:
            self.add_to_transmission(self.client, task, config)

    def _make_torrent_options_dict(self, config, entry):
        """Merge per-entry and config options into transmissionrpc kwargs.

        Returns {'add': ..., 'change': ..., 'post': ...}: 'add' for
        add_torrent, 'change' for change_torrent, 'post' for post-add
        processing done by this plugin.
        """
        opt_dic = {}

        for opt_key in ('path', 'addpaused', 'honourlimits', 'bandwidthpriority',
                        'maxconnections', 'maxupspeed', 'maxdownspeed', 'ratio', 'main_file_only',
                        'include_subs', 'content_filename', 'include_files', 'skip_files', 'rename_like_files'):
            # Values do not merge config with task
            # Task takes priority then config is used
            if opt_key in entry:
                opt_dic[opt_key] = entry[opt_key]
            elif opt_key in config:
                opt_dic[opt_key] = config[opt_key]

        options = {'add': {}, 'change': {}, 'post': {}}

        add = options['add']
        if opt_dic.get('path'):
            try:
                path = os.path.expanduser(entry.render(opt_dic['path']))
                add['download_dir'] = pathscrub(path).encode('utf-8')
            except RenderError as e:
                log.error('Error setting path for %s: %s' % (entry['title'], e))
        if 'bandwidthpriority' in opt_dic:
            add['bandwidthPriority'] = opt_dic['bandwidthpriority']
        if 'maxconnections' in opt_dic:
            add['peer_limit'] = opt_dic['maxconnections']
        # make sure we add it paused, will modify status after adding
        add['paused'] = True

        change = options['change']
        if 'honourlimits' in opt_dic and not opt_dic['honourlimits']:
            change['honorsSessionLimits'] = False
        if 'maxupspeed' in opt_dic:
            change['uploadLimit'] = opt_dic['maxupspeed']
            change['uploadLimited'] = True
        if 'maxdownspeed' in opt_dic:
            change['downloadLimit'] = opt_dic['maxdownspeed']
            change['downloadLimited'] = True

        if 'ratio' in opt_dic:
            change['seedRatioLimit'] = opt_dic['ratio']
            if opt_dic['ratio'] == -1:
                # seedRatioMode:
                # 0 follow the global settings
                # 1 override the global settings, seeding until a certain ratio
                # 2 override the global settings, seeding regardless of ratio
                change['seedRatioMode'] = 2
            else:
                change['seedRatioMode'] = 1

        post = options['post']
        # set to modify paused status after
        if 'addpaused' in opt_dic:
            post['paused'] = opt_dic['addpaused']
        if 'main_file_only' in opt_dic:
            post['main_file_only'] = opt_dic['main_file_only']
        if 'include_subs' in opt_dic:
            post['include_subs'] = opt_dic['include_subs']
        if 'content_filename' in opt_dic:
            post['content_filename'] = opt_dic['content_filename']
        if 'skip_files' in opt_dic:
            post['skip_files'] = opt_dic['skip_files']
        if 'include_files' in opt_dic:
            post['include_files'] = opt_dic['include_files']
        if 'rename_like_files' in opt_dic:
            post['rename_like_files'] = opt_dic['rename_like_files']
        return options

    def add_to_transmission(self, cli, task, config):
        """Adds accepted entries to transmission """
        from transmissionrpc import TransmissionError
        for entry in task.accepted:
            if task.options.test:
                log.info('Would add %s to transmission' % entry['url'])
                continue
            # Compile user options into appropriate dict
            options = self._make_torrent_options_dict(config, entry)

            downloaded = not entry['url'].startswith('magnet:')

            # Check that file is downloaded
            if downloaded and 'file' not in entry:
                entry.fail('file missing?')
                continue

            # Verify the temp file exists
            if downloaded and not os.path.exists(entry['file']):
                tmp_path = os.path.join(task.manager.config_base, 'temp')
                log.debug('entry: %s' % entry)
                log.debug('temp: %s' % ', '.join(os.listdir(tmp_path)))
                entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
                continue

            try:
                if downloaded:
                    with open(entry['file'], 'rb') as f:
                        filedump = base64.b64encode(f.read()).encode('utf-8')
                    r = cli.add_torrent(filedump, 30, **options['add'])
                else:
                    r = cli.add_torrent(entry['url'], timeout=30, **options['add'])
                if r:
                    log.info('"%s" torrent added to transmission' % (entry['title']))
                    total_size = cli.get_torrent(r.id, ['id', 'totalSize']).totalSize

                    def _filter_list(items):
                        # Keep only string masks. The original removed items
                        # while iterating the same list, which skips elements;
                        # build a new list instead.
                        return [item for item in items if isinstance(item, basestring)]

                    def _find_matches(name, masks):
                        # True when the file name matches any glob mask.
                        for mask in masks:
                            if fnmatch(name, mask):
                                return True
                        return False

                    skip_files = False
                    # Filter list because "set" plugin doesn't validate based on schema
                    # Skip files only used if we have no main file
                    if 'skip_files' in options['post']:
                        skip_files = True
                        options['post']['skip_files'] = _filter_list(options['post']['skip_files'])

                    main_id = None
                    # We need to index the files if any of the following are defined
                    if ('main_file_only' in options['post'] and options['post']['main_file_only'] == True or
                            'content_filename' in options['post'] or skip_files):
                        fl = cli.get_files(r.id)

                        # Find files based on config
                        dl_list = []
                        skip_list = []
                        main_list = []
                        full_list = []
                        ext_list = ['*.srt', '*.sub', '*.idx', '*.ssa', '*.ass']

                        if 'include_files' in options['post']:
                            options['post']['include_files'] = _filter_list(options['post']['include_files'])

                        for f in fl[r.id]:
                            full_list.append(f)
                            # the file holding >90% of the torrent is "main"
                            if fl[r.id][f]['size'] > total_size * 0.90:
                                main_id = f
                            if 'include_files' in options['post']:
                                if _find_matches(fl[r.id][f]['name'], options['post']['include_files']):
                                    dl_list.append(f)
                                elif ('include_subs' in options['post'] and options['post']['include_subs'] == True and
                                        _find_matches(fl[r.id][f]['name'], ext_list)):
                                    dl_list.append(f)
                            if skip_files:
                                if _find_matches(fl[r.id][f]['name'], options['post']['skip_files']):
                                    skip_list.append(f)

                        if main_id is not None:
                            # Look for files matching main ID title but with a different extension
                            if 'rename_like_files' in options['post'] and options['post']['rename_like_files'] == True:
                                for f in fl[r.id]:
                                    # if this filename matches main filename we want to rename it as well
                                    fs = os.path.splitext(fl[r.id][f]['name'])
                                    if fs[0] == os.path.splitext(fl[r.id][main_id]['name'])[0]:
                                        main_list.append(f)
                            else:
                                main_list = [main_id]

                            if main_id not in dl_list:
                                dl_list.append(main_id)

                        # If we have a main file and want to rename it and associated files
                        if 'content_filename' in options['post'] and main_id is not None:
                            download_dir = ""
                            if 'download_dir' not in options['add']:
                                download_dir = cli.get_session().download_dir
                            else:
                                download_dir = options['add']['download_dir']

                            # Get new filename without ext
                            file_ext = os.path.splitext(fl[r.id][main_id]['name'])[1]
                            file_path = os.path.dirname(os.path.join(download_dir, fl[r.id][main_id]['name']))
                            filename = options['post']['content_filename']
                            if config['host'] == 'localhost' or config['host'] == '127.0.0.1':
                                counter = 1
                                while os.path.exists(os.path.join(file_path, filename + file_ext)):
                                    # Try appending a (#) suffix till a unique filename is found
                                    # (the original multi-argument str.join call
                                    # raised TypeError here)
                                    filename = '%s(%d)' % (options['post']['content_filename'], counter)
                                    counter += 1
                            else:
                                log.debug('Cannot ensure content_filename is unique '
                                          'when adding to a remote transmission daemon.')

                            for index in main_list:
                                file_ext = os.path.splitext(fl[r.id][index]['name'])[1]
                                log.debug('File %s renamed to %s' % (fl[r.id][index]['name'], filename + file_ext))
                                # change to below when set_files will allow setting name, more efficient to have one call
                                # fl[r.id][index]['name'] = os.path.basename(pathscrub(filename + file_ext).encode('utf-8'))
                                cli.rename_torrent_path(r.id, fl[r.id][index]['name'],
                                                        os.path.basename(
                                                            pathscrub(filename + file_ext).encode('utf-8'))
                                                        )

                        if ('main_file_only' in options['post'] and options['post']['main_file_only'] == True and
                                main_id is not None):
                            # Set Unwanted Files
                            options['change']['files_unwanted'] = [x for x in full_list if x not in dl_list]
                            options['change']['files_wanted'] = dl_list
                            log.debug('Downloading %s of %s files in torrent.' %
                                      (len(options['change']['files_wanted']), len(full_list)))
                        elif (('main_file_only' not in options['post'] or
                               options['post']['main_file_only'] == False or
                               main_id is None) and
                              skip_files):
                            # If no main file and we want to skip files
                            if len(skip_list) >= len(full_list):
                                log.debug('skip_files filter would cause no files to be downloaded; '
                                          'including all files in torrent.')
                            else:
                                options['change']['files_unwanted'] = skip_list
                                options['change']['files_wanted'] = [x for x in full_list if x not in skip_list]
                                log.debug('Downloading %s of %s files in torrent.'
                                          % (len(options['change']['files_wanted']), len(full_list)))

                    # Set any changed file properties
                    if options['change'].keys():
                        cli.change_torrent(r.id, 30, **options['change'])

                    # if addpaused was defined and set to False start the torrent;
                    # prevents downloading data before we set what files we want
                    if ('paused' in options['post'] and options['post']['paused'] == False or
                            'paused' not in options['post'] and cli.get_session().start_added_torrents == True):
                        cli.start_torrent(r.id)
            except TransmissionError as e:
                log.debug('TransmissionError', exc_info=True)
                log.debug('Failed options dict: %s' % options)
                # fixed precedence: '%s' % x or 'N/A' formats first, so 'N/A'
                # was unreachable; the fallback belongs inside the format args
                msg = 'TransmissionError: %s' % (e.message or 'N/A')
                log.error(msg)
                entry.fail(msg)

    def on_task_exit(self, task, config):
        """Make sure all temp files are cleaned up when task exits"""
        # If download plugin is enabled, it will handle cleanup.
        if 'download' not in task.config:
            download = plugin.get_plugin_by_name('download')
            download.instance.cleanup_temp_files(task)

    on_task_abort = on_task_exit
class PluginTransmissionClean(TransmissionBase):
    """
    Remove completed torrents from Transmission.

    Examples::

      clean_transmission: yes  # ignore both time and ratio

      clean_transmission:  # uses transmission's internal limits for idle time and seed ratio ( if defined )
        transmission_seed_limits: yes

      clean_transmission:  # matches time only
        finished_for: 2 hours

      clean_transmission:  # matches ratio only
        min_ratio: 0.5

      clean_transmission:  # matches time OR ratio
        finished_for: 2 hours
        min_ratio: 0.5

    Default values for the config elements::

      clean_transmission:
        host: localhost
        port: 9091
        enabled: yes
    """

    def validator(self):
        """Return config validator"""
        root = validator.factory()
        # Bare boolean form: ``clean_transmission: yes``.
        root.accept('boolean')
        advanced = root.accept('dict')
        # Shared host/port/auth keys come from TransmissionBase.
        self._validator(advanced)
        advanced.accept('number', key='min_ratio')
        advanced.accept('interval', key='finished_for')
        advanced.accept('boolean', key='transmission_seed_limits')
        advanced.accept('boolean', key='delete_files')
        return root

    def on_task_exit(self, task, config):
        """Remove any downloaded torrents that satisfy the configured removal criteria."""
        config = self.prepare_config(config)
        if not config['enabled'] or task.options.learn:
            return
        if not self.client:
            self.client = self.create_rpc_client(config)
        nrat = float(config['min_ratio']) if 'min_ratio' in config else None
        nfor = parse_timedelta(config['finished_for']) if 'finished_for' in config else None
        delete_files = bool(config['delete_files']) if 'delete_files' in config else False
        trans_checks = bool(config['transmission_seed_limits']) if 'transmission_seed_limits' in config else False
        session = self.client.get_session()
        remove_ids = []
        for torrent in self.client.get_torrents():
            log.verbose('Torrent "%s": status: "%s" - ratio: %s - date done: %s' %
                        (torrent.name, torrent.status, torrent.ratio, torrent.date_done))
            downloaded, dummy = self.torrent_info(torrent)
            seed_ratio_ok, idle_limit_ok = self.check_seed_limits(torrent, session)
            # Remove when the torrent is fully downloaded and either no criteria
            # are configured at all, or any configured criterion is satisfied.
            # BUG FIX: ``trans_checks`` is always a bool (default False), so the
            # previous ``trans_checks is None`` test could never be true; that made
            # the "no criteria configured" branch dead code, and a plain
            # ``clean_transmission: yes`` never removed anything.
            if (downloaded and ((nrat is None and nfor is None and not trans_checks) or
                                (trans_checks and ((seed_ratio_ok is None and idle_limit_ok is None) or
                                                   (seed_ratio_ok is True or idle_limit_ok is True))) or
                                (nrat and (nrat <= torrent.ratio)) or
                                (nfor and ((torrent.date_done + nfor) <= datetime.now())))):
                if task.options.test:
                    log.info('Would remove finished torrent `%s` from transmission' % torrent.name)
                    continue
                log.info('Removing finished torrent `%s` from transmission' % torrent.name)
                remove_ids.append(torrent.id)
        if remove_ids:
            self.client.remove_torrent(remove_ids, delete_files)
@event('plugin.register')
def register_plugin():
    """Register all transmission plugins with FlexGet."""
    registrations = (
        (PluginTransmission, 'transmission'),
        (PluginTransmissionInput, 'from_transmission'),
        (PluginTransmissionClean, 'clean_transmission'),
    )
    for plugin_class, plugin_name in registrations:
        plugin.register(plugin_class, plugin_name, api_ver=2)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
import textwrap
import unittest
from collections import namedtuple
from contextlib import contextmanager
import pytest
from twitter.common.collections import maybe_list
from pants.base.revision import Revision
from pants.java.distribution.distribution import Distribution
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import chmod_plus_x, safe_open, touch
class MockDistributionTest(unittest.TestCase):
  """Exercises Distribution validation and location against synthetic JDK layouts on disk."""

  # (name, contents) description of an executable to plant in a fake distribution dir;
  # contents of None produces an empty file.
  EXE = namedtuple('Exe', ['name', 'contents'])

  @classmethod
  def exe(cls, name, version=None):
    """Return an EXE whose shell script echoes ``java.version=<version>`` when run.

    With no version the executable is empty, so any version probe of it fails.
    """
    contents = None if not version else textwrap.dedent('''
        #!/bin/sh
        if [ $# -ne 3 ]; then
          # Sanity check a classpath switch with a value plus the classname for main
          echo "Expected 3 arguments, got $#: $@" >&2
          exit 1
        fi
        echo "java.version=%s"
      ''' % version).strip()
    return cls.EXE(name, contents=contents)

  @contextmanager
  def env(self, **kwargs):
    """Run the body with JDK_HOME/JAVA_HOME/PATH scrubbed except for *kwargs* overrides."""
    environment = dict(JDK_HOME=None, JAVA_HOME=None, PATH=None)
    environment.update(**kwargs)
    with environment_as(**environment):
      yield

  @contextmanager
  def distribution(self, files=None, executables=None):
    """Yield a temp dir seeded with plain *files* and executable EXE *executables*."""
    with temporary_dir() as jdk:
      for f in maybe_list(files or ()):
        touch(os.path.join(jdk, f))
      for exe in maybe_list(executables or (), expected_type=self.EXE):
        path = os.path.join(jdk, exe.name)
        with safe_open(path, 'w') as fp:
          fp.write(exe.contents or '')
        chmod_plus_x(path)
      yield jdk

  def setUp(self):
    super(MockDistributionTest, self).setUp()
    # Save local cache and then flush so tests get a clean environment. _CACHE restored in tearDown.
    self._local_cache = Distribution._CACHE
    Distribution._CACHE = {}

  def tearDown(self):
    super(MockDistributionTest, self).tearDown()
    Distribution._CACHE = self._local_cache

  def test_validate_basic(self):
    """An empty dir or a non-executable 'java' file fails; an executable 'java' passes."""
    with pytest.raises(Distribution.Error):
      with self.distribution() as jdk:
        Distribution(bin_path=jdk).validate()
    with pytest.raises(Distribution.Error):
      with self.distribution(files='java') as jdk:
        Distribution(bin_path=jdk).validate()
    with self.distribution(executables=self.exe('java')) as jdk:
      Distribution(bin_path=jdk).validate()

  def test_validate_jdk(self):
    """jdk=True additionally requires a 'javac' executable."""
    with pytest.raises(Distribution.Error):
      with self.distribution(executables=self.exe('java')) as jdk:
        Distribution(bin_path=jdk, jdk=True).validate()
    with self.distribution(executables=[self.exe('java'), self.exe('javac')]) as jdk:
      Distribution(bin_path=jdk, jdk=True).validate()

  def test_validate_version(self):
    """Version bounds are enforced against the fake java's reported version."""
    with pytest.raises(Distribution.Error):
      with self.distribution(executables=self.exe('java', '1.7.0_25')) as jdk:
        Distribution(bin_path=jdk, minimum_version='1.7.0_45').validate()
    with pytest.raises(Distribution.Error):
      with self.distribution(executables=self.exe('java', '1.8.0_1')) as jdk:
        Distribution(bin_path=jdk, maximum_version='1.7.9999').validate()
    with self.distribution(executables=self.exe('java', '1.7.0_25')) as jdk:
      Distribution(bin_path=jdk, minimum_version='1.7.0_25').validate()
      Distribution(bin_path=jdk, minimum_version=Revision.semver('1.6.0')).validate()
      Distribution(bin_path=jdk, minimum_version='1.7.0_25', maximum_version='1.7.999').validate()

  def test_validated_binary(self):
    """binary() resolves only executables, not plain files."""
    with pytest.raises(Distribution.Error):
      with self.distribution(files='jar', executables=self.exe('java')) as jdk:
        Distribution(bin_path=jdk).binary('jar')
    with self.distribution(executables=[self.exe('java'), self.exe('jar')]) as jdk:
      Distribution(bin_path=jdk).binary('jar')

  def test_locate(self):
    """Distribution.locate honors PATH, JDK_HOME and JAVA_HOME plus jdk/version constraints."""
    # Nothing on the PATH at all.
    with pytest.raises(Distribution.Error):
      with self.env():
        Distribution.locate()
    # 'java' present but not executable.
    with pytest.raises(Distribution.Error):
      with self.distribution(files='java') as jdk:
        with self.env(PATH=jdk):
          Distribution.locate()
    # JRE only, but a JDK requested.
    with pytest.raises(Distribution.Error):
      with self.distribution(executables=self.exe('java')) as jdk:
        with self.env(PATH=jdk):
          Distribution.locate(jdk=True)
    # Version below the minimum.
    with pytest.raises(Distribution.Error):
      with self.distribution(executables=self.exe('java', '1.6.0')) as jdk:
        with self.env(PATH=jdk):
          Distribution.locate(minimum_version='1.7.0')
    # Version above the maximum.
    with pytest.raises(Distribution.Error):
      with self.distribution(executables=self.exe('java', '1.8.0')) as jdk:
        with self.env(PATH=jdk):
          Distribution.locate(maximum_version='1.7.999')
    # JDK_HOME/JAVA_HOME point at the bin dir itself; locate expects a home dir
    # containing bin/java, so these fail.
    with pytest.raises(Distribution.Error):
      with self.distribution(executables=self.exe('java')) as jdk:
        with self.env(JDK_HOME=jdk):
          Distribution.locate()
    with pytest.raises(Distribution.Error):
      with self.distribution(executables=self.exe('java')) as jdk:
        with self.env(JAVA_HOME=jdk):
          Distribution.locate()
    # Success cases.
    with self.distribution(executables=self.exe('java')) as jdk:
      with self.env(PATH=jdk):
        Distribution.locate()
    with self.distribution(executables=[self.exe('java'), self.exe('javac')]) as jdk:
      with self.env(PATH=jdk):
        Distribution.locate(jdk=True)
    with self.distribution(executables=self.exe('java', '1.7.0')) as jdk:
      with self.env(PATH=jdk):
        Distribution.locate(minimum_version='1.6.0')
      with self.env(PATH=jdk):
        Distribution.locate(maximum_version='1.7.999')
      with self.env(PATH=jdk):
        Distribution.locate(minimum_version='1.6.0', maximum_version='1.7.999')
    with self.distribution(executables=self.exe('bin/java')) as jdk:
      with self.env(JDK_HOME=jdk):
        Distribution.locate()
    with self.distribution(executables=self.exe('bin/java')) as jdk:
      with self.env(JAVA_HOME=jdk):
        Distribution.locate()

  def test_cached_good_min(self):
    with self.distribution(executables=self.exe('java', '1.7.0_33')) as jdk:
      with self.env(PATH=jdk):
        Distribution.cached(minimum_version='1.7.0_25')

  def test_cached_good_max(self):
    with self.distribution(executables=self.exe('java', '1.7.0_33')) as jdk:
      with self.env(PATH=jdk):
        Distribution.cached(maximum_version='1.7.0_50')

  def test_cached_good_bounds(self):
    with self.distribution(executables=self.exe('java', '1.7.0_33')) as jdk:
      with self.env(PATH=jdk):
        Distribution.cached(minimum_version='1.6.0_35', maximum_version='1.7.0_55')

  def test_cached_too_low(self):
    with self.distribution(executables=self.exe('java', '1.7.0_33')) as jdk:
      with self.env(PATH=jdk):
        with self.assertRaises(Distribution.Error):
          Distribution.cached(minimum_version='1.7.0_40')

  def test_cached_too_high(self):
    with self.distribution(executables=self.exe('java', '1.7.0_83')) as jdk:
      with self.env(PATH=jdk):
        with self.assertRaises(Distribution.Error):
          Distribution.cached(maximum_version='1.7.0_55')

  def test_cached_low_fault(self):
    # Bounds are valid as a range, but the available java misses the minimum.
    with self.distribution(executables=self.exe('java', '1.7.0_33')) as jdk:
      with self.env(PATH=jdk):
        with self.assertRaises(Distribution.Error):
          Distribution.cached(minimum_version='1.7.0_35', maximum_version='1.7.0_55')

  def test_cached_high_fault(self):
    # Bounds are valid as a range, but the available java exceeds the maximum.
    with self.distribution(executables=self.exe('java', '1.7.0_33')) as jdk:
      with self.env(PATH=jdk):
        with self.assertRaises(Distribution.Error):
          Distribution.cached(minimum_version='1.6.0_00', maximum_version='1.6.0_50')

  def test_cached_conflicting(self):
    # minimum > maximum is an unsatisfiable constraint.
    with self.distribution(executables=self.exe('java', '1.7.0_33')) as jdk:
      with self.env(PATH=jdk):
        with self.assertRaises(Distribution.Error):
          Distribution.cached(minimum_version='1.7.0_00', maximum_version='1.6.0_50')

  def test_cached_bad_input(self):
    # Version bounds must be strings/Revisions, not floats.
    with self.assertRaises(ValueError):
      with self.distribution(executables=self.exe('java', '1.7.0_33')) as jdk:
        with self.env(PATH=jdk):
          Distribution.cached(minimum_version=1.7, maximum_version=1.8)
def exe_path(name):
  """Return the path of *name* if it resolves to an executable via `which`, else None."""
  proc = subprocess.Popen(['which', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  out, _ = proc.communicate()
  if proc.returncode != 0:
    return None
  candidate = out.strip()
  if os.path.exists(candidate) and os.access(candidate, os.X_OK):
    return candidate
  return None
class LiveDistributionTest(unittest.TestCase):
  """Smoke tests against whatever real java/javac is on the PATH, if any."""

  # Resolved once at import time; None when the executable is absent.
  JAVA = exe_path('java')
  JAVAC = exe_path('javac')

  @pytest.mark.skipif('not LiveDistributionTest.JAVA', reason='No java executable on the PATH.')
  def test_validate_live(self):
    """Validate version bounds and location against the real installed java."""
    with pytest.raises(Distribution.Error):
      Distribution(bin_path=os.path.dirname(self.JAVA), minimum_version='999.9.9').validate()
    with pytest.raises(Distribution.Error):
      Distribution(bin_path=os.path.dirname(self.JAVA), maximum_version='0.0.1').validate()

    Distribution(bin_path=os.path.dirname(self.JAVA)).validate()
    Distribution(bin_path=os.path.dirname(self.JAVA), minimum_version='1.3.1').validate()
    Distribution(bin_path=os.path.dirname(self.JAVA), maximum_version='999.999.999').validate()
    Distribution(bin_path=os.path.dirname(self.JAVA), minimum_version='1.3.1',
                 maximum_version='999.999.999').validate()
    Distribution.locate(jdk=False)

  @pytest.mark.skipif('not LiveDistributionTest.JAVAC', reason='No javac executable on the PATH.')
  def test_validate_live_jdk(self):
    """Validate JDK-specific behavior (javac/javap) against the real installed JDK."""
    Distribution(bin_path=os.path.dirname(self.JAVAC), jdk=True).validate()
    Distribution(bin_path=os.path.dirname(self.JAVAC), jdk=True).binary('javap')
    Distribution.locate(jdk=True)
| |
#!/usr/bin/env python
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.compat import mock
import re
import xml.dom.minidom
from boto.exception import BotoServerError
from boto.route53.connection import Route53Connection
from boto.route53.exception import DNSServerError
from boto.route53.healthcheck import HealthCheck
from boto.route53.record import ResourceRecordSets, Record
from boto.route53.zone import Zone
from nose.plugins.attrib import attr
from tests.unit import AWSMockServiceTestCase
from boto.compat import six
urllib = six.moves.urllib
@attr(route53=True)
class TestRoute53Connection(AWSMockServiceTestCase):
    """Tests Route53 error surfacing and retry of retryable 400 responses."""
    connection_class = Route53Connection

    def setUp(self):
        super(TestRoute53Connection, self).setUp()
        # Counts invocations of the (patched) retry handler.
        self.calls = {
            'count': 0,
        }

    def default_body(self):
        """Minimal error body returned by the mocked service."""
        return b"""<Route53Result>
    <Message>It failed.</Message>
</Route53Result>
"""

    def test_typical_400(self):
        """A non-retryable 400 surfaces as DNSServerError carrying the body message."""
        self.set_http_response(status_code=400, header=[
            ['Code', 'AccessDenied'],
        ])

        with self.assertRaises(DNSServerError) as err:
            self.service_connection.get_all_hosted_zones()

        self.assertTrue('It failed.' in str(err.exception))

    def test_retryable_400_prior_request_not_complete(self):
        # Test ability to retry on ``PriorRequestNotComplete``.
        self.set_http_response(status_code=400, header=[
            ['Code', 'PriorRequestNotComplete'],
        ])

        self.do_retry_handler()

    def test_retryable_400_throttling(self):
        # Test ability to retry on ``Throttling``.
        self.set_http_response(status_code=400, header=[
            ['Code', 'Throttling'],
        ])

        self.do_retry_handler()

    @mock.patch('time.sleep')
    def do_retry_handler(self, sleep_mock):
        """Assert the connection keeps consulting the retry handler until retries
        are exhausted, then raises. time.sleep is mocked so the test runs fast.
        """
        def incr_retry_handler(func):
            def _wrapper(*args, **kwargs):
                self.calls['count'] += 1
                return func(*args, **kwargs)
            return _wrapper

        # Patch.
        orig_retry = self.service_connection._retry_handler
        self.service_connection._retry_handler = incr_retry_handler(
            orig_retry
        )
        self.assertEqual(self.calls['count'], 0)

        # Retries get exhausted.
        with self.assertRaises(BotoServerError):
            self.service_connection.get_all_hosted_zones()
        self.assertEqual(self.calls['count'], 7)

        # Unpatch.
        self.service_connection._retry_handler = orig_retry
@attr(route53=True)
class TestCreateZoneRoute53(AWSMockServiceTestCase):
    """Tests create_zone and create_hosted_zone parsing of the canned response."""
    connection_class = Route53Connection

    def setUp(self):
        super(TestCreateZoneRoute53, self).setUp()

    def default_body(self):
        """Canned CreateHostedZone response with a delegation set of four name servers."""
        return b"""
<CreateHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-02-29/">
    <HostedZone>
        <Id>/hostedzone/Z11111</Id>
        <Name>example.com.</Name>
        <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</CallerReference>
        <Config>
            <Comment></Comment>
        </Config>
        <ResourceRecordSetCount>2</ResourceRecordSetCount>
    </HostedZone>
    <ChangeInfo>
        <Id>/change/C1111111111111</Id>
        <Status>PENDING</Status>
        <SubmittedAt>2014-02-02T10:19:29.928Z</SubmittedAt>
    </ChangeInfo>
    <DelegationSet>
        <NameServers>
            <NameServer>ns-100.awsdns-01.com</NameServer>
            <NameServer>ns-1000.awsdns-01.co.uk</NameServer>
            <NameServer>ns-1000.awsdns-01.org</NameServer>
            <NameServer>ns-900.awsdns-01.net</NameServer>
        </NameServers>
    </DelegationSet>
</CreateHostedZoneResponse>
        """

    def test_create_zone(self):
        """create_zone should return a Zone with the id/name from the response."""
        self.set_http_response(status_code=201)
        response = self.service_connection.create_zone("example.com.")

        self.assertTrue(isinstance(response, Zone))
        self.assertEqual(response.id, "Z11111")
        self.assertEqual(response.name, "example.com.")

    def test_create_hosted_zone(self):
        """create_hosted_zone returns the raw parsed dict, including name servers."""
        self.set_http_response(status_code=201)
        response = self.service_connection.create_hosted_zone("example.com.", "my_ref", "this is a comment")

        self.assertEqual(response['CreateHostedZoneResponse']['DelegationSet']['NameServers'],
                         ['ns-100.awsdns-01.com', 'ns-1000.awsdns-01.co.uk', 'ns-1000.awsdns-01.org', 'ns-900.awsdns-01.net'])
@attr(route53=True)
class TestGetZoneRoute53(AWSMockServiceTestCase):
    """Tests get_all_hosted_zones and get_zone against a canned three-zone listing."""
    connection_class = Route53Connection

    def setUp(self):
        super(TestGetZoneRoute53, self).setUp()

    def default_body(self):
        """Canned ListHostedZones response containing three hosted zones."""
        return b"""
<ListHostedZonesResponse xmlns="https://route53.amazonaws.com/doc/2012-02-29/">
    <HostedZones>
        <HostedZone>
            <Id>/hostedzone/Z1111</Id>
            <Name>example2.com.</Name>
            <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</CallerReference>
            <Config/>
            <ResourceRecordSetCount>3</ResourceRecordSetCount>
        </HostedZone>
        <HostedZone>
            <Id>/hostedzone/Z2222</Id>
            <Name>example1.com.</Name>
            <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeef</CallerReference>
            <Config/>
            <ResourceRecordSetCount>6</ResourceRecordSetCount>
        </HostedZone>
        <HostedZone>
            <Id>/hostedzone/Z3333</Id>
            <Name>example.com.</Name>
            <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeeg</CallerReference>
            <Config/>
            <ResourceRecordSetCount>6</ResourceRecordSetCount>
        </HostedZone>
    </HostedZones>
    <IsTruncated>false</IsTruncated>
    <MaxItems>100</MaxItems>
</ListHostedZonesResponse>
        """

    def test_list_zones(self):
        """Every zone in the canned response is returned exactly once."""
        self.set_http_response(status_code=201)
        response = self.service_connection.get_all_hosted_zones()

        domains = ['example2.com.', 'example1.com.', 'example.com.']
        # Removing each returned name from the expected list checks both
        # presence and uniqueness (remove raises ValueError on a miss).
        # Cleanup: dropped leftover debug print() calls from this loop.
        for d in response['ListHostedZonesResponse']['HostedZones']:
            domains.remove(d['Name'])

        self.assertEqual(domains, [])

    def test_get_zone(self):
        """get_zone resolves a domain name to a Zone object."""
        self.set_http_response(status_code=201)
        response = self.service_connection.get_zone('example.com.')

        self.assertTrue(isinstance(response, Zone))
        self.assertEqual(response.name, "example.com.")
@attr(route53=True)
class TestGetHostedZoneRoute53(AWSMockServiceTestCase):
    """Tests get_hosted_zone parsing of the zone metadata and delegation set."""
    connection_class = Route53Connection

    def setUp(self):
        super(TestGetHostedZoneRoute53, self).setUp()

    def default_body(self):
        """Canned GetHostedZone response for zone Z1111."""
        return b"""
<GetHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-02-29/">
    <HostedZone>
        <Id>/hostedzone/Z1111</Id>
        <Name>example.com.</Name>
        <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</CallerReference>
        <Config/>
        <ResourceRecordSetCount>3</ResourceRecordSetCount>
    </HostedZone>
    <DelegationSet>
        <NameServers>
            <NameServer>ns-1000.awsdns-40.org</NameServer>
            <NameServer>ns-200.awsdns-30.com</NameServer>
            <NameServer>ns-900.awsdns-50.net</NameServer>
            <NameServer>ns-1000.awsdns-00.co.uk</NameServer>
        </NameServers>
    </DelegationSet>
</GetHostedZoneResponse>
        """

    def test_list_zones(self):
        """Zone id, name and delegation-set name servers come through unchanged."""
        self.set_http_response(status_code=201)
        response = self.service_connection.get_hosted_zone("Z1111")

        self.assertEqual(response['GetHostedZoneResponse']['HostedZone']['Id'], '/hostedzone/Z1111')
        self.assertEqual(response['GetHostedZoneResponse']['HostedZone']['Name'], 'example.com.')
        self.assertEqual(response['GetHostedZoneResponse']['DelegationSet']['NameServers'],
                         ['ns-1000.awsdns-40.org', 'ns-200.awsdns-30.com', 'ns-900.awsdns-50.net', 'ns-1000.awsdns-00.co.uk'])
@attr(route53=True)
class TestGetAllRRSetsRoute53(AWSMockServiceTestCase):
    """Tests get_all_rrsets parsing of plain, alias, latency, failover and
    health-checked resource record sets."""
    connection_class = Route53Connection

    def setUp(self):
        super(TestGetAllRRSetsRoute53, self).setUp()

    def default_body(self):
        """Canned ListResourceRecordSets response covering every record flavor asserted below."""
        return b"""
<ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <ResourceRecordSets>
        <ResourceRecordSet>
            <Name>test.example.com.</Name>
            <Type>A</Type>
            <TTL>60</TTL>
            <ResourceRecords>
                <ResourceRecord>
                    <Value>10.0.0.1</Value>
                </ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
        <ResourceRecordSet>
            <Name>www.example.com.</Name>
            <Type>A</Type>
            <TTL>60</TTL>
            <ResourceRecords>
                <ResourceRecord>
                    <Value>10.0.0.2</Value>
                </ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
        <ResourceRecordSet>
            <Name>us-west-2-evaluate-health.example.com.</Name>
            <Type>A</Type>
            <SetIdentifier>latency-example-us-west-2-evaluate-health</SetIdentifier>
            <Region>us-west-2</Region>
            <AliasTarget>
                <HostedZoneId>ABCDEFG123456</HostedZoneId>
                <EvaluateTargetHealth>true</EvaluateTargetHealth>
                <DNSName>example-123456-evaluate-health.us-west-2.elb.amazonaws.com.</DNSName>
            </AliasTarget>
            <HealthCheckId>abcdefgh-abcd-abcd-abcd-abcdefghijkl</HealthCheckId>
        </ResourceRecordSet>
        <ResourceRecordSet>
            <Name>us-west-2-no-evaluate-health.example.com.</Name>
            <Type>A</Type>
            <SetIdentifier>latency-example-us-west-2-no-evaluate-health</SetIdentifier>
            <Region>us-west-2</Region>
            <AliasTarget>
                <HostedZoneId>ABCDEFG567890</HostedZoneId>
                <EvaluateTargetHealth>false</EvaluateTargetHealth>
                <DNSName>example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com.</DNSName>
            </AliasTarget>
            <HealthCheckId>abcdefgh-abcd-abcd-abcd-abcdefghijkl</HealthCheckId>
        </ResourceRecordSet>
        <ResourceRecordSet>
            <Name>failover.example.com.</Name>
            <Type>A</Type>
            <SetIdentifier>failover-primary</SetIdentifier>
            <Failover>PRIMARY</Failover>
            <TTL>60</TTL>
            <ResourceRecords>
                <ResourceRecord>
                    <Value>10.0.0.4</Value>
                </ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
        <ResourceRecordSet>
            <Name>us-west-2-evaluate-health-healthcheck.example.com.</Name>
            <Type>A</Type>
            <SetIdentifier>latency-example-us-west-2-evaluate-health-healthcheck</SetIdentifier>
            <Region>us-west-2</Region>
            <AliasTarget>
                <HostedZoneId>ABCDEFG123456</HostedZoneId>
                <EvaluateTargetHealth>true</EvaluateTargetHealth>
                <DNSName>example-123456-evaluate-health-healthcheck.us-west-2.elb.amazonaws.com.</DNSName>
            </AliasTarget>
            <HealthCheckId>076a32f8-86f7-4c9e-9fa2-c163d5be67d9</HealthCheckId>
        </ResourceRecordSet>
    </ResourceRecordSets>
    <IsTruncated>false</IsTruncated>
    <MaxItems>100</MaxItems>
</ListResourceRecordSetsResponse>
        """

    def test_get_all_rr_sets(self):
        """Each record flavor in the canned response is parsed into the right fields."""
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_rrsets("Z1111", "A", "example.com.")

        # Query parameter ordering is not deterministic, so accept either form.
        self.assertIn(self.actual_request.path,
                      ("/2013-04-01/hostedzone/Z1111/rrset?type=A&name=example.com.",
                       "/2013-04-01/hostedzone/Z1111/rrset?name=example.com.&type=A"))

        self.assertTrue(isinstance(response, ResourceRecordSets))
        self.assertEqual(response.hosted_zone_id, "Z1111")
        self.assertTrue(isinstance(response[0], Record))
        # BUG FIX: these were ``assertTrue(value, expected)`` which treats the
        # second argument as the failure message and always passes; they are
        # real equality checks.
        self.assertEqual(response[0].name, "test.example.com.")
        self.assertEqual(response[0].ttl, "60")
        self.assertEqual(response[0].type, "A")

        evaluate_record = response[2]
        self.assertEqual(evaluate_record.name, 'us-west-2-evaluate-health.example.com.')
        self.assertEqual(evaluate_record.type, 'A')
        self.assertEqual(evaluate_record.identifier, 'latency-example-us-west-2-evaluate-health')
        self.assertEqual(evaluate_record.region, 'us-west-2')
        self.assertEqual(evaluate_record.alias_hosted_zone_id, 'ABCDEFG123456')
        self.assertTrue(evaluate_record.alias_evaluate_target_health)
        self.assertEqual(evaluate_record.alias_dns_name, 'example-123456-evaluate-health.us-west-2.elb.amazonaws.com.')
        evaluate_xml = evaluate_record.to_xml()
        # BUG FIX: was assertTrue(value, expected) — see above.
        self.assertEqual(evaluate_record.health_check, 'abcdefgh-abcd-abcd-abcd-abcdefghijkl')
        self.assertTrue('<EvaluateTargetHealth>true</EvaluateTargetHealth>' in evaluate_xml)

        no_evaluate_record = response[3]
        self.assertEqual(no_evaluate_record.name, 'us-west-2-no-evaluate-health.example.com.')
        self.assertEqual(no_evaluate_record.type, 'A')
        self.assertEqual(no_evaluate_record.identifier, 'latency-example-us-west-2-no-evaluate-health')
        self.assertEqual(no_evaluate_record.region, 'us-west-2')
        self.assertEqual(no_evaluate_record.alias_hosted_zone_id, 'ABCDEFG567890')
        self.assertFalse(no_evaluate_record.alias_evaluate_target_health)
        self.assertEqual(no_evaluate_record.alias_dns_name, 'example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com.')
        no_evaluate_xml = no_evaluate_record.to_xml()
        # BUG FIX: was assertTrue(value, expected) — see above.
        self.assertEqual(no_evaluate_record.health_check, 'abcdefgh-abcd-abcd-abcd-abcdefghijkl')
        self.assertTrue('<EvaluateTargetHealth>false</EvaluateTargetHealth>' in no_evaluate_xml)

        failover_record = response[4]
        self.assertEqual(failover_record.name, 'failover.example.com.')
        self.assertEqual(failover_record.type, 'A')
        self.assertEqual(failover_record.identifier, 'failover-primary')
        self.assertEqual(failover_record.failover, 'PRIMARY')
        self.assertEqual(failover_record.ttl, '60')

        healthcheck_record = response[5]
        self.assertEqual(healthcheck_record.health_check, '076a32f8-86f7-4c9e-9fa2-c163d5be67d9')
        self.assertEqual(healthcheck_record.name, 'us-west-2-evaluate-health-healthcheck.example.com.')
        self.assertEqual(healthcheck_record.identifier, 'latency-example-us-west-2-evaluate-health-healthcheck')
        self.assertEqual(healthcheck_record.alias_dns_name, 'example-123456-evaluate-health-healthcheck.us-west-2.elb.amazonaws.com.')
@attr(route53=True)
class TestTruncatedGetAllRRSetsRoute53(AWSMockServiceTestCase):
    """Tests that get_all_rrsets transparently follows a truncated listing
    by issuing a second request using NextRecordName/Type/Identifier."""
    connection_class = Route53Connection

    def setUp(self):
        super(TestTruncatedGetAllRRSetsRoute53, self).setUp()

    def default_body(self):
        """First page: three records, IsTruncated=true with Next* continuation markers."""
        return b"""
<ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <ResourceRecordSets>
        <ResourceRecordSet>
            <Name>example.com.</Name>
            <Type>NS</Type>
            <TTL>900</TTL>
            <ResourceRecords>
                <ResourceRecord>
                    <Value>ns-91.awsdns-41.co.uk.</Value>
                </ResourceRecord>
                <ResourceRecord>
                    <Value>ns-1929.awsdns-93.net.</Value>
                </ResourceRecord>
                <ResourceRecord>
                    <Value>ns-12.awsdns-21.org.</Value>
                </ResourceRecord>
                <ResourceRecord>
                    <Value>ns-102.awsdns-96.com.</Value>
                </ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
        <ResourceRecordSet>
            <Name>example.com.</Name>
            <Type>SOA</Type>
            <TTL>1800</TTL>
            <ResourceRecords>
                <ResourceRecord>
                    <Value>ns-1929.awsdns-93.net. hostmaster.awsdns.net. 1 10800 3600 604800 1800</Value>
                </ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
        <ResourceRecordSet>
            <Name>wrr.example.com.</Name>
            <Type>A</Type>
            <SetIdentifier>primary</SetIdentifier>
            <Weight>100</Weight>
            <TTL>300</TTL>
            <ResourceRecords>
                <ResourceRecord><Value>127.0.0.1</Value></ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
    </ResourceRecordSets>
    <IsTruncated>true</IsTruncated>
    <NextRecordName>wrr.example.com.</NextRecordName>
    <NextRecordType>A</NextRecordType>
    <NextRecordIdentifier>secondary</NextRecordIdentifier>
    <MaxItems>3</MaxItems>
</ListResourceRecordSetsResponse>"""

    def paged_body(self):
        """Second page: the remaining weighted record, IsTruncated=false."""
        return b"""
<ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <ResourceRecordSets>
        <ResourceRecordSet>
            <Name>wrr.example.com.</Name>
            <Type>A</Type>
            <SetIdentifier>secondary</SetIdentifier>
            <Weight>50</Weight>
            <TTL>300</TTL>
            <ResourceRecords>
                <ResourceRecord><Value>127.0.0.2</Value></ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
    </ResourceRecordSets>
    <IsTruncated>false</IsTruncated>
    <MaxItems>3</MaxItems>
</ListResourceRecordSetsResponse>"""

    def test_get_all_rr_sets(self):
        """Iterating past the first page should fetch page two with the Next* markers."""
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_rrsets("Z1111", maxitems=3)

        # made first request
        self.assertEqual(self.actual_request.path, '/2013-04-01/hostedzone/Z1111/rrset?maxitems=3')

        # anticipate a second request when we page it
        self.set_http_response(status_code=200, body=self.paged_body())

        # this should trigger another call to get_all_rrsets
        self.assertEqual(len(list(response)), 4)

        # The follow-up request must carry the continuation markers; parse the
        # query string since parameter ordering is not deterministic.
        url_parts = urllib.parse.urlparse(self.actual_request.path)
        self.assertEqual(url_parts.path, '/2013-04-01/hostedzone/Z1111/rrset')
        self.assertEqual(urllib.parse.parse_qs(url_parts.query),
                         dict(type=['A'], name=['wrr.example.com.'], identifier=['secondary']))
@attr(route53=True)
class TestCreateHealthCheckRoute53IpAddress(AWSMockServiceTestCase):
    """Tests create_health_check with an IP-address based HTTPS_STR_MATCH check."""
    connection_class = Route53Connection

    def setUp(self):
        super(TestCreateHealthCheckRoute53IpAddress, self).setUp()

    def default_body(self):
        """Canned CreateHealthCheck response echoing the submitted IP-based config."""
        return b"""
<CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <HealthCheck>
        <Id>34778cf8-e31e-4974-bad0-b108bd1623d3</Id>
        <CallerReference>2fa48c8f-76ef-4253-9874-8bcb2b0d7694</CallerReference>
        <HealthCheckConfig>
            <IPAddress>74.125.228.81</IPAddress>
            <Port>443</Port>
            <Type>HTTPS_STR_MATCH</Type>
            <SearchString>OK</SearchString>
            <ResourcePath>/health_check</ResourcePath>
            <RequestInterval>30</RequestInterval>
            <FailureThreshold>3</FailureThreshold>
        </HealthCheckConfig>
    </HealthCheck>
</CreateHealthCheckResponse>
        """

    def test_create_health_check_ip_address(self):
        """An IP-based check serializes IPAddress (not FQDN) and round-trips its config."""
        self.set_http_response(status_code=201)
        hc = HealthCheck(ip_addr='74.125.228.81', port=443, hc_type='HTTPS_STR_MATCH', resource_path='/health_check', string_match='OK')
        hc_xml = hc.to_xml()
        self.assertFalse('<FullyQualifiedDomainName>' in hc_xml)
        self.assertTrue('<IPAddress>' in hc_xml)

        response = self.service_connection.create_health_check(hc)
        hc_resp = response['CreateHealthCheckResponse']['HealthCheck']['HealthCheckConfig']
        self.assertEqual(hc_resp['IPAddress'], '74.125.228.81')
        self.assertEqual(hc_resp['ResourcePath'], '/health_check')
        self.assertEqual(hc_resp['Type'], 'HTTPS_STR_MATCH')
        self.assertEqual(hc_resp['Port'], '443')
        # Cleanup: removed a duplicated ResourcePath assertion that re-checked
        # the same key already asserted above.
        self.assertEqual(hc_resp['SearchString'], 'OK')
        self.assertEqual(response['CreateHealthCheckResponse']['HealthCheck']['Id'], '34778cf8-e31e-4974-bad0-b108bd1623d3')
@attr(route53=True)
class TestCreateHealthCheckRoute53FQDN(AWSMockServiceTestCase):
    """Tests create_health_check with a fully-qualified-domain-name based HTTPS check."""
    connection_class = Route53Connection

    def setUp(self):
        super(TestCreateHealthCheckRoute53FQDN, self).setUp()

    def default_body(self):
        """Canned CreateHealthCheck response echoing the submitted FQDN-based config."""
        return b"""
<CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <HealthCheck>
        <Id>f9abfe10-8d2a-4bbd-8f35-796f0f8572f2</Id>
        <CallerReference>3246ac17-b651-4295-a5c8-c132a59693d7</CallerReference>
        <HealthCheckConfig>
            <Port>443</Port>
            <Type>HTTPS</Type>
            <ResourcePath>/health_check</ResourcePath>
            <FullyQualifiedDomainName>example.com</FullyQualifiedDomainName>
            <RequestInterval>30</RequestInterval>
            <FailureThreshold>3</FailureThreshold>
        </HealthCheckConfig>
    </HealthCheck>
</CreateHealthCheckResponse>
        """

    def test_create_health_check_fqdn(self):
        """An FQDN-based check serializes FullyQualifiedDomainName (not IPAddress)."""
        self.set_http_response(status_code=201)
        hc = HealthCheck(ip_addr='', port=443, hc_type='HTTPS', resource_path='/health_check', fqdn='example.com')
        hc_xml = hc.to_xml()
        self.assertTrue('<FullyQualifiedDomainName>' in hc_xml)
        self.assertFalse('<IPAddress>' in hc_xml)

        response = self.service_connection.create_health_check(hc)
        hc_resp = response['CreateHealthCheckResponse']['HealthCheck']['HealthCheckConfig']
        self.assertEqual(hc_resp['FullyQualifiedDomainName'], 'example.com')
        self.assertEqual(hc_resp['ResourcePath'], '/health_check')
        self.assertEqual(hc_resp['Type'], 'HTTPS')
        self.assertEqual(hc_resp['Port'], '443')
        # Cleanup: removed a duplicated ResourcePath assertion that re-checked
        # the same key already asserted above.
        self.assertEqual(response['CreateHealthCheckResponse']['HealthCheck']['Id'], 'f9abfe10-8d2a-4bbd-8f35-796f0f8572f2')
@attr(route53=True)
class TestChangeResourceRecordSetsRoute53(AWSMockServiceTestCase):
    """Serialization test for ChangeResourceRecordSets covering plain, alias,
    weighted, latency-based and failover record flavors."""
    # Connection class the AWSMockServiceTestCase harness instantiates.
    connection_class = Route53Connection
    def setUp(self):
        """Set up the mocked Route53 service connection via the base class."""
        super(TestChangeResourceRecordSetsRoute53, self).setUp()
    def default_body(self):
        """Canned ChangeResourceRecordSets XML response served by the mock HTTP layer."""
        return b"""
<ChangeResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<ChangeInfo>
<Id>/change/C1111111111111</Id>
<Status>PENDING</Status>
<SubmittedAt>2014-05-05T10:11:12.123Z</SubmittedAt>
</ChangeInfo>
</ChangeResourceRecordSetsResponse>
"""
    def test_record_commit(self):
        """rrsets.to_xml() must emit the expected request XML for all record flavors."""
        rrsets = ResourceRecordSets(self.service_connection)
        # One change per record flavor exercised below.
        rrsets.add_change_record('CREATE', Record('vanilla.example.com', 'A', 60, ['1.2.3.4']))
        rrsets.add_change_record('CREATE', Record('alias.example.com', 'AAAA', alias_hosted_zone_id='Z123OTHER', alias_dns_name='target.other', alias_evaluate_target_health=True))
        rrsets.add_change_record('CREATE', Record('wrr.example.com', 'CNAME', 60, ['cname.target'], weight=10, identifier='weight-1'))
        rrsets.add_change_record('CREATE', Record('lbr.example.com', 'TXT', 60, ['text record'], region='us-west-2', identifier='region-1'))
        rrsets.add_change_record('CREATE', Record('failover.example.com', 'A', 60, ['2.2.2.2'], health_check='hc-1234', failover='PRIMARY', identifier='primary'))
        changes_xml = rrsets.to_xml()
        # the whitespacing doesn't match exactly, so we'll pretty print and drop all new lines
        # not the best, but
        actual_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(changes_xml).toprettyxml())
        expected_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(b"""
<ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<ChangeBatch>
<Comment>None</Comment>
<Changes>
<Change>
<Action>CREATE</Action>
<ResourceRecordSet>
<Name>vanilla.example.com</Name>
<Type>A</Type>
<TTL>60</TTL>
<ResourceRecords>
<ResourceRecord>
<Value>1.2.3.4</Value>
</ResourceRecord>
</ResourceRecords>
</ResourceRecordSet>
</Change>
<Change>
<Action>CREATE</Action>
<ResourceRecordSet>
<Name>alias.example.com</Name>
<Type>AAAA</Type>
<AliasTarget>
<HostedZoneId>Z123OTHER</HostedZoneId>
<DNSName>target.other</DNSName>
<EvaluateTargetHealth>true</EvaluateTargetHealth>
</AliasTarget>
</ResourceRecordSet>
</Change>
<Change>
<Action>CREATE</Action>
<ResourceRecordSet>
<Name>wrr.example.com</Name>
<Type>CNAME</Type>
<SetIdentifier>weight-1</SetIdentifier>
<Weight>10</Weight>
<TTL>60</TTL>
<ResourceRecords>
<ResourceRecord>
<Value>cname.target</Value>
</ResourceRecord>
</ResourceRecords>
</ResourceRecordSet>
</Change>
<Change>
<Action>CREATE</Action>
<ResourceRecordSet>
<Name>lbr.example.com</Name>
<Type>TXT</Type>
<SetIdentifier>region-1</SetIdentifier>
<Region>us-west-2</Region>
<TTL>60</TTL>
<ResourceRecords>
<ResourceRecord>
<Value>text record</Value>
</ResourceRecord>
</ResourceRecords>
</ResourceRecordSet>
</Change>
<Change>
<Action>CREATE</Action>
<ResourceRecordSet>
<Name>failover.example.com</Name>
<Type>A</Type>
<SetIdentifier>primary</SetIdentifier>
<Failover>PRIMARY</Failover>
<TTL>60</TTL>
<ResourceRecords>
<ResourceRecord>
<Value>2.2.2.2</Value>
</ResourceRecord>
</ResourceRecords>
<HealthCheckId>hc-1234</HealthCheckId>
</ResourceRecordSet>
</Change>
</Changes>
</ChangeBatch>
</ChangeResourceRecordSetsRequest>
""").toprettyxml())
        # Note: the alias XML should not include the TTL, even if it's specified in the object model
        self.assertEqual(actual_xml, expected_xml)
| |
import collections
import email
import types
import urllib
import urlparse
import responses
import times
# todo: implement own conversion utility
from django.utils.encoding import force_bytes
from .loading import load_resource
def parse_http_date(header, headers):
    """Return the parsed HTTP-date value of *header* from *headers*.

    Returns None when the header is absent, empty, or unparsable.
    """
    value = headers.get(header)
    if not value:
        return None
    timetuple = email.utils.parsedate_tz(value)
    try:
        # mktime_tz raises TypeError on a None timetuple (unparsable date).
        return times.from_unix(email.utils.mktime_tz(timetuple))
    except (TypeError, ValueError):
        return None
class QueryDict(collections.MutableMapping):
    """
    QueryDict acts like a plain `dict` type, but it handles
    automatically multiple values for same key.
    The most safest representation of URI query parameters is a list
    of tuples, because the parameter names aren't unique. Unfortunately
    accessing list of tuples is not so handy, so a mapping is
    required.
    In most cases query parameters looks like a mapping of simple
    key => value pairs, so we're expecting just one value per key. But when
    value is a list, we're expecting that accessing a key will return that
    list, not last nor first value.
    The problematic case is for keys, for which we're expecting always a list
    of values, but just one was passed in URI. Accessing the key will give
    just straight value instead of expected list with one item. In that cases
    you should use `QueryDict.getlist()` directly, which returns always a list.
    The values are stored internally as lists.
    `.items()` method returns a list of (key, value) tuples, where value is
    a single value from a key's values list. This means that key may not be
    unique. This representation is compatible with `urllib.urlencode()`.
    `.keys()` returns unique key names, same as for pure `dict`.
    `.values()` returns list of same values, which can be accessed by key,
    `.lists()` returns internal representation as list of lists.
    """
    def __init__(self, initial=None):
        # Internal store: key -> list of values.
        self._data = {}
        self.update(initial)
    def update(self, data):
        """Merge *data* (a mapping or an iterable of (key, value) pairs).

        Values for keys present in *data* are replaced, not appended to.
        List/tuple values are flattened into individual entries.
        """
        if data is None:
            return
        else:
            try:
                # Mappings are normalized to an iterable of pairs.
                data = data.items()
            except AttributeError:
                # Already an iterable of (key, value) pairs.
                pass
            finally:
                # `finally` runs in both cases above: reset every key that is
                # about to be (re)assigned, then accumulate the new values.
                keys = set([x[0] for x in data])
                for key in keys:
                    self._data[key] = []
                for key, value in data:
                    if isinstance(value, (types.ListType, types.TupleType)):
                        for x in value:
                            self._data[key].append(x)
                    else:
                        self._data[key].append(value)
    def items(self):
        # Flattened (key, value) pairs; keys repeat for multi-valued entries,
        # which is the representation `urllib.urlencode()` expects.
        result = []
        for key, values in self._data.items():
            result += map(lambda x: (key, x), values)
        return result
    def getlist(self, key, default=None):
        """Return the full list of values for *key*, or *default* if absent."""
        return self._data.get(key, default)
    def lists(self):
        """Return the internal representation: (key, list-of-values) pairs."""
        return self._data.items()
    def __setitem__(self, key, value):
        return self.update({key: value})
    def __getitem__(self, key):
        # Single-valued keys yield the bare value; multi-valued keys yield
        # the whole list (see class docstring for the rationale).
        return self._data[key][-1]\
            if len(self._data[key]) < 2 else self._data[key]
    def __delitem__(self, key):
        del self._data[key]
    def __len__(self):
        return len(self._data)
    def __iter__(self):
        return iter(self._data)
    def __repr__(self):
        return repr(self._data)
class Context(object):
    """Per-request context passed to resource handlers.

    Bundles the API object, the raw Django request, parsed query
    parameters/body data, and factory shortcuts for the framework's
    response types.
    """
    def __init__(
            self, api, request, resource, method, parameters=None,
            body=None, data=None, files=None, raw=None, extra=None,
            headers=None):
        self.method = method
        self.api = api
        self.headers = headers or {}
        self.request = request
        self.body = body
        self.raw = raw
        self.resource = resource
        self.parameters = QueryDict(parameters)  # GET
        self.data = data or {}  # POST
        self.files = files or {}  # FILES
        self.deserializer = None
        self.content_type = None
        self.extra = extra or {}
    def build_absolute_uri(self, path=None, parameters=None):
        """
        Returns absolute uri to the specified `path` with optional
        query string `parameters`.
        If no `path` is provided, the current request full path
        (including query string) will be used and extended by
        optional `parameters`.
        """
        def build_uri(path):
            # Resolve *path* against the scheme/host/path of the current request.
            current = 'http%s://%s%s' % (
                's' if self.request.is_secure() else '',
                self.request.get_host(), self.request.path)
            return urlparse.urljoin(current, path)
        params = QueryDict()
        if path:
            # Normalize duplicate slashes, preserving a trailing slash.
            full_path = u'/'.join(
                filter(None, (self.api.path+path).split('/')))
            if path.endswith('/'):
                full_path += '/'
            uri = build_uri('/'+full_path)
        else:
            # No explicit path: reuse the current request's path and query.
            params.update(self.parameters.items())
            uri = build_uri(self.request.path)
        # todo: change to internal restosaur settings
        enc = self.request.GET.encoding
        params.update(parameters or {})
        # Encode values to bytes so urlencode handles non-ASCII safely.
        params = map(
            lambda x: (x[0], force_bytes(x[1], enc)),
            params.items())
        if params:
            return '%s?%s' % (uri, urllib.urlencode(params))
        else:
            return uri
    def url_for(self, resource, **kwargs):
        """
        Shortcut wrapper of `resource.uri()`
        """
        if isinstance(resource, types.StringTypes):
            # Dotted-path string: resolve to the resource object first.
            resource = load_resource(resource)
        return resource.uri(self, params=kwargs)
    def is_modified_since(self, dt):
        """
        Compares datetime `dt` with `If-Modified-Since` header value.
        Returns True if `dt` is newer than `If-Modified-Since`,
        False otherwise.
        """
        if_modified_since = parse_http_date('if-modified-since', self.headers)
        if if_modified_since:
            # Sub-second precision is dropped; HTTP dates carry whole seconds.
            return times.to_unix(
                dt.replace(microsecond=0)) > times.to_unix(if_modified_since)
        return True
    @property
    def deserialized(self):
        # Alias for the (already deserialized) request body.
        return self.body
    # response factories
    def Response(self, *args, **kwargs):
        return responses.Response(self, *args, **kwargs)
    def Created(self, *args, **kwargs):
        return responses.CreatedResponse(self, *args, **kwargs)
    def ValidationError(self, *args, **kwargs):
        return responses.ValidationErrorResponse(self, *args, **kwargs)
    def NotAcceptable(self, *args, **kwargs):
        return responses.NotAcceptableResponse(self, *args, **kwargs)
    def NotFound(self, *args, **kwargs):
        return responses.NotFoundResponse(self, *args, **kwargs)
    def SeeOther(self, *args, **kwargs):
        return responses.SeeOtherResponse(self, *args, **kwargs)
    def NotModified(self, *args, **kwargs):
        return responses.NotModifiedResponse(self, *args, **kwargs)
    def MethodNotAllowed(self, *args, **kwargs):
        return responses.MethodNotAllowedResponse(self, *args, **kwargs)
    def Forbidden(self, *args, **kwargs):
        return responses.ForbiddenResponse(self, *args, **kwargs)
    def BadRequest(self, *args, **kwargs):
        return responses.BadRequestResponse(self, *args, **kwargs)
    def Unauthorized(self, *args, **kwargs):
        return responses.UnauthorizedResponse(self, *args, **kwargs)
    def NoContent(self, *args, **kwargs):
        return responses.NoContentResponse(self, *args, **kwargs)
    def Entity(self, *args, **kwargs):
        return responses.EntityResponse(self, *args, **kwargs)
    def Collection(self, *args, **kwargs):
        return responses.CollectionResponse(self, *args, **kwargs)
| |
#!/usr/bin/env python
#
# Use the raw transactions API to spend etps received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a etpd or Etp-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
# Fee threshold (in ETP) below which a transaction counts as "no fee".
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    # Round-trip the amount through the JSON encoder as a float, then
    # verify the satoshi count survives exactly.
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the Etp Core data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/EtpCore/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "EtpCore")
    # Any other platform (Linux, *BSD, ...) uses the dot-directory.
    return os.path.expanduser("~/.etpcore")
def read_bitcoin_config(dbdir):
    """Read the etp.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser
    class FakeSecHead(object):
        # File-like wrapper that injects a fake "[all]" section header so
        # ConfigParser can parse the section-less etp.conf format, and strips
        # trailing '#' comments from each line.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            # First call yields the injected header (try/finally clears it
            # after the return); later calls proxy the underlying file.
            if self.sechead:
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "etp.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a Etp Core JSON-RPC server described by *config*.

    Returns a connected ServiceProxy; exits the process with status 1 when
    the connection fails or the server's testnet setting does not match.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
        config['rpcport'] = 17898 if testnet else 7898
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the etpd we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed the
        # SystemExit raised by the testnet-mismatch sys.exit(1) above and
        # printed a misleading second "Error connecting" message.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(etpd):
    """Prompt for the wallet passphrase when the wallet is locked.

    Returns True when the wallet is unencrypted or now unlocked,
    False when the entered passphrase was wrong.
    """
    info = etpd.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            etpd.walletpassphrase(passphrase, 5)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C at the passphrase
            # prompt (KeyboardInterrupt) still aborts the script.
            sys.stderr.write("Wrong passphrase\n")
        info = etpd.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(etpd):
    """Map address -> {"total", "outputs", "account"} for all unspent outputs.

    Scans listunspent(0) and resolves each output's address via
    getrawtransaction; only pubkeyhash/scripthash outputs are considered.
    """
    address_summary = dict()
    address_to_account = dict()
    for info in etpd.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = etpd.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = etpd.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-etp-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick *inputs* in order until *needed* is covered.

    Returns (selected_outputs, change); change is negative when the
    inputs do not cover the needed amount.
    """
    # Feel free to improve this, this is good enough for my simple needs:
    picked = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        picked.append({ "txid":candidate["txid"], "vout":candidate["vout"]})
        gathered += candidate["amount"]
    return (picked, gathered-needed)
def create_tx(etpd, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending from *fromaddresses*.

    Sends *amount* to *toaddress*; change above BASE_FEE is returned to the
    last from-address. Exits the process when funds are insufficient or
    signing fails. Returns the signed transaction hex.
    """
    all_coins = list_available(etpd)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to etpd.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            # Sending to ourselves: merge change into the existing output.
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = etpd.createrawtransaction(inputs, outputs)
    signed_rawtx = etpd.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(etpd, txinfo):
    """Sum the values of all previous outputs spent by *txinfo*'s inputs."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        # Each input references an output of an earlier transaction.
        prev_tx = etpd.getrawtransaction(vin['txid'], 1)
        total += prev_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of *txinfo*."""
    # Start from a Decimal zero so an empty vout list still yields a Decimal.
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(etpd, txdata_hex, max_fee):
    """Exit with status 1 if the transaction's implied fee looks unreasonable.

    Decodes *txdata_hex*, computes fee = inputs - outputs, and rejects the
    transaction when the fee exceeds *max_fee* or when a large/tiny
    transaction carries less than BASE_FEE.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = etpd.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(etpd, txinfo)
        total_out = compute_amount_out(txinfo)
        # Bug fix: `fee` was previously read as an undefined global here,
        # raising NameError whenever either no-fee check below fired.
        fee = total_in-total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        tx_size = len(txdata_hex)/2  # two hex characters per byte
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point.

    With no --amount, lists available funds per address; otherwise builds,
    sanity-checks and (unless --dry_run) broadcasts a transaction.
    """
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get etps from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send etps to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of etp.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    etpd = connect_JSON(config)
    if options.amount is None:
        # Listing mode: show per-address totals. (iteritems: Python 2 API.)
        address_summary = list_available(etpd)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(etpd) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(etpd, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse fees above 1% of the sent amount.
        sanity_test_fee(etpd, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = etpd.sendrawtransaction(txdata)
            print(txid)
# Script entry point.
if __name__ == '__main__':
    main()
| |
from collections import OrderedDict
import datetime
import math
# from util.singly import SinglyHelper]
from django.core.cache import cache
from django.db.models import Sum
from django.http import HttpResponseRedirect, HttpResponse
from django.template.loader import render_to_string
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from annoying.decorators import render_to, ajax_request
from manual.models import GutterBumper, Emotion, Value, Weight, WeeklyGoal
from manual.forms import GutterBumperForm, WeeklyGoalForm
from manual.utils import dump_data_pickle, CORRELATION_CHOICES, save_correlations
def turn_friendly_time_into_python_time(time_with_ampm):
    """Convert a zero-padded "HH:MMam"/"HH:MMpm" string to "HH:MM:SS" (24h).

    Bug fix: the old code added 12 to *any* PM hour and then clamped 24
    back to 0, so "12:15pm" (12:15, noon) wrongly became "00:15:00".
    Now 12 AM maps to 00 and 12 PM stays 12.
    """
    time = time_with_ampm[:5]
    ampm = time_with_ampm[5:]
    hour, minute = time.split(":")
    hour = int(hour)
    minute = int(minute)
    if ampm.lower() == "am":
        if hour == 12:
            hour = 0  # midnight
    elif ampm.lower() == "pm":
        if hour != 12:
            hour += 12  # noon (12pm) stays 12
    timestr = "%02d:%02d:00" % (hour, minute)
    return timestr
def success_and_statii_for_bumper(success, bumper_pk):
    """Build the JSON-serializable status payload for one GutterBumper row.

    *success* is passed through unchanged so callers can report whether a
    preceding save worked; the rest mirrors the bumper's status properties.
    """
    bumper = GutterBumper.objects.get(pk=bumper_pk)
    return {
        "success": success,
        "sleep_hrs": bumper.sleep_hrs,
        "id": bumper_pk,
        "meditated_status": bumper.meditated_status,
        "off_status": bumper.off_status,
        "art_status": bumper.art_status,
        "worked_out_status": bumper.worked_out_status,
        "left_the_house_status": bumper.left_the_house_status,
        "nature_time_status": bumper.nature_time_status,
        "ate_green_status": bumper.ate_green_status,
        "has_reported_presence_today": bumper.has_reported_presence_today,
        "has_reported_creativity_today": bumper.has_reported_creativity_today,
        "has_reported_happiness_today": bumper.has_reported_happiness_today,
        "has_reported_morning_mood_today": bumper.has_reported_morning_mood_today,
        "has_reported_unbusy_today": bumper.has_reported_unbusy_today,
        "has_reported_spoons_today": bumper.has_reported_spoons_today,
        "has_reported_burnt_out_today": bumper.has_reported_burnt_out_today,
        "all_green": bumper.all_green,
    }
@login_required
@render_to("manual/home.html")
def home(request):
    """Render the home page; locals() exposes `request` to the template."""
    return locals()
@render_to("manual/emotions.html")
def emotions(request):
    """List all Emotion rows. NOTE(review): no @login_required here — confirm intentional."""
    emotions = Emotion.objects.all()
    return locals()
@login_required
@render_to("manual/emotion.html")
def emotion(request, emotion_slug):
    """Render a single Emotion looked up by slug."""
    emotion = Emotion.objects.get(slug=emotion_slug)
    return locals()
@login_required
@render_to("manual/values.html")
def values(request):
    """List all Value rows."""
    values = Value.objects.all()
    return locals()
@login_required
@render_to("manual/value.html")
def value(request, value_slug):
    """Render a single Value looked up by slug."""
    value = Value.objects.get(slug=value_slug)
    return locals()
@login_required
@render_to("manual/monthly.html")
def monthly(request):
    """Render averages of tracked hours and mood metrics over the last 31 days.

    NOTE(review): every avg_* below divides by total_days, so this raises
    ZeroDivisionError when no bumpers exist in the window — confirm whether
    that can happen in practice. Under Python 2 these are integer divisions
    when the totals are ints.
    """
    gutterbumpers = GutterBumper.objects\
        .filter(date__lt=datetime.date.today())
    # .filter(date__gte=datetime.date.today()-datetime.timedelta(days=31))\
    total_days = gutterbumpers.filter(date__gte=datetime.date.today()-datetime.timedelta(days=31)).count()
    # Approximate workdays by subtracting two weekend days per seven.
    total_workdays = total_days - math.floor(total_days/7*2)
    total_sleep = 0
    total_work = 0
    total_alone = 0
    total_neap = 0
    total_friend = 0
    total_public = 0
    total_relationship = 0
    total_presence = 0
    total_happiness = 0
    total_creativity = 0
    total_morning_mood = 0
    total_unbusy = 0
    total_spoons = 0
    total_burnout = 0
    # Accumulate per-day values; `or 0` treats unreported fields as zero.
    for g in gutterbumpers.filter(date__gte=datetime.date.today()-datetime.timedelta(days=31)):
        total_sleep += g.sleep_hrs or 0
        total_work += g.work_hrs or 0
        total_alone += g.alone_hrs or 0
        total_neap += g.neap_hrs or 0
        total_friend += g.friend_hrs or 0
        total_public += g.public_hrs or 0
        total_relationship += g.relationship_hrs or 0
        total_presence += g.presence or 0
        total_happiness += g.happiness or 0
        total_creativity += g.creativity or 0
        total_morning_mood += g.morning_mood or 0
        total_unbusy += g.unbusy or 0
        total_spoons += g.spoons or 0
        total_burnout += g.burnt_out or 0
    avg_sleep = total_sleep / total_days
    avg_work = total_work / total_days
    avg_alone = total_alone / total_days
    avg_neap = total_neap / total_days
    avg_friend = total_friend / total_days
    avg_public = total_public / total_days
    avg_relationship = total_relationship / total_days
    avg_work_per_workday = total_work / total_workdays
    avg_presence = total_presence / total_days
    avg_happiness = total_happiness / total_days
    avg_creativity = total_creativity / total_days
    avg_morning_mood = total_morning_mood / total_days
    avg_unbusy = total_unbusy / total_days
    avg_burnout = total_burnout / total_days
    avg_spoons = total_spoons / total_days
    return locals()
@login_required
@render_to("manual/dashboard.html")
def dashboard(request):
    """Render the dashboard for today's bumper, creating the row if needed."""
    current_bumper = GutterBumper.objects.get_or_create(date=datetime.datetime.today())[0]
    bumper_statii = success_and_statii_for_bumper(True, current_bumper.pk)
    return locals()
@login_required
@csrf_exempt
@render_to("manual/daily.html")
def daily(request):
    """Render today's and yesterday's entry forms, creating rows as needed.

    prev_day/next_day rows are pre-created so navigation links always work.
    """
    today = datetime.date.today()
    yesterday_bumper = GutterBumper.objects.get_or_create(date=today-datetime.timedelta(days=1))[0]
    yesterday_form = GutterBumperForm(instance=yesterday_bumper, prefix="yesterday")
    today_bumper = GutterBumper.objects.get_or_create(date=today)[0]
    today_form = GutterBumperForm(instance=today_bumper)
    prev_day = GutterBumper.objects.get_or_create(date=today-datetime.timedelta(days=2))[0]
    next_day = GutterBumper.objects.get_or_create(date=today+datetime.timedelta(days=1))[0]
    return locals()
@csrf_exempt
@ajax_request
@login_required
def daily_form(request, day_pk):
    """Return the rendered daily form partial for the bumper *day_pk* as JSON."""
    today_bumper = GutterBumper.objects.get(pk=day_pk)
    # Neighbors are created eagerly so the template's prev/next links resolve.
    prev_day = GutterBumper.objects.get_or_create(date=today_bumper.date-datetime.timedelta(days=1))[0]
    next_day = GutterBumper.objects.get_or_create(date=today_bumper.date+datetime.timedelta(days=1))[0]
    form = GutterBumperForm(instance=today_bumper)
    return {'html': render_to_string("manual/_daily_form.html", locals())}
@csrf_exempt
@ajax_request
@login_required
def update_bumpers(request, bumper_pk):
success = False
try:
data = request.POST.copy()
prefix = ""
manual_prefix = ""
if "yesterday-woke_up_at" in data:
prefix = "yesterday"
manual_prefix = "%s-" % prefix
data["%swoke_up_at" % manual_prefix] = turn_friendly_time_into_python_time(data["%swoke_up_at" % manual_prefix])
data["%sfell_asleep_at" % manual_prefix] = turn_friendly_time_into_python_time(data["%sfell_asleep_at" % manual_prefix])
bumper = GutterBumper.objects.get(pk=bumper_pk)
form = GutterBumperForm(data, instance=bumper, prefix=prefix)
if form.is_valid():
form.save()
success=True
else:
print form.errors
except:
from traceback import print_exc
print print_exc()
pass
return success_and_statii_for_bumper(success, bumper_pk)
@csrf_exempt
@ajax_request
@login_required
def get_sleep_hrs(request, bumper_pk):
    """Return the status payload (including sleep_hrs) for bumper *bumper_pk*."""
    return success_and_statii_for_bumper(True, bumper_pk)
def singly_callback(request, service="fitbit"):
    """Redirect the user to the Singly OAuth authorize URL for *service*.

    NOTE(review): the SinglyHelper import is commented out at the top of this
    file, so calling this view raises NameError as-is — confirm before use.
    """
    url = SinglyHelper.get_authorize_url(service)
    return HttpResponseRedirect(url)
def fitbit_callback(request):
print request.POST
print request
@render_to("manual/eighty.html")
def eighty(request):
    """Render the 'eighty' page with the lifetime sum of the `sex` field."""
    num_sex = GutterBumper.objects.all().aggregate(Sum('sex'))['sex__sum']
    return locals()
@render_to("manual/weekly.html")
def weekly(request):
    """Render weekly goals, rolling a new week forward and saving POSTed edits.

    NOTE(review): raises IndexError if no WeeklyGoal rows exist yet — confirm
    the table is seeded before first use.
    """
    weekly_goals = WeeklyGoal.objects.all()
    most_recent = WeeklyGoal.objects.all().order_by("-start_date")[0]
    if (datetime.date.today() - most_recent.start_date > datetime.timedelta(days=7)):
        # A week has elapsed: start a new goal carrying the previous primary.
        WeeklyGoal.objects.create(
            start_date=most_recent.start_date + datetime.timedelta(days=7),
            primary=most_recent.primary,
        )
        weekly_goals = WeeklyGoal.objects.all()
        most_recent = WeeklyGoal.objects.all().order_by("-start_date")[0]
    if request.method == "POST":
        form = WeeklyGoalForm(request.POST, instance=most_recent)
        if form.is_valid():
            print "saving"
            form.save()
    form = WeeklyGoalForm(instance=most_recent)
    # .order_by("-start_date")
    return locals()
@ajax_request
def todays_one_thing(request):
    """Return today's focus item as JSON, creating today's bumper if needed."""
    today = datetime.date.today()
    today_bumper = GutterBumper.objects.get_or_create(date=today)[0]
    return {"one_thing": today_bumper.one_thing_to_focus_on}
@ajax_request
def red_flag_drinking(request):
    """Return {"too_much": True} when each of the past 6 days had 3+ beers.

    Any single day under three (fun + sleep) beers clears the flag.
    """
    today = datetime.date.today()
    drank_too_much = True
    for i in range(1, 7):
        bumper = GutterBumper.objects.get_or_create(date=today-datetime.timedelta(days=i))[0]
        fun = bumper.number_of_fun_beers or 0
        sleep = bumper.number_of_sleep_beers or 0
        if (fun + sleep) < 3:
            drank_too_much = False
            break
    return {"too_much": drank_too_much}
@ajax_request
def last_10_weights(request):
    """Fetch/save the latest weight, then return the 10 most recent readings."""
    w = Weight()
    w.get_and_save_weight()
    weights = []
    for w in Weight.objects.all()[:10]:
        weights.append({
            "when": w.when,
            "weight": w.weight,
            "fat": w.body_fat_percent,
        })
    return {"weights": weights}
@login_required
def data_dump(request):
    """Return the full pickled data dump as the raw HTTP response body."""
    return HttpResponse(dump_data_pickle())
# @login_required
@render_to("manual/correlations.html")
def correlations(request):
    """Render cached metric correlations.

    Private fields are hidden from everyone except the user "skoczen"
    when the ?all query parameter is present.
    """
    private_data = ["orgasm", "sex_count",]
    choices = OrderedDict()
    if request.user.username != "skoczen" or 'all' not in request.GET:
        # Filtered view: drop the private metric choices.
        for k, v in CORRELATION_CHOICES.items():
            if k not in private_data:
                choices[k] = v
    else:
        choices = CORRELATION_CHOICES
    correlations = cache.get("current_correlations", None)
    if not correlations:
        # Cache miss: recompute and repopulate before rendering.
        save_correlations()
        correlations = cache.get("current_correlations")
    return {
        "correlations": correlations,
        "CORRELATION_CHOICES": choices,
    }
@login_required
@ajax_request
def correlations_for(request):
    """AJAX stub; currently returns locals() (just `request`).

    NOTE(review): serializing `request` through @ajax_request likely fails —
    confirm whether this endpoint is actually used.
    """
    return locals()
| |
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
from django.shortcuts import render, get_object_or_404
from django.conf import settings
from kobo.django.views.generic import DetailView, SearchView
from rest_framework import viewsets, mixins, status
from rest_framework.response import Response
from . import filters
from . import signals
from . import models
from .forms import (ReleaseSearchForm, BaseProductSearchForm,
ProductSearchForm, ProductVersionSearchForm)
from .serializers import (ProductSerializer, ProductVersionSerializer,
ReleaseSerializer, BaseProductSerializer,
ReleaseTypeSerializer, ReleaseVariantSerializer,
VariantTypeSerializer)
from pdc.apps.common.viewsets import (ChangeSetModelMixin,
ChangeSetCreateModelMixin,
ChangeSetUpdateModelMixin,
MultiLookupFieldMixin,
StrictQueryParamMixin)
from . import lib
class ReleaseListView(SearchView):
    """Web UI list page for releases, searchable via ReleaseSearchForm."""
    form_class = ReleaseSearchForm
    queryset = models.Release.objects.all()
    allow_empty = True
    template_name = "release_list.html"
    context_object_name = "release_list"
    paginate_by = settings.ITEMS_PER_PAGE
class ReleaseDetailView(DetailView):
    """Web UI detail page for a single release, looked up by the `id` URL kwarg."""
    model = models.Release
    pk_url_kwarg = "id"
    template_name = "release_detail.html"
class BaseProductListView(SearchView):
    """Web UI list page for base products, searchable via BaseProductSearchForm."""
    form_class = BaseProductSearchForm
    queryset = models.BaseProduct.objects.all()
    allow_empty = True
    template_name = "base_product_list.html"
    context_object_name = "base_product_list"
    paginate_by = settings.ITEMS_PER_PAGE
class BaseProductDetailView(DetailView):
    """Web UI detail page for a base product, plus its associated releases."""
    model = models.BaseProduct
    pk_url_kwarg = "id"
    template_name = "base_product_detail.html"
    context_object_name = "base_product"
    def get_context_data(self, **kwargs):
        """Add the releases built on this base product to the template context."""
        context = super(BaseProductDetailView, self).get_context_data(**kwargs)
        context["release_list"] = models.Release.objects.filter(base_product=self.get_object().id)
        return context
class ProductListView(SearchView):
    """Web UI list page for products, searchable via ProductSearchForm."""
    form_class = ProductSearchForm
    queryset = models.Product.objects.all()
    allow_empty = True
    template_name = "product_list.html"
    context_object_name = "product_list"
    paginate_by = settings.ITEMS_PER_PAGE
class ProductDetailView(DetailView):
    """Web UI detail page for a product, plus its product versions."""
    model = models.Product
    pk_url_kwarg = "id"
    template_name = "product_detail.html"
    context_object_name = "product"
    def get_context_data(self, **kwargs):
        """Add this product's versions to the template context."""
        context = super(ProductDetailView, self).get_context_data(**kwargs)
        context['product_version_list'] = self.get_object().productversion_set.all()
        return context
class ProductViewSet(ChangeSetCreateModelMixin,
                     ChangeSetUpdateModelMixin,
                     StrictQueryParamMixin,
                     mixins.RetrieveModelMixin,
                     mixins.ListModelMixin,
                     viewsets.GenericViewSet):
    """
    API endpoint that allows products to be viewed or edited.
    Each product can have multiple version. Their identifiers are provided in
    the form of `product_version_id` (both in requests and responses).
    """
    # NOTE: the method docstrings below are runtime templates ($LINK / %(...)s
    # placeholders) consumed by the API doc generator — do not reword them.
    # Versions are prefetched because the serializer embeds them.
    queryset = models.Product.objects.all().prefetch_related('productversion_set')
    serializer_class = ProductSerializer
    # Products are addressed by their short name, not numeric pk.
    lookup_field = 'short'
    filter_class = filters.ProductFilter
    def create(self, *args, **kwargs):
        """
        __Method__: POST
        __URL__: $LINK:product-list$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        __Response__:
        %(SERIALIZER)s
        """
        return super(ProductViewSet, self).create(*args, **kwargs)
    def retrieve(self, *args, **kwargs):
        """
        __Method__: GET
        __URL__: $LINK:product-detail:short$
        __Response__:
        %(SERIALIZER)s
        """
        return super(ProductViewSet, self).retrieve(*args, **kwargs)
    def list(self, *args, **kwargs):
        """
        __Method__: GET
        __URL__: $LINK:product-list$
        __Query params__:
        %(FILTERS)s
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        """
        return super(ProductViewSet, self).list(*args, **kwargs)
    def update(self, *args, **kwargs):
        """
        __Method__: PUT, PATCH
        __URL__: $LINK:product-detail:short$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        Please note that if you update the `short` field, the URL of this
        product will change. The change of short name is *not* propagated to
        product versions nor releases.
        __Response__:
        %(SERIALIZER)s
        """
        return super(ProductViewSet, self).update(*args, **kwargs)
class ProductVersionViewSet(ChangeSetCreateModelMixin,
                            ChangeSetUpdateModelMixin,
                            StrictQueryParamMixin,
                            mixins.RetrieveModelMixin,
                            mixins.ListModelMixin,
                            viewsets.GenericViewSet):
    """
    API endpoint that allows product versions to be viewed or edited.
    Product versions always refer to a product by means of a human readable
    `short` name. Similarly releases are referenced by `release_id`. This
    applies to both requests and responses.
    """
    # NOTE: the method docstrings below are runtime templates ($LINK / %(...)s
    # placeholders) consumed by the API doc generator — do not reword them.
    # Related product and releases are fetched eagerly for the serializer.
    queryset = models.ProductVersion.objects.all().select_related('product').prefetch_related('release_set')
    serializer_class = ProductVersionSerializer
    # Addressed by the human-readable product_version_id.
    lookup_field = 'product_version_id'
    # The id may contain dots etc.; allow anything except a slash.
    lookup_value_regex = '[^/]+'
    filter_class = filters.ProductVersionFilter
    def create(self, *args, **kwargs):
        """
        __Method__: POST
        __URL__: $LINK:productversion-list$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        If `short` is not specified, the short name of associated product will
        be used.
        __Response__:
        %(SERIALIZER)s
        """
        return super(ProductVersionViewSet, self).create(*args, **kwargs)
    def retrieve(self, *args, **kwargs):
        """
        __Method__: GET
        __URL__: $LINK:productversion-detail:product_version_id$
        __Response__:
        %(SERIALIZER)s
        The list of releases is ordered by short and version.
        """
        return super(ProductVersionViewSet, self).retrieve(*args, **kwargs)
    def list(self, *args, **kwargs):
        """
        __Method__: GET
        __URL__: $LINK:productversion-list$
        __Query params__:
        %(FILTERS)s
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        The list of releases for each product version is ordered by short and
        version.
        """
        return super(ProductVersionViewSet, self).list(*args, **kwargs)
    def update(self, *args, **kwargs):
        """
        __Method__: PUT, PATCH
        __URL__: $LINK:productversion-detail:product_version_id$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        Please note that if you change the `short` or `version` field, the
        `product_version_id` will be modified accordingly, and the URL of the
        object will be changed. All changes are local to the updated model and
        are not propagated to associated releases.
        __Response__:
        %(SERIALIZER)s
        """
        return super(ProductVersionViewSet, self).update(*args, **kwargs)
class ReleaseViewSet(ChangeSetCreateModelMixin,
                     ChangeSetUpdateModelMixin,
                     StrictQueryParamMixin,
                     mixins.ListModelMixin,
                     mixins.RetrieveModelMixin,
                     viewsets.GenericViewSet):
    """
    An API endpoint providing access to releases.
    Each release can reference either a product version or a base product (or
    both). These references are done via human-readable `product_version_id` or
    `base_product_id`. Composes belonging to given release are referenced via
    `compose_id`.
    The list of associated composes includes both composes built for the
    particular release as well as composes linked to it. It is possible to
    distinguish between these cases by retrieving a detail of the compose.
    """
    queryset = models.Release.objects.all() \
        .select_related('product_version', 'release_type', 'base_product') \
        .prefetch_related('compose_set')
    serializer_class = ReleaseSerializer
    lookup_field = 'release_id'
    lookup_value_regex = '[^/]+'
    filter_class = filters.ReleaseFilter

    def filter_queryset(self, qs):
        """
        If the viewset instance has attribute `order_queryset` set to True,
        this method returns a list of releases ordered by version. Otherwise it
        will return an unsorted queryset. (It is not possible to sort
        unconditionally as get_object() will at some point call this method and
        fail unless it receives a QuerySet instance.)
        """
        qs = super(ReleaseViewSet, self).filter_queryset(qs)
        if getattr(self, 'order_queryset', False):
            # Version-aware ordering happens in Python, hence a list is
            # returned instead of a queryset.
            return sorted(qs, key=models.Release.version_sort_key)
        return qs

    def create(self, request, *args, **kwargs):
        """
        __Method__: POST
        __URL__: $LINK:release-list$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        *release_type*: $LINK:releasetype-list$
        __Response__:
        %(SERIALIZER)s
        """
        response = super(ReleaseViewSet, self).create(request, *args, **kwargs)
        if response.status_code == status.HTTP_201_CREATED:
            # NOTE(review): relies on self.object being set by the create
            # mixin during super().create() — confirm against
            # ChangeSetCreateModelMixin.
            signals.release_post_update.send(sender=self.object.__class__,
                                             release=self.object,
                                             request=request)
        return response

    def retrieve(self, *args, **kwargs):
        """
        __Method__: GET
        __URL__: $LINK:release-detail:release_id$
        __Response__:
        %(SERIALIZER)s
        The list of composes is ordered by their date, type and respin (even
        though those fields are not directly visible here).
        """
        return super(ReleaseViewSet, self).retrieve(*args, **kwargs)

    def list(self, *args, **kwargs):
        """
        __Method__: GET
        __URL__: $LINK:release-list$
        __Query params__:
        %(FILTERS)s
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        The list of composes for each release is ordered by their date, type,
        and respin (even though those fields are not directly visible here).
        The releases themselves are ordered by short and version.
        """
        # Flag consumed by filter_queryset() above to enable version sorting.
        self.order_queryset = True
        return super(ReleaseViewSet, self).list(*args, **kwargs)

    def update(self, request, *args, **kwargs):
        """
        This end-point allows updating a release.
        When using the `PUT` method, if an optional field is not specified in
        the input, it will be erased.
        This applies also to Bugzilla and DistGit mapping: if it is not specified,
        it will be cleared.
        __Method__: PUT, PATCH
        __URL__: $LINK:release-detail:release_id$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        Please note that if you change the `short`, `version`, `release_type`
        or `base_product` fields, the `release_id` will be updated and the URL
        of this release will change.
        __Response__:
        %(SERIALIZER)s
        """
        # Emit pre-update with the object as it was before the change.
        object = self.get_object()
        signals.release_pre_update.send(sender=object.__class__, release=object, request=request)
        response = super(ReleaseViewSet, self).update(request, *args, **kwargs)
        if response.status_code == status.HTTP_200_OK:
            # NOTE(review): release=self.object (not the pre-update `object`)
            # — presumably refreshed by the update mixin; verify.
            signals.release_post_update.send(sender=object.__class__,
                                             release=self.object,
                                             request=request)
        return response
class ReleaseImportView(StrictQueryParamMixin, viewsets.GenericViewSet):
    """Import a release (with its variants and arches) from composeinfo JSON."""
    queryset = models.Release.objects.none()  # Required for permissions

    def create(self, request):
        """
        Import release including variants and architectures from composeinfo
        json file.
        The input to this call is a compose info file in JSON format. The
        imported file will be parsed and required objects created in database.
        The created objects are *BaseProduct*, *Product*, *ProductVersion*,
        *Release* and its *Variant.Arch* mapping. Note that despite the input
        being a composeinfo file, no Compose object will be ever created by
        this call.
        If the created objects already exist, nothing is done with them.
        Therefore this call is idempotent and uploading the same composeinfo
        data twice is safe.
        __Method__: POST
        __URL__: $LINK:releaseimportcomposeinfo-list$
        __Data__: composeinfo data as saved in `composeinfo.json` file (the
        formatting of the file is not important for PDC, and it is possible to
        significantly minimize size of the file by removing indentation)
        __Example__:
        $ curl -H 'Content-Type: application/json' -X POST -d @/path/to/composeinfo.json \\
        "$URL:releaseimportcomposeinfo-list$"
        """
        # Reject empty bodies up front; the helper expects composeinfo data.
        if not request.data:
            return Response(status=status.HTTP_400_BAD_REQUEST, data={'detail': 'Missing composeinfo'})
        # Parsing and idempotent object creation is delegated to the library.
        lib.release__import_from_composeinfo(request, request.data)
        return Response(status=status.HTTP_201_CREATED)
class BaseProductViewSet(ChangeSetCreateModelMixin,
                         ChangeSetUpdateModelMixin,
                         StrictQueryParamMixin,
                         mixins.ListModelMixin,
                         mixins.RetrieveModelMixin,
                         viewsets.GenericViewSet):
    """
    An API endpoint providing access to base products.
    """
    queryset = models.BaseProduct.objects.all()
    serializer_class = BaseProductSerializer
    lookup_field = 'base_product_id'
    lookup_value_regex = '[^/]+'
    filter_fields = ('base_product_id', 'name', 'short', 'version')

    # Every handler below is a pure delegation to the generic mixin
    # implementation.  The docstrings are rendered into the browsable API
    # documentation; their %(...)s placeholders and $LINK:...$ macros are
    # substituted by the framework and must be preserved verbatim.
    def create(self, *args, **kwargs):
        """
        __Method__: POST
        __URL__: $LINK:baseproduct-list$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        __Response__:
        %(SERIALIZER)s
        """
        response = super(BaseProductViewSet, self).create(*args, **kwargs)
        return response

    def retrieve(self, *args, **kwargs):
        """
        __Method__: GET
        __URL__: $LINK:baseproduct-detail:base_product_id$
        __Response__:
        %(SERIALIZER)s
        """
        response = super(BaseProductViewSet, self).retrieve(*args, **kwargs)
        return response

    def list(self, *args, **kwargs):
        """
        __Method__: GET
        __URL__: $LINK:baseproduct-list$
        __Query params__:
        %(FILTERS)s
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        """
        response = super(BaseProductViewSet, self).list(*args, **kwargs)
        return response

    def update(self, *args, **kwargs):
        """
        __Method__: PUT, PATCH
        __URL__: $LINK:baseproduct-detail:base_product_id$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        __Response__:
        %(SERIALIZER)s
        """
        response = super(BaseProductViewSet, self).update(*args, **kwargs)
        return response
class ProductVersionListView(SearchView):
    """Searchable, paginated HTML listing of product versions."""
    form_class = ProductVersionSearchForm
    queryset = models.ProductVersion.objects.all()
    # Render an empty result page instead of raising 404 when nothing matches.
    allow_empty = True
    template_name = "product_version_list.html"
    context_object_name = "product_version_list"
    paginate_by = settings.ITEMS_PER_PAGE
class ProductVersionDetailView(DetailView):
    """HTML detail page for a single product version."""
    model = models.ProductVersion
    pk_url_kwarg = "id"
    template_name = "product_version_detail.html"
    context_object_name = "product_version"

    def get_context_data(self, **kwargs):
        # Extend the default context with the releases of this product
        # version so the template can list them.
        context = super(ProductVersionDetailView, self).get_context_data(**kwargs)
        product_version = self.get_object()
        context['release_list'] = product_version.release_set.all()
        return context
def product_pages(request):
    """Render the static overview page for products."""
    context = {}
    return render(request, "product_pages.html", context)
def release_pages(request):
    """Render the static overview page for releases."""
    context = {}
    return render(request, "release_pages.html", context)
class ReleaseCloneViewSet(StrictQueryParamMixin, viewsets.GenericViewSet):
    """End-point for cloning an existing release under a new release_id."""
    queryset = models.Release.objects.none()  # Required for permissions

    def create(self, request):
        """
        Clone an existing release identified by `old_release_id`. Currently the
        release, its variants and arches will be cloned.
        __Method__: POST
        __URL__: $LINK:releaseclone-list$
        __Data__:
        {
        "old_release_id": string,
        "short": string, # optional
        "version": string, # optional
        "name": string, # optional
        "release_type": string, # optional
        "base_product": string, # optional
        "active": bool, # optional
        "product_version": string, # optional
        "dist_git": {
        "branch": string
        }, # optional
        "bugzilla": {
        "product": string
        }, # optional
        "component_dist_git_branch": string, # optional
        "include_inactive": bool, # optional
        "include_trees": [string], # optional
        "integrated_with": string # optional
        }
        The changed attributes must yield a different release_id, therefore
        change in at least one of `short`, `version`, `base_product` or
        `release_type` is required.
        If `component_dist_git_branch` is present, it will be set for all
        release components under the newly created release. If missing, release
        components will be cloned without changes.
        If `include_inactive` is False, the inactive release_components belonging
        to the old release won't be cloned to new release.
        By default it will clone all release_components to new release.
        If `include_tree` is specified, it should contain a list of
        Variant.Arch pairs that should be cloned. If not given, all trees will
        be cloned. If the list is empty, no trees will be cloned.
        """
        data = request.data
        if 'old_release_id' not in data:
            return Response({'__all__': 'Missing old_release_id'},
                            status=status.HTTP_400_BAD_REQUEST)
        old_release_id = data.pop('old_release_id')
        old_release = get_object_or_404(models.Release, release_id=old_release_id)
        old_data = ReleaseSerializer(instance=old_release).data
        # Fill in any writable field not overridden by the request from the
        # old release, so the clone starts as a copy.
        # BUGFIX: use items() instead of the Python-2-only iteritems().
        for (field_name, field) in ReleaseSerializer().fields.items():
            if not field.read_only and field_name not in data:
                value = old_data.get(field_name, None)
                if value:
                    data[field_name] = value
        # Drop explicit nulls before validation.
        # BUGFIX: iterate over a snapshot of the keys — popping while
        # iterating the live view raises RuntimeError on Python 3.
        for key in list(data.keys()):
            if data[key] is None:
                data.pop(key)
        serializer = ReleaseSerializer(data=data,
                                       extra_fields=['include_trees',
                                                     'include_inactive',
                                                     'component_dist_git_branch'])
        serializer.is_valid(raise_exception=True)
        new_release = serializer.save()
        # Record the creation in the changeset audit log ('null' = no
        # previous state).
        request.changeset.add('Release', new_release.pk,
                              'null', json.dumps(new_release.export()))
        # Let listeners (e.g. component cloning) react to the clone.
        signals.release_clone.send(sender=new_release.__class__,
                                   request=request,
                                   original_release=old_release,
                                   release=new_release)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class ReleaseRPMMappingView(StrictQueryParamMixin, viewsets.GenericViewSet):
    """Read-only view exposing the RPM mapping of a release's latest compose."""
    lookup_field = 'package'
    queryset = models.Release.objects.none()  # Required for permissions
    # Additional query parameter accepted by this endpoint.
    extra_query_params = ['disable_overrides']

    def retrieve(self, request, **kwargs):
        """
        __URL__: $LINK:releaserpmmapping-detail:release_id:package$
        Returns a JSON representing the RPM mapping of the latest compose for
        given release. There is an optional query parameter
        `?disable_overrides=1` which returns the raw mapping not affected by
        any overrides.
        The latest compose is chosen from the list of composes built for the
        release or linked to it. The RPM mapping of that compose is filtered to
        only include variants and architectures listed for the release.
        The used overrides come from the release specified in the URL, not the
        one for which the compose was originally built for.
        Following cases result in response of `404 NOT FOUND`:
        * no release with given id
        * release exists, but has no composes
        * release and compose exists, but there are no RPMs for the package
        __Response__:
        {
        "compose": string,
        "mapping": object
        }
        The `compose` key contains compose id of the compose used to populate
        the mapping.
        """
        release = get_object_or_404(models.Release, release_id=kwargs['release_id'])
        compose = release.get_latest_compose()
        if not compose:
            return Response(status=status.HTTP_404_NOT_FOUND,
                            data={'detail': 'Release %s has no composes' % kwargs['release_id']})
        # Overrides are taken from the release in the URL (any non-empty
        # disable_overrides value turns them off).
        mapping, _ = compose.get_rpm_mapping(kwargs['package'],
                                             bool(request.query_params.get('disable_overrides', False)),
                                             release=release)
        result = mapping.get_pure_dict()
        if not result:
            return Response(status=status.HTTP_404_NOT_FOUND,
                            data={'detail': 'Package %s not present in release %s'
                                  % (kwargs['package'], kwargs['release_id'])})
        return Response(data={'compose': compose.compose_id, 'mapping': result})
class ReleaseTypeViewSet(StrictQueryParamMixin,
                         mixins.ListModelMixin,
                         viewsets.GenericViewSet):
    """
    ##Overview##
    This page shows the usage of the **Release Types API**, please see the
    following for more details.
    ##Test tools##
    You can use ``curl`` in terminal, with -X _method_ (GET|POST|PUT|PATCH|DELETE),
    -d _data_ (a json string). or GUI plugins for
    browsers, such as ``RESTClient``, ``RESTConsole``.
    """
    # Read-only endpoint: only ListModelMixin is mixed in.
    queryset = models.ReleaseType.objects.all()
    serializer_class = ReleaseTypeSerializer
    filter_class = filters.ReleaseTypeFilter

    def list(self, request, *args, **kwargs):
        """
        __Method__: `GET`
        __URL__: $LINK:releasetype-list$
        __Query params__:
        %(FILTERS)s
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        __Example__:
        $ curl "$URL:releasetype-list$"
        {
        "previous": null,
        "next": null,
        "count": 6,
        "results": [
        {
        "short": "ga",
        "name": "Release",
        "suffix": ""
        },
        ......
        ]
        }
        """
        return super(ReleaseTypeViewSet, self).list(request, *args, **kwargs)
class ReleaseVariantViewSet(ChangeSetModelMixin,
                            StrictQueryParamMixin,
                            MultiLookupFieldMixin,
                            viewsets.GenericViewSet):
    """
    This end-point provides access to Variants. Each variant is uniquely
    identified by release ID and variant UID. The pair in the form
    `release_id/variant_uid` is used in URL for retrieving, updating or
    deleting a single variant as well as in bulk operations.
    """
    queryset = models.Variant.objects.all()
    serializer_class = ReleaseVariantSerializer
    filter_class = filters.ReleaseVariantFilter
    # Composite URL lookup: a variant is addressed as release_id/variant_uid.
    lookup_fields = (('release__release_id', r'[^/]+'), ('variant_uid', r'[^/]+'))

    # NOTE: docstrings below are rendered as the browsable API docs; keep
    # the %(...)s placeholders and $LINK:...$ macros intact.
    def create(self, *args, **kwargs):
        """
        __Method__: `POST`
        __URL__: $LINK:variant-list$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        All fields are required. The required architectures must already be
        present in PDC.
        *type*: $LINK:varianttype-list$
        __Response__:
        %(SERIALIZER)s
        """
        return super(ReleaseVariantViewSet, self).create(*args, **kwargs)

    def update(self, *args, **kwargs):
        """
        __Method__: `PUT`
        __URL__: $LINK:variant-detail:release_id}/{variant_uid$
        __Data__:
        %(WRITABLE_SERIALIZER)s
        All attributes are required. The specified architectures will be set
        for this release. Also note that if you change the `uid`, the url for
        this variant will change.
        Changing the architectures may involve deleting some. Note that
        repositories are connected to some Variant.Arch pair and it is not
        possible to remove an arch with any repositories.
        *type*: $LINK:varianttype-list$
        __Response__:
        %(SERIALIZER)s
        """
        return super(ReleaseVariantViewSet, self).update(*args, **kwargs)

    def partial_update(self, *args, **kwargs):
        """
        __Method__: `PATCH`
        __URL__: $LINK:variant-detail:release_id}/{variant_uid$
        __Data__:
        {
        "release": string,
        "id": string,
        "uid": string,
        "name": string,
        "type": string,
        "arches": [string],
        "add_arches": [string],
        "remove_arches": [string]
        }
        All attributes are optional. If an attribute is not specified, that
        property of a variant will not change. The `arches` key can be used to
        set architectures associated with the variant. The `add_arches` key can
        list architectures to be added to current ones, with `remove_arches`
        some can be removed. While it is possible to combine `add_arches` with
        `remove_arches`, the `arches` attribute must not be combined with any
        other arch manipulation.
        If you try to remove architectures with associated repositories, the
        request will fail to do anything.
        __Response__:
        %(SERIALIZER)s
        """
        return super(ReleaseVariantViewSet, self).partial_update(*args, **kwargs)

    def list(self, *args, **kwargs):
        """
        __Method__: `GET`
        __URL__: $LINK:variant-list$
        __Query params__:
        %(FILTERS)s
        __Response__: a paged list of following objects
        %(SERIALIZER)s
        """
        return super(ReleaseVariantViewSet, self).list(*args, **kwargs)

    def retrieve(self, *args, **kwargs):
        """
        __Method__: `GET`
        __URL__: $LINK:variant-detail:release_id}/{variant_uid$
        __Response__:
        %(SERIALIZER)s
        """
        return super(ReleaseVariantViewSet, self).retrieve(*args, **kwargs)

    def destroy(self, *args, **kwargs):
        """
        This call will delete selected variant with all its arches. Please note
        that if there are any repositories filed under this variant, you will
        get an error `409 CONFLICT`.
        __Method__: `DELETE`
        __URL__: $LINK:variant-detail:release_id}/{variant_uid$
        """
        return super(ReleaseVariantViewSet, self).destroy(*args, **kwargs)
class VariantTypeViewSet(StrictQueryParamMixin,
                         mixins.ListModelMixin,
                         viewsets.GenericViewSet):
    """
    API endpoint that allows variant_types to be viewed.
    """
    queryset = models.VariantType.objects.all()
    serializer_class = VariantTypeSerializer

    def list(self, request, *args, **kwargs):
        """
        __Method__: GET
        __URL__: $LINK:varianttype-list$
        __Response__:
        %(SERIALIZER)s
        """
        # Read-only endpoint: delegate to the generic list implementation.
        response = super(VariantTypeViewSet, self).list(request, *args, **kwargs)
        return response
| |
import sys
from xml.parsers import expat
import copy
from . import syntax
from . import grammar
from . import error
from . import statements
from . import util
yin_namespace = "urn:ietf:params:xml:ns:yang:yin:1"
# We're using expat to parse to our own primitive dom-like
# structure, because we need to keep track of the linenumber per
# statement. And expat is easier to work with than minidom.
class Element(object):
    """A minimal DOM-like XML element that remembers its source position.

    expat events are collected into instances of this class instead of a
    real DOM so each statement keeps the line number it came from.
    """

    def __init__(self, ns, local_name, attrs, pos):
        self.ns = ns
        self.local_name = local_name
        self.attrs = attrs
        # Keep a private copy: the parser keeps mutating its position object.
        self.pos = copy.copy(pos)
        self.children = []
        self.data = ''

    def find_child(self, ns, local_name):
        """Return the first child matching (ns, local_name), or None."""
        matches = (c for c in self.children
                   if c.ns == ns and c.local_name == local_name)
        return next(matches, None)

    def remove_child(self, ch):
        """Remove the child element `ch`."""
        self.children.remove(ch)

    def find_attribute(self, name):
        """Return the value of attribute `name`, or None when absent."""
        return self.attrs.get(name)

    def remove_attribute(self, name):
        """Delete attribute `name` (KeyError if it is not present)."""
        del self.attrs[name]
class YinParser(object):
    """Parse a YIN (XML) module into a pyang Statement tree.

    Parsing runs in two phases: expat callbacks build a lightweight
    Element tree carrying line positions, then look_ahead() resolves
    imports/includes/extensions and create_statement() converts the
    Element tree into Statement objects.
    """

    ns_sep = "}"
    """namespace separator"""

    def __init__(self, extra={}):
        # NOTE(review): mutable default argument, but `extra` is only read
        # ('no_extensions'/'no_include' flags), so sharing is harmless.
        self.parser = expat.ParserCreate("UTF-8", self.ns_sep)
        self.parser.CharacterDataHandler = self.char_data
        self.parser.StartElementHandler = self.start_element
        self.parser.EndElementHandler = self.end_element
        self.extra = extra

    def split_qname(qname):
        """Split `qname` into namespace URI and local name.

        Return namespace and local name as a tuple. This is a static
        method."""
        res = qname.split(YinParser.ns_sep)
        if len(res) == 1:  # no namespace
            return None, res[0]
        else:
            return res
    split_qname = staticmethod(split_qname)

    def parse(self, ctx, ref, text):
        """Parse the string `text` containing a YIN (sub)module.

        Return a Statement on success or None on failure; errors are
        accumulated in `ctx.errors`.
        """
        self.ctx = ctx
        self.pos = error.Position(ref)
        self.top = None
        self.top_element = None
        self.uri = None            # our module's namespace URI
        self.nsmap = {}            # namespace URI -> imported module
        self.prefixmap = {}        # namespace URI -> yang prefix
        self.included = []         # submodules pulled in via include
        self.extensions = {}       # locally defined extensions
        self.data = ''
        self.element_stack = []
        try:
            self.parser.Parse(text.encode('utf-8'), True)
        except error.Abort:
            return None
        except expat.ExpatError as ex:
            self.pos.line = ex.lineno
            error.err_add(self.ctx.errors, self.pos, 'SYNTAX_ERROR',
                          str(ex).split(":")[0])
            return None
        self.look_ahead()
        self.create_statement(self.top_element, None)
        return self.top

    def get_lineno(self):
        """Return current line of the parser."""
        return self.parser.CurrentLineNumber
    lineno = property(get_lineno, doc="parser position")

    # Handlers for Expat events
    def start_element(self, name, attrs):
        name = str(name)  # convert from unicode strings
        self.pos.line = self.lineno
        (ns, local_name) = self.split_qname(name)
        e = Element(ns, local_name, attrs, self.pos)
        # YIN does not allow mixed content: non-whitespace text before a
        # child element is a syntax error.
        if self.data.lstrip() != '':
            error.err_add(self.ctx.errors, self.pos, 'SYNTAX_ERROR',
                          "unexpected element - mixed content")
        self.data = ''
        if self.element_stack == []:
            # this is the top-level element
            self.top_element = e
            self.element_stack.append(e)
            # special case - the top-level statement has its argument
            # as an attribute, so we can save it here
            try:
                (argname, _arg_is_elem) = syntax.yin_map[e.local_name]
                arg = e.find_attribute(argname)
                # create and save the top-level statement here, so
                # we get a correct Statement in pos.
                stmt = statements.Statement(None, None,
                                            e.pos, e.local_name, arg)
                self.top = stmt
                self.pos.top = stmt
            except:
                # NOTE(review): bare except — tolerates an unknown top-level
                # keyword here; the error is reported later in
                # create_statement().
                pass
            return
        else:
            parent = self.element_stack[-1]
            parent.children.append(e)
            self.element_stack.append(e)

    def char_data(self, data):
        # Character data may arrive in several chunks; accumulate it.
        self.data += data

    def end_element(self, name):
        self.pos.line = self.lineno
        e = self.element_stack[-1]
        e.data = self.data
        self.data = ''
        # end of statement, pop from stack
        del self.element_stack[-1]

    # Builds the statement tree
    def create_statement(self, e, parent):
        """Recursively convert Element `e` (and children) into Statements."""
        if e.ns == yin_namespace:
            # core YANG keyword: argument spec comes from the syntax table
            keywd = e.local_name
            try:
                (argname, arg_is_elem) = syntax.yin_map[keywd]
            except KeyError:
                error.err_add(self.ctx.errors, e.pos,
                              'UNKNOWN_KEYWORD', keywd)
                return None
        else:
            # extension
            try:
                prefix = self.prefixmap[e.ns]
            except KeyError:
                error.err_add(self.ctx.errors, e.pos,
                              'MODULE_NOT_IMPORTED', e.ns)
                return None
            keywd = (prefix, e.local_name)
            keywdstr = util.keyword_to_str(keywd)
            if 'no_extensions' in self.extra:
                return None
            res = self.find_extension(e.ns, e.local_name)
            if res is None:
                error.err_add(self.ctx.errors, e.pos,
                              'UNKNOWN_KEYWORD', keywdstr)
                return None
            (arg_is_elem, argname) = res
        keywdstr = util.keyword_to_str(keywd)
        # arg_is_elem is a tri-state: True (argument is a child element),
        # False (argument is an attribute), None (keyword takes no argument).
        if arg_is_elem == True:
            # find the argument element
            arg_elem = e.find_child(e.ns, argname)
            if arg_elem is None:
                arg = None
                error.err_add(self.ctx.errors, e.pos,
                              'MISSING_ARGUMENT_ELEMENT', (argname, keywdstr))
            else:
                if self.ctx.trim_yin:
                    # strip per-line indentation from the argument text
                    arg = "\n".join([x.strip() for x in
                                     arg_elem.data.strip().splitlines()])
                else:
                    arg = arg_elem.data
                e.remove_child(arg_elem)
        elif arg_is_elem == False:
            arg = e.find_attribute(argname)
            if arg is None:
                error.err_add(self.ctx.errors, e.pos,
                              'MISSING_ARGUMENT_ATTRIBUTE', (argname, keywdstr))
            else:
                e.remove_attribute(argname)
        else:
            # no arguments
            arg = None
        self.check_attr(e.pos, e.attrs)
        if parent is not None:
            stmt = statements.Statement(self.top, parent, e.pos, keywd, arg)
            parent.substmts.append(stmt)
        else:
            # the top-level statement was already created in start_element()
            stmt = self.top
        for ch in e.children:
            self.create_statement(ch, stmt)

    def check_attr(self, pos, attrs):
        """Check for unknown attributes."""
        for at in attrs:
            (ns, local_name) = self.split_qname(at)
            if ns is None:
                error.err_add(self.ctx.errors, pos,
                              'UNEXPECTED_ATTRIBUTE', local_name)
            elif ns == yin_namespace:
                error.err_add(self.ctx.errors, pos,
                              'UNEXPECTED_ATTRIBUTE', "{"+at)
            # allow foreign attributes
            # FIXME: hmm... is this the right thing to do?
            # these things are supposed to be handled with extensions...

    def look_ahead(self):
        """Resolve namespaces, imports/includes and local extensions."""
        # To find an extension <smi:oid> we need to find the module
        # that corresponds to 'smi'. We get extension's URI from expat,
        # so we need a map from URI -> module. This works for
        # imported modules, but for extensions defined in the local
        # module we have to check if the extension's URI is
        # the local URI.
        #
        # If we're a submodule, we need to find our module's
        # namespace, so we need to parse the module :(
        # 1. find our own namespace URI
        if self.top_element.local_name == 'module':
            p = self.top_element.find_child(yin_namespace, 'namespace')
            if p is not None:
                self.uri = p.find_attribute('uri')
            p = self.top_element.find_child(yin_namespace, 'prefix')
            if p is not None:
                self.prefixmap[self.uri] = p.find_attribute('value')
        elif self.top_element.local_name == 'submodule':
            p = self.top_element.find_child(yin_namespace, 'belongs-to')
            modname = p.find_attribute('module')
            # read the parent module in order to find the namespace uri
            res = self.ctx.read_module(modname, extra={'no_include':True,
                                                       'no_extensions':True})
            if res == 'not_found':
                error.err_add(self.ctx.errors, p.pos,
                              'MODULE_NOT_FOUND', modname)
            elif type(res) == type(()) and res[0] == 'read_error':
                error.err_add(self.ctx.errors, p.pos, 'READ_ERROR', res[1])
            elif res == None:
                pass
            else:
                namespace = res.search_one('namespace')
                if namespace is None or namespace.arg is None:
                    pass
                else:
                    # success - save our uri
                    self.uri = namespace.arg
        else:
            # neither module nor submodule - nothing to resolve
            return
        # 2. read all imports and includes and add the modules to the context
        # and to the nsmap.
        if not hasattr(self.ctx, 'yin_module_map'):
            self.ctx.yin_module_map = {}
        if self.top.keyword == 'module':
            # track imports per module to detect circular imports
            if self.top.arg not in self.ctx.yin_module_map:
                self.ctx.yin_module_map[self.top.arg] = []
            mymodules = self.ctx.yin_module_map[self.top.arg]
        else:
            mymodules = []
        for ch in self.top_element.children:
            if ch.ns == yin_namespace and ch.local_name == 'import':
                modname = ch.find_attribute('module')
                if modname is not None:
                    if modname in mymodules:
                        # circular import; ignore here and detect in validation
                        pass
                    else:
                        mymodules.append(modname)
                        mod = self.ctx.search_module(ch.pos, modname)
                        if mod is not None:
                            ns = mod.search_one('namespace')
                            if ns is not None and ns.arg is not None:
                                # record the uri->mod mapping
                                self.nsmap[ns.arg] = mod
                                # also record uri->prefix, where prefix
                                # is the *yang* prefix, *not* the XML prefix
                                # (it can be different in theory...)
                                p = ch.find_child(yin_namespace, 'prefix')
                                if p is not None:
                                    prefix = p.find_attribute('value')
                                    if prefix is not None:
                                        self.prefixmap[ns.arg] = prefix
            elif (ch.ns == yin_namespace and ch.local_name == 'include' and
                  'no_include' not in self.extra):
                modname = ch.find_attribute('module')
                if modname is not None:
                    mod = self.ctx.search_module(ch.pos, modname)
                    if mod is not None:
                        self.included.append(mod)
        # 3. find all extensions defined locally
        for ch in self.top_element.children:
            if ch.ns == yin_namespace and ch.local_name == 'extension':
                extname = ch.find_attribute('name')
                if extname is None:
                    continue
                arg = ch.find_child(yin_namespace, 'argument')
                if arg is None:
                    # extension takes no argument
                    self.extensions[extname] = (None, None)
                else:
                    argname = arg.find_attribute('name')
                    if argname is None:
                        continue
                    arg_is_elem = arg.find_child(yin_namespace, 'yin-element')
                    if arg_is_elem is None:
                        # default: argument is carried as an attribute
                        self.extensions[extname] = (False, argname)
                        continue
                    val = arg_is_elem.find_attribute('value')
                    if val == 'false':
                        self.extensions[extname] = (False, argname)
                    elif val == 'true':
                        self.extensions[extname] = (True, argname)
                    # NOTE(review): any other value leaves the extension
                    # unregistered — presumably reported elsewhere; confirm.

    def find_extension(self, uri, extname):
        """Look up extension `extname` defined under namespace `uri`.

        Returns (arg_is_elem, argname) or None when unknown.
        """
        def find_in_mod(mod):
            # resolve the extension's argument spec from module `mod`
            ext = self.search_definition(mod, 'extension', extname)
            if ext is None:
                return None
            ext_arg = ext.search_one('argument')
            if ext_arg is None:
                return (None, None)
            arg_is_elem = ext_arg.search_one('yin-element')
            if arg_is_elem is None or arg_is_elem.arg == 'false':
                return (False, ext_arg.arg)
            else:
                return (True, ext_arg.arg)
        if uri == self.uri:
            # extension is defined locally or in one of our submodules
            try:
                return self.extensions[extname]
            except KeyError:
                pass
            # check submodules
            for submod in self.included:
                res = find_in_mod(submod)
                if res is not None:
                    return res
            return None
        else:
            try:
                mod = self.nsmap[uri]
                return find_in_mod(mod)
            except KeyError:
                return None

    def search_definition(self, module, keyword, arg):
        """Search for a definition with `keyword` `name`.

        Search the module and its submodules."""
        r = module.search_one(keyword, arg)
        if r is not None:
            return r
        for i in module.search('include'):
            modulename = i.arg
            m = module.i_ctx.get_module(modulename)
            if m is not None:
                r = m.search_one(keyword, arg)
                if r is not None:
                    return r
        return None
| |
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
import time
from test_framework.test_framework import SyscoinTestFramework
from test_framework import util
class ConfArgsTest(SyscoinTestFramework):
def set_test_params(self):
    """Run a single node on a clean chain, without CLI support or wallets."""
    self.num_nodes = 1
    self.setup_clean_chain = True
    self.disable_autoconnect = False
    self.supports_cli = False
    self.wallet_names = []
def test_config_file_parser(self):
    """Exercise config-file parsing errors and warnings via includeconf."""
    self.stop_node(0)
    inc_conf_file_path = os.path.join(self.nodes[0].datadir, 'include.conf')
    with open(os.path.join(self.nodes[0].datadir, 'syscoin.conf'), 'a', encoding='utf-8') as conf:
        conf.write(f'includeconf={inc_conf_file_path}\n')
    # An unknown command line option must abort startup.
    self.nodes[0].assert_start_raises_init_error(
        expected_msg='Error: Error parsing command line arguments: Invalid parameter -dash_cli=1',
        extra_args=['-dash_cli=1'],
    )
    # An unknown config-file option is only warned about in the debug log.
    with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
        conf.write('dash_conf=1\n')
    with self.nodes[0].assert_debug_log(expected_msgs=['Ignoring unknown configuration value dash_conf']):
        self.start_node(0)
    self.stop_node(0)
    # Options in config files must not carry a leading '-'.
    with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
        conf.write('-dash=1\n')
    self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 1: -dash=1, options in configuration file must be specified without leading -')
    if self.is_wallet_compiled():
        # -wallet outside a [section] only applies within the network section.
        with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
            conf.write("wallet=foo\n")
        self.nodes[0].assert_start_raises_init_error(expected_msg=f'Error: Config setting for -wallet only applied on {self.chain} network when in [{self.chain}] section.')
    main_conf_file_path = os.path.join(self.options.tmpdir, 'node0', 'syscoin_main.conf')
    util.write_config(main_conf_file_path, n=0, chain='', extra_config=f'includeconf={inc_conf_file_path}\n')
    with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
        conf.write('acceptnonstdtxn=1\n')
    # acceptnonstdtxn is rejected on the main chain.
    self.nodes[0].assert_start_raises_init_error(extra_args=[f"-conf={main_conf_file_path}"], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
    # A bare token is suggested to be a negated option.
    with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
        conf.write('nono\n')
    self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 1: nono, if you intended to specify a negated option, use nono=1 instead')
    # '#' in rpcpassword is ambiguous (comment vs. password character) and
    # rejected in plain, network-prefixed and sectioned form alike.
    with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
        conf.write('server=1\nrpcuser=someuser\nrpcpassword=some#pass')
    self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
    with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
        conf.write('server=1\nrpcuser=someuser\nmain.rpcpassword=some#pass')
    self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
    with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
        conf.write('server=1\nrpcuser=someuser\n[main]\nrpcpassword=some#pass')
    self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 4, using # in rpcpassword can be ambiguous and should be avoided')
    # Unrecognized sections are reported on stderr at shutdown, with the
    # including file and line number.
    inc_conf_file2_path = os.path.join(self.nodes[0].datadir, 'include2.conf')
    with open(os.path.join(self.nodes[0].datadir, 'syscoin.conf'), 'a', encoding='utf-8') as conf:
        conf.write(f'includeconf={inc_conf_file2_path}\n')
    with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
        conf.write('testnot.datadir=1\n')
    with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
        conf.write('[testnet]\n')
    self.restart_node(0)
    self.nodes[0].stop_node(expected_stderr=f'Warning: {inc_conf_file_path}:1 Section [testnot] is not recognized.{os.linesep}{inc_conf_file2_path}:1 Section [testnet] is not recognized.')
    # Leave both include files empty so later tests start from a clean slate.
    with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
        conf.write('') # clear
    with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
        conf.write('') # clear
def test_invalid_command_line_options(self):
    """-proxy given without a value must abort startup with a clear error."""
    self.nodes[0].assert_start_raises_init_error(
        expected_msg='Error: No proxy server specified. Use -proxy=<ip> or -proxy=<ip:port>.',
        extra_args=['-proxy'],
    )
def test_log_buffer(self):
    """The double-negative arg warning must end up in the debug log."""
    self.stop_node(0)
    # -noconnect=0 is a confusing double negative; the warning is emitted
    # during argument parsing and must still appear in debug.log
    # (presumably via the early-log buffer — confirm in init code).
    with self.nodes[0].assert_debug_log(expected_msgs=['Warning: parsed potentially confusing double-negative -connect=0\n']):
        self.start_node(0, extra_args=['-noconnect=0'])
def test_args_log(self):
    """Check that startup logging echoes args while masking secret values."""
    self.stop_node(0)
    self.log.info('Test config args logging')
    # Secrets (rpcauth/rpcbind/rpcpassword/rpcuser/torpassword) must be
    # logged as **** and their raw values must never appear in the log.
    with self.nodes[0].assert_debug_log(
            expected_msgs=[
                'Command-line arg: addnode="some.node"',
                'Command-line arg: rpcauth=****',
                'Command-line arg: rpcbind=****',
                'Command-line arg: rpcpassword=****',
                'Command-line arg: rpcuser=****',
                'Command-line arg: torpassword=****',
                f'Config file arg: {self.chain}="1"',
                f'Config file arg: [{self.chain}] server="1"',
            ],
            # None of the raw secret values may leak into the debug log.
            unexpected_msgs=[
                'alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
                '127.1.1.1',
                'secret-rpcuser',
                'secret-torpassword',
            ]):
        self.start_node(0, extra_args=[
            '-addnode=some.node',
            '-rpcauth=alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
            '-rpcbind=127.1.1.1',
            '-rpcpassword=',
            '-rpcuser=secret-rpcuser',
            '-torpassword=secret-torpassword',
        ])
def test_networkactive(self):
    """Restart the node with every -networkactive spelling and verify the log."""
    self.log.info('Test -networkactive option')
    # (extra_args, expected SetNetworkActive state); None = default startup.
    cases = [
        (None, 'true'),
        (['-networkactive'], 'true'),
        (['-networkactive=1'], 'true'),
        (['-networkactive=0'], 'false'),
        (['-nonetworkactive'], 'false'),
        (['-nonetworkactive=1'], 'false'),
    ]
    for extra_args, state in cases:
        self.stop_node(0)
        with self.nodes[0].assert_debug_log(expected_msgs=[f'SetNetworkActive: {state}\n']):
            if extra_args is None:
                self.start_node(0)
            else:
                self.start_node(0, extra_args=extra_args)
def test_seed_peers(self):
    """Exercise the DNS-seed / fixed-seed fallback logic on regtest."""
    self.log.info('Test seed peers')
    default_data_dir = self.nodes[0].datadir
    # Only regtest has no fixed seeds. To avoid connections to random
    # nodes, regtest is the only network where it is safe to enable
    # -fixedseeds in tests
    util.assert_equal(self.nodes[0].getblockchaininfo()['chain'],'regtest')
    self.stop_node(0)
    # No peers.dat exists and -dnsseed=1
    # We expect the node will use DNS Seeds, but Regtest mode does not have
    # any valid DNS seeds. So after 60 seconds, the node should fallback to
    # fixed seeds
    assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
    start = int(time.time())
    with self.nodes[0].assert_debug_log(
        expected_msgs=[
            "Loaded 0 addresses from peers.dat",
            "0 addresses found from DNS seeds",
            "opencon thread start",  # Ensure ThreadOpenConnections::start time is properly set
        ],
        timeout=10,
    ):
        self.start_node(0, extra_args=['-dnsseed=1', '-fixedseeds=1', f'-mocktime={start}'])
    with self.nodes[0].assert_debug_log(expected_msgs=[
        "Adding fixed seeds as 60 seconds have passed and addrman is empty",
    ]):
        # Advance mocktime past the 60s threshold to trigger the fallback.
        self.nodes[0].setmocktime(start + 65)
    self.stop_node(0)
    # No peers.dat exists and -dnsseed=0
    # We expect the node will fallback immediately to fixed seeds
    assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
    start = time.time()
    with self.nodes[0].assert_debug_log(expected_msgs=[
        "Loaded 0 addresses from peers.dat",
        "DNS seeding disabled",
        "Adding fixed seeds as -dnsseed=0, -addnode is not provided and all -seednode(s) attempted\n",
    ]):
        self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=1'])
    # "Immediately" = well under the 60 second DNS-seed grace period.
    assert time.time() - start < 60
    self.stop_node(0)
    # No peers.dat exists and dns seeds are disabled.
    # We expect the node will not add fixed seeds when explicitly disabled.
    assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
    start = time.time()
    with self.nodes[0].assert_debug_log(expected_msgs=[
        "Loaded 0 addresses from peers.dat",
        "DNS seeding disabled",
        "Fixed seeds are disabled",
    ]):
        self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=0'])
    assert time.time() - start < 60
    self.stop_node(0)
    # No peers.dat exists and -dnsseed=0, but a -addnode is provided
    # We expect the node will allow 60 seconds prior to using fixed seeds
    assert not os.path.exists(os.path.join(default_data_dir, "peers.dat"))
    start = int(time.time())
    with self.nodes[0].assert_debug_log(
        expected_msgs=[
            "Loaded 0 addresses from peers.dat",
            "DNS seeding disabled",
            "opencon thread start",  # Ensure ThreadOpenConnections::start time is properly set
        ],
        timeout=10,
    ):
        self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=1', '-addnode=fakenodeaddr', f'-mocktime={start}'])
    with self.nodes[0].assert_debug_log(expected_msgs=[
        "Adding fixed seeds as 60 seconds have passed and addrman is empty",
    ]):
        self.nodes[0].setmocktime(start + 65)
def run_test(self):
    """Run the sub-tests, then exercise -datadir / -conf interactions."""
    self.test_log_buffer()
    self.test_args_log()
    self.test_seed_peers()
    self.test_networkactive()
    self.test_config_file_parser()
    self.test_invalid_command_line_options()
    # Remove the -datadir argument so it doesn't override the config file
    self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
    default_data_dir = self.nodes[0].datadir
    new_data_dir = os.path.join(default_data_dir, 'newdatadir')
    new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
    # Check that using -datadir argument on non-existent directory fails
    self.nodes[0].datadir = new_data_dir
    self.nodes[0].assert_start_raises_init_error([f'-datadir={new_data_dir}'], f'Error: Specified data directory "{new_data_dir}" does not exist.')
    # Check that using non-existent datadir in conf file fails
    conf_file = os.path.join(default_data_dir, "syscoin.conf")
    # datadir needs to be set before [chain] section
    conf_file_contents = open(conf_file, encoding='utf8').read()
    with open(conf_file, 'w', encoding='utf8') as f:
        f.write(f"datadir={new_data_dir}\n")
        f.write(conf_file_contents)
    self.nodes[0].assert_start_raises_init_error([f'-conf={conf_file}'], f'Error: Error reading configuration file: specified data directory "{new_data_dir}" does not exist.')
    # Check that an explicitly specified config file that cannot be opened fails
    none_existent_conf_file = os.path.join(default_data_dir, "none_existent_syscoin.conf")
    self.nodes[0].assert_start_raises_init_error(['-conf=' + none_existent_conf_file], 'Error: Error reading configuration file: specified config file "' + none_existent_conf_file + '" could not be opened.')
    # Create the directory and ensure the config file now works
    os.mkdir(new_data_dir)
    self.start_node(0, [f'-conf={conf_file}'])
    self.stop_node(0)
    assert os.path.exists(os.path.join(new_data_dir, self.chain, 'blocks'))
    # Ensure command line argument overrides datadir in conf
    os.mkdir(new_data_dir_2)
    self.nodes[0].datadir = new_data_dir_2
    self.start_node(0, [f'-datadir={new_data_dir_2}', f'-conf={conf_file}'])
    assert os.path.exists(os.path.join(new_data_dir_2, self.chain, 'blocks'))
# Script entry point: run the functional test directly.
if __name__ == '__main__':
    ConfArgsTest().main()
| |
import ptypes
from ptypes import *
import six,itertools,functools,operator

# Bare '{}' templates keep cloned and little-endian type names undecorated;
# big-endian pbinary types are rendered wrapped as be(<name>).
ptypes.Config.ptype.clone_name = '{}'
ptypes.Config.pbinary.littleendian_name = '{}'
ptypes.Config.pbinary.bigendian_name = 'be({})'
# Records in this format are little-endian by default.
ptypes.setbyteorder(ptypes.config.byteorder.littleendian)

### utility functions
R = lambda f: list(reversed(f)) # reverse a ._fields_ declaration because Microsoft's documentation lists structures with bit 0 as the high bit
### atomic types
# Fixed-width integer aliases matching the names used by the record
# specifications: bool1/ubyte1 are 8-bit, uint2/sint2 16-bit, uint4/sint4 32-bit.
class bool1(pint.uint8_t): pass
class ubyte1(pint.uint8_t): pass
class uint2(pint.uint16_t): pass
class uint4(pint.uint32_t): pass
class sint2(pint.int16_t): pass
class sint4(pint.int32_t): pass

class undefined(ptype.block):
    # Unknown/unparsed bytes; summarized as '...' when empty.
    def summary(self):
        return super(undefined, self).summary() if self.size() else '...'

### general types
class MD4(dyn.block(16)): pass  # 16-byte MD4 digest blob
### record generalities
class RecordUnknown(ptype.block):
    """Fallback block for record types that have no registered definition.

    Renders its classname with the unknown type id and blocksize so the
    unparsed record is still identifiable in a dump.
    """
    length = 0
    def classname(self):
        names = self.shortname().split('.')
        # Format the record type: int -> hex, iterable -> comma-joined, else repr.
        if self.type is None:
            res = '?'
        elif isinstance(self.type, six.integer_types):
            res = "{:#x}".format(self.type)
        elif hasattr(self.type, '__iter__'):
            res = "({:s})".format(','.join("{:#x}".format(item) if isinstance(item, six.integer_types) else "{!r}".format(item) for item in self.type))
        else:
            res = repr(self.type)
        names[-1] = "{:s}<{:s}>[size:{:#x}]".format(names[-1], res, self.blocksize())
        return '.'.join(names)
# record type lookup
class Record(ptype.definition):
    """Registry mapping record-type ids to their per-type definition caches."""
    cache = {}
    # Unknown record types fall through to RecordUnknown.
    class RT_Unknown(ptype.definition): cache, default = {}, RecordUnknown
    default = RT_Unknown
    @classmethod
    def get_recordtype(cls, instance):
        '''Search through all definitions for whichever one contains the class for the specified instance.'''
        klass = type(instance) if ptype.isinstance(instance) else instance
        for rt, definition in cls.cache.items():
            if klass in definition.cache.values():
                return rt
            continue
        raise KeyError(klass)
class Instance(ptype.definition):
    """Definition whose registered classes remember the id they were defined under."""
    @classmethod
    def define(cls, *definition, **attributes):
        res = super(Instance, cls).define(*definition, **attributes)
        # Tag the registered class with the key it was registered for.
        res.__instance__ = cls.type
        return res
### Record type
class RecordType(pint.enum, pint.littleendian(pint.uint16_t)):
    """16-bit little-endian enumeration of the known record-type ids."""
    _values_ = []
    @classmethod
    def define(cls,pack_namevalue):
        # pack_namevalue is a (name, value) pair; register it both as an enum
        # value and as a fresh Instance-based definition under Record.
        name, value = pack_namevalue
        res = type(name, (Instance,), {'type':value, 'cache':{}})
        cls._values_.append((res.__name__,res.type))
        return (name, Record.define(res))
### Record header
class RecordGeneral(pstruct.type):
    """Generic record: a Header followed by type-specific data and extra bytes."""
    Record = Record
    class Header(pstruct.type):
        RecordType = RecordType
        class VersionInstance(pbinary.struct):
            # 16 bits total: 4-bit version, 12-bit instance (fields declared
            # reversed via R, see the note on R above).
            _fields_ = R([(4,'version'), (12,'instance')])
            def summary(self):
                return "{:d} / {:#0{:d}x}".format(self['version'], self['instance'], 2 + 3)
            def set(self, *versioninstance, **fields):
                # Accept either a (version, instance) pair or a dict of fields.
                iterable, = versioninstance if versioninstance else ((),)
                if iterable and not isinstance(iterable, dict):
                    version, instance = iterable
                    return self.set({'instance': instance, 'version': version})
                return super(RecordGeneral.Header.VersionInstance, self).set(iterable, **fields)
        _fields_ = [
            (VersionInstance, 'Version/Instance'),
            (lambda self: self.RecordType, 'Type'),
            (pint.uint32_t, 'Length')
        ]
        # Accessors returning plain python values from the packed fields.
        def Type(self):
            return self['Type'].int()
        def Instance(self):
            res = self['Version/Instance']
            return res['version'],res['instance']
        def Length(self):
            return self['Length'].int()
        def summary(self):
            v = self['Version/Instance'].int()
            t,l = self['Type'].int(),self['Length'].int()
            return "version={:d} instance={:#05x} type={:#06x} length={length:#x}({length:x})".format(v & 0xf, (v&0xfff0) // 0x10, t, length=l)
    def __data(self):
        # Resolve the concrete type for 'data' from the header's type id and
        # (version, instance) pair.
        header = self['header'].li
        t, vi, length = header.Type(), header.Instance(), header.Length()
        Type = self.Record.withdefault(t, type=t)
        # look for an explicit instance
        try:
            res = Type.lookup(vi)
        # otherwise, the instance might modify the Instance in some way
        except KeyError:
            ver, _ = vi
            res = Type.withdefault((ver, None), type=(ver, None), length=length)
        # something good had to come out of that
        if getattr(self, 'lazy', False):
            # Lazy mode: wrap the body in an encoded_t so it is decoded on demand.
            class RecordData(ptype.encoded_t):
                @classmethod
                def typename(cls):
                    return cls._object_.typename()
            RecordData._value_ = dyn.block(length)
            RecordData._object_ = res
            return RecordData
        # Clamp the blocksize when the declared length is smaller than the type.
        return dyn.clone(res, blocksize=lambda _, bs=length: bs) if length < self.new(res).a.size() else res
    def __extra(self):
        # Bytes (if any) past the parsed data but still inside header.Length().
        header, size = self['header'].li, self['data'].li.size()
        return dyn.block(max(0, header.Length() - size))
    _fields_ = [
        (lambda self: self.Header, 'header'),
        (__data, 'data'),
        (__extra, 'extra'),
    ]
    def alloc(self, **fields):
        # Allocate and, unless an explicit header was supplied, synthesize one
        # from the allocated data (version/instance, type and length).
        res = super(RecordGeneral, self).alloc(**fields)
        if operator.contains(fields, 'header'):
            return res
        if hasattr(res.d, 'type'):
            version, instance = res.d.type
            versioninstance = dict(version=version) if instance is None else dict(version=version, instance=instance)
            header = {'Version/Instance': versioninstance}
            try:
                rt = self.Record.get_recordtype(res.d)
            except KeyError:
                pass
            else:
                header['Type'] = rt
            header['Length'] = sum(item.size() for item in [res.d, res['extra']])
            res.h.set(**header)
        return res
    h = property(fget=lambda self: self['header'])
    def Data(self):
        # In lazy mode 'data' is an encoded_t and must be dereferenced.
        return self['data'].d if getattr(self, 'lazy', False) else self['data']
    d = property(fget=Data)
    def Extra(self):
        return self['extra']
    def previousRecord(self, record_t, **count):
        '''Search backwards in the parent container for the nearest record_t; count= bounds the search.'''
        container = self.parent
        idx = container.value.index(self)
        # Seek backwards from index to find record
        count = count.get('count', -1)
        if count > 0:
            for i in range(count):
                if isinstance(container[idx - i].d, record_t):
                    break
                continue
        else:
            i = 0
            while idx >= i and not isinstance(container[idx - i].d, record_t):
                i += 1
        if not isinstance(container[idx - i].d, record_t):
            raise ptypes.error.ItemNotFoundError(self, 'previousRecord', message="Unable to locate previous record type : {!r}".format(record_t))
        return container[idx - i]
class RecordContainer(parray.block):
    """Array of RecordGeneral records with search/lookup/walk helpers."""
    _object_ = RecordGeneral
    def repr(self):
        try:
            res = self.details()
        except ptypes.error.InitializationError:
            return super(RecordContainer, self).repr()
        return res + ('\n' if res else '')
    def summary(self):
        # Summarize using the first and last record (or the only one).
        if len(self) > 1:
            first, last = items = [self[idx] for idx in [0, -1]]
            instances = ("{:s}[{:x}:{:+x}]".format(item.Data().classname(), item.getoffset(), item.size()) for item in items)
            instancedata = zip(instances, items)
        elif len(self):
            item = self[0]
            instance = "{:s}[{:x}:{:+x}]".format(item.Data().classname(), item.getoffset(), item.size())
            instancedata = [(instance, item)]
        else:
            return super(RecordContainer, self).summary()
        return ' : '.join(["{:d} record{:s}".format(len(self), '' if len(self) == 1 else 's'), ' ... '.join("{!s} {:s}".format(instance, data.summary()) for instance, data in instancedata)])
    def details(self):
        # Group consecutive records of the same type; emit one line per group.
        def Fkey(object):
            '''lambda (_,item): (lambda recordType:"{:s}[{:04x}]".format(item.classname(), recordType))(item.getparent(RecordGeneral)['header']['type'].int())'''
            index, item = object
            # NOTE(review): indexes the header with lowercase 'type' although
            # the field is declared as 'Type' -- confirm ptypes accepts this.
            record, Fclassname = item.parent, functools.partial("{:s}[{:04x}]".format, item.classname())
            return Fclassname(record['header']['type'].int())
        def emit_prefix(_, records):
            index, record = records[0]
            # '*' marks groups where any record carries trailing extra bytes.
            ok = not any(item.parent.Extra().size() for _, item in records)
            return "[{:x}] {:s}{:s}[{:d}]".format(record.getparent(RecordGeneral).getoffset(), '' if ok else '*', self.classname(), index)
        def emit_classname(classname, records):
            if len(records) > 1:
                return "{length:d} * {:s}".format(classname, length=len(records))
            return classname
        def emit_summary(_, records):
            if len(records) > 1:
                res = b''.join(item.serialize() for index, item in records)
                return ptypes.utils.emit_repr(res, ptypes.Config.display.threshold.summary) or '...'
            (_, record), = records
            return record.summary()
        groups = [(typename, [item for item in items]) for typename, items in itertools.groupby(enumerate(self.walk()), key=Fkey)]
        iterable = ([emit_prefix(*item), emit_classname(*item), emit_summary(*item)] for item in groups)
        return '\n'.join(map(' : '.join, iterable))
    def search(self, type, recurse=False):
        '''Search through a list of records for a particular type'''
        if not recurse:
            for item in self.filter(type):
                yield item
            return
        # ourselves first
        for d in self.search(type, False):
            yield d
        flazy = (lambda item: item['data'].d.l) if getattr(self, 'lazy', False) else (lambda item: item['data'])
        # now our children
        for item in self:
            if not hasattr(flazy(item), 'search'):
                continue
            for d in flazy(item).search(type, True):
                yield d
            continue
        return
    def lookup(self, type):
        '''Return the first instance of specified record type'''
        items = [item for item in self if item['header'].Instance() == type]
        if not items:
            raise KeyError(type)
        if len(items) != 1:
            raise AssertionError("Unexpected number of items ({:d}) of the specified type ({:#x}) was returned".format(len(items), type))
        return items[0]
    def walk(self):
        # Yield each record's data (dereferencing lazily when enabled).
        flazy = (lambda item: item['data'].d.l) if getattr(self, 'lazy', False) else (lambda item: item['data'])
        for item in self:
            yield flazy(item)
        return
    def errors(self):
        # Yield records that failed to initialize or whose size mismatches.
        for item in self:
            if item.initializedQ() and item.size() == item.blocksize():
                continue
            yield item
        return
    def filter(self, type):
        # type may be an integer (matched against the header's Instance()) or
        # a class (matched against the data's type with isinstance).
        if isinstance(type, six.integer_types):
            for item in self:
                # NOTE(review): Instance() returns a (version, instance)
                # tuple, so comparing it to an int looks like it can never
                # match -- confirm intended behavior.
                if item['header'].Instance() == type:
                    yield item
                continue
            return
        flazy = (lambda item: item['data'].d.li) if getattr(self, 'lazy', False) else (lambda item: item['data'])
        for item in self:
            if isinstance(flazy(item), type):
                yield item
            continue
        return
    def __getitem__(self, index):
        # String index: resolve through _values_ to a record class and return
        # the first element whose data is an instance of it.
        flazy = (lambda item: item['data'].d.li) if getattr(self, 'lazy', False) else (lambda item: item['data'])
        if hasattr(self, '_values_') and isinstance(index, six.string_types):
            lookup = { name : value for name, value in self._values_ }
            t = lookup[index]
            iterable = (index for index, item in enumerate(self) if isinstance(flazy(item), t))
            # NOTE(review): raises StopIteration (not KeyError/IndexError)
            # when nothing matches -- confirm callers expect that.
            index = next(iterable)
        return super(RecordContainer, self).__getitem__(index)
# yea, a file really is usually just a gigantic list of records...
class File(RecordContainer):
    """A whole file parsed as one flat run of records."""
    def blocksize(self):
        # A bounded source (e.g. a file on disk) defines the file extent;
        # otherwise defer to the container's own blocksize computation.
        source = self.source
        if isinstance(source, ptypes.provider.bounded):
            return source.size()
        return super(File, self).blocksize()
    def properties(self):
        res = super(File, self).properties()
        try:
            res['size'] = self.size()
        except ptypes.error.InitializationError:
            pass
        # Fall back to reporting the blocksize when the size is unavailable.
        if 'size' not in res:
            try:
                res['blocksize'] = self.blocksize()
            except ptypes.error.InitializationError:
                pass
        return res
if __name__ == '__main__':
    from ptypes import *
    # Minimal record definition used only for this smoke test.
    # @Record.Define
    class r(pstruct.type):
        type = 0
        _fields_ = [
            (pint.uint32_t, 'a')
        ]
    # Parse a RecordGeneral from a fixed byte string and print it.
    s = b'\x00\x00\x00\x00\x0c\x00\x00\x00' + b'A'*30
    z = RecordGeneral()
    z.source = provider.string(s)
    print(z.l)
| |
import numpy
from six import moves

from chainer import cuda
from chainer import function
from chainer.utils import conv
from chainer.utils import type_check

if cuda.cudnn_enabled:
    # Bind cuDNN handles once at import time; _fwd_pref selects the forward
    # algorithm subject to a workspace-size limit.
    cudnn = cuda.cudnn
    libcudnn = cuda.cudnn.cudnn
    _fwd_pref = libcudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return (x, x)
class Convolution2DFunction(function.Function):
    """2-D convolution; uses cuDNN on GPU when enabled, else im2col + GEMM."""
    def __init__(self, stride=1, pad=0, use_cudnn=True):
        # stride and pad accept either a scalar or a (vertical, horizontal) pair.
        self.sy, self.sx = _pair(stride)
        self.ph, self.pw = _pair(pad)
        self.use_cudnn = use_cudnn
    def check_type_forward(self, in_types):
        # Inputs are (x, W) or (x, W, b); all must be float32.
        n_in = in_types.size()
        type_check.expect(2 <= n_in, n_in <= 3)
        x_type = in_types[0]
        w_type = in_types[1]
        type_check.expect(
            x_type.dtype == numpy.float32,
            w_type.dtype == numpy.float32,
            x_type.ndim == 4,
            w_type.ndim == 4,
            x_type.shape[1] == w_type.shape[1],
        )
        if n_in.eval() == 3:
            b_type = in_types[2]
            type_check.expect(
                b_type.dtype == numpy.float32,
                b_type.ndim == 1,
                b_type.shape[0] == w_type.shape[0],
            )
    def forward_cpu(self, inputs):
        # x: (n, c, h, w); W: (out_c, c, kh, kw); optional b: (out_c,).
        x, W = inputs[:2]
        kh, kw = W.shape[2:]
        # self.col is kept for backward_cpu's gradient computations.
        self.col = conv.im2col_cpu(
            x, kh, kw, self.sy, self.sx, self.ph, self.pw)
        # Contract over (c, kh, kw); tensordot yields (n, out_h, out_w, out_c).
        y = numpy.tensordot(self.col, W, ((1, 2, 3), (1, 2, 3)))
        if len(inputs) == 3:
            b = inputs[2]
            y += b
        # Move the channel axis back to position 1: (n, out_c, out_h, out_w).
        return numpy.rollaxis(y, 3, 1),
    def forward_gpu(self, inputs):
        x, W = inputs[:2]
        out_c, _, kh, kw = W.shape
        n, c, h, w = x.shape
        b = inputs[2] if len(inputs) == 3 else None
        out_h = conv.get_conv_outsize(h, kh, self.sy, self.ph)
        out_w = conv.get_conv_outsize(w, kw, self.sx, self.pw)
        y = cuda.cupy.empty((n, out_c, out_h, out_w), dtype=x.dtype)
        if cuda.cudnn_enabled and self.use_cudnn:
            handle = cudnn.get_handle()
            x_desc = cudnn.create_tensor_descriptor(x)
            y_desc = cudnn.create_tensor_descriptor(y)
            # Descriptors are stored on self for reuse in backward_gpu.
            self.filter_desc = cudnn.create_filter_descriptor(W)
            self.conv_desc = cudnn.create_convolution_descriptor(
                (self.ph, self.pw), (self.sy, self.sx))
            if b is not None:
                self.bias_desc = cudnn.create_tensor_descriptor(
                    b[None, :, None, None])
            # NOTE(review): the 4 presumably means sizeof(float32) bytes per
            # column element -- confirm if other dtypes are ever allowed here.
            self.max_workspace_size = c * kh * kw * 4
            algo = libcudnn.getConvolutionForwardAlgorithm(
                handle, x_desc.value, self.filter_desc.value,
                self.conv_desc.value, y_desc.value, _fwd_pref,
                self.max_workspace_size)
            workspace_size = libcudnn.getConvolutionForwardWorkspaceSize(
                handle, x_desc.value, self.filter_desc.value,
                self.conv_desc.value, y_desc.value, algo)
            workspace = cuda.cupy.empty(
                (max(workspace_size // 4, 1),), dtype=x.dtype)
            dtype = x.dtype
            one = numpy.array(1, dtype=dtype).ctypes
            zero = numpy.array(0, dtype=dtype).ctypes
            libcudnn.convolutionForward(
                handle, one.data, x_desc.value, x.data.ptr,
                self.filter_desc.value, W.data.ptr, self.conv_desc.value,
                algo, workspace.data.ptr, workspace_size, zero.data,
                y_desc.value, y.data.ptr)
            # TODO(beam2d): Support unshared bias
            if b is not None:
                libcudnn.addTensor(
                    handle, libcudnn.CUDNN_ADD_SAME_C, one.data,
                    self.bias_desc.value, b.data.ptr, one.data,
                    y_desc.value, y.data.ptr)
        else:
            # Implementation using im2col
            self.col = conv.im2col_gpu(
                x, kh, kw, self.sy, self.sx, self.ph, self.pw)
            W_mat = W.reshape(out_c, -1)
            col_mats = self.col.reshape(n, -1, out_h * out_w)
            y_mats = y.reshape(n, out_c, -1)
            # TODO(beam2d): Use streams or batch gemm
            for i in moves.range(n):
                y_mats[i] = W_mat.dot(col_mats[i])
            # TODO(beam2d): Support unshared bias
            if b is not None:
                y += b[:, None, None]
        return y,
    def backward_cpu(self, inputs, grad_outputs):
        x, W = inputs[:2]
        gy = grad_outputs[0]
        h, w = x.shape[2:]
        # Weight gradient: contract gy with the im2col buffer saved in forward.
        gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
        gcol = numpy.tensordot(W, gy, (0, 1))
        gcol = numpy.rollaxis(gcol, 3)
        gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)
        if len(inputs) == 3:
            # Bias gradient: sum over batch and spatial axes.
            gb = gy.sum(axis=(0, 2, 3))
            return gx, gW, gb
        else:
            return gx, gW
    def backward_gpu(self, inputs, grad_outputs):
        # NOTE(review): the cuDNN path reuses self.conv_desc/filter_desc (and
        # bias_desc) created in forward_gpu, so backward assumes forward ran
        # on the same path first -- confirm callers guarantee that.
        x, W = inputs[:2]
        b = inputs[2] if len(inputs) == 3 else None
        gy = grad_outputs[0]
        _, out_c, out_h, out_w = gy.shape
        n, c, h, w = x.shape
        kh, kw = W.shape[2:]
        gW = cuda.cupy.empty_like(W)
        if cuda.cudnn_enabled and self.use_cudnn:
            handle = cudnn.get_handle()
            x_desc = cudnn.create_tensor_descriptor(x)
            # cuDNN requires contiguous gradient arrays.
            if not gy.flags.c_contiguous:
                gy = cuda.cupy.ascontiguousarray(gy)
            gy_desc = cudnn.create_tensor_descriptor(gy)
            dtype = x.dtype
            one = numpy.array(1, dtype=dtype).ctypes
            zero = numpy.array(0, dtype=dtype).ctypes
            libcudnn.convolutionBackwardFilter(
                handle, one.data, x_desc.value, x.data.ptr,
                gy_desc.value, gy.data.ptr, self.conv_desc.value,
                zero.data, self.filter_desc.value, gW.data.ptr)
            gx = cuda.cupy.empty_like(x)
            libcudnn.convolutionBackwardData(
                handle, one.data, self.filter_desc.value, W.data.ptr,
                gy_desc.value, gy.data.ptr, self.conv_desc.value,
                zero.data, x_desc.value, gx.data.ptr)
            if b is not None:
                gb = cuda.cupy.empty_like(inputs[2])
                libcudnn.convolutionBackwardBias(
                    handle, one.data, gy_desc.value, gy.data.ptr,
                    zero.data, self.bias_desc.value, gb.data.ptr)
        else:
            gW_mat = gW.reshape(out_c, c * kh * kw)
            col_mats = self.col.reshape(n, c * kh * kw, out_h * out_w)
            gy_mats = gy.reshape(n, out_c, out_h * out_w)
            # TODO(beam2d): Use streams or batch gemm
            gW_mat[...] = 0
            for i in moves.range(n):
                gW_mat += cuda.cupy.dot(gy_mats[i], col_mats[i].T)
            W_mat = W.reshape(out_c, -1)
            gcol = cuda.cupy.empty_like(self.col)
            gcol_mats = gcol.reshape(n, c * kh * kw, out_h * out_w)
            for i in moves.range(n):
                cuda.cupy.dot(W_mat.T, gy_mats[i], gcol_mats[i])
            gx = conv.col2im_gpu(
                gcol, self.sy, self.sx, self.ph, self.pw, h, w)
            if b is not None:
                gb = gy.sum(axis=(0, 2, 3))
        if b is None:
            return gx, gW
        else:
            return gx, gW, gb
def convolution_2d(x, W, b=None, stride=1, pad=0, use_cudnn=True):
    """Two-dimensional convolution function.

    Computes correlations between the filter weight ``W`` of shape
    :math:`(c_O, c_I, k_H, k_W)` and patches of the input image ``x`` of
    shape :math:`(n, c_I, h, w)` (correlation here equals the inner product
    between expanded vectors). If the bias vector ``b`` of length
    :math:`c_O` is given, it is added to all spatial locations of the
    output.

    Patches are extracted at positions shifted by multiples of ``stride``
    from the first position ``-pad`` for each spatial axis; the right-most
    (bottom-most) patches do not run over the padded spatial size. With
    stride :math:`(s_Y, s_X)` and padding :math:`(p_H, p_W)`, the output
    size :math:`(h_O, w_O)` is determined by

    .. math::

       h_O &= (h + 2p_H - k_H) / s_Y + 1,\\\\
       w_O &= (w + 2p_W - k_W) / s_X + 1.

    Args:
        x (~chainer.Variable): Input variable of shape :math:`(n, c_I, h, w)`.
        W (~chainer.Variable): Weight variable of shape
            :math:`(c_O, c_I, k_H, k_W)`.
        b (~chainer.Variable): Bias variable of length :math:`c_O` (optional).
        stride (int or (int, int)): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or (int, int)): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.
        use_cudnn (bool): If True, then this function uses CuDNN if available.

    Returns:
        ~chainer.Variable: Output variable.

    .. seealso:: :class:`Convolution2D`
    """
    func = Convolution2DFunction(stride, pad, use_cudnn)
    args = (x, W) if b is None else (x, W, b)
    return func(*args)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP client sockets that connect to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import cryptor, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr, onetimeauth_verify, \
onetimeauth_gen, ONETIMEAUTH_BYTES, ADDRTYPE_AUTH
BUF_SIZE = 65536
def client_key(source_addr, server_af):
    # Cache key for the per-client socket map: pairs the client's
    # (host, port) with the address family of the chosen server socket.
    # notice this is server af, not dest af
    host, port = source_addr[0], source_addr[1]
    return '%s:%s:%d' % (host, port, server_af)
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
    """Bind the listening UDP socket and set up relay state.

    config: parsed shadowsocks config dict; dns_resolver: shared resolver;
    is_local: True for the local (sslocal) side, False for the server side;
    stat_callback: optional traffic-accounting hook called with
    (listen_port, nbytes).
    """
    self._config = config
    if is_local:
        self._listen_addr = config['local_address']
        self._listen_port = config['local_port']
        self._remote_addr = config['server']
        self._remote_port = config['server_port']
    else:
        self._listen_addr = config['server']
        self._listen_port = config['server_port']
        self._remote_addr = None
        self._remote_port = None
    # DNS-tunnel defaults; only used when _is_tunnel is enabled elsewhere.
    self.tunnel_remote = config.get('tunnel_remote', "8.8.8.8")
    self.tunnel_remote_port = config.get('tunnel_remote_port', 53)
    self.tunnel_port = config.get('tunnel_port', 53)
    self._is_tunnel = False
    self._dns_resolver = dns_resolver
    self._password = common.to_bytes(config['password'])
    self._method = config['method']
    self._timeout = config['timeout']
    # _ota_enable is the configured default; _ota_enable_session is
    # re-evaluated per datagram from the address-type auth flag.
    self._ota_enable = config.get('one_time_auth', False)
    self._ota_enable_session = self._ota_enable
    self._is_local = is_local
    # Per-client upstream sockets, keyed by client_key(); idle entries are
    # closed via _close_client when they expire.
    self._cache = lru_cache.LRUCache(timeout=config['timeout'],
                                     close_callback=self._close_client)
    self._client_fd_to_server_addr = \
        lru_cache.LRUCache(timeout=config['timeout'])
    self._dns_cache = lru_cache.LRUCache(timeout=300)
    self._eventloop = None
    self._closed = False
    self._sockets = set()
    self._forbidden_iplist = config.get('forbidden_ip')
    addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
                               socket.SOCK_DGRAM, socket.SOL_UDP)
    if len(addrs) == 0:
        raise Exception("UDP can't get addrinfo for %s:%d" %
                        (self._listen_addr, self._listen_port))
    af, socktype, proto, canonname, sa = addrs[0]
    server_socket = socket.socket(af, socktype, proto)
    server_socket.bind((self._listen_addr, self._listen_port))
    # Non-blocking: the socket is driven by the event loop.
    server_socket.setblocking(False)
    self._server_socket = server_socket
    self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
def _handle_server(self):
    """Read one datagram from the listening socket and relay it upstream.

    Local side: strip the SOCKS5 UDP header (or add an ss header in tunnel
    mode), encrypt, and send to the chosen server. Server side: decrypt,
    verify optional one-time auth, and forward the payload to the
    destination parsed from the ss header.
    """
    server = self._server_socket
    data, r_addr = server.recvfrom(BUF_SIZE)
    key = None
    iv = None
    if not data:
        logging.debug('UDP handle_server: data is empty')
    if self._stat_callback:
        self._stat_callback(self._listen_port, len(data))
    if self._is_local:
        if self._is_tunnel:
            # add ss header to data
            tunnel_remote = self.tunnel_remote
            tunnel_remote_port = self.tunnel_remote_port
            data = common.add_header(tunnel_remote,
                                     tunnel_remote_port, data)
        else:
            # SOCKS5 UDP request: byte 2 is FRAG; fragmentation unsupported.
            frag = common.ord(data[2])
            if frag != 0:
                logging.warn('UDP drop a message since frag is not 0')
                return
            else:
                # Strip RSV(2) + FRAG(1); the rest is the ss request.
                data = data[3:]
    else:
        # decrypt data
        try:
            data, key, iv = cryptor.decrypt_all(self._password,
                                                self._method,
                                                data)
        except Exception:
            logging.debug('UDP handle_server: decrypt data failed')
            return
        if not data:
            logging.debug('UDP handle_server: data is empty after decrypt')
            return
    header_result = parse_header(data)
    if header_result is None:
        return
    addrtype, dest_addr, dest_port, header_length = header_result
    logging.info("udp data to %s:%d from %s:%d"
                 % (dest_addr, dest_port, r_addr[0], r_addr[1]))
    if self._is_local:
        server_addr, server_port = self._get_a_server()
    else:
        server_addr, server_port = dest_addr, dest_port
        # spec https://shadowsocks.org/en/spec/one-time-auth.html
        # The OTA flag rides on the address-type byte of each datagram.
        self._ota_enable_session = addrtype & ADDRTYPE_AUTH
        if self._ota_enable and not self._ota_enable_session:
            logging.warn('client one time auth is required')
            return
        if self._ota_enable_session:
            if len(data) < header_length + ONETIMEAUTH_BYTES:
                logging.warn('UDP one time auth header is too short')
                return
            # Trailing bytes are the HMAC over the rest, keyed by iv+key.
            _hash = data[-ONETIMEAUTH_BYTES:]
            data = data[: -ONETIMEAUTH_BYTES]
            _key = iv + key
            if onetimeauth_verify(_hash, data, _key) is False:
                logging.warn('UDP one time auth fail')
                return
    addrs = self._dns_cache.get(server_addr, None)
    if addrs is None:
        addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                   socket.SOCK_DGRAM, socket.SOL_UDP)
        if not addrs:
            # drop
            return
        else:
            self._dns_cache[server_addr] = addrs
    af, socktype, proto, canonname, sa = addrs[0]
    key = client_key(r_addr, af)
    client = self._cache.get(key, None)
    if not client:
        # TODO async getaddrinfo
        if self._forbidden_iplist:
            if common.to_str(sa[0]) in self._forbidden_iplist:
                logging.debug('IP %s is in forbidden list, drop' %
                              common.to_str(sa[0]))
                # drop
                return
        # First datagram from this client: create an upstream socket and
        # register it with the event loop.
        client = socket.socket(af, socktype, proto)
        client.setblocking(False)
        self._cache[key] = client
        self._client_fd_to_server_addr[client.fileno()] = r_addr
        self._sockets.add(client.fileno())
        self._eventloop.add(client, eventloop.POLL_IN, self)
    if self._is_local:
        key, iv, m = cryptor.gen_key_iv(self._password, self._method)
        # spec https://shadowsocks.org/en/spec/one-time-auth.html
        if self._ota_enable_session:
            data = self._ota_chunk_data_gen(key, iv, data)
        try:
            data = cryptor.encrypt_all_m(key, iv, m, self._method, data)
        except Exception:
            logging.debug("UDP handle_server: encrypt data failed")
            return
        if not data:
            return
    else:
        # Server side forwards only the payload past the ss header.
        data = data[header_length:]
        if not data:
            return
    try:
        client.sendto(data, (server_addr, server_port))
    except IOError as e:
        err = eventloop.errno_from_exception(e)
        if err in (errno.EINPROGRESS, errno.EAGAIN):
            # Transient; the datagram is simply dropped.
            pass
        else:
            shell.print_exception(e)
def _handle_client(self, sock):
    """Relay a datagram arriving on a remote-peer socket back to the
    original client of this UDP relay.

    On the server side the reply is prefixed with the packed peer
    address and re-encrypted; on the local side the reply is decrypted
    and (unless in tunnel mode) a SOCKS5 UDP reply header is prepended.
    Malformed or undecryptable packets are silently dropped.
    """
    data, r_addr = sock.recvfrom(BUF_SIZE)
    if not data:
        logging.debug('UDP handle_client: data is empty')
        return
    if self._stat_callback:
        # traffic accounting hook
        self._stat_callback(self._listen_port, len(data))
    if not self._is_local:
        # server side: prepend the peer address (SOCKS-style) and encrypt
        addrlen = len(r_addr[0])
        if addrlen > 255:
            # address cannot be encoded in one length byte -- drop
            return
        data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
        try:
            response = cryptor.encrypt_all(self._password,
                                           self._method, data)
        except Exception:
            logging.debug("UDP handle_client: encrypt data failed")
            return
        if not response:
            return
    else:
        # local side: decrypt the reply coming from the ss server
        try:
            data, key, iv = cryptor.decrypt_all(self._password,
                                                self._method, data)
        except Exception:
            logging.debug('UDP handle_client: decrypt data failed')
            return
        if not data:
            return
        header_result = parse_header(data)
        if header_result is None:
            return
        addrtype, dest_addr, dest_port, header_length = header_result
        if self._is_tunnel:
            # remove ss header
            response = data[header_length:]
        else:
            # SOCKS5 UDP reply: RSV(2) + FRAG(1) then the original payload
            response = b'\x00\x00\x00' + data
    # look up which client this remote socket belongs to
    client_addr = self._client_fd_to_server_addr.get(sock.fileno())
    if client_addr:
        logging.debug("send udp response to %s:%d"
                      % (client_addr[0], client_addr[1]))
        self._server_socket.sendto(response, client_addr)
    else:
        # this packet is from somewhere else we know
        # simply drop that packet
        pass
def _ota_chunk_data_gen(self, key, iv, data):
    """Return *data* with the one-time-auth flag set on the address-type
    byte and the HMAC authentication tag appended (keyed with iv + key).
    """
    flagged_first = common.chr(common.ord(data[0]) | ADDRTYPE_AUTH)
    flagged = flagged_first + data[1:]
    auth_key = iv + key
    return flagged + onetimeauth_gen(flagged, auth_key)
def add_to_loop(self, loop):
    """Register this relay with the event loop.

    Raises:
        Exception: if the relay is already attached to a loop, or was
            already closed.
    """
    if self._eventloop:
        raise Exception('already add to loop')
    if self._closed:
        raise Exception('already closed')
    self._eventloop = loop
    # watch the listening socket for readability and errors
    loop.add(self._server_socket,
             eventloop.POLL_IN | eventloop.POLL_ERR, self)
    loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
    """Event-loop callback: dispatch readiness events to the listening
    socket handler or to the matching remote-peer socket handler.
    """
    has_error = event & eventloop.POLL_ERR
    if sock == self._server_socket:
        if has_error:
            logging.error('UDP server_socket err')
        self._handle_server()
        return
    if sock and fd in self._sockets:
        if has_error:
            logging.error('UDP client_socket err')
        self._handle_client(sock)
def handle_periodic(self):
    """Periodic maintenance: finish shutdown if closed, then expire
    stale entries from the LRU caches.
    """
    if self._closed:
        if self._server_socket:
            self._server_socket.close()
            self._server_socket = None
            # NOTE(review): self._sockets is populated with fileno()
            # integers elsewhere in this class, yet close() is called on
            # the entries here -- confirm whether these are sockets or fds.
            for sock in self._sockets:
                sock.close()
            logging.info('closed UDP port %d', self._listen_port)
    # sweep expired entries (remote sockets, fd->client map, DNS results)
    self._cache.sweep()
    self._client_fd_to_server_addr.sweep()
    self._dns_cache.sweep()
def close(self, next_tick=False):
    """Shut the relay down.

    With next_tick=True only mark the relay closed and let
    handle_periodic() finish the teardown; otherwise tear down
    immediately: deregister from the loop, close the listening socket
    and every cached remote-peer socket.
    """
    logging.debug('UDP close')
    self._closed = True
    if next_tick:
        return
    loop = self._eventloop
    if loop:
        loop.remove_periodic(self.handle_periodic)
        loop.remove(self._server_socket)
    self._server_socket.close()
    for client in list(self._cache.values()):
        client.close()
| |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains common test utilities for Timesketch."""
from flask_testing import TestCase
from timesketch import create_app
from timesketch.lib import datastore
from timesketch.lib.definitions import HTTP_STATUS_CODE_REDIRECT
from timesketch.models import init_db
from timesketch.models import drop_all
from timesketch.models import db_session
from timesketch.models.user import User
from timesketch.models.sketch import Sketch
from timesketch.models.sketch import Timeline
from timesketch.models.sketch import SearchIndex
from timesketch.models.sketch import View
from timesketch.models.sketch import Event
class TestConfig(object):
    """Config for the test environment."""
    DEBUG = True
    SECRET_KEY = u'testing'
    # In-memory SQLite database, recreated per test run.
    SQLALCHEMY_DATABASE_URI = u'sqlite://'
    # CSRF disabled so tests can POST forms without fetching a token.
    WTF_CSRF_ENABLED = False
    # No real Elasticsearch backend in tests (MockDataStore is used instead).
    ELASTIC_HOST = None
    ELASTIC_PORT = None
    UPLOAD_ENABLED = False
class MockDataStore(datastore.DataStore):
    """A mock implementation of a Datastore."""

    # Canned single-event document returned by get_event(), mimicking an
    # Elasticsearch document for a plaso event.
    event_dict = {
        u'_index': [],
        u'_id': u'adc123',
        u'_type': u'plaso_event',
        u'_source': {
            u'es_index': u'',
            u'es_id': u'',
            u'label': u'',
            u'timestamp': 1410895419859714,
            u'timestamp_desc': u'',
            u'datetime': u'2014-09-16T19:23:40+00:00',
            u'source_short': u'',
            u'source_long': u'',
            u'message': u'',
        }
    }

    # Canned Elasticsearch-style search response returned by search().
    # Contains one hit carrying two timesketch_label entries (one for
    # sketch 1 / user 1, one for sketch 99 / user 2).
    search_result_dict = {
        u'hits': {
            u'hits': [
                {
                    u'sort': [
                        1410593223000
                    ],
                    u'_type': u'plaso_event',
                    u'_source': {
                        u'timestamp': 1410593222543942,
                        u'message': u'Test event',
                        u'timesketch_label': [
                            {
                                u'user_id': 1,
                                u'name': u'__ts_star',
                                u'sketch_id': 1
                            },
                            {
                                u'user_id': 2,
                                u'name': u'__ts_star',
                                u'sketch_id': 99
                            },
                        ],
                        u'timestamp_desc': u'Content Modification Time',
                        u'datetime': u'2014-09-13T07:27:03+00:00'
                    },
                    u'_score': u'null',
                    u'_index': u'test',
                    u'_id': u'test'
                }
            ],
            u'total': 1,
            u'max_score': u'null'
        },
        u'_shards': {
            u'successful': 10,
            u'failed': 0,
            u'total': 10
        },
        u'took': 5,
        u'timed_out': False
    }

    def __init__(self, host, port):
        """Initialize the datastore.

        Args:
            host: Hostname or IP address to the datastore
            port: The port used by the datastore
        """
        self.host = host
        self.port = port

    def search(
            self, unused_sketch_id, unused_query, unused_query_filter,
            unused_indices):
        """Mock a search query.

        Returns:
            A dictionary with search result.
        """
        return self.search_result_dict

    def get_event(self, unused_searchindex_id, unused_event_id):
        """Mock returning a single event from the datastore.

        Returns:
            A dictionary with event data.
        """
        return self.event_dict

    def set_label(
            self, searchindex_id, event_id, event_type, sketch_id, user_id,
            label, toggle=False):
        """Mock adding a label to an event."""
        # Intentionally a no-op: label writes are not recorded in tests.
        return
class BaseTest(TestCase):
    """Base class for tests."""

    # Default timeline color used by _create_timeline().
    COLOR_WHITE = u'FFFFFF'

    def create_app(self):
        """Setup the Flask application.

        Returns:
            Flask application (instance of flask.app.Flask)
        """
        app = create_app(TestConfig)
        return app

    def _commit_to_database(self, model):
        """Add object to the database session and commit.

        Args:
            model: Instance of timesketch.models.[model] object
        """
        db_session.add(model)
        db_session.commit()

    def _create_user(self, username, set_password=False):
        """Create a user in the database.

        Args:
            username: Username (string)
            set_password: Boolean value to decide if a password should be set

        Returns:
            A user (instance of timesketch.models.user.User)
        """
        user = User(username=username)
        if set_password:
            # rounds=1 keeps password hashing fast in tests
            user.set_password(plaintext=u'test', rounds=1)
        self._commit_to_database(user)
        return user

    def _create_sketch(self, name, user, acl=False):
        """Create a sketch in the database.

        Args:
            name: Name of the sketch (string)
            user: A user (instance of timesketch.models.user.User)
            acl: Boolean value to decide if ACL permissions should be set

        Returns:
            A sketch (instance of timesketch.models.sketch.Sketch)
        """
        sketch = Sketch(name=name, description=name, user=user)
        if acl:
            for permission in [u'read', u'write', u'delete']:
                sketch.grant_permission(user=user, permission=permission)
        label = sketch.Label(label=u'Test label', user=user)
        status = sketch.Status(status=u'Test status', user=user)
        sketch.labels.append(label)
        sketch.status.append(status)
        self._commit_to_database(sketch)
        return sketch

    def _create_searchindex(self, name, user):
        """Create a searchindex in the database.

        Args:
            name: Name of the searchindex (string)
            user: A user (instance of timesketch.models.user.User)

        Returns:
            A searchindex (instance of timesketch.models.sketch.SearchIndex)
        """
        searchindex = SearchIndex(
            name=name, description=name, index_name=name, user=user)
        self._commit_to_database(searchindex)
        return searchindex

    def _create_event(self, sketch, searchindex, user):
        """Create an event in the database.

        Args:
            sketch: A sketch (instance of timesketch.models.sketch.Sketch)
            searchindex:
                A searchindex (instance of timesketch.models.sketch.SearchIndex)
            user: A user (instance of timesketch.models.user.User)

        Returns:
            An event (instance of timesketch.models.sketch.Event)
        """
        event = Event(
            sketch=sketch, searchindex=searchindex, document_id=u'test')
        comment = event.Comment(comment=u'test', user=user)
        event.comments.append(comment)
        self._commit_to_database(event)
        return event

    def _create_timeline(self, name, sketch, searchindex, user):
        """Create a timeline in the database.

        Args:
            name: Name of the timeline (string)
            sketch: A sketch (instance of timesketch.models.sketch.Sketch)
            searchindex:
                A searchindex (instance of timesketch.models.sketch.SearchIndex)
            user: A user (instance of timesketch.models.user.User)

        Returns:
            A timeline (instance of timesketch.models.sketch.Timeline)
        """
        timeline = Timeline(
            name=name, description=name, user=user, sketch=sketch,
            searchindex=searchindex, color=self.COLOR_WHITE)
        self._commit_to_database(timeline)
        return timeline

    def _create_view(self, name, sketch, user):
        """Create a view in the database.

        Args:
            name: Name of the view (string)
            sketch: A sketch (instance of timesketch.models.sketch.Sketch)
            user: A user (instance of timesketch.models.user.User)

        Returns:
            A view (instance of timesketch.models.sketch.View)
        """
        view = View(
            name=name, query_string=name, query_filter=u'', user=user,
            sketch=sketch)
        self._commit_to_database(view)
        return view

    def setUp(self):
        """Setup the test database."""
        init_db()
        # Two users; only user1 has a password and ACL on sketch1.
        self.user1 = self._create_user(username=u'test1', set_password=True)
        self.user2 = self._create_user(username=u'test2', set_password=False)
        self.sketch1 = self._create_sketch(
            name=u'Test 1', user=self.user1, acl=True)
        self.sketch2 = self._create_sketch(
            name=u'Test 2', user=self.user1, acl=False)
        self.searchindex = self._create_searchindex(
            name=u'test', user=self.user1)
        self.timeline = self._create_timeline(
            name=u'Timeline 1', sketch=self.sketch1,
            searchindex=self.searchindex, user=self.user1)
        self.view1 = self._create_view(
            name=u'View 1', sketch=self.sketch1, user=self.user1)
        self.view2 = self._create_view(
            name=u'View 2', sketch=self.sketch2, user=self.user1)
        self.view3 = self._create_view(
            name=u'', sketch=self.sketch1, user=self.user2)
        self.event = self._create_event(
            sketch=self.sketch1, searchindex=self.searchindex, user=self.user1)

    def tearDown(self):
        """Tear down the test database."""
        db_session.remove()
        drop_all()

    def login(self):
        """Authenticate the test user."""
        self.client.post(
            u'/login/', data=dict(username=u'test1', password=u'test'),
            follow_redirects=True)

    def test_unauthenticated(self):
        """
        Generic test for all resources. It tests that no
        unauthenticated request are accepted.
        """
        # Subclasses set resource_url; skip when run on the base class.
        # NOTE(review): skipTest() normally takes a reason string, not self.
        if not getattr(self, u'resource_url', False):
            self.skipTest(self)
        response = self.client.get(self.resource_url)
        if response.status_code == 405:
            # GET not allowed on this resource; retry with POST.
            response = self.client.post(self.resource_url)
        self.assertIn(u'/login/', response.data)
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(response.status_code, HTTP_STATUS_CODE_REDIRECT)
class ModelBaseTest(BaseTest):
    """Base class for database model tests."""

    def _test_db_object(self, expected_result=None, model_cls=None):
        """Generic test that checks if the stored data is correct.

        Args:
            expected_result: Iterable of (attribute_name, expected_value)
                pairs to verify on the stored object.
            model_cls: The model class to query; the row with primary
                key 1 is checked.
        """
        db_obj = model_cls.query.get(1)
        for item in expected_result:
            key, value = item[0], item[1]
            # getattr is the idiomatic form of __getattribute__;
            # assertEqual replaces the deprecated assertEquals alias.
            self.assertEqual(getattr(db_obj, key), value)
| |
import logging
import time
from abc import abstractmethod
from typing import Dict, Any, Iterable, List
import numpy as np
from guacamol.utils.chemistry import canonicalize_list, is_valid, calculate_pc_descriptors, continuous_kldiv, \
discrete_kldiv, calculate_internal_pairwise_similarities
from guacamol.distribution_matching_generator import DistributionMatchingGenerator
from guacamol.utils.data import get_random_subset
from guacamol.utils.sampling_helpers import sample_valid_molecules, sample_unique_molecules
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class DistributionLearningBenchmarkResult:
    """
    Result record produced by a distribution-learning benchmark run.

    NB: the timing covers sampling only; training happens outside of
    DistributionLearningBenchmark and is not measured here.
    """

    def __init__(self, benchmark_name: str, score: float, sampling_time: float, metadata: Dict[str, Any]) -> None:
        """
        Args:
            benchmark_name: name of the distribution-learning benchmark
            score: benchmark score
            sampling_time: time for sampling the molecules in seconds
            metadata: benchmark-specific information
        """
        # Plain value object: store everything as-is.
        self.metadata = metadata
        self.sampling_time = sampling_time
        self.score = score
        self.benchmark_name = benchmark_name
class DistributionLearningBenchmark:
    """
    Base class for benchmarks that judge how well a model reproduces a
    target molecule distribution.

    Subclasses must implement assess_model().
    """

    def __init__(self, name: str, number_samples: int) -> None:
        # benchmark display name and how many molecules to draw per run
        self.name = name
        self.number_samples = number_samples

    @abstractmethod
    def assess_model(self, model: DistributionMatchingGenerator) -> DistributionLearningBenchmarkResult:
        """
        Assess a distribution-matching generator model.

        Args:
            model: model to assess
        """
class ValidityBenchmark(DistributionLearningBenchmark):
    """
    Measures the fraction of generated molecules that are valid,
    i.e. correspond to a parseable SMILES string.
    """

    def __init__(self, number_samples) -> None:
        super().__init__(name='Validity', number_samples=number_samples)

    def assess_model(self, model: DistributionMatchingGenerator) -> DistributionLearningBenchmarkResult:
        """Sample from the model and score the valid fraction."""
        sampling_start = time.time()
        molecules = model.generate(number_samples=self.number_samples)
        sampling_end = time.time()

        if len(molecules) != self.number_samples:
            raise Exception('The model did not generate the correct number of molecules')

        number_valid = sum(1 for smiles in molecules if is_valid(smiles))

        metadata = {
            'number_samples': self.number_samples,
            'number_valid': number_valid,
        }
        return DistributionLearningBenchmarkResult(benchmark_name=self.name,
                                                   score=number_valid / self.number_samples,
                                                   sampling_time=sampling_end - sampling_start,
                                                   metadata=metadata)
class UniquenessBenchmark(DistributionLearningBenchmark):
    """
    Measures the fraction of generated molecules that are unique.
    """

    def __init__(self, number_samples) -> None:
        super().__init__(name='Uniqueness', number_samples=number_samples)

    def assess_model(self, model: DistributionMatchingGenerator) -> DistributionLearningBenchmarkResult:
        """Sample valid molecules and score the unique fraction."""
        sampling_start = time.time()
        molecules = sample_valid_molecules(model=model, number_molecules=self.number_samples)
        sampling_end = time.time()

        if len(molecules) != self.number_samples:
            logger.warning('The model could not generate enough valid molecules. The score will be penalized.')

        # canonicalize_list removes duplicates (and invalid molecules, but there shouldn't be any)
        unique_molecules = canonicalize_list(molecules, include_stereocenters=False)
        number_unique = len(unique_molecules)

        metadata = {
            'number_samples': self.number_samples,
            'number_unique': number_unique
        }
        return DistributionLearningBenchmarkResult(benchmark_name=self.name,
                                                   score=number_unique / self.number_samples,
                                                   sampling_time=sampling_end - sampling_start,
                                                   metadata=metadata)
class NoveltyBenchmark(DistributionLearningBenchmark):
    """
    Measures the fraction of generated molecules that do not appear in
    the training set.
    """

    def __init__(self, number_samples: int, training_set: Iterable[str]) -> None:
        """
        Args:
            number_samples: number of samples to generate from the model
            training_set: molecules from the training set
        """
        super().__init__(name='Novelty', number_samples=number_samples)
        # Canonical SMILES (stereo stripped) for fast set-difference lookups.
        self.training_set_molecules = set(canonicalize_list(training_set, include_stereocenters=False))

    def assess_model(self, model: DistributionMatchingGenerator) -> DistributionLearningBenchmarkResult:
        """
        Assess a distribution-matching generator model.

        Args:
            model: model to assess
        """
        sampling_start = time.time()
        molecules = sample_unique_molecules(model=model, number_molecules=self.number_samples, max_tries=2)
        sampling_end = time.time()

        if len(molecules) != self.number_samples:
            logger.warning('The model could not generate enough unique molecules. The score will be penalized.')

        # Canonicalization strips stereo information; duplicates and invalid
        # molecules should already have been filtered out during sampling.
        sampled_molecules = set(canonicalize_list(molecules, include_stereocenters=False))
        novel_molecules = sampled_molecules - self.training_set_molecules

        metadata = {
            'number_samples': self.number_samples,
            'number_novel': len(novel_molecules)
        }
        return DistributionLearningBenchmarkResult(benchmark_name=self.name,
                                                   score=len(novel_molecules) / self.number_samples,
                                                   sampling_time=sampling_end - sampling_start,
                                                   metadata=metadata)
class KLDivBenchmark(DistributionLearningBenchmark):
    """
    Computes the KL divergence between a number of samples and the training set for physchem descriptors
    """

    def __init__(self, number_samples: int, training_set: List[str]) -> None:
        """
        Args:
            number_samples: number of samples to generate from the model
            training_set: molecules from the training set
        """
        super().__init__(name='KL divergence', number_samples=number_samples)
        # Fixed random subset (seed=42) of the training set, canonicalized
        # without stereo info -- this is the reference distribution.
        self.training_set_molecules = canonicalize_list(get_random_subset(training_set, self.number_samples, seed=42),
                                                        include_stereocenters=False)
        # First 4 descriptors are float-valued, last 5 are int-valued;
        # assess_model() relies on this split at index 4.
        self.pc_descriptor_subset = [
            'BertzCT',
            'MolLogP',
            'MolWt',
            'TPSA',
            'NumHAcceptors',
            'NumHDonors',
            'NumRotatableBonds',
            'NumAliphaticRings',
            'NumAromaticRings'
        ]

    def assess_model(self, model: DistributionMatchingGenerator) -> DistributionLearningBenchmarkResult:
        """
        Assess a distribution-matching generator model.

        Args:
            model: model to assess
        """
        start_time = time.time()
        molecules = sample_unique_molecules(model=model, number_molecules=self.number_samples, max_tries=2)
        end_time = time.time()
        if len(molecules) != self.number_samples:
            logger.warning('The model could not generate enough unique molecules. The score will be penalized.')
        # canonicalize_list in order to remove stereo information (also removes duplicates and invalid molecules, but there shouldn't be any)
        unique_molecules = set(canonicalize_list(molecules, include_stereocenters=False))
        # first we calculate the descriptors, which are np.arrays of size n_samples x n_descriptors
        d_sampled = calculate_pc_descriptors(unique_molecules, self.pc_descriptor_subset)
        d_chembl = calculate_pc_descriptors(self.training_set_molecules, self.pc_descriptor_subset)
        kldivs = {}
        # now we calculate the kl divergence for the float valued descriptors ...
        for i in range(4):
            kldiv = continuous_kldiv(X_baseline=d_chembl[:, i], X_sampled=d_sampled[:, i])
            kldivs[self.pc_descriptor_subset[i]] = kldiv
        # ... and for the int valued ones.
        for i in range(4, 9):
            kldiv = discrete_kldiv(X_baseline=d_chembl[:, i], X_sampled=d_sampled[:, i])
            kldivs[self.pc_descriptor_subset[i]] = kldiv
        # pairwise similarity: per-molecule max similarity to any other
        # molecule in the same set
        chembl_sim = calculate_internal_pairwise_similarities(self.training_set_molecules)
        chembl_sim = chembl_sim.max(axis=1)
        sampled_sim = calculate_internal_pairwise_similarities(unique_molecules)
        sampled_sim = sampled_sim.max(axis=1)
        kldiv_int_int = continuous_kldiv(X_baseline=chembl_sim, X_sampled=sampled_sim)
        kldivs['internal_similarity'] = kldiv_int_int
        # for some reason, this runs into problems when both sets are identical.
        # cross_set_sim = calculate_pairwise_similarities(self.training_set_molecules, unique_molecules)
        # cross_set_sim = cross_set_sim.max(axis=1)
        #
        # kldiv_ext = discrete_kldiv(chembl_sim, cross_set_sim)
        # kldivs['external_similarity'] = kldiv_ext
        # kldiv_sum += kldiv_ext
        metadata = {
            'number_samples': self.number_samples,
            'kl_divs': kldivs
        }
        # Each KL divergence value is transformed to be in [0, 1].
        # Then their average delivers the final score.
        partial_scores = [np.exp(-score) for score in kldivs.values()]
        score = sum(partial_scores) / len(partial_scores)
        return DistributionLearningBenchmarkResult(benchmark_name=self.name,
                                                   score=score,
                                                   sampling_time=end_time - start_time,
                                                   metadata=metadata)
| |
"""
ANSI -> html converter
Credit for original idea and implementation
goes to Muhammad Alkarouri and his
snippet #577349 on http://code.activestate.com.
(extensively modified by Griatch 2010)
"""
from __future__ import absolute_import
from builtins import object
import re
import cgi
from .ansi import *
# All xterm256 RGB equivalents
XTERM256_FG = "\033[38;5;%sm"
XTERM256_BG = "\033[48;5;%sm"
class TextToHTMLparser(object):
    """
    This class describes a parser for converting from ANSI to html.
    """
    tabstop = 4
    # mapping html color name <-> ansi code.
    hilite = ANSI_HILITE
    unhilite = ANSI_UNHILITE  # this will be stripped - there is no css equivalent.
    normal = ANSI_NORMAL  # "
    underline = ANSI_UNDERLINE
    blink = ANSI_BLINK
    inverse = ANSI_INVERSE  # this will produce an outline; no obvious css equivalent?

    # 16 base foreground colors plus the 240 extended xterm256 colors,
    # mapped to css class names.
    colorcodes = [
        ('color-000', unhilite + ANSI_BLACK),  # pure black
        ('color-001', unhilite + ANSI_RED),
        ('color-002', unhilite + ANSI_GREEN),
        ('color-003', unhilite + ANSI_YELLOW),
        ('color-004', unhilite + ANSI_BLUE),
        ('color-005', unhilite + ANSI_MAGENTA),
        ('color-006', unhilite + ANSI_CYAN),
        ('color-007', unhilite + ANSI_WHITE),  # light grey
        ('color-008', hilite + ANSI_BLACK),  # dark grey
        ('color-009', hilite + ANSI_RED),
        ('color-010', hilite + ANSI_GREEN),
        ('color-011', hilite + ANSI_YELLOW),
        ('color-012', hilite + ANSI_BLUE),
        ('color-013', hilite + ANSI_MAGENTA),
        ('color-014', hilite + ANSI_CYAN),
        ('color-015', hilite + ANSI_WHITE)  # pure white
        # FIX: was `xrange`, which does not exist on Python 3; the twin
        # comprehension for colorback below already used `range`.
    ] + [("color-%03i" % (i+16), XTERM256_FG % ("%i" % (i+16))) for i in range(240)]
    colorback = [
        ('bgcolor-000', ANSI_BACK_BLACK),  # pure black
        ('bgcolor-001', ANSI_BACK_RED),
        ('bgcolor-002', ANSI_BACK_GREEN),
        ('bgcolor-003', ANSI_BACK_YELLOW),
        ('bgcolor-004', ANSI_BACK_BLUE),
        ('bgcolor-005', ANSI_BACK_MAGENTA),
        ('bgcolor-006', ANSI_BACK_CYAN),
        ('bgcolor-007', ANSI_BACK_WHITE),  # light grey
        ('bgcolor-008', hilite + ANSI_BACK_BLACK),  # dark grey
        ('bgcolor-009', hilite + ANSI_BACK_RED),
        ('bgcolor-010', hilite + ANSI_BACK_GREEN),
        ('bgcolor-011', hilite + ANSI_BACK_YELLOW),
        ('bgcolor-012', hilite + ANSI_BACK_BLUE),
        ('bgcolor-013', hilite + ANSI_BACK_MAGENTA),
        ('bgcolor-014', hilite + ANSI_BACK_CYAN),
        ('bgcolor-015', hilite + ANSI_BACK_WHITE),  # pure white
    ] + [("bgcolor-%03i" % (i+16), XTERM256_BG % ("%i" % (i+16))) for i in range(240)]

    # make sure to escape [
    #colorcodes = [(c, code.replace("[", r"\[")) for c, code in colorcodes]
    #colorback = [(c, code.replace("[", r"\[")) for c, code in colorback]

    # ansi escape sequence -> css class name
    fg_colormap = dict((code, clr) for clr, code in colorcodes)
    bg_colormap = dict((code, clr) for clr, code in colorback)

    # create stop markers
    fgstop = "(?:\033\[1m|\033\[22m)*\033\[3[0-8].*?m|\033\[0m|$"
    bgstop = "(?:\033\[1m|\033\[22m)*\033\[4[0-8].*?m|\033\[0m|$"

    # extract color markers, tagging the start marker and the text marked
    re_fgs = re.compile("((?:\033\[1m|\033\[22m)*\033\[3[0-8].*?m)(.*?)(?=" + fgstop + ")")
    re_bgs = re.compile("((?:\033\[1m|\033\[22m)*\033\[4[0-8].*?m)(.*?)(?=" + bgstop + ")")
    re_normal = re.compile(normal.replace("[", r"\["))
    re_hilite = re.compile("(?:%s)(.*)(?=%s|%s)" % (hilite.replace("[", r"\["), fgstop, bgstop))
    re_unhilite = re.compile("(?:%s)(.*)(?=%s|%s)" % (unhilite.replace("[", r"\["), fgstop, bgstop))
    re_uline = re.compile("(?:%s)(.*?)(?=%s|%s)" % (underline.replace("[", r"\["), fgstop, bgstop))
    re_blink = re.compile("(?:%s)(.*?)(?=%s|%s)" % (blink.replace("[", r"\["), fgstop, bgstop))
    re_inverse = re.compile("(?:%s)(.*?)(?=%s|%s)" % (inverse.replace("[", r"\["), fgstop, bgstop))
    re_string = re.compile(r'(?P<htmlchars>[<&>])|(?P<space> [ \t]+)|(?P<lineend>\r\n|\r|\n)', re.S | re.M | re.I)
    re_url = re.compile(r'((?:ftp|www|https?)\W+(?:(?!\.(?:\s|$)|&\w+;)[^"\',;$*^\\(){}<>\[\]\s])+)(\.(?:\s|$)|&\w+;|)')
    re_mxplink = re.compile(r'\|lc(.*?)\|lt(.*?)\|le', re.DOTALL)

    def _sub_fg(self, colormatch):
        # replace a foreground ansi sequence + text with a css span
        code, text = colormatch.groups()
        return r'''<span class="%s">%s</span>''' % (self.fg_colormap.get(code, "err"), text)

    def _sub_bg(self, colormatch):
        # replace a background ansi sequence + text with a css span
        code, text = colormatch.groups()
        return r'''<span class="%s">%s</span>''' % (self.bg_colormap.get(code, "err"), text)

    def re_color(self, text):
        """
        Replace ansi colors with html color class names. Let the
        client choose how it will display colors, if it wishes to.

        Args:
            text (str): the string with color to replace.

        Returns:
            text (str): Re-colored text.
        """
        text = self.re_fgs.sub(self._sub_fg, text)
        text = self.re_bgs.sub(self._sub_bg, text)
        text = self.re_normal.sub("", text)
        return text

    def re_bold(self, text):
        """
        Clean out superfluous hilights rather than set <strong>to make
        it match the look of telnet.

        Args:
            text (str): Text to process.

        Returns:
            text (str): Processed text.
        """
        text = self.re_hilite.sub(r'<strong>\1</strong>', text)
        return self.re_unhilite.sub(r'\1', text)  # strip unhilite - there is no equivalent in css.

    def re_underline(self, text):
        """
        Replace ansi underline with html underline class name.

        Args:
            text (str): Text to process.

        Returns:
            text (str): Processed text.
        """
        return self.re_uline.sub(r'<span class="underline">\1</span>', text)

    def re_blinking(self, text):
        """
        Replace ansi blink with custom blink css class

        Args:
            text (str): Text to process.

        Returns:
            text (str): Processed text.
        """
        return self.re_blink.sub(r'<span class="blink">\1</span>', text)

    def re_inversing(self, text):
        """
        Replace ansi inverse with custom inverse css class

        Args:
            text (str): Text to process.

        Returns:
            text (str): Processed text.
        """
        return self.re_inverse.sub(r'<span class="inverse">\1</span>', text)

    def remove_bells(self, text):
        """
        Remove ansi specials

        Args:
            text (str): Text to process.

        Returns:
            text (str): Processed text.
        """
        return text.replace('\07', '')

    def remove_backspaces(self, text):
        """
        Removes special escape sequences

        Args:
            text (str): Text to process.

        Returns:
            text (str): Processed text.
        """
        backspace_or_eol = r'(.\010)|(\033\[K)'
        n = 1
        while n > 0:
            text, n = re.subn(backspace_or_eol, '', text, 1)
        return text

    def convert_linebreaks(self, text):
        """
        Extra method for cleaning linebreaks

        Args:
            text (str): Text to process.

        Returns:
            text (str): Processed text.
        """
        # NOTE(review): r'\n' is the two-character sequence backslash-n,
        # not a newline (real newlines are handled by sub_text) -- confirm
        # this is intentional.
        return text.replace(r'\n', r'<br>')

    def convert_urls(self, text):
        """
        Replace urls (http://...) by valid HTML.

        Args:
            text (str): Text to process.

        Returns:
            text (str): Processed text.
        """
        # -> added target to output prevent the web browser from attempting to
        # change pages (and losing our webclient session).
        return self.re_url.sub(r'<a href="\1" target="_blank">\1</a>\2', text)

    def sub_mxp_links(self, match):
        """
        Helper method to be passed to re.sub,
        replaces MXP links with HTML code.

        Args:
            match (re.Matchobject): Match for the |lc...|lt...|le link.

        Returns:
            text (str): Processed text.
        """
        # FIX: the escape expression was malformed (unbalanced quotes).
        # Escape double quotes so cmd/text survive inside the JS string,
        # and use &quot; as the JS-level delimiters so the surrounding
        # "-quoted onclick attribute is not terminated early.
        cmd, text = [grp.replace('"', '\\"') for grp in match.groups()]
        val = r"""<a id="mxplink" href="#" """ \
              """onclick="Evennia.msg(&quot;text&quot;,[&quot;{cmd}&quot;],{{}});""" \
              """return false;">{text}</a>""".format(cmd=cmd, text=text)
        return val

    def sub_text(self, match):
        """
        Helper method to be passed to re.sub,
        for handling all substitutions.

        Args:
            match (re.Matchobject): Match for substitution.

        Returns:
            text (str): Processed text.
        """
        cdict = match.groupdict()
        if cdict['htmlchars']:
            # NOTE(review): cgi.escape was removed in Python 3.8;
            # html.escape(..., quote=False) is the drop-in replacement.
            return cgi.escape(cdict['htmlchars'])
        if cdict['lineend']:
            return '<br>'
        elif cdict['space'] == '\t':
            return ' ' * self.tabstop
        elif cdict['space']:
            text = match.group().replace('\t', ' ' * self.tabstop)
            text = text.replace(' ', '&nbsp;')
            return text

    def parse(self, text, strip_ansi=False):
        """
        Main access function, converts a text containing ANSI codes
        into html statements.

        Args:
            text (str): Text to process.
            strip_ansi (bool, optional):

        Returns:
            text (str): Parsed text.
        """
        # parse everything to ansi first
        text = parse_ansi(text, strip_ansi=strip_ansi, xterm256=True, mxp=True)
        # convert all ansi to html
        result = re.sub(self.re_string, self.sub_text, text)
        result = re.sub(self.re_mxplink, self.sub_mxp_links, result)
        result = self.re_color(result)
        result = self.re_bold(result)
        result = self.re_underline(result)
        result = self.re_blinking(result)
        result = self.re_inversing(result)
        result = self.remove_bells(result)
        result = self.convert_linebreaks(result)
        result = self.remove_backspaces(result)
        result = self.convert_urls(result)
        # clean out eventual ansi that was missed
        #result = parse_ansi(result, strip_ansi=True)
        return result
# module-level parser singleton shared by parse_html()
HTML_PARSER = TextToHTMLparser()


#
# Access function
#

def parse_html(string, strip_ansi=False, parser=HTML_PARSER):
    """
    Convert a string with ANSI markup into its HTML representation.

    Args:
        string (str): Input text, possibly containing ANSI codes.
        strip_ansi (bool, optional): Strip ANSI codes instead of
            converting them.
        parser (TextToHTMLparser, optional): Parser instance to use.

    Returns:
        str: The converted text.
    """
    return parser.parse(string, strip_ansi=strip_ansi)
| |
# -*- coding: utf-8 -*-
""" Simple Generic Location Tracking System
@copyright: 2011-12 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from gluon.dal import Table, Query, Set, Expression, Rows, Row
from datetime import datetime, timedelta
__all__ = ["S3Tracker"]
UID = "uuid" # field name for UIDs
TRACK_ID = "track_id" # field name for track ID
LOCATION_ID = "location_id" # field name for base location
LOCATION = "gis_location" # location tablename
PRESENCE = "sit_presence" # presence tablename
# =============================================================================
class S3Trackable(object):
"""
Trackable types instance(s)
"""
def __init__(self, trackable, record_id=None, uid=None, rtable=None):
    """
    Constructor:

    @param trackable: the trackable object
    @param record_id: the record ID(s) (if object is a table or tablename)
    @param uid: the record UID(s) (if object is a table or tablename)
    @param rtable: the resource table (for the recursive calls)
    """
    db = current.db
    s3db = current.s3db

    self.records = []
    self.table = s3db.sit_trackable
    self.rtable = rtable

    if isinstance(trackable, (Table, str)):
        # Table or tablename: select the requested records by ID or UID
        if hasattr(trackable, "_tablename"):
            table = trackable
            tablename = table._tablename
        else:
            table = s3db[trackable]
            tablename = trackable
        fields = self.__get_fields(table)
        if not fields:
            raise SyntaxError("Not a trackable type: %s" % tablename)
        query = (table._id > 0)
        if uid is None:
            if record_id is not None:
                if isinstance(record_id, (list, tuple)):
                    query = (table._id.belongs(record_id))
                else:
                    query = (table._id == record_id)
        elif UID in table.fields:
            # FIX: branches were inverted -- belongs() takes a list/tuple
            # of UIDs, while a single UID must be matched by equality
            # (belongs() on a plain string would match per-character).
            if isinstance(uid, (list, tuple)):
                query = (table[UID].belongs(uid))
            else:
                query = (table[UID] == uid)
        fields = [table[f] for f in fields]
        rows = db(query).select(*fields)
    elif isinstance(trackable, Row):
        # Single record: verify that it carries the required fields
        fields = self.__get_fields(trackable)
        if not fields:
            raise SyntaxError("Required fields not present in the row")
        rows = Rows(records=[trackable], compact=False)
    elif isinstance(trackable, Rows):
        # Set of records: all of them must carry the required fields
        rows = [r for r in trackable if self.__get_fields(r)]
        fail = len(trackable) - len(rows)
        if fail:
            raise SyntaxError("Required fields not present in %d of the rows" % fail)
        rows = Rows(records=rows, compact=False)
    elif isinstance(trackable, (Query, Expression)):
        # Query/expression: resolve the table and select matching records
        tablename = db._adapter.get_table(trackable)
        self.rtable = s3db[tablename]
        fields = self.__get_fields(self.rtable)
        if not fields:
            raise SyntaxError("Not a trackable type: %s" % tablename)
        query = trackable
        fields = [self.rtable[f] for f in fields]
        rows = db(query).select(*fields)
    elif isinstance(trackable, Set):
        query = trackable.query
        tablename = db._adapter.get_table(query)
        table = s3db[tablename]
        fields = self.__get_fields(table)
        if not fields:
            raise SyntaxError("Not a trackable type: %s" % tablename)
        fields = [table[f] for f in fields]
        rows = trackable.select(*fields)
    else:
        raise SyntaxError("Invalid parameter type %s" % type(trackable))

    # Resolve super-entity rows into their instance records (via UID)
    records = []
    for r in rows:
        if self.__super_entity(r):
            table = s3db[r.instance_type]
            fields = self.__get_fields(table, super_entity=False)
            if not fields:
                raise SyntaxError("No trackable type: %s" % table._tablename)
            fields = [table[f] for f in fields]
            query = table[UID] == r[UID]
            row = db(query).select(limitby=(0, 1), *fields).first()
            if row:
                records.append(row)
        else:
            records.append(r)

    self.records = Rows(records=records, compact=False)
# -------------------------------------------------------------------------
@staticmethod
def __super_entity(trackable):
"""
Check whether a trackable is a super-entity
@param trackable: the trackable object
"""
if hasattr(trackable, "fields"):
keys = trackable.fields
else:
keys = trackable
return "instance_type" in keys
    # -------------------------------------------------------------------------
    def __get_fields(self, trackable, super_entity=True):
        """
        Check a trackable for presence of required fields

        @param trackable: the trackable object
        @param super_entity: check for the super-entity fields
                             (instance_type + UID) first

        @returns: a list/tuple of the relevant field names, or None if
                  the trackable does not qualify
        """
        fields = []

        # Tables/Rows expose field names via .fields; otherwise the
        # trackable is assumed to be an iterable of field names
        if hasattr(trackable, "fields"):
            keys = trackable.fields
        else:
            keys = trackable

        try:
            # Super-entities are identified by instance_type + UID
            if super_entity and \
               self.__super_entity(trackable) and UID in keys:
                return ("instance_type", UID)
            if LOCATION_ID in keys:
                fields.append(LOCATION_ID)
            if TRACK_ID in keys:
                fields.append(TRACK_ID)
                return fields
            elif hasattr(trackable, "update_record") or \
                 isinstance(trackable, Table) or \
                 isinstance(trackable, Row):
                return fields
        except:
            # NOTE(review): bare except silently masks any error raised by
            # the membership tests above - consider narrowing this
            pass
        return None
# -------------------------------------------------------------------------
def get_location(self,
timestmp=None,
_fields=None,
_filter=None,
as_rows=False,
exclude=[]):
"""
Get the current location of the instance(s) (at the given time)
@param timestmp: last datetime for presence (defaults to current time)
@param _fields: fields to retrieve from the location records (None for ALL)
@param _filter: filter for the locations
@param as_rows: return the result as Rows object
@param exclude: interlocks to break at (avoids circular check-ins)
@returns: a location record, or a list of location records (if multiple)
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
ltable = s3db[LOCATION]
if timestmp is None:
timestmp = datetime.utcnow()
locations = []
for r in self.records:
location = None
if TRACK_ID in r:
query = ((ptable.deleted == False) & \
(ptable[TRACK_ID] == r[TRACK_ID]) & \
(ptable.timestmp <= timestmp))
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence:
if presence.interlock:
exclude = [r[TRACK_ID]] + exclude
tablename, record = presence.interlock.split(",", 1)
trackable = S3Trackable(tablename, record)
record = trackable.records.first()
if TRACK_ID not in record or \
record[TRACK_ID] not in exclude:
location = trackable.get_location(timestmp=timestmp,
exclude=exclude)
elif presence.location_id:
query = (ltable.id == presence.location_id)
if _filter is not None:
query = query & _filter
if _fields is None:
location = db(query).select(ltable.ALL,
limitby=(0, 1)).first()
else:
location = db(query).select(limitby=(0, 1),
*_fields).first()
if not location:
if len(self.records) > 1:
trackable = S3Trackable(r, rtable=self.rtable)
else:
trackable = self
location = trackable.get_base_location(_fields=_fields)
if location:
locations.append(location)
else:
# Ensure we return an entry so that indexes match
locations.append(Row({"lat": None, "lon": None}))
if as_rows:
return Rows(records=locations, compact=False)
if not locations:
return None
elif len(locations) == 1:
return locations[0]
else:
return locations
    # -------------------------------------------------------------------------
    def set_location(self, location, timestmp=None):
        """
        Set the current location of instance(s) (at the given time)

        @param location: the location (as Row or record ID)
        @param timestmp: the datetime of the presence (defaults to current time)

        @returns: nothing
        """
        ptable = current.s3db[PRESENCE]

        if timestmp is None:
            timestmp = datetime.utcnow()

        # Normalize the location parameter down to a record ID
        if isinstance(location, S3Trackable):
            location = location.get_base_location()
        if isinstance(location, Rows):
            location = location.first()
        if isinstance(location, Row):
            if "location_id" in location:
                location = location.location_id
            else:
                location = location.id

        if not location:
            # No location given => do nothing
            return
        else:
            data = dict(location_id=location, timestmp=timestmp)

        for r in self.records:
            if TRACK_ID not in r:
                # No track ID => set base location
                if len(self.records) > 1:
                    trackable = S3Trackable(r)
                else:
                    trackable = self
                trackable.set_base_location(location)
            elif r[TRACK_ID]:
                # Log a presence entry and bump the trackable's timestamp
                data.update({TRACK_ID:r[TRACK_ID]})
                ptable.insert(**data)
                self.__update_timestamp(r[TRACK_ID], timestmp)
    # -------------------------------------------------------------------------
    def check_in(self, table, record, timestmp=None):
        """
        Bind the presence of the instance(s) to another instance

        @param table: table name of the other resource
        @param record: record in the other resource (as Row or record ID)
        @param timestmp: datetime of the check-in

        @returns: nothing
        """
        db = current.db
        s3db = current.s3db
        ptable = s3db[PRESENCE]

        if isinstance(table, str):
            table = s3db[table]

        fields = self.__get_fields(table)
        if not fields:
            raise SyntaxError("No location data in %s" % table._tablename)

        interlock = None
        if isinstance(record, Rows):
            record = record.first()
        if not isinstance(record, Row):
            record = table[record]
        if self.__super_entity(record):
            # Resolve the super-entity record into its instance record
            table = s3db[record.instance_type]
            fields = self.__get_fields(table, super_entity=False)
            if not fields:
                raise SyntaxError("No trackable type: %s" % table._tablename)
            query = table[UID] == record[UID]
            record = db(query).select(limitby=(0, 1)).first()
        if record and table._id.name in record:
            record = record[table._id.name]
        if record:
            # Interlock format: "<tablename>,<record_id>"
            interlock = "%s,%s" % (table, record)
        else:
            raise SyntaxError("No record specified for %s" % table._tablename)

        if interlock:
            if timestmp is None:
                timestmp = datetime.utcnow()
            data = dict(location_id=None,
                        timestmp=timestmp,
                        interlock=interlock)
            q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))
            for r in self.records:
                if TRACK_ID not in r:
                    # Cannot check-in a non-trackable
                    continue
                query = q & (ptable[TRACK_ID] == r[TRACK_ID])
                presence = db(query).select(orderby=~ptable.timestmp,
                                            limitby=(0, 1)).first()
                if presence and presence.interlock == interlock:
                    # already checked-in to the same instance
                    continue
                data.update({TRACK_ID:r[TRACK_ID]})
                ptable.insert(**data)
                self.__update_timestamp(r[TRACK_ID], timestmp)
    # -------------------------------------------------------------------------
    def check_out(self, table=None, record=None, timestmp=None):
        """
        Make the last log entry before timestmp independent from
        the referenced entity (if any)

        @param table: table (or tablename) to check-out from (optional;
                      if omitted, any interlock is released)
        @param record: record in that table (as Row or record ID)
        @param timestmp: the date/time of the check-out, defaults
                         to current time
        """
        db = current.db
        s3db = current.s3db
        ptable = s3db[PRESENCE]

        if timestmp is None:
            timestmp = datetime.utcnow()

        interlock = None
        if table is not None:
            # Build the interlock string to match against
            if isinstance(table, str):
                table = s3db[table]
            if isinstance(record, Rows):
                record = record.first()
            if self.__super_entity(table):
                # Resolve super-entity into the instance record
                if not isinstance(record, Row):
                    record = table[record]
                table = s3db[record.instance_type]
                fields = self.__get_fields(table, super_entity=False)
                if not fields:
                    raise SyntaxError("No trackable type: %s" % table._tablename)
                query = table[UID] == record[UID]
                record = db(query).select(limitby=(0, 1)).first()
            if isinstance(record, Row) and table._id.name in record:
                record = record[table._id.name]
            if record:
                interlock = "%s,%s" % (table, record)
            else:
                return

        q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))
        for r in self.records:
            if TRACK_ID not in r:
                # Cannot check-out a non-trackable
                continue
            query = q & (ptable[TRACK_ID] == r[TRACK_ID])
            presence = db(query).select(orderby=~ptable.timestmp,
                                        limitby=(0, 1)).first()
            if presence and presence.interlock:
                if interlock and presence.interlock != interlock:
                    # Checked-in to a different instance than requested
                    continue
                elif not interlock and table and \
                     not presence.interlock.startswith("%s" % table):
                    continue
                # Freeze the current location of the interlock target
                tablename, record = presence.interlock.split(",", 1)
                trackable = S3Trackable(tablename, record)
                location = trackable.get_location(timestmp=timestmp)
                # NOTE(review): get_location returns a Row (or list), which
                # is stored into location_id below - confirm the DAL coerces
                # this to the record ID as intended
                if timestmp - presence.timestmp < timedelta(seconds=1):
                    # Keep log entries strictly ordered in time
                    timestmp = timestmp + timedelta(seconds=1)
                data = dict(location_id=location,
                            timestmp=timestmp,
                            interlock=None)
                data.update({TRACK_ID:r[TRACK_ID]})
                ptable.insert(**data)
                self.__update_timestamp(r[TRACK_ID], timestmp)
    # -------------------------------------------------------------------------
    def remove_location(self, location=None):
        """
        Remove a location from the presence log of the instance(s)

        @param location: the location to remove (unused - not implemented)

        @todo: implement
        """
        raise NotImplementedError
    # -------------------------------------------------------------------------
    def get_base_location(self,
                          _fields=None,
                          _filter=None,
                          as_rows=False):
        """
        Get the base location of the instance(s)

        @param _fields: fields to retrieve from the location records (None for ALL)
        @param _filter: filter for the locations
        @param as_rows: return the result as Rows object

        @returns: the base location(s) of the current instance
        """
        db = current.db
        s3db = current.s3db

        ltable = s3db[LOCATION]
        rtable = self.rtable

        locations = []
        for r in self.records:
            location = None
            query = None
            if LOCATION_ID in r:
                # Record carries its base location directly
                query = (ltable.id == r[LOCATION_ID])
                if rtable:
                    query = query & (rtable[LOCATION_ID] == ltable.id)
                    if TRACK_ID in r:
                        query = query & (rtable[TRACK_ID] == r[TRACK_ID])
            elif TRACK_ID in r:
                # Resolve via the instance table of the super-entity
                q = (self.table[TRACK_ID] == r[TRACK_ID])
                trackable = db(q).select(limitby=(0, 1)).first()
                table = s3db[trackable.instance_type]
                if LOCATION_ID in table.fields:
                    query = ((table[TRACK_ID] == r[TRACK_ID]) &
                             (table[LOCATION_ID] == ltable.id))
            if query:
                if _filter is not None:
                    query = query & _filter
                if not _fields:
                    location = db(query).select(ltable.ALL,
                                                limitby=(0, 1)).first()
                else:
                    location = db(query).select(limitby=(0, 1),
                                                *_fields).first()
            if location:
                locations.append(location)
            else:
                # Ensure we return an entry so that indexes match
                locations.append(Row({"lat": None, "lon": None}))

        if as_rows:
            return Rows(records=locations, compact=False)

        if not locations:
            return None
        elif len(locations) == 1:
            return locations[0]
        else:
            return locations
    # -------------------------------------------------------------------------
    def set_base_location(self, location=None):
        """
        Set the base location of the instance(s)

        @param location: the location for the base location as Row or record ID

        @returns: nothing

        @note: instance tables without a location_id field will be ignored
        """
        db = current.db
        s3db = current.s3db

        # Normalize the location parameter down to a record ID
        if isinstance(location, S3Trackable):
            location = location.get_base_location()
        if isinstance(location, Rows):
            location = location.first()
        if isinstance(location, Row):
            location = location.id

        if not location or not str(location).isdigit():
            # Location not found
            return
        else:
            data = {LOCATION_ID:location}

        # Update records without track ID
        for r in self.records:
            if TRACK_ID in r:
                continue
            elif LOCATION_ID in r:
                if hasattr(r, "update_record"):
                    r.update_record(**data)
                else:
                    raise SyntaxError("Cannot relate record to a table.")

        # Update records with track ID
        # => this can happen table-wise = less queries
        track_ids = [r[TRACK_ID] for r in self.records
                     if TRACK_ID in r]
        rows = db(self.table[TRACK_ID].belongs(track_ids)).select()
        tables = []
        for r in rows:
            instance_type = r.instance_type
            table = s3db[instance_type]
            # NOTE(review): "instance_type not in tables" compares a type
            # name against Table objects, so it never de-duplicates -
            # confirm whether duplicate appends are possible here
            if instance_type not in tables and \
               LOCATION_ID in table.fields:
                tables.append(table)
            else:
                # No location ID in this type => ignore gracefully
                continue

        # Location specified => update all base locations
        for table in tables:
            db(table[TRACK_ID].belongs(track_ids)).update(**data)

        # Refresh records
        for r in self.records:
            if LOCATION_ID in r:
                r[LOCATION_ID] = location
    # -------------------------------------------------------------------------
    def __update_timestamp(self, track_id, timestamp):
        """
        Update the timestamp of a trackable

        @param track_id: the trackable ID (super-entity key)
        @param timestamp: the timestamp (defaults to current time)
        """
        if timestamp is None:
            timestamp = datetime.utcnow()
        if track_id:
            # Look up the super-entity row and store the new timestamp
            trackable = self.table[track_id]
            if trackable:
                trackable.update_record(track_timestmp=timestamp)
# =============================================================================
class S3Tracker(object):
    """
    S3 Tracking system

    Instantiated once as the global "s3tracker" object; calling it yields
    an S3Trackable interface for the requested record(s).
    """

    def __init__(self):
        """
        Constructor
        """

    # -------------------------------------------------------------------------
    def __call__(self, trackable, record_id=None, uid=None):
        """
        Get a tracking interface for a record or set of records

        @param trackable: a Row, Rows, Query, Expression, Set object or
                          a Table or a tablename
        @param record_id: a record ID or a list/tuple of record IDs
                          (together with Table or tablename)
        @param uid: a record UID or a list/tuple of record UIDs
                    (together with Table or tablename)

        @returns: a S3Trackable instance for the specified record(s)
        """
        return S3Trackable(trackable, record_id=record_id, uid=uid)

    # -------------------------------------------------------------------------
    def get_all(self, entity,
                location=None,
                bbox=None,
                timestmp=None):
        """
        Get all instances of the given entity at the given location and time

        @todo: implement
        """
        raise NotImplementedError

    # -------------------------------------------------------------------------
    def get_checked_in(self, table, record,
                       instance_type=None,
                       timestmp=None):
        """
        Get all trackables of the given type that are checked-in
        to the given instance at the given time

        @todo: implement
        """
        raise NotImplementedError
# =============================================================================
| |
# -*- coding: utf-8 -*-
"""
Test all terms in terms_dg. Performs numerical test on simple mesh.
"""
import functools
import inspect
import numpy as nm
import numpy.testing as nmts
import scipy.sparse as sp
from sfepy.base.base import Struct
from sfepy.base.testing import TestCommon
from sfepy.discrete import DGFieldVariable, Material, Integral
from sfepy.discrete import Variables
from sfepy.discrete.common.dof_info import EquationMap
from sfepy.terms.terms_dg import AdvectionDGFluxTerm, \
NonlinearHyperbolicDGFluxTerm, NonlinearScalarDotGradTerm, \
DiffusionDGFluxTerm, DiffusionInteriorPenaltyTerm
from test_dg_field import prepare_dgfield_1D, prepare_field_2D
class Test(TestCommon):
    """
    Aggregates the per-term test classes defined in this module into a
    single TestCommon-compatible test object.
    """

    def capture_assertion_decorator(self, method):
        """
        Wrap *method* so that an AssertionError becomes a False return
        value, and bind the wrapper to this instance.
        """
        @functools.wraps(method)
        def captured(_):
            try:
                method()
            except AssertionError:
                return False
            return True
        # Bind to self so the wrapper behaves like a regular test method
        return captured.__get__(self, self.__class__)

    @staticmethod
    def from_conf(conf, options):
        """
        Filters out terms test classes and gathers their test methods in
        resulting object.
        """
        collected = Test()
        for cls_name, test_cls in dict(globals()).items():
            # Only classes named Test...Term are term test containers
            if not (cls_name.startswith("Test") and cls_name.endswith("Term")):
                continue
            instance = test_cls()
            found = inspect.getmembers(instance, inspect.ismethod)
            collected.update({"{}_{}".format(method_name, cls_name[4:]):
                              collected.capture_assertion_decorator(meth)
                              for method_name, meth in found})
        return collected
class DGTermTestEnvornment:
    """
    Class for easy creation of all the data needed for testing terms.

    (Name kept as-is, including the typo, for backward compatibility.)
    """

    def burg_fun(self, u):
        # Burgers flux: velo * u^2, broadcast over a trailing axis
        vu = self.burg_velo * u[..., None] ** 2
        return vu

    def burg_fun_d(self, u):
        # Derivative of the Burgers flux: 2 * velo * u
        v1 = 2 * self.burg_velo * u[..., None]
        return v1

    def __init__(self, dim, approx_order, **kwargs):
        """
        Creates Struct object with all the data necessary to test terms

        :param dim: dimension (1 or 2; other values are unsupported)
        :param approx_order: approximation order
        :param kwargs: velo, diffusion or penalty for prepare_materials
        :return: term test scope
        """
        if dim == 1:
            (field, regions), mesh = prepare_dgfield_1D(approx_order)
        elif dim == 2:
            (field, regions), mesh = prepare_field_2D(approx_order)
        self.field = field
        self.regions = regions
        self.mesh = mesh

        self.n_cell = field.n_cell
        self.n_nod = field.n_nod
        self.n_el_nod = field.n_el_nod

        # State variable data is initialized to zero DOFs
        self.u, self.v = self.prepare_variables(field)
        self.u.data = [(nm.zeros(self.n_nod))]
        self.variables = Variables([ self.u, self.v])

        self.integral = Integral('i', order=approx_order * 2)
        self.a, self.D, self.Cw = self.prepare_materials(field, **kwargs)

        # Unit advection velocity, normalized for the chosen dimension
        if dim == 1:
            velo = nm.array(1.0)
        elif dim == 2:
            velo = nm.array([1.0, 0])
        self.burg_velo = velo.T / nm.linalg.norm(velo)

        self.nonlin = Material('nonlin',
                               values={'.fun': self.burg_fun,
                                       '.dfun': self.burg_fun_d})
        self.out = nm.zeros((self.n_cell, 1, self.n_el_nod, 1))

    def prepare_variables(self, field):
        """
        Prepares state and test variables, adds empty
        eq_map to state variable

        :param field: the DG field to base the variables on
        :return: state, test
        """
        n_nod = field.n_nod
        u = DGFieldVariable('u', 'unknown', field, history=1)
        v = DGFieldVariable('v', 'test', field, primary_var_name='u')
        var_di = Struct(
            details=Struct(dpn=1, n_nod=n_nod,
                           name="field_var_dof_details"),
            indx=slice(0, n_nod, None), n_dof=n_nod, name='u_dof_info',
            var_name="u")
        u.eq_map = EquationMap("eq_map", ["u.0"], var_di)
        # Empty equation map => no boundary conditions are applied
        u.eq_map._init_empty(field)
        return u, v

    def prepare_materials(self, field, velo=1.0, diffusion=0.1, penalty=100):
        """
        Creates material objects with data attribute, containing properly
        shaped data to pass to terms

        :param field: DGField
        :param velo: optional values for velocity a
        :param diffusion: optional value for diffusion tensor D
        :param penalty: optional value for diffusion penalty Cw
        :return: a, D, Cw
        """
        a = Material('a', val=[velo])
        a.data = nm.ones((field.n_cell, 1)) * velo
        D = Material('D', val=[diffusion])
        D.data = nm.ones((field.n_cell, 1, 1)) * diffusion
        Cw = Material("Cw", values={".val": penalty})
        Cw.data = penalty
        return a, D, Cw
class TestAdvectDGFluxTerm:
    """Numerical smoke tests for AdvectionDGFluxTerm on a simple 1D mesh."""

    def test_function_explicit_1D(self):
        """The explicit advection flux of a zero state must vanish."""
        env = DGTermTestEnvornment(dim=1, approx_order=3)
        flux = AdvectionDGFluxTerm("adv_stiff(a.val, u, v)",
                                   "a.val, u[-1], v",
                                   env.integral, env.regions["omega"],
                                   u=env.u, v=env.v, a=env.a)
        out, _ = flux.function(env.out,
                               env.u,
                               None,  # diff_var
                               env.field,
                               env.regions["omega"],
                               env.a.data)
        nmts.assert_almost_equal(out, nm.zeros(env.out.shape))
        return True

    def test_function_implicit_1D(self):
        """The implicit advection flux assembles a matrix of the right shape."""
        env = DGTermTestEnvornment(dim=1, approx_order=3)
        flux = AdvectionDGFluxTerm("adv_stiff(a.val, u, v)",
                                   "a.val, u, v",
                                   env.integral, env.regions["omega"],
                                   u=env.u, v=env.v, a=env.a)
        n_dof = env.n_cell * env.n_el_nod
        # In implicit mode the "out" argument is ignored by the term
        (vals, rows, cols, _, _), _ = flux.function(
            env.out,
            env.u,     # state
            "u",       # diff_var
            env.field,
            env.regions["omega"],
            env.a.data,  # advelo
        )
        mtx = sp.csr_matrix((vals, (rows, cols)),
                            shape=(n_dof, n_dof)).toarray()
        assert (n_dof, n_dof) == mtx.shape
        return True
class TestNonlinearHyperDGFluxTerm:
    """Numerical smoke test for NonlinearHyperbolicDGFluxTerm (1D)."""

    def test_function_explicit_1D(self):
        """The explicit nonlinear flux of a zero state must vanish."""
        env = DGTermTestEnvornment(dim=1, approx_order=3)
        flux = NonlinearHyperbolicDGFluxTerm("adv_stiff(f, df u, v)",
                                             "nonlin.f, nonlin.df, u[-1], v",
                                             env.integral, env.regions["omega"],
                                             u=env.u, v=env.v, nonlin=env.nonlin)
        out, _ = flux.function(env.out,
                               env.u,
                               env.field,
                               env.regions["omega"],
                               env.burg_fun,
                               env.burg_fun_d)
        nmts.assert_almost_equal(out, nm.zeros(env.out.shape))
        return True
class TestDiffusionDGFluxTerm:
    """Numerical tests for DiffusionDGFluxTerm in both avg_state and
    avg_virtual modes, explicit and implicit."""

    def test_function_explicit_right_1D(self):
        # Explicit evaluation, state average on the right ("avg_state"):
        # a zero state must give a zero flux
        te = DGTermTestEnvornment(dim=1, approx_order=3)
        term = DiffusionDGFluxTerm("diff_lf_flux(D.val, v, u)",
                                   "D.val, v, u[-1]",
                                   te.integral, te.regions["omega"],
                                   u=te.u, v=te.v, D=te.D)
        term.mode = "avg_state"
        result = nm.zeros(te.out.shape)
        out, _ = term.function(te.out,  # out
                               te.u,  # state
                               None,  # diff_var, explicit
                               te.field,
                               te.regions["omega"],
                               te.D.data,  # advelo
                               )
        nmts.assert_almost_equal(out, result)
        return True

    def test_function_explicit_left_1D(self):
        # Explicit evaluation, test-function average ("avg_virtual")
        te = DGTermTestEnvornment(dim=1, approx_order=3)
        term = DiffusionDGFluxTerm("diff_lf_flux(D.val, u, v)",
                                   "D.val, u[-1], v",
                                   te.integral, te.regions["omega"],
                                   u=te.u, v=te.v, D=te.D)
        term.mode = "avg_virtual"
        result = nm.zeros(te.out.shape)
        out, _ = term.function(te.out,  # out
                               te.u,  # state
                               None,  # diff_var, explicit
                               te.field,
                               te.regions["omega"],
                               te.D.data,  # advelo
                               )
        nmts.assert_almost_equal(out, result)
        return True

    def test_function_implicit_right_1D(self):
        # Implicit evaluation ("avg_state"): only the assembled matrix
        # shape is checked
        te = DGTermTestEnvornment(dim=1, approx_order=3)
        term = DiffusionDGFluxTerm("diff_lf_flux(D.val, v, u)",
                                   "D.val, v, u",
                                   te.integral, te.regions["omega"],
                                   u=te.u, v=te.v, D=te.D)
        term.mode = "avg_state"
        expected = nm.zeros(((te.n_cell * te.n_el_nod),) * 2)
        (out, iel1, iel2, _, _), _ = term.function(
            te.out,  # out
            te.u,  # state
            "u",  # diff_var, explicit
            te.field,
            te.regions["omega"],
            te.D.data,  # advelo
        )
        out = sp.csr_matrix((out, (iel1, iel2)),
                            shape=((te.n_cell * te.n_el_nod),) * 2).toarray()
        assert expected.shape == out.shape
        return True

    def test_function_implicit_left_1D(self):
        # Implicit evaluation ("avg_virtual"): shape check only
        te = DGTermTestEnvornment(dim=1, approx_order=3)
        term = DiffusionDGFluxTerm("diff_lf_flux(D.val, u, v)",
                                   "D.val, u[-1], v",
                                   te.integral, te.regions["omega"],
                                   u=te.u, v=te.v, D=te.D)
        term.mode = "avg_virtual"
        expected = nm.zeros(((te.n_cell * te.n_el_nod),) * 2)
        (out, iel1, iel2, _, _), _ = term.function(
            te.out,  # out
            te.u,  # state
            "u",  # diff_var, explicit
            te.field,
            te.regions["omega"],
            te.D.data,  # advelo
        )
        out = sp.csr_matrix((out, (iel1, iel2)),
                            shape=((te.n_cell * te.n_el_nod),) * 2).toarray()
        assert expected.shape == out.shape
        return True
class TestDiffusionInteriorPenaltyTerm:
    """Numerical smoke tests for DiffusionInteriorPenaltyTerm (1D)."""

    def test_function_explicit_1D(self):
        """The explicit penalty term of a zero state must vanish."""
        te = DGTermTestEnvornment(dim=1, approx_order=3)
        term = DiffusionInteriorPenaltyTerm("adv_stiff(Cw.val, u, v)",
                                            "Cw.val, u[-1], v",
                                            te.integral, te.regions["omega"],
                                            u=te.u, v=te.v, Cw=te.Cw)
        result = nm.zeros(te.out.shape)
        out, _ = term.function(te.out,
                               te.u,
                               None,  # diff_var
                               te.field,
                               te.regions["omega"],
                               te.Cw.data,
                               te.D.data
                               )
        nmts.assert_almost_equal(out, result)
        return True

    def test_function_implicit_1D(self):
        """The implicit penalty term assembles a matrix of the right shape."""
        te = DGTermTestEnvornment(dim=1, approx_order=3)
        # FIX: the penalty material must be passed as the "Cw" keyword -
        # it is referenced as "Cw.val" in the argument string and the
        # explicit test above passes it as Cw, not as "a"
        term = DiffusionInteriorPenaltyTerm("adv_stiff(D.val, a.val, u, v)",
                                            "Cw.val, u, v",
                                            te.integral, te.regions["omega"],
                                            u=te.u, v=te.v, Cw=te.Cw)
        expected = nm.zeros(((te.n_cell * te.n_el_nod),) * 2)
        (out, iel1, iel2, _, _), _ = term.function(
            te.out,  # out, ignored in implicit mode
            te.u,    # state
            "u",     # diff_var
            te.field,
            te.regions["omega"],
            te.Cw.data,
            te.D.data,
        )
        out = sp.csr_matrix((out, (iel1, iel2)),
                            shape=((te.n_cell * te.n_el_nod),) * 2).toarray()
        assert expected.shape == out.shape
        return True
class TestNonlinScalarDotGradTerm:
    """Numerical smoke test for NonlinearScalarDotGradTerm (1D)."""

    def test_function_explicit_1D(self):
        """The nonlinear dot-grad term of a zero state must vanish."""
        env = DGTermTestEnvornment(dim=1, approx_order=3)
        term = NonlinearScalarDotGradTerm("adv_stiff(f, df u, v)",
                                          "nonlin.f, nonlin.df, u[-1], v",
                                          env.integral, env.regions["omega"],
                                          u=env.u, v=env.v, nonlin=env.nonlin)
        term.setup()
        buffer = nm.zeros(env.out.shape)
        # Build the full argument tuple and prepend the output buffer
        fargs = (buffer,) + term.get_fargs(env.burg_fun,
                                           env.burg_fun_d,
                                           env.u,
                                           env.v)
        result = term.function(*fargs)
        nmts.assert_almost_equal(result, nm.zeros(env.out.shape))
        return True
if __name__ == '__main__':
    # NOTE(review): ``test_dg_term_calls`` is not defined anywhere in this
    # module's visible scope - confirm it is provided by TestCommon, or
    # whether this should invoke the methods gathered by Test.from_conf()
    t = Test()
    t.test_dg_term_calls()
| |
#!/usr/bin/env python
from nose.tools import *
import networkx
from test_graph import BaseGraphTester, BaseAttrGraphTester, TestGraph
class BaseDiGraphTester(BaseGraphTester):
    """Directed-graph interface tests shared by DiGraph-like classes.

    Relies on fixtures (``self.K3``, ``self.P3``, ``self.Graph``) set up
    by the concrete subclass' ``setUp``.
    """

    def test_has_successor(self):
        G=self.K3
        assert_equal(G.has_successor(0,1),True)
        assert_equal(G.has_successor(0,-1),False)

    def test_successors(self):
        G=self.K3
        assert_equal(sorted(G.successors(0)),[1,2])
        assert_raises((KeyError,networkx.NetworkXError), G.successors,-1)

    def test_successors_iter(self):
        G=self.K3
        assert_equal(sorted(G.successors_iter(0)),[1,2])
        assert_raises((KeyError,networkx.NetworkXError), G.successors_iter,-1)

    def test_has_predecessor(self):
        G=self.K3
        assert_equal(G.has_predecessor(0,1),True)
        assert_equal(G.has_predecessor(0,-1),False)

    def test_predecessors(self):
        G=self.K3
        assert_equal(sorted(G.predecessors(0)),[1,2])
        assert_raises((KeyError,networkx.NetworkXError), G.predecessors,-1)

    def test_predecessors_iter(self):
        G=self.K3
        assert_equal(sorted(G.predecessors_iter(0)),[1,2])
        assert_raises((KeyError,networkx.NetworkXError), G.predecessors_iter,-1)

    def test_edges(self):
        G=self.K3
        assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.edges(0)),[(0,1),(0,2)])
        assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)

    def test_edges_iter(self):
        G=self.K3
        assert_equal(sorted(G.edges_iter()),
                     [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.edges_iter(0)),[(0,1),(0,2)])

    def test_edges_data(self):
        G=self.K3
        assert_equal(sorted(G.edges(data=True)),
                     [(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),(2,0,{}),(2,1,{})])
        assert_equal(sorted(G.edges(0,data=True)),[(0,1,{}),(0,2,{})])
        assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)

    def test_out_edges(self):
        G=self.K3
        assert_equal(sorted(G.out_edges()),
                     [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.out_edges(0)),[(0,1),(0,2)])
        assert_raises((KeyError,networkx.NetworkXError), G.out_edges,-1)

    def test_out_edges_iter(self):
        G=self.K3
        assert_equal(sorted(G.out_edges_iter()),
                     [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.edges_iter(0)),[(0,1),(0,2)])

    def test_out_edges_dir(self):
        # On the directed path P3, out-edges depend on direction
        G=self.P3
        assert_equal(sorted(G.out_edges()),[(0, 1), (1, 2)])
        assert_equal(sorted(G.out_edges(0)),[(0, 1)])
        assert_equal(sorted(G.out_edges(2)),[])

    def test_out_edges_iter_dir(self):
        G=self.P3
        assert_equal(sorted(G.out_edges_iter()),[(0, 1), (1, 2)])
        assert_equal(sorted(G.out_edges_iter(0)),[(0, 1)])
        assert_equal(sorted(G.out_edges_iter(2)),[])

    def test_in_edges_dir(self):
        G=self.P3
        assert_equal(sorted(G.in_edges()),[(0, 1), (1, 2)])
        assert_equal(sorted(G.in_edges(0)),[])
        assert_equal(sorted(G.in_edges(2)),[(1,2)])

    def test_in_edges_iter_dir(self):
        G=self.P3
        assert_equal(sorted(G.in_edges_iter()),[(0, 1), (1, 2)])
        assert_equal(sorted(G.in_edges_iter(0)),[])
        assert_equal(sorted(G.in_edges_iter(2)),[(1,2)])

    def test_degree(self):
        # In a digraph degree == in_degree + out_degree (4 for K3 nodes)
        G=self.K3
        assert_equal(list(G.degree().values()),[4,4,4])
        assert_equal(G.degree(),{0:4,1:4,2:4})
        assert_equal(G.degree(0),4)
        assert_equal(G.degree([0]),{0:4})
        assert_raises((KeyError,networkx.NetworkXError), G.degree,-1)

    def test_degree_iter(self):
        G=self.K3
        assert_equal(list(G.degree_iter()),[(0,4),(1,4),(2,4)])
        assert_equal(dict(G.degree_iter()),{0:4,1:4,2:4})
        assert_equal(list(G.degree_iter(0)),[(0,4)])
        assert_equal(list(G.degree_iter(iter([0]))),[(0,4)]) #run through iterator

    def test_in_degree(self):
        G=self.K3
        assert_equal(list(G.in_degree().values()),[2,2,2])
        assert_equal(G.in_degree(),{0:2,1:2,2:2})
        assert_equal(G.in_degree(0),2)
        assert_equal(G.in_degree([0]),{0:2})
        assert_equal(G.in_degree(iter([0])),{0:2})
        assert_raises((KeyError,networkx.NetworkXError), G.in_degree,-1)

    def test_in_degree_iter(self):
        G=self.K3
        assert_equal(list(G.in_degree_iter()),[(0,2),(1,2),(2,2)])
        assert_equal(dict(G.in_degree_iter()),{0:2,1:2,2:2})
        assert_equal(list(G.in_degree_iter(0)),[(0,2)])
        assert_equal(list(G.in_degree_iter(iter([0]))),[(0,2)]) #run through iterator

    def test_in_degree_iter_weighted(self):
        G=self.K3
        G.add_edge(0,1,weight=0.3,other=1.2)
        assert_equal(list(G.in_degree_iter(weight='weight')),[(0,2),(1,1.3),(2,2)])
        assert_equal(dict(G.in_degree_iter(weight='weight')),{0:2,1:1.3,2:2})
        assert_equal(list(G.in_degree_iter(1,weight='weight')),[(1,1.3)])
        assert_equal(list(G.in_degree_iter(weight='other')),[(0,2),(1,2.2),(2,2)])
        assert_equal(dict(G.in_degree_iter(weight='other')),{0:2,1:2.2,2:2})
        assert_equal(list(G.in_degree_iter(1,weight='other')),[(1,2.2)])
        assert_equal(list(G.in_degree_iter(iter([1]),weight='other')),[(1,2.2)])

    def test_out_degree(self):
        G=self.K3
        assert_equal(list(G.out_degree().values()),[2,2,2])
        assert_equal(G.out_degree(),{0:2,1:2,2:2})
        assert_equal(G.out_degree(0),2)
        assert_equal(G.out_degree([0]),{0:2})
        assert_equal(G.out_degree(iter([0])),{0:2})
        assert_raises((KeyError,networkx.NetworkXError), G.out_degree,-1)

    def test_out_degree_iter_weighted(self):
        G=self.K3
        G.add_edge(0,1,weight=0.3,other=1.2)
        assert_equal(list(G.out_degree_iter(weight='weight')),[(0,1.3),(1,2),(2,2)])
        assert_equal(dict(G.out_degree_iter(weight='weight')),{0:1.3,1:2,2:2})
        assert_equal(list(G.out_degree_iter(0,weight='weight')),[(0,1.3)])
        assert_equal(list(G.out_degree_iter(weight='other')),[(0,2.2),(1,2),(2,2)])
        assert_equal(dict(G.out_degree_iter(weight='other')),{0:2.2,1:2,2:2})
        assert_equal(list(G.out_degree_iter(0,weight='other')),[(0,2.2)])
        assert_equal(list(G.out_degree_iter(iter([0]),weight='other')),[(0,2.2)])

    def test_out_degree_iter(self):
        G=self.K3
        assert_equal(list(G.out_degree_iter()),[(0,2),(1,2),(2,2)])
        assert_equal(dict(G.out_degree_iter()),{0:2,1:2,2:2})
        assert_equal(list(G.out_degree_iter(0)),[(0,2)])
        assert_equal(list(G.out_degree_iter(iter([0]))),[(0,2)])

    def test_size(self):
        G=self.K3
        assert_equal(G.size(),6)
        assert_equal(G.number_of_edges(),6)

    def test_to_undirected_reciprocal(self):
        # reciprocal=True keeps an undirected edge only when both
        # directions exist in the digraph
        G=self.Graph()
        G.add_edge(1,2)
        assert_true(G.to_undirected().has_edge(1,2))
        assert_false(G.to_undirected(reciprocal=True).has_edge(1,2))
        G.add_edge(2,1)
        assert_true(G.to_undirected(reciprocal=True).has_edge(1,2))

    def test_reverse_copy(self):
        # reverse() defaults to returning an independent copy
        G=networkx.DiGraph([(0,1),(1,2)])
        R=G.reverse()
        assert_equal(sorted(R.edges()),[(1,0),(2,1)])
        R.remove_edge(1,0)
        assert_equal(sorted(R.edges()),[(2,1)])
        assert_equal(sorted(G.edges()),[(0,1),(1,2)])

    def test_reverse_nocopy(self):
        # copy=False reverses in place: mutating R mutates G
        G=networkx.DiGraph([(0,1),(1,2)])
        R=G.reverse(copy=False)
        assert_equal(sorted(R.edges()),[(1,0),(2,1)])
        R.remove_edge(1,0)
        assert_equal(sorted(R.edges()),[(2,1)])
        assert_equal(sorted(G.edges()),[(2,1)])
class BaseAttrDiGraphTester(BaseDiGraphTester,BaseAttrGraphTester):
    """Combine the directed and attribute interface tests (no extra tests)."""
    pass
class TestDiGraph(BaseAttrDiGraphTester,TestGraph):
    """Tests specific to dict-of-dict-of-dict digraph data structure"""

    def setUp(self):
        # Build the fixtures by assigning the internal dict-of-dict-of-dict
        # structures directly: K3 = complete digraph on 3 nodes,
        # P3 = directed path 0 -> 1 -> 2.
        self.Graph=networkx.DiGraph
        # build dict-of-dict-of-dict K3
        ed1,ed2,ed3,ed4,ed5,ed6 = ({},{},{},{},{},{})
        self.k3adj={0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1:ed6}}
        self.k3edges=[(0, 1), (0, 2), (1, 2)]
        self.k3nodes=[0, 1, 2]
        self.K3=self.Graph()
        # adj/succ/edge are aliases of the same successor dict
        self.K3.adj = self.K3.succ = self.K3.edge = self.k3adj
        self.K3.pred={0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1:ed4}}
        ed1,ed2 = ({},{})
        self.P3=self.Graph()
        self.P3.adj={0: {1: ed1}, 1: {2: ed2}, 2: {}}
        self.P3.succ=self.P3.adj
        self.P3.pred={0: {}, 1: {0: ed1}, 2: {1: ed2}}
        self.K3.node={}
        self.K3.node[0]={}
        self.K3.node[1]={}
        self.K3.node[2]={}
        self.P3.node={}
        self.P3.node[0]={}
        self.P3.node[1]={}
        self.P3.node[2]={}

    def test_data_input(self):
        G=self.Graph(data={1:[2],2:[1]}, name="test")
        assert_equal(G.name,"test")
        assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})])
        assert_equal(sorted(G.succ.items()),[(1, {2: {}}), (2, {1: {}})])
        assert_equal(sorted(G.pred.items()),[(1, {2: {}}), (2, {1: {}})])

    def test_add_edge(self):
        G=self.Graph()
        G.add_edge(0,1)
        assert_equal(G.adj,{0: {1: {}}, 1: {}})
        assert_equal(G.succ,{0: {1: {}}, 1: {}})
        assert_equal(G.pred,{0: {}, 1: {0:{}}})
        G=self.Graph()
        G.add_edge(*(0,1))
        assert_equal(G.adj,{0: {1: {}}, 1: {}})
        assert_equal(G.succ,{0: {1: {}}, 1: {}})
        assert_equal(G.pred,{0: {}, 1: {0:{}}})

    def test_add_edges_from(self):
        # Per-edge attribute dicts override the common data= keyword
        G=self.Graph()
        G.add_edges_from([(0,1),(0,2,{'data':3})],data=2)
        assert_equal(G.adj,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}})
        assert_equal(G.succ,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}})
        assert_equal(G.pred,{0: {}, 1: {0: {'data':2}}, 2: {0: {'data':3}}})
        assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)]) # too few in tuple
        assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3)]) # too many in tuple
        assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple

    def test_remove_edge(self):
        G=self.K3
        G.remove_edge(0,1)
        assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}})
        assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1:{}}})
        assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)

    def test_remove_edges_from(self):
        G=self.K3
        G.remove_edges_from([(0,1)])
        assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}})
        assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1: {}}})
        G.remove_edges_from([(0,0)]) # silent fail
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  # op.inputs = [input_sizes, filter, out_backprop]; input_sizes is an
  # integer shape tensor, so its gradient is None.
  return [None,
          nn_ops.conv2d_backprop_filter(grad, array_ops.shape(op.inputs[1]),
                                        op.inputs[2], op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format")),
          nn_ops.conv2d(grad, op.inputs[1], op.get_attr("strides"),
                        op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
                        op.get_attr("data_format"))]
@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
  """Gradients for Conv2DBackpropFilter.

  op.inputs = [input, filter_sizes, out_backprop]; filter_sizes is a shape
  tensor, so its gradient is None.
  """
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]), grad, op.inputs[2],
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format")),
      None,
      nn_ops.conv2d(
          op.inputs[0], grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format"))
  ]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
  """Gradients for Conv3D: d(input) and d(filter) via the *_v2 backprop ops."""
  data_format = op.get_attr("data_format")
  return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
                                          op.inputs[1],
                                          grad,
                                          strides=op.get_attr("strides"),
                                          padding=op.get_attr("padding"),
                                          data_format=data_format),
          nn_ops.conv3d_backprop_filter_v2(op.inputs[0],
                                           array_ops.shape(op.inputs[1]),
                                           grad,
                                           strides=op.get_attr("strides"),
                                           padding=op.get_attr("padding"),
                                           data_format=data_format)]
@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
  """Gradients for Conv3DBackpropInputV2.

  op.inputs = [input_sizes, filter, out_backprop]; the shape tensor gets None.
  """
  data_format = op.get_attr("data_format")
  return [None,
          nn_ops.conv3d_backprop_filter_v2(grad,
                                           array_ops.shape(op.inputs[1]),
                                           op.inputs[2],
                                           strides=op.get_attr("strides"),
                                           padding=op.get_attr("padding"),
                                           data_format=data_format),
          nn_ops.conv3d(grad,
                        op.inputs[1],
                        strides=op.get_attr("strides"),
                        padding=op.get_attr("padding"),
                        data_format=data_format)]
@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
  """Gradients for Conv3DBackpropFilterV2.

  op.inputs = [input, filter_sizes, out_backprop]; the shape tensor gets None.
  """
  data_format = op.get_attr("data_format")
  return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
                                          grad,
                                          op.inputs[2],
                                          strides=op.get_attr("strides"),
                                          padding=op.get_attr("padding"),
                                          data_format=data_format),
          None,
          nn_ops.conv3d(op.inputs[0],
                        grad,
                        strides=op.get_attr("strides"),
                        padding=op.get_attr("padding"),
                        data_format=data_format)]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
  """Gradient of 3-D average pooling w.r.t. its input."""
  return gen_nn_ops._avg_pool3d_grad(
      array_ops.shape(op.inputs[0]),
      grad,
      ksize=op.get_attr("ksize"),
      strides=op.get_attr("strides"),
      padding=op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPool3DGrad")
def _AvgPool3DGradGrad(op, grad):
  """Second-order gradient for AvgPool3D: re-apply avg_pool3d to grad.

  op.inputs[0] is the original-input shape tensor (see _AvgPool3DGrad), so
  it is excluded from differentiation via stop_gradient.
  """
  return (array_ops.stop_gradient(op.inputs[0]), gen_nn_ops.avg_pool3d(
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      op.get_attr("padding"),
      data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
  """Gradient of 3-D max pooling w.r.t. its input."""
  return gen_nn_ops._max_pool3d_grad(
      op.inputs[0],
      op.outputs[0],
      grad,
      ksize=op.get_attr("ksize"),
      strides=op.get_attr("strides"),
      padding=op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPool3DGrad")
def _MaxPool3DGradGrad(op, grad):
  """Second-order gradient for MaxPool3D.

  Returns zeros for the original input/output tensor arguments and routes
  grad through the dedicated grad-grad kernel.
  """
  return (array_ops.zeros(
      shape=array_ops.shape(op.inputs[0]),
      dtype=op.inputs[0].dtype), array_ops.zeros(
          shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
          gen_nn_ops._max_pool3d_grad_grad(
              op.inputs[0],
              op.inputs[1],
              grad,
              op.get_attr("ksize"),
              op.get_attr("strides"),
              padding=op.get_attr("padding"),
              data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool3DGradGrad")
def _MaxPool3DGradGradGrad(op, grad):
  """Third-order gradient for MaxPool3D: zeros for the forward tensors,
  grad routed back through _max_pool3d_grad."""
  return (array_ops.zeros(
      shape=array_ops.shape(op.inputs[0]),
      dtype=op.inputs[0].dtype), array_ops.zeros(
          shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
          gen_nn_ops._max_pool3d_grad(
              op.inputs[0],
              op.inputs[1],
              grad,
              op.get_attr("ksize"),
              op.get_attr("strides"),
              padding=op.get_attr("padding"),
              data_format=op.get_attr("data_format")))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
  """The derivative of the softmax nonlinearity.

  We assume that probs is of shape [batch_size * dim].
  The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
  This matrix is diagonal minus a rank one matrix, so it is easy to implement
  as follows:

    grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax

  Args:
    op: the Softmax op.
    grad_softmax: the tensor representing the gradient w.r.t. the
      softmax output.

  Returns:
    gradient w.r.t the input to the softmax
  """
  # TODO(ilyasu): assert that the tensor has two dimensions at
  # graph-construction time? Alternatively: do different things
  # depending on the dimensionality of the input tensors.
  softmax = op.outputs[0]
  # Per-example inner product <grad_softmax, softmax>, reshaped to a
  # [batch, 1] column so it broadcasts over the class dimension.
  inner = math_ops.reduce_sum(grad_softmax * softmax, [1])
  inner = array_ops.reshape(inner, [-1, 1])
  return (grad_softmax - inner) * softmax
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
  """The gradient for log_softmax.

      log_softmax = input - log(sum(exp(input))
      dlog_softmax/dinput = diag - softmax(input)

  Args:
    op: The log softmax op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input.
  """
  # The forward output is log-softmax, so exp() recovers the softmax.
  softmax = math_ops.exp(op.outputs[0])
  grad_total = math_ops.reduce_sum(grad, 1, keep_dims=True)
  return grad - grad_total * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The gradient w.r.t. the value tensor is the received gradient unchanged.
  The gradient w.r.t. the bias vector is the received gradient reduced over
  every dimension except the feature dimension, computed by the dedicated
  bias_add_grad kernel.

  Args:
    op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor. The gradients passed to the BiasOp.

  Returns:
    Two tensors: the gradient for the "tensor" input and the gradient for
    the "bias" input of the BiasOp.
  """
  # Older graphs may lack the data_format attr; fall back to the default.
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None
  bias_grad = gen_nn_ops.bias_add_grad(
      out_backprop=received_grad, data_format=data_format)
  return (received_grad, bias_grad)
@ops.RegisterGradient("BiasAddGrad")
def _BiasAddGradGrad(op, received_grad):
  """Gradient for the BiasAddGrad op.

  Args:
    op: BiasAddGrad op for which we are calculating gradients.
    received_grad: The gradients passed to the BiasAddGrad op.

  Returns:
    A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd)
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None
  shape = array_ops.shape(op.inputs[0])
  rank = array_ops.rank(op.inputs[0])
  bias_shape = array_ops.shape(received_grad)
  # Expand the 1-D bias gradient to the rank of the original input (with the
  # channel dimension placed per data_format), then tile it back out so it
  # broadcasts to the original input shape.
  if data_format == b"NCHW":
    expanded_shape = array_ops.concat([
        array_ops.ones_like(shape[:-3]), bias_shape,
        array_ops.ones_like(shape[-2:])
    ], 0)
    tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
  else:
    # NHWC (default): channel is the last dimension.
    expanded_shape = array_ops.concat(
        [array_ops.ones_like(shape[:-1]), bias_shape], 0)
    tile_mults = array_ops.concat([shape[:-1], [1]], 0)
  expanded_grad = array_ops.reshape(received_grad, expanded_shape)
  return array_ops.tile(expanded_grad, tile_mults)
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The gradient w.r.t. the value tensor is the received gradient unchanged;
  the gradient w.r.t. the bias vector is the received gradient summed over
  every dimension except the last one.

  Args:
    unused_bias_op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor. The gradients passed to the BiasOp.

  Returns:
    Two tensors: the gradient for the "tensor" input and the gradient for
    the "bias" input of the BiasOp.
  """
  # All axes except the trailing (bias) axis.
  reduce_dims = math_ops.range(array_ops.rank(received_grad) - 1)
  bias_grad = math_ops.reduce_sum(received_grad, reduce_dims)
  return (received_grad, bias_grad)
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
  """Gradient of Relu, computed from the op's output via the fused kernel."""
  return gen_nn_ops._relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("EluGrad")
def _EluGradGrad(op, grad):
  """Second-order gradient for Elu.

  op.inputs[1] is the second argument of the forward EluGrad call — the elu
  activations (see _EluGrad, which passes op.outputs[0]).
  """
  elu_x = op.inputs[1]
  return (gen_nn_ops._elu_grad(grad, op.outputs[0]),
          array_ops.where(elu_x < 0,
                          grad * op.inputs[0],
                          array_ops.zeros(shape=array_ops.shape(elu_x),
                                          dtype=elu_x.dtype)))
@ops.RegisterGradient("SeluGrad")
def _SeluGradGrad(op, grad):
  """Second-order gradient for Selu."""
  x = op.inputs[1]
  # NOTE(review): presumably scale * alpha of the SELU definition
  # (1.0507... * 1.6732...) — confirm against the Selu kernel constants.
  scale_alpha = 1.7580993408473768599402175208123
  return (gen_nn_ops._elu_grad(grad, op.outputs[0]),
          array_ops.where(
              x < 0., gen_nn_ops._elu_grad(grad, op.outputs[0] + scale_alpha),
              array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype)))
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
  """Gradient of Relu6; note the kernel takes the op *input*, not the output."""
  return gen_nn_ops._relu6_grad(grad, op.inputs[0])
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
  """Gradient of Elu, computed from the op's output via the fused kernel."""
  return gen_nn_ops._elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Selu")
def _SeluGrad(op, grad):
  """Gradient of Selu, computed from the op's output via the fused kernel."""
  return gen_nn_ops._selu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
  """Gradient of Softplus, computed from the op's input via the fused kernel."""
  return gen_nn_ops._softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("SoftplusGrad")
def _SoftplusGradGrad(op, grad):
  """Second-order gradient for Softplus.

  Let:
    y = tf.nn.softplus(x)
    dx = gen_nn_ops._softplus_grad(dy, x) = dy / (1 + exp(-x))
  This op computes (ddy, d2x) from op.inputs == [dy, x] and grad == ddx.
  """
  dy, x = op.inputs
  with ops.control_dependencies([grad.op]):
    ddy = gen_nn_ops._softplus_grad(grad, x)  # pylint: disable=protected-access
    # d/dx [dy / (1 + exp(-x))] = dy / (exp(-x) + 2 + exp(x))
    d2x = grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x))
    return (ddy, d2x)
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
  """Gradient of Softsign, computed from the op's input via the fused kernel."""
  return gen_nn_ops._softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
  """Second-order gradient for Relu: zero gradient w.r.t. the activations."""
  x = op.inputs[1]
  return (gen_nn_ops._relu_grad(grad, x), array_ops.zeros(
      shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
  """Multiply after broadcasting vec to match dimensions of mat.

  Args:
    vec: A 1-D tensor of dimension [D0]
    mat: A 2-D tensor of dimension [D0, D1]

  Returns:
    A tensor of dimension [D0, D1], the result of vec * mat
  """
  # Promote vec to a [D0, 1] column so elementwise * broadcasts along D1.
  return array_ops.expand_dims(vec, -1) * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
  """Gradient function for SoftmaxCrossEntropyWithLogits."""
  # grad_loss is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # grad_grad is the backprop for softmax gradient.
  # There is no gradient for the labels
  #
  # Second derivative is just softmax derivative w.r.t. logits.
  softmax_grad = op.outputs[1]
  grad = _BroadcastMul(grad_loss, softmax_grad)

  def IsZero(g):
    # Some introspection to check if the gradient is feeding zeros
    if g.op.type in ("ZerosLike", "Zeros"):
      return True
    const_fill_value = tensor_util.constant_value(g)
    return const_fill_value is not None and (const_fill_value == 0).all()

  # Only add the second-order term when grad_grad is not statically zero.
  if not IsZero(grad_grad):
    logits = op.inputs[0]
    softmax = nn_ops.softmax(logits)

    grad += ((grad_grad - array_ops.squeeze(
        math_ops.matmul(grad_grad[:, None, :],
                        softmax[:, :, None]), axis=1)) * softmax)

  # No gradient for the labels input.
  return grad, None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient function for SparseSoftmaxCrossEntropyWithLogits.

  Returns:
    The gradient w.r.t. the logits and None for the labels.
  """
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # There is no gradient for the labels
  #
  # Currently there is no way to take the second derivative of this op
  # due to the fused implementation's interaction with tf.gradients(),
  # so we make sure we prevent silently incorrect results by raising
  # an error if the second derivative is requested via prevent_gradient.
  sparse_softmax_grad_without_gradient = array_ops.prevent_gradient(
      op.outputs[1], message="Currently there is no way to take the second "
      "derivative of sparse_softmax_cross_entropy_with_logits due to the fused "
      "implementation's interaction with tf.gradients()")
  return _BroadcastMul(grad_0, sparse_softmax_grad_without_gradient), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
  """Gradients for Conv2D: d(input) and d(filter) via the backprop kernels."""
  return [nn_ops.conv2d_backprop_input(
      array_ops.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr("strides"),
      op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
      op.get_attr("data_format")),
          nn_ops.conv2d_backprop_filter(op.inputs[0],
                                        array_ops.shape(op.inputs[1]), grad,
                                        op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format"))]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
  """Gradients for DepthwiseConv2dNative: d(input) and d(filter)."""
  return [
      nn_ops.depthwise_conv2d_native_backprop_input(
          array_ops.shape(op.inputs[0]),
          op.inputs[1],
          grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          data_format=op.get_attr("data_format")),
      nn_ops.depthwise_conv2d_native_backprop_filter(
          op.inputs[0],
          array_ops.shape(op.inputs[1]),
          grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          data_format=op.get_attr("data_format"))
  ]
@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
  """Gradients for Dilation2D: d(input) and d(filter)."""
  return [nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad,
                                           op.get_attr("strides"),
                                           op.get_attr("rates"),
                                           op.get_attr("padding")),
          nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad,
                                            op.get_attr("strides"),
                                            op.get_attr("rates"),
                                            op.get_attr("padding"))]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
  """Gradient of local response normalization w.r.t. its single input."""
  # Forward the op's attrs straight into the fused gradient kernel.
  return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0],
                               op.get_attr("depth_radius"),
                               op.get_attr("bias"),
                               op.get_attr("alpha"),
                               op.get_attr("beta"))]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
  """Gradient of 2-D average pooling w.r.t. its input."""
  return gen_nn_ops._avg_pool_grad(
      array_ops.shape(op.inputs[0]),
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      op.get_attr("padding"),
      data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPoolGrad")
def _AvgPoolGradGrad(op, grad):
  """Second-order gradient for AvgPool: re-apply avg_pool to grad.

  op.inputs[0] is the original-input shape tensor (see _AvgPoolGrad), so it
  is excluded from differentiation via stop_gradient.
  """
  return (array_ops.stop_gradient(op.inputs[0]), gen_nn_ops._avg_pool(
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      op.get_attr("padding"),
      data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
  """Gradient of 2-D max pooling w.r.t. its input."""
  return gen_nn_ops._max_pool_grad(op.inputs[0],
                                   op.outputs[0],
                                   grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   padding=op.get_attr("padding"),
                                   data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPoolV2")
def _MaxPoolGradV2(op, grad):
  """Gradient of MaxPoolV2; ksize/strides are tensor inputs, so their
  gradients are None."""
  ksize = op.inputs[1]
  strides = op.inputs[2]
  return gen_nn_ops.max_pool_grad_v2(op.inputs[0],
                                     op.outputs[0],
                                     grad,
                                     ksize,
                                     strides,
                                     padding=op.get_attr("padding"),
                                     data_format=op.get_attr("data_format")), None, None
@ops.RegisterGradient("MaxPoolWithArgmax")
def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
  """Gradient of MaxPoolWithArgmax; routes grad using the argmax output."""
  return gen_nn_ops._max_pool_grad_with_argmax(op.inputs[0],
                                               grad,
                                               op.outputs[1],
                                               op.get_attr("ksize"),
                                               op.get_attr("strides"),
                                               padding=op.get_attr("padding"))
@ops.RegisterGradient("MaxPoolGrad")
def _MaxPoolGradGrad(op, grad):
  """Second-order gradient for MaxPool: zeros for the forward tensors,
  grad routed through the dedicated grad-grad kernel."""
  return (array_ops.zeros(
      shape=array_ops.shape(op.inputs[0]),
      dtype=op.inputs[0].dtype), array_ops.zeros(
          shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
          gen_nn_ops._max_pool_grad_grad(
              op.inputs[0],
              op.inputs[1],
              grad,
              op.get_attr("ksize"),
              op.get_attr("strides"),
              padding=op.get_attr("padding"),
              data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPoolGradV2")
def _MaxPoolGradGradV2(op, grad):
  """Second-order gradient for MaxPoolV2; the ksize/strides tensor inputs
  get None gradients."""
  ksize = op.inputs[3]
  strides = op.inputs[4]
  return (array_ops.zeros(
      shape=array_ops.shape(op.inputs[0]),
      dtype=op.inputs[0].dtype), array_ops.zeros(
          shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
          gen_nn_ops.max_pool_grad_grad_v2(
              op.inputs[0],
              op.inputs[1],
              grad,
              ksize,
              strides,
              padding=op.get_attr("padding"),
              data_format=op.get_attr("data_format")), None, None)
@ops.RegisterGradient("MaxPoolGradGrad")
def _MaxPoolGradGradGrad(op, grad):
  """Third-order gradient for MaxPool: zeros for the forward tensors,
  grad routed back through _max_pool_grad."""
  return (array_ops.zeros(
      shape=array_ops.shape(op.inputs[0]),
      dtype=op.inputs[0].dtype), array_ops.zeros(
          shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
          gen_nn_ops._max_pool_grad(
              op.inputs[0],
              op.inputs[1],
              grad,
              op.get_attr("ksize"),
              op.get_attr("strides"),
              padding=op.get_attr("padding"),
              data_format=op.get_attr("data_format")))
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalMaxPool.

  Since FractionalMaxPool has three outputs, there are three gradients passed in
  for each of the outputs. Only the first one is useful, the other two gradients
  are empty.

  Args:
    op: The FractionalMaxPoolOp.
    grad_0: Gradient with respect to op.outputs[0]
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalMaxPool op.
  """
  # op.outputs[1]/[2] are the row/col pooling sequences needed to replay the
  # pooling regions in the gradient kernel.
  # pylint: disable=protected-access
  return gen_nn_ops._fractional_max_pool_grad(op.inputs[0], op.outputs[0],
                                              grad_0, op.outputs[1],
                                              op.outputs[2],
                                              op.get_attr("overlapping"))
@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalAvgPool.

  Since FractionalAvgPool has three outputs, there are three gradients passed in
  for each of the outputs. Only the first one is useful, the other two gradients
  are empty.

  Args:
    op: The FractionalAvgPoolOp.
    grad_0: Gradient with respect to op.outputs[0]
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalAvgPool op.
  """
  # Unlike the max-pool variant, this kernel needs only the input *shape*.
  # pylint: disable=protected-access
  return gen_nn_ops._fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
                                              op.outputs[1], op.outputs[2],
                                              op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
  """Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.

  We do not backprop anything for the mean and var intentionally as they are
  not being trained with backprop in the operation.

  Args:
    op: The BatchNormOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the BatchNormOp.

  Returns:
    dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
    dm: Backprop for mean, which is
        sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
    dv: Backprop for variance, which is
        sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
    db: Backprop for beta, which is grad reduced in all except the
        last dimension.
    dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
  """
  # op.inputs = [x, mean, variance, beta, gamma]; the fused grad kernel takes
  # x, mean, variance, gamma (inputs[4]) and the incoming gradient.
  dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
      op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
      op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
  return dx, dm, dv, db, dg
@ops.RegisterGradient("FusedBatchNorm")
def _FusedBatchNormGrad(op, *grad):
  """Return the gradients for the 3 inputs of BatchNorm.

  Args:
    op: The BatchNormOp for which we need to compute gradients.
    *grad: An argument list for tensors of gradients wrt the outputs
      with grad[0] as grad_y.

  Returns:
    grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *
            [grad_y - mean(grad_y) - (x - mean(x)) *
            mean(grad_y * (x - mean(x))) / (variance + epsilon)]
            in training mode; grad_y * scale * rsqrt(pop_variance + epsilon)
            in freeze mode.
    grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *
                rsqrt(variance + epsilon)) in training mode;
                sum(grad_y * (x - pop_mean) * rsqrt(pop_variance + epsilon))
                in freeze mode.
    grad_offset: gradient for offset, which is sum(grad_y) in training mode;
                 sum(grad_y) in freeze mode.
  """
  x = op.inputs[0]
  grad_y = grad[0]
  scale = op.inputs[1]
  epsilon = op.get_attr("epsilon")
  data_format = op.get_attr("data_format")
  is_training = op.get_attr("is_training")
  if is_training:
    # Training mode: delegate to the fused gradient kernel, using the saved
    # batch statistics (op.outputs[3], op.outputs[4]).
    return gen_nn_ops.fused_batch_norm_grad(
        grad_y,
        x,
        scale,
        op.outputs[3],
        op.outputs[4],
        epsilon=epsilon,
        data_format=data_format,
        is_training=is_training)
  else:
    # Freeze (inference) mode: compute gradients directly from the
    # population statistics passed as inputs.
    pop_mean = op.inputs[3]
    pop_var = op.inputs[4]
    if data_format == b"NHWC":
      reduce_axis = [0, 1, 2]
    else:
      reduce_axis = [0, 2, 3]
      # For NCHW, broadcast the per-channel tensors to [1, C, 1, 1].
      shape = [1, array_ops.size(pop_mean), 1, 1]
      pop_mean = array_ops.reshape(pop_mean, shape)
      pop_var = array_ops.reshape(pop_var, shape)
      scale = array_ops.reshape(scale, shape)

    grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
    var_rsqrt = math_ops.rsqrt(pop_var + epsilon)
    grad_scale = math_ops.reduce_sum(
        grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis)
    grad_x = grad_y * scale * var_rsqrt
    # No gradients for the population mean/variance inputs.
    return grad_x, grad_scale, grad_offset, None, None
def _BatchNormGrad(grad_y, x, scale, epsilon, data_format):
  """Returns the gradients for the 3 inputs of BatchNorm.

  Args:
    grad_y: A `Tensor` of 4 dimensions for gradient for y.
    x: A `Tensor` of 4 dimensions for x.
    scale: A `Tensor` of 1 dimension for scaling.
    epsilon: A small float number added to the variance of x.
    data_format: The data format for input. Either b"NHWC" or b"NCHW".

  Returns:
    A tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient
    for x, grad_scale the gradient for scale, and grad_offset the gradient
    for offset.
  """
  if data_format == b"NHWC":
    keep_dims = False
    reduce_axis = [0, 1, 2]
  else:
    keep_dims = True
    reduce_axis = [0, 2, 3]
    # For NCHW, broadcast scale to [1, C, 1, 1].
    shape = [1, array_ops.size(scale), 1, 1]
    scale = array_ops.reshape(scale, shape)
  # Batch statistics of the incoming gradient and of x.
  mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keep_dims=keep_dims)
  mean_x = math_ops.reduce_mean(x, reduce_axis, keep_dims=keep_dims)
  var_x = math_ops.reduce_mean(
      math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)),
      reduce_axis,
      keep_dims=keep_dims)
  grad_y_offset = grad_y - mean_grad_y
  x_offset = x - mean_x
  mean = math_ops.reduce_mean(
      grad_y * x_offset, axis=reduce_axis, keep_dims=keep_dims)
  grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (
      grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)
  grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(
      grad_y * x_offset, axis=reduce_axis, keep_dims=keep_dims)
  if data_format == b"NCHW":
    # Drop the broadcast [1, C, 1, 1] dims back to a 1-D per-channel tensor.
    grad_scale = array_ops.squeeze(grad_scale)
  grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
  return grad_x, grad_scale, grad_offset
@ops.RegisterGradient("FusedBatchNormGrad")
def _FusedBatchNormGradGrad(op, *grad):
  """Returns the gradients for the 3 inputs of FusedBatchNormGrad.

  Args:
    op: The FusedBatchNormGradOp for which we need to compute gradients.
    *grad: An argument list for tensors of gradients wrt the outputs
      with grad[0] as grad_grad_x, grad[1] as grad_grad_scale,
      grad[2] as grad_grad_offset.

  Returns:
    A tuple (grad_grad_y, grad_x, grad_scale, None, None), where grad_grad_y
    is the gradient for grad_y, grad_x the gradient for x, grad_scale the
    gradient for scale.
  """
  data_format = op.get_attr("data_format")
  epsilon = op.get_attr("epsilon")
  grad_y = op.inputs[0]
  x = op.inputs[1]
  scale = op.inputs[2]
  grad_grad_x = grad[0]
  grad_grad_scale = grad[1]
  grad_grad_offset = grad[2]
  # Rebuild the first-order gradient graph symbolically, then differentiate
  # it with tf.gradients to obtain the second-order terms.
  grad_x, grad_scale, grad_offset = _BatchNormGrad(grad_y, x, scale, epsilon,
                                                   data_format)
  grad_initial = [grad_grad_x, grad_grad_scale, grad_grad_offset]
  grad_grad_y, grad_x, grad_scale = gradients_impl.gradients(
      [grad_x, grad_scale, grad_offset], [grad_y, x, scale], grad_initial)
  return grad_grad_y, grad_x, grad_scale, None, None
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
  """Return the gradients for L2Loss.

  Args:
    op: The L2LossOp for which we need to generate gradients.
    grad: Tensor containing a single number.

  Returns:
    The gradient, which is (x * grad).
  """
  # d/dx [sum(x**2) / 2] = x, scaled by the incoming scalar gradient.
  x = op.inputs[0]
  return x * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors, the first being the gradient w.r.t to the input and
    TopK, and the second being the gradient w.r.t. to the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])

  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))

  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D): each row's indices are offset
  # by that row's start position in the flattened input.
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  return [array_ops.reshape(
      sparse_ops.sparse_to_dense(ind,
                                 array_ops.reshape(
                                     math_ops.reduce_prod(in_shape), [1]),
                                 array_ops.reshape(grad, [-1]),
                                 validate_indices=False),
      in_shape), array_ops.zeros(
          [], dtype=dtypes.int32)]
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import os.path
import time
from datetime import datetime
import json
from logger import Logger
import ogp2solr
import dateutil.parser
import sys
import urllib2
import re
import codecs
import zipfile
import pdb
import glob
try:
import zlib
mode = zipfile.ZIP_DEFLATED
except ImportError:
mode = zipfile.ZIP_STORED
try:
from lxml import etree
except ImportError:
try:
print("\n\nPython lib lxml not found. Using xml.etree instead. Note that pretty printing with xml.etree is not supported.\n\n")
from xml.etree import ElementTree as etree
except ImportError:
print("No xml lib found. Please install lxml lib to continue")
def parse_data_type_FGDC(root):
    """Determine the OGP data type from an FGDC metadata tree.

    Checks, in order, the <geoform>, <direct> and <sdtstype> elements.

    Fix: previously the function implicitly returned None when no element
    matched; it now returns "Undefined", matching its own exception path and
    parse_data_type_MGMG.

    Args:
        root: parsed XML Element for the FGDC document root.

    Returns:
        One of "Paper Map", "Raster", "Point", "Line", "Polygon" or
        "Undefined".
    """
    try:
        if root.find("*//geoform") is not None:
            geoform = root.findtext("*//geoform").lower()
            # "scanned paper map" is already implied by the first two
            # substrings but is kept for clarity.
            if ("scanned" in geoform or
                "paper" in geoform or
                "scanned paper map" in geoform
                ):
                return "Paper Map"
        if root.find("*//direct") is not None:
            direct = root.findtext("*//direct").lower()
            if "raster" in direct:
                return "Raster"
            elif (
                "g-polygon" in direct or
                "polygon" in direct or
                "chain" in direct
            ):
                return "Polygon"
            elif "point" in direct:
                return "Point"
        if root.find("*//sdtstype") is not None:
            sdtstype = root.findtext("*//sdtstype").lower()
            if ("composite" in sdtstype or
                "point" in sdtstype
                ):
                return "Point"
            elif "string" in sdtstype:
                return "Line"
            elif ("g-polygon" in sdtstype or
                  "polygon" in sdtstype or
                  "chain" in sdtstype
                  ):
                return "Polygon"
        # Nothing matched: previously fell through and returned None.
        return "Undefined"
    except AttributeError:
        return "Undefined"
def parse_data_type_MGMG(root):
    """Determine the OGP data type from an MGMG (Minnesota) metadata tree.

    Uses <direct> (raster/point/vector); for vector data inspects
    <mgmg3obj>, then falls back to <sdtstype> and finally "Undefined".

    Fixes:
      * `"TIN" in mgmg3obj` could never match because the element text is
        lower-cased first; the comparison is now lowercase ("tin").
      * the repeated findtext("*//direct") lookups are hoisted into one call
        (a missing <direct> still raises AttributeError, handled below).
      * a final "Undefined" fallback covers an <sdtstype> that matches no
        branch (previously returned None).

    Args:
        root: parsed XML Element for the MGMG document root.

    Returns:
        One of "Raster", "Point", "Line", "Polygon" or "Undefined".
    """
    try:
        direct = root.findtext("*//direct").lower()
        if direct == "raster":
            return "Raster"
        if direct == "point":
            return "Point"
        if direct == "vector":
            mgmg3obj = root.find("*//mgmg3obj")
            if mgmg3obj is not None:
                mgmg3obj = mgmg3obj.text.lower()
                if (
                    "area" in mgmg3obj or
                    "polygon" in mgmg3obj or
                    "region" in mgmg3obj or
                    "tin" in mgmg3obj
                ):
                    return "Polygon"
                elif (
                    "line" in mgmg3obj or
                    "network" in mgmg3obj or
                    "route-section" in mgmg3obj or
                    "arc" in mgmg3obj
                ):
                    return "Line"
                elif (
                    "node" in mgmg3obj or
                    "point" in mgmg3obj or
                    "label" in mgmg3obj
                ):
                    return "Point"
        if root.find("*//sdtstype") is not None:
            sdtstype = root.findtext("*//sdtstype").lower()
            if ("composite" in sdtstype or
                "point" in sdtstype
                ):
                return "Point"
            elif "string" in sdtstype:
                return "Line"
            elif ("g-polygon" in sdtstype or
                  "polygon" in sdtstype or
                  "chain" in sdtstype
                  ):
                return "Polygon"
        return "Undefined"
    except AttributeError:
        print("Can't determine data type, setting to Undefined for now")
        return "Undefined"
def _build_base_ogp_tree():
# build empty etree to house output doc
ogp_root = etree.Element("add", allowDups="false")
return ogp_root
def _process_field_handlers(doc):
    """Render doc.field_handlers into an OGP <add><doc> element tree.

    Each entry becomes a <field name="..."> element; callable handlers are
    invoked for their value, plain values are used directly. Missing keys or
    bad values are reported and skipped.
    """
    ogp_root = _build_base_ogp_tree()
    doc_node = etree.SubElement(ogp_root, "doc")
    for name in doc.field_handlers:
        try:
            field_node = etree.SubElement(doc_node, "field", name=name)
            handler = doc.field_handlers[name]
            field_node.text = handler() if callable(handler) else handler
        except (KeyError, ValueError) as err:
            print("Nonexistent key: ", name)
            print(err)
    return ogp_root
def write_ogp_tree(doc):
    """Build and return the OGP XML tree for doc (see _process_field_handlers)."""
    return _process_field_handlers(doc)
class baseOGP(object):
    def __init__(self, output_path, md):
        """Base converter: turns metadata files into OGP Solr XML.

        Args:
            output_path: directory for output, logs and the optional zip.
            md: metadata standard key (e.g. "fgdc", "mgmg", "iso", "guess").
        """
        # Normalized output directory (no trailing slash).
        self.output_path = output_path.rstrip('/')
        self.log = self.create_log()
        self.md = md.lower()
        self.indirect_links = False
        self.logging_only = False
        self.zip= False
        self.to_solr = False
        # self.zip is still False here, so init_zip() returns None; a real
        # ZipFile is only created via set_zip().
        self.zip_file = self.init_zip()
        # Optional field-name -> fixed-value map loaded by set_overrides().
        self.overrides = {}
def set_overrides(self,f):
if not os.path.isfile(f):
sys.exit("Override file does not seem to exist")
self.overrides = json.load(open(f))
def init_zip(self):
if self.zip:
d = datetime.now()
ds = d.strftime('%m%d%Y_%H%M')
zip_file_name = os.path.join(self.output_path,
self.output_path.split(os.path.sep)[-1] + "_" + ds + "_OGP.zip")
return zipfile.ZipFile(zip_file_name, 'a', mode)
else:
return None
    def add_to_zip(self,tree,name):
        # Serialize the OGP tree and store it in the shared zip as
        # "<basename of name>_OGP.xml".
        self.zip_file.writestr(
            os.path.split(name)[1]+"_OGP.xml", etree.tostring(tree)
        )
    def set_indirect_links(self):
        # Flag passed into the *Document constructors in process_file().
        self.indirect_links = True
def set_zip(self):
self.zip = True
self.zip_file = self.initZip()
    def set_logging_only(self):
        # NOTE(review): flag is set here but not read anywhere in this
        # chunk — presumably consulted by callers/subclasses; confirm.
        self.logging_only = True
    def create_log(self):
        # One Logger per run, rooted at the output directory.
        return Logger(self.output_path)
def process_for_solr(self,listoffiles):
trees = []
for f in listoffiles:
trees.append(self.process_file(f))
self.solr.add_to_solr_bulk(trees)
    def set_solr(self, url="http://54.235.211.28:8080/solr/collection1/"):
        # NOTE(review): the default URL is a hard-coded remote Solr core —
        # confirm it is still valid before relying on the default.
        self.to_solr = True
        self.solr = ogp2solr.SolrOGP(url)
    def process_list_of_files(self, listoffiles):
        """Convert each metadata file to OGP XML, then close the log (and zip).

        Args:
            listoffiles: iterable of metadata file paths to process.
        """
        if not os.path.exists(self.output_path):
            try:
                os.mkdir(self.output_path)
            except OSError:
                # Best effort: report the bad path but keep processing so
                # per-file errors remain visible.
                print("There's a problem with the output path: {path}. Are you sure you entered it correctly?".format(path=self.output_path))
        for f in listoffiles:
            self.process_file(f)
        # when done, close the log file and zip
        self.log.close()
        if self.zip:
            self.zip_file.close()
def check_for_BOM(self,filename):
with open(filename, "r") as opened_file:
contents = opened_file.read()
if contents[:3] == codecs.BOM_UTF8:
print "found BOM in {name}".format(name=filename)
return contents[3:]
else:
return False
def remove_BOM(self, filename, new_contents):
write_file = open(filename, "w")
write_file.write(new_contents)
write_file.close()
    def process_file(self, filename):
        """Convert one source metadata XML file into an OGP record.

        Parses `filename`, chooses the document class matching self.md
        (or sniffs the standard when md == "guess"), builds the OGP
        <add><doc> tree from the chosen class's field_handlers, and then
        writes, zips, or returns the tree depending on the output flags.
        """
        print('Starting file:', filename)
        # parse the current XML into an etree
        tree = etree.ElementTree()
        try:
            root = tree.parse(filename, etree.XMLParser(encoding="utf-8"))
        except Exception as e:
            # a UTF-8 BOM makes the parser choke; strip it in place and retry once
            bom_contents = self.check_for_BOM(filename)
            if bom_contents:
                self.remove_BOM(filename, bom_contents)
                root = tree.parse(filename, etree.XMLParser(encoding="utf-8"))
            else:
                raise e
        # grab the full text of the current XML for later use
        fullText = etree.tostring(root)
        doc = False
        # dispatch on the metadata standard chosen at construction time
        if self.md == "mgmg":
            doc = MGMGDocument(root, filename, self.log, self.indirect_links)
        elif self.md == "fgdc":
            doc = FGDCDocument(root, filename, self.log, self.indirect_links)
        elif self.md == "arcgis":
            doc = ArcGISDocument(root, filename, self.log, self.indirect_links)
        elif self.md == "iso":
            doc = ISODocument(root, filename, self.log, self.indirect_links)
        elif self.md == "eod":
            doc = EsriOpenDataISODocument(root, filename, self.log, self.indirect_links)
        elif self.md == "marc":
            doc = MARCXMLDocument(root, filename, self.log, self.indirect_links)
        elif self.md == "gdrs":
            doc = GDRSDocument(root, filename, self.log, self.indirect_links)
        elif self.md == "guess":
            # sniff the standard: FGDC/MGMG carry metainfo/metstdn, MARC a
            # collection/record, ISO a *_Metadata root tag
            if root.find("metainfo/metstdn") is not None:
                if "Minnesota" in root.find("metainfo/metstdn").text:
                    doc = MGMGDocument(root, filename, self.log, self.indirect_links)
                elif "FGDC" in root.find("metainfo/metstdn").text:
                    doc = FGDCDocument(root, filename, self.log, self.indirect_links)
            elif root.find("collection/record") is not None:
                doc = MARCXMLDocument(root, filename, self.log, self.indirect_links)
            elif "_Metadata" in root.tag:
                doc = ISODocument(root, filename, self.log, self.indirect_links)
            else:
                self.log.write(filename, 'metadata standard undecipherable')
        if doc:
            root_element = _build_base_ogp_tree()
            doc_element = etree.SubElement(root_element, "doc")
            # one <field> per OGP field; overrides beat computed handlers,
            # callables are invoked, plain strings are used verbatim
            for field in doc.field_handlers:
                try:
                    fieldEle = etree.SubElement(doc_element, "field", name=field)
                    # NOTE: dict.has_key is Python-2-only
                    if self.overrides.has_key(field):
                        fieldEle.text = self.overrides[field]
                    elif hasattr(doc.field_handlers[field], '__call__'):
                        fieldEle.text = doc.field_handlers[field].__call__()
                    else:
                        fieldEle.text = doc.field_handlers[field]
                except KeyError as e:
                    print("Nonexistant key: ", field)
            # embed the full original XML for OGP's FgdcText field
            fullTextElement = etree.SubElement(doc_element, "field", name="FgdcText")
            fullTextElement.text = fullText
            if not self.logging_only:
                new_tree = etree.ElementTree(root_element)
                # GDRS files are all named the same, so use the parent
                # directory name for the output file instead
                if self.md == "gdrs":
                    resultName = os.path.join(self.output_path, filename.split(os.path.sep)[-2] +".xml")
                else:
                    resultName = os.path.join(self.output_path,
                        os.path.splitext(os.path.split(filename)[1])[0] + "_OGP.xml")
                # check for duplicate names (since w're looking across records with similar dataset content)
                # and add an _ to the end to avoid overwriting
                if os.path.exists(resultName):
                    resultName = os.path.splitext(resultName)[0] + "_" + os.path.splitext(resultName)[1]
                if self.zip:
                    print('Writing: ' + resultName)
                    self.add_to_zip(new_tree,filename)
                elif self.to_solr:
                    # caller (process_for_solr) collects the returned trees
                    return new_tree
                elif "lxml" in etree.__name__:
                    # lxml supports pretty printing; stdlib ElementTree does not
                    print('Writing: ' + resultName)
                    new_tree.write(resultName, pretty_print=True)
                else:
                    print('Writing: ' + resultName)
                    new_tree.write(resultName)
class MetadataDocument(object):
    """
    Base class for metadata documents. This is where OGP fields that are handled the same
    across standards are implemented.

    Subclasses override the per-field methods below; field_handlers binds
    them at instantiation time, so an override is picked up automatically.
    Stub methods return None, which leaves the corresponding OGP field empty.
    """
    def __init__(self, root, file_name, log, indirect_links):
        # root: parsed etree root of the source metadata XML
        # file_name: path to the source file (used for ids and logging)
        # log: shared Logger instance for the processing run
        # indirect_links: when True, onlink URLs become externalDownload
        self.log = log
        self.root = root
        self.file_name = file_name
        self.indirect_links = indirect_links
        """
        For docs without a specified bounding box, will use the below,
        by default set to state of MN
        """
        self.DEFAULT_BBOX = {
            "min_x": "-97.5",
            "max_x": "-89",
            "min_y": "43",
            "max_y": "49.5"
        }
        # create a dictionary containing the OGP field names as keys, mapping
        # the corresponding class method (or hardcoded string) as value
        self.field_handlers = {
            # hardcoded values first
            "Access": "Public",
            "Availability": "Online",
            "Institution": "Minnesota",
            "InstitutionSort": "Minnesota",
            "CollectionId": "initial collection",
            "WorkspaceName": "edu.umn",
            # the rest are associated with a method
            "Publisher": self.publisher,
            "PublisherSort": self.publisher,
            "Originator": self.originator,
            "OriginatorSort": self.originator,
            "DataType": self.data_type,
            "ThemeKeywords": self.theme_keywords,
            "PlaceKeywords": self.place_keywords,
            "LayerId": self.layer_id,
            "Location": self.location,
            "Name": self.name,
            "LayerDisplayName": self.layer_display_name,
            "LayerDisplayNameSort": self.layer_display_name,
            "ContentDate": self.content_date,
            "Abstract": self.abstract,
            "MinX": self.min_x,
            "MaxX": self.max_x,
            "MinY": self.min_y,
            "MaxY": self.max_y,
            "CenterX": self.center_x,
            "CenterY": self.center_y
        }
    # field methods
    def _file_name_sans_extension(self):
        """Return the basename of the source file with its extension removed."""
        file_name = os.path.basename(self.file_name)
        file_name = "".join(file_name.split(".")[:-1])
        return file_name
    def layer_id(self):
        """Default layer id: file name plus a timestamp for uniqueness."""
        return self._file_name_sans_extension() + str(time.time()).replace('.', '')
    def name(self):
        """Default Name field: the file name without extension."""
        return self._file_name_sans_extension()
    def data_type(self):
        # see standard specific sub-class implementation
        pass
    def theme_keywords(self):
        # see standard specific sub-class implementation
        pass
    def place_keywords(self):
        # see standard specific sub-class implementation
        pass
    def publisher(self):
        # see standard specific sub-class implementation
        pass
    def originator(self):
        # see standard specific sub-class implementation
        pass
    def layer_display_name(self):
        # see standard specific sub-class implementation
        pass
    def _location_check_indirect(self,d,loc):
        """Store `loc` in dict `d` under externalDownload or download,
        depending on the indirect_links flag; returns `d`."""
        if self.indirect_links:
            d['externalDownload'] = loc
        else:
            d['download'] = loc
        return d
    def location(self):
        # see standard specific sub-class implementation
        pass
    def content_date(self):
        # see standard specific sub-class implementation
        pass
    def abstract(self):
        # see standard specific sub-class implementation
        pass
    def min_x(self):
        # see standard specific sub-class implementation
        pass
    def min_y(self):
        # see standard specific sub-class implementation
        pass
    def max_x(self):
        # see standard specific sub-class implementation
        pass
    def max_y(self):
        # see standard specific sub-class implementation
        pass
    def center_x(self):
        # see standard specific sub-class implementation
        pass
    def center_y(self):
        # see standard specific sub-class implementation
        pass
class ISODocument(MetadataDocument):
    """MetadataDocument specialized for ISO 19139 records.

    Only sets up the XML namespace map (NSMAP) used by subclasses'
    XPath lookups; field handlers come from the base class.
    """
    def __init__(self, root, filename, log, indirect_links):
        super(ISODocument, self).__init__(root, filename, log, indirect_links)
        # namespace prefixes referenced by the ISO XPath expressions
        self.NSMAP = {
            "gmd": "http://www.isotc211.org/2005/gmd",
            "gco": "http://www.isotc211.org/2005/gco",
            "gts": "http://www.isotc211.org/2005/gts",
            "srv": "http://www.isotc211.org/2005/srv",
            "gml": "http://www.opengis.net/gml",
            "xlink": "http://www.w3.org/1999/xlink",
            "xsi": "http://www.w3.org/2001/XMLSchema-instance",
        }
# _______ _______.. ______ __
# | ____| / | | _ \ | |
# | |__ | (----` | |_) | | |
# | __| \ \ | / | |
# | |____ .----) | | |\ \----. | |
# |_______| |_______/ | _| `._____| |__|
class EsriOpenDataISODocument(ISODocument):
    """
    Handle a particular instance of ISO document created by another script that parses
    Esri Open Data sites' data.json files.

    Fixes over the original: debug `print` statements removed, `is`/`is not`
    identity comparisons against -1 replaced with equality tests, and
    missing-element guards added to data_type/place_keywords.
    """
    def __init__(self, root, filename, log, indirect_links):
        super(EsriOpenDataISODocument, self).__init__(root, filename, log, indirect_links)
        # XPaths (relative to the document root, namespaced via self.NSMAP)
        # for every element this class reads
        self.PATHS = {
            "title" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:title/gco:CharacterString",
            "pubdate" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:DateTime",
            "onlink" : "gmd:distributionInfo/gmd:MD_Distribution/gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource/gmd:linkage/gmd:URL",
            "origin" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:citedResponsibleParty/gmd:CI_ResponsibleParty/gmd:organisationName/gco:CharacterString",
            "publish" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:citedResponsibleParty/gmd:CI_ResponsibleParty/gmd:organisationName/gco:CharacterString",
            "westbc" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicBoundingBox/gmd:westBoundLongitude/gco:Decimal",
            "eastbc" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicBoundingBox/gmd:eastBoundLongitude/gco:Decimal",
            "northbc" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicBoundingBox/gmd:northBoundLatitude/gco:Decimal",
            "southbc" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicBoundingBox/gmd:southBoundLatitude/gco:Decimal",
            "themekey" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:topicCategory/gmd:MD_TopicCategoryCode",
            "placekey" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:descriptiveKeywords/gmd:MD_Keywords/gmd:type/gmd:MD_KeywordTypeCode[@codeListValue='place']",
            "abstract" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:abstract/gco:CharacterString",
            "accconst" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_LegalConstraints/gmd:otherConstraints/gco:CharacterString",
            "useconst" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:resourceConstraints/gmd:MD_Constraints/gmd:useLimitation/gco:CharacterString",
            "formname" : "gmd:distributionInfo/gmd:MD_Distribution/gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString",
            "id" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/gmd:CI_Citation/gmd:identifier/gmd:MD_Identifier/gmd:code/gco:CharacterString",
            "distribution_links" : "gmd:distributionInfo/gmd:MD_Distribution/gmd:transferOptions/gmd:MD_DigitalTransferOptions/gmd:onLine/gmd:CI_OnlineResource/gmd:protocol/gco:CharacterString",
            "vector_datatype" : "gmd:spatialRepresentationInfo/gmd:MD_VectorSpatialRepresentation/gmd:geometricObjects/gmd:MD_GeometricObjects/gmd:geometricObjectType/gmd:MD_GeometricObjectTypeCode",
            "spatialrep" : "gmd:identificationInfo/gmd:MD_DataIdentification/gmd:spatialRepresentationType/gmd:MD_SpatialRepresentationTypeCode"
        }
    def data_type(self):
        """Map the ISO spatial representation / geometry codes onto OGP types.

        Returns None when the codes are absent or unrecognized (the
        original raised AttributeError when spatialrep was missing).
        """
        spatial_rep = self.root.find(self.PATHS["spatialrep"], self.NSMAP)
        if spatial_rep is None:
            return None
        spatial_rep_code = spatial_rep.get("codeListValue")
        if spatial_rep_code == "vector":
            geom = self.root.find(self.PATHS["vector_datatype"], self.NSMAP)
            if geom is None:
                return None
            code = geom.get("codeListValue")
            if code == "point":
                return "Point"
            elif code == "curve":
                return "Line"
            elif code == "surface" or code == "complex":
                return "Polygon"
        elif spatial_rep_code == "grid":
            return "Raster"
    def layer_id(self):
        """Last path segment of the dataset identifier (debug print removed)."""
        return self.root.find(self.PATHS["id"], self.NSMAP).text.split("/")[-1]
    def theme_keywords(self):
        """Comma-joined topic category codes; empty string when none exist."""
        keywords = self.root.findall(self.PATHS["themekey"], self.NSMAP)
        keywords_list = [k.text for k in keywords if k is not None]
        if not keywords_list:
            return ""
        elif len(keywords_list) == 1:
            return keywords_list[0]
        # debug print removed from the original here
        return ", ".join(keywords_list)
    def place_keywords(self):
        """Comma-joined place keywords; empty string when the place-keyword
        block is absent (the original crashed with AttributeError)."""
        type_code = self.root.find(self.PATHS["placekey"], self.NSMAP)
        if type_code is None:
            return ""
        keywords = type_code.getparent().getparent().findall("gmd:keyword", self.NSMAP)
        keywords_string = [k.find("gco:CharacterString", self.NSMAP).text for k in keywords]
        if keywords_string[0]:
            return ", ".join(keywords_string)
        return ""
    def publisher(self):
        """Publisher organisation name, or "UNKNOWN"."""
        return self.root.findtext(self.PATHS["publish"], "UNKNOWN", self.NSMAP)
    def originator(self):
        """Originator organisation name, or "UNKNOWN"."""
        return self.root.findtext(self.PATHS["origin"], "UNKNOWN", self.NSMAP)
    def layer_display_name(self):
        """Citation title, or "UNKNOWN"."""
        return self.root.findtext(self.PATHS["title"], "UNKNOWN", self.NSMAP)
    # _location_check_indirect: the identical base-class implementation is
    # inherited; the redundant copy in the original was removed
    def _location_get_protocols(self):
        """
        returns a dict of protocols (keys) and matching URLs (values)
        """
        link_protocols = self.root.findall(self.PATHS["distribution_links"], self.NSMAP)
        d = {}
        for protocol in link_protocols:
            d[protocol.text] = protocol.getparent().getparent().findtext("gmd:linkage/gmd:URL", "UNKNOWN", self.NSMAP)
        return d
    def _location_build(self, protocols):
        """
        creates an OGP specific dict from a dict of protocol/urls
        """
        loc = {}
        # protocols identified in Cat-Interop table
        # see https://github.com/OSGeo/Cat-Interop/blob/master/LinkPropertyLookupTable.csv
        for p in protocols:
            url = protocols[p]
            if url != "":
                if p == "ESRI:ArcGIS":
                    # equality test; the original used `is -1`, which only
                    # worked via CPython's small-int caching
                    if url.find("FeatureServer") == -1:
                        loc["ArcGISRest"] = url[:url.rfind("/")]
                        loc["layerId"] = url[url.rfind("/") + 1:]
                    else:
                        loc["esrifeatureservice"] = url + "/"
                        #loc["layerId"] = url[url.rfind("/") + 1:]
                elif p == "download":
                    loc["download"] = url
                # indicates an indirect link
                #elif p == "WWW:LINK":
                #    loc["externalDownload"] = url
        return loc
    def location(self):
        """JSON-encoded OGP location dict built from the distribution links."""
        protocols = self._location_get_protocols()
        loc = self._location_build(protocols)
        return json.dumps(loc)
    def content_date(self):
        """Publication date; falls back to a fixed sentinel date."""
        return self.root.findtext(self.PATHS["pubdate"], "1919-08-01T00:00:00Z", self.NSMAP)
    def abstract(self):
        """Dataset abstract, or "UNKNOWN"."""
        return self.root.findtext(self.PATHS["abstract"],"UNKNOWN", self.NSMAP)
    def min_x(self):
        """West bound, or "UNKNOWN"."""
        return self.root.findtext(self.PATHS["westbc"], "UNKNOWN", self.NSMAP)
    def min_y(self):
        """South bound, or "UNKNOWN"."""
        return self.root.findtext(self.PATHS["southbc"], "UNKNOWN", self.NSMAP)
    def max_x(self):
        """East bound, or "UNKNOWN"."""
        return self.root.findtext(self.PATHS["eastbc"], "UNKNOWN", self.NSMAP)
    def max_y(self):
        """North bound, or "UNKNOWN"."""
        return self.root.findtext(self.PATHS["northbc"], "UNKNOWN", self.NSMAP)
    def center_x(self):
        """Midpoint longitude.  NOTE: raises ValueError when a bound is
        "UNKNOWN" — behavior preserved from the original."""
        spread = abs(float(self.max_x()) - float(self.min_x())) / 2
        return unicode(float(self.max_x()) - spread)
    def center_y(self):
        """Midpoint latitude (same "UNKNOWN" caveat as center_x)."""
        spread = abs(float(self.max_y()) - float(self.min_y())) / 2
        return unicode(float(self.max_y()) - spread)
class ArcGISDocument(MetadataDocument):
    """Placeholder for ArcGIS-format metadata; no field handlers are
    implemented yet, so every handler is inherited unchanged from
    MetadataDocument."""
    def __init__(self, root, filename, log, indirect_links):
        super(ArcGISDocument, self).__init__(root, filename, log, indirect_links)
# _______ _______ _______ ______
# | ____| / _____| | \ / |
# | |__ | | __ | .--. | | ,----'
# | __| | | |_ | | | | | | |
# | | | |__| | | '--' | | `----.
# |__| \______| |_______/ \______|
class FGDCDocument(MetadataDocument):
    """
    MetadataDocument for FGDC CSDGM records; all lookups are relative
    XPaths into the FGDC tree (idinfo/..., metainfo/...).

    Fix over the original: the bounding-coordinate methods compared
    strings with `is not "UNKNOWN"`, an identity test that only worked
    because of CPython string interning; they now use `!=`.
    """
    def __init__(self, root, filename, log, indirect_links):
        super(FGDCDocument, self).__init__(root, filename, log, indirect_links)
    def publisher(self):
        """Publisher name, or "UNKNOWN" when the element is absent."""
        publisher = self.root.findtext("idinfo/citation/citeinfo/pubinfo/publish", "UNKNOWN")
        return publisher
    def layer_id(self):
        """Prefer the title element's catid attribute; fall back to the file name."""
        layer_id = self.root.find("idinfo/citation/citeinfo/title").get("catid")
        if layer_id is not None:
            return layer_id
        else:
            self.log.write(self.file_name, 'No catid found in title, using file name for now')
            return self.file_name.split(os.path.sep)[-1].replace(".xml","")
    def layer_display_name(self):
        """Title with underscores spaced out, in Title Case."""
        disp_name = self.root.findtext("idinfo/citation/citeinfo/title", "UNKNOWN")
        #disp_name = disp_name + " (" + self.name() + ")"
        return disp_name.replace("_"," ").title()
    def abstract(self):
        """Dataset abstract, or "UNKNOWN"."""
        abstract = self.root.findtext("idinfo/descript/abstract", "UNKNOWN")
        return abstract
    def originator(self):
        """Originator, or "UNKNOWN"."""
        originator = self.root.findtext("idinfo/citation/citeinfo/origin", "UNKNOWN")
        return originator
    def data_type(self):
        """Derive the OGP data type from geoform, direct, or sdtstype,
        checked in that order; "Undefined" when nothing matches."""
        root = self.root
        try:
            if root.find("*//geoform") is not None:
                geoform = root.findtext("*//geoform").lower()
                if ("scanned" in geoform or
                    "paper" in geoform or
                    "scanned paper map" in geoform
                    ):
                    return "Paper Map"
            if root.find("*//direct") is not None:
                direct = root.findtext("*//direct").lower()
                if "raster" in direct:
                    return "Raster"
                elif (
                    "g-polygon" in direct or
                    "polygon" in direct or
                    "chain" in direct
                    ):
                    return "Polygon"
                elif "point" in direct:
                    return "Point"
            if root.find("*//sdtstype") is not None:
                sdtstype = root.findtext("*//sdtstype").lower()
                if ("composite" in sdtstype or
                    "point" in sdtstype
                    ):
                    return "Point"
                elif "string" in sdtstype:
                    return "Line"
                elif ("g-polygon" in sdtstype or
                      "polygon" in sdtstype or
                      "chain" in sdtstype
                      ):
                    return "Polygon"
        except AttributeError as e:
            self.log.write(self.file_name, 'can\'t find data type')
            return "Undefined"
    def theme_keywords(self):
        """First themekey in the document; 'None' when there are none."""
        try:
            kw = list(self.root.iter("themekey"))
            if len(kw) > 0:
                kw_str = [i.text for i in kw][0]
                return kw_str
            else:
                return 'None'
        except AttributeError as e:
            self.log.write(self.file_name, 'can\'t find keywords')
            return "UNKNOWN"
    def place_keywords(self):
        """First placekey in the document; 'None' when there are none."""
        try:
            kw = list(self.root.iter("placekey"))
            if len(kw) > 0:
                kw_str = [i.text for i in kw][0]
                return kw_str
            else:
                return 'None'
        except AttributeError as e:
            print("can't find keywords. Setting to UNKNOWN for now")
            self.log.write(self.file_name, 'can\'t find keywords')
            return "UNKNOWN"
    def _parse_content_date(self, date_text):
        """Turn a YYYY or YYYYMMDD string into an ISO-8601 date.

        Returns None (after logging) for other lengths, and "UNKNOWN"
        when the text is not numeric at all.
        """
        try:
            if self._try_parse_int(date_text) is not None:
                if len(date_text) == 4:
                    year = int(date_text)
                    # we'll just use Jan 1 as the default for year only entries
                    date = datetime(year, 1, 1)
                    #now format it ISO style
                    return date.isoformat() + "Z"
                elif len(date_text) == 8:
                    year = int(date_text[0:4])
                    month = int(date_text[4:6])
                    day = int(date_text[6:])
                    date = datetime(year, month, day)
                    return date.isoformat() + "Z"
                else:
                    self.log.write(self.file_name, 'can\'t parse date with text: "' + date_text + '"')
        except ValueError as e:
            return "UNKNOWN"
    def _try_parse_int(self, s, base=10, val=None):
        """int(s, base), or `val` when s is not parseable."""
        try:
            return int(s, base)
        except ValueError:
            return val
    def content_date(self):
        """Pick the first available of caldate/begdate/pubdate; the fixed
        sentinel 1919-08-01 is used when no date can be found."""
        root = self.root
        try:
            if root.find("idinfo/timeperd/timeinfo/sngdate/caldate") is not None:
                date_text = root.find("idinfo/timeperd/timeinfo/sngdate/caldate").text
            elif root.find("idinfo/timeperd/timeinfo/rngdates/begdate") is not None:
                date_text = root.find("idinfo/timeperd/timeinfo/rngdates/begdate").text
            elif root.find("idinfo/citation/citeinfo/pubdate") is not None:
                date_text = root.find("idinfo/citation/citeinfo/pubdate").text
            else:
                date_text = False
            if date_text:
                return self._parse_content_date(date_text)
            else:
                self.log.write(self.file_name, 'can\'t find date')
                return "1919-08-01T00:00:00Z"
        except (AttributeError, TypeError) as e:
            print(e)
            print("No content date found! setting to 1919-08-01T00:00:00Z for now")
            self.log.write(self.file_name, 'can\'t find date')
            return "1919-08-01T00:00:00Z"
    def _parse_coord(self, coord):
        """Normalize a coordinate string to a decimal string; "0" on failure."""
        try:
            coord = float(coord)
            return unicode(coord)
        except (ValueError, TypeError):
            self.log.write(self.file_name, 'can\'t parse coordinate: "' + coord + '"')
            return "0"
    def min_x(self):
        """West bound as a decimal string; "0" when missing."""
        coord = self.root.findtext("idinfo/spdom/bounding/westbc", "UNKNOWN")
        if coord != "UNKNOWN":
            return self._parse_coord(coord)
        self.log.write(self.file_name, 'min_x issues')
        return "0"
    def min_y(self):
        """South bound as a decimal string; "0" when missing."""
        coord = self.root.findtext("idinfo/spdom/bounding/southbc", "UNKNOWN")
        if coord != "UNKNOWN":
            return self._parse_coord(coord)
        self.log.write(self.file_name, 'min_y issues')
        return "0"
    def max_x(self):
        """East bound as a decimal string; "0" when missing."""
        coord = self.root.findtext("idinfo/spdom/bounding/eastbc", "UNKNOWN")
        if coord != "UNKNOWN":
            return self._parse_coord(coord)
        self.log.write(self.file_name, 'max_x issues')
        return "0"
    def max_y(self):
        """North bound as a decimal string; "0" when missing."""
        coord = self.root.findtext("idinfo/spdom/bounding/northbc", "UNKNOWN")
        if coord != "UNKNOWN":
            return self._parse_coord(coord)
        self.log.write(self.file_name, 'max_y issues')
        return "0"
    def center_x(self):
        """Midpoint longitude of the bounding box; "0" on parse failure."""
        try:
            min_x = float(self.min_x())
            max_x = float(self.max_x())
            center_x = min_x + (abs(max_x - min_x) / 2)
            return unicode(center_x)
        except ValueError:
            self.log.write(self.file_name, 'center_x issues')
            return "0"
    def center_y(self):
        """Midpoint latitude of the bounding box; "0" on parse failure."""
        try:
            min_y = float(self.min_y())
            max_y = float(self.max_y())
            # local was misleadingly named center_x in the original
            center_y = min_y + (abs(max_y - min_y) / 2)
            return unicode(center_y)
        except ValueError:
            self.log.write(self.file_name, 'center_y issues')
            return "0"
    def location(self):
        """JSON location dict built from the onlink URL; "UNKNOWN" when absent."""
        loc = self.root.findtext("idinfo/citation/citeinfo/onlink", "UNKNOWN")
        if loc != "UNKNOWN":
            locDict = {}
            locDict = self._location_check_indirect(locDict, loc)
            return json.dumps(locDict)
        else:
            self.log.write(self.file_name, 'can\'t find onlink.')
            return "UNKNOWN"
# .___ ___. _______ .___ ___. _______
# | \/ | / _____| | \/ | / _____|
# | \ / | | | __ | \ / | | | __
# | |\/| | | | |_ | | |\/| | | | |_ |
# | | | | | |__| | | | | | | |__| |
# |__| |__| \______| |__| |__| \______|
class MGMGDocument(FGDCDocument):
    """
    Inherits from FGDCDocument; overrides data_type for MGMG's
    direct/mgmg3obj vocabulary and location for MGMG onlink handling.

    Fix over the original: the check `"TIN" in mgmg3obj` could never
    match because mgmg3obj is lowercased first; it now tests "tin".
    The repeated `findtext("*//direct")` calls are also hoisted.
    """
    def __init__(self, root, filename, log, indirect_links):
        super(MGMGDocument, self).__init__(root, filename, log, indirect_links)
    def data_type(self):
        """Map MGMG direct/mgmg3obj/sdtstype values onto OGP data types."""
        root = self.root
        try:
            # findtext returns None when <direct> is absent; .lower() then
            # raises AttributeError, handled below as "Undefined"
            direct = root.findtext("*//direct").lower()
            if direct == "raster":
                return "Raster"
            if direct == "point":
                return "Point"
            if direct == "vector":
                mgmg3obj = root.find("*//mgmg3obj")
                if mgmg3obj is not None:
                    mgmg3obj = mgmg3obj.text.lower()
                    if (
                        "area" in mgmg3obj or
                        "polygon" in mgmg3obj or
                        "region" in mgmg3obj or
                        "tin" in mgmg3obj
                    ):
                        return "Polygon"
                    elif (
                        "line" in mgmg3obj or
                        "network" in mgmg3obj or
                        "route-section" in mgmg3obj or
                        "arc" in mgmg3obj
                    ):
                        return "Line"
                    elif (
                        "node" in mgmg3obj or
                        "point" in mgmg3obj or
                        "label" in mgmg3obj
                    ):
                        return "Point"
            if root.find("*//sdtstype") is not None:
                sdtstype = root.findtext("*//sdtstype").lower()
                if sdtstype:
                    if ("composite" in sdtstype or
                        "point" in sdtstype
                        ):
                        return "Point"
                    elif "string" in sdtstype:
                        return "Line"
                    elif ("g-polygon" in sdtstype or
                          "polygon" in sdtstype or
                          "chain" in sdtstype
                          ):
                        return "Polygon"
            else:
                self.log.write(self.file_name, 'data type issues')
                return "Undefined"
        except AttributeError as e:
            print("Can't determine data type, setting to Undefined for now")
            self.log.write(self.file_name, 'data type issues')
            return "Undefined"
    def location(self):
        """JSON location dict built from the onlink URL; "UNKNOWN" when absent."""
        loc = self.root.findtext("idinfo/citation/citeinfo/onlink", "UNKNOWN")
        if loc != "UNKNOWN":
            locDict = {}
            """
            #datafinder.org specific stuff
            try:
                if df.has_key(os.path.split(self.file_name)[1]):
                    f = df[os.path.split(self.file_name)[1]]
                    locDict['ArcGISRest'] = f['ArcGISRest']
                    locDict['layerId'] = f['layerId']
            except KeyError:
                pass
            """
            #end datafinder specific
            locDict = self._location_check_indirect(locDict, loc)
            return json.dumps(locDict)
        else:
            self.log.write(self.file_name, 'can\'t find onlink, or else it\'s goofy somehow')
            return "UNKNOWN"
# ______ _______ ______ _____.
# / _____| | \ | _ \ / |
# | | __ | .--. | | |_) | | (---`
# | | |_ | | | | | | / \ \
# | |__| | | '--' | | |\ \--. .--) |
# \______| |_______/ | _| `.___| |_____/
#
class GDRSDocument(MGMGDocument):
    """MGMGDocument specialized for Minnesota GDRS packages, which pair
    each metadata file with a sibling dataResource.xml and optional
    .lyr file.

    Fixes over the original: a duplicate definition of
    _build_download_url was removed, the unused external_count local
    was dropped, and `is`/`is not -1` identity comparisons were
    replaced with equality tests.
    """
    def __init__(self, root, filename, log, indirect_links):
        super(GDRSDocument, self).__init__(root, filename, log, indirect_links)
        self.filename = filename
        self.root = root
        self._geospatial_commons_root_url = "https://gisdata.mn.gov/dataset/"
        self._gdrs_root_url = "ftp://ftp.gisdata.mn.gov/pub/gdrs/data/pub/"
        self.field_handlers["Access"] = "Public"
        # parses the sibling dataResource.xml into self._data_resource_tree
        self._get_resource_xml()
        self._data_resource_paths = {
            "sub_resources" : "dataSubResources/dataSubResource",
            "topic_categories":"topicCategories/topicCategory"
        }
        #taken from https://gisdata.mn.gov/content/?q=publisher_codes
        self._gdrs_publisher_codes = {
            "us_mn_co_carver":"Carver County, Minnesota",
            "us_mn_co_dakota":"Dakota County, Minnesota",
            "us_mn_co_itasca":"Itasca County, Minnesota",
            "us_mn_co_lake":"Lake County, Minnesota",
            "us_mn_state_metrogis":"Metro GIS, Minnesota",
            "us_mn_state_metc":"Metropolitan Council, Minnesota",
            "us_mn_state_bwsr":"Minnesota Board of Water and Soil Resources (BWSR)",
            "us_mn_state_mda":"Minnesota Department of Agriculture",
            "us_mn_state_mde":"Minnesota Department of Education",
            "us_mn_state_health":"Minnesota Department of Health",
            "us_mn_state_dnr":"Minnesota Department of Natural Resources",
            "us_mn_state_mdor":"Minnesota Department of Revenue",
            "us_mn_state_dot":"Minnesota Department of Transportation",
            "edu_umn_mngs":"Minnesota Geological Survey",
            "us_mn_state_mngeo":"Minnesota Geospatial Information Office",
            "us_mn_state_pca":"Minnesota Pollution Control Agency",
            "com_mvta": "Minnesota Valley Transit Authority (MVTA)",
            "us_mn_co_ramsey": "Ramsey County, Minnesota",
            "edu_umn":"University of Minnesota, Twin Cities"
        }
    def layer_id(self):
        """publisherID_baseName from dataResource.xml."""
        return self._get_resource_name()
    def _get_layer_file(self):
        """Path of the first .lyr file next to the metadata, or None."""
        path_to_lyr = os.path.join(os.path.split(self.filename)[0], "*.lyr")
        lyr_list = glob.glob(path_to_lyr)
        if len(lyr_list) > 0:
            lyr_file = lyr_list[0]
            return lyr_file
        return None
    def _get_subresources(self):
        """All dataSubResource elements from dataResource.xml."""
        return self._data_resource_tree.findall(self._data_resource_paths["sub_resources"])
    def _get_subresource_type(self, sub_resource):
        """The subResourceType text of one sub-resource, or None."""
        return sub_resource.findtext("subResourceType", None)
    def _get_dataset_url_for_publisher(self):
        """Some publishers key Geospatial Commons URLs on the full
        resource name rather than just the base name."""
        if self._get_resource_publisher_id() in ["us_mn_co_dakota", "us_mn_state_metc", "us_mn_state_metrogis"]:
            return self._get_resource_name()
        else:
            return self._get_resource_basename()
    def _build_geocommons_url(self):
        """Dataset landing page on the MN Geospatial Commons."""
        return self._geospatial_commons_root_url + self._get_dataset_url_for_publisher().replace("_","-")
    def _build_download_url(self):
        """FTP directory for the dataset's downloadable packages.

        (The original defined this method twice, identically; the
        duplicate was removed.)
        """
        name = self._get_resource_basename()
        pub = self._get_resource_publisher_id()
        return self._gdrs_root_url + pub + "/" + name + "/"
    def _get_resource_basename(self):
        return self._data_resource_tree.findtext("baseName", None)
    def _get_resource_publisher_id(self):
        return self._data_resource_tree.findtext("publisherID", None)
    def _get_resource_name(self):
        """publisherID_baseName, or None when either part is missing."""
        name = self._get_resource_basename()
        pub = self._get_resource_publisher_id()
        if name and pub:
            return pub + "_" + name
        return None
    def _get_resource_xml(self):
        """Parse the sibling dataResource.xml if it exists."""
        path_to_data_resource_xml = os.path.join(os.path.split(self.filename)[0], "dataResource.xml")
        if os.path.exists(path_to_data_resource_xml):
            self._data_resource_tree = etree.parse(path_to_data_resource_xml)
    def _get_topic_categories(self):
        """Topic categories as a single string (quoted + comma-joined when
        there are several)."""
        tc = self._data_resource_tree.findall(self._data_resource_paths["topic_categories"])
        if len(tc) > 1:
            return '"' + ", ".join([i.text for i in tc]) + '"'
        else:
            return tc[0].text
    def name(self):
        return self._get_resource_name()
    def _check_metadata_standard(self):
        """Sniff whether the source metadata is mgmg, fgdc, or iso."""
        root_tag = self.root.tag
        if root_tag == "metadata":
            if "Minnesota" in self.root.find("metainfo/metstdn").text:
                return "mgmg"
            elif "FGDC" in self.root.find("metainfo/metstdn").text:
                return "fgdc"
        elif root_tag.find("MD_Metadata") != -1 or root_tag.find("MI_Metadata") != -1:
            return "iso"
    def data_type(self):
        """Delegate to the standard-specific data-type parser."""
        metadata_standard = self._check_metadata_standard()
        if metadata_standard == "mgmg":
            return parse_data_type_MGMG(self.root)
        elif metadata_standard == "fgdc":
            return parse_data_type_FGDC(self.root)
    def theme_keywords(self):
        return self._get_topic_categories()
    def _get_publisher_name(self, publisher_id):
        """Human-readable publisher name for a GDRS publisher code."""
        return self._gdrs_publisher_codes[publisher_id]
    def publisher(self):
        pub_id = self._get_resource_publisher_id()
        pub = self._get_publisher_name(pub_id)
        return pub
    def _get_subresource_urls(self, resource):
        return resource.findall("subResourceAccess/subResourceURL")
    def location(self):
        """Build the OGP location dict from the GDRS sub-resources:
        shp/fgdb become direct downloads, "external" resources become
        externalDownload links, and ags_mapserver resources are resolved
        (via an optional .lyr file and arcpy) into ArcGIS REST endpoints.
        Side effect: writes a lyr_text.txt summary next to the source
        file for ags_mapserver resources.
        """
        loc = {}
        resources = self._get_subresources()
        for resource in resources:
            resource_type = self._get_subresource_type(resource)
            if resource_type:
                if resource_type == "shp" or resource_type == "fgdb":
                    url = self._build_download_url() + "shp_" + self._get_resource_basename() + ".zip"
                    loc["download"] = url
                elif resource_type == "external":
                    url_elements = self._get_subresource_urls(resource)
                    desc = resource.findtext("subResourceName", None)
                    if desc and "download" in desc.lower():
                        loc["externalDownload"] = url_elements[0].text
                elif resource_type == "ags_mapserver":
                    url = self._get_subresource_urls(resource)[0].text
                    self.log.write(self.file_name, url)
                    lyr_file = self._get_layer_file()
                    lyr_text = open(os.path.join(os.path.split(self.filename)[0],"lyr_text.txt"),"wb")
                    if lyr_file:
                        self.log.write(self.file_name, "I have a layer file!")
                        # arcpy is only available inside an ArcGIS install;
                        # imported lazily so the rest of the class works without it
                        import arcpy
                        lyr = arcpy.mapping.Layer(lyr_file)
                        if lyr.isGroupLayer:
                            some_visible = False
                            for index, ly in enumerate(arcpy.mapping.ListLayers(lyr)):
                                if ly.visible:
                                    some_visible = True
                                    # ListLayers includes the group itself at
                                    # index 0, so the service layer id is index-1
                                    ind = index - 1
                                    if ind >= 0:
                                        lyr_number = str(ind)
                                        #map service
                                        if url.find("MapServer") != -1:
                                            loc["ArcGISRest"] = url
                                            loc["layerId"] = lyr_number
                                            lyr_text.write("MapService|||" + url.rstrip("/") + "/" + lyr_number + "\n")
                                        #feature service
                                        elif url.find("FeatureServer") != -1:
                                            if url.endswith("/"):
                                                url = url + lyr_number
                                            else:
                                                url = url + "/" + lyr_number
                                            loc["esrifeatureservice"] = url + "/"
                                            lyr_text.write("FeatureService|||" + url.rstrip("/") + "/" + lyr_number + "\n")
                                        elif url.find("ImageServer") != -1:
                                            lyr_text.write("ImageService|||" + url.rstrip("/") + "/" + lyr_number + "\n")
                                        else:
                                            lyr_text.write("MysteryService|||" + url.rstrip("/") + "/" + lyr_number + "\n")
                                    elif ind == -1:
                                        lyr_text.write("RootService|||" + url.rstrip("/") + "\n")
                            if not some_visible:
                                lyr_text.write("MapService|||" + url.rstrip("/") + "\n")
                                loc["externalDownload"] = self._build_geocommons_url()
                        elif lyr.isServiceLayer:
                            if lyr.serviceProperties["URL"].find("MapServer") != -1:
                                lyr_text.write("MapService|||" + url.rstrip("/") + "\n")
                                loc["ArcGISRest"] = lyr.serviceProperties["URL"]
                    else:
                        #if there's no layer file, we'll just check if it's a MapServer
                        if url.find("MapServer") != -1:
                            lyr_text.write("MapService|||" + url.rstrip("/") + "\n")
                            loc["ArcGISRest"] = url
                    lyr_text.close()
        if len(loc.items()) == 0:
            #if all else fails, default to pointing to the geospatial commons address
            loc["externalDownload"] = self._build_geocommons_url()
        self.log.write(self.file_name, json.dumps(loc))
        return json.dumps(loc)
# .___ ___. ___ .______ ______
# | \/ | / \ | _ \ / |
# | \ / | / ^ \ | |_) | | ,----'
# | |\/| | / /_\ \ | / | |
# | | | | / _____ \ | |\ \----.| `----.
# |__| |__| /__/ \__\ | _| `._____| \______|
# from https://github.com/gravesm/marcingest
class MARCXMLDocument(MetadataDocument):
    """MetadataDocument for MARCXML records (after
    https://github.com/gravesm/marcingest).

    Fixes over the original: the data-type handler was named `datatype`,
    which never overrode the base class's `data_type` (so the OGP
    DataType field was always empty); it is renamed, with the old name
    kept as an alias.  The center/half coordinate helpers also no longer
    call float() on a possibly-None bound before checking it.
    """
    def __init__(self, root, file_name, log, indirect_links):
        import re
        super(MARCXMLDocument, self).__init__(root, file_name, log, indirect_links)
        MARC = "http://www.loc.gov/MARC21/slim"
        MARCNS = "{{{0}}}".format(MARC)
        NSMAP = {
            "marc": MARC,
        }
        # raw XPaths into the MARC record, keyed by tag/subfield
        _XPATHS = {
            "001": "/collection/record/controlfield[@tag='001']",
            "008": "/collection/record/controlfield[@tag='008']/text()",
            "034_d": "/collection/record/datafield[@tag='034']/subfield[@code='d']/text()",
            "034_e": "/collection/record/datafield[@tag='034']/subfield[@code='e']/text()",
            "034_f": "/collection/record/datafield[@tag='034']/subfield[@code='f']/text()",
            "034_g": "/collection/record/datafield[@tag='034']/subfield[@code='g']/text()",
            "245": "/collection/record/datafield[@tag='245']/subfield[@code='a']/text()",
            "260_b": "/collection/record/datafield[@tag='260']/subfield[@code='b']",
            "500_a": "/collection/record/datafield[@tag='500']/subfield[@code='a']/text()",
            "650_a": "/collection/record/datafield[@tag='650']/subfield[@code='a']",
            "650_z": "/collection/record/datafield[@tag='650']/subfield[@code='z']",
            "876_k": "/collection/record/datafield[@tag='876']/subfield[@code='k']",
        }
        # pre-compile all XPaths once per document
        self.XPATHS = dict((k, etree.XPath(v)) for k, v in _XPATHS.items())
        # MARC 034 coordinates: hemisphere prefix + DDD[MM[SS]] with
        # optional decimal fractions
        self._COORD_REGEX = re.compile("^([NSEW+-])?(\d{3}(\.\d*)?)(\d{2}(\.\d*)?)?(\d{2}(\.\d*)?)?",
                                       re.IGNORECASE)
    def data_type(self):
        """Map MARC 876$k holdings codes onto OGP data types."""
        xpath = self.XPATHS['876_k']
        mapping = {
            "MAP": "Paper Map",
            "CDROM": "CD-ROM",
            "DVDROM": "DVD-ROM",
        }
        for datatype in xpath(self.root):
            if datatype.text in ("MAP", "CDROM", "DVDROM"):
                return mapping[datatype.text]
        return "Unknown"
    # backward-compatible alias for the original (non-overriding) name
    datatype = data_type
    def theme_keywords(self):
        """Space-joined, de-duplicated 650$a subject keywords."""
        return " ".join(self._keywords(self.XPATHS['650_a']))
    def place_keywords(self):
        """Space-joined, de-duplicated 650$z place keywords."""
        return " ".join(self._keywords(self.XPATHS['650_z']))
    def publisher(self):
        """260$b publisher with any trailing comma stripped, or None."""
        xpath = self.XPATHS['260_b']
        publisher = xpath(self.root)
        if publisher:
            return publisher[0].text.rstrip(",")
    def name(self):
        """The 001 control number."""
        xpath = self.XPATHS['001']
        return xpath(self.root)[0].text
    def layer_display_name(self):
        """245$a title text."""
        xpath = self.XPATHS['245']
        return " ".join(xpath(self.root))
    def content_date(self):
        """Publication year from 008 positions 7-10 as ISO-8601, or None."""
        xpath = self.XPATHS['008']
        date = xpath(self.root)[0][7:11]
        try:
            date = datetime(int(date), 1, 1)
            return date.isoformat() + "Z"
        except ValueError:
            pass
    def abstract(self):
        """Space-joined 500$a general notes."""
        xpath = self.XPATHS['500_a']
        return " ".join(xpath(self.root))
    def min_x(self):
        """West bound (034$d) as a decimal string, or None."""
        coord = self.XPATHS['034_d'](self.root)
        if coord:
            return unicode(self._convert_coord(coord[0]))
    def min_y(self):
        """South bound (034$g) as a decimal string, or None."""
        coord = self.XPATHS['034_g'](self.root)
        if coord:
            return unicode(self._convert_coord(coord[0]))
    def max_x(self):
        """East bound (034$e) as a decimal string, or None."""
        coord = self.XPATHS['034_e'](self.root)
        if coord:
            return unicode(self._convert_coord(coord[0]))
    def max_y(self):
        """North bound (034$f) as a decimal string, or None."""
        coord = self.XPATHS['034_f'](self.root)
        if coord:
            return unicode(self._convert_coord(coord[0]))
    def center_x(self):
        """Midpoint longitude, or None when either bound is missing."""
        west = self.min_x()
        east = self.max_x()
        if west is None or east is None:
            return None
        west, east = float(west), float(east)
        return unicode(west + abs(east - west) / 2)
    def center_y(self):
        """Midpoint latitude, or None when either bound is missing."""
        south = self.min_y()
        north = self.max_y()
        if south is None or north is None:
            return None
        south, north = float(south), float(north)
        return unicode(south + abs(north - south) / 2)
    def half_height(self):
        """Half the latitude span, or None when either bound is missing."""
        north = self.max_y()
        south = self.min_y()
        if north is None or south is None:
            return None
        return unicode(abs(float(north) - float(south)) / 2)
    def half_width(self):
        """Half the longitude span, or None when either bound is missing."""
        east = self.max_x()
        west = self.min_x()
        if east is None or west is None:
            return None
        return unicode(abs(float(east) - float(west)) / 2)
    def _convert_coord(self, coordinate):
        """Convert a MARC DDDMMSS-style coordinate to decimal degrees,
        negating for W/S hemispheres; None when unparseable."""
        parts = self._COORD_REGEX.search(coordinate)
        if parts is None:
            return
        decimal = float(parts.group(2)) + float(parts.group(4) or 0) / 60 + float(parts.group(6) or 0) / 3600
        if parts.group(1) and parts.group(1) in "WSws-":
            decimal = -decimal
        return decimal
    def _keywords(self, xpath):
        """Unique keyword texts for `xpath`, stripped of trailing punctuation."""
        keywords = set()
        for keyword in xpath(self.root):
            keywords.add(keyword.text.rstrip(":;,. "))
        return list(keywords)
| |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
GPFS Driver for shares.
Config Requirements:
GPFS file system must have quotas enabled (`mmchfs -Q yes`).
Notes:
GPFS independent fileset is used for each share.
TODO(nileshb): add support for share server creation/deletion/handling.
Limitation:
While using remote GPFS node, with Ganesha NFS, 'gpfs_ssh_private_key'
for remote login to the GPFS node must be specified and there must be
a passwordless authentication already setup between the Manila share
service and the remote GPFS node.
"""
import abc
import math
import os
import re
import shlex
import socket
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import units
from manila.common import constants
from manila import exception
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.helpers import NFSHelper
from manila.share import share_types
from manila import utils
LOG = log.getLogger(__name__)
# matches multiple comma separated avpairs on a line. values with an embedded
# comma must be wrapped in quotation marks
AVPATTERN = re.compile(r'\s*(?P<attr>\w+)\s*=\s*(?P<val>'
                       r'(["][a-zA-Z0-9_, ]+["])|(\w+))\s*[,]?')
# Exit code returned by GPFS commands (e.g. mmunlinkfileset/mmdelfileset)
# when the target fileset does not exist; mirrors errno ENOENT.
ERR_FILE_NOT_FOUND = 2
# Driver configuration options; registered on the global CONF below and
# appended to the per-backend configuration in GPFSShareDriver.__init__.
gpfs_share_opts = [
    cfg.HostAddressOpt('gpfs_share_export_ip',
                       help='IP to be added to GPFS export string.'),
    cfg.StrOpt('gpfs_mount_point_base',
               default='$state_path/mnt',
               help='Base folder where exported shares are located.'),
    cfg.StrOpt('gpfs_nfs_server_type',
               default='CES',
               help=('NFS Server type. Valid choices are "CES" (Ganesha NFS) '
                     'or "KNFS" (Kernel NFS).')),
    cfg.ListOpt('gpfs_nfs_server_list',
                help=('A list of the fully qualified NFS server names that '
                      'make up the OpenStack Manila configuration.')),
    cfg.BoolOpt('is_gpfs_node',
                default=False,
                help=('True:when Manila services are running on one of the '
                      'Spectrum Scale node. '
                      'False:when Manila services are not running on any of '
                      'the Spectrum Scale node.')),
    cfg.PortOpt('gpfs_ssh_port',
                default=22,
                help='GPFS server SSH port.'),
    cfg.StrOpt('gpfs_ssh_login',
               help='GPFS server SSH login name.'),
    cfg.StrOpt('gpfs_ssh_password',
               secret=True,
               help='GPFS server SSH login password. '
                    'The password is not needed, if \'gpfs_ssh_private_key\' '
                    'is configured.'),
    cfg.StrOpt('gpfs_ssh_private_key',
               help='Path to GPFS server SSH private key for login.'),
    cfg.ListOpt('gpfs_share_helpers',
                default=[
                    'KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper',
                    'CES=manila.share.drivers.ibm.gpfs.CESHelper',
                ],
                help='Specify list of share export helpers.'),
]
CONF = cfg.CONF
CONF.register_opts(gpfs_share_opts)
class GPFSShareDriver(driver.ExecuteMixin, driver.GaneshaMixin,
                      driver.ShareDriver):
    """GPFS Share Driver.

    Executes commands relating to Shares.
    Supports creation of shares on a GPFS cluster.

    API version history:

        1.0 - Initial version.
        1.1 - Added extend_share functionality
        2.0 - Added CES support for NFS Ganesha
    """
    def __init__(self, *args, **kwargs):
        """Do initialization."""
        super(GPFSShareDriver, self).__init__(False, *args, **kwargs)
        # Protocol name -> helper instance; populated by _setup_helpers().
        self._helpers = {}
        self.configuration.append_config_values(gpfs_share_opts)
        self.backend_name = self.configuration.safe_get(
            'share_backend_name') or "IBM Storage System"
        # SSH pool is created lazily on first remote command (_run_ssh).
        self.sshpool = None
        self.ssh_connections = {}
        # Bound in do_setup() to either the local or the remote executor.
        self._gpfs_execute = None
        # GPFS CLI prefix: empty when running on a GPFS node (commands on
        # PATH), otherwise the standard Spectrum Scale install location.
        if self.configuration.is_gpfs_node:
            self.GPFS_PATH = ''
        else:
            self.GPFS_PATH = '/usr/lpp/mmfs/bin/'
    def do_setup(self, context):
        """Any initialization the share driver does while starting."""
        super(GPFSShareDriver, self).do_setup(context)
        # Choose the command transport once; all GPFS CLI calls go through
        # self._gpfs_execute from here on.
        if self.configuration.is_gpfs_node:
            self._gpfs_execute = self._gpfs_local_execute
        else:
            self._gpfs_execute = self._gpfs_remote_execute
        self._setup_helpers()
    def _gpfs_local_execute(self, *cmd, **kwargs):
        """Run a command locally (Manila service runs on a GPFS node)."""
        if 'run_as_root' not in kwargs:
            kwargs.update({'run_as_root': True})
        if 'ignore_exit_code' in kwargs:
            # Translate the driver's 'ignore_exit_code' list into the
            # 'check_exit_code' list understood by utils.execute; exit
            # code 0 is always acceptable.
            check_exit_code = kwargs.pop('ignore_exit_code')
            check_exit_code.append(0)
            kwargs.update({'check_exit_code': check_exit_code})
        return utils.execute(*cmd, **kwargs)
    def _gpfs_remote_execute(self, *cmd, **kwargs):
        """Run a command on the remote GPFS node over SSH."""
        host = self.configuration.gpfs_share_export_ip
        check_exit_code = kwargs.pop('check_exit_code', True)
        ignore_exit_code = kwargs.pop('ignore_exit_code', None)
        return self._run_ssh(host, cmd, ignore_exit_code, check_exit_code)
    def _sanitize_command(self, cmd_list):
        """Shell-quote each argument and join into a single command string."""
        # pylint: disable=too-many-function-args
        return ' '.join(shlex.quote(cmd_arg) for cmd_arg in cmd_list)
    def _run_ssh(self, host, cmd_list, ignore_exit_code=None,
                 check_exit_code=True):
        """Execute cmd_list on host via a (lazily created) SSH pool.

        :raises exception.GPFSException: on any SSH/command failure.
        """
        command = self._sanitize_command(cmd_list)
        if not self.sshpool:
            # First remote command: build the pool from driver config.
            gpfs_ssh_login = self.configuration.gpfs_ssh_login
            password = self.configuration.gpfs_ssh_password
            privatekey = self.configuration.gpfs_ssh_private_key
            gpfs_ssh_port = self.configuration.gpfs_ssh_port
            ssh_conn_timeout = self.configuration.ssh_conn_timeout
            min_size = self.configuration.ssh_min_pool_conn
            max_size = self.configuration.ssh_max_pool_conn
            self.sshpool = utils.SSHPool(host,
                                         gpfs_ssh_port,
                                         ssh_conn_timeout,
                                         gpfs_ssh_login,
                                         password=password,
                                         privatekey=privatekey,
                                         min_size=min_size,
                                         max_size=max_size)
        try:
            with self.sshpool.item() as ssh:
                return self._gpfs_ssh_execute(
                    ssh,
                    command,
                    ignore_exit_code=ignore_exit_code,
                    check_exit_code=check_exit_code)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                msg = (_('Error running SSH command: %(cmd)s. '
                         'Error: %(excmsg)s.') %
                       {'cmd': command, 'excmsg': e})
                LOG.error(msg)
                raise exception.GPFSException(msg)
    def _gpfs_ssh_execute(self, ssh, cmd, ignore_exit_code=None,
                          check_exit_code=True):
        """Run cmd over an open paramiko SSH connection.

        Returns (stdout, stderr) with any passwords masked. Raises
        ProcessExecutionError when the exit status violates
        check_exit_code/ignore_exit_code.
        """
        sanitized_cmd = strutils.mask_password(cmd)
        LOG.debug('Running cmd (SSH): %s', sanitized_cmd)
        stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
        channel = stdout_stream.channel
        stdout = stdout_stream.read()
        sanitized_stdout = strutils.mask_password(stdout)
        stderr = stderr_stream.read()
        sanitized_stderr = strutils.mask_password(stderr)
        stdin_stream.close()
        exit_status = channel.recv_exit_status()
        # exit_status == -1 if no exit code was returned
        if exit_status != -1:
            LOG.debug('Result was %s', exit_status)
            if ((check_exit_code and exit_status != 0)
                and
                (ignore_exit_code is None or
                 exit_status not in ignore_exit_code)):
                raise exception.ProcessExecutionError(exit_code=exit_status,
                                                      stdout=sanitized_stdout,
                                                      stderr=sanitized_stderr,
                                                      cmd=sanitized_cmd)
        return (sanitized_stdout, sanitized_stderr)
    def _check_gpfs_state(self):
        """Return True if the GPFS daemon reports state 'active'."""
        try:
            out, __ = self._gpfs_execute(self.GPFS_PATH + 'mmgetstate', '-Y')
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to check GPFS state. Error: %(excmsg)s.') %
                   {'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        lines = out.splitlines()
        try:
            # -Y output is colon-delimited with a header row; locate the
            # 'state' column by name rather than assuming its position.
            state_token = lines[0].split(':').index('state')
            gpfs_state = lines[1].split(':')[state_token]
        except (IndexError, ValueError) as e:
            msg = (_('Failed to check GPFS state. Error: %(excmsg)s.') %
                   {'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        if gpfs_state != 'active':
            return False
        return True
    def _is_dir(self, path):
        """Return True if path is a directory (via stat --format=%F)."""
        try:
            output, __ = self._gpfs_execute('stat', '--format=%F', path,
                                            run_as_root=False)
        except exception.ProcessExecutionError as e:
            msg = (_('%(path)s is not a directory. Error: %(excmsg)s') %
                   {'path': path, 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        return output.strip() == 'directory'
    def _is_gpfs_path(self, directory):
        """Return True if directory is on GPFS (mmlsattr succeeds on it)."""
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmlsattr', directory)
        except exception.ProcessExecutionError as e:
            msg = (_('%(dir)s is not on GPFS filesystem. Error: %(excmsg)s.') %
                   {'dir': directory, 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        return True
    def _setup_helpers(self):
        """Initializes protocol-specific NAS drivers."""
        self._helpers = {}
        # Each entry is 'NAME=import.path.Class' (see gpfs_share_helpers).
        for helper_str in self.configuration.gpfs_share_helpers:
            share_proto, _, import_str = helper_str.partition('=')
            helper = importutils.import_class(import_str)
            self._helpers[share_proto.upper()] = helper(self._gpfs_execute,
                                                        self.configuration)
    def _local_path(self, sharename):
        """Get local path for a share or share snapshot by name."""
        return os.path.join(self.configuration.gpfs_mount_point_base,
                            sharename)
    def _get_gpfs_device(self):
        """Return the device backing the configured GPFS mount point."""
        fspath = self.configuration.gpfs_mount_point_base
        try:
            (out, __) = self._gpfs_execute('df', fspath)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to get GPFS device for %(fspath)s.'
                     'Error: %(excmsg)s') %
                   {'fspath': fspath, 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        # df output: header row, then '<device> ...' for the mount point.
        lines = out.splitlines()
        fs = lines[1].split()[0]
        return fs
    def _create_share(self, shareobj):
        """Create a linked fileset file in GPFS.

        Note:  GPFS file system must have quotas enabled
        (mmchfs -Q yes).
        """
        sharename = shareobj['name']
        sizestr = '%sG' % shareobj['size']
        sharepath = self._local_path(sharename)
        fsdev = self._get_gpfs_device()
        # create fileset for the share, link it to root path and set max size
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmcrfileset', fsdev,
                               sharename, '--inode-space', 'new')
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to create fileset on %(fsdev)s for '
                     'the share %(sharename)s. Error: %(excmsg)s.') %
                   {'fsdev': fsdev, 'sharename': sharename,
                    'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmlinkfileset', fsdev,
                               sharename, '-J', sharepath)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to link fileset for the share %(sharename)s. '
                     'Error: %(excmsg)s.') %
                   {'sharename': sharename, 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmsetquota', fsdev + ':' +
                               sharename, '--block', '0:' + sizestr)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to set quota for the share %(sharename)s. '
                     'Error: %(excmsg)s.') %
                   {'sharename': sharename, 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        try:
            self._gpfs_execute('chmod', '777', sharepath)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to set permissions for share %(sharename)s. '
                     'Error: %(excmsg)s.') %
                   {'sharename': sharename, 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
    def _delete_share(self, shareobj):
        """Remove container by removing GPFS fileset."""
        sharename = shareobj['name']
        fsdev = self._get_gpfs_device()
        # ignore error, when the fileset does not exist
        # it may happen, when the share creation failed, the share is in
        # 'error' state, and the fileset was never created
        # we want to ignore that error condition while deleting the fileset,
        # i.e. 'Fileset name share-xyz not found', with error code '2'
        # and mark the deletion successful
        ignore_exit_code = [ERR_FILE_NOT_FOUND]
        # unlink and delete the share's fileset
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmunlinkfileset', fsdev,
                               sharename, '-f',
                               ignore_exit_code=ignore_exit_code)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed unlink fileset for share %(sharename)s. '
                     'Error: %(excmsg)s.') %
                   {'sharename': sharename, 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmdelfileset', fsdev,
                               sharename, '-f',
                               ignore_exit_code=ignore_exit_code)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed delete fileset for share %(sharename)s. '
                     'Error: %(excmsg)s.') %
                   {'sharename': sharename, 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
    def _get_available_capacity(self, path):
        """Calculate available space on path."""
        try:
            # -B 1 reports sizes in bytes; -P forces POSIX (one-line) format.
            out, __ = self._gpfs_execute('df', '-P', '-B', '1', path)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to check available capacity for %(path)s.'
                     'Error: %(excmsg)s.') %
                   {'path': path, 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        out = out.splitlines()[1]
        size = int(out.split()[1])
        available = int(out.split()[3])
        return available, size
    def _create_share_snapshot(self, snapshot):
        """Create a snapshot of the share."""
        sharename = snapshot['share_name']
        snapshotname = snapshot['name']
        fsdev = self._get_gpfs_device()
        LOG.debug(
            'Attempting to create a snapshot %(snap)s from share %(share)s '
            'on device %(dev)s.',
            {'share': sharename, 'snap': snapshotname, 'dev': fsdev}
        )
        try:
            # -j limits the snapshot to the share's fileset.
            self._gpfs_execute(self.GPFS_PATH + 'mmcrsnapshot', fsdev,
                               snapshot['name'], '-j', sharename)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to create snapshot %(snapshot)s. '
                     'Error: %(excmsg)s.') %
                   {'snapshot': snapshot['name'], 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
    def _delete_share_snapshot(self, snapshot):
        """Delete a snapshot of the share."""
        sharename = snapshot['share_name']
        fsdev = self._get_gpfs_device()
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmdelsnapshot', fsdev,
                               snapshot['name'], '-j', sharename)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to delete snapshot %(snapshot)s. '
                     'Error: %(excmsg)s.') %
                   {'snapshot': snapshot['name'], 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
    def _create_share_from_snapshot(self, share, snapshot, share_path):
        """Create share from a share snapshot."""
        self._create_share(share)
        snapshot_path = self._get_snapshot_path(snapshot)
        # Trailing slash makes rsync copy the snapshot's *contents*.
        snapshot_path = snapshot_path + "/"
        try:
            self._gpfs_execute('rsync', '-rp', snapshot_path, share_path)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to create share %(share)s from '
                     'snapshot %(snapshot)s. Error: %(excmsg)s.') %
                   {'share': share['name'], 'snapshot': snapshot['name'],
                    'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
    def _extend_share(self, shareobj, new_size):
        """Raise the fileset block quota to new_size (in GiB)."""
        sharename = shareobj['name']
        sizestr = '%sG' % new_size
        fsdev = self._get_gpfs_device()
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmsetquota', fsdev + ':' +
                               sharename, '--block', '0:' + sizestr)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to set quota for the share %(sharename)s. '
                     'Error: %(excmsg)s.') %
                   {'sharename': sharename, 'excmsg': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
    def get_network_allocations_number(self):
        """No network allocations needed (no share-server handling)."""
        return 0
    def create_share(self, ctx, share, share_server=None):
        """Create GPFS directory that will be represented as share."""
        self._create_share(share)
        share_path = self._get_share_path(share)
        location = self._get_helper(share).create_export(share_path)
        return location
    def create_share_from_snapshot(self, ctx, share, snapshot,
                                   share_server=None, parent_share=None):
        """Is called to create share from a snapshot."""
        share_path = self._get_share_path(share)
        self._create_share_from_snapshot(share, snapshot, share_path)
        location = self._get_helper(share).create_export(share_path)
        return location
    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates a snapshot."""
        self._create_share_snapshot(snapshot)
    def delete_share(self, ctx, share, share_server=None):
        """Remove and cleanup share storage."""
        location = self._get_share_path(share)
        self._get_helper(share).remove_export(location, share)
        self._delete_share(share)
    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""
        self._delete_share_snapshot(snapshot)
    def extend_share(self, share, new_size, share_server=None):
        """Extends the quota on the share fileset."""
        self._extend_share(share, new_size)
    def ensure_share(self, ctx, share, share_server=None):
        """Ensure that storage are mounted and exported."""
    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, share_server=None):
        """Update access rules for given share."""
        helper = self._get_helper(share)
        location = self._get_share_path(share)
        for access in delete_rules:
            helper.deny_access(location, share, access)
        for access in add_rules:
            helper.allow_access(location, share, access)
        # Empty add/delete lists signal a full recalculation request.
        if not (add_rules or delete_rules):
            helper.resync_access(location, share, access_rules)
    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        if not self._check_gpfs_state():
            msg = (_('GPFS is not active.'))
            LOG.error(msg)
            raise exception.GPFSException(msg)
        if not self.configuration.gpfs_share_export_ip:
            msg = (_('gpfs_share_export_ip must be specified.'))
            LOG.error(msg)
            raise exception.InvalidParameterValue(err=msg)
        gpfs_base_dir = self.configuration.gpfs_mount_point_base
        if not gpfs_base_dir.startswith('/'):
            msg = (_('%s must be an absolute path.') % gpfs_base_dir)
            LOG.error(msg)
            raise exception.GPFSException(msg)
        if not self._is_dir(gpfs_base_dir):
            msg = (_('%s is not a directory.') % gpfs_base_dir)
            LOG.error(msg)
            raise exception.GPFSException(msg)
        if not self._is_gpfs_path(gpfs_base_dir):
            msg = (_('%s is not on GPFS. Perhaps GPFS not mounted.')
                   % gpfs_base_dir)
            LOG.error(msg)
            raise exception.GPFSException(msg)
        if self.configuration.gpfs_nfs_server_type not in ("KNFS", "CES"):
            msg = (_('Invalid gpfs_nfs_server_type value: %s. '
                     'Valid values are: "KNFS", "CES".')
                   % self.configuration.gpfs_nfs_server_type)
            LOG.error(msg)
            raise exception.InvalidParameterValue(err=msg)
        # Only CES can discover its own server list; KNFS requires one.
        if ((not self.configuration.gpfs_nfs_server_list) and
                (self.configuration.gpfs_nfs_server_type != 'CES')):
            msg = (_('Missing value for gpfs_nfs_server_list.'))
            LOG.error(msg)
            raise exception.InvalidParameterValue(err=msg)
    def _is_share_valid(self, fsdev, location):
        """Return True if a fileset with allocated inodes exists at location."""
        try:
            out, __ = self._gpfs_execute(self.GPFS_PATH + 'mmlsfileset', fsdev,
                                         '-J', location, '-L', '-Y')
        except exception.ProcessExecutionError:
            msg = (_('Given share path %(share_path)s does not exist at '
                     'mount point %(mount_point)s.')
                   % {'share_path': location, 'mount_point': fsdev})
            LOG.exception(msg)
            raise exception.ManageInvalidShare(reason=msg)
        lines = out.splitlines()
        try:
            # Parse colon-delimited -Y output by column name.
            validation_token = lines[0].split(':').index('allocInodes')
            alloc_inodes = lines[1].split(':')[validation_token]
        except (IndexError, ValueError):
            msg = (_('Failed to check share at %s.') % location)
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        return alloc_inodes != '0'
    def _get_share_name(self, fsdev, location):
        """Return the GPFS fileset name backing the share at location."""
        try:
            out, __ = self._gpfs_execute(self.GPFS_PATH + 'mmlsfileset', fsdev,
                                         '-J', location, '-L', '-Y')
        except exception.ProcessExecutionError:
            msg = (_('Given share path %(share_path)s does not exist at '
                     'mount point %(mount_point)s.')
                   % {'share_path': location, 'mount_point': fsdev})
            LOG.exception(msg)
            raise exception.ManageInvalidShare(reason=msg)
        lines = out.splitlines()
        try:
            validation_token = lines[0].split(':').index('filesetName')
            share_name = lines[1].split(':')[validation_token]
        except (IndexError, ValueError):
            msg = (_('Failed to check share at %s.') % location)
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        return share_name
    def _manage_existing(self, fsdev, share, old_share_name):
        """Adopt an existing fileset: rename, relink, fix perms and quota.

        Returns (share_size_in_GiB, new_export_location).
        """
        new_share_name = share['name']
        new_export_location = self._local_path(new_share_name)
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmunlinkfileset', fsdev,
                               old_share_name, '-f')
        except exception.ProcessExecutionError:
            msg = _('Failed to unlink fileset for share %s.') % new_share_name
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        LOG.debug('Unlinked the fileset of share %s.', old_share_name)
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmchfileset', fsdev,
                               old_share_name, '-j', new_share_name)
        except exception.ProcessExecutionError:
            msg = _('Failed to rename fileset for share %s.') % new_share_name
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        LOG.debug('Renamed the fileset from %(old_share)s to %(new_share)s.',
                  {'old_share': old_share_name, 'new_share': new_share_name})
        try:
            self._gpfs_execute(self.GPFS_PATH + 'mmlinkfileset', fsdev,
                               new_share_name, '-J', new_export_location)
        except exception.ProcessExecutionError:
            msg = _('Failed to link fileset for the share %s.'
                    ) % new_share_name
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        LOG.debug('Linked the fileset of share %(share_name)s at location '
                  '%(export_location)s.',
                  {'share_name': new_share_name,
                   'export_location': new_export_location})
        try:
            self._gpfs_execute('chmod', '777', new_export_location)
        except exception.ProcessExecutionError:
            msg = _('Failed to set permissions for share %s.') % new_share_name
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        LOG.debug('Changed the permission of share %s.', new_share_name)
        try:
            out, __ = self._gpfs_execute(self.GPFS_PATH + 'mmlsquota', '-j',
                                         new_share_name, '-Y', fsdev)
        except exception.ProcessExecutionError:
            msg = _('Failed to check size for share %s.') % new_share_name
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        lines = out.splitlines()
        try:
            quota_limit = lines[0].split(':').index('blockLimit')
            quota_status = lines[1].split(':')[quota_limit]
        except (IndexError, ValueError):
            msg = _('Failed to check quota for share %s.') % new_share_name
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        share_size = int(quota_status)
        # Note: since share_size returns integer value in KB,
        # we are checking whether share is less than 1GiB.
        # (units.Mi * KB = 1GB)
        if share_size < units.Mi:
            try:
                self._gpfs_execute(self.GPFS_PATH + 'mmsetquota', fsdev + ':' +
                                   new_share_name, '--block', '0:1G')
            except exception.ProcessExecutionError:
                msg = _('Failed to set quota for share %s.') % new_share_name
                LOG.exception(msg)
                raise exception.GPFSException(msg)
            LOG.info('Existing share %(shr)s has size %(size)s KB '
                     'which is below 1GiB, so extended it to 1GiB.',
                     {'shr': new_share_name, 'size': share_size})
            share_size = 1
        else:
            orig_share_size = share_size
            # Round the quota up to a whole number of GiB.
            share_size = int(math.ceil(float(share_size) / units.Mi))
            if orig_share_size != share_size * units.Mi:
                try:
                    self._gpfs_execute(self.GPFS_PATH + 'mmsetquota', fsdev +
                                       ':' + new_share_name, '--block', '0:' +
                                       str(share_size) + 'G')
                except exception.ProcessExecutionError:
                    msg = _('Failed to set quota for share %s.'
                            ) % new_share_name
                    LOG.exception(msg)
                    raise exception.GPFSException(msg)
        new_export_location = self._get_helper(share).create_export(
            new_export_location)
        return share_size, new_export_location
    def manage_existing(self, share, driver_options):
        """Bring an existing GPFS fileset under Manila management.

        Expects share['export_location'] in the form
        'IP:/gpfs_mount_point_base/share_id'.
        """
        old_export = share['export_location'].split(':')
        try:
            ces_ip = old_export[0]
            old_export_location = old_export[1]
        except IndexError:
            msg = _('Incorrect export path. Expected format: '
                    'IP:/gpfs_mount_point_base/share_id.')
            LOG.exception(msg)
            raise exception.ShareBackendException(msg=msg)
        if ces_ip not in self.configuration.gpfs_nfs_server_list:
            msg = _('The CES IP %s is not present in the '
                    'configuration option "gpfs_nfs_server_list".') % ces_ip
            raise exception.ShareBackendException(msg=msg)
        fsdev = self._get_gpfs_device()
        if not self._is_share_valid(fsdev, old_export_location):
            err_msg = _('Given share path %s does not have a valid '
                        'share.') % old_export_location
            raise exception.ManageInvalidShare(reason=err_msg)
        share_name = self._get_share_name(fsdev, old_export_location)
        # Refuse to adopt a share that clients are still mounted on.
        out = self._get_helper(share)._has_client_access(old_export_location)
        if out:
            err_msg = _('Clients have access to %s share currently. Evict any '
                        'clients before trying again.') % share_name
            raise exception.ManageInvalidShare(reason=err_msg)
        share_size, new_export_location = self._manage_existing(
            fsdev, share, share_name)
        return {"size": share_size, "export_locations": new_export_location}
    def _update_share_stats(self):
        """Retrieve stats info from share volume group."""
        data = dict(
            share_backend_name=self.backend_name,
            vendor_name='IBM',
            storage_protocol='NFS',
            reserved_percentage=self.configuration.reserved_share_percentage,
            reserved_snapshot_percentage=(
                self.configuration.reserved_share_from_snapshot_percentage
                or self.configuration.reserved_share_percentage))
        free, capacity = self._get_available_capacity(
            self.configuration.gpfs_mount_point_base)
        data['total_capacity_gb'] = math.ceil(capacity / units.Gi)
        data['free_capacity_gb'] = math.ceil(free / units.Gi)
        super(GPFSShareDriver, self)._update_share_stats(data)
    def _get_helper(self, share):
        """Return the protocol helper for share; only NFS is supported."""
        if share['share_proto'] == 'NFS':
            return self._helpers[self.configuration.gpfs_nfs_server_type]
        else:
            msg = (_('Share protocol %s not supported by GPFS driver.')
                   % share['share_proto'])
            LOG.error(msg)
            raise exception.InvalidShare(reason=msg)
    def _get_share_path(self, share):
        """Returns share path on storage provider."""
        return os.path.join(self.configuration.gpfs_mount_point_base,
                            share['name'])
    def _get_snapshot_path(self, snapshot):
        """Returns share path on storage provider."""
        # GPFS exposes fileset snapshots under '<share>/.snapshots/<name>'.
        snapshot_dir = ".snapshots"
        return os.path.join(self.configuration.gpfs_mount_point_base,
                            snapshot["share_name"], snapshot_dir,
                            snapshot["name"])
class NASHelperBase(metaclass=abc.ABCMeta):
    """Interface to work with share."""

    # Extra-spec key holding export options, per helper type.
    _EXPORT_OPTION_SPECS = {
        'KNFS': 'knfs:export_options',
        'CES': 'ces:export_options',
    }

    def __init__(self, execute, config_object):
        self.configuration = config_object
        self._execute = execute

    def create_export(self, local_path):
        """Construct location of new export."""
        export_ip = self.configuration.gpfs_share_export_ip
        return '%s:%s' % (export_ip, local_path)

    def get_export_options(self, share, access, helper):
        """Get the export options."""
        extra_specs = share_types.get_extra_specs_from_share(share)
        spec_key = self._EXPORT_OPTION_SPECS.get(helper)
        export_options = extra_specs.get(spec_key) if spec_key else None
        options = self._get_validated_opt_list(export_options)
        options.append(self.get_access_option(access))
        return ','.join(options)

    def _validate_export_options(self, options):
        """Validate the export options."""
        not_allowed = self._get_options_not_allowed()
        invalid = [opt for opt in options if opt in not_allowed]
        if invalid:
            raise exception.InvalidInput(reason='Invalid export_option %s as '
                                                'it is set by access_type.'
                                                % invalid)

    def _get_validated_opt_list(self, export_options):
        """Validate the export options and return an option list."""
        if not export_options:
            return []
        options = export_options.lower().split(',')
        self._validate_export_options(options)
        return options

    @abc.abstractmethod
    def get_access_option(self, access):
        """Get access option string based on access level."""

    @abc.abstractmethod
    def _get_options_not_allowed(self):
        """Get access options that are not allowed in extra-specs."""

    @abc.abstractmethod
    def remove_export(self, local_path, share):
        """Remove export."""

    @abc.abstractmethod
    def allow_access(self, local_path, share, access):
        """Allow access to the host."""

    @abc.abstractmethod
    def deny_access(self, local_path, share, access):
        """Deny access to the host."""

    @abc.abstractmethod
    def resync_access(self, local_path, share, access_rules):
        """Re-sync all access rules for given share."""
class KNFSHelper(NASHelperBase):
    """Wrapper for Kernel NFS Commands."""
    def __init__(self, execute, config_object):
        super(KNFSHelper, self).__init__(execute, config_object)
        self._execute = execute
        # Probe for a working kernel NFS server; a failing 'exportfs'
        # means the helper cannot function at all.
        try:
            self._execute('exportfs', check_exit_code=True, run_as_root=True)
        except exception.ProcessExecutionError as e:
            msg = (_('NFS server not found. Error: %s.') % e)
            LOG.error(msg)
            raise exception.GPFSException(msg)
    def _has_client_access(self, local_path, access_to=None):
        """Return True if local_path is exported (to access_to, if given)."""
        try:
            out, __ = self._execute('exportfs', run_as_root=True)
        except exception.ProcessExecutionError:
            msg = _('Failed to check exports on the systems.')
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        if access_to:
            if (re.search(re.escape(local_path) + r'[\s\n]*'
                          + re.escape(access_to), out)):
                return True
        else:
            # No specific client requested: any export of the path counts.
            if re.findall(local_path + '\\b', ''.join(out)):
                return True
        return False
    def _publish_access(self, *cmd, **kwargs):
        """Run cmd on every configured NFS server (locally or via ssh).

        Returns the list of (stdout, stderr) tuples, one per server.
        """
        check_exit_code = kwargs.get('check_exit_code', True)
        outs = []
        # IPs of this host, to decide local vs. remote execution.
        localserver_iplist = socket.gethostbyname_ex(socket.gethostname())[2]
        for server in self.configuration.gpfs_nfs_server_list:
            if server in localserver_iplist:
                run_command = cmd
                run_local = True
            else:
                sshlogin = self.configuration.gpfs_ssh_login
                remote_login = sshlogin + '@' + server
                run_command = ['ssh', remote_login] + list(cmd)
                run_local = False
            try:
                out = utils.execute(*run_command,
                                    run_as_root=run_local,
                                    check_exit_code=check_exit_code)
            except exception.ProcessExecutionError:
                raise
            outs.append(out)
        return outs
    def _verify_denied_access(self, local_path, share, ip):
        """Confirm ip no longer appears in any server's export list."""
        try:
            cmd = ['exportfs']
            outs = self._publish_access(*cmd)
        except exception.ProcessExecutionError:
            msg = _('Failed to verify denied access for '
                    'share %s.') % share['name']
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        for stdout, stderr in outs:
            if stderr and stderr.strip():
                msg = ('Log/ignore stderr during _validate_denied_access for '
                       'share %(sharename)s. Return code OK. '
                       'Stderr: %(stderr)s' % {'sharename': share['name'],
                                               'stderr': stderr})
                LOG.debug(msg)
            gpfs_ips = NFSHelper.get_host_list(stdout, local_path)
            if ip in gpfs_ips:
                msg = (_('Failed to deny access for share %(sharename)s. '
                         'IP %(ip)s still has access.') %
                       {'sharename': share['name'],
                        'ip': ip})
                LOG.error(msg)
                raise exception.GPFSException(msg)
    def remove_export(self, local_path, share):
        """Remove export."""
        # Intentionally a no-op for kernel NFS; presumably per-client
        # entries are removed via deny_access instead — TODO confirm.
    def get_access_option(self, access):
        """Get access option string based on access level."""
        return access['access_level']
    def _get_options_not_allowed(self):
        """Get access options that are not allowed in extra-specs."""
        return list(constants.ACCESS_LEVELS)
    def _get_exports(self):
        """Get exportfs output."""
        try:
            out, __ = self._execute('exportfs', run_as_root=True)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed to check exports on the systems. '
                     ' Error: %s.') % e)
            LOG.error(msg)
            raise exception.GPFSException(msg)
        return out
    def allow_access(self, local_path, share, access, error_on_exists=True):
        """Allow access to one or more vm instances."""
        if access['access_type'] != 'ip':
            raise exception.InvalidShareAccess(reason='Only ip access type '
                                                      'supported.')
        if error_on_exists:
            # check if present in export
            out = re.search(
                re.escape(local_path) + r'[\s\n]*'
                + re.escape(access['access_to']), self._get_exports())
            if out is not None:
                access_type = access['access_type']
                access_to = access['access_to']
                raise exception.ShareAccessExists(access_type=access_type,
                                                  access=access_to)
        export_opts = self.get_export_options(share, access, 'KNFS')
        cmd = ['exportfs', '-o', export_opts,
               ':'.join([access['access_to'], local_path])]
        try:
            self._publish_access(*cmd)
        except exception.ProcessExecutionError:
            msg = _('Failed to allow access for share %s.') % share['name']
            LOG.exception(msg)
            raise exception.GPFSException(msg)
    def _deny_ip(self, local_path, share, ip):
        """Remove access for one or more vm instances."""
        cmd = ['exportfs', '-u', ':'.join([ip, local_path])]
        try:
            # Can get exit code 0 for success or 1 for already gone (also
            # potentially get 1 due to exportfs bug). So allow
            # _publish_access to continue with [0, 1] and then verify after
            # it is done.
            self._publish_access(*cmd, check_exit_code=[0, 1])
        except exception.ProcessExecutionError:
            msg = _('Failed to deny access for share %s.') % share['name']
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        # Error code (0 or 1) makes deny IP success indeterminate.
        # So, verify that the IP access was completely removed.
        self._verify_denied_access(local_path, share, ip)
    def deny_access(self, local_path, share, access):
        """Remove access for one or more vm instances."""
        self._deny_ip(local_path, share, access['access_to'])
    def _remove_other_access(self, local_path, share, access_rules):
        """Remove any client access that is not in access_rules."""
        exports = self._get_exports()
        gpfs_ips = set(NFSHelper.get_host_list(exports, local_path))
        manila_ips = set([x['access_to'] for x in access_rules])
        remove_ips = gpfs_ips - manila_ips
        for ip in remove_ips:
            self._deny_ip(local_path, share, ip)
    def resync_access(self, local_path, share, access_rules):
        """Re-sync all access rules for given share."""
        for access in access_rules:
            self.allow_access(local_path, share, access, error_on_exists=False)
        self._remove_other_access(local_path, share, access_rules)
class CESHelper(NASHelperBase):
    """Wrapper for NFS by Spectrum Scale CES.

    Drives NFS exports through the ``mmnfs export`` command family and
    parses its colon-delimited (``-Y``) output.
    """

    def __init__(self, execute, config_object):
        super(CESHelper, self).__init__(execute, config_object)
        self._execute = execute
        # On a GPFS node the mm* tools are expected on PATH; otherwise use
        # the standard Spectrum Scale installation directory.
        if self.configuration.is_gpfs_node:
            self.GPFS_PATH = ''
        else:
            self.GPFS_PATH = '/usr/lpp/mmfs/bin/'

    def _execute_mmnfs_command(self, cmd, err_msg):
        """Run ``mmnfs export <cmd>`` and return its stdout.

        :param cmd: sequence of arguments appended after ``mmnfs export``.
        :param err_msg: message to log and raise if the command fails.
        :raises exception.GPFSException: on command failure.
        """
        try:
            out, __ = self._execute(self.GPFS_PATH + 'mmnfs', 'export', *cmd)
        except exception.ProcessExecutionError as e:
            msg = (_('%(err_msg)s Error: %(e)s.')
                   % {'err_msg': err_msg, 'e': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        return out

    @staticmethod
    def _fix_export_data(data, headers):
        """Export data split by ':' may need fixing if client had colons."""
        # If an IPv6 client shows up then ':' delimiters don't work.
        # So use header positions to get data before/after Clients.
        # Then what is left in between can be joined back into a client IP.
        client_index = headers.index('Clients')
        # reverse_client_index is distance from end.
        reverse_client_index = len(headers) - (client_index + 1)
        after_client_index = len(data) - reverse_client_index
        before_client = data[:client_index]
        client = data[client_index: after_client_index]
        after_client = data[after_client_index:]
        result_data = before_client
        result_data.append(':'.join(client))  # Fixes colons in client IP
        result_data.extend(after_client)
        return result_data

    def _get_nfs_client_exports(self, local_path):
        """Get the current NFS client export details from GPFS.

        Returns a list of dicts, one per export line, keyed by the header
        fields reported by ``mmnfs export list -Y``.

        :raises exception.GPFSException: if no HEADER line is found.
        """
        out = self._execute_mmnfs_command(
            ('list', '-n', local_path, '-Y'),
            'Failed to get exports from the system.')
        # Remove the header line and use the headers to describe the data
        lines = out.splitlines()
        for line in lines:
            data = line.split(':')
            if "HEADER" in data:
                headers = data
                # Safe despite mutating during iteration: we break at once.
                lines.remove(line)
                break
        else:
            msg = _('Failed to parse exports for path %s. '
                    'No HEADER found.') % local_path
            LOG.error(msg)
            raise exception.GPFSException(msg)
        exports = []
        for line in lines:
            data = line.split(':')
            if len(data) < 3:
                continue  # Skip empty lines (and anything less than minimal).
            result_data = self._fix_export_data(data, headers)
            exports.append(dict(zip(headers, result_data)))
        return exports

    def _has_client_access(self, local_path, access_to=None):
        """Check path for any export or for one with a specific IP address."""
        gpfs_clients = self._get_nfs_client_exports(local_path)
        return gpfs_clients and (access_to is None or access_to in [
            x['Clients'] for x in gpfs_clients])

    def remove_export(self, local_path, share):
        """Remove export.

        Only issues ``mmnfs export remove`` when at least one client still
        has access; removing a non-existent export would fail.
        """
        if self._has_client_access(local_path):
            err_msg = ('Failed to remove export for share %s.'
                       % share['name'])
            self._execute_mmnfs_command(('remove', local_path), err_msg)

    def _get_options_not_allowed(self):
        """Get access options that are not allowed in extra-specs."""
        # Access type is derived per rule in get_access_option, so it must
        # not also be injected via share-type extra-specs.
        return ['access_type=ro', 'access_type=rw']

    def get_access_option(self, access):
        """Get access option string based on access level."""
        if access['access_level'] == constants.ACCESS_LEVEL_RO:
            return 'access_type=ro'
        else:
            return 'access_type=rw'

    def allow_access(self, local_path, share, access):
        """Allow access to the host.

        The first client on a path needs ``add``; subsequent clients are
        appended with ``change --nfsadd``.
        """
        if access['access_type'] != 'ip':
            raise exception.InvalidShareAccess(reason='Only ip access type '
                                               'supported.')
        has_exports = self._has_client_access(local_path)
        export_opts = self.get_export_options(share, access, 'CES')
        if not has_exports:
            cmd = ['add', local_path, '-c',
                   access['access_to'] +
                   '(' + export_opts + ')']
        else:
            cmd = ['change', local_path, '--nfsadd',
                   access['access_to'] +
                   '(' + export_opts + ')']
        err_msg = ('Failed to allow access for share %s.'
                   % share['name'])
        self._execute_mmnfs_command(cmd, err_msg)

    def deny_access(self, local_path, share, access, force=False):
        """Deny access to the host.

        Silently succeeds when the client has no export entry.
        """
        has_export = self._has_client_access(local_path, access['access_to'])
        if has_export:
            err_msg = ('Failed to remove access for share %s.'
                       % share['name'])
            self._execute_mmnfs_command(('change', local_path,
                                         '--nfsremove', access['access_to']),
                                        err_msg)

    def _get_client_opts(self, access, opts_list):
        """Get client options string for access rule and NFS options.

        Produces the ``<ip>(<opt>,<opt>,...)`` form expected by mmnfs.
        """
        nfs_opts = ','.join([self.get_access_option(access)] + opts_list)
        return '%(ip)s(%(nfs_opts)s)' % {'ip': access['access_to'],
                                         'nfs_opts': nfs_opts}

    def _get_share_opts(self, share):
        """Get a list of NFS options from the share's share type."""
        extra_specs = share_types.get_extra_specs_from_share(share)
        opts_list = self._get_validated_opt_list(
            extra_specs.get('ces:export_options'))
        return opts_list

    def _nfs_change(self, local_path, share, access_rules, gpfs_clients):
        """Bulk add/update/remove of access rules for share.

        Diffs the desired ``access_rules`` against the existing GPFS
        client exports and issues a single ``mmnfs export change`` with
        --nfsremove/--nfsadd/--nfschange sections as needed.
        """
        opts_list = self._get_share_opts(share)
        # Create a map of existing client access rules from GPFS.
        # Key from 'Clients' is an IP address or
        # Value from 'Access_Type' is RW|RO (case varies)
        gpfs_map = {
            x['Clients']: x['Access_Type'].lower() for x in gpfs_clients}
        gpfs_ips = set(gpfs_map.keys())
        manila_ips = set([x['access_to'] for x in access_rules])
        add_ips = manila_ips - gpfs_ips
        update_ips = gpfs_ips.intersection(manila_ips)
        remove_ips = gpfs_ips - manila_ips
        adds = []
        updates = []
        if add_ips or update_ips:
            for access in access_rules:
                ip = access['access_to']
                if ip in add_ips:
                    adds.append(self._get_client_opts(access, opts_list))
                elif (ip in update_ips
                      and access['access_level'] != gpfs_map[ip]):
                    # Present on both sides but with a different level:
                    # re-emit the rule as a change.
                    updates.append(self._get_client_opts(access, opts_list))
        if remove_ips or adds or updates:
            cmd = ['change', local_path]
            if remove_ips:
                cmd.append('--nfsremove')
                cmd.append(','.join(remove_ips))
            if adds:
                cmd.append('--nfsadd')
                cmd.append(';'.join(adds))
            if updates:
                cmd.append('--nfschange')
                cmd.append(';'.join(updates))
            err_msg = ('Failed to resync access for share %s.' % share['name'])
            self._execute_mmnfs_command(cmd, err_msg)

    def _nfs_add(self, access_rules, local_path, share):
        """Bulk add of access rules to share.

        Used when the path currently has no exports at all, so a single
        ``mmnfs export add`` creates them in one call.
        """
        if not access_rules:
            return
        opts_list = self._get_share_opts(share)
        client_options = []
        for access in access_rules:
            client_options.append(self._get_client_opts(access, opts_list))
        cmd = ['add', local_path, '-c', ';'.join(client_options)]
        err_msg = ('Failed to resync access for share %s.' % share['name'])
        self._execute_mmnfs_command(cmd, err_msg)

    def resync_access(self, local_path, share, access_rules):
        """Re-sync all access rules for given share."""
        gpfs_clients = self._get_nfs_client_exports(local_path)
        if not gpfs_clients:
            self._nfs_add(access_rules, local_path, share)
        else:
            self._nfs_change(local_path, share, access_rules, gpfs_clients)
| |
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class RuleFailureReason(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type (used by to_dict serialization).
    openapi_types = {
        'campaign_id': 'int',
        'campaign_name': 'str',
        'ruleset_id': 'int',
        'coupon_id': 'int',
        'coupon_value': 'str',
        'referral_id': 'int',
        'referral_value': 'str',
        'rule_index': 'int',
        'rule_name': 'str',
        'condition_index': 'int',
        'effect_index': 'int',
        'details': 'str'
    }

    # Python attribute name -> JSON key in the API schema.
    attribute_map = {
        'campaign_id': 'campaignID',
        'campaign_name': 'campaignName',
        'ruleset_id': 'rulesetID',
        'coupon_id': 'couponID',
        'coupon_value': 'couponValue',
        'referral_id': 'referralID',
        'referral_value': 'referralValue',
        'rule_index': 'ruleIndex',
        'rule_name': 'ruleName',
        'condition_index': 'conditionIndex',
        'effect_index': 'effectIndex',
        'details': 'details'
    }

    def __init__(self, campaign_id=None, campaign_name=None, ruleset_id=None, coupon_id=None, coupon_value=None, referral_id=None, referral_value=None, rule_index=None, rule_name=None, condition_index=None, effect_index=None, details=None, local_vars_configuration=None):  # noqa: E501
        """RuleFailureReason - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties defined below.
        self._campaign_id = None
        self._campaign_name = None
        self._ruleset_id = None
        self._coupon_id = None
        self._coupon_value = None
        self._referral_id = None
        self._referral_value = None
        self._rule_index = None
        self._rule_name = None
        self._condition_index = None
        self._effect_index = None
        self._details = None
        self.discriminator = None

        # Assigned unconditionally: the setters validate these as non-None
        # when client-side validation is enabled.
        self.campaign_id = campaign_id
        self.campaign_name = campaign_name
        self.ruleset_id = ruleset_id
        # Optional fields: only set when a value was provided.
        if coupon_id is not None:
            self.coupon_id = coupon_id
        if coupon_value is not None:
            self.coupon_value = coupon_value
        if referral_id is not None:
            self.referral_id = referral_id
        if referral_value is not None:
            self.referral_value = referral_value
        self.rule_index = rule_index
        self.rule_name = rule_name
        if condition_index is not None:
            self.condition_index = condition_index
        if effect_index is not None:
            self.effect_index = effect_index
        if details is not None:
            self.details = details

    @property
    def campaign_id(self):
        """Gets the campaign_id of this RuleFailureReason.  # noqa: E501

        The ID of the campaign that contains the rule that failed  # noqa: E501

        :return: The campaign_id of this RuleFailureReason.  # noqa: E501
        :rtype: int
        """
        return self._campaign_id

    @campaign_id.setter
    def campaign_id(self, campaign_id):
        """Sets the campaign_id of this RuleFailureReason.

        The ID of the campaign that contains the rule that failed  # noqa: E501

        :param campaign_id: The campaign_id of this RuleFailureReason.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and campaign_id is None:  # noqa: E501
            raise ValueError("Invalid value for `campaign_id`, must not be `None`")  # noqa: E501

        self._campaign_id = campaign_id

    @property
    def campaign_name(self):
        """Gets the campaign_name of this RuleFailureReason.  # noqa: E501

        The name of the campaign that contains the rule that failed  # noqa: E501

        :return: The campaign_name of this RuleFailureReason.  # noqa: E501
        :rtype: str
        """
        return self._campaign_name

    @campaign_name.setter
    def campaign_name(self, campaign_name):
        """Sets the campaign_name of this RuleFailureReason.

        The name of the campaign that contains the rule that failed  # noqa: E501

        :param campaign_name: The campaign_name of this RuleFailureReason.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and campaign_name is None:  # noqa: E501
            raise ValueError("Invalid value for `campaign_name`, must not be `None`")  # noqa: E501

        self._campaign_name = campaign_name

    @property
    def ruleset_id(self):
        """Gets the ruleset_id of this RuleFailureReason.  # noqa: E501

        The ID of the ruleset that contains the rule that failed  # noqa: E501

        :return: The ruleset_id of this RuleFailureReason.  # noqa: E501
        :rtype: int
        """
        return self._ruleset_id

    @ruleset_id.setter
    def ruleset_id(self, ruleset_id):
        """Sets the ruleset_id of this RuleFailureReason.

        The ID of the ruleset that contains the rule that failed  # noqa: E501

        :param ruleset_id: The ruleset_id of this RuleFailureReason.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and ruleset_id is None:  # noqa: E501
            raise ValueError("Invalid value for `ruleset_id`, must not be `None`")  # noqa: E501

        self._ruleset_id = ruleset_id

    @property
    def coupon_id(self):
        """Gets the coupon_id of this RuleFailureReason.  # noqa: E501

        The ID of the coupon that was being evaluated at the time of the rule failure  # noqa: E501

        :return: The coupon_id of this RuleFailureReason.  # noqa: E501
        :rtype: int
        """
        return self._coupon_id

    @coupon_id.setter
    def coupon_id(self, coupon_id):
        """Sets the coupon_id of this RuleFailureReason.

        The ID of the coupon that was being evaluated at the time of the rule failure  # noqa: E501

        :param coupon_id: The coupon_id of this RuleFailureReason.  # noqa: E501
        :type: int
        """
        self._coupon_id = coupon_id

    @property
    def coupon_value(self):
        """Gets the coupon_value of this RuleFailureReason.  # noqa: E501

        The value of the coupon that was being evaluated at the time of the rule failure  # noqa: E501

        :return: The coupon_value of this RuleFailureReason.  # noqa: E501
        :rtype: str
        """
        return self._coupon_value

    @coupon_value.setter
    def coupon_value(self, coupon_value):
        """Sets the coupon_value of this RuleFailureReason.

        The value of the coupon that was being evaluated at the time of the rule failure  # noqa: E501

        :param coupon_value: The coupon_value of this RuleFailureReason.  # noqa: E501
        :type: str
        """
        self._coupon_value = coupon_value

    @property
    def referral_id(self):
        """Gets the referral_id of this RuleFailureReason.  # noqa: E501

        The ID of the referral that was being evaluated at the time of the rule failure  # noqa: E501

        :return: The referral_id of this RuleFailureReason.  # noqa: E501
        :rtype: int
        """
        return self._referral_id

    @referral_id.setter
    def referral_id(self, referral_id):
        """Sets the referral_id of this RuleFailureReason.

        The ID of the referral that was being evaluated at the time of the rule failure  # noqa: E501

        :param referral_id: The referral_id of this RuleFailureReason.  # noqa: E501
        :type: int
        """
        self._referral_id = referral_id

    @property
    def referral_value(self):
        """Gets the referral_value of this RuleFailureReason.  # noqa: E501

        The value of the referral that was being evaluated at the time of the rule failure  # noqa: E501

        :return: The referral_value of this RuleFailureReason.  # noqa: E501
        :rtype: str
        """
        return self._referral_value

    @referral_value.setter
    def referral_value(self, referral_value):
        """Sets the referral_value of this RuleFailureReason.

        The value of the referral that was being evaluated at the time of the rule failure  # noqa: E501

        :param referral_value: The referral_value of this RuleFailureReason.  # noqa: E501
        :type: str
        """
        self._referral_value = referral_value

    @property
    def rule_index(self):
        """Gets the rule_index of this RuleFailureReason.  # noqa: E501

        The index of the rule that failed within the ruleset  # noqa: E501

        :return: The rule_index of this RuleFailureReason.  # noqa: E501
        :rtype: int
        """
        return self._rule_index

    @rule_index.setter
    def rule_index(self, rule_index):
        """Sets the rule_index of this RuleFailureReason.

        The index of the rule that failed within the ruleset  # noqa: E501

        :param rule_index: The rule_index of this RuleFailureReason.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and rule_index is None:  # noqa: E501
            raise ValueError("Invalid value for `rule_index`, must not be `None`")  # noqa: E501

        self._rule_index = rule_index

    @property
    def rule_name(self):
        """Gets the rule_name of this RuleFailureReason.  # noqa: E501

        The name of the rule that failed within the ruleset  # noqa: E501

        :return: The rule_name of this RuleFailureReason.  # noqa: E501
        :rtype: str
        """
        return self._rule_name

    @rule_name.setter
    def rule_name(self, rule_name):
        """Sets the rule_name of this RuleFailureReason.

        The name of the rule that failed within the ruleset  # noqa: E501

        :param rule_name: The rule_name of this RuleFailureReason.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and rule_name is None:  # noqa: E501
            raise ValueError("Invalid value for `rule_name`, must not be `None`")  # noqa: E501

        self._rule_name = rule_name

    @property
    def condition_index(self):
        """Gets the condition_index of this RuleFailureReason.  # noqa: E501

        The index of the condition that failed  # noqa: E501

        :return: The condition_index of this RuleFailureReason.  # noqa: E501
        :rtype: int
        """
        return self._condition_index

    @condition_index.setter
    def condition_index(self, condition_index):
        """Sets the condition_index of this RuleFailureReason.

        The index of the condition that failed  # noqa: E501

        :param condition_index: The condition_index of this RuleFailureReason.  # noqa: E501
        :type: int
        """
        self._condition_index = condition_index

    @property
    def effect_index(self):
        """Gets the effect_index of this RuleFailureReason.  # noqa: E501

        The index of the effect that failed  # noqa: E501

        :return: The effect_index of this RuleFailureReason.  # noqa: E501
        :rtype: int
        """
        return self._effect_index

    @effect_index.setter
    def effect_index(self, effect_index):
        """Sets the effect_index of this RuleFailureReason.

        The index of the effect that failed  # noqa: E501

        :param effect_index: The effect_index of this RuleFailureReason.  # noqa: E501
        :type: int
        """
        self._effect_index = effect_index

    @property
    def details(self):
        """Gets the details of this RuleFailureReason.  # noqa: E501

        More details about the failure  # noqa: E501

        :return: The details of this RuleFailureReason.  # noqa: E501
        :rtype: str
        """
        return self._details

    @details.setter
    def details(self, details):
        """Sets the details of this RuleFailureReason.

        More details about the failure  # noqa: E501

        :param details: The details of this RuleFailureReason.  # noqa: E501
        :type: str
        """
        self._details = details

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RuleFailureReason):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, RuleFailureReason):
            return True

        return self.to_dict() != other.to_dict()
| |
from unittest import mock
from django.apps.registry import apps as global_apps
from django.db import DatabaseError, connection, migrations, models
from django.db.migrations.exceptions import InvalidMigrationPlan
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from django.db.migrations.state import ProjectState
from django.test import (
SimpleTestCase, modify_settings, override_settings, skipUnlessDBFeature,
)
from django.test.utils import isolate_lru_cache
from .test_base import MigrationTestBase
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
class ExecutorTests(MigrationTestBase):
"""
Tests the migration executor (full end-to-end running).
Bear in mind that if these are failing you should fix the other
test failures first, as they may be propagating into here.
"""
available_apps = ["migrations", "migrations2", "django.contrib.auth", "django.contrib.contenttypes"]
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_run(self):
        """
        Tests running a simple set of migrations.

        Plans and applies 0001+0002 forwards, then plans and applies the
        full backwards migration, asserting table existence at each step.
        """
        executor = MigrationExecutor(connection)
        # Let's look at the plan first and make sure it's up to scratch
        plan = executor.migration_plan([("migrations", "0002_second")])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
                (executor.loader.graph.nodes["migrations", "0002_second"], False),
            ],
        )
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
        # Alright, let's try running it
        executor.migrate([("migrations", "0002_second")])
        # Are the tables there now?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_book")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Alright, let's undo what we did
        plan = executor.migration_plan([("migrations", None)])
        # Backwards plan is in reverse order, with the backwards flag True.
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0002_second"], True),
                (executor.loader.graph.nodes["migrations", "0001_initial"], True),
            ],
        )
        executor.migrate([("migrations", None)])
        # Are the tables gone?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_run_with_squashed(self):
        """
        Tests running a squashed migration from zero (should ignore what it replaces)
        """
        executor = MigrationExecutor(connection)
        # Check our leaf node is the squashed one
        leaves = [key for key in executor.loader.graph.leaf_nodes() if key[0] == "migrations"]
        self.assertEqual(leaves, [("migrations", "0001_squashed_0002")])
        # Check the plan: only the squashed migration, not its replaced ones.
        plan = executor.migration_plan([("migrations", "0001_squashed_0002")])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_squashed_0002"], False),
            ],
        )
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
        # Alright, let's try running it
        executor.migrate([("migrations", "0001_squashed_0002")])
        # Are the tables there now?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_book")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Alright, let's undo what we did. Should also just use squashed.
        plan = executor.migration_plan([("migrations", None)])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_squashed_0002"], True),
            ],
        )
        executor.migrate([("migrations", None)])
        # Are the tables gone?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
    @override_settings(
        MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed'},
    )
    def test_migrate_backward_to_squashed_migration(self):
        """Migrating backwards to a target inside a squashed migration
        unapplies only the squashed-in parts after that target."""
        executor = MigrationExecutor(connection)
        try:
            self.assertTableNotExists('migrations_author')
            self.assertTableNotExists('migrations_book')
            executor.migrate([('migrations', '0001_squashed_0002')])
            self.assertTableExists('migrations_author')
            self.assertTableExists('migrations_book')
            executor.loader.build_graph()
            # Migrate backward to a squashed migration.
            executor.migrate([('migrations', '0001_initial')])
            self.assertTableExists('migrations_author')
            self.assertTableNotExists('migrations_book')
        finally:
            # Unmigrate everything (cleanup runs even if assertions fail).
            executor = MigrationExecutor(connection)
            executor.migrate([('migrations', None)])
            self.assertTableNotExists('migrations_author')
            self.assertTableNotExists('migrations_book')
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_non_atomic"})
    def test_non_atomic_migration(self):
        """
        Applying a non-atomic migration works as expected.

        The migration aborts partway through; because it is non-atomic, the
        work done before the failure (table + row) must persist.
        """
        executor = MigrationExecutor(connection)
        with self.assertRaisesMessage(RuntimeError, "Abort migration"):
            executor.migrate([("migrations", "0001_initial")])
        # Partial effects of the failed migration are kept (no rollback).
        self.assertTableExists("migrations_publisher")
        migrations_apps = executor.loader.project_state(("migrations", "0001_initial")).apps
        Publisher = migrations_apps.get_model("migrations", "Publisher")
        self.assertTrue(Publisher.objects.exists())
        self.assertTableNotExists("migrations_book")
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_atomic_operation"})
    def test_atomic_operation_in_non_atomic_migration(self):
        """
        An atomic operation is properly rolled back inside a non-atomic
        migration.
        """
        executor = MigrationExecutor(connection)
        with self.assertRaisesMessage(RuntimeError, "Abort migration"):
            executor.migrate([("migrations", "0001_initial")])
        migrations_apps = executor.loader.project_state(("migrations", "0001_initial")).apps
        Editor = migrations_apps.get_model("migrations", "Editor")
        # The atomic operation's row insert must have been rolled back.
        self.assertFalse(Editor.objects.exists())
        # Record previous migration as successful.
        executor.migrate([("migrations", "0001_initial")], fake=True)
        # Rebuild the graph to reflect the new DB state.
        executor.loader.build_graph()
        # Migrating backwards is also atomic.
        with self.assertRaisesMessage(RuntimeError, "Abort migration"):
            executor.migrate([("migrations", None)])
        self.assertFalse(Editor.objects.exists())
    @override_settings(MIGRATION_MODULES={
        "migrations": "migrations.test_migrations",
        "migrations2": "migrations2.test_migrations_2",
    })
    def test_empty_plan(self):
        """
        Re-planning a full migration of a fully-migrated set doesn't
        perform spurious unmigrations and remigrations.

        There was previously a bug where the executor just always performed the
        backwards plan for applied migrations - which even for the most recent
        migration in an app, might include other, dependent apps, and these
        were being unmigrated.
        """
        # Make the initial plan, check it
        executor = MigrationExecutor(connection)
        plan = executor.migration_plan([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
                (executor.loader.graph.nodes["migrations", "0002_second"], False),
                (executor.loader.graph.nodes["migrations2", "0001_initial"], False),
            ],
        )
        # Fake-apply all migrations
        executor.migrate([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial")
        ], fake=True)
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Now plan a second time and make sure it's empty
        plan = executor.migration_plan([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ])
        self.assertEqual(plan, [])
        # The resulting state should include applied migrations.
        state = executor.migrate([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ])
        self.assertIn(('migrations', 'book'), state.models)
        self.assertIn(('migrations', 'author'), state.models)
        self.assertIn(('migrations2', 'otherauthor'), state.models)
        # Erase all the fake records (cleanup; no tables were ever created).
        executor.recorder.record_unapplied("migrations2", "0001_initial")
        executor.recorder.record_unapplied("migrations", "0002_second")
        executor.recorder.record_unapplied("migrations", "0001_initial")
    @override_settings(MIGRATION_MODULES={
        "migrations": "migrations.test_migrations",
        "migrations2": "migrations2.test_migrations_2_no_deps",
    })
    def test_mixed_plan_not_supported(self):
        """
        Although the MigrationExecutor interfaces allows for mixed migration
        plans (combined forwards and backwards migrations) this is not
        supported.
        """
        # Prepare for mixed plan
        executor = MigrationExecutor(connection)
        plan = executor.migration_plan([("migrations", "0002_second")])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
                (executor.loader.graph.nodes["migrations", "0002_second"], False),
            ],
        )
        executor.migrate(None, plan)
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        self.assertIn(('migrations', '0001_initial'), executor.loader.applied_migrations)
        self.assertIn(('migrations', '0002_second'), executor.loader.applied_migrations)
        self.assertNotIn(('migrations2', '0001_initial'), executor.loader.applied_migrations)
        # Generate mixed plan: backwards for 'migrations', forwards for
        # 'migrations2' — the executor must refuse to run it.
        plan = executor.migration_plan([
            ("migrations", None),
            ("migrations2", "0001_initial"),
        ])
        msg = (
            'Migration plans with both forwards and backwards migrations are '
            'not supported. Please split your migration process into separate '
            'plans of only forwards OR backwards migrations.'
        )
        with self.assertRaisesMessage(InvalidMigrationPlan, msg) as cm:
            executor.migrate(None, plan)
        # The offending plan is attached to the exception for inspection.
        self.assertEqual(
            cm.exception.args[1],
            [
                (executor.loader.graph.nodes["migrations", "0002_second"], True),
                (executor.loader.graph.nodes["migrations", "0001_initial"], True),
                (executor.loader.graph.nodes["migrations2", "0001_initial"], False),
            ],
        )
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        executor.migrate([
            ("migrations", None),
            ("migrations2", None),
        ])
        # Are the tables gone?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
        self.assertTableNotExists("migrations2_otherauthor")
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_soft_apply(self):
        """
        Tests detection of initial migrations already having been applied.
        """
        state = {"faked": None}

        def fake_storer(phase, migration=None, fake=None):
            # Progress callback: records whether the last migration step
            # was faked, so assertions below can inspect it.
            state["faked"] = fake
        executor = MigrationExecutor(connection, progress_callback=fake_storer)
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Run it normally
        self.assertEqual(
            executor.migration_plan([("migrations", "0001_initial")]),
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
            ],
        )
        executor.migrate([("migrations", "0001_initial")])
        # Are the tables there now?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # We shouldn't have faked that one
        self.assertIs(state["faked"], False)
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Fake-reverse that
        executor.migrate([("migrations", None)], fake=True)
        # Are the tables still there?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Make sure that was faked
        self.assertIs(state["faked"], True)
        # Finally, migrate forwards; this should fake-apply our initial migration
        executor.loader.build_graph()
        self.assertEqual(
            executor.migration_plan([("migrations", "0001_initial")]),
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
            ],
        )
        # Applying the migration should raise a database level error
        # because we haven't given the --fake-initial option
        with self.assertRaises(DatabaseError):
            executor.migrate([("migrations", "0001_initial")])
        # Reset the faked state
        state = {"faked": None}
        # Allow faking of initial CreateModel operations
        executor.migrate([("migrations", "0001_initial")], fake_initial=True)
        self.assertIs(state["faked"], True)
        # And migrate back to clean up the database
        executor.loader.build_graph()
        executor.migrate([("migrations", None)])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
@override_settings(
    MIGRATION_MODULES={
        "migrations": "migrations.test_migrations_custom_user",
        "django.contrib.auth": "django.contrib.auth.migrations",
    },
    AUTH_USER_MODEL="migrations.Author",
)
def test_custom_user(self):
    """
    Regression test for #22325 - references to a custom user model defined in the
    same app are not resolved correctly.
    """
    with isolate_lru_cache(global_apps.get_swappable_settings_name):
        executor = MigrationExecutor(connection)
        self.assertTableNotExists('migrations_author')
        self.assertTableNotExists('migrations_tribble')
        # Migrate forwards
        executor.migrate([('migrations', '0001_initial')])
        self.assertTableExists('migrations_author')
        self.assertTableExists('migrations_tribble')
        # The soft-application detection works.
        # Change table_names to not return auth_user during this as it
        # wouldn't be there in a normal run, and ensure migrations.Author
        # exists in the global app registry temporarily.
        old_table_names = connection.introspection.table_names
        connection.introspection.table_names = lambda c: [
            x for x in old_table_names(c) if x != 'auth_user'
        ]
        migrations_apps = executor.loader.project_state(
            ('migrations', '0001_initial'),
        ).apps
        global_apps.get_app_config('migrations').models['author'] = (
            migrations_apps.get_model('migrations', 'author')
        )
        try:
            migration = executor.loader.get_migration('auth', '0001_initial')
            self.assertIs(executor.detect_soft_applied(None, migration)[0], True)
        finally:
            # Always restore the patched introspection method and remove
            # the temporary registry entry, even if the assertion fails.
            connection.introspection.table_names = old_table_names
            del global_apps.get_app_config('migrations').models['author']
        # Migrate back to clean up the database.
        executor.loader.build_graph()
        executor.migrate([('migrations', None)])
        self.assertTableNotExists('migrations_author')
        self.assertTableNotExists('migrations_tribble')
@override_settings(
    MIGRATION_MODULES={
        "migrations": "migrations.test_add_many_to_many_field_initial",
    },
)
def test_detect_soft_applied_add_field_manytomanyfield(self):
    """
    executor.detect_soft_applied() detects ManyToManyField tables from an
    AddField operation. This checks the case of AddField in a migration
    with other operations (0001) and the case of AddField in its own
    migration (0002).
    """
    tables = [
        # from 0001
        "migrations_project",
        "migrations_task",
        "migrations_project_tasks",
        # from 0002
        "migrations_task_projects",
    ]
    executor = MigrationExecutor(connection)
    # Create the tables for 0001 but make it look like the migration hasn't
    # been applied.
    executor.migrate([("migrations", "0001_initial")])
    executor.migrate([("migrations", None)], fake=True)
    for table in tables[:3]:
        self.assertTableExists(table)
    # Table detection sees 0001 is applied but not 0002.
    migration = executor.loader.get_migration("migrations", "0001_initial")
    self.assertIs(executor.detect_soft_applied(None, migration)[0], True)
    migration = executor.loader.get_migration("migrations", "0002_initial")
    self.assertIs(executor.detect_soft_applied(None, migration)[0], False)
    # Create the tables for both migrations but make it look like neither
    # has been applied.
    executor.loader.build_graph()
    executor.migrate([("migrations", "0001_initial")], fake=True)
    executor.migrate([("migrations", "0002_initial")])
    executor.loader.build_graph()
    executor.migrate([("migrations", None)], fake=True)
    # Table detection sees 0002 is applied.
    migration = executor.loader.get_migration("migrations", "0002_initial")
    self.assertIs(executor.detect_soft_applied(None, migration)[0], True)
    # Leave the tables for 0001 except the many-to-many table. That missing
    # table should cause detect_soft_applied() to return False.
    with connection.schema_editor() as editor:
        for table in tables[2:]:
            editor.execute(editor.sql_delete_table % {"table": table})
    migration = executor.loader.get_migration("migrations", "0001_initial")
    self.assertIs(executor.detect_soft_applied(None, migration)[0], False)
    # Cleanup by removing the remaining tables.
    with connection.schema_editor() as editor:
        for table in tables[:2]:
            editor.execute(editor.sql_delete_table % {"table": table})
    for table in tables:
        self.assertTableNotExists(table)
@override_settings(
    INSTALLED_APPS=[
        "migrations.migrations_test_apps.lookuperror_a",
        "migrations.migrations_test_apps.lookuperror_b",
        "migrations.migrations_test_apps.lookuperror_c"
    ]
)
def test_unrelated_model_lookups_forwards(self):
    """
    #24123 - All models of apps already applied which are
    unrelated to the first app being applied are part of the initial model
    state.
    """
    try:
        executor = MigrationExecutor(connection)
        self.assertTableNotExists("lookuperror_a_a1")
        self.assertTableNotExists("lookuperror_b_b1")
        self.assertTableNotExists("lookuperror_c_c1")
        # Apply app b on its own first so its models are "already applied"
        # when the other apps are migrated below.
        executor.migrate([("lookuperror_b", "0003_b3")])
        self.assertTableExists("lookuperror_b_b3")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Migrate forwards -- This led to a lookup LookupErrors because
        # lookuperror_b.B2 is already applied
        executor.migrate([
            ("lookuperror_a", "0004_a4"),
            ("lookuperror_c", "0003_c3"),
        ])
        self.assertTableExists("lookuperror_a_a4")
        self.assertTableExists("lookuperror_c_c3")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
    finally:
        # Cleanup
        executor.migrate([
            ("lookuperror_a", None),
            ("lookuperror_b", None),
            ("lookuperror_c", None),
        ])
        self.assertTableNotExists("lookuperror_a_a1")
        self.assertTableNotExists("lookuperror_b_b1")
        self.assertTableNotExists("lookuperror_c_c1")
@override_settings(
    INSTALLED_APPS=[
        "migrations.migrations_test_apps.lookuperror_a",
        "migrations.migrations_test_apps.lookuperror_b",
        "migrations.migrations_test_apps.lookuperror_c"
    ]
)
def test_unrelated_model_lookups_backwards(self):
    """
    #24123 - All models of apps being unapplied which are
    unrelated to the first app being unapplied are part of the initial
    model state.
    """
    try:
        executor = MigrationExecutor(connection)
        self.assertTableNotExists("lookuperror_a_a1")
        self.assertTableNotExists("lookuperror_b_b1")
        self.assertTableNotExists("lookuperror_c_c1")
        # Apply all three apps forwards first.
        executor.migrate([
            ("lookuperror_a", "0004_a4"),
            ("lookuperror_b", "0003_b3"),
            ("lookuperror_c", "0003_c3"),
        ])
        self.assertTableExists("lookuperror_b_b3")
        self.assertTableExists("lookuperror_a_a4")
        self.assertTableExists("lookuperror_c_c3")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Migrate backwards -- This led to a lookup LookupErrors because
        # lookuperror_b.B2 is not in the initial state (unrelated to app c)
        executor.migrate([("lookuperror_a", None)])
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
    finally:
        # Cleanup
        executor.migrate([
            ("lookuperror_b", None),
            ("lookuperror_c", None)
        ])
        self.assertTableNotExists("lookuperror_a_a1")
        self.assertTableNotExists("lookuperror_b_b1")
        self.assertTableNotExists("lookuperror_c_c1")
@override_settings(
    INSTALLED_APPS=[
        'migrations.migrations_test_apps.mutate_state_a',
        'migrations.migrations_test_apps.mutate_state_b',
    ]
)
def test_unrelated_applied_migrations_mutate_state(self):
    """
    #26647 - Unrelated applied migrations should be part of the final
    state in both directions.
    """
    executor = MigrationExecutor(connection)
    executor.migrate([
        ('mutate_state_b', '0002_add_field'),
    ])
    # Migrate forward.
    executor.loader.build_graph()
    # migrate() returns the resulting project state; the field added by
    # the already-applied mutate_state_b migration must be present in it.
    state = executor.migrate([
        ('mutate_state_a', '0001_initial'),
    ])
    self.assertIn('added', state.models['mutate_state_b', 'b'].fields)
    executor.loader.build_graph()
    # Migrate backward.
    state = executor.migrate([
        ('mutate_state_a', None),
    ])
    self.assertIn('added', state.models['mutate_state_b', 'b'].fields)
    # Clean up.
    executor.migrate([
        ('mutate_state_b', None),
    ])
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_process_callback(self):
    """
    #24129 - Tests callback process
    """
    # Record every invocation of the progress callback verbatim.
    call_args_list = []
    def callback(*args):
        call_args_list.append(args)
    executor = MigrationExecutor(connection, progress_callback=callback)
    # Were the tables there before?
    self.assertTableNotExists("migrations_author")
    self.assertTableNotExists("migrations_tribble")
    executor.migrate([
        ("migrations", "0001_initial"),
        ("migrations", "0002_second"),
    ])
    # Rebuild the graph to reflect the new DB state
    executor.loader.build_graph()
    executor.migrate([
        ("migrations", None),
        ("migrations", None),
    ])
    self.assertTableNotExists("migrations_author")
    self.assertTableNotExists("migrations_tribble")
    migrations = executor.loader.graph.nodes
    # Exact expected callback sequence: render + apply pairs forwards,
    # then render + unapply pairs in reverse order.
    expected = [
        ("render_start",),
        ("render_success",),
        ("apply_start", migrations['migrations', '0001_initial'], False),
        ("apply_success", migrations['migrations', '0001_initial'], False),
        ("apply_start", migrations['migrations', '0002_second'], False),
        ("apply_success", migrations['migrations', '0002_second'], False),
        ("render_start",),
        ("render_success",),
        ("unapply_start", migrations['migrations', '0002_second'], False),
        ("unapply_success", migrations['migrations', '0002_second'], False),
        ("unapply_start", migrations['migrations', '0001_initial'], False),
        ("unapply_success", migrations['migrations', '0001_initial'], False),
    ]
    self.assertEqual(call_args_list, expected)
@override_settings(
    INSTALLED_APPS=[
        "migrations.migrations_test_apps.alter_fk.author_app",
        "migrations.migrations_test_apps.alter_fk.book_app",
    ]
)
def test_alter_id_type_with_fk(self):
    """
    Applying a primary key type alteration on a model that another app's
    model references succeeds (author_app 0002_alter_id with book_app
    already migrated).
    """
    try:
        executor = MigrationExecutor(connection)
        self.assertTableNotExists("author_app_author")
        self.assertTableNotExists("book_app_book")
        # Apply initial migrations
        executor.migrate([
            ("author_app", "0001_initial"),
            ("book_app", "0001_initial"),
        ])
        self.assertTableExists("author_app_author")
        self.assertTableExists("book_app_book")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Apply PK type alteration
        executor.migrate([("author_app", "0002_alter_id")])
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
    finally:
        # We can't simply unapply the migrations here because there is no
        # implicit cast from VARCHAR to INT on the database level.
        with connection.schema_editor() as editor:
            editor.execute(editor.sql_delete_table % {"table": "book_app_book"})
            editor.execute(editor.sql_delete_table % {"table": "author_app_author"})
        self.assertTableNotExists("author_app_author")
        self.assertTableNotExists("book_app_book")
        # Tables are gone already, so only clear the migration records.
        executor.migrate([("author_app", None)], fake=True)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_apply_all_replaced_marks_replacement_as_applied(self):
    """
    Applying all replaced migrations marks replacement as applied (#24628).
    """
    recorder = MigrationRecorder(connection)
    # Place the database in a state where the replaced migrations are
    # partially applied: 0001 is applied, 0002 is not.
    recorder.record_applied("migrations", "0001_initial")
    executor = MigrationExecutor(connection)
    # Use fake because we don't actually have the first migration
    # applied, so the second will fail. And there's no need to actually
    # create/modify tables here, we're just testing the
    # MigrationRecord, which works the same with or without fake.
    executor.migrate([("migrations", "0002_second")], fake=True)
    # Because we've now applied 0001 and 0002 both, their squashed
    # replacement should be marked as applied.
    self.assertIn(
        ("migrations", "0001_squashed_0002"),
        recorder.applied_migrations(),
    )
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_migrate_marks_replacement_applied_even_if_it_did_nothing(self):
    """
    A new squash migration will be marked as applied even if all its
    replaced migrations were previously already applied (#24628).
    """
    recorder = MigrationRecorder(connection)
    # Record all replaced migrations as applied
    recorder.record_applied("migrations", "0001_initial")
    recorder.record_applied("migrations", "0002_second")
    executor = MigrationExecutor(connection)
    executor.migrate([("migrations", "0001_squashed_0002")])
    # Because 0001 and 0002 are both applied, even though this migrate run
    # didn't apply anything new, their squashed replacement should be
    # marked as applied.
    self.assertIn(
        ("migrations", "0001_squashed_0002"),
        recorder.applied_migrations(),
    )
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed'})
def test_migrate_marks_replacement_unapplied(self):
    """
    Migrating back to zero removes the squashed replacement migration
    from the applied-migrations records.
    """
    executor = MigrationExecutor(connection)
    executor.migrate([('migrations', '0001_squashed_0002')])
    try:
        self.assertIn(
            ('migrations', '0001_squashed_0002'),
            executor.recorder.applied_migrations(),
        )
    finally:
        # Unapply everything (also cleans up if the assertion failed).
        executor.loader.build_graph()
        executor.migrate([('migrations', None)])
        self.assertNotIn(
            ('migrations', '0001_squashed_0002'),
            executor.recorder.applied_migrations(),
        )
# When the feature is False, the operation and the record won't be
# performed in a transaction and the test will systematically pass.
@skipUnlessDBFeature('can_rollback_ddl')
def test_migrations_applied_and_recorded_atomically(self):
    """Migrations are applied and recorded atomically."""
    class Migration(migrations.Migration):
        operations = [
            migrations.CreateModel('model', [
                ('id', models.AutoField(primary_key=True)),
            ]),
        ]
    executor = MigrationExecutor(connection)
    # Force the recording step to fail so the whole application must be
    # rolled back.
    with mock.patch('django.db.migrations.executor.MigrationExecutor.record_migration') as record_migration:
        record_migration.side_effect = RuntimeError('Recording migration failed.')
        with self.assertRaisesMessage(RuntimeError, 'Recording migration failed.'):
            executor.apply_migration(
                ProjectState(),
                Migration('0001_initial', 'record_migration'),
            )
            executor.migrate([('migrations', '0001_initial')])
    # The migration isn't recorded as applied since it failed.
    migration_recorder = MigrationRecorder(connection)
    self.assertIs(
        migration_recorder.migration_qs.filter(
            app='record_migration', name='0001_initial',
        ).exists(),
        False,
    )
    # The CreateModel must have been rolled back along with the record.
    self.assertTableNotExists('record_migration_model')
def test_migrations_not_applied_on_deferred_sql_failure(self):
    """Migrations are not recorded if deferred SQL application fails."""
    class DeferredSQL:
        # str() on this object raises, simulating deferred SQL that
        # fails when the schema editor renders and executes it.
        def __str__(self):
            raise DatabaseError('Failed to apply deferred SQL')
    class Migration(migrations.Migration):
        atomic = False
        def apply(self, project_state, schema_editor, collect_sql=False):
            schema_editor.deferred_sql.append(DeferredSQL())
    executor = MigrationExecutor(connection)
    with self.assertRaisesMessage(DatabaseError, 'Failed to apply deferred SQL'):
        executor.apply_migration(
            ProjectState(),
            Migration('0001_initial', 'deferred_sql'),
        )
    # The migration isn't recorded as applied since it failed.
    migration_recorder = MigrationRecorder(connection)
    self.assertIs(
        migration_recorder.migration_qs.filter(
            app='deferred_sql', name='0001_initial',
        ).exists(),
        False,
    )
class FakeLoader:
    """Minimal MigrationLoader stand-in: a graph plus the applied set."""

    def __init__(self, graph, applied):
        self.graph, self.applied_migrations = graph, applied
        # Unit tests below always treat squash replacements as usable.
        self.replace_migrations = True
class FakeMigration:
    """Really all we need is any object with a debug-useful repr."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return 'M<{}>'.format(self.name)
class ExecutorUnitTests(SimpleTestCase):
    """(More) isolated unit tests for executor methods."""
    def test_minimize_rollbacks(self):
        """
        Minimize unnecessary rollbacks in connected apps.
        When you say "./manage.py migrate appA 0001", rather than migrating to
        just after appA-0001 in the linearized migration plan (which could roll
        back migrations in other apps that depend on appA 0001, but don't need
        to be rolled back since we're not rolling back appA 0001), we migrate
        to just before appA-0002.
        """
        # Graph: a2 and b1 both depend on a1; everything is applied.
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(b1, b1_impl)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, a2, a1)
        executor = MigrationExecutor(None)
        executor.loader = FakeLoader(graph, {
            a1: a1_impl,
            b1: b1_impl,
            a2: a2_impl,
        })
        plan = executor.migration_plan({a1})
        # Only a2 is rolled back (True = backwards); b1 stays applied.
        self.assertEqual(plan, [(a2_impl, True)])
    def test_minimize_rollbacks_branchy(self):
        r"""
        Minimize rollbacks when target has multiple in-app children.
        a: 1 <---- 3 <--\
           \ \- 2 <--- 4
            \       \
        b:   \- 1 <--- 2
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        a3_impl = FakeMigration('a3')
        a3 = ('a', '3')
        a4_impl = FakeMigration('a4')
        a4 = ('a', '4')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        b2_impl = FakeMigration('b2')
        b2 = ('b', '2')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(a3, a3_impl)
        graph.add_node(a4, a4_impl)
        graph.add_node(b1, b1_impl)
        graph.add_node(b2, b2_impl)
        graph.add_dependency(None, a2, a1)
        graph.add_dependency(None, a3, a1)
        graph.add_dependency(None, a4, a2)
        graph.add_dependency(None, a4, a3)
        graph.add_dependency(None, b2, b1)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, b2, a2)
        executor = MigrationExecutor(None)
        # Everything is applied; target a1, so all of a's descendants
        # (and b2, which depends on a2) must be unapplied -- but not b1.
        executor.loader = FakeLoader(graph, {
            a1: a1_impl,
            b1: b1_impl,
            a2: a2_impl,
            b2: b2_impl,
            a3: a3_impl,
            a4: a4_impl,
        })
        plan = executor.migration_plan({a1})
        should_be_rolled_back = [b2_impl, a4_impl, a2_impl, a3_impl]
        exp = [(m, True) for m in should_be_rolled_back]
        self.assertEqual(plan, exp)
    def test_backwards_nothing_to_do(self):
        r"""
        If the current state satisfies the given target, do nothing.
        a: 1 <--- 2
        b:     \- 1
        c:     \- 1
        If a1 is applied already and a2 is not, and we're asked to migrate to
        a1, don't apply or unapply b1 or c1, regardless of their current state.
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        c1_impl = FakeMigration('c1')
        c1 = ('c', '1')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(b1, b1_impl)
        graph.add_node(c1, c1_impl)
        graph.add_dependency(None, a2, a1)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, c1, a1)
        executor = MigrationExecutor(None)
        # a1 and b1 applied, a2 and c1 not; target a1 is already satisfied.
        executor.loader = FakeLoader(graph, {
            a1: a1_impl,
            b1: b1_impl,
        })
        plan = executor.migration_plan({a1})
        self.assertEqual(plan, [])
| |
#pylint: disable-msg=C0103, C0301
"""
Pure Python implementation of RTree spatial index
Adaptation of
http://code.google.com/p/pyrtree/
R-tree.
see doc/ref/r-tree-clustering-split-algo.pdf
"""
__author__ = "Sergio J. Rey"
__all__ = ['RTree', 'Rect', 'Rtree']
# Maximum children per node before an overflow triggers a clustering split.
MAXCHILDREN = 10
# Upper bound (exclusive) on the k values tried by the k-means split step.
MAX_KMEANS = 5
# Slack added around query boxes so coincident edges still intersect.
BUFFER = 0.0000001
import math
import random
import time
import array
class Rect(object):
    """
    An axis-aligned rectangle plus two flags (swapped_x and swapped_y)
    recording whether the constructor received min/max reversed on either
    axis.  The flags are stored implicitly: the stored coordinates are
    always normalised (x <= xx, y <= yy) and the swap flags remember the
    original orientation.
    """
    __slots__ = ("x", "y", "xx", "yy", "swapped_x", "swapped_y")

    def __getstate__(self):
        return (self.x, self.y, self.xx, self.yy, self.swapped_x, self.swapped_y)

    def __setstate__(self, state):
        self.x, self.y, self.xx, self.yy, self.swapped_x, self.swapped_y = state

    def __init__(self, minx, miny, maxx, maxy):
        # Note whether either axis arrived reversed, then normalise.
        self.swapped_x = maxx < minx
        self.swapped_y = maxy < miny
        if self.swapped_x:
            minx, maxx = maxx, minx
        if self.swapped_y:
            miny, maxy = maxy, miny
        self.x = minx
        self.y = miny
        self.xx = maxx
        self.yy = maxy

    def coords(self):
        """Normalised (x, y, xx, yy)."""
        return self.x, self.y, self.xx, self.yy

    def overlap(self, orect):
        """Area of the intersection with *orect* (0 when disjoint)."""
        return self.intersect(orect).area()

    def write_raw_coords(self, toarray, idx):
        """
        Write the *un-normalised* coordinates into toarray[idx:idx+4].
        The swap must survive a round-trip through the pool because leaf
        nodes are tagged by a swapped x-pair.
        """
        x0, y0, x1, y1 = self.x, self.y, self.xx, self.yy
        if self.swapped_x:
            x0, x1 = x1, x0
        if self.swapped_y:
            y0, y1 = y1, y0
        toarray[idx] = x0
        toarray[idx + 1] = y0
        toarray[idx + 2] = x1
        toarray[idx + 3] = y1

    def area(self):
        """Width times height."""
        return (self.xx - self.x) * (self.yy - self.y)

    def extent(self):
        """(x, y, width, height)."""
        return (self.x, self.y, self.xx - self.x, self.yy - self.y)

    def grow(self, amt):
        """A new Rect expanded by amt/2 on every side."""
        half = amt * 0.5
        return Rect(self.x - half, self.y - half, self.xx + half, self.yy + half)

    def intersect(self, o):
        """Intersection with *o*, or NullRect when disjoint/degenerate."""
        if self is NullRect:
            return NullRect
        if o is NullRect:
            return NullRect
        nx = max(self.x, o.x)
        ny = max(self.y, o.y)
        nx2 = min(self.xx, o.xx)
        ny2 = min(self.yy, o.yy)
        if nx2 - nx <= 0 or ny2 - ny <= 0:
            return NullRect
        return Rect(nx, ny, nx2, ny2)

    def does_contain(self, o):
        """True when both corners of *o* lie inside this rect."""
        return (self.does_containpoint((o.x, o.y))
                and self.does_containpoint((o.xx, o.yy)))

    def does_intersect(self, o):
        """True when the overlap with *o* has positive area."""
        return self.intersect(o).area() > 0

    def does_containpoint(self, p):
        """True when point ``p = (x, y)`` lies inside (inclusive)."""
        px, py = p
        return self.x <= px <= self.xx and self.y <= py <= self.yy

    def union(self, o):
        """Smallest rect covering both self and *o* (always a new Rect)."""
        if o is NullRect:
            return Rect(self.x, self.y, self.xx, self.yy)
        if self is NullRect:
            return Rect(o.x, o.y, o.xx, o.yy)
        return Rect(min(self.x, o.x), min(self.y, o.y),
                    max(self.xx, o.xx), max(self.yy, o.yy))

    def union_point(self, o):
        """Smallest rect covering self and the point ``o = (x, y)``."""
        px, py = o
        return self.union(Rect(px, py, px, py))

    def diagonal_sq(self):
        """Squared diagonal length (0 for NullRect)."""
        if self is NullRect:
            return 0
        w = self.xx - self.x
        h = self.yy - self.y
        return w * w + h * h

    def diagonal(self):
        """Diagonal length."""
        return math.sqrt(self.diagonal_sq())
# Shared "no extent" sentinel, always compared with `is` throughout the
# module.  Its swap flags are cleared so it is never mistaken for a leaf
# marker (leaves are tagged via a set swapped_x flag).
NullRect = Rect(0.0, 0.0, 0.0, 0.0)
NullRect.swapped_x = False
NullRect.swapped_y = False
def union_all(kids):
    """Bounding rect covering the ``rect`` of every node in *kids*."""
    total = NullRect
    for kid in kids:
        total = total.union(kid.rect)
    # A union of node rects must never carry the leaf (swapped_x) tag.
    assert(not total.swapped_x)
    return total
def Rtree():
    # Factory alias matching the class name used by the C `rtree` package.
    return RTree()
class RTree(object):
    """
    Pure-Python R-tree.

    Node data lives in flat array pools instead of an object graph; a
    single ``_NodeCursor`` (``self.cursor``) is re-pointed at pool slots
    as the tree is traversed.
    """

    def __init__(self):
        # Timing/iteration statistics updated by the split machinery.
        self.stats = {
            "overflow_f": 0,
            "avg_overflow_t_f": 0.0,
            "longest_overflow": 0.0,
            "longest_kmeans": 0.0,
            "sum_kmeans_iter_f": 0,
            "count_kmeans_iter_f": 0,
            "avg_kmeans_iter_f": 0.0
        }
        # This round: not using objects directly -- they take up too much
        # memory, and efficiency goes down the toilet (obviously) if
        # things start to page.  Less obviously: using the object graph
        # directly leads to really long GC pause times, too.
        # Instead, use pools of arrays:
        self.count = 0
        self.leaf_count = 0
        self.rect_pool = array.array('d')
        self.node_pool = array.array('L')
        self.leaf_pool = []  # leaf objects.
        self.cursor = _NodeCursor.create(self, NullRect)

    def _ensure_pool(self, idx):
        # Grow the flat pools so that slot *idx* is addressable
        # (4 doubles per rect, 2 node-pool entries per node).
        if len(self.rect_pool) < 4 * idx:
            self.rect_pool.extend([0, 0, 0, 0] * idx)
            self.node_pool.extend([0, 0] * idx)

    def insert(self, o, orect):
        """Insert object *o* with bounding Rect *orect*."""
        self.cursor.insert(o, orect)
        # The shared cursor must always end up back at the root (slot 0).
        assert(self.cursor.index == 0)

    def query_rect(self, r):
        """Yield every node (interior and leaf) intersecting Rect *r*."""
        for hit in self.cursor.query_rect(r):
            yield hit

    def query_point(self, p):
        """Yield every node whose rect contains point *p*."""
        for hit in self.cursor.query_point(p):
            yield hit

    def walk(self, pred):
        """Traverse the tree, yielding nodes accepted by *pred*."""
        return self.cursor.walk(pred)

    def intersection(self, boundingbox):
        """
        replicate c rtree method
        Returns
        -------
        ids : list
            list of object ids whose bounding boxes intersect with query
            bounding box
        """
        # Grow the query box slightly so coincident edges still hit.
        query = Rect(boundingbox[0] - BUFFER, boundingbox[1] - BUFFER,
                     boundingbox[2] + BUFFER, boundingbox[3] + BUFFER)
        return [node.leaf_obj() for node in self.query_rect(query)
                if node.is_leaf()]

    def add(self, id, boundingbox):
        """
        replicate c rtree method
        Arguments
        ---------
        id: object id
        boundingbox: list
            bounding box [minx, miny, maxx, maxy]
        """
        self.cursor.insert(
            id, Rect(boundingbox[0], boundingbox[1],
                     boundingbox[2], boundingbox[3]))
class _NodeCursor(object):
    """
    Flyweight cursor over an RTree's flat pools.

    A cursor holds the decoded fields of one node (rect, first_child,
    next_sibling).  ``_become(i)`` re-points the same cursor at pool slot
    *i*; ``_save_back()`` writes the current fields into the pools.
    Leaf nodes are tagged by storing their rect with the x-pair swapped
    (``rect.swapped_x``); for a leaf, ``first_child`` indexes
    ``root.leaf_pool`` rather than the node pool.
    """

    @classmethod
    def create(cls, rooto, rect):
        """Allocate a new empty node slot in *rooto*; return a cursor on it."""
        idx = rooto.count
        rooto.count += 1
        rooto._ensure_pool(idx + 1)
        retv = _NodeCursor(rooto, idx, rect, 0, 0)
        retv._save_back()
        return retv

    @classmethod
    def create_with_children(cls, children, rooto):
        """Create an interior node owning *children* (a list of cursors)."""
        rect = union_all([c for c in children])
        assert(not rect.swapped_x)  # interior rects must not carry the leaf tag
        nc = _NodeCursor.create(rooto, rect)
        nc._set_children(children)
        assert(not nc.is_leaf())
        return nc

    @classmethod
    def create_leaf(cls, rooto, leaf_obj, leaf_rect):
        """Create a leaf node wrapping *leaf_obj* bounded by *leaf_rect*."""
        rect = Rect(leaf_rect.x, leaf_rect.y, leaf_rect.xx, leaf_rect.yy)
        rect.swapped_x = True  # Mark as leaf by setting the xswap flag.
        res = _NodeCursor.create(rooto, rect)
        idx = res.index
        res.first_child = rooto.leaf_count  # index into leaf_pool, not node pool
        rooto.leaf_count += 1
        res.next_sibling = 0
        rooto.leaf_pool.append(leaf_obj)
        res._save_back()
        res._become(idx)  # re-decode from the pools to keep the cursor consistent
        assert(res.is_leaf())
        return res

    __slots__ = ("root", "npool", "rpool", "index", "rect",
                 "next_sibling", "first_child")

    def __getstate__(self):
        return (self.root, self.npool, self.rpool, self.index, self.rect, self.next_sibling, self.first_child)

    def __setstate__(self, state):
        self.root, self.npool, self.rpool, self.index, self.rect, self.next_sibling, self.first_child = state

    def __init__(self, rooto, index, rect, first_child, next_sibling):
        self.root = rooto
        self.rpool = rooto.rect_pool
        self.npool = rooto.node_pool
        self.index = index
        self.rect = rect
        self.next_sibling = next_sibling
        self.first_child = first_child

    def walk(self, predicate):
        """Depth-first traversal; yields nodes for which *predicate* holds.

        Children are only visited when the predicate accepted the parent,
        so a non-monotone predicate prunes whole subtrees.
        """
        if (predicate(self, self.leaf_obj())):
            yield self
            if not self.is_leaf():
                for c in self.children():
                    for cr in c.walk(predicate):
                        yield cr

    def query_rect(self, r):
        """ Return things that intersect with 'r'. """
        def p(o, x):
            return r.does_intersect(o.rect)
        for rr in self.walk(p):
            yield rr

    def query_point(self, point):
        """ Query by a point """
        def p(o, x):
            return o.rect.does_containpoint(point)
        for rr in self.walk(p):
            yield rr

    def lift(self):
        """Snapshot this cursor as an independent cursor object."""
        return _NodeCursor(self.root,
                           self.index,
                           self.rect,
                           self.first_child,
                           self.next_sibling)

    def _become(self, index):
        """Re-point this cursor at pool slot *index*, decoding its fields."""
        recti = index * 4
        nodei = index * 2
        rp = self.rpool
        x = rp[recti]
        y = rp[recti + 1]
        xx = rp[recti + 2]
        yy = rp[recti + 3]
        # All-zero coordinates encode the NullRect sentinel.
        if (x == 0.0 and y == 0.0 and xx == 0.0 and yy == 0.0):
            self.rect = NullRect
        else:
            self.rect = Rect(x, y, xx, yy)
        self.next_sibling = self.npool[nodei]
        self.first_child = self.npool[nodei + 1]
        self.index = index

    def is_leaf(self):
        # Leaves are tagged by a swapped x-pair (see create_leaf).
        return self.rect.swapped_x

    def has_children(self):
        return not self.is_leaf() and 0 != self.first_child

    def holds_leaves(self):
        """True when this node's children (if any) are leaves."""
        if 0 == self.first_child:
            return True
        else:
            return self.has_children() and self.get_first_child().is_leaf()

    def get_first_child(self):
        """Return a fresh cursor on this node's first child."""
        c = _NodeCursor(self.root, 0, NullRect, 0, 0)
        c._become(self.first_child)
        return c

    def leaf_obj(self):
        """The stored object for a leaf node, else None."""
        if self.is_leaf():
            return self.root.leaf_pool[self.first_child]
        else:
            return None

    def _save_back(self):
        """Write this cursor's fields into the pools at its slot."""
        rp = self.rpool
        recti = self.index * 4
        nodei = self.index * 2
        if self.rect is not NullRect:
            self.rect.write_raw_coords(rp, recti)
        else:
            rp[recti] = 0
            rp[recti + 1] = 0
            rp[recti + 2] = 0
            rp[recti + 3] = 0
        self.npool[nodei] = self.next_sibling
        self.npool[nodei + 1] = self.first_child

    def nchildren(self):
        """Number of direct children (0 for leaves)."""
        c = 0
        for _ in self.children():
            c += 1
        return c

    def insert(self, leafo, leafrect):
        """Insert object *leafo* bounded by *leafrect* under this node."""
        index = self.index
        # tail recursion, made into loop:
        while True:
            if self.holds_leaves():
                # Leaf level: grow our rect, attach the new leaf, and
                # split if we now exceed MAXCHILDREN.
                self.rect = self.rect.union(leafrect)
                self._insert_child(_NodeCursor.create_leaf(
                    self.root, leafo, leafrect))
                self._balance()
                # done: become the original again
                self._become(index)
                return
            else:
                # Not holding leaves, move down a level in the tree:
                # descend into the child whose rect grows the least.
                # Micro-optimization: inlining union()/area() -- logic is:
                # ignored,child = min([ ((c.rect.union(leafrect)).area() - c.rect.area(),c.index) for c in self.children() ])
                child = None
                minarea = -1.0
                for c in self.children():
                    x, y, xx, yy = c.rect.coords()
                    lx, ly, lxx, lyy = leafrect.coords()
                    nx = x if x < lx else lx
                    nxx = xx if xx > lxx else lxx
                    ny = y if y < ly else ly
                    nyy = yy if yy > lyy else lyy
                    a = (nxx - nx) * (nyy - ny)
                    if minarea < 0 or a < minarea:
                        minarea = a
                        child = c.index
                # End micro-optimization
                self.rect = self.rect.union(leafrect)
                self._save_back()
                self._become(child)  # recurse.

    def _balance(self):
        """Split an overflowing node: k-means cluster its children and
        replace them with one interior node per cluster, keeping running
        timing statistics in ``root.stats``."""
        if (self.nchildren() <= MAXCHILDREN):
            return
        # time.clock() was removed in Python 3.8; prefer perf_counter()
        # and fall back for ancient interpreters.
        timer = getattr(time, "perf_counter", None) or time.clock
        t = timer()
        s_children = [c.lift() for c in self.children()]
        memo = {}
        clusterings = [k_means_cluster(
            self.root, k, s_children) for k in range(2, MAX_KMEANS)]
        # NOTE(review): on a silhouette-score tie, max() falls through to
        # comparing the clusterings themselves -- presumably never hit in
        # practice; confirm before relying on it.
        score, bestcluster = max(
            [(silhouette_coeff(c, memo), c) for c in clusterings])
        nodes = [_NodeCursor.create_with_children(
            c, self.root) for c in bestcluster if len(c) > 0]
        self._set_children(nodes)
        dur = (timer() - t)
        # Fold this split's duration into the running average.
        c = float(self.root.stats["overflow_f"])
        oa = self.root.stats["avg_overflow_t_f"]
        self.root.stats["avg_overflow_t_f"] = (
            dur / (c + 1.0)) + (c * oa / (c + 1.0))
        self.root.stats["overflow_f"] += 1
        self.root.stats["longest_overflow"] = max(
            self.root.stats["longest_overflow"], dur)

    def _set_children(self, cs):
        """Relink this node's child chain to be exactly the cursors *cs*."""
        self.first_child = 0
        if 0 == len(cs):
            return
        pred = None
        for c in cs:
            if pred is not None:
                pred.next_sibling = c.index
                pred._save_back()
            if 0 == self.first_child:
                self.first_child = c.index
            pred = c
        pred.next_sibling = 0
        pred._save_back()
        self._save_back()

    def _insert_child(self, c):
        """Prepend cursor *c* to this node's child chain."""
        c.next_sibling = self.first_child
        self.first_child = c.index
        c._save_back()
        self._save_back()

    def children(self):
        """Yield each direct child by repeatedly *becoming* it.

        The cursor mutates itself while iterating and restores its own
        fields once the sibling chain is exhausted, so callers must run
        the generator to completion before relying on ``self`` again.
        """
        if (0 == self.first_child):
            return
        # Remember who we are so we can restore afterwards.
        idx = self.index
        fc = self.first_child
        ns = self.next_sibling
        r = self.rect
        self._become(self.first_child)
        while True:
            yield self
            if 0 == self.next_sibling:
                break
            else:
                self._become(self.next_sibling)
        # Go back to being the node we were (no pool re-read needed).
        self.index = idx
        self.first_child = fc
        self.next_sibling = ns
        self.rect = r
def avg_diagonals(node, onodes, memo_tab):
    """
    Mean diagonal of ``node.rect ∪ other.rect`` over all *onodes*.

    Results are memoised in *memo_tab* keyed symmetrically on the pair of
    node indices, so repeated calls reuse earlier work.
    """
    nidx = node.index
    total = 0.0
    for other in onodes:
        key = (nidx, other.index)
        rkey = (other.index, nidx)
        if key in memo_tab:
            d = memo_tab[key]
        elif rkey in memo_tab:
            d = memo_tab[rkey]
        else:
            d = node.rect.union(other.rect).diagonal()
            memo_tab[key] = d
        total += d
    return total / len(onodes)
def silhouette_w(node, cluster, next_closest_cluster, memo):
    """Silhouette width of *node*: (b - a) / max(a, b), where a is the
    mean union-diagonal within its own cluster and b the mean against the
    next closest cluster."""
    within = avg_diagonals(node, cluster, memo)
    between = avg_diagonals(node, next_closest_cluster, memo)
    return (between - within) / max(between, within)
def silhouette_coeff(clustering, memo_tab):
    """Mean silhouette width across all clusters in *clustering*."""
    # special case: a single cluster scores a perfect 1.0
    if len(clustering) == 1:
        return 1.0
    per_cluster = []
    for cluster in clustering:
        others = [c for c in clustering if c is not cluster]
        centroids = [center_of_gravity(c) for c in others]
        widths = [
            silhouette_w(node, cluster, others[closest(centroids, node)], memo_tab)
            for node in cluster
        ]
        per_cluster.append(sum(widths) / len(widths))
    return sum(per_cluster) / len(per_cluster)
def center_of_gravity(nodes):
    """Area-weighted centroid of the rectangles of *nodes*.

    NullRect entries are skipped; at least one node must have positive
    total area or the final division raises ZeroDivisionError.
    """
    total_area = 0.0
    weighted_x = 0
    weighted_y = 0
    for node in nodes:
        if node.rect is not NullRect:
            x, y, w, h = node.rect.extent()
            area = w * h
            weighted_x = weighted_x + area * (x + 0.5 * w)
            weighted_y = weighted_y + area * (y + 0.5 * h)
            total_area = total_area + area
    return (weighted_x / total_area), (weighted_y / total_area)
def closest(centroids, node):
    """Index into *centroids* of the point nearest *node*'s centroid."""
    x, y = center_of_gravity([node])
    best_dsq = -1
    best_idx = -1
    for i, (cx, cy) in enumerate(centroids):
        dsq = (cx - x) ** 2 + (cy - y) ** 2
        if best_dsq == -1 or dsq < best_dsq:
            best_dsq = dsq
            best_idx = i
    return best_idx
def k_means_cluster(root, k, nodes):
    """
    Partition *nodes* into at most *k* clusters with Lloyd's algorithm.

    Seeds are the first k nodes; iteration stops when the area-weighted
    cluster centers stop moving.  Timing/iteration counters are folded
    into ``root.stats`` (only when the iterative path runs).  When
    ``len(nodes) <= k`` each node trivially gets its own cluster.
    """
    # time.clock() was removed in Python 3.8; prefer perf_counter() and
    # fall back for ancient interpreters.
    timer = getattr(time, "perf_counter", None) or time.clock
    t = timer()
    if len(nodes) <= k:
        return [[n] for n in nodes]
    ns = list(nodes)
    root.stats["count_kmeans_iter_f"] += 1
    # Initialize: seed with the first k nodes.
    cluster_centers = [center_of_gravity([n]) for n in ns[:k]]
    # Loop until the centers stop moving:
    while True:
        root.stats["sum_kmeans_iter_f"] += 1
        clusters = [[] for _ in cluster_centers]
        for n in ns:
            clusters[closest(cluster_centers, n)].append(n)
        # Drop empty clusters (possible when two seed centroids coincide).
        clusters = [c for c in clusters if len(c) > 0]
        new_cluster_centers = [center_of_gravity(c) for c in clusters]
        if new_cluster_centers == cluster_centers:
            # Converged: update the running statistics and return.
            # (True float division -- the original py2 form floored the
            # average before converting.)
            root.stats["avg_kmeans_iter_f"] = (
                float(root.stats["sum_kmeans_iter_f"]) /
                root.stats["count_kmeans_iter_f"])
            root.stats["longest_kmeans"] = max(
                root.stats["longest_kmeans"], (timer() - t))
            return clusters
        else:
            cluster_centers = new_cluster_centers
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import httplib as http
from flask import request
from flask import send_from_directory
from django.core.urlresolvers import reverse
from geoip import geolite2
from framework import status
from framework import sentry
from framework.auth import cas
from framework.routing import Rule
from framework.flask import redirect
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from osf.models import Institution
from osf.utils import sanitize
from website import util
from website import prereg
from website import settings
from website import language
from website.util import metrics
from website.util import paths
from website import maintenance
from website import landing_pages as landing_page_views
from website import views as website_views
from website.citations import views as citation_views
from website.search import views as search_views
from website.oauth import views as oauth_views
from website.profile.utils import get_profile_image_url
from website.profile import views as profile_views
from website.project import views as project_views
from addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
from website.preprints import views as preprint_views
from website.registries import views as registries_views
from website.reviews import views as reviews_views
from website.institutions import views as institution_views
from website.notifications import views as notification_views
from website.ember_osf_web import views as ember_osf_web_views
from website.closed_challenges import views as closed_challenges_views
from website.identifiers import views as identifier_views
from website.ember_osf_web.decorators import ember_flag_is_active
def get_globals():
    """Context variables that are available for every template rendered by
    OSFWebRenderer.

    Reads the current Flask request and the logged-in user (may be None)
    and returns the dict of template context values.  NOTE: calling this
    pops the pending status messages, so it is not side-effect free.
    """
    user = _get_current_user()
    # Institution affiliations serialized for templates; empty for anonymous visitors.
    user_institutions = [{'id': inst._id, 'name': inst.name, 'logo_path': inst.logo_path_rounded_corners} for inst in user.affiliated_institutions.all()] if user else []
    # GeoIP lookup for the anonymized continent/country fields below; None
    # when the remote address is missing or unknown.
    location = geolite2.lookup(request.remote_addr) if request.remote_addr else None
    if request.host_url != settings.DOMAIN:
        # Request arrived on a non-canonical host (e.g. an institution's
        # custom domain): build the post-login URL against the canonical
        # OSF domain so CAS redirects back correctly.
        try:
            inst_id = Institution.objects.get(domains__icontains=[request.host])._id
            request_login_url = '{}institutions/{}'.format(settings.DOMAIN, inst_id)
        except Institution.DoesNotExist:
            request_login_url = request.url.replace(request.host_url, settings.DOMAIN)
    else:
        request_login_url = request.url
    return {
        'private_link_anonymous': is_private_link_anonymous_view(),
        'user_name': user.username if user else '',
        'user_full_name': user.fullname if user else '',
        'user_id': user._id if user else '',
        'user_locale': user.locale if user and user.locale else '',
        'user_timezone': user.timezone if user and user.timezone else '',
        'user_url': user.url if user else '',
        'user_profile_image': get_profile_image_url(user=user, size=25) if user else '',
        'user_email_verifications': user.unconfirmed_email_info if user else [],
        'user_api_url': user.api_url if user else '',
        'user_entry_point': metrics.get_entry_point(user) if user else '',
        # None (not []) when anonymous, unlike the list built above.
        'user_institutions': user_institutions if user else None,
        'display_name': user.fullname if user else '',
        # Coarse location only; no precise geodata is exposed to templates.
        'anon': {
            'continent': getattr(location, 'continent', None),
            'country': getattr(location, 'country', None),
        },
        'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
        'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
        'dev_mode': settings.DEV_MODE,
        'allow_login': settings.ALLOW_LOGIN,
        'cookie_name': settings.COOKIE_NAME,
        # pop_*() consumes the queued flash messages for this request.
        'status': status.pop_status_messages(),
        'prev_status': status.pop_previous_status_messages(),
        'domain': settings.DOMAIN,
        'api_domain': settings.API_DOMAIN,
        'disk_saving_mode': settings.DISK_SAVING_MODE,
        'language': language,
        'noteworthy_links_node': settings.NEW_AND_NOTEWORTHY_LINKS_NODE,
        'popular_links_node': settings.POPULAR_LINKS_NODE,
        'web_url_for': util.web_url_for,
        'api_url_for': util.api_url_for,
        'api_v2_url': util.api_v2_url,  # URL function for templates
        'api_v2_domain': settings.API_DOMAIN,
        'api_v2_base': util.api_v2_url(''),  # Base url used by JS api helper
        'sanitize': sanitize,
        'sjson': lambda s: sanitize.safe_json(s),
        'webpack_asset': paths.webpack_asset,
        'waterbutler_url': settings.WATERBUTLER_URL,
        'mfr_url': settings.MFR_SERVER_URL,
        'login_url': cas.get_login_url(request_login_url),
        'reauth_url': util.web_url_for('auth_logout', redirect_url=request.url, reauth=True),
        'profile_url': cas.get_profile_url(),
        'enable_institutions': settings.ENABLE_INSTITUTIONS,
        # Keen analytics credentials, split into public/private streams.
        'keen': {
            'public': {
                'project_id': settings.KEEN['public']['project_id'],
                'write_key': settings.KEEN['public']['write_key'],
            },
            'private': {
                'project_id': settings.KEEN['private']['project_id'],
                'write_key': settings.KEEN['private']['write_key'],
            },
        },
        'maintenance': maintenance.get_maintenance(),
        'recaptcha_site_key': settings.RECAPTCHA_SITE_KEY,
        'custom_citations': settings.CUSTOM_CITATIONS,
        'osf_support_email': settings.OSF_SUPPORT_EMAIL,
        'osf_contact_email': settings.OSF_CONTACT_EMAIL,
        'wafflejs_url': '{api_domain}{waffle_url}'.format(api_domain=settings.API_DOMAIN.rstrip('/'), waffle_url=reverse('wafflejs'))
    }
def is_private_link_anonymous_view():
    """Return the ``anonymous`` flag of the view-only link on this request.

    Looks up the private link identified by the ``view_only`` query
    parameter; returns False when no such link exists.
    """
    # Imported here rather than at module scope to avoid a circular import.
    from osf.models import PrivateLink
    key = request.args.get('view_only')
    try:
        flags = PrivateLink.objects.filter(key=key).values_list('anonymous', flat=True)
        return flags.get()
    except PrivateLink.DoesNotExist:
        return False
class OsfWebRenderer(WebRenderer):
    """Render a Mako template with OSF context vars.

    :param trust: Optional. If ``False``, markup-safe escaping will be enabled
    """
    def __init__(self, *args, **kwargs):
        # Inject the OSF-wide template context into every render.
        kwargs.update(data=get_globals)
        super(OsfWebRenderer, self).__init__(*args, **kwargs)
#: Sentinel renderer for views that only redirect or raise an error and
#: therefore never render a template of their own.
notemplate = OsfWebRenderer('', renderer=render_mako_string, trust=False)
# Static files (robots.txt, etc.)
def favicon():
    """Serve the site favicon from the static folder."""
    return send_from_directory(
        settings.STATIC_FOLDER,
        'favicon.ico',
        mimetype='image/vnd.microsoft.icon',
    )
def robots():
    """Serve robots.txt, preferring a deployment-local override."""
    # A robots.local.txt dropped into the static folder wins over the
    # checked-in robots.txt.
    local_override = os.path.join(settings.STATIC_FOLDER, 'robots.local.txt')
    robots_file = 'robots.local.txt' if os.path.exists(local_override) else 'robots.txt'
    return send_from_directory(
        settings.STATIC_FOLDER,
        robots_file,
        mimetype='text/plain',
    )
def sitemap_file(path):
    """Serve one of the generated files under static/sitemaps/.

    Only ``.xml`` and ``.xml.gz`` files are served; any other extension
    raises a 404.
    """
    # Check the longer suffix first so foo.xml.gz is not matched as .xml.
    if path.endswith('.xml.gz'):
        mimetype = 'application/x-gzip'
    elif path.endswith('.xml'):
        mimetype = 'text/xml'
    else:
        raise HTTPError(http.NOT_FOUND)
    return send_from_directory(
        settings.STATIC_FOLDER + '/sitemaps/',
        path,
        mimetype=mimetype,
    )
def ember_app(path=None):
    """Serve the contents of the ember application.

    Resolves the requested file inside the build directory of whichever
    configured ember app matches the request path; unknown paths fall back
    to ``index.html`` so client-side routing works.

    :param path: relative path within the ember build dir, or None for the
        app entry point.
    :raises HTTPError: 404 when no ember app matches the request, or the
        resolved path escapes the build directory.
    """
    ember_app_folder = None
    fp = path or 'index.html'
    for k in settings.EXTERNAL_EMBER_APPS.keys():
        if request.path.strip('/').startswith(k):
            ember_app_folder = os.path.abspath(os.path.join(os.getcwd(), settings.EXTERNAL_EMBER_APPS[k]['path']))
            break
    if not ember_app_folder:
        raise HTTPError(http.NOT_FOUND)
    # Prevent accessing files outside of the ember build dir.  Compare
    # against the folder plus a separator: a bare prefix check would let a
    # sibling directory such as ``/app-evil`` pass for folder ``/app``.
    requested = os.path.abspath(os.path.join(ember_app_folder, fp))
    if not requested.startswith(ember_app_folder + os.sep):
        raise HTTPError(http.NOT_FOUND)
    if not os.path.isfile(os.path.join(ember_app_folder, fp)):
        # Unknown file: hand the SPA its entry point and let it route.
        fp = 'index.html'
    return send_from_directory(ember_app_folder, fp)
@ember_flag_is_active('ember_home_page')
def goodbye():
    """Show the logout confirmation page.

    Logged-in users are bounced back to the dashboard instead.
    """
    user = _get_current_user()
    if user:
        return redirect(util.web_url_for('index'))
    status.push_status_message(language.LOGOUT, kind='success', trust=False)
    return {}
def make_url_map(app):
"""Set up all the routes for the OSF app.
:param app: A Flask/Werkzeug app to bind the rules to.
"""
# Set default views to 404, using URL-appropriate renderers
process_rules(app, [
Rule(
'/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
OsfWebRenderer('', render_mako_string, trust=False)
),
Rule(
'/api/v1/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
json_renderer
),
])
### GUID ###
process_rules(app, [
Rule(
[
'/<guid>/',
'/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
notemplate,
),
Rule(
[
'/api/v1/<guid>/',
'/api/v1/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
json_renderer,
),
])
# Static files
process_rules(app, [
Rule('/favicon.ico', 'get', favicon, json_renderer),
Rule('/robots.txt', 'get', robots, json_renderer),
Rule('/sitemaps/<path>', 'get', sitemap_file, json_renderer),
])
# Ember Applications
if settings.USE_EXTERNAL_EMBER:
# Routes that serve up the Ember application. Hide behind feature flag.
for prefix in settings.EXTERNAL_EMBER_APPS.keys():
process_rules(app, [
Rule(
[
'/<provider>/<guid>/download',
'/<provider>/<guid>/download/',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid_download,
notemplate,
endpoint_suffix='__' + prefix
),
], prefix='/' + prefix)
process_rules(app, [
Rule(
[
'/',
'/<path:path>',
],
'get',
ember_app,
json_renderer,
endpoint_suffix='__' + prefix
),
], prefix='/' + prefix)
if settings.EXTERNAL_EMBER_APPS.get('ember_osf_web'):
process_rules(app, [
Rule(
ember_osf_web_views.routes,
'get',
ember_osf_web_views.use_ember_app,
notemplate
)
])
### Base ###
process_rules(app, [
Rule(
'/dashboard/',
'get',
website_views.dashboard,
OsfWebRenderer('home.mako', trust=False)
),
Rule(
'/myprojects/',
'get',
website_views.my_projects,
OsfWebRenderer('my_projects.mako', trust=False)
),
Rule(
'/reproducibility/',
'get',
website_views.reproducibility,
notemplate
),
Rule('/about/', 'get', website_views.redirect_about, notemplate),
Rule('/help/', 'get', website_views.redirect_help, notemplate),
Rule('/faq/', 'get', website_views.redirect_faq, notemplate),
Rule(['/getting-started/', '/getting-started/email/', '/howosfworks/'], 'get', website_views.redirect_getting_started, notemplate),
Rule('/support/', 'get', website_views.support, OsfWebRenderer('public/pages/support.mako', trust=False)),
Rule(
'/explore/',
'get',
discovery_views.redirect_explore_to_activity,
notemplate
),
Rule(
[
'/messages/',
],
'get',
{},
OsfWebRenderer('public/comingsoon.mako', trust=False)
),
Rule(
'/view/<meeting>/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting.mako', trust=False),
),
Rule(
'/view/<meeting>/plain/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting_plain.mako', trust=False),
endpoint_suffix='__plain',
),
Rule(
'/api/v1/view/<meeting>/',
'get',
conference_views.conference_data,
json_renderer,
),
Rule(
'/meetings/',
'get',
conference_views.conference_view,
OsfWebRenderer('public/pages/meeting_landing.mako', trust=False),
),
Rule(
'/api/v1/meetings/submissions/',
'get',
conference_views.conference_submissions,
json_renderer,
),
Rule(
'/presentations/',
'get',
conference_views.redirect_to_meetings,
json_renderer,
),
Rule(
'/news/',
'get',
website_views.redirect_to_cos_news,
notemplate
),
Rule(
[
'/rr/',
'/registeredreports/',
'/registeredreport/',
],
'get',
registries_views.registered_reports_landing,
OsfWebRenderer('registered_reports_landing.mako', trust=False)
),
Rule(
'/erpc/',
'get',
closed_challenges_views.erpc_landing_page,
OsfWebRenderer('erpc_landing_page.mako', trust=False)
),
Rule(
'/prereg/',
'get',
prereg.prereg_landing_page,
OsfWebRenderer('prereg_landing_page.mako', trust=False)
),
Rule(
'/preprints/',
'get',
preprint_views.preprint_landing_page,
OsfWebRenderer('public/pages/preprint_landing.mako', trust=False),
),
Rule(
'/registries/',
'get',
registries_views.registries_landing_page,
OsfWebRenderer('public/pages/registries_landing.mako', trust=False),
),
Rule(
'/reviews/',
'get',
reviews_views.reviews_landing_page,
OsfWebRenderer('public/pages/reviews_landing.mako', trust=False),
),
Rule(
'/preprint/',
'get',
preprint_views.preprint_redirect,
notemplate,
),
Rule(
[
'/api/v1/<campaign>/draft_registrations/',
'/api/v1/draft_registrations/'
],
'get',
registries_views.draft_registrations,
json_renderer,
),
])
# Site-wide API routes
process_rules(app, [
Rule(
'/citations/styles/',
'get',
citation_views.list_citation_styles,
json_renderer,
),
], prefix='/api/v1')
process_rules(app, [
Rule(
[
'/project/<pid>/<addon>/settings/disable/',
'/project/<pid>/node/<nid>/<addon>/settings/disable/',
],
'post',
addon_views.disable_addon,
json_renderer,
),
Rule(
'/profile/<uid>/<addon>/settings/',
'get',
addon_views.get_addon_user_config,
json_renderer,
),
], prefix='/api/v1')
# OAuth
process_rules(app, [
Rule(
'/oauth/connect/<service_name>/',
'get',
oauth_views.oauth_connect,
json_renderer,
),
Rule(
'/oauth/callback/<service_name>/',
'get',
oauth_views.oauth_callback,
OsfWebRenderer('util/oauth_complete.mako', trust=False),
),
])
process_rules(app, [
Rule(
[
'/oauth/accounts/<external_account_id>/',
],
'delete',
oauth_views.oauth_disconnect,
json_renderer,
)
], prefix='/api/v1')
process_rules(app, [
Rule('/confirmed_emails/', 'put', auth_views.unconfirmed_email_add, json_renderer),
Rule('/confirmed_emails/', 'delete', auth_views.unconfirmed_email_remove, json_renderer)
], prefix='/api/v1')
### Metadata ###
process_rules(app, [
Rule(
[
'/project/<pid>/comments/timestamps/',
'/project/<pid>/node/<nid>/comments/timestamps/',
],
'put',
project_views.comment.update_comments_timestamp,
json_renderer,
),
Rule(
[
'/project/<pid>/citation/',
'/project/<pid>/node/<nid>/citation/',
],
'get',
citation_views.node_citation,
json_renderer,
),
], prefix='/api/v1')
### Forms ###
process_rules(app, [
Rule('/forms/signin/', 'get', website_views.signin_form, json_renderer),
Rule('/forms/forgot_password/', 'get', website_views.forgot_password_form, json_renderer),
], prefix='/api/v1')
### Discovery ###
process_rules(app, [
Rule(
'/explore/activity/',
'get',
discovery_views.redirect_explore_activity_to_activity,
notemplate
),
Rule(
'/activity/',
'get',
discovery_views.activity,
OsfWebRenderer('public/pages/active_nodes.mako', trust=False)
),
])
### Auth ###
process_rules(app, [
# confirm email
Rule(
'/confirm/<uid>/<token>/',
'get',
auth_views.confirm_email_get,
notemplate
),
# confirm email for login through external identity provider
Rule(
'/confirm/external/<uid>/<token>/',
'get',
auth_views.external_login_confirm_email_get,
notemplate
),
# reset password get
Rule(
'/resetpassword/<uid>/<token>/',
'get',
auth_views.reset_password_get,
OsfWebRenderer('public/resetpassword.mako', render_mako_string, trust=False)
),
# reset password post
Rule(
'/resetpassword/<uid>/<token>/',
'post',
auth_views.reset_password_post,
OsfWebRenderer('public/resetpassword.mako', render_mako_string, trust=False)
),
# resend confirmation get
Rule(
'/resend/',
'get',
auth_views.resend_confirmation_get,
OsfWebRenderer('resend.mako', render_mako_string, trust=False)
),
# resend confirmation post
Rule(
'/resend/',
'post',
auth_views.resend_confirmation_post,
OsfWebRenderer('resend.mako', render_mako_string, trust=False)
),
# oauth user email get
Rule(
'/external-login/email',
'get',
auth_views.external_login_email_get,
OsfWebRenderer('external_login_email.mako', render_mako_string, trust=False)
),
# oauth user email post
Rule(
'/external-login/email',
'post',
auth_views.external_login_email_post,
OsfWebRenderer('external_login_email.mako', render_mako_string, trust=False)
),
# user sign up page
Rule(
'/register/',
'get',
auth_views.auth_register,
OsfWebRenderer('public/register.mako', trust=False)
),
# osf login and campaign login
Rule(
[
'/login/',
'/account/'
],
'get',
auth_views.auth_login,
notemplate
),
# create user account via api
Rule(
'/api/v1/register/',
'post',
auth_views.register_user,
json_renderer
),
# osf logout and cas logout
Rule(
'/logout/',
'get',
auth_views.auth_logout,
notemplate
),
# forgot password get
Rule(
'/forgotpassword/',
'get',
auth_views.forgot_password_get,
OsfWebRenderer('public/forgot_password.mako', trust=False)
),
# forgot password post
Rule(
'/forgotpassword/',
'post',
auth_views.forgot_password_post,
OsfWebRenderer('public/forgot_password.mako', trust=False)
),
Rule(
'/login/connected_tools/',
'get',
landing_page_views.connected_tools,
notemplate
),
Rule(
'/login/enriched_profile/',
'get',
landing_page_views.enriched_profile,
notemplate
),
])
### Profile ###
# Web
process_rules(app, [
Rule(
'/profile/',
'get',
profile_views.profile_view,
OsfWebRenderer('profile.mako', trust=False)
),
Rule(
'/profile/<uid>/',
'get',
profile_views.profile_view_id,
OsfWebRenderer('profile.mako', trust=False)
),
# unregistered user claim account (contributor-ship of a project)
# user will be required to set email and password
# claim token must be present in query parameter
Rule(
['/user/<uid>/<pid>/claim/'],
['get', 'post'],
project_views.contributor.claim_user_form,
OsfWebRenderer('claim_account.mako', trust=False)
),
# registered user claim account (contributor-ship of a project)
# user will be required to verify password
# claim token must be present in query parameter
Rule(
['/user/<uid>/<pid>/claim/verify/<token>/'],
['get', 'post'],
project_views.contributor.claim_user_registered,
OsfWebRenderer('claim_account_registered.mako', trust=False)
),
Rule(
'/settings/',
'get',
profile_views.user_profile,
OsfWebRenderer('profile/settings.mako', trust=False),
),
Rule(
[
'/project/<pid>/addons/',
'/project/<pid>/node/<nid>/addons/',
],
'get',
project_views.node.node_addons,
OsfWebRenderer('project/addons.mako', trust=False)
),
Rule(
'/settings/account/',
'get',
profile_views.user_account,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/account/password',
'post',
profile_views.user_account_password,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/addons/',
'get',
profile_views.user_addons,
OsfWebRenderer('profile/addons.mako', trust=False),
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
OsfWebRenderer('profile/notifications.mako', trust=False),
),
Rule(
'/settings/applications/',
'get',
profile_views.oauth_application_list,
OsfWebRenderer('profile/oauth_app_list.mako', trust=False)
),
Rule(
'/settings/applications/create/',
'get',
profile_views.oauth_application_register,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/applications/<client_id>/',
'get',
profile_views.oauth_application_detail,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/tokens/',
'get',
profile_views.personal_access_token_list,
OsfWebRenderer('profile/personal_tokens_list.mako', trust=False)
),
Rule(
'/settings/tokens/create/',
'get',
profile_views.personal_access_token_register,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
),
Rule(
'/settings/tokens/<_id>/',
'get',
profile_views.personal_access_token_detail,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
)
])
# API
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view_json, json_renderer),
Rule('/profile/', 'put', profile_views.update_user, json_renderer),
Rule('/resend/', 'put', profile_views.resend_confirmation, json_renderer),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id_json, json_renderer),
# Used by profile.html
Rule('/user/<uid>/<pid>/claim/email/', 'post',
project_views.contributor.claim_user_post, json_renderer),
Rule(
'/profile/export/',
'post',
profile_views.request_export,
json_renderer,
),
Rule(
'/profile/deactivate/',
'post',
profile_views.request_deactivation,
json_renderer,
),
Rule(
'/profile/cancel_request_deactivation/',
'post',
profile_views.cancel_request_deactivation,
json_renderer,
),
Rule(
'/profile/logins/',
'patch',
profile_views.delete_external_identity,
json_renderer,
),
# Rules for user profile configuration
Rule('/settings/names/', 'get', profile_views.serialize_names, json_renderer),
Rule('/settings/names/', 'put', profile_views.unserialize_names, json_renderer),
Rule('/settings/names/impute/', 'get', profile_views.impute_names, json_renderer),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'get',
profile_views.serialize_social,
json_renderer,
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'get',
profile_views.serialize_jobs,
json_renderer,
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'get',
profile_views.serialize_schools,
json_renderer,
),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'put',
profile_views.unserialize_social,
json_renderer
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'put',
profile_views.unserialize_jobs,
json_renderer
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'put',
profile_views.unserialize_schools,
json_renderer
),
], prefix='/api/v1',)
### Search ###
# Web
process_rules(app, [
Rule(
'/search/',
'get',
search_views.search_view,
OsfWebRenderer('search.mako', trust=False)
),
Rule(
'/share/registration/',
'get',
{'register': settings.SHARE_REGISTRATION_URL},
json_renderer
),
Rule(
'/api/v1/user/search/',
'get', search_views.search_contributor,
json_renderer
),
Rule(
'/api/v1/search/node/',
'post',
project_views.node.search_node,
json_renderer,
),
])
# API
process_rules(app, [
Rule(['/search/', '/search/<type>/'], ['get', 'post'], search_views.search_search, json_renderer),
Rule('/search/projects/', 'get', search_views.search_projects_by_title, json_renderer),
Rule('/share/search/', 'get', website_views.legacy_share_v1_search, json_renderer),
], prefix='/api/v1')
# Institution
process_rules(app, [
Rule('/institutions/<inst_id>/', 'get', institution_views.view_institution, OsfWebRenderer('institution.mako', trust=False))
])
# Project
# Web
process_rules(app, [
# '/' route loads home.mako if logged in, otherwise loads landing.mako
Rule('/', 'get', website_views.index, OsfWebRenderer('index.mako', trust=False)),
Rule('/goodbye/', 'get', goodbye, OsfWebRenderer('landing.mako', trust=False)),
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'get',
project_views.node.view_project,
OsfWebRenderer('project/project.mako', trust=False)
),
# Create a new subproject/component
Rule(
'/project/<pid>/newnode/',
'post',
project_views.node.project_new_node,
notemplate
),
Rule('/project/new/<pid>/beforeTemplate/', 'get',
project_views.node.project_before_template, json_renderer),
Rule(
[
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
],
'get',
project_views.node.node_contributors,
OsfWebRenderer('project/contributors.mako', trust=False),
),
Rule(
[
'/project/<pid>/settings/',
'/project/<pid>/node/<nid>/settings/',
],
'get',
project_views.node.node_setting,
OsfWebRenderer('project/settings.mako', trust=False)
),
# Permissions
Rule( # TODO: Where, if anywhere, is this route used?
[
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
],
'post',
project_views.node.project_set_privacy,
OsfWebRenderer('project/project.mako', trust=False)
),
### Logs ###
# View forks
Rule(
[
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
],
'get',
project_views.node.node_forks,
OsfWebRenderer('project/forks.mako', trust=False)
),
# Registrations
Rule(
[
'/project/<pid>/register/',
'/project/<pid>/node/<nid>/register/',
],
'get',
project_views.register.node_register_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/register/<metaschema_id>/',
'/project/<pid>/node/<nid>/register/<metaschema_id>/',
],
'get',
project_views.register.node_register_template_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'get',
project_views.node.node_registrations,
OsfWebRenderer('project/registrations.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'post',
project_views.drafts.new_draft_registration,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/',
],
'get',
project_views.drafts.edit_draft_registration_page,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
],
'get',
project_views.drafts.draft_before_register_page,
OsfWebRenderer('project/register_draft.mako', trust=False)),
Rule(
[
'/project/<pid>/retraction/',
'/project/<pid>/node/<nid>/retraction/',
],
'get',
project_views.register.node_registration_retraction_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/',
],
'get',
project_views.register.node_registration_retraction_get,
OsfWebRenderer('project/retract_registration.mako', trust=False)
),
Rule(
'/ids/<category>/<path:value>/',
'get',
project_views.register.get_referent_by_identifier,
notemplate,
),
Rule(
[
'/project/<pid>/analytics/',
'/project/<pid>/node/<nid>/analytics/',
],
'get',
project_views.node.project_statistics,
OsfWebRenderer('project/statistics.mako', trust=False)
),
### Files ###
# Note: Web endpoint for files view must pass `mode` = `page` to
# include project view data and JS includes
# TODO: Start waterbutler to test
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/',
],
'get',
project_views.file.collect_file_trees,
OsfWebRenderer('project/files.mako', trust=False),
view_kwargs={'mode': 'page'},
),
Rule(
[
'/project/<pid>/files/<provider>/<path:path>/',
'/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_view_or_download_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
'/download/<fid_or_guid>/',
'get',
addon_views.persistent_file_download,
json_renderer,
),
Rule(
[
'/api/v1/project/<pid>/files/<provider>/<path:path>/',
'/api/v1/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_view_or_download_file,
json_renderer
),
Rule(
[
'/project/<pid>/files/deleted/<trashed_id>/',
'/project/<pid>/node/<nid>/files/deleted/<trashed_id>/',
],
'get',
addon_views.addon_deleted_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
# Legacy Addon view file paths
'/project/<pid>/<provider>/files/<path:path>/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/',
'/project/<pid>/<provider>/files/<path:path>/download/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/download/',
# Legacy routes for `download_file`
'/project/<pid>/osffiles/<fid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/download/',
# Legacy routes for `view_file`
'/project/<pid>/osffiles/<fid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/download/<fid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/',
'/project/<pid>/files/<fid>/',
'/project/<pid>/node/<nid>/files/<fid>/',
'/project/<pid>/files/download/<fid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/',
# Legacy routes for `download_file_by_version`
'/project/<pid>/osffiles/<fid>/version/<vid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/download/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/files/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/<fid>/version/<vid>/',
'/project/<pid>/files/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
OsfWebRenderer('project/view_file.mako', trust=False),
),
Rule(
[
# api/v1 Legacy routes for `download_file`
'/api/v1/project/<pid>/osffiles/<fid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/',
'/api/v1/project/<pid>/files/download/<fid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/',
#api/v1 Legacy routes for `download_file_by_version`
'/api/v1/project/<pid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/files/download/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
json_renderer
),
Rule(
[
'/quickfiles/<fid>/'
],
'get',
addon_views.addon_view_or_download_quickfile,
json_renderer
)
])
# API
process_rules(app, [
Rule(
'/email/meeting/',
'post',
conference_views.meeting_hook,
json_renderer,
),
Rule('/mailchimp/hooks/', 'get', profile_views.mailchimp_get_endpoint, json_renderer),
Rule('/mailchimp/hooks/', 'post', profile_views.sync_data_from_mailchimp, json_renderer),
# Create project, used by [coming replacement]
Rule('/project/new/', 'post', project_views.node.project_new_post, json_renderer),
Rule([
'/project/<pid>/contributors_abbrev/',
'/project/<pid>/node/<nid>/contributors_abbrev/',
], 'get', project_views.contributor.get_node_contributors_abbrev, json_renderer),
Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, json_renderer),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, json_renderer),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'get',
project_views.node.get_pointed,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'post',
project_views.node.add_pointers,
json_renderer,
),
Rule(
[
'/pointer/',
],
'post',
project_views.node.add_pointer,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>pointer/',
],
'delete',
project_views.node.remove_pointer,
json_renderer,
),
# Draft Registrations
Rule([
'/project/<pid>/drafts/',
], 'get', project_views.drafts.get_draft_registrations, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'get', project_views.drafts.get_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'put', project_views.drafts.update_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'delete', project_views.drafts.delete_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/submit/',
], 'post', project_views.drafts.submit_draft_for_review, json_renderer),
# Meta Schemas
Rule([
'/project/drafts/schemas/',
], 'get', project_views.drafts.get_metaschemas, json_renderer),
Rule([
'/project/<pid>/get_contributors/',
'/project/<pid>/node/<nid>/get_contributors/',
], 'get', project_views.contributor.get_contributors, json_renderer),
Rule([
'/project/<pid>/get_contributors_from_parent/',
'/project/<pid>/node/<nid>/get_contributors_from_parent/',
], 'get', project_views.contributor.get_contributors_from_parent, json_renderer),
# Reorder contributors
Rule(
[
'/project/<pid>/contributors/manage/',
'/project/<pid>/node/<nid>/contributors/manage/',
],
'POST',
project_views.contributor.project_manage_contributors,
json_renderer,
),
Rule(
[
'/project/<pid>/contributor/remove/',
'/project/<pid>/node/<nid>/contributor/remove/',
],
'POST',
project_views.contributor.project_remove_contributor,
json_renderer,
),
Rule([
'/project/<pid>/get_editable_children/',
'/project/<pid>/node/<nid>/get_editable_children/',
], 'get', project_views.node.get_editable_children, json_renderer),
# Private Link
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'post', project_views.node.project_generate_private_link_post, json_renderer),
Rule([
'/project/<pid>/private_link/edit/',
'/project/<pid>/node/<nid>/private_link/edit/',
], 'put', project_views.node.project_private_link_edit, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'delete', project_views.node.remove_private_link, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'get', project_views.node.private_link_table, json_renderer),
# Create, using existing project as a template
Rule([
'/project/new/<nid>/',
], 'post', project_views.node.project_new_from_template, json_renderer),
# Update
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'put',
project_views.node.update_node,
json_renderer,
),
# Remove
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'delete',
project_views.node.component_remove,
json_renderer,
),
# Reorder components
Rule('/project/<pid>/reorder_components/', 'post',
project_views.node.project_reorder_components, json_renderer),
# Edit node
Rule([
'/project/<pid>/edit/',
'/project/<pid>/node/<nid>/edit/',
], 'post', project_views.node.edit_node, json_renderer),
# Add / remove tags
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'post', project_views.tag.project_add_tag, json_renderer),
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'delete', project_views.tag.project_remove_tag, json_renderer),
# Add / remove contributors
Rule([
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
], 'post', project_views.contributor.project_contributors_post, json_renderer),
# Forks
Rule(
[
'/project/<pid>/fork/before/',
'/project/<pid>/node/<nid>/fork/before/',
], 'get', project_views.node.project_before_fork, json_renderer,
),
Rule(
[
'/project/<pid>/pointer/fork/',
'/project/<pid>/node/<nid>/pointer/fork/',
], 'post', project_views.node.fork_pointer, json_renderer,
),
# Registrations
Rule([
'/project/<pid>/beforeregister/',
'/project/<pid>/node/<nid>/beforeregister',
], 'get', project_views.register.project_before_register, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
], 'post', project_views.drafts.register_draft_registration, json_renderer),
Rule([
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/'
], 'post', project_views.register.node_registration_retraction_post, json_renderer),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'post',
identifier_views.node_identifiers_post,
json_renderer,
),
# Endpoint to fetch Rubeus.JS/Hgrid-formatted data
Rule(
[
'/project/<pid>/files/grid/',
'/project/<pid>/node/<nid>/files/grid/'
],
'get',
project_views.file.grid_data,
json_renderer
),
# Settings
Rule(
'/files/auth/',
'get',
addon_views.get_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/waterbutler/logs/',
'/project/<pid>/node/<nid>/waterbutler/logs/',
],
'put',
addon_views.create_waterbutler_log,
json_renderer,
),
Rule(
[
'/registration/<pid>/callbacks/',
],
'put',
project_views.register.registration_callbacks,
json_renderer,
),
Rule(
'/settings/addons/',
'post',
profile_views.user_choose_addons,
json_renderer,
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
json_renderer,
),
Rule(
'/settings/notifications/',
'post',
profile_views.user_choose_mailing_lists,
json_renderer,
),
Rule(
'/subscriptions/',
'get',
notification_views.get_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/subscriptions/',
'/project/<pid>/node/<nid>/subscriptions/'
],
'get',
notification_views.get_node_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/tree/',
'/project/<pid>/node/<nid>/tree/'
],
'get',
project_views.node.get_node_tree,
json_renderer,
),
Rule(
'/subscriptions/',
'post',
notification_views.configure_subscription,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/addons/',
'/project/<pid>/node/<nid>/settings/addons/',
],
'post',
project_views.node.node_choose_addons,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/comments/',
'/project/<pid>/node/<nid>/settings/comments/',
],
'post',
project_views.node.configure_comments,
json_renderer,
),
# Invite Users
Rule(
[
'/project/<pid>/invite_contributor/',
'/project/<pid>/node/<nid>/invite_contributor/'
],
'post',
project_views.contributor.invite_contributor_post,
json_renderer
)
], prefix='/api/v1')
# Set up static routing for addons
# NOTE: We use nginx to serve static addon assets in production
addon_base_path = os.path.abspath('addons')
if settings.DEV_MODE:
from flask import stream_with_context, Response
import requests
@app.route('/static/addons/<addon>/<path:filename>')
def addon_static(addon, filename):
addon_path = os.path.join(addon_base_path, addon, 'static')
return send_from_directory(addon_path, filename)
@app.route('/ember-cli-live-reload.js')
def ember_cli_live_reload():
req = requests.get('{}/ember-cli-live-reload.js'.format(settings.LIVE_RELOAD_DOMAIN), stream=True)
return Response(stream_with_context(req.iter_content()), content_type=req.headers['content-type'])
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations:
"""VpnGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
gateway_name: str,
**kwargs: Any
) -> "_models.VpnGateway":
"""Retrieves the details of a virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.VpnGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.VpnGateway",
**kwargs: Any
) -> "_models.VpnGateway":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        gateway_name: str,
        vpn_gateway_parameters: "_models.VpnGateway",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VpnGateway"]:
        """Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn
         gateway.
        :type vpn_gateway_parameters: ~azure.mgmt.network.v2021_02_01.models.VpnGateway
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VpnGateway or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.VpnGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when starting fresh; a continuation token
        # means we are rehydrating an already-running operation.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                vpn_gateway_parameters=vpn_gateway_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # NOTE: these pops must stay after the initial call above and before the
        # remaining kwargs are forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the VpnGateway model.
            deserialized = self._deserialize('VpnGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        # This operation finalizes via the Azure-AsyncOperation header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
gateway_name: str,
vpn_gateway_parameters: "_models.TagsObject",
**kwargs: Any
) -> Optional["_models.VpnGateway"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VpnGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        gateway_name: str,
        vpn_gateway_parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VpnGateway"]:
        """Updates virtual wan vpn gateway tags.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
        :type vpn_gateway_parameters: ~azure.mgmt.network.v2021_02_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VpnGateway or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.VpnGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PATCH when starting fresh; a continuation token
        # means we are rehydrating an already-running operation.
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                vpn_gateway_parameters=vpn_gateway_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # NOTE: these pops must stay after the initial call above and before the
        # remaining kwargs are forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the VpnGateway model.
            deserialized = self._deserialize('VpnGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        # This operation finalizes via the Azure-AsyncOperation header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        gateway_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a virtual wan vpn gateway.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when starting fresh; a continuation
        # token means we are rehydrating an already-running operation.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # NOTE: these pops must stay after the initial call above and before the
        # remaining kwargs are forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no payload; only the optional cls hook sees the response.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        # This operation finalizes via the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
async def _reset_initial(
self,
resource_group_name: str,
gateway_name: str,
**kwargs: Any
) -> Optional["_models.VpnGateway"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VpnGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._reset_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'} # type: ignore
    async def begin_reset(
        self,
        resource_group_name: str,
        gateway_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VpnGateway"]:
        """Resets the primary of the vpn gateway in the specified resource group.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VpnGateway or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.VpnGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial POST when starting fresh; a continuation token
        # means we are rehydrating an already-running operation.
        if cont_token is None:
            raw_result = await self._reset_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # NOTE: these pops must stay after the initial call above and before the
        # remaining kwargs are forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the VpnGateway model.
            deserialized = self._deserialize('VpnGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # This operation finalizes via the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'}  # type: ignore
async def _start_packet_capture_initial(
self,
resource_group_name: str,
gateway_name: str,
parameters: Optional["_models.VpnGatewayPacketCaptureStartParameters"] = None,
**kwargs: Any
) -> Optional[str]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._start_packet_capture_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'VpnGatewayPacketCaptureStartParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_start_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/startpacketcapture'} # type: ignore
async def begin_start_packet_capture(
self,
resource_group_name: str,
gateway_name: str,
parameters: Optional["_models.VpnGatewayPacketCaptureStartParameters"] = None,
**kwargs: Any
) -> AsyncLROPoller[str]:
"""Starts packet capture on vpn gateway in the specified resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param parameters: Vpn gateway packet capture parameters supplied to start packet capture on
vpn gateway.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.VpnGatewayPacketCaptureStartParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_packet_capture_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/startpacketcapture'} # type: ignore
    async def _stop_packet_capture_initial(
        self,
        resource_group_name: str,
        gateway_name: str,
        parameters: Optional["_models.VpnGatewayPacketCaptureStopParameters"] = None,
        **kwargs: Any
    ) -> Optional[str]:
        """Issue the initial POST that stops packet capture on the gateway.

        Returns the deserialized string payload on a 200 response, or ``None``
        when the service answers 202 (accepted, result still pending).
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied mappings override/extend the defaults.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-02-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL from the operation's URL template.
        url = self._stop_packet_capture_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # The request body is optional; only serialize it when supplied.
        body_content_kwargs = {} # type: Dict[str, Any]
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'VpnGatewayPacketCaptureStopParameters')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously with a payload; 202 = accepted, no body.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _stop_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/stoppacketcapture'}  # type: ignore
    async def begin_stop_packet_capture(
        self,
        resource_group_name: str,
        gateway_name: str,
        parameters: Optional["_models.VpnGatewayPacketCaptureStopParameters"] = None,
        **kwargs: Any
    ) -> AsyncLROPoller[str]:
        """Stops packet capture on vpn gateway in the specified resource group.

        Long-running operation: returns a poller that completes with the
        capture result string once the service finishes.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :param parameters: Vpn gateway packet capture parameters supplied to stop packet capture on
         vpn gateway.
        :type parameters: ~azure.mgmt.network.v2021_02_01.models.VpnGatewayPacketCaptureStopParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling
         object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations
         if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[str]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop the LRO-control keywords before **kwargs is forwarded downstream.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[str]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request. `cls` is overridden to
            # return the raw pipeline response for the poller's benefit.
            raw_result = await self._stop_packet_capture_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Initial-request-only keywords; remove them so they are not forwarded
        # again to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Turn the terminal response into a str, or hand the pieces to a
            # caller-supplied `cls` transform.
            deserialized = self._deserialize('str', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> ARM polling (final state via Location header);
        # polling=False -> no polling; otherwise a caller-provided method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new one.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_stop_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/stoppacketcapture'}  # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ListVpnGatewaysResult"]:
"""Lists all the VpnGateways in a resource group.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.ListVpnGatewaysResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'} # type: ignore
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ListVpnGatewaysResult"]:
        """Lists all the VpnGateways in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVpnGatewaysResult or the result of
         cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.ListVpnGatewaysResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewaysResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-02-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build either the first-page request (from the URL template) or a
            # follow-up request (from the service-provided next_link).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is already a complete URL; no query params added.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Split one page into (link-to-next-page, iterable-of-items).
            deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a page and fail fast on anything but 200.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'}  # type: ignore
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.create_index('sentry_filtervalue', ['project_id', 'value', 'times_seen'])
def backwards(self, orm):
db.drop_index('sentry_filtervalue', ['project_id', 'value', 'times_seen'])
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
}, 'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry', 'sentry']
| |
#!/usr/bin/python
"""Historian script for converting the timestamps in kernel trace to UTC.
TO USE:
kernel_trace.py --bugreport=<path to bugreport> --trace=<path to trace file>
--device=<device type hammerhead/shamu/flounder/flounder_lte>
"""
# Copyright 2016 Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import datetime
import getopt
import re
import sys
# Command-line flag values, populated by parse_argv().
flag_bugreport = None  # path to the bugreport file
flag_trace = None      # path to the kernel trace file
flag_device = None     # device type (hammerhead/shamu/flounder/flounder_lte)
def read_dmesg(bugreport, suspend_exit, suspend_enter, device):
    """Extracts the suspend exit/entries times from the bugreport.

    Fills suspend_exit (jiffy -> UTC datetime) and suspend_enter (list of
    jiffies) in place, and returns the (jiffy, UTC datetime) pair of the
    first suspend entry seen — or (0, 0) for an unrecognized device.
    """
    pattern_by_device = {
        "flounder": "(.*)tegra124-pinctrl tegra124-pinctrl:(.*)",
        "flounder_lte": "(.*)tegra124-pinctrl tegra124-pinctrl:(.*)",
        "shamu": "(.*)Suspending console(.*)",
        "hammerhead": "(.*)Suspending console(.*)",
    }
    device_suspend_pattern = pattern_by_device.get(device)
    if device_suspend_pattern is None:
        return (0, 0)
    first_jiffy = 0
    first_utc = 0
    seen_first_entry = False
    for line in bugreport:
        pm_match = re.match(r"(.*)\[(.*)\] PM: suspend ([a-z]+) (.*?) UTC", line)
        if pm_match:
            phase = pm_match.group(3)
            if "exit" in phase:
                # Drop the last 3 digits (nanoseconds -> microseconds)
                # before parsing with %f.
                stamp = pm_match.group(4)[:-3]
                suspend_exit[float(pm_match.group(2))] = (
                    datetime.datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S.%f"))
            elif not seen_first_entry and "entry" in phase:
                first_jiffy = float(re.search(r"\[([ 0-9.]+)\]", line).group(1))
                stamp = re.search("PM: suspend entry (.*) UTC", line).group(1)[:-3]
                first_utc = datetime.datetime.strptime(stamp,
                                                       "%Y-%m-%d %H:%M:%S.%f")
                seen_first_entry = True
        elif re.match(device_suspend_pattern, line):
            suspend_enter.append(
                float(re.search(r"\[([ 0-9.]+)\]", line).group(1)))
    return (first_jiffy, first_utc)
def convert_timestamps(trace_file, file_handle, time_dict, first_jiffy,
                       first_utc):
    """Converts all the valid jiffies to UTC time in the trace file.

    Args:
        trace_file: csv.reader over the space-delimited trace file.
        file_handle: underlying file object; rewound between the two passes.
        time_dict: mapping of suspend-enter jiffy -> UTC datetime.
        first_jiffy: jiffy of the first suspend entry (0 when unusable).
        first_utc: UTC datetime of the first suspend entry.

    Writes the converted rows to stdout, one space-separated row per line.
    """
    line_number = 0
    trace_start = 0
    keys = sorted(time_dict)
    # Find the point where the stats for all the cores start.
    for row in trace_file:
        if len(row) > 4 and ("buffer" in row[3]) and ("started" in row[4]):
            trace_start = line_number
        line_number += 1
    # Second pass over the same file: rewind the handle that feeds the reader.
    file_handle.seek(0)
    line_number = 0
    # Current [curr_jiffy, next_jiffy) suspend interval being mapped.
    curr_jiffy = keys[0]
    next_jiffy = keys[1]
    index = 1
    for row in trace_file:
        # Skip trace rows which contain incomplete data.
        if line_number < trace_start:
            line_number += 1
            continue
        row_no = 3
        # Skip comment rows.
        if "#" in row[0]:
            continue
        # Locate the timestamp column: the first column containing ':'.
        for row_no in range(row_no, len(row)):
            if ":" in row[row_no]:
                break
        # NOTE(review): this guard can never fire — range() leaves row_no at
        # most len(row)-1, so a row without any ':' column falls through with
        # its last column treated as the timestamp. Confirm intent.
        if row_no == len(row):
            continue
        # Strip the trailing ':' from the timestamp token.
        jiffy = float(row[row_no][:-1])
        # Skip trace points for which we do not have timestamp conversion.
        if ((first_jiffy != 0 and jiffy < first_jiffy) or
            (first_jiffy == 0 and jiffy < keys[0])):
            continue
        elif first_jiffy != 0 and jiffy < keys[0]:
            # Before the first suspend interval: offset from first entry time.
            diff = jiffy - first_jiffy
            us = (diff - int(diff))*1000000
            utc = first_utc + datetime.timedelta(seconds=int(diff),
                                                 microseconds=us)
            row[row_no] = str(utc)
        elif jiffy > curr_jiffy and jiffy < next_jiffy:
            # Inside the current interval: offset from its suspend-enter time.
            diff = jiffy - curr_jiffy
            us = (diff - int(diff))*1000000
            utc = time_dict[curr_jiffy] + datetime.timedelta(seconds=int(diff),
                                                             microseconds=us)
            row[row_no] = str(utc)
        else:
            # Jiffy moved past the current interval: advance until we find
            # the interval that contains it (or run off the end).
            index += 1
            curr_jiffy = next_jiffy
            if index < len(keys):
                next_jiffy = keys[index]
            else:
                next_jiffy = float("inf")
            while next_jiffy < jiffy and index < len(keys):
                curr_jiffy = next_jiffy
                next_jiffy = keys[index]
                index += 1
            diff = jiffy - curr_jiffy
            us = (diff - int(diff))*1000000
            utc = time_dict[curr_jiffy] + datetime.timedelta(seconds=int(diff),
                                                             microseconds=us)
            # Quoted on interval change, unlike the plain str(utc) above.
            row[row_no] = '"' + str(utc) + '"'
        for each_column in row:
            sys.stdout.write(str(each_column) + " ")
        sys.stdout.write("\n")
def usage():
    """Print usage of the script and exit with status 1."""
    # Python 2 print statement: the parenthesized string is %-formatted
    # with sys.argv[0] before printing.
    print ("\nUsage: %s --bugreport=<path to bugreport>"
           " --trace=<path to trace file>"
           " --device=<device type"
           " hammerhead/shamu/flounder/flounder_lte>\n") % sys.argv[0]
    sys.exit(1)
def parse_argv(argv):
    """Parse the command-line options and populate the module-level flags."""
    global flag_bugreport, flag_trace, flag_device
    try:
        options, _unused = getopt.getopt(
            argv, "", ["bugreport=", "trace=", "device="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for name, value in options:
        if name == "--device":
            flag_device = value
        elif name == "--trace":
            flag_trace = value
        elif name == "--bugreport":
            flag_bugreport = value
        else:
            # getopt only yields declared long options, but keep the
            # defensive fallback.
            usage()
            sys.exit(2)
def main(argv):
    """Entry point: validate flags, build the jiffy->UTC map, convert trace."""
    parse_argv(argv)
    if not flag_bugreport:
        print "Bug report not valid"
        usage()
        sys.exit(1)
    if not flag_trace:
        print "Trace file not valid"
        usage()
        sys.exit(1)
    if not flag_device:
        print "Device not valid"
        usage()
        sys.exit(1)
    try:
        bugreport = open(flag_bugreport)
    except IOError:
        print "Unable to open bug report"
        sys.exit(1)
    suspend_exit = {}   # jiffy of each suspend exit -> UTC datetime
    suspend_enter = []  # jiffies of device suspend entries
    first_jiffy, first_utc = read_dmesg(bugreport, suspend_exit, suspend_enter,
                                        flag_device)
    if not (len(suspend_enter) and len(suspend_exit)):
        return
    # A first entry that postdates the first device suspend is unusable.
    if suspend_enter and (first_jiffy > suspend_enter[0]):
        first_jiffy = 0
    time_dict = {}
    timestamp = sorted(suspend_exit)
    index = 0
    # Pair each suspend exit with the matching suspend entry and back-compute
    # the UTC time at the moment of entry from the exit timestamp.
    # NOTE(review): the loop variable shadows the 'timestamp' list, and the
    # 'continue' branch never advances 'index' — looks fragile; confirm the
    # intended pairing behavior with out-of-order entries.
    for timestamp in timestamp:
        if index >= len(suspend_enter) or timestamp < suspend_enter[index]:
            continue
        utc = suspend_exit[timestamp]
        diff = timestamp - float(suspend_enter[index])
        utc -= datetime.timedelta(seconds=int(diff),
                                  microseconds=(diff - int(diff))*1000000)
        time_dict[suspend_enter[index]] = utc
        index += 1
    try:
        file_handle = open(flag_trace, "r")
        trace_file = csv.reader(file_handle, delimiter=" ", skipinitialspace=True)
    except IOError:
        print "Unable to open trace file"
        sys.exit(1)
    convert_timestamps(trace_file, file_handle, time_dict, first_jiffy,
                       first_utc)
if __name__ == "__main__":
    main(sys.argv[1:])
| |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pyocd.debug.cache import RegisterCache
from pyocd.debug.context import DebugContext
from pyocd.coresight.cortex_m import (
CortexM,
CORE_REGISTER,
register_name_to_index,
is_psr_subregister,
sysm_to_psr_mask
)
from pyocd.core import memory_map
from pyocd.utility import conversion
from pyocd.utility import mask
import pytest
import logging
@pytest.fixture(scope='function')
def regcache(mockcore):
    """Fresh RegisterCache wrapping a DebugContext over the mock core.

    Function-scoped so every test starts with an empty cache. The
    'mockcore' fixture is provided elsewhere (presumably conftest.py).
    """
    return RegisterCache(DebugContext(mockcore))
# Copy of the register list without composite registers.
CORE_REGS_NO_COMPOSITES = CORE_REGISTER.copy()
# Composite registers are removed because their values are derived from
# other registers; they are exercised by the dedicated cfbp/xpsr tests.
CORE_REGS_NO_COMPOSITES.pop('cfbp')
CORE_REGS_NO_COMPOSITES.pop('xpsr')
CORE_REGS_NO_COMPOSITES.pop('iapsr')
CORE_REGS_NO_COMPOSITES.pop('eapsr')
CORE_REGS_NO_COMPOSITES.pop('iepsr')
# Appropriate modifiers for masked registers - others modified by adding 7
REG_MODIFIER = {
    'apsr': 0x30010000,
    'epsr': 0x01000C00,
}
def get_modifier(r):
    """Return the test write-modifier for register *r* (7 unless masked)."""
    try:
        return REG_MODIFIER[r]
    except KeyError:
        return 7
def get_expected_reg_value(r):
    """Deterministic fake value for register *r* used to seed the mock core."""
    reg_index = register_name_to_index(r)
    if is_psr_subregister(reg_index):
        # PSR subregisters only hold the bits allowed by their mask.
        return 0x55555555 & sysm_to_psr_mask(reg_index)
    # Negative indices (special registers) are shifted into a positive range.
    if reg_index < 0:
        reg_index += 100
    return reg_index + 1
def get_expected_cfbp():
    """Expected packed CONTROL/FAULTMASK/BASEPRI/PRIMASK composite value."""
    packed = 0
    # Byte layout of the cfbp composite, most to least significant.
    for name, shift in (('control', 24), ('faultmask', 16),
                        ('basepri', 8), ('primask', 0)):
        packed |= get_expected_reg_value(name) << shift
    return packed
def get_expected_xpsr():
    """Expected combined xPSR value (APSR | IPSR | EPSR)."""
    combined = 0
    for name in ('apsr', 'ipsr', 'epsr'):
        combined |= get_expected_reg_value(name)
    return combined
class TestRegisterCache:
    """Tests for RegisterCache: caching, invalidation, composite registers."""

    def set_core_regs(self, mockcore, modify=False):
        """Seed every non-composite register in the mock core.

        When *modify* is true each value is offset by get_modifier(r) so
        live core values are distinguishable from previously cached ones.
        """
        for r in CORE_REGS_NO_COMPOSITES:
            if modify:
                modifier = get_modifier(r)
            else:
                modifier = 0
            mockcore.write_core_registers_raw([r], [get_expected_reg_value(r) + modifier])
            assert mockcore.read_core_registers_raw([r]) == [get_expected_reg_value(r) + modifier]

    def test_r_1(self, mockcore, regcache):
        """Cached value is returned until an explicit invalidate()."""
        assert regcache.read_core_registers_raw(['r0']) == [0]  # cache initial value of 0
        mockcore.write_core_registers_raw(['r0'], [1234])  # modify reg behind the cache's back
        assert mockcore.read_core_registers_raw(['r0']) == [1234]  # verify modified reg
        assert regcache.read_core_registers_raw(['r0']) == [0]  # should return cached 0 value
        regcache.invalidate()  # explicitly invalidate cache
        assert mockcore.read_core_registers_raw(['r0']) == [1234]  # verify modified reg
        assert regcache.read_core_registers_raw(['r0']) == [1234]  # now should return updated 1234 value

    def test_run_token(self, mockcore, regcache):
        """Bumping the core's run token invalidates the cache implicitly."""
        assert regcache.read_core_registers_raw(['r0']) == [0]  # cache initial value of 0
        mockcore.write_core_registers_raw(['r0'], [1234])  # modify reg behind the cache's back
        assert mockcore.read_core_registers_raw(['r0']) == [1234]  # verify modified reg
        assert regcache.read_core_registers_raw(['r0']) == [0]  # should return cached 0 value
        mockcore.run_token += 1  # bump run token to cause cache to invalidate
        assert regcache.read_core_registers_raw(['r0']) == [1234]  # now should return updated 1234 value

    def test_reading_from_core(self, mockcore, regcache):
        """Uncached reads pass through to the core for every register."""
        self.set_core_regs(mockcore)
        for r in CORE_REGS_NO_COMPOSITES:
            assert regcache.read_core_registers_raw([r]) == [get_expected_reg_value(r)]

    def test_read_cached(self, mockcore, regcache):
        """Cached reads ignore later out-of-band core modifications."""
        self.set_core_regs(mockcore)
        # cache all regs
        regcache.read_core_registers_raw(CORE_REGS_NO_COMPOSITES.values())
        # modify regs in mock core
        self.set_core_regs(mockcore, True)
        # cache should return original unmodified values
        for r in CORE_REGS_NO_COMPOSITES:
            assert regcache.read_core_registers_raw([r]) == [get_expected_reg_value(r)]

    def test_read_cfbp(self, mockcore, regcache):
        """Composite cfbp and its components read consistently."""
        self.set_core_regs(mockcore)
        assert regcache.read_core_registers_raw(['cfbp', 'control', 'faultmask']) == [
            get_expected_cfbp(), get_expected_reg_value('control'), get_expected_reg_value('faultmask')
        ]

    def test_read_xpsr(self, mockcore, regcache):
        """Composite xpsr and its subregisters read consistently."""
        self.set_core_regs(mockcore)
        assert regcache.read_core_registers_raw(['xpsr', 'ipsr', 'apsr', 'eapsr']) == [
            get_expected_xpsr(), get_expected_reg_value('ipsr'),
            get_expected_reg_value('apsr'), get_expected_reg_value('eapsr')
        ]

    def test_read_cached_cfbp(self, mockcore, regcache):
        """Cached cfbp survives out-of-band writes to its components."""
        self.set_core_regs(mockcore)
        # cache it
        regcache.read_core_registers_raw(['cfbp'])
        # modify behind the cache's back
        mockcore.write_core_registers_raw(['control', 'primask'], [0x55, 0xaa])
        # cache should return original value
        assert regcache.read_core_registers_raw(['cfbp']) == [get_expected_cfbp()]

    def test_read_cached_xpsr(self, mockcore, regcache):
        """Cached xpsr survives out-of-band writes to its subregisters."""
        self.set_core_regs(mockcore)
        # cache it
        regcache.read_core_registers_raw(['xpsr'])
        # modify behind the cache's back
        mockcore.write_core_registers_raw(['ipsr', 'apsr'], [0x22, 0x10000000])
        # cache should return original value
        assert regcache.read_core_registers_raw(['xpsr']) == [get_expected_xpsr()]

    def test_write_1(self, mockcore, regcache):
        """A write through the cache updates both cache and core."""
        self.set_core_regs(mockcore)
        assert mockcore.read_core_registers_raw(['r0']) == [get_expected_reg_value('r0')]
        assert regcache.read_core_registers_raw(['r0']) == [get_expected_reg_value('r0')]
        regcache.write_core_registers_raw(['r0'], [1234])
        assert mockcore.read_core_registers_raw(['r0']) == [1234]
        assert regcache.read_core_registers_raw(['r0']) == [1234]

    def test_write_regs(self, mockcore, regcache):
        """Writes through the cache reach the core for every register."""
        self.set_core_regs(mockcore)
        for r in CORE_REGS_NO_COMPOSITES:
            regcache.write_core_registers_raw([r], [get_expected_reg_value(r) + get_modifier(r)])
        for r in CORE_REGS_NO_COMPOSITES:
            assert mockcore.read_core_registers_raw([r]) == [get_expected_reg_value(r) + get_modifier(r)]

    def test_write_cfbp(self, mockcore, regcache):
        """Writing cfbp components updates the packed composite in the core."""
        self.set_core_regs(mockcore)
        assert mockcore.read_core_registers_raw(['cfbp']) == [get_expected_cfbp()]
        regcache.write_core_registers_raw(['control', 'primask'], [3, 19])
        assert mockcore.read_core_registers_raw(['control', 'primask', 'cfbp']) == [
            3, 19,
            ((3 << 24) | (get_expected_reg_value('faultmask') << 16) |
             (get_expected_reg_value('basepri') << 8) | 19)
        ]

    def test_write_xpsr(self, mockcore, regcache):
        """Writing a psr subregister updates the combined xpsr in the core."""
        self.set_core_regs(mockcore)
        assert mockcore.read_core_registers_raw(['xpsr']) == [get_expected_xpsr()]
        regcache.write_core_registers_raw(['iapsr'], [0x10000022])
        assert mockcore.read_core_registers_raw(['ipsr', 'apsr', 'iapsr', 'xpsr']) == [
            0x22, 0x10000000, 0x10000022,
            0x10000022 | get_expected_reg_value('epsr')
        ]

    def test_write_full_xpsr(self, mockcore, regcache):
        """Writing xpsr masks each subregister with its architectural mask."""
        self.set_core_regs(mockcore)
        assert mockcore.read_core_registers_raw(['xpsr']) == [get_expected_xpsr()]
        regcache.write_core_registers_raw(['xpsr'], [0xffffffff])
        assert mockcore.read_core_registers_raw(['ipsr', 'apsr', 'epsr', 'xpsr']) == [
            CortexM.IPSR_MASK, CortexM.APSR_MASK, CortexM.EPSR_MASK,
            0xffffffff
        ]

    def test_invalid_reg_r(self, regcache):
        """Reading an unknown register index raises ValueError."""
        with pytest.raises(ValueError):
            regcache.read_core_registers_raw([132423])

    def test_invalid_reg_w(self, regcache):
        """Writing an unknown register index raises ValueError."""
        with pytest.raises(ValueError):
            regcache.write_core_registers_raw([132423], [1234])

    def test_invalid_fpu_reg_r(self, mockcore, regcache):
        """Reading an FPU register without an FPU raises ValueError."""
        mockcore.has_fpu = False
        with pytest.raises(ValueError):
            regcache.read_core_registers_raw(['s1'])

    def test_invalid_fpu_reg_w(self, mockcore, regcache):
        """Writing an FPU register without an FPU raises ValueError."""
        mockcore.has_fpu = False
        with pytest.raises(ValueError):
            regcache.write_core_registers_raw(['s1'], [1.234])
| |
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from __future__ import print_function
import sys
import unittest
import SimpleITK as sitk
import numpy as np
# Test image dimensions shared by the legacy conversion tests below.
sizeX = 4
sizeY = 5
sizeZ = 3
class TestNumpySimpleITKInterface(unittest.TestCase):
    """ This tests numpy array <-> SimpleITK Image conversion. """

    def setUp(self):
        pass

    def _helper_check_sitk_to_numpy_type(self, sitkType, numpyType):
        """Check a 9x10 image of *sitkType* converts to dtype *numpyType*.

        Note the axis reversal: sitk size (9, 10) becomes numpy shape (10, 9).
        """
        image = sitk.Image( (9, 10), sitkType, 1 )
        a = sitk.GetArrayFromImage( image )
        self.assertEqual( numpyType, a.dtype )
        self.assertEqual( (10, 9), a.shape )

    def test_type_to_numpy(self):
        "try all sitk pixel type to convert to numpy"
        # 64-bit and label types are conditional: not every SimpleITK build
        # supports them (checked against sitkUnknown below).
        self._helper_check_sitk_to_numpy_type(sitk.sitkUInt8, np.uint8)
        self._helper_check_sitk_to_numpy_type(sitk.sitkUInt16, np.uint16)
        self._helper_check_sitk_to_numpy_type(sitk.sitkUInt32, np.uint32)
        if sitk.sitkUInt64 != sitk.sitkUnknown:
            self._helper_check_sitk_to_numpy_type(sitk.sitkUInt64, np.uint64)
        self._helper_check_sitk_to_numpy_type(sitk.sitkInt8, np.int8)
        self._helper_check_sitk_to_numpy_type(sitk.sitkInt16, np.int16)
        self._helper_check_sitk_to_numpy_type(sitk.sitkInt32, np.int32)
        if sitk.sitkInt64 != sitk.sitkUnknown:
            self._helper_check_sitk_to_numpy_type(sitk.sitkInt64, np.int64)
        self._helper_check_sitk_to_numpy_type(sitk.sitkFloat32, np.float32)
        self._helper_check_sitk_to_numpy_type(sitk.sitkFloat64, np.float64)
        #self._helper_check_sitk_to_numpy_type(sitk.sitkComplexFloat32, np.complex64)
        #self._helper_check_sitk_to_numpy_type(sitk.sitkComplexFloat64, np.complex128)
        self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt8, np.uint8)
        self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt8, np.int8)
        self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt16, np.uint16)
        self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt16, np.int16)
        self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt32, np.uint32)
        self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt32, np.int32)
        if sitk.sitkVectorUInt64 != sitk.sitkUnknown:
            self._helper_check_sitk_to_numpy_type(sitk.sitkVectorUInt64, np.uint64)
        if sitk.sitkVectorInt64 != sitk.sitkUnknown:
            self._helper_check_sitk_to_numpy_type(sitk.sitkVectorInt64, np.int64)
        self._helper_check_sitk_to_numpy_type(sitk.sitkVectorFloat32, np.float32)
        self._helper_check_sitk_to_numpy_type(sitk.sitkVectorFloat64, np.float64)
        #self._helper_check_sitk_to_numpy_type(sitk.sitkLabelUInt8, np.uint8)
        #self._helper_check_sitk_to_numpy_type(sitk.sitkLabelUInt16, np.uint16)
        #self._helper_check_sitk_to_numpy_type(sitk.sitkLabelUInt32, np.uint32)
        #self._helper_check_sitk_to_numpy_type(sitk.sitkLabelUInt64, np.uint64)

    def test_to_numpy_and_back(self):
        """Test converting an image to numpy and back"""
        img = sitk.GaussianSource( sitk.sitkFloat32, [100,100], sigma=[10]*3, mean = [50,50] )
        # Image hashes must match after the round trip for losslessness.
        h = sitk.Hash( img )
        # convert the image to and fro a numpy array
        img = sitk.GetImageFromArray( sitk.GetArrayFromImage( img ) )
        self.assertEqual( h, sitk.Hash( img ))

    def test_isVector(self):
        """ Test Behavior of isVector option. """
        # Check 2D
        nda = np.arange(6, dtype=np.float32).reshape([2,3])
        img = sitk.GetImageFromArray(nda)
        self.assertEqual(img.GetSize(), nda.shape[::-1])
        self.assertEqual(img.GetPixelID(), sitk.sitkFloat32)
        self.assertEqual(img.GetPixel([1,0]), 1)
        img = sitk.GetImageFromArray(nda, isVector=False)
        self.assertEqual(img.GetSize(), nda.shape[::-1])
        self.assertEqual(img.GetPixelID(), sitk.sitkFloat32)
        self.assertEqual(img.GetPixel([1,0]), 1)
        # isVector=True on 2D: last axis becomes a 1-component vector pixel.
        img = sitk.GetImageFromArray(nda, isVector=True)
        self.assertEqual(img.GetSize(), nda.shape[::-1])
        self.assertEqual(img.GetPixelID(), sitk.sitkVectorFloat32)
        self.assertEqual(img.GetNumberOfComponentsPerPixel(), 1)
        self.assertEqual(img.GetPixel([1,0]), (1,))
        # Check 3D
        nda = np.arange(30, dtype=np.float32).reshape([2,3,5])
        img = sitk.GetImageFromArray(nda)
        self.assertEqual(img.GetSize(), nda.shape[::-1])
        self.assertEqual(img.GetPixelID(), sitk.sitkFloat32)
        self.assertEqual(img.GetPixel([1,0,0]), 1)
        self.assertEqual(img.GetPixel([0,1,0]), 5)
        img = sitk.GetImageFromArray(nda, isVector=False)
        self.assertEqual(img.GetSize(), nda.shape[::-1])
        self.assertEqual(img.GetPixelID(), sitk.sitkFloat32)
        self.assertEqual(img.GetPixel([1,0,0]), 1)
        self.assertEqual(img.GetPixel([0,1,0]), 5)
        # isVector=True on 3D: last axis (5) becomes the pixel components.
        img = sitk.GetImageFromArray(nda, isVector=True)
        self.assertEqual(img.GetSize(), (3,2))
        self.assertEqual(img.GetPixelID(), sitk.sitkVectorFloat32)
        self.assertEqual(img.GetNumberOfComponentsPerPixel(), 5)
        self.assertEqual(img.GetPixel([1,0,0]), (5,6,7,8,9))
        self.assertEqual(img.GetPixel([0,1,0]), (15,16,17,18,19))
        # Check 4D
        nda = np.arange(210, dtype=np.float32).reshape([3,5,7,2])
        # Special case to default to VectorImage
        img = sitk.GetImageFromArray(nda)
        self.assertEqual(img.GetSize(), (7,5,3))
        self.assertEqual(img.GetPixelID(), sitk.sitkVectorFloat32)
        self.assertEqual(img.GetNumberOfComponentsPerPixel(), 2)
        self.assertEqual(img.GetPixel([1,0,0]), (2,3))
        self.assertEqual(img.GetPixel([0,1,0]), (14,15))
        img = sitk.GetImageFromArray(nda, isVector=True)
        self.assertEqual(img.GetSize(), (7,5,3))
        self.assertEqual(img.GetPixelID(), sitk.sitkVectorFloat32)
        self.assertEqual(img.GetNumberOfComponentsPerPixel(), 2)
        self.assertEqual(img.GetPixel([1,0,0]), (2,3))
        self.assertEqual(img.GetPixel([0,1,0]), (14,15))
        # 4D Image may not be supported by SimpleITK
        try:
            sitk.Image([1]*4, sitk.sitkUInt8)
        except RuntimeError:
            return
        img = sitk.GetImageFromArray(nda, isVector=False)
        self.assertEqual(img.GetSize(), nda.shape[::-1])
        self.assertEqual(img.GetPixelID(), sitk.sitkFloat32)
        self.assertEqual(img.GetPixel([1,0,0,0]), 1)
        self.assertEqual(img.GetPixel([0,1,0,0]), 2)
        # 5D array with isVector: size drops the component axis.
        nda = np.arange(210*9, dtype=np.float32).reshape([3,5,7,9,2])
        img = sitk.GetImageFromArray(nda, isVector=True)
        self.assertEqual(img.GetSize(), nda.shape[-2::-1])
        self.assertEqual(img.GetPixelID(), sitk.sitkVectorFloat32)
        self.assertEqual(img.GetPixel([1,0,0,0]), (2,3))
        self.assertEqual(img.GetPixel([0,1,0,0]), (18,19))
        self.assertEqual(img.GetPixel([0,0,1,0]), (126,127))

    def test_vector_image_to_numpy(self):
        """Test converting back and forth between numpy and SimpleITK
        images where the SimpleITK image has multiple components and
        stored as a VectorImage."""
        # Check 2D
        img = sitk.PhysicalPointSource(sitk.sitkVectorFloat32, [3,4])
        h = sitk.Hash( img )
        nda = sitk.GetArrayFromImage(img)
        self.assertEqual(nda.shape, (4,3,2))
        self.assertEqual(nda[0,0].tolist(), [0,0])
        self.assertEqual(nda[2,1].tolist(), [1,2])
        self.assertEqual(nda[0,:,0].tolist(), [0,1,2])
        img2 = sitk.GetImageFromArray( nda, isVector=True)
        self.assertEqual( h, sitk.Hash(img2) )
        # check 3D
        img = sitk.PhysicalPointSource(sitk.sitkVectorFloat32, [3,4,5])
        h = sitk.Hash( img )
        nda = sitk.GetArrayFromImage(img)
        self.assertEqual(nda.shape, (5,4,3,3))
        self.assertEqual(nda[0,0,0].tolist(), [0,0,0])
        self.assertEqual(nda[0,0,:,0].tolist(), [0,1,2])
        self.assertEqual(nda[0,:,1,1].tolist(), [0,1,2,3])
        img2 = sitk.GetImageFromArray(nda)
        self.assertEqual(img2.GetSize(), img.GetSize())
        self.assertEqual(img2.GetNumberOfComponentsPerPixel(), img.GetNumberOfComponentsPerPixel())
        self.assertEqual(h, sitk.Hash(img2))

    def test_legacy(self):
        """Test SimpleITK Image to numpy array."""
        # self.assertRaises(TypeError, sitk.GetArrayFromImage, 3)
        # 2D image
        image = sitk.Image(sizeX, sizeY, sitk.sitkInt32)
        for j in range(sizeY):
            for i in range(sizeX):
                # Pixel value encodes its (i, j) position for easy checking.
                image[i, j] = j*sizeX + i
        print(sitk.GetArrayFromImage(image))
        self.assertEqual( type (sitk.GetArrayFromImage(image)), np.ndarray )
        # 3D image
        image = sitk.Image(sizeX, sizeY, sizeZ, sitk.sitkFloat32)
        for k in range(sizeZ):
            for j in range(sizeY):
                for i in range(sizeX):
                    image[i, j, k] = (sizeY*k +j)*sizeX + i
        print(sitk.GetArrayFromImage(image))
        self.assertEqual( type (sitk.GetArrayFromImage(image)), np.ndarray )

    def test_legacy_array2sitk(self):
        """Test numpy array to SimpleITK Image."""
        arr = np.arange(20, dtype=np.float64)
        arr.shape = (sizeY, sizeX)
        image = sitk.GetImageFromArray(arr)
        self.assertEqual(image.GetSize(), (sizeX, sizeY))
        self.assertEqual(image[0,0], 0.0)
        self.assertEqual(image[1,1], 5.0)
        self.assertEqual(image[2,2], 10.0)
        arr = np.arange(60, dtype=np.int16)
        arr.shape = (sizeZ, sizeY, sizeX)
        image = sitk.GetImageFromArray(arr)
        self.assertEqual(image.GetSize(), (sizeX, sizeY, sizeZ))
        self.assertEqual(image[0,0,0], 0)
        self.assertEqual(image[1,1,1], 25)
        self.assertEqual(image[2,2,2], 50)
if __name__ == '__main__':
    unittest.main()
| |
#
# Kivy - Crossplatform NUI toolkit
# http://kivy.org/
#
from __future__ import print_function
import sys
from copy import deepcopy
import os
from os.path import join, dirname, sep, exists, basename, isdir, abspath
from os import walk, environ, makedirs, listdir
from distutils.version import LooseVersion
from collections import OrderedDict
from time import sleep
if environ.get('KIVY_USE_SETUPTOOLS'):
from setuptools import setup, Extension
print('Using setuptools')
else:
from distutils.core import setup
from distutils.extension import Extension
print('Using distutils')
# True when running under Python 3 (a plain string compare is enough here).
PY3 = sys.version > '3'
if PY3:  # fix error with py3's LooseVersion comparisons
    def ver_equal(self, other):
        """Equality for LooseVersion against a plain version value."""
        return self.version == other
    # Monkey-patch applied per the comment above; affects all LooseVersion
    # comparisons process-wide.
    LooseVersion.__eq__ = ver_equal
# Supported Cython version range for building the extensions.
MIN_CYTHON_STRING = '0.20'
MIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING)
MAX_CYTHON_STRING = '0.23'
MAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)
# Cython versions known to be broken with this code base (none currently).
CYTHON_UNSUPPORTED = ()
def getoutput(cmd, env=None):
    """Run *cmd* in a shell and return its stdout as bytes.

    Args:
        cmd: shell command line to execute.
        env: optional environment mapping for the child process.

    Returns:
        The command's stdout (bytes) on success, or an empty string on a
        non-zero exit status (the "" is kept for backward compatibility
        with existing callers that .split() the result).
    """
    import subprocess
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env)
    # communicate() drains both pipes before waiting, avoiding the deadlock
    # that wait()+read() can hit when a pipe buffer fills, and it closes the
    # pipes afterwards (no leaked file descriptors).
    stdout_content, stderr_content = p.communicate()
    if p.returncode:  # if not returncode == 0
        print('WARNING: A problem occured while running {0} (code {1})\n'
              .format(cmd, p.returncode))
        if stderr_content:
            print('{0}\n'.format(stderr_content))
        return ""
    return stdout_content
def pkgconfig(*packages, **kw):
    """Query pkg-config for *packages* and fold -I/-L/-l flags into *kw*.

    Returns *kw* with 'include_dirs', 'library_dirs' and 'libraries'
    lists extended; any other pkg-config output tokens are ignored.
    """
    flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
    local_env = None
    # some python installations ship their own pkgconfig dir next to the
    # interpreter; add it to PKG_CONFIG_PATH when present
    pconfig = join(dirname(sys.executable), 'libs', 'pkgconfig')
    if isdir(pconfig):
        local_env = environ.copy()
        local_env['PKG_CONFIG_PATH'] = '{};{}'.format(
            environ.get('PKG_CONFIG_PATH', ''), pconfig)
    cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))
    for token in getoutput(cmd, local_env).split():
        dest = flag_map.get(token[:2].decode('utf-8'))
        if dest is None:
            continue
        kw.setdefault(dest, []).append(token[2:].decode('utf-8'))
    return kw
# -----------------------------------------------------------------------------
# Determine on which platform we are
platform = sys.platform

# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)
if sys.platform == 'darwin':
    if sys.maxsize > 2 ** 32:
        osx_arch = 'x86_64'
    else:
        osx_arch = 'i386'

# Detect Python for android project (http://github.com/kivy/python-for-android)
ndkplatform = environ.get('NDKPLATFORM')
if ndkplatform is not None and environ.get('LIBLINK'):
    platform = 'android'
kivy_ios_root = environ.get('KIVYIOSROOT', None)
if kivy_ios_root is not None:
    platform = 'ios'
# Raspberry Pi is recognized by the VideoCore headers it ships.
if exists('/opt/vc/include/bcm_host.h'):
    platform = 'rpi'
# ARM Mali boards are recognized by the vendor GL driver.
if exists('/usr/lib/arm-linux-gnueabihf/libMali.so'):
    platform = 'mali'
# -----------------------------------------------------------------------------
# Detect options
#
# Build-time feature switches. A value of None means "autodetect later";
# each key can be forced from the environment by exporting its upper-cased
# name (e.g. USE_SDL2=1).
c_options = OrderedDict()
c_options['use_rpi'] = platform == 'rpi'
c_options['use_mali'] = platform == 'mali'
c_options['use_egl'] = False
c_options['use_opengl_es2'] = None
c_options['use_opengl_debug'] = False
c_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'
c_options['use_glew'] = False
c_options['use_sdl2'] = None
c_options['use_ios'] = False
c_options['use_mesagl'] = False
c_options['use_x11'] = False
c_options['use_gstreamer'] = None
c_options['use_avfoundation'] = platform == 'darwin'
c_options['use_osx_frameworks'] = platform == 'darwin'
c_options['debug_gl'] = False

# now check if environ is changing the default values
for key in list(c_options.keys()):
    ukey = key.upper()
    if ukey in environ:
        value = bool(int(environ[ukey]))
        print('Environ change {0} -> {1}'.format(key, value))
        c_options[key] = value
# -----------------------------------------------------------------------------
# Cython check
# on python-for-android and kivy-ios, cython usage is external
# User-facing messages printed by the Cython version checks below.
cython_unsupported_append = '''
Please note that the following versions of Cython are not supported
at all: {}
'''.format(', '.join(map(str, CYTHON_UNSUPPORTED)))

# Shown when the installed Cython is older than MIN_CYTHON_VERSION.
cython_min = '''\
This version of Cython is not compatible with Kivy. Please upgrade to
at least version {0}, preferably the newest supported version {1}.
If your platform provides a Cython package, make sure you have upgraded
to the newest version. If the newest version available is still too low,
please remove it and install the newest supported Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
           cython_unsupported_append if CYTHON_UNSUPPORTED else '')

# Shown when the installed Cython is newer than MAX_CYTHON_VERSION.
cython_max = '''\
This version of Cython is untested with Kivy. While this version may
work perfectly fine, it is possible that you may experience issues. If
you do have issues, please downgrade to a supported version. It is
best to use the newest supported version, {1}, but the minimum
supported version is {0}.
If your platform provides a Cython package, check if you can downgrade
to a supported version. Otherwise, uninstall the platform package and
install Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
           cython_unsupported_append if CYTHON_UNSUPPORTED else '')

# Shown when the installed Cython is in the CYTHON_UNSUPPORTED blacklist.
cython_unsupported = '''\
This version of Cython suffers from known bugs and is unsupported.
Please install the newest supported version, {1}, if possible, but
the minimum supported version is {0}.
If your platform provides a Cython package, check if you can install
a supported version. Otherwise, uninstall the platform package and
install Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
           cython_unsupported_append)
have_cython = False
if platform in ('ios', 'android'):
    # on these targets cython runs externally (python-for-android / kivy-ios)
    print('\nCython check avoided.')
else:
    try:
        # check for cython
        from Cython.Distutils import build_ext
        have_cython = True
        import Cython
        cy_version_str = Cython.__version__
        cy_ver = LooseVersion(cy_version_str)
        print('\nDetected Cython version {}'.format(cy_version_str))
        # too old or blacklisted -> hard failure; too new -> warning only
        if cy_ver < MIN_CYTHON_VERSION:
            print(cython_min)
            raise ImportError('Incompatible Cython Version')
        if cy_ver in CYTHON_UNSUPPORTED:
            print(cython_unsupported)
            raise ImportError('Incompatible Cython Version')
        if cy_ver > MAX_CYTHON_VERSION:
            print(cython_max)
            # give the user a moment to notice the warning
            sleep(1)
    except ImportError:
        print('\nCython is missing, its required for compiling kivy !\n\n')
        raise

if not have_cython:
    # fall back to the plain distutils build_ext (pre-generated C sources)
    from distutils.command.build_ext import build_ext

# -----------------------------------------------------------------------------
# Setup classes

# the build path where kivy is being compiled
src_path = build_path = dirname(__file__)
class KivyBuildExt(build_ext):
    """build_ext variant that writes Kivy's generated configuration files
    (graphics/config.h, graphics/config.pxi, setupconfig.py) before
    compiling the extension modules."""

    def finalize_options(self):
        retval = build_ext.finalize_options(self)
        global build_path
        # When building out-of-place, generated config files must go into
        # the build directory instead of the source tree.
        if (self.build_lib is not None and exists(self.build_lib) and
                not self.inplace):
            build_path = self.build_lib
        return retval

    def build_extensions(self):
        # build files
        config_h_fn = ('graphics', 'config.h')
        config_pxi_fn = ('graphics', 'config.pxi')
        config_py_fn = ('setupconfig.py', )

        # generate headers
        config_h = '// Autogenerated file for Kivy C configuration\n'
        config_h += '#define __PY3 {0}\n'.format(int(PY3))
        config_pxi = '# Autogenerated file for Kivy Cython configuration\n'
        config_pxi += 'DEF PY3 = {0}\n'.format(int(PY3))
        config_py = '# Autogenerated file for Kivy configuration\n'
        config_py += 'PY3 = {0}\n'.format(int(PY3))
        config_py += 'CYTHON_MIN = {0}\nCYTHON_MAX = {1}\n'.format(
            repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))
        config_py += 'CYTHON_BAD = {0}\n'.format(repr(', '.join(map(
            str, CYTHON_UNSUPPORTED))))

        # generate content: each c_options switch becomes a C #define,
        # a Cython DEF and a python constant
        print('Build configuration is:')
        for opt, value in c_options.items():
            value = int(bool(value))
            print(' * {0} = {1}'.format(opt, value))
            opt = opt.upper()
            config_h += '#define __{0} {1}\n'.format(opt, value)
            config_pxi += 'DEF {0} = {1}\n'.format(opt, value)
            config_py += '{0} = {1}\n'.format(opt, value)
        debug = bool(self.debug)
        print(' * debug = {0}'.format(debug))
        config_h += \
            '#if __USE_GLEW && defined(_WIN32)\n# define GLEW_BUILD\n#endif'
        config_pxi += 'DEF DEBUG = {0}\n'.format(debug)
        config_py += 'DEBUG = {0}\n'.format(debug)

        # write the generated files into the build tree and, when different,
        # the source tree too — but only when the content actually changed
        for fn, content in (
                (config_h_fn, config_h), (config_pxi_fn, config_pxi),
                (config_py_fn, config_py)):
            build_fn = expand(build_path, *fn)
            if self.update_if_changed(build_fn, content):
                print('Updated {}'.format(build_fn))
            src_fn = expand(src_path, *fn)
            if src_fn != build_fn and self.update_if_changed(src_fn, content):
                print('Updated {}'.format(src_fn))

        c = self.compiler.compiler_type
        print('Detected compiler is {}'.format(c))
        if c != 'msvc':
            # every non-MSVC toolchain needs an explicit libm link
            for e in self.extensions:
                e.extra_link_args += ['-lm']
        build_ext.build_extensions(self)

    def update_if_changed(self, fn, content):
        """Write *content* to *fn* only when it differs from what is on
        disk; return True when the file was (re)written."""
        need_update = True
        if exists(fn):
            with open(fn) as fd:
                need_update = fd.read() != content
        if need_update:
            with open(fn, 'w') as fd:
                fd.write(content)
        return need_update
def _check_and_fix_sdl2_mixer(f_path):
    """Rewrite the smpeg2 framework inside SDL2_mixer when it references
    SDL2 via @executable_path, which breaks application startup; uses
    otool/install_name_tool (macOS only)."""
    print("Check if SDL2_mixer smpeg2 have an @executable_path")
    rpath_from = "@executable_path/../Frameworks/SDL2.framework/Versions/A/SDL2"
    rpath_to = "@rpath/../../../../SDL2.framework/Versions/A/SDL2"
    smpeg2_path = ("{}/Versions/A/Frameworks/smpeg2.framework"
                   "/Versions/A/smpeg2").format(f_path)
    output = getoutput(("otool -L '{}'").format(smpeg2_path)).decode('utf-8')
    if "@executable_path" not in output:
        return
    print("WARNING: Your SDL2_mixer version is invalid")
    print("WARNING: The smpeg2 framework embedded in SDL2_mixer contains a")
    print("WARNING: reference to @executable_path that will fail the")
    print("WARNING: execution of your application.")
    print("WARNING: We are going to change:")
    print("WARNING: from: {}".format(rpath_from))
    print("WARNING: to: {}".format(rpath_to))
    getoutput("install_name_tool -change {} {} {}".format(
        rpath_from, rpath_to, smpeg2_path))
    # NOTE(review): this second otool output is intentionally left as bytes
    # (no .decode), hence the b"..." membership test below.
    output = getoutput(("otool -L '{}'").format(smpeg2_path))
    if b"@executable_path" not in output:
        print("WARNING: Change successfully applied!")
        print("WARNING: You'll never see this message again.")
    else:
        print("WARNING: Unable to apply the changes, sorry.")
# -----------------------------------------------------------------------------
# extract version (simulate doc generation, kivy will be not imported)
environ['KIVY_DOC_INCLUDE'] = '1'
import kivy

# extra build commands go in the cmdclass dict {'command-name': CommandClass}
# see tools.packaging.{platform}.build.py for custom build commands for
# portable packages. Also e.g. we use build_ext command from cython if its
# installed for c extensions.
from kivy.tools.packaging.factory import FactoryBuild
cmdclass = {
    'build_factory': FactoryBuild,
    'build_ext': KivyBuildExt}

try:
    # add build rules for portable packages to cmdclass
    if platform == 'win32':
        from kivy.tools.packaging.win32.build import WindowsPortableBuild
        cmdclass['build_portable'] = WindowsPortableBuild
    elif platform == 'darwin':
        from kivy.tools.packaging.osx.build import OSXPortableBuild
        cmdclass['build_portable'] = OSXPortableBuild
except ImportError:
    # sdist/user installs do not ship the packaging tools
    print('User distribution detected, avoid portable command.')
# Detect which opengl version headers to use
if platform in ('android', 'darwin', 'ios', 'rpi', 'mali'):
    c_options['use_opengl_es2'] = True
elif platform == 'win32':
    print('Windows platform detected, force GLEW usage.')
    c_options['use_glew'] = True
    c_options['use_opengl_es2'] = False
else:
    if c_options['use_opengl_es2'] is None:
        # honour an explicit GRAPHICS=GLES / GRAPHICS=OPENGL request first
        GLES = environ.get('GRAPHICS') == 'GLES'
        OPENGL = environ.get('GRAPHICS') == 'OPENGL'
        if GLES:
            c_options['use_opengl_es2'] = True
        elif OPENGL:
            c_options['use_opengl_es2'] = False
        else:
            # auto detection of GLES headers
            default_header_dirs = ['/usr/include', join(
                environ.get('LOCALBASE', '/usr/local'), 'include')]
            c_options['use_opengl_es2'] = False
            for hdir in default_header_dirs:
                filename = join(hdir, 'GLES2', 'gl2.h')
                if exists(filename):
                    c_options['use_opengl_es2'] = True
                    print('NOTE: Found GLES 2.0 headers at {0}'.format(
                        filename))
                    break
            if not c_options['use_opengl_es2']:
                print('NOTE: Not found GLES 2.0 headers at: {}'.format(
                    default_header_dirs))
                print(
                    ' Please contact us if your distribution '
                    'uses an alternative path for the headers.')

print('Using this graphics system: {}'.format(
    ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))
# check if we are in a kivy-ios build
if platform == 'ios':
    print('Kivy-IOS project environment detect, use it.')
    print('Kivy-IOS project located at {0}'.format(kivy_ios_root))
    c_options['use_ios'] = True
    c_options['use_sdl2'] = True
elif platform == 'darwin':
    if c_options['use_osx_frameworks']:
        if osx_arch == "i386":
            print("Warning: building with frameworks fail on i386")
        else:
            # frameworks builds only work for 64 bit
            print("OSX framework used, force to x86_64 only")
            environ["ARCHFLAGS"] = environ.get("ARCHFLAGS", "-arch x86_64")
            print("OSX ARCHFLAGS are: {}".format(environ["ARCHFLAGS"]))

# detect gstreamer, only on desktop
# works if we forced the options or in autodetection
if platform not in ('ios', 'android') and (c_options['use_gstreamer']
                                           in (None, True)):
    if c_options['use_osx_frameworks'] and platform == 'darwin':
        # check the existence of frameworks
        f_path = '/Library/Frameworks/GStreamer.framework'
        if not exists(f_path):
            c_options['use_gstreamer'] = False
            print('Missing GStreamer framework {}'.format(f_path))
        else:
            c_options['use_gstreamer'] = True
            gst_flags = {
                'extra_link_args': [
                    '-F/Library/Frameworks',
                    '-Xlinker', '-rpath',
                    '-Xlinker', '/Library/Frameworks',
                    '-Xlinker', '-headerpad',
                    '-Xlinker', '190',
                    '-framework', 'GStreamer'],
                'include_dirs': [join(f_path, 'Headers')]}
    else:
        # use pkg-config approach instead
        gst_flags = pkgconfig('gstreamer-1.0')
        if 'libraries' in gst_flags:
            c_options['use_gstreamer'] = True

# detect SDL2, only on desktop and iOS, or android if explicitly enabled
# works if we forced the options or in autodetection
sdl2_flags = {}
if c_options['use_sdl2'] or (
        platform not in ('android',) and c_options['use_sdl2'] is None):
    if c_options['use_osx_frameworks'] and platform == 'darwin':
        # check the existence of frameworks
        sdl2_valid = True
        sdl2_flags = {
            'extra_link_args': [
                '-F/Library/Frameworks',
                '-Xlinker', '-rpath',
                '-Xlinker', '/Library/Frameworks',
                '-Xlinker', '-headerpad',
                '-Xlinker', '190'],
            'include_dirs': [],
            'extra_compile_args': ['-F/Library/Frameworks']
        }
        # all four frameworks must be present, otherwise SDL2 is disabled
        for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):
            f_path = '/Library/Frameworks/{}.framework'.format(name)
            if not exists(f_path):
                print('Missing framework {}'.format(f_path))
                sdl2_valid = False
                continue
            sdl2_flags['extra_link_args'] += ['-framework', name]
            sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]
            print('Found sdl2 frameworks: {}'.format(f_path))
            if name == 'SDL2_mixer':
                _check_and_fix_sdl2_mixer(f_path)
        if not sdl2_valid:
            c_options['use_sdl2'] = False
            print('Deactivate SDL2 compilation due to missing frameworks')
        else:
            c_options['use_sdl2'] = True
            print('Activate SDL2 compilation')
    elif platform != "ios":
        # use pkg-config approach instead
        sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')
        if 'libraries' in sdl2_flags:
            c_options['use_sdl2'] = True
# -----------------------------------------------------------------------------
# declare flags
def get_modulename_from_file(filename):
    """Map a kivy source path to its dotted module name.

    E.g. ``/build/kivy/kivy/graphics/vbo.pyx`` -> ``kivy.graphics.vbo``.
    """
    normalized = filename.replace(sep, '/')
    stem = '.'.join(normalized.split('.')[:-1])
    parts = stem.split('/')
    # drop leading path components up to the first 'kivy'
    while parts[0] != 'kivy':
        parts.pop(0)
    # collapse a duplicated 'kivy/kivy' prefix (checkout-inside-dir layout)
    if parts[1] == 'kivy':
        parts.pop(0)
    return '.'.join(parts)
def expand(root, *args):
    """Return the path of *args joined beneath the ``kivy`` package in *root*."""
    pieces = (root, 'kivy') + args
    return join(*pieces)
class CythonExtension(Extension):
    """Extension subclass that carries the Cython directives Kivy uses."""

    def __init__(self, *args, **kwargs):
        Extension.__init__(self, *args, **kwargs)
        directives = {}
        directives['c_string_encoding'] = 'utf-8'
        directives['profile'] = 'USE_PROFILE' in environ
        directives['embedsignature'] = 'USE_EMBEDSIGNATURE' in environ
        self.cython_directives = directives
        # XXX with pip, setuptools is imported before distutils, and change
        # our pyx to c, then, cythonize doesn't happen. So force again our
        # sources
        self.sources = args[1]
def merge(d1, *args):
    """Return a deep copy of *d1* with every dict in *args* merged in.

    Values are lists: existing keys are extended, new keys are copied.
    None of the inputs is mutated.
    """
    result = deepcopy(d1)
    for other in args:
        for key, value in other.items():
            if key in result:
                result[key].extend(deepcopy(value))
            else:
                result[key] = deepcopy(value)
    return result
def determine_base_flags():
    """Return the per-platform baseline compiler/linker flags.

    The returned dict always contains the keys ``libraries``,
    ``include_dirs``, ``extra_link_args`` and ``extra_compile_args``.
    """
    flags = {
        'libraries': [],
        'include_dirs': [],
        'extra_link_args': [],
        'extra_compile_args': []}
    if c_options['use_ios']:
        # iOS builds require an explicit SDK sysroot
        sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))
        if not sysroot:
            raise Exception('IOSSDKROOT is not set')
        flags['include_dirs'] += [sysroot]
        flags['extra_compile_args'] += ['-isysroot', sysroot]
        flags['extra_link_args'] += ['-isysroot', sysroot]
    elif platform.startswith('freebsd'):
        flags['include_dirs'] += [join(
            environ.get('LOCALBASE', '/usr/local'), 'include')]
        flags['extra_link_args'] += ['-L', join(
            environ.get('LOCALBASE', '/usr/local'), 'lib')]
    elif platform == 'darwin':
        v = os.uname()
        # BUGFIX: compare the Darwin release numerically. The previous
        # string comparison (v[2] >= '13.0.0') was lexicographic, so a
        # release such as '9.8.0' wrongly compared greater than '13.0.0'.
        darwin_release = tuple(int(x) for x in v[2].split('.'))
        if darwin_release >= (13, 0, 0):
            # use xcode-select to search on the right Xcode path
            # XXX use the best SDK available instead of a specific one
            import platform as _platform
            xcode_dev = getoutput('xcode-select -p').splitlines()[0]
            sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])
            print('Xcode detected at {}, and using OS X{} sdk'.format(
                xcode_dev, sdk_mac_ver))
            sysroot = join(
                xcode_dev.decode('utf-8'),
                'Platforms/MacOSX.platform/Developer/SDKs',
                'MacOSX{}.sdk'.format(sdk_mac_ver),
                'System/Library/Frameworks')
        else:
            sysroot = ('/System/Library/Frameworks/'
                       'ApplicationServices.framework/Frameworks')
        flags['extra_compile_args'] += ['-F%s' % sysroot]
        flags['extra_link_args'] += ['-F%s' % sysroot]
    return flags
def determine_gl_flags():
    """Return the OpenGL / GLES compile and link flags for the current
    platform; only 'libraries' is guaranteed to be present."""
    flags = {'libraries': []}
    if c_options['use_opengl_mock']:
        # documentation builds (readthedocs) link against nothing
        return flags
    if platform == 'win32':
        flags['libraries'] = ['opengl32']
    elif platform == 'ios':
        flags['libraries'] = ['GLESv2']
        flags['extra_link_args'] = ['-framework', 'OpenGLES']
    elif platform == 'darwin':
        flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]
        flags['extra_compile_args'] = ['-arch', osx_arch]
    elif platform.startswith('freebsd'):
        flags['libraries'] = ['GL']
    elif platform.startswith('openbsd'):
        flags['include_dirs'] = ['/usr/X11R6/include']
        flags['extra_link_args'] = ['-L', '/usr/X11R6/lib']
        flags['libraries'] = ['GL']
    elif platform == 'android':
        flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]
        flags['extra_link_args'] = ['-L', join(ndkplatform, 'usr', 'lib')]
        flags['libraries'] = ['GLESv2']
    elif platform == 'rpi':
        # VideoCore headers and libraries shipped by the Pi firmware
        flags['include_dirs'] = [
            '/opt/vc/include',
            '/opt/vc/include/interface/vcos/pthreads',
            '/opt/vc/include/interface/vmcs_host/linux']
        flags['library_dirs'] = ['/opt/vc/lib']
        flags['libraries'] = ['bcm_host', 'EGL', 'GLESv2']
    elif platform == 'mali':
        flags['include_dirs'] = ['/usr/include/']
        flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']
        flags['libraries'] = ['GLESv2']
        # mali boards render through EGL on an X11 window
        c_options['use_x11'] = True
        c_options['use_egl'] = True
    else:
        flags['libraries'] = ['GL']
    if c_options['use_glew']:
        if platform == 'win32':
            flags['libraries'] += ['glew32']
        else:
            flags['libraries'] += ['GLEW']
    return flags
def determine_sdl2():
    """Return build flags for the SDL2-based modules, or {} when SDL2
    support is disabled or its headers cannot be found."""
    flags = {}
    if not c_options['use_sdl2']:
        return flags

    sdl2_path = environ.get('KIVY_SDL2_PATH', None)

    # on OSX the framework flags gathered earlier are already complete
    if sdl2_flags and not sdl2_path and platform == 'darwin':
        return sdl2_flags

    # no pkgconfig info, or we want to use a specific sdl2 path, so perform
    # manual configuration
    flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']
    split_chr = ';' if platform == 'win32' else ':'
    sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []

    if not sdl2_paths:
        # fall back to interpreter-relative and system-wide include dirs
        sdl_inc = join(dirname(sys.executable), 'include', 'SDL2')
        if isdir(sdl_inc):
            sdl2_paths = [sdl_inc]
        sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])

    flags['include_dirs'] = sdl2_paths
    flags['extra_link_args'] = []
    flags['extra_compile_args'] = []
    flags['extra_link_args'] += (
        ['-L' + p for p in sdl2_paths] if sdl2_paths else
        ['-L/usr/local/lib/'])

    # ensure headers for all the SDL2 and sub libraries are available
    libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']
    can_compile = True
    for lib in libs_to_check:
        found = False
        for d in flags['include_dirs']:
            fn = join(d, '{}.h'.format(lib))
            if exists(fn):
                found = True
                print('SDL2: found {} header at {}'.format(lib, fn))
                break
        if not found:
            print('SDL2: missing sub library {}'.format(lib))
            can_compile = False
    if not can_compile:
        c_options['use_sdl2'] = False
        return {}

    return flags
base_flags = determine_base_flags()
gl_flags = determine_gl_flags()

# -----------------------------------------------------------------------------
# sources to compile
# all the dependencies have been found manually with:
# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}
# Maps each graphics source file to the files it textually depends on, so
# that a change in a dependency triggers recompilation of the module.
graphics_dependencies = {
    'gl_redirect.h': ['common_subset.h', 'gl_mock.h'],
    'c_opengl.pxd': ['config.pxi', 'gl_redirect.h'],
    'buffer.pyx': ['common.pxi'],
    'context.pxd': [
        'instructions.pxd', 'texture.pxd', 'vbo.pxd',
        'c_opengl.pxd', 'c_opengl_debug.pxd', 'c_opengl_mock.pxd'],
    'c_opengl_debug.pyx': ['common.pxi', 'c_opengl.pxd'],
    'c_opengl_mock.pyx': ['common.pxi', 'c_opengl.pxd'],
    'compiler.pxd': ['instructions.pxd'],
    'compiler.pyx': ['context_instructions.pxd'],
    'context_instructions.pxd': [
        'transformation.pxd', 'instructions.pxd', 'texture.pxd'],
    'fbo.pxd': ['c_opengl.pxd', 'instructions.pxd', 'texture.pxd'],
    'fbo.pyx': [
        'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd',
        'c_opengl_debug.pxd', 'c_opengl_mock.pxd'],
    'gl_instructions.pyx': [
        'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',
        'instructions.pxd', 'c_opengl_mock.pxd'],
    'instructions.pxd': [
        'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',
        'texture.pxd', '../_event.pxd'],
    'instructions.pyx': [
        'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',
        'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd',
        'c_opengl_mock.pxd'],
    'opengl.pyx': [
        'config.pxi', 'common.pxi', 'c_opengl.pxd', 'gl_redirect.h'],
    'opengl_utils.pyx': [
        'opengl_utils_def.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd'],
    'shader.pxd': ['c_opengl.pxd', 'transformation.pxd', 'vertex.pxd'],
    'shader.pyx': [
        'config.pxi', 'common.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',
        'vertex.pxd', 'transformation.pxd', 'context.pxd',
        'gl_debug_logger.pxi', 'c_opengl_mock.pxd'],
    'stencil_instructions.pxd': ['instructions.pxd'],
    'stencil_instructions.pyx': [
        'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',
        'gl_debug_logger.pxi', 'c_opengl_mock.pxd'],
    'scissor_instructions.pyx': [
        'config.pxi', 'opcodes.pxi', 'c_opengl.pxd', 'c_opengl_debug.pxd',
        'c_opengl_mock.pxd'],
    'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',
                'vertex_instructions.pxd', 'tesselator.pxd'],
    'texture.pxd': ['c_opengl.pxd'],
    'texture.pyx': [
        'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',
        'c_opengl.pxd', 'c_opengl_debug.pxd', 'opengl_utils.pxd',
        'img_tools.pxi', 'gl_debug_logger.pxi', 'c_opengl_mock.pxd'],
    'vbo.pxd': ['buffer.pxd', 'c_opengl.pxd', 'vertex.pxd'],
    'vbo.pyx': [
        'config.pxi', 'common.pxi', 'c_opengl_debug.pxd', 'context.pxd',
        'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi',
        'c_opengl_mock.pxd'],
    'vertex.pxd': ['c_opengl.pxd'],
    'vertex.pyx': ['config.pxi', 'common.pxi'],
    'vertex_instructions.pyx': [
        'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',
        'instructions.pxd', 'vertex_instructions.pxd',
        'c_opengl.pxd', 'c_opengl_debug.pxd', 'texture.pxd',
        'vertex_instructions_line.pxi', 'c_opengl_mock.pxd'],
    'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}
# Per-module build flags for every Cython source in the project; keys are
# paths relative to the kivy package.
sources = {
    '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),
    'weakproxy.pyx': {},
    'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),
    'graphics/buffer.pyx': base_flags,
    'graphics/context.pyx': merge(base_flags, gl_flags),
    'graphics/c_opengl_debug.pyx': merge(base_flags, gl_flags),
    'graphics/c_opengl_mock.pyx': merge(base_flags, gl_flags),
    'graphics/compiler.pyx': merge(base_flags, gl_flags),
    'graphics/context_instructions.pyx': merge(base_flags, gl_flags),
    'graphics/fbo.pyx': merge(base_flags, gl_flags),
    'graphics/gl_instructions.pyx': merge(base_flags, gl_flags),
    'graphics/instructions.pyx': merge(base_flags, gl_flags),
    'graphics/opengl.pyx': merge(base_flags, gl_flags),
    'graphics/opengl_utils.pyx': merge(base_flags, gl_flags),
    'graphics/shader.pyx': merge(base_flags, gl_flags),
    'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags),
    'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags),
    'graphics/texture.pyx': merge(base_flags, gl_flags),
    'graphics/transformation.pyx': merge(base_flags, gl_flags),
    'graphics/vbo.pyx': merge(base_flags, gl_flags),
    'graphics/vertex.pyx': merge(base_flags, gl_flags),
    'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags),
    'core/text/text_layout.pyx': base_flags,
    # the tesselator bundles its own C sources (libtess2)
    'graphics/tesselator.pyx': merge(base_flags, {
        'include_dirs': ['kivy/lib/libtess2/Include'],
        'c_depends': [
            'lib/libtess2/Source/bucketalloc.c',
            'lib/libtess2/Source/dict.c',
            'lib/libtess2/Source/geom.c',
            'lib/libtess2/Source/mesh.c',
            'lib/libtess2/Source/priorityq.c',
            'lib/libtess2/Source/sweep.c',
            'lib/libtess2/Source/tess.c'
        ]
    }),
    'graphics/svg.pyx': merge(base_flags, gl_flags)
}
# Optional modules: each feature switch adds its sources with the proper
# extra flags.
if c_options['use_sdl2']:
    sdl2_flags = determine_sdl2()

if sdl2_flags:
    sdl2_depends = {'depends': ['lib/sdl2.pxi']}
    for source_file in ('core/window/_window_sdl2.pyx',
                        'core/image/_img_sdl2.pyx',
                        'core/text/_text_sdl2.pyx',
                        'core/audio/audio_sdl2.pyx',
                        'core/clipboard/_clipboard_sdl2.pyx'):
        sources[source_file] = merge(
            base_flags, gl_flags, sdl2_flags, sdl2_depends)

if platform in ('darwin', 'ios'):
    # activate ImageIO provider for our core image
    if platform == 'ios':
        osx_flags = {'extra_link_args': [
            '-framework', 'Foundation',
            '-framework', 'UIKit',
            '-framework', 'AudioToolbox',
            '-framework', 'CoreGraphics',
            '-framework', 'QuartzCore',
            '-framework', 'ImageIO',
            '-framework', 'Accelerate']}
    else:
        osx_flags = {'extra_link_args': [
            '-framework', 'ApplicationServices']}
    sources['core/image/img_imageio.pyx'] = merge(
        base_flags, osx_flags)

if c_options['use_avfoundation']:
    import platform as _platform
    mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]
    # the AVFoundation camera backend requires OS X 10.7+
    if mac_ver >= [10, 7]:
        osx_flags = {
            'extra_link_args': ['-framework', 'AVFoundation'],
            'extra_compile_args': ['-ObjC++'],
            'depends': ['core/camera/camera_avfoundation_implem.m']}
        sources['core/camera/camera_avfoundation.pyx'] = merge(
            base_flags, osx_flags)
    else:
        print('AVFoundation cannot be used, OSX >= 10.7 is required')

if c_options['use_rpi']:
    sources['lib/vidcore_lite/egl.pyx'] = merge(
        base_flags, gl_flags)
    sources['lib/vidcore_lite/bcm.pyx'] = merge(
        base_flags, gl_flags)

if c_options['use_x11']:
    libs = ['Xrender', 'X11']
    if c_options['use_egl']:
        libs += ['EGL']
    else:
        libs += ['GL']
    sources['core/window/window_x11.pyx'] = merge(
        base_flags, gl_flags, {
            # FIXME add an option to depend on them but not compile them
            # cause keytab is included in core, and core is included in
            # window_x11
            #
            # 'depends': [
            #    'core/window/window_x11_keytab.c',
            #    'core/window/window_x11_core.c'],
            'libraries': libs})

if c_options['use_gstreamer']:
    sources['lib/gstplayer/_gstplayer.pyx'] = merge(
        base_flags, gst_flags, {
            'depends': ['lib/gstplayer/_gstplayer.h']})
# -----------------------------------------------------------------------------
# extension modules
def get_dependencies(name, deps=None):
    """Recursively collect the graphics files *name* depends on.

    Accumulates into (and returns) *deps*; duplicates are skipped so
    cyclic entries cannot recurse forever.
    """
    if deps is None:
        deps = []
    for child in graphics_dependencies.get(name, []):
        if child in deps:
            continue
        deps.append(child)
        get_dependencies(child, deps)
    return deps
def resolve_dependencies(fn, depends):
    """Return the absolute dependency paths of graphics module *fn*.

    Looks up both the .pyx and its .pxd in graphics_dependencies; the
    *depends* argument is kept for interface compatibility.
    """
    name = basename(fn)
    deps = []
    get_dependencies(name, deps)
    get_dependencies(name.replace('.pyx', '.pxd'), deps)
    return [expand(src_path, 'graphics', dep) for dep in deps]
def get_extensions_from_sources(sources):
    """Build a CythonExtension for every entry of *sources*, resolving the
    graphics dependency graph and falling back to the pre-generated .c
    files when Cython is not available."""
    ext_modules = []
    if environ.get('KIVY_FAKE_BUILDEXT'):
        print('Fake build_ext asked, will generate only .h/.c')
        return ext_modules
    for pyx, flags in sources.items():
        is_graphics = pyx.startswith('graphics')
        pyx = expand(src_path, pyx)
        depends = [expand(src_path, x) for x in flags.pop('depends', [])]
        c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]
        if not have_cython:
            # without cython, compile the shipped pre-generated C file
            pyx = '%s.c' % pyx[:-4]
        if is_graphics:
            depends = resolve_dependencies(pyx, depends)
        # dependencies that are themselves compilation units
        f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (
            'c', 'cpp', 'm')]
        module_name = get_modulename_from_file(pyx)
        flags_clean = {'depends': depends}
        for key, value in flags.items():
            if len(value):
                flags_clean[key] = value
        ext_modules.append(CythonExtension(
            module_name, [pyx] + f_depends + c_depends, **flags_clean))
    return ext_modules
ext_modules = get_extensions_from_sources(sources)

# -----------------------------------------------------------------------------
# automatically detect data files
data_file_prefix = 'share/kivy-'
examples = {}
examples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',
                        'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',
                        'glsl')
# collect every example file with a whitelisted extension, grouped by the
# directory it installs into
for root, subFolders, files in walk('examples'):
    for fn in files:
        ext = fn.split('.')[-1].lower()
        if ext not in examples_allowed_ext:
            continue
        filename = join(root, fn)
        directory = '%s%s' % (data_file_prefix, dirname(filename))
        if directory not in examples:
            examples[directory] = []
        examples[directory].append(filename)

# pre-built binary dependencies shipped inside kivy/binary_deps, if any
binary_deps = []
binary_deps_path = join(src_path, 'kivy', 'binary_deps')
if isdir(binary_deps_path):
    for root, dirnames, filenames in walk(binary_deps_path):
        for fname in filenames:
            binary_deps.append(
                join(root.replace(binary_deps_path, 'binary_deps'), fname))
# -----------------------------------------------------------------------------
# setup !
# Single distutils/setuptools entry point; everything above only prepared
# the extension modules, command classes and data file lists used here.
setup(
    name='Kivy',
    version=kivy.__version__,
    author='Kivy Team and other contributors',
    author_email='kivy-dev@googlegroups.com',
    url='http://kivy.org',
    license='MIT',
    description=(
        'A software library for rapid development of '
        'hardware-accelerated multitouch applications.'),
    ext_modules=ext_modules,
    cmdclass=cmdclass,
    packages=[
        'kivy',
        'kivy.adapters',
        'kivy.core',
        'kivy.core.audio',
        'kivy.core.camera',
        'kivy.core.clipboard',
        'kivy.core.image',
        'kivy.core.gl',
        'kivy.core.spelling',
        'kivy.core.text',
        'kivy.core.video',
        'kivy.core.window',
        'kivy.deps',
        'kivy.effects',
        'kivy.ext',
        'kivy.graphics',
        'kivy.garden',
        'kivy.input',
        'kivy.input.postproc',
        'kivy.input.providers',
        'kivy.lang',
        'kivy.lib',
        'kivy.lib.osc',
        'kivy.lib.gstplayer',
        'kivy.lib.vidcore_lite',
        'kivy.modules',
        'kivy.network',
        'kivy.storage',
        'kivy.tests',
        'kivy.tools',
        'kivy.tools.packaging',
        'kivy.tools.packaging.pyinstaller_hooks',
        'kivy.tools.highlight',
        'kivy.extras',
        'kivy.tools.extensions',
        'kivy.uix',
        'kivy.uix.behaviors', ],
    package_dir={'kivy': 'kivy'},
    package_data={'kivy': [
        '*.pxd',
        '*.pxi',
        'core/text/*.pxd',
        'core/text/*.pxi',
        'graphics/*.pxd',
        'graphics/*.pxi',
        'graphics/*.h',
        'lib/vidcore_lite/*.pxd',
        'lib/vidcore_lite/*.pxi',
        'data/*.kv',
        'data/*.json',
        'data/fonts/*.ttf',
        'data/images/*.png',
        'data/images/*.jpg',
        'data/images/*.gif',
        'data/images/*.atlas',
        'data/keyboards/*.json',
        'data/logo/*.png',
        'data/glsl/*.png',
        'data/glsl/*.vs',
        'data/glsl/*.fs',
        'tests/*.zip',
        'tests/*.kv',
        'tests/*.png',
        'tests/*.ttf',
        'tests/*.ogg',
        'tools/highlight/*.vim',
        'tools/highlight/*.el',
        'tools/packaging/README.txt',
        'tools/packaging/win32/kivy.bat',
        'tools/packaging/win32/kivyenv.sh',
        'tools/packaging/win32/README.txt',
        'tools/packaging/osx/Info.plist',
        'tools/packaging/osx/InfoPlist.strings',
        'tools/gles_compat/*.h',
        'tools/packaging/osx/kivy.sh'] + binary_deps},
    data_files=list(examples.items()),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: MacOS X',
        'Environment :: Win32 (MS Windows)',
        'Environment :: X11 Applications',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: BSD :: FreeBSD',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Artistic Software',
        'Topic :: Games/Entertainment',
        'Topic :: Multimedia :: Graphics :: 3D Rendering',
        'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',
        'Topic :: Multimedia :: Graphics :: Presentation',
        'Topic :: Multimedia :: Graphics :: Viewers',
        'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
        'Topic :: Multimedia :: Video :: Display',
        'Topic :: Scientific/Engineering :: Human Machine Interfaces',
        'Topic :: Scientific/Engineering :: Visualization',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: User Interfaces'],
    dependency_links=[
        'https://github.com/kivy-garden/garden/archive/master.zip'],
    install_requires=['Kivy-Garden>=0.1.4', 'docutils', 'pygments'],
    setup_requires=['cython>=' + MIN_CYTHON_STRING])
| |
# -*- coding: utf-8 -*-
"""
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
## FIXME: disabling pylint error 1101 as a quick workaround a bad class
## design with classes Nginx, NginxStatic and NginxProxy
# pylint: disable=E1101
from os.path import join, devnull, exists
from os import kill, chown, setuid, setgid, environ
from pwd import getpwnam
from signal import SIGINT, SIGTERM, SIGUSR2, SIGHUP
from subprocess import Popen
from shutil import rmtree, copy2
from Cheetah.Template import Template
from conpaas.core.misc import verify_port, verify_ip_port_list, verify_ip_or_domain
from conpaas.core.log import create_logger
# Lifecycle states shared by all process-manager classes in this module.
S_INIT = 'INIT'
S_STARTING = 'STARTING'
S_RUNNING = 'RUNNING'
S_STOPPING = 'STOPPING'
S_STOPPED = 'STOPPED'
logger = create_logger(__name__)
# External commands/paths below are populated from the agent configuration
# by init(); they remain None until init() has been called.
NGINX_CMD = None
PHP_FPM = None
TOMCAT_INSTANCE_CREATE = None
TOMCAT_STARTUP = None
SCALARIS_CFG = None
SCALARIS_CTL = None
SCALARIS_HOME = None
# Agent working directories and local IP address, also filled in by init().
VAR_TMP = None
VAR_CACHE = None
VAR_RUN = None
ETC = None
MY_IP = None
def init(config_parser):
    """Populate the module-level commands and paths from the agent config.

    Must be called before instantiating any process-manager class in this
    module, since they all read these globals.
    """
    global NGINX_CMD, PHP_FPM, TOMCAT_INSTANCE_CREATE, TOMCAT_STARTUP
    global SCALARIS_CFG, SCALARIS_CTL, SCALARIS_HOME
    global VAR_TMP, VAR_CACHE, VAR_RUN, ETC, MY_IP
    NGINX_CMD = config_parser.get('nginx', 'NGINX')
    PHP_FPM = config_parser.get('php', 'PHP_FPM')
    TOMCAT_INSTANCE_CREATE = config_parser.get('tomcat',
                                               'TOMCAT_INSTANCE_CREATE')
    TOMCAT_STARTUP = config_parser.get('tomcat', 'TOMCAT_STARTUP')
    SCALARIS_CFG = config_parser.get('scalaris', 'SCALARIS_CFG')
    SCALARIS_CTL = config_parser.get('scalaris', 'SCALARIS_CTL')
    SCALARIS_HOME = config_parser.get('scalaris', 'SCALARIS_HOME')
    VAR_TMP = config_parser.get('agent', 'VAR_TMP')
    VAR_CACHE = config_parser.get('agent', 'VAR_CACHE')
    VAR_RUN = config_parser.get('agent', 'VAR_RUN')
    ETC = config_parser.get('agent', 'ETC')
    MY_IP = config_parser.get('agent', 'MY_IP')
class Nginx:
    """Base controller for an nginx daemon (start/stop/restart).

    Subclasses must provide ``self.start_args`` (argv used to launch
    nginx), ``self.pid_file``, ``self.stop_sig`` and ``self.state``, and
    must implement ``_write_config()``, which ``restart()`` calls before
    signalling the daemon.
    """

    def _read_pid(self):
        """Return the integer PID stored in ``self.pid_file``.

        Failures are logged and re-raised: IOError if the file cannot be
        read, ValueError/TypeError if its content is not an integer.
        """
        try:
            return int(open(self.pid_file, 'r').read().strip())
        except IOError as e:
            logger.exception('Failed to open PID file "%s"' % (self.pid_file))
            raise e
        except (ValueError, TypeError) as e:
            logger.exception('PID in "%s" is invalid' % (self.pid_file))
            raise e

    def start(self):
        """Spawn nginx and wait for the (daemonizing) launcher to exit."""
        self.state = S_STARTING
        devnull_fd = open(devnull, 'w')
        proc = Popen(self.start_args, stdout=devnull_fd, stderr=devnull_fd,
                     close_fds=True)
        if proc.wait() != 0:
            logger.critical('Failed to start web server (code=%d)'
                            % proc.returncode)
            raise OSError('Failed to start web server (code=%d)'
                          % proc.returncode)
        self.state = S_RUNNING
        logger.info('WebServer started')

    def stop(self):
        """Stop the running server by sending ``self.stop_sig`` to its PID."""
        if self.state == S_RUNNING:
            self.state = S_STOPPING
            if exists(self.pid_file):
                pid = self._read_pid()
                try:
                    kill(pid, self.stop_sig)
                    self.state = S_STOPPED
                    logger.info('WebServer stopped')
                except (IOError, OSError) as e:
                    logger.exception('Failed to kill WebServer PID=%d' % (pid))
                    raise e
            else:
                logger.critical('Could not find PID file %s to kill WebServer'
                                % (self.pid_file))
                raise IOError('Could not find PID file %s to kill WebServer'
                              % (self.pid_file))
        else:
            logger.warning('Request to kill WebServer while it is not running')

    def restart(self):
        """Rewrite the configuration and signal nginx to reload it."""
        self._write_config()
        pid = self._read_pid()
        try:
            # SIGHUP makes nginx re-read its configuration gracefully.
            kill(pid, SIGHUP)
        except (IOError, OSError) as e:
            # BUG FIX: the original except clause did not bind the exception
            # (``except (IOError, OSError):`` followed by ``raise e``), so the
            # re-raise referenced an unbound/stale name.
            logger.exception('Failed to "gracefully" kill WebServer PID=%d'
                             % (pid))
            raise e
        else:
            self.post_restart()
            logger.info('WebServer restarted')

    def post_restart(self):
        """Hook for subclasses; called after a successful restart."""
        pass
class NginxStatic(Nginx):
    """Nginx instance serving the static files of deployed code versions."""

    def __init__(self, port=None, code_versions=None):
        self.cmd = NGINX_CMD
        self.config_template = join(ETC, 'nginx-static.tmpl')
        self.state = S_INIT
        self.configure(port=port, code_versions=code_versions)
        self.start()
        self.stop_sig = SIGINT

    def configure(self, port=None, code_versions=None):
        """Validate the parameters, then (re)write the nginx config file."""
        verify_port(port)
        self.port = port
        if not isinstance(code_versions, list):
            raise TypeError('code_versions should be a list of strings')
        if not all(isinstance(cv, basestring) for cv in code_versions):
            raise TypeError('code_versions should be a list of strings')
        self.code_versions = code_versions
        if self.state == S_INIT:
            # First configuration: decide where config, logs and pid live.
            self.config_file = join(VAR_CACHE, 'nginx-static.conf')
            self.access_log = join(VAR_CACHE, 'nginx-static-access.log')
            self.timed_log = join(VAR_CACHE, 'nginx-static-timed.log')
            self.error_log = join(VAR_CACHE, 'nginx-static-error.log')
            self.pid_file = join(VAR_RUN, 'nginx-static.pid')
            self.user = 'www-data'
        self._write_config()
        self.start_args = [self.cmd, '-c', self.config_file]

    def _write_config(self):
        """Render the Cheetah template and write the nginx config file."""
        template_text = open(self.config_template).read()
        rendered = Template(template_text,
                            {
                                'user': self.user,
                                'port': self.port,
                                'error_log': self.error_log,
                                'access_log': self.access_log,
                                'timed_log': self.timed_log,
                                'pid_file': self.pid_file,
                                'doc_root': join(VAR_CACHE, 'www'),
                                'code_versions': self.code_versions})
        out = open(self.config_file, 'w')
        out.write(str(rendered))
        out.close()
        logger.debug('web server configuration written to %s' % (self.config_file))

    def status(self):
        """Return the externally visible state of this server."""
        return {'state': self.state,
                'port': self.port,
                'code_versions': self.code_versions}
class NginxProxy(Nginx):
    """Nginx instance acting as a load-balancing reverse proxy.

    Forwards requests to static web servers, PHP-FPM backends and Tomcat
    instances, optionally pointing static content at a CDN.
    """

    def __init__(self, port=None, code_version=None, cdn=None, web_list=None,
                 fpm_list=None, tomcat_list=None, tomcat_servlets=None):
        # BUG FIX: the original used mutable default arguments ([]), which
        # Python shares between calls; use None sentinels instead (the lists
        # are normalized in configure()).
        self.cmd = NGINX_CMD
        self.config_template = join(ETC, 'nginx-proxy.tmpl')
        self.state = S_INIT
        self.configure(port=port, code_version=code_version, cdn=cdn,
                       web_list=web_list, fpm_list=fpm_list,
                       tomcat_list=tomcat_list,
                       tomcat_servlets=tomcat_servlets)
        self.start()
        self.stop_sig = SIGINT

    def _write_config(self):
        """Render the proxy template and write the nginx config file."""
        tmpl = open(self.config_template).read()
        conf_fd = open(self.config_file, 'w')
        template = Template(tmpl, {
            'user': self.user,
            'port': self.port,
            'error_log': self.error_log,
            'access_log': self.access_log,
            'timed_log': self.timed_log,
            'pid_file': self.pid_file,
            'doc_root': join(VAR_CACHE, 'www'),
            'code_version': self.codeversion,
            'proxy_ip': MY_IP,
            'web_list': self.web_list,
            'fpm_list': self.fpm_list,
            'tomcat_list': self.tomcat_list,
            'tomcat_servlets': self.tomcat_servlets,
        })
        conf_fd.write(str(template))
        conf_fd.close()
        logger.debug('Load Balancer configuration written to %s' % (self.config_file))

    def configure(self, port=None, code_version=None, cdn=None, web_list=None,
                  fpm_list=None, tomcat_list=None, tomcat_servlets=None):
        """Validate backend lists and (re)write the proxy configuration."""
        # Normalize the None sentinels to fresh lists (see __init__ note).
        web_list = [] if web_list is None else web_list
        fpm_list = [] if fpm_list is None else fpm_list
        tomcat_list = [] if tomcat_list is None else tomcat_list
        tomcat_servlets = [] if tomcat_servlets is None else tomcat_servlets
        verify_port(port)
        port = int(port)
        verify_ip_port_list(web_list)
        verify_ip_port_list(fpm_list)
        verify_ip_port_list(tomcat_list)
        if self.state == S_INIT:
            # First configuration: decide where config, logs and pid live.
            self.config_file = join(VAR_CACHE, 'nginx-proxy.conf')
            self.access_log = join(VAR_CACHE, 'nginx-proxy-access.log')
            self.timed_log = join(VAR_CACHE, 'nginx-proxy-timed.log')
            self.error_log = join(VAR_CACHE, 'nginx-proxy-error.log')
            self.pid_file = join(VAR_RUN, 'nginx-proxy.pid')
            self.user = 'www-data'
        self.port = port
        self.codeversion = code_version
        self.cdn = cdn
        self.web_list = web_list
        self.fpm_list = fpm_list
        self.tomcat_list = tomcat_list
        self.tomcat_servlets = tomcat_servlets
        self._write_config()
        self.start_args = [self.cmd, '-c', self.config_file]

    def status(self):
        """Return the externally visible state of this proxy."""
        return {'state': self.state,
                'port': self.port,
                'code_version': self.codeversion,
                'cdn': self.cdn,
                'web_list': self.web_list,
                'fpm_list': self.fpm_list,
                'tomcat_list': self.tomcat_list,
                'tomcat_servlets': self.tomcat_servlets,
                }
class Tomcat:
    """Controller for a per-agent Tomcat servlet container.

    Creates a fresh Tomcat instance directory via an external helper
    script, fixes its ownership so the container can run as www-data,
    renders server.xml from a template, then starts the container.
    """

    def __init__(self, tomcat_port=None):
        # Cheetah template rendered into the instance's conf/server.xml.
        self.config_template = join(ETC, 'tomcat-server-xml.tmpl')
        self.instance_dir = join(VAR_CACHE, 'tomcat_instance')
        self.config_file = join(self.instance_dir, 'conf', 'server.xml')
        # '-security' asks the startup script to enable the security manager.
        self.start_args = [TOMCAT_STARTUP, '-security']
        self.shutdown_args = [join(self.instance_dir, 'bin', 'shutdown.sh')]
        verify_port(tomcat_port)
        devnull_fd = open(devnull, 'w')
        # Let the external helper script lay out the instance directory tree.
        proc = Popen([TOMCAT_INSTANCE_CREATE, '-p', str(tomcat_port), self.instance_dir], stdout=devnull_fd, stderr=devnull_fd, close_fds=True)
        if proc.wait() != 0:
            logger.critical('Failed to initialize tomcat (code=%d)' % proc.returncode)
            raise OSError('Failed to initialize tomcat (code=%d)' % proc.returncode)
        try:
            self.www_user = getpwnam('www-data')
        except KeyError:
            logger.exception('Failed to find user id of www-data')
            raise OSError('Failed to find user id of www-data')
        # Tomcat runs demoted to www-data (see demote()); it must be able
        # to write to these directories.
        for child in ['logs', 'temp', 'work']:
            try:
                chown(join(self.instance_dir, child), self.www_user.pw_uid, self.www_user.pw_gid)
            except OSError:
                logger.exception('Failed to change ownership of %s' % child)
                raise
        self.state = S_INIT
        self.configure(tomcat_port=tomcat_port)
        self.start()

    def configure(self, tomcat_port=None):
        """Render server.xml for the given port and install the policy file."""
        if tomcat_port is None:
            raise TypeError('tomcat_port is required')
        self.port = tomcat_port
        tmpl = open(self.config_template).read()
        template = Template(tmpl, {'port': self.port})
        fd = open(self.config_file, 'w')
        fd.write(str(template))
        fd.close()
        copy2(join(ETC, 'tomcat-catalina.policy'),
              join(self.instance_dir, 'work', 'catalina.policy'))

    def demote(self):
        """Drop privileges to www-data (used as Popen's preexec_fn).

        The group must be changed before the user id, otherwise setgid
        would no longer be permitted.
        """
        setgid(self.www_user.pw_gid)
        setuid(self.www_user.pw_uid)

    def restart(self):
        # Intentionally a no-op: the agent never restarts Tomcat in place.
        pass

    def start(self):
        """Run the Tomcat startup script and wait for it to return."""
        self.state = S_STARTING
        devnull_fd = open(devnull, 'w')
        proc = Popen(self.start_args, env={'CATALINA_BASE': self.instance_dir},
                     preexec_fn=self.demote,  # run tomcat under user www-data
                     stdout=devnull_fd, stderr=devnull_fd, close_fds=True)
        if proc.wait() != 0:
            logger.critical('Failed to start tomcat (code=%d)' % proc.returncode)
            raise OSError('Failed to start tomcat (code=%d)' % proc.returncode)
        self.state = S_RUNNING
        logger.info('Tomcat started')

    def stop(self):
        """Shut Tomcat down and delete its (disposable) instance directory."""
        if self.state == S_RUNNING:
            self.state = S_STOPPING
            devnull_fd = open(devnull, 'w')
            proc = Popen(self.shutdown_args, stdout=devnull_fd, stderr=devnull_fd, close_fds=True)
            if proc.wait() != 0:
                logger.critical('Failed to stop tomcat (code=%d)' % proc.returncode)
                raise OSError('Failed to stop tomcat (code=%d)' % proc.returncode)
            self.state = S_STOPPED
            logger.info('Tomcat stopped')
            rmtree(self.instance_dir, ignore_errors=True)
        else:
            logger.warning('Request to kill tomcat while it is not running')

    def status(self):
        """Return the current lifecycle state."""
        return {'state': self.state}
class PHPProcessManager:
    """Controller for a php-fpm FastCGI process manager.

    On first configure() it renders the fpm configuration from a Cheetah
    template and writes a small file pointing PHP code at the Scalaris
    JSON-RPC endpoint; start/stop/restart drive the daemon via its PID
    file.
    """

    def __init__(self, port=None, scalaris=None, configuration=None):
        self.config_template = join(ETC, 'fpm.tmpl')
        self.cmd = PHP_FPM
        self.state = S_INIT
        self.configure(port=port, scalaris=scalaris, configuration=configuration)
        self.start()

    def configure(self, port=None, scalaris=None, configuration=None):
        """Validate arguments and, on the first call, write the config files."""
        if port is None:
            raise TypeError('port is required')
        verify_port(port)
        verify_ip_or_domain(scalaris)
        if configuration and not isinstance(configuration, dict):
            raise TypeError('configuration is not a dict')
        if self.state == S_INIT:
            self.scalaris_config = join(VAR_CACHE, 'fpm-scalaris.conf')
            self.config_file = join(VAR_CACHE, 'fpm.conf')
            self.error_log = join(VAR_CACHE, 'fpm-error.log')
            self.access_log = join(VAR_CACHE, 'fpm-access.log')
            self.pid_file = join(VAR_RUN, 'fpm.pid')
            self.user = 'www-data'
            self.group = 'www-data'
            # Static sizing of the fpm worker pool.
            self.max_children = 5
            self.max_requests = 300
            self.servers_start = 1
            self.servers_spare_min = 1
            self.servers_spare_max = 5
            self.scalaris = scalaris
            tmpl = open(self.config_template).read()
            fd = open(self.config_file, 'w')
            template = Template(tmpl, {
                'pid_file': self.pid_file,
                'error_log': self.error_log,
                'port': port,
                'user': self.user,
                'group': self.group,
                'access_log': self.access_log,
                'max_children': self.max_children,
                'max_requests': self.max_requests,
                'servers_start': self.servers_start,
                'servers_spare_min': self.servers_spare_min,
                'servers_spare_max': self.servers_spare_max,
                'properties': configuration})
            fd.write(str(template))
            fd.close()
            # Tell the PHP code where to reach the Scalaris JSON-RPC API.
            fd = open(self.scalaris_config, 'w')
            fd.write("http://%s:8000/jsonrpc.yaws" % (scalaris))
            fd.close()
        self.port = port
        self.configuration = configuration

    def _read_pid(self):
        """Return the integer PID stored in ``self.pid_file``.

        Failures are logged and re-raised: IOError if the file cannot be
        read, ValueError/TypeError if its content is not an integer.
        (Extracted helper: this logic was duplicated in stop() and
        restart().)
        """
        try:
            return int(open(self.pid_file, 'r').read().strip())
        except IOError as e:
            logger.exception('Failed to open PID file "%s"' % (self.pid_file))
            raise e
        except (ValueError, TypeError) as e:
            logger.exception('PID in "%s" is invalid' % (self.pid_file))
            raise e

    def start(self):
        """Launch php-fpm with the generated configuration file."""
        self.state = S_STARTING
        devnull_fd = open(devnull, 'w')
        logger.info('cmd ' + str(self.cmd) + ' ' + str(self.config_file))
        proc = Popen([self.cmd, '--fpm-config', self.config_file], stdout=devnull_fd, stderr=devnull_fd, close_fds=True)
        if proc.wait() != 0:
            logger.critical('Failed to start the php-fpm')
            # FIXME EC2: It raises this error but the php-fpm is started... Guessing why it happens.
            # raise OSError('Failed to start the php-fpm')
        self.state = S_RUNNING
        logger.info('php-fpm started')

    def stop(self):
        """Terminate the php-fpm master process with SIGTERM."""
        if self.state == S_RUNNING:
            self.state = S_STOPPING
            if exists(self.pid_file):
                pid = self._read_pid()
                try:
                    kill(pid, SIGTERM)
                    self.state = S_STOPPED
                    logger.info('php-fpm stopped')
                except (IOError, OSError) as e:
                    logger.exception('Failed to kill php-fpm PID=%d' % (pid))
                    raise e
            else:
                logger.critical('Could not find PID file %s to kill php-fpm' % (self.pid_file))
                raise IOError('Could not find PID file %s to kill php-fpm' % (self.pid_file))
        else:
            logger.warning('Request to kill php-fpm while it is not running')

    def restart(self):
        """Gracefully restart php-fpm by sending SIGUSR2."""
        if self.state != S_RUNNING:
            logger.warning('php-fpm not running in order to restart')
        if exists(self.pid_file):
            pid = self._read_pid()
            try:
                # Graceful restart for PHP-FPM: send it a SIGUSR2.
                # It will reload the same configuration file and apply the changes.
                # The new PID value will be written to the same PID file.
                kill(pid, SIGUSR2)
            except (IOError, OSError) as e:
                logger.exception('Failed to kill php-fpm PID=%d' % (pid))
                raise e
        else:
            logger.critical('Could not find PID file %s to restart php-fpm' % (self.pid_file))
            raise IOError('Could not find PID file %s to restart php-fpm' % (self.pid_file))

    def status(self):
        """Return the externally visible state and port."""
        return {'state': self.state, 'port': self.port}
class ScalarisProcessManager:
    """Writes the Scalaris node configuration and controls the daemon."""

    def __init__(self, first_node, known_hosts):
        self.start(first_node, known_hosts)

    def start(self, first_node, known_hosts):
        """Write the node configuration and launch Scalaris in daemon mode."""
        cfg = open(SCALARIS_CFG, 'w')
        cfg.write('{mgmt_server, null}.\n')
        cfg.write('{known_hosts, %s}.\n' % known_hosts)
        cfg.close()
        logger.info('Scalaris configuration written to %s', SCALARIS_CFG)
        # The first node bootstraps the ring; later nodes join it.
        node_type = 'first' if first_node else 'joining'
        scalaris_args = [SCALARIS_CTL, '-d', '-t', node_type, 'start']
        logger.info('cmd ' + ' '.join(scalaris_args))
        sink = open(devnull, 'w')
        proc = Popen(scalaris_args, stdout=sink, stderr=sink, close_fds=True,
                     env=dict(environ, HOME=SCALARIS_HOME))
        if proc.wait() != 0:
            logger.critical('Failed to start Scalaris')
        else:
            logger.info('Scalaris started')

    def stop(self):
        """Ask scalarisctl to stop the daemon."""
        scalaris_args = [SCALARIS_CTL, 'stop']
        logger.info('cmd ' + ' '.join(scalaris_args))
        sink = open(devnull, 'w')
        proc = Popen(scalaris_args, stdout=sink, stderr=sink, close_fds=True,
                     env=dict(environ, HOME=SCALARIS_HOME))
        if proc.wait() != 0:
            logger.critical('Failed to stop Scalaris')
        else:
            logger.info('Scalaris stopped')
| |
import os
import os.path as op
import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_equal, assert_allclose,
assert_array_less)
import itertools
import mne
from mne.datasets import testing
from mne.fixes import _get_img_fdata
from mne import read_trans, write_trans
from mne.io import read_info
from mne.transforms import (invert_transform, _get_trans,
rotation, rotation3d, rotation_angles, _find_trans,
combine_transforms, apply_trans, translation,
get_ras_to_neuromag_trans, _pol_to_cart,
quat_to_rot, rot_to_quat, _angle_between_quats,
_find_vector_rotation, _sph_to_cart, _cart_to_sph,
_topo_to_sph, _average_quats,
_SphericalSurfaceWarp as SphericalSurfaceWarp,
rotation3d_align_z_axis, _read_fs_xfm,
_write_fs_xfm, _quat_real, _fit_matched_points,
_quat_to_euler, _euler_to_quat,
_quat_to_affine, _compute_r2, _validate_pipeline)
from mne.utils import requires_nibabel, requires_dipy
# Root of the mne-testing-data set; download=False means the path may not
# exist, in which case tests guarded by requires_testing_data are skipped.
data_path = testing.data_path(download=False)
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-trans.fif')
fname_eve = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc_raw-eve.fif')
subjects_dir = op.join(data_path, 'subjects')
fname_t1 = op.join(subjects_dir, 'fsaverage', 'mri', 'T1.mgz')
# Small fixture files shipped inside the mne package itself.
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname_trans = op.join(base_dir, 'sample-audvis-raw-trans.txt')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
def test_tps():
    """Test TPS warping."""
    # Build a regular grid of 200 points on the unit sphere
    # (20 azimuths x 10 interior polar angles).
    az = np.linspace(0., 2 * np.pi, 20, endpoint=False)
    pol = np.linspace(0, np.pi, 12)[1:-1]
    sph = np.array(np.meshgrid(1, az, pol, indexing='ij'))
    sph.shape = (3, -1)
    assert_equal(sph.shape[1], 200)
    source = _sph_to_cart(sph.T)
    # Destination is the source scaled by 2 and shifted along x.
    destination = source.copy()
    destination *= 2
    destination[:, 0] += 1
    # fit with 100 points
    warp = SphericalSurfaceWarp()
    assert 'no ' in repr(warp)  # repr reflects the unfitted state
    # Fit on subsampled (and differently subsampled) point sets, then
    # check that the full set of source points maps to the destination.
    warp.fit(source[::3], destination[::2])
    assert 'oct5' in repr(warp)
    destination_est = warp.transform(source)
    assert_allclose(destination_est, destination, atol=1e-3)
@testing.requires_testing_data
def test_get_trans():
    """Test converting '-trans.txt' to '-trans.fif'."""
    # The FIF file stores head->MRI, so invert it before comparing.
    trans_fif = invert_transform(read_trans(fname))
    trans_txt = _get_trans(fname_trans)[0]
    assert trans_fif.__eq__(trans_txt, atol=1e-5)
@testing.requires_testing_data
def test_io_trans(tmpdir):
    """Test reading and writing of trans files."""
    tempdir = str(tmpdir)
    os.mkdir(op.join(tempdir, 'sample'))
    # No trans file in the subject directory yet -> lookup must fail.
    pytest.raises(RuntimeError, _find_trans, 'sample', subjects_dir=tempdir)
    trans0 = read_trans(fname)
    fname1 = op.join(tempdir, 'sample', 'test-trans.fif')
    trans0.save(fname1)
    assert fname1 == _find_trans('sample', subjects_dir=tempdir)
    trans1 = read_trans(fname1)
    # check all properties survive the save/read round trip
    assert trans0 == trans1
    # check reading non -trans.fif files
    pytest.raises(IOError, read_trans, fname_eve)
    # check warning on bad filenames
    fname2 = op.join(tempdir, 'trans-test-bad-name.fif')
    with pytest.warns(RuntimeWarning, match='-trans.fif'):
        write_trans(fname2, trans0)
def test_get_ras_to_neuromag_trans():
    """Test the coordinate transformation from ras to neuromag."""
    # create model points in neuromag-like space
    rng = np.random.RandomState(0)
    anterior = [0, 1, 0]
    left = [-1, 0, 0]
    right = [.8, 0, 0]
    up = [0, 0, 1]
    rand_pts = rng.uniform(-1, 1, (3, 3))
    pts = np.vstack((anterior, left, right, up, rand_pts))
    # change coord system with a random rotation + translation
    rx, ry, rz, tx, ty, tz = rng.uniform(-2 * np.pi, 2 * np.pi, 6)
    trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))
    pts_changed = apply_trans(trans, pts)
    # transform back into original space using the first three points
    # as the nasion/LPA/RPA landmarks
    nas, lpa, rpa = pts_changed[:3]
    hsp_trans = get_ras_to_neuromag_trans(nas, lpa, rpa)
    pts_restored = apply_trans(hsp_trans, pts_changed)
    err = "Neuromag transformation failed"
    assert_allclose(pts_restored, pts, atol=1e-6, err_msg=err)
def _cartesian_to_sphere(x, y, z):
"""Convert using old function."""
hypotxy = np.hypot(x, y)
r = np.hypot(hypotxy, z)
elev = np.arctan2(z, hypotxy)
az = np.arctan2(y, x)
return az, elev, r
def _sphere_to_cartesian(theta, phi, r):
"""Convert using old function."""
z = r * np.sin(phi)
rcos_phi = r * np.cos(phi)
x = rcos_phi * np.cos(theta)
y = rcos_phi * np.sin(theta)
return x, y, z
def test_sph_to_cart():
    """Test conversion between sphere and cartesian."""
    # Simple test, expected value (11, 0, 0)
    r, theta, phi = 11., 0., np.pi / 2.
    z = r * np.cos(phi)
    rsin_phi = r * np.sin(phi)
    x = rsin_phi * np.cos(theta)
    y = rsin_phi * np.sin(theta)
    coord = _sph_to_cart(np.array([[r, theta, phi]]))[0]
    assert_allclose(coord, (x, y, z), atol=1e-7)
    assert_allclose(coord, (r, 0, 0), atol=1e-7)
    rng = np.random.RandomState(0)
    # round-trip test
    coords = rng.randn(10, 3)
    assert_allclose(_sph_to_cart(_cart_to_sph(coords)), coords, atol=1e-5)
    # equivalence tests to old versions (note: the old elevation convention
    # is converted to the new polar-angle convention below)
    for coord in coords:
        sph = _cart_to_sph(coord[np.newaxis])
        cart = _sph_to_cart(sph)
        sph_old = np.array(_cartesian_to_sphere(*coord))
        cart_old = _sphere_to_cartesian(*sph_old)
        sph_old[1] = np.pi / 2. - sph_old[1]  # new convention
        assert_allclose(sph[0], sph_old[[2, 0, 1]], atol=1e-7)
        assert_allclose(cart[0], cart_old, atol=1e-7)
        assert_allclose(cart[0], coord, atol=1e-7)
def _polar_to_cartesian(theta, r):
"""Transform polar coordinates to cartesian."""
x = r * np.cos(theta)
y = r * np.sin(theta)
return x, y
def test_polar_to_cartesian():
    """Test helper transform function from polar to cartesian."""
    r = 1
    theta = np.pi
    # expected values are (-1, 0)
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    coord = _pol_to_cart(np.array([[r, theta]]))[0]
    # np.pi is an approx since pi is irrational
    assert_allclose(coord, (x, y), atol=1e-7)
    assert_allclose(coord, (-1, 0), atol=1e-7)
    assert_allclose(coord, _polar_to_cartesian(theta, r), atol=1e-7)
    # random round-trip against the old helper; note _pol_to_cart takes
    # (r, theta) columns while _polar_to_cartesian takes (theta, r) args
    rng = np.random.RandomState(0)
    r = rng.randn(10)
    theta = rng.rand(10) * (2 * np.pi)
    polar = np.array((r, theta)).T
    assert_allclose([_polar_to_cartesian(p[1], p[0]) for p in polar],
                    _pol_to_cart(polar), atol=1e-7)
def _topo_to_phi_theta(theta, radius):
"""Convert using old function."""
sph_phi = (0.5 - radius) * 180
sph_theta = -theta
return sph_phi, sph_theta
def test_topo_to_sph():
    """Test topo to sphere conversion."""
    rng = np.random.RandomState(0)
    angles = rng.rand(10) * 360
    radii = rng.rand(10)
    # Fix the first sample so an exact expected value can be asserted below.
    angles[0] = 30
    radii[0] = 0.25
    # new way
    sph = _topo_to_sph(np.array([angles, radii]).T)
    new = _sph_to_cart(sph)
    # swap x/y and negate the new x to match the old axis convention
    new[:, [0, 1]] = new[:, [1, 0]] * [-1, 1]
    # old way
    for ii, (angle, radius) in enumerate(zip(angles, radii)):
        sph_phi, sph_theta = _topo_to_phi_theta(angle, radius)
        if ii == 0:
            assert_allclose(_topo_to_phi_theta(angle, radius), [45, -30])
        azimuth = sph_theta / 180.0 * np.pi
        elevation = sph_phi / 180.0 * np.pi
        # polar angle in the new convention is pi/2 minus the old elevation
        assert_allclose(sph[ii], [1., azimuth, np.pi / 2. - elevation],
                        atol=1e-7)
        r = np.ones_like(radius)
        x, y, z = _sphere_to_cartesian(azimuth, elevation, r)
        pos = [-y, x, z]
        if ii == 0:
            expected = np.array([1. / 2., np.sqrt(3) / 2., 1.])
            expected /= np.sqrt(2)
            assert_allclose(pos, expected, atol=1e-7)
        assert_allclose(pos, new[ii], atol=1e-7)
def test_rotation():
    """Test conversion between rotation angles and transformation matrix."""
    for angles in [(0, 0, 1), (.5, .5, .5), (np.pi, 0, -1.5)]:
        rx, ry, rz = angles
        mat3 = rotation3d(rx, ry, rz)
        mat4 = rotation(rx, ry, rz)
        # The 4x4 homogeneous matrix embeds the 3x3 rotation.
        assert_array_equal(mat3, mat4[:3, :3])
        # Angles must round-trip exactly through both representations.
        assert_equal(rotation_angles(mat3), angles)
        assert_equal(rotation_angles(mat4), angles)
def test_rotation3d_align_z_axis():
    """Test rotation3d_align_z_axis."""
    # Axis-aligned cases plus one arbitrary unit-ish vector; the expected
    # matrices were precomputed (the last, more complex case is checked
    # only through these hard-coded values).
    inp_zs = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, -1],
              [-0.75071668, -0.62183808, 0.22302888]]
    exp_res = [[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
               [[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]],
               [[0., 0., 1.], [0., 1., 0.], [-1., 0., 0.]],
               [[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]],
               [[0.53919688, -0.38169517, -0.75071668],
                [-0.38169517, 0.683832, -0.62183808],
                [0.75071668, 0.62183808, 0.22302888]]]
    for res, z in zip(exp_res, inp_zs):
        assert_allclose(res, rotation3d_align_z_axis(z), atol=1e-7)
@testing.requires_testing_data
def test_combine():
    """Test combining transforms."""
    trans = read_trans(fname)
    inv = invert_transform(trans)
    # Matching from/to frames combine without error.
    combine_transforms(trans, inv, trans['from'], trans['from'])
    # Any frame mismatch must raise RuntimeError.
    bad_combos = [(trans, inv, trans['to'], trans['from']),
                  (trans, inv, trans['from'], trans['to']),
                  (trans, trans, trans['from'], trans['to'])]
    for t1, t2, fro, to in bad_combos:
        pytest.raises(RuntimeError, combine_transforms, t1, t2, fro, to)
def test_quaternions():
    """Test quaternion calculations."""
    # Start with identity plus real dev_head_t rotations from fixture files.
    rots = [np.eye(3)]
    for fname in [test_fif_fname, ctf_fname, hp_fif_fname]:
        rots += [read_info(fname)['dev_head_t']['trans'][:3, :3]]
    # nasty numerical cases
    rots += [np.array([
        [-0.99978541, -0.01873462, -0.00898756],
        [-0.01873462, 0.62565561, 0.77987608],
        [-0.00898756, 0.77987608, -0.62587152],
    ])]
    rots += [np.array([
        [0.62565561, -0.01873462, 0.77987608],
        [-0.01873462, -0.99978541, -0.00898756],
        [0.77987608, -0.00898756, -0.62587152],
    ])]
    rots += [np.array([
        [-0.99978541, -0.00898756, -0.01873462],
        [-0.00898756, -0.62587152, 0.77987608],
        [-0.01873462, 0.77987608, 0.62565561],
    ])]
    # rotation -> quaternion -> rotation must round-trip, also with
    # leading singleton dimensions added
    for rot in rots:
        assert_allclose(rot, quat_to_rot(rot_to_quat(rot)),
                        rtol=1e-5, atol=1e-5)
        rot = rot[np.newaxis, np.newaxis, :, :]
        assert_allclose(rot, quat_to_rot(rot_to_quat(rot)),
                        rtol=1e-5, atol=1e-5)
    # let's make sure our angle function works in some reasonable way
    for ii in range(3):
        for jj in range(3):
            a = np.zeros(3)
            b = np.zeros(3)
            a[ii] = 1.
            b[jj] = 1.
            expected = np.pi if ii != jj else 0.
            assert_allclose(_angle_between_quats(a, b), expected, atol=1e-5)
    # 180-degree rotations must be pi away from the identity quaternion
    y_180 = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1.]])
    assert_allclose(_angle_between_quats(rot_to_quat(y_180),
                                         np.zeros(3)), np.pi)
    h_180_attitude_90 = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1.]])
    assert_allclose(_angle_between_quats(rot_to_quat(h_180_attitude_90),
                                         np.zeros(3)), np.pi)
def test_vector_rotation():
    """Test basic rotation matrix math."""
    x_axis = np.array([1., 0., 0.])
    y_axis = np.array([0., 1., 0.])
    rot = _find_vector_rotation(x_axis, y_axis)
    # Rotating x onto y is a 90-degree rotation about z.
    assert_array_equal(rot,
                       [[0, -1, 0], [1, 0, 0], [0, 0, 1]])
    angle = _angle_between_quats(rot_to_quat(rot), rot_to_quat(np.eye(3)))
    assert_allclose(angle, np.pi / 2.)
def test_average_quats():
    """Test averaging of quaternions."""
    sq2 = 1. / np.sqrt(2.)
    # Quaternions stored as the three imaginary components only.
    quats = np.array([[0, sq2, sq2],
                      [0, sq2, sq2],
                      [0, sq2, 0],
                      [0, 0, sq2],
                      [sq2, 0, 0]], float)
    # In MATLAB:
    # quats = [[0, sq2, sq2, 0]; [0, sq2, sq2, 0];
    #          [0, sq2, 0, sq2]; [0, 0, sq2, sq2]; [sq2, 0, 0, sq2]];
    # expected[k] is the average of quats[:k + 1].
    expected = [quats[0],
                quats[0],
                [0, 0.788675134594813, 0.577350269189626],
                [0, 0.657192299694123, 0.657192299694123],
                [0.100406058540540, 0.616329446922803, 0.616329446922803]]
    # Averaging the first two should give the same thing:
    for lim, ex in enumerate(expected):
        assert_allclose(_average_quats(quats[:lim + 1]), ex, atol=1e-7)
    quats[1] *= -1  # same quaternion (hidden value is zero here)!
    rot_0, rot_1 = quat_to_rot(quats[:2])
    assert_allclose(rot_0, rot_1, atol=1e-7)
    # the sign flip must not change any of the averages
    for lim, ex in enumerate(expected):
        assert_allclose(_average_quats(quats[:lim + 1]), ex, atol=1e-7)
    # Assert some symmetry
    count = 0
    extras = [[sq2, sq2, 0]] + list(np.eye(3))
    for quat in np.concatenate((quats, expected, extras)):
        if np.isclose(_quat_real(quat), 0., atol=1e-7):  # can flip sign
            count += 1
            angle = _angle_between_quats(quat, -quat)
            assert_allclose(angle, 0., atol=1e-7)
            rot_0, rot_1 = quat_to_rot(np.array((quat, -quat)))
            assert_allclose(rot_0, rot_1, atol=1e-7)
    assert count == 4 + len(extras)
@testing.requires_testing_data
@pytest.mark.parametrize('subject', ('fsaverage', 'sample'))
def test_fs_xfm(subject, tmpdir):
    """Test reading and writing of Freesurfer transforms."""
    fname = op.join(data_path, 'subjects', subject, 'mri', 'transforms',
                    'talairach.xfm')
    xfm, kind = _read_fs_xfm(fname)
    if subject == 'fsaverage':
        assert_allclose(xfm, np.eye(4), atol=1e-5)  # fsaverage is in MNI
        assert kind == 'MNI Transform File'
    # write/read round trip must preserve both matrix and kind
    tempdir = str(tmpdir)
    fname_out = op.join(tempdir, 'out.xfm')
    _write_fs_xfm(fname_out, xfm, kind)
    xfm_read, kind_read = _read_fs_xfm(fname_out)
    assert kind_read == kind
    assert_allclose(xfm, xfm_read, rtol=1e-5, atol=1e-5)
    # Some wacky one
    xfm[:3] = np.random.RandomState(0).randn(3, 4)
    _write_fs_xfm(fname_out, xfm, 'foo')
    xfm_read, kind_read = _read_fs_xfm(fname_out)
    assert kind_read == 'foo'
    assert_allclose(xfm, xfm_read, rtol=1e-5, atol=1e-5)
    # degenerate conditions: not an xfm file at all, then a truncated matrix
    with open(fname_out, 'w') as fid:
        fid.write('foo')
    with pytest.raises(ValueError, match='Failed to find'):
        _read_fs_xfm(fname_out)
    _write_fs_xfm(fname_out, xfm[:2], 'foo')
    with pytest.raises(ValueError, match='Could not find'):
        _read_fs_xfm(fname_out)
@pytest.fixture()
def quats():
    """Make some unit quats."""
    rng = np.random.RandomState(0)
    out = rng.randn(5, 3)
    out[:, 0] = 0  # identity
    out /= 2 * np.linalg.norm(out, axis=1, keepdims=True)  # some real part
    return out
def _check_fit_matched_points(
        p, x, weights, do_scale, angtol=1e-5, dtol=1e-5, stol=1e-7):
    """Check that the analytical fit agrees with the optimization-based one.

    Fits p -> x with both mne.coreg.fit_matched_points (analytical path
    disabled) and the analytical _fit_matched_points, then compares the
    resulting rotations, translations and (optionally) scales within the
    given tolerances.  Returns the analytical (quat, scale).
    """
    __tracebackhide__ = True
    # Force the coreg version down the optimization path for comparison.
    mne.coreg._ALLOW_ANALITICAL = False
    try:
        params = mne.coreg.fit_matched_points(
            p, x, weights=weights, scale=do_scale, out='params')
    finally:
        mne.coreg._ALLOW_ANALITICAL = True
    quat_an, scale_an = _fit_matched_points(p, x, weights, scale=do_scale)
    assert len(params) == 6 + int(do_scale)
    # first 3 entries are Euler angles, next 3 are the translation
    q_co = _euler_to_quat(params[:3])
    translate_co = params[3:6]
    angle = np.rad2deg(_angle_between_quats(quat_an[:3], q_co))
    dist = np.linalg.norm(quat_an[3:] - translate_co)
    assert 0 <= angle < angtol, 'angle'
    assert 0 <= dist < dtol, 'dist'
    if do_scale:
        scale_co = params[6]
        assert_allclose(scale_an, scale_co, rtol=stol, err_msg='scale')
    # errs: the analytical residual should not be (much) worse than the
    # optimization-based one
    trans = _quat_to_affine(quat_an)
    trans[:3, :3] *= scale_an
    weights = np.ones(1) if weights is None else weights
    err_an = np.linalg.norm(
        weights[:, np.newaxis] * apply_trans(trans, p) - x)
    trans = mne.coreg._trans_from_params((True, True, do_scale), params)
    err_co = np.linalg.norm(
        weights[:, np.newaxis] * apply_trans(trans, p) - x)
    if err_an > 1e-14:
        assert err_an < err_co * 1.5
    return quat_an, scale_an
@pytest.mark.parametrize('scaling', [0.25, 1])
@pytest.mark.parametrize('do_scale', (True, False))
def test_fit_matched_points(quats, scaling, do_scale):
    """Test analytical least-squares matched point fitting."""
    if scaling != 1 and not do_scale:
        return  # no need to test this, it will not be good
    rng = np.random.RandomState(0)
    fro = rng.randn(10, 3)
    translation = rng.randn(3)
    for qi, quat in enumerate(quats):
        # Known transform: rotate, scale, translate.
        to = scaling * np.dot(quat_to_rot(quat), fro.T).T + translation
        for corrupted in (False, True):
            # mess up a point; with zero weight the fit must still recover
            # the true transform
            if corrupted:
                to[0, 2] += 100
                weights = np.ones(len(to))
                weights[0] = 0
            else:
                weights = None
            est, scale_est = _check_fit_matched_points(
                fro, to, weights=weights, do_scale=do_scale)
            assert_allclose(scale_est, scaling, rtol=1e-5)
            assert_allclose(est[:3], quat, atol=1e-14)
            assert_allclose(est[3:], translation, atol=1e-14)
        # if we don't adjust for the corruption above, it should get worse
        angle = dist = None
        for weighted in (False, True):
            if not weighted:
                weights = None
                dist_bounds = (5, 20)
                if scaling == 1:
                    angle_bounds = (5, 95)
                    angtol, dtol, stol = 1, 15, 3
                else:
                    angle_bounds = (5, 105)
                    angtol, dtol, stol = 20, 15, 3
            else:
                weights = np.ones(len(to))
                weights[0] = 10  # weighted=True here means "make it worse"
                angle_bounds = (angle, 180)  # unweighted values as new min
                dist_bounds = (dist, 100)
                if scaling == 1:
                    # XXX this angtol is not great but there is a hard to
                    # identify linalg/angle calculation bug on Travis...
                    angtol, dtol, stol = 180, 70, 3
                else:
                    angtol, dtol, stol = 50, 70, 3
            est, scale_est = _check_fit_matched_points(
                fro, to, weights=weights, do_scale=do_scale,
                angtol=angtol, dtol=dtol, stol=stol)
            assert not np.allclose(est[:3], quat, atol=1e-5)
            assert not np.allclose(est[3:], translation, atol=1e-5)
            angle = np.rad2deg(_angle_between_quats(est[:3], quat))
            assert_array_less(angle_bounds[0], angle)
            assert_array_less(angle, angle_bounds[1])
            dist = np.linalg.norm(est[3:] - translation)
            assert_array_less(dist_bounds[0], dist)
            assert_array_less(dist, dist_bounds[1])
def test_euler(quats):
    """Test euler transformations."""
    angles = _quat_to_euler(quats)
    round_trip = _euler_to_quat(angles)
    # quaternion -> Euler -> quaternion must round-trip exactly
    assert_allclose(quats, round_trip, atol=1e-14)
    # both parameterizations must produce the same rotation matrices
    rot_from_quat = quat_to_rot(quats)
    rot_from_euler = np.array([rotation(*ang)[:3, :3] for ang in angles])
    assert_allclose(rot_from_quat, rot_from_euler, atol=1e-14)
@requires_nibabel()
@requires_dipy()
@pytest.mark.slowtest
@testing.requires_testing_data
def test_volume_registration():
    """Test volume registration."""
    import nibabel as nib
    from dipy.align import resample
    T1 = nib.load(fname_t1)
    # Create a moved copy of T1 via a known affine (10 units along x)
    # so registration should recover (approximately) that affine.
    affine = np.eye(4)
    affine[0, 3] = 10
    T1_resampled = resample(moving=T1.get_fdata(),
                            static=T1.get_fdata(),
                            moving_affine=T1.affine,
                            static_affine=T1.affine,
                            between_affine=np.linalg.inv(affine))
    for pipeline in ('rigids', ('translation', 'sdr')):
        reg_affine, sdr_morph = mne.transforms.compute_volume_registration(
            T1_resampled, T1, pipeline=pipeline, zooms=10, niter=[5])
        assert_allclose(affine, reg_affine, atol=0.25)
        T1_aligned = mne.transforms.apply_volume_registration(
            T1_resampled, T1, reg_affine, sdr_morph)
        r2 = _compute_r2(_get_img_fdata(T1_aligned), _get_img_fdata(T1))
        assert 99.9 < r2
    # check that all orders of the pipeline work
    for pipeline_len in range(1, 5):
        for pipeline in itertools.combinations(
                ('translation', 'rigid', 'affine', 'sdr'), pipeline_len):
            _validate_pipeline(pipeline)
            _validate_pipeline(list(pipeline))
    # out-of-order and repeated steps must be rejected
    with pytest.raises(ValueError, match='Steps in pipeline are out of order'):
        _validate_pipeline(('sdr', 'affine'))
    with pytest.raises(ValueError,
                       match='Steps in pipeline should not be repeated'):
        _validate_pipeline(('affine', 'affine'))
| |
# -*- coding: utf-8 -*-


# Built-in
import math


# Common
import numpy as np
import scipy.sparse as scpsp


# specific
from . import _generic_check
# Names of the available linear inversion algorithms
# (augmented-Tikhonov or discrepancy-principle regularization, with
#  sparse / dense / cholesky / positivity variants)
_LALGO = [
    'inv_linear_augTikho_sparse',
    'inv_linear_augTikho_dense',
    'inv_linear_augTikho_chol_dense',
    'inv_linear_augTikho_chol_sparse',
    'inv_linear_augTikho_pos_dense',
    'inv_linear_DisPrinc_sparse',
]

# Names of the available regularization-parameter selection strategies
_LREGPARAM_ALGO = [
    'augTikho',
    'DisPrinc',
]
# #############################################################################
# #############################################################################
# main
# #############################################################################
def _compute_check(
    # input data
    coll=None,
    key_matrix=None,
    key_data=None,
    key_sigma=None,
    data=None,
    sigma=None,
    # choice of algo
    isotropic=None,
    sparse=None,
    positive=None,
    cholesky=None,
    regparam_algo=None,
    algo=None,
    # regularity operator
    solver=None,
    operator=None,
    geometry=None,
    # misc
    conv_crit=None,
    chain=None,
    verb=None,
    store=None,
    # algo and solver-specific options
    kwdargs=None,
    method=None,
    options=None,
):
    """Validate and normalize all inputs of an inversion computation.

    Resolves the geometry matrix / data / sigma keys, checks array shapes
    and finiteness, determines the algorithm name from user flags (or the
    flags from the name), builds the regularity operator, enforces a
    consistent sparse/dense representation and fills default values for
    the miscellaneous parameters.

    NOTE(review): ``coll`` is assumed to be a Collection-like object
    exposing ``dobj`` / ``ddata`` dicts and ``add_bsplines_operator()`` —
    confirm against the caller.

    Returns a tuple of all validated quantities (see final return).
    """

    # ----
    # keys

    # key_matrix: default to the first stored geometry matrix
    lk = list(coll.dobj.get('matrix', {}).keys())
    if key_matrix is None and len(lk):
        key_matrix = lk[0]
    key_matrix = _generic_check._check_var(
        key_matrix, 'key_matrix',
        types=str,
        allowed=lk,
    )
    keybs = coll.dobj['matrix'][key_matrix]['bsplines']
    keym = coll.dobj['bsplines'][keybs]['mesh']
    matrix = coll.ddata[coll.dobj['matrix'][key_matrix]['data']]['data']
    shapemat = matrix.shape
    crop = coll.dobj['matrix'][key_matrix]['crop']

    if np.any(~np.isfinite(matrix)):
        msg = "Geometry matrix should not contain NaNs or infs!"
        raise Exception(msg)

    # key_data: only resolved when no raw data array was passed directly;
    # candidates are 1d/2d arrays whose last axis matches the nb of channels
    if key_data is not None or (key_data is None and data is None):
        lk = [
            kk for kk, vv in coll.ddata.items()
            if vv['data'].ndim in [1, 2]
            and vv['data'].shape[-1] == shapemat[0]
        ]
        if key_data is None and len(lk):
            key_data = lk[0]
        key_data = _generic_check._check_var(
            key_data, 'key_data',
            types=str,
            allowed=lk,
        )
        data = coll.ddata[key_data]['data']

    # ------------
    # data, sigma

    # data: coerced to a 2d (nt, nchan) ndarray of finite values
    data = _generic_check._check_var(
        data, 'data',
        types=(np.ndarray, list, tuple),
    )
    if not isinstance(data, np.ndarray):
        data = np.asarray(data)
    if data.ndim not in [1, 2] or shapemat[0] not in data.shape:
        msg = (
            "Arg data must have dim in [1, 2]"
            f" and {shapemat[0]} must be in shape\n"
            f"\t- data.shape: {data.shape}"
        )
        raise Exception(msg)
    if data.ndim == 1:
        data = data[None, :]
    # transpose so that the channel axis is last
    if data.shape[1] != shapemat[0]:
        data = data.T
    if np.any(~np.isfinite(data)):
        msg = "Arg data should not contain NaNs or inf!"
        raise Exception(msg)

    # key_sigma: optional stored uncertainty array
    if key_sigma is not None:
        lk = [
            kk for kk, vv in coll.ddata.items()
            if vv['data'].ndim in [1, 2]
            and vv['data'].shape[-1] == shapemat[0]
        ]
        key_sigma = _generic_check._check_var(
            key_sigma, 'key_sigma',
            types=str,
            allowed=lk,
        )
        sigma = coll.ddata[key_sigma]['data']

    # sigma: a scalar is interpreted as a relative error vs mean(|data|);
    # default is 5% relative error
    if np.isscalar(sigma):
        sigma = np.full((shapemat[0],), sigma*np.nanmean(np.abs(data)))
    sigma = _generic_check._check_var(
        sigma, 'sigma',
        default=np.full((shapemat[0],), 0.05*np.nanmean(np.abs(data))),
        types=(np.ndarray, list, tuple),
    )
    if not isinstance(sigma, np.ndarray):
        sigma = np.asarray(sigma)
    if sigma.ndim not in [1, 2] or shapemat[0] not in sigma.shape:
        msg = (
            "Arg sigma must have dim in [1, 2]"
            f" and {shapemat[0]} must be in shape\n"
            f"\t- sigma.shape = {sigma.shape}"
        )
        raise Exception(msg)
    if sigma.ndim == 1:
        sigma = sigma[None, :]
    elif sigma.ndim == 2 and data.shape != sigma.shape:
        msg = (
            "Arg sigma must have the same shape as data!\n"
            f"\t- data.shape: {data.shape}\n"
            f"\t- sigma.shape: {sigma.shape}\n"
        )
        raise Exception(msg)
    if sigma.shape[1] != shapemat[0]:
        sigma = sigma.T
    if np.any(~np.isfinite(sigma)):
        msg = "Arg sigma should not contain NaNs or inf!"
        raise Exception(msg)

    # --------------
    # choice of algo

    # either the algo name or the individual flags may be given, not both
    lc = [
        algo is None,
        all([kk is None for kk in [isotropic, positive, sparse, cholesky]])
    ]
    if not any(lc):
        msg = (
            "Please provide either (xor):\n"
            "\t- algo: directly provide the algo name\n"
            "\t- flags for choosing the algo:\n"
            "\t\t- isotropic: whether to perform isotropic regularization\n"
            "\t\t- sparse: whether to use sparse matrices\n"
            "\t\t- positive: whether to enforce a positivity constraint\n"
            "\t\t- cholesky: whether to use cholesky factorization\n"
        )
        raise Exception(msg)

    # neither provided => fall back to the default algo, then derive flags
    if all(lc):
        algo = 'inv_linear_augTikho_sparse'
        lc[0] = False

    if not lc[0] and lc[1]:
        # extract keywrods from algo name
        isotropic = True
        positive = 'pos' in algo
        sparse = 'sparse' in algo
        cholesky = 'chol' in algo
        for aa in _LREGPARAM_ALGO:
            if f'_{aa}_' in algo:
                regparam_algo = aa
                break
        else:
            # for/else: no known regparam token found in the algo name
            msg = 'Unreckognized algo for regularization parameter!'
            raise Exception(msg)

    elif lc[0] and not lc[1]:
        # get algo name from keywords

        # isotropic
        isotropic = _generic_check._check_var(
            isotropic, 'isotropic',
            default=True,
            types=bool,
        )
        if isotropic is False:
            msg = "Anisotropic regularization unavailable yet"
            raise NotImplementedError(msg)

        # sparse and matrix and operator
        sparse = _generic_check._check_var(
            sparse, 'sparse',
            default=True,
            types=bool,
        )

        # positive
        positive = _generic_check._check_var(
            positive, 'positive',
            default=False,
            types=bool,
        )

        # cholesky
        cholesky = _generic_check._check_var(
            cholesky, 'cholesky',
            default=False,
            types=bool,
        )
        if positive and cholesky is False:
            msg = "cholesky cannot be used for positive constraint!"
            raise Exception(msg)

        # regparam_algo
        regparam_algo = _generic_check._check_var(
            regparam_algo, 'regparam_algo',
            default='augTikho',
            types=str,
            allowed=_LREGPARAM_ALGO,
        )

        # assemble the algo name from the validated flags
        algo = f"inv_linear_{regparam_algo}"
        if cholesky:
            algo += '_chol'
        elif positive:
            algo += '_pos'
        algo += f"_{'sparse' if sparse else 'dense'}"

    # final algo check
    algo = _generic_check._check_var(
        algo, 'algo',
        default=None,
        types=str,
        allowed=_LALGO,
    )

    # -------------------
    # regularity operator

    # get operator
    opmat, operator, geometry, dim, ref, crop = coll.add_bsplines_operator(
        key=keybs,
        operator=operator,
        geometry=geometry,
        returnas=True,
        store=False,
        crop=crop,
    )

    nchan, nbs = matrix.shape
    if isinstance(opmat, tuple):
        assert all([op.shape == (nbs, nbs) for op in opmat])
    elif opmat.ndim == 1:
        msg = "Inversion algorithm requires a quadratic operator!"
        raise Exception(msg)
    else:
        assert opmat.shape == (nbs,) or opmat.shape == (nbs, nbs)
        # normalize to a tuple of operators
        opmat = (opmat,)

    if not scpsp.issparse(opmat[0]):
        assert all([np.all(np.isfinite(op)) for op in opmat])

    assert data.shape[1] == nchan
    nt = data.shape[0]

    # -------------------
    # consistent sparsity

    # convert matrix and operators to match the requested representation
    if sparse is True:
        if not scpsp.issparse(matrix):
            matrix = scpsp.csc_matrix(matrix)
        if not scpsp.issparse(opmat[0]):
            opmat = [scpsp.csc_matrix(pp) for pp in opmat]
    elif sparse is False:
        if scpsp.issparse(matrix):
            matrix = matrix.toarray()
        if scpsp.issparse(opmat[0]):
            opmat = [scpsp.csc_matrix(pp).toarray() for pp in opmat]

    # -----------------------
    # miscellaneous parameter

    # conv_crit: convergence criterion of the iterative inversion
    conv_crit = _generic_check._check_var(
        conv_crit, 'conv_crit',
        default=1e-4,
        types=float,
    )

    # chain: whether each time step is initialized from the previous one
    chain = _generic_check._check_var(
        chain, 'chain',
        default=True,
        types=bool,
    )

    # verb: verbosity level, normalized to an int in {0, 1, 2}
    verb = _generic_check._check_var(
        verb, 'verb',
        default=True,
        types=(bool, int),
        allowed=[False, 0, True, 1, 2],
    )
    if verb is False:
        verb = 0
    if verb is True:
        verb = 1

    # store: results can only be stored if the data comes from the Collection
    store = _generic_check._check_var(
        store, 'store',
        default=True,
        types=bool,
    )
    if key_data is None:
        store = False

    # solver
    solver = _generic_check._check_var(
        solver, 'solver',
        default='spsolve',
        types=str,
        allowed=['spsolve'],
    )

    # ----------------------------------------
    # algo-specific kwdargs and solver options

    # kwdargs, method, options
    kwdargs, method, options = _algo_check(
        algo,
        kwdargs=kwdargs,
        options=options,
        nchan=shapemat[0],
        nbs=shapemat[1],
        conv_crit=conv_crit,
    )

    return (
        key_matrix, key_data, key_sigma, keybs, keym,
        data, sigma, matrix, opmat, operator, geometry,
        isotropic, sparse, positive, cholesky, regparam_algo, algo,
        conv_crit, crop, chain, kwdargs, method, options,
        solver, verb, store,
    )
# #############################################################################
# #############################################################################
# ikwdargs / options for each algo
# #############################################################################
def _algo_check(
algo,
kwdargs=None,
method=None,
options=None,
nchan=None,
nbs=None,
conv_crit=None,
):
# ------------------------
# generic kwdargs
# kwdargs
if kwdargs is None:
kwdargs = {}
# generic kwdargs
if kwdargs.get('maxiter') is None:
kwdargs['maxiter'] = 100
if kwdargs.get('tol') is None:
kwdargs['tol'] = 1.e-6
# ------------------------
# algo specific kwdargs
# kwdargs specific to aug. tikhonov
if 'augTikho' in algo:
a0 = kwdargs.get('a0', 10)
a1 = kwdargs.get('a1', 2)
# to have [x]=1
kwdargs['b0'] = np.math.factorial(a0)**(1 / (a0 + 1))
kwdargs['b1'] = np.math.factorial(a1)**(1 / (a1 + 1))
kwdargs['a0'] = a0
kwdargs['a1'] = a1
# Exponent for rescaling of a0bis
# typically in [1/3 ; 1/2], but real limits are 0 < d < 1 (or 2 ?)
if kwdargs.get('d') is None:
kwdargs['d'] = 0.95
if kwdargs.get('conv_reg') is None:
kwdargs['conv_reg'] = True
if kwdargs.get('nbs_fixed') is None:
kwdargs['nbs_fixed'] = True
if kwdargs['nbs_fixed']:
kwdargs['a0bis'] = kwdargs['a0'] - 1. + 1200./2.
else:
kwdargs['a0bis'] = kwdargs['a0'] - 1. + nbs/2.
kwdargs['a1bis'] = kwdargs['a1'] - 1. + nchan/2.
# kwdargs specific to discrepancy principle
elif 'DisPrinc' in algo:
if kwdargs.get('chi2n_obj') is None:
kwdargs['chi2n_obj'] = 1.
if kwdargs.get('chi2n_tol') is None:
kwdargs['chi2n_tol'] = 0.05
# ------------------------
# low-level solver options
if 'quad' in algo:
if options is None:
options = {}
if method is None:
method = 'L-BFGS-B'
if method == 'L-BFGS-B':
if options.get('ftol') is None:
options['ftol'] = conv_crit/100.
if options.get('disp') is None:
options['disp'] = False
else:
raise NotImplementedError
return kwdargs, method, options
| |
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
from copy import copy
from six import print_
from six.moves import configparser
import pandas as pd
# pygments is an optional dependency, used only for syntax-highlighting the
# algorithm source when printing it.
try:
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import TerminalFormatter
    PYGMENTS = True
except ImportError:
    # BUGFIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only a missing package should disable highlighting.
    PYGMENTS = False
import zipline
from zipline.errors import NoSourceError, PipelineDateError
# Fallback option values used by parse_args() when neither a config file nor
# a command-line flag supplies the option (all stored as strings).
DEFAULTS = {
    'data_frequency': 'daily',
    'capital_base': '10e6',
    'source': 'yahoo',
    'symbols': 'AAPL',
    'metadata_index': 'symbol',
    'source_time_column': 'Date',
}
def parse_args(argv, ipython_mode=False):
    """Parse list of arguments.

    If a config file is provided (via -c), it will read in the
    supplied options and overwrite any global defaults.

    All other directly supplied arguments will overwrite the config
    file settings.

    Arguments:
    * argv : list of strings
        List of arguments, e.g. ['-c', 'my.conf']
    * ipython_mode : bool <default=False>
        Whether to parse IPython specific arguments
        like --local_namespace

    Notes:
    Default settings can be found in zipline.utils.cli.DEFAULTS.
    """
    # Parse any conf_file specification
    # We make this parser with add_help=False so that
    # it doesn't parse -h and print help.
    conf_parser = argparse.ArgumentParser(
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # Turn off help, so we print all options in response to -h
        add_help=False
    )
    conf_parser.add_argument("-c", "--conf_file",
                             help="Specify config file",
                             metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(argv)

    defaults = copy(DEFAULTS)
    if args.conf_file:
        # BUGFIX: SafeConfigParser was deprecated in Python 3.2 and removed
        # in 3.12; ConfigParser is the same class under its modern name.
        config = configparser.ConfigParser()
        config.read([args.conf_file])
        defaults.update(dict(config.items("Defaults")))

    # Parse rest of arguments
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        description="Zipline version %s." % zipline.__version__,
        parents=[conf_parser]
    )
    parser.set_defaults(**defaults)

    parser.add_argument('--algofile', '-f')
    parser.add_argument('--data-frequency',
                        choices=('minute', 'daily'))
    parser.add_argument('--start', '-s')
    parser.add_argument('--end', '-e')
    parser.add_argument('--capital_base')
    parser.add_argument('--source', '-d', choices=('yahoo',))
    parser.add_argument('--source_time_column', '-t')
    parser.add_argument('--symbols')
    parser.add_argument('--output', '-o')
    parser.add_argument('--metadata_path', '-m')
    parser.add_argument('--metadata_index', '-x')
    parser.add_argument('--print-algo', '-p', dest='print_algo',
                        action='store_true')
    parser.add_argument('--no-print-algo', '-q', dest='print_algo',
                        action='store_false')

    if ipython_mode:
        parser.add_argument('--local_namespace', action='store_true')

    args = parser.parse_args(remaining_argv)

    return vars(args)
def parse_cell_magic(line, cell):
    """Parse IPython magic
    """
    args = parse_args(line.split(' '), ipython_mode=True)
    # Remove print_algo kwarg to overwrite below.
    args.pop('print_algo')
    use_local_ns = args.pop('local_namespace', False)
    # By default, execute inside IPython namespace
    if not use_local_ns:
        args['namespace'] = get_ipython().user_ns  # flake8: noqa
    # If we are running inside NB, do not output to file but create a
    # variable instead
    output_var = args.pop('output', None)
    perf = run_pipeline(print_algo=False, algo_text=cell, **args)
    if output_var is not None:
        get_ipython().user_ns[output_var] = perf  # flake8: noqa
def run_pipeline(print_algo=True, **kwargs):
    """Runs a full zipline pipeline given configuration keyword
    arguments.

    1. Load data (start and end dates can be provided a strings as
    well as the source and symobls).

    2. Instantiate algorithm (supply either algo_text or algofile
    kwargs containing initialize() and handle_data() functions). If
    algofile is supplied, will try to look for algofile_analyze.py and
    append it.

    3. Run algorithm (supply capital_base as float).

    4. Return performance dataframe.

    :Arguments:
        * print_algo : bool <default=True>
           Whether to print the algorithm to command line. Will use
           pygments syntax coloring if pygments is found.
    """
    start = kwargs['start']
    end = kwargs['end']
    # Compare against None because strings/timestamps may have been given
    if start is not None:
        start = pd.Timestamp(start, tz='UTC')
    if end is not None:
        end = pd.Timestamp(end, tz='UTC')

    # Fail out if only one bound is provided
    if ((start is None) or (end is None)) and (start != end):
        raise PipelineDateError(start=start, end=end)

    # Check if start and end are provided, and if the sim_params need to read
    # a start and end from the DataSource
    if start is None:
        overwrite_sim_params = True
    else:
        overwrite_sim_params = False

    symbols = kwargs['symbols'].split(',')
    asset_identifier = kwargs['metadata_index']

    # Pull asset metadata
    asset_metadata = kwargs.get('asset_metadata', None)
    asset_metadata_path = kwargs['metadata_path']
    # Read in a CSV file, if applicable
    if asset_metadata_path is not None:
        if os.path.isfile(asset_metadata_path):
            asset_metadata = pd.read_csv(asset_metadata_path,
                                         index_col=asset_identifier)

    source_arg = kwargs['source']
    source_time_column = kwargs['source_time_column']

    # resolve the data source: named provider, CSV file, or CSV folder
    if source_arg is None:
        raise NoSourceError()

    elif source_arg == 'yahoo':
        source = zipline.data.load_bars_from_yahoo(
            stocks=symbols, start=start, end=end)

    elif os.path.isfile(source_arg):
        source = zipline.data.load_prices_from_csv(
            filepath=source_arg,
            identifier_col=source_time_column
        )

    elif os.path.isdir(source_arg):
        source = zipline.data.load_prices_from_csv_folder(
            folderpath=source_arg,
            identifier_col=source_time_column
        )

    else:
        raise NotImplementedError(
            'Source %s not implemented.' % kwargs['source'])

    algo_text = kwargs.get('algo_text', None)
    if algo_text is None:
        # Expect algofile to be set
        algo_fname = kwargs['algofile']
        with open(algo_fname, 'r') as fd:
            algo_text = fd.read()

    if print_algo:
        if PYGMENTS:
            highlight(algo_text, PythonLexer(), TerminalFormatter(),
                      outfile=sys.stdout)
        else:
            print_(algo_text)

    algo = zipline.TradingAlgorithm(script=algo_text,
                                    namespace=kwargs.get('namespace', {}),
                                    capital_base=float(kwargs['capital_base']),
                                    algo_filename=kwargs.get('algofile'),
                                    equities_metadata=asset_metadata,
                                    identifiers=symbols,
                                    start=start,
                                    end=end)

    perf = algo.run(source, overwrite_sim_params=overwrite_sim_params)

    output_fname = kwargs.get('output', None)
    if output_fname is not None:
        perf.to_pickle(output_fname)

    return perf
| |
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15        # seconds to wait after an RPC failure before retrying
MAX_NONCE = 1000000L  # initial nonce scan range per getwork request
settings = {}         # key=value pairs read from the config file in __main__
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node over HTTP basic auth."""

    OBJID = 1  # running request id; incremented once per rpc() call

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # blocking HTTP connection with a 30 second timeout
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC request; return its 'result' (None on failure).

        NOTE(review): on an RPC-level error, the error *object* is returned
        rather than raised — callers must inspect the return value.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        # convenience wrapper for the 'getblockcount' RPC
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # convenience wrapper for the 'getwork' RPC (data submits a solution)
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value."""
    return x & 0xffffffff
def bytereverse(x):
    """Swap the byte order of a 32-bit word."""
    swapped = ((x << 24) | ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) | (x >> 24))
    # mask back down to 32 bits (inlined uint32)
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap each aligned 32-bit word of *in_buf* (a py2 byte string)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        # unpack one native-endian 32-bit word, re-pack it byte-reversed
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words of *in_buf*."""
    words = [in_buf[off:off + 4] for off in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """One getwork mining worker: scans nonces against the share target."""

    def __init__(self, id):
        self.id = id
        # adjusted after each iteration so one scan takes ~settings['scantime']
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one getwork job.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None if no share was found in the scan range.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work and submit it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # nonce occupies hex chars 152..160 of the 256-char data field
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """Fetch one unit of work, scan it, and submit any solution found."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # re-target the scan range so one pass lasts ~'scantime' seconds
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the configured RPC node and mine forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Entry point for one worker process: create a Miner and run forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # read the config file: plain key=value lines, '#' starts a comment
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # fill in defaults for optional settings
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 5526
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # config values arrive as strings; coerce the numeric ones
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # spawn one mining process per configured thread
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""cond_v2 and gradient.
This is a version of cond that emits a single If op, as well as the gradient
function for If ops produced by cond_v2. This will eventually replace the
current tf.cond implementation once it reaches feature and performance parity.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import function_def_to_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util_v2 as util
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.util import nest
# NOTE(skyewm): TensorFlow uses protected class methods and fields to signify
# that they aren't part of the official public API. These protected members
# often need to be used by implementation code however. Rather than litter the
# code with pylint comments, we ignore protected access violations for
# readability.
# pylint: disable=protected-access
def cond_v2(pred, true_fn, false_fn, name="cond"):
  """Like tf.cond, except emits a single If op.

  Args:
    pred: boolean Tensor selecting the branch (Python bools are rejected).
    true_fn: callable (no args) building the true-branch subgraph.
    false_fn: callable (no args) building the false-branch subgraph.
    name: optional name scope for the If op.

  Returns:
    The branch outputs, packed with the structure of
    true_graph.structured_outputs.

  Raises:
    TypeError: if `pred` is a Python bool.
  """
  if isinstance(pred, bool):
    raise TypeError("pred must not be a Python bool", pred)

  if not name:
    name = "cond"

  with ops.name_scope(name) as scope:
    # Build unique function names for the two branch graphs.
    true_name = util.unique_fn_name(scope, "true")
    false_name = util.unique_fn_name(scope, "false")

    # Automatic control dependencies are added in defuns, but not in v1
    # graphs. Propagate that behavior here.
    add_control_dependencies = util.in_defun()
    pred = ops.convert_to_tensor(pred)

    # Trace each branch callable into its own FuncGraph; op_return_value=pred
    # ties otherwise-dangling ops to the predicate.
    true_graph = func_graph_module.func_graph_from_py_func(
        true_name,
        true_fn, [], {},
        func_graph=util.CondBranchFuncGraph(
            true_name, read_only_collections=False),
        add_control_dependencies=add_control_dependencies,
        op_return_value=pred)
    false_graph = func_graph_module.func_graph_from_py_func(
        false_name,
        false_fn, [], {},
        func_graph=util.CondBranchFuncGraph(
            false_name, read_only_collections=False),
        add_control_dependencies=add_control_dependencies,
        op_return_value=pred)
    _check_same_outputs(true_graph, false_graph)

    # Add inputs to true_graph and false_graph to make them match. Note that
    # this modifies true_graph and false_graph.
    cond_inputs = _make_inputs_match(true_graph, false_graph,
                                     true_graph.external_captures,
                                     false_graph.external_captures)

    # Add all intermediate tensors as function outputs so they're available for
    # the gradient computation.

    true_intermediates = _get_intermediates(true_graph)
    false_intermediates = _get_intermediates(false_graph)

    # Save the original number of outputs to return to the caller.
    num_cond_outputs = len(true_graph.outputs)

    # Make the number/type of new intermediate outputs match.
    extra_true_outputs, extra_false_outputs = _pad_params(
        true_graph, false_graph, true_intermediates, false_intermediates)

    true_graph.outputs.extend(extra_true_outputs)
    false_graph.outputs.extend(extra_false_outputs)

    # Create the If op.
    tensors = gen_functional_ops._if(  # pylint: disable=protected-access
        pred,
        cond_inputs, [t.dtype for t in true_graph.outputs],
        util.create_new_tf_function(true_graph),
        util.create_new_tf_function(false_graph),
        output_shapes=_get_output_shapes(true_graph.outputs,
                                         false_graph.outputs),
        name=scope)

    # TODO(b/110167197) this approach requires cond_v2 to have at least 1 output
    util.maybe_set_lowering_attr(tensors[0].op)

    # Return identities for each output of the If op, rather than the output of
    # the If op directly. This makes pruning work if the output of cond() is
    # fetched: the lowering pass converts the If outputs into IdentityN outputs,
    # which if fetched will cause all ops in the taken branch to be run (since
    # it takes all merge ops as input). After lowering, each output identity op
    # will end up with only the appropriate merge op as input.
    # TODO(b/79984175): this doesn't have to be a tuple once we covert to the
    # correct output structure
    tensors = tuple(array_ops.identity(t) for t in tensors)

    # Drop the intermediate outputs and repack into the user-visible structure.
    return func_graph_module.pack_sequence_as(true_graph.structured_outputs,
                                              tensors[:num_cond_outputs])
@ops.RegisterGradient("If")
def _IfGrad(op, *grads):  # pylint: disable=invalid-name
  """The gradient of an If op produced by cond_v2.

  Mirrors cond_v2's construction: builds a gradient graph per branch,
  matches their inputs/outputs, and emits a single gradient If op driven
  by the same predicate.
  """
  true_graph, false_graph = _get_func_graphs(op)
  # Note: op.graph != ops.get_default_graph() when we are computing the gradient
  # of a nested cond.
  assert true_graph.outer_graph == op.graph
  assert false_graph.outer_graph == op.graph

  # Create grad functions that compute the gradient of the true/false forward
  # graphs. These functions will capture tensors from the forward pass
  # functions.
  true_grad_graph = _create_grad_func(
      true_graph, grads, util.unique_grad_fn_name(true_graph.name))
  false_grad_graph = _create_grad_func(
      false_graph, grads, util.unique_grad_fn_name(false_graph.name))

  # Both branches must produce the same output signature.
  assert ([t.dtype for t in true_grad_graph.outputs] ==
          [t.dtype for t in false_grad_graph.outputs])

  # Resolve references to forward graph tensors in grad graphs and ensure
  # they are in-scope, i.e., belong to one of outer graphs of the grad graph.
  true_grad_inputs = _resolve_grad_inputs(true_graph, true_grad_graph)
  false_grad_inputs = _resolve_grad_inputs(false_graph, false_grad_graph)

  # Make the inputs to true_grad_graph and false_grad_graph match. Note that
  # this modifies true_grad_graph and false_grad_graph.
  grad_inputs = _make_inputs_match(true_grad_graph, false_grad_graph,
                                   true_grad_inputs, false_grad_inputs)

  # Add all intermediate tensors as function outputs so they're available for
  # higher-order gradient computations.

  true_grad_intermediates = _get_intermediates(true_grad_graph)
  false_grad_intermediates = _get_intermediates(false_grad_graph)

  # Save the original number of gradient outputs to return.
  num_grad_outputs = len(true_grad_graph.outputs)

  # Make the number/type of new intermediate outputs match.
  extra_true_grad_outputs, extra_false_grad_outputs = _pad_params(
      true_grad_graph, false_grad_graph,
      true_grad_intermediates, false_grad_intermediates)

  true_grad_graph.outputs.extend(extra_true_grad_outputs)
  false_grad_graph.outputs.extend(extra_false_grad_outputs)

  # Create the gradient If op.
  tensors = gen_functional_ops._if(
      op.inputs[0],
      grad_inputs, [t.dtype for t in true_grad_graph.outputs],
      util.create_new_tf_function(true_grad_graph),
      util.create_new_tf_function(false_grad_graph),
      output_shapes=_get_output_shapes(true_grad_graph.outputs,
                                       false_grad_graph.outputs))

  util.maybe_set_lowering_attr(tensors[0].op)

  # See comment in cond_v2.
  tensors = [array_ops.identity(t) for t in tensors]

  # The predicate has no gradient.
  return [None] + tensors[:num_grad_outputs]
def _get_func_graphs(if_op):
  """Returns `FuncGraph`s for the input op branches.

  Args:
    if_op: The _If Operation.

  Returns:
    A 2-tuple of the `FuncGraph`s of the then_branch and else_branch.
  """
  def _get_func_graph_for_branch(branch_name):
    """Generates and returns a FuncGraph for the given branch."""
    inputs = if_op.inputs[1:]  # First input is pred.
    input_shapes = [t.shape for t in inputs]
    # Look up the branch's FunctionDef by the name stored in the op attr.
    func_name = if_op.get_attr(branch_name).name
    fdef = if_op.graph._get_function(func_name).definition
    # `if_op.graph` may not be the same as `ops.get_default_graph()` e.g.
    # in the case of nested if ops or when the gradient is being computed
    # from inside a Defun. We build the `func_graph` with `if_op.graph` as its
    # `outer_graph`. This resembles how the `FuncGraph` was built in the
    # forward pass. We need this so that we can resolve references to tensors
    # in `func_graph` from its gradient graph in `_resolve_grad_inputs`.
    with if_op.graph.as_default():
      func_graph = function_def_to_graph.function_def_to_graph(
          fdef, input_shapes)
    # Map the If op's inputs onto the reconstructed graph's placeholders.
    func_graph.captures = collections.OrderedDict(zip(inputs,
                                                      func_graph.inputs))
    # Set the if op so that the gradient code can use it.
    func_graph._if = if_op
    return func_graph

  return (_get_func_graph_for_branch("then_branch"),
          _get_func_graph_for_branch("else_branch"))
def _grad_fn(func_graph, grads):
  """The gradient function for each conditional branch.

  Builds the gradient graph of the corresponding forward-pass conditional
  branch in `func_graph` by differentiating func_graph's outputs w.r.t. its
  inputs.

  Args:
    func_graph: FuncGraph. The corresponding forward-pass function.
    grads: The list of input gradient Tensors.

  Returns:
    The output gradient Tensors.
  """
  assert len(func_graph.outputs) == len(grads)
  # Drop untrainable outputs (and their incoming gradients) up front.
  # NOTE(skyewm): If we don't do this, the untrainable tensors can sometimes
  # cause _GradientsHelper to raise an exception (e.g. the implementation
  # doesn't expect 'ys' to contain boolean tensors).
  trainable_pairs = [(y, dy) for y, dy in zip(func_graph.outputs, grads)
                     if gradients_impl.IsTrainable(y)]
  ys = [y for y, _ in trainable_pairs]
  grad_ys = [dy for _, dy in trainable_pairs]
  # Build the gradient graph. Note that this builds the gradient computation of
  # func_graph in the current graph, which requires capturing tensors from
  # func_graph. The captured func_graph tensors are resolved to external
  # tensors in _resolve_grad_inputs.
  result = gradients_impl._GradientsHelper(
      ys, func_graph.inputs, grad_ys=grad_ys,
      src_graph=func_graph)
  # Functions can't return None; replace Nones with zero tensors.
  # TODO(b/80444525): don't return anything here and make _IfGrad return None
  # if both branches have zero gradient.
  return [array_ops.zeros_like(func_graph.inputs[i]) if g is None else g
          for i, g in enumerate(result)]
def _create_grad_func(func_graph, grads, name):
  """Returns the FuncGraph representation of _grad_fn."""
  # Trace _grad_fn (closing over the forward graph and incoming grads) into a
  # fresh cond-branch FuncGraph with the requested name.
  branch_graph = util.CondBranchFuncGraph(name, read_only_collections=False)
  return func_graph_module.func_graph_from_py_func(
      name,
      lambda: _grad_fn(func_graph, grads),
      [], {},
      func_graph=branch_graph)
def _resolve_grad_inputs(cond_graph, grad_graph):
  """Returns the tensors to pass as inputs to `grad_graph`.

  The `grad_graph` may have external references to
  1. Its outer graph containing the input gradients. These references are kept
     as is.
  2. Tensors in the forward pass graph. These tensors may not be "live"
     when the gradient is being computed. We replace such references by their
     corresponding tensor in `cond_graph.outer_graph`. In the case of nested
     control flow or functions, the gradient logic handling
     `grad_graph.outer_graph` will make sure the tensor from
     `cond_graph.outer_graph` is also correctly captured.

  Args:
    cond_graph: FuncGraph. The forward-pass function.
    grad_graph: FuncGraph. The gradients function.

  Returns:
    A list of inputs tensors to be passed to grad_graph.
  """
  new_inputs = []
  for t in grad_graph.external_captures:
    # `t` must either be in `grad_graph.outer_graph` or in the forward
    # `cond_graph`.
    if t.graph != grad_graph.outer_graph:
      assert t.graph == cond_graph
      # `internal_captures` are not treated as intermediates and hence not added
      # to If op outputs. So we get the outer tensor corresponding to those
      # from the list of `external_captures`.
      try:
        # Forward intermediate: map it to the If op output at the same index.
        t = t.graph._if.outputs[t.graph.outputs.index(t)]
      except ValueError:
        # Not an output: it must be a capture; use its outer-graph twin.
        index = t.graph.internal_captures.index(t)
        t = t.graph.external_captures[index]
      # Note: We rely on the capturing logic of the gradient If op graph to
      # correctly capture the tensors in `cond_graph.outer_graph`. Both cond_v2
      # and while_v2 handle this while building their gradient functions.
      assert t.graph == cond_graph.outer_graph
    new_inputs.append(t)
  return new_inputs
def _get_intermediates(func_graph):
  """Returns all tensors in `func_graph` that aren't inputs or outputs.

  Args:
    func_graph: FuncGraph whose operations are scanned.

  Returns:
    A list of intermediate Tensors in op/output order.
  """
  # Membership tests against the `inputs`/`outputs` lists cost O(len(list))
  # per tensor; hoist both into a single set so each check is O(1). Graph
  # Tensors hash/compare by identity here, so set membership agrees with the
  # original list `in` checks.
  boundary = set(func_graph.inputs) | set(func_graph.outputs)
  intermediates = []
  for op in func_graph.get_operations():
    for t in op.outputs:
      if t not in boundary:
        intermediates.append(t)
  return intermediates
def _separate_unique_inputs(true_inputs, false_inputs):
  """Separates tensors appearing only in true_inputs or false_inputs, or both.

  Args:
    true_inputs: list of Tensors
    false_inputs: list of Tensors

  Returns:
    Three lists of Tensors:
      1. The tensors that appear in both true_inputs and false_inputs
      2. The tensors that only appear in true_inputs
      3. The tensors that only appear in false_inputs
  """
  true_set = set(true_inputs)
  false_set = set(false_inputs)
  return (list(true_set & false_set),
          list(true_set - false_set),
          list(false_set - true_set))
def _pad_params(true_graph, false_graph, true_params, false_params):
  """Returns new param lists that have matching signatures.

  This is done by mirroring each param list in the other using dummy params.
  There is no merging of params.

  Args:
    true_graph: FuncGraph
    false_graph: FuncGraph
    true_params: a list of Tensors from true_graph
    false_params: a list of Tensors from false_graph

  Returns:
    A new list of Tensors in true_graph and a new list of Tensors in
    false_graph. The two lists have the same number of Tensors, with matching
    types and shapes across the lists.
  """
  # Signatures line up as [true_params | dummies-of-false] versus
  # [dummies-of-true | false_params].
  true_padding = _create_dummy_params(true_graph, false_params)
  false_padding = _create_dummy_params(false_graph, true_params)
  return true_params + true_padding, false_padding + false_params
def _make_inputs_match(true_graph, false_graph, true_inputs, false_inputs):
  """Modifies true_graph and false_graph so they have the same input signature.

  This method reorders and/or adds parameters to true_graph and false_graph so
  they have the same input signature, and updates the 'inputs' and 'captured'
  fields of both graphs accordingly. It uses the input tensors from the outer
  graph to avoid duplicating shared arguments.

  Args:
    true_graph: FuncGraph
    false_graph: FuncGraph
    true_inputs: a list of Tensors in the outer graph. The inputs for
      true_graph.
    false_inputs: a list of Tensors in the outer graph. The inputs for
      false_graph.

  Returns:
    A new list of Tensors from the outer graph that are the new inputs for both
    true_graph and false_graph. This is a deduped version of true_inputs +
    false_inputs.
  """
  shared_inputs, true_only_inputs, false_only_inputs = _separate_unique_inputs(
      true_inputs, false_inputs)
  # Canonical outer-input order for both branches: shared, then branch-only.
  new_inputs = shared_inputs + true_only_inputs + false_only_inputs
  # Map each outer input tensor to the branch parameter it currently feeds.
  true_input_to_param = dict(zip(true_inputs, true_graph.inputs))
  false_input_to_param = dict(zip(false_inputs, false_graph.inputs))
  # Rebuild parameter lists in the canonical order; positions that belong to
  # the other branch only are filled with dummy (fake-param) placeholders.
  true_graph.inputs = (
      [true_input_to_param[t] for t in shared_inputs] +
      [true_input_to_param[t] for t in true_only_inputs] +
      _create_dummy_params(true_graph, false_only_inputs))
  false_graph.inputs = (
      [false_input_to_param[t] for t in shared_inputs] +
      _create_dummy_params(false_graph, true_only_inputs) +
      [false_input_to_param[t] for t in false_only_inputs])
  # Rewrite the FuncGraphs' state to reflect the new inputs.
  true_graph.captures = collections.OrderedDict(zip(new_inputs,
                                                    true_graph.inputs))
  false_graph.captures = collections.OrderedDict(zip(new_inputs,
                                                     false_graph.inputs))
  return new_inputs
def _create_dummy_params(func_graph, template_tensors):
  """Creates tensors in func_graph to represent template_tensors.

  Args:
    func_graph: FuncGraph.
    template_tensors: a list of tensors in the outer graph.

  Returns:
    A list of tensors in func_graph.
  """
  # One FakeParam per template, created inside func_graph so the placeholders
  # belong to the branch function.
  dummies = []
  with func_graph.as_default():
    for template in template_tensors:
      dummies.append(
          gen_functional_ops.fake_param(
              dtype=template.dtype, shape=template.shape))
  return dummies
def _check_same_outputs(true_graph, false_graph):
  """Raises an error if true_graph and false_graph have different outputs."""
  true_output_types = [t.dtype for t in true_graph.outputs]
  false_output_types = [t.dtype for t in false_graph.outputs]
  # Both the arity and the per-position dtypes must agree across branches.
  signatures_differ = (
      len(true_graph.outputs) != len(false_graph.outputs)
      or true_output_types != false_output_types)
  if signatures_differ:
    raise TypeError(
        "true_fn() and false_fn() must return the same number and type of "
        "arguments, got:\n"
        "  true_fn: %s\n"
        "  false_fn: %s" % (true_output_types, false_output_types))
  # Make sure `structured_outputs` for both graphs have the same structure.
  try:
    nest.assert_same_structure(true_graph.structured_outputs,
                               false_graph.structured_outputs)
  except (ValueError, TypeError) as e:
    raise ValueError("Outputs of true_fn and false_fn must have the same "
                     "structure: %s" % str(e))
def _get_output_shapes(true_graph_outputs, false_graph_outputs):
  """Returns the pairwise most-specific compatible shapes of the two branches."""
  return [
      true_out.shape.most_specific_compatible_shape(false_out.shape)
      for true_out, false_out in zip(true_graph_outputs, false_graph_outputs)
  ]
| |
'''
This is a scaled down version of clingo-dl show casing how to implement a
propagator for difference logic.
'''
from typing import List, Sequence, Optional, MutableMapping, Tuple, Set, cast
import heapq
import sys
from clingo import ast
from clingo.symbol import Function, Number, Symbol, SymbolType, Tuple_
from clingo.theory_atoms import TheoryTerm, TheoryTermType
from clingo.solving import Model
from clingo.propagator import Assignment, PropagateControl, PropagateInit, Propagator
from clingo.application import clingo_main, Application, ApplicationOptions
from clingo.control import Control
from clingo import SolveResult, parse_term
from clingo.ast import parse_files, AST, ProgramBuilder, Transformer
# Type aliases used throughout the difference-logic propagator below.
Node = Symbol # pylint: disable=invalid-name
# Edge weights and solver decision levels are plain integers.
Weight = int
Level = int
# A (directed) edge, optionally carrying its weight.
Edge = Tuple[Node, Node]
WeightedEdge = Tuple[Node, Node, Weight]
# Mapping from graph node to an integer value (potential, gamma, ...).
MapNodeWeight = MutableMapping[Node, Weight]
THEORY = """
#theory dl{
diff_term {
- : 3, unary;
** : 2, binary, right;
* : 1, binary, left;
/ : 1, binary, left;
\\ : 1, binary, left;
+ : 0, binary, left;
- : 0, binary, left
};
&diff/1 : diff_term, {<=}, diff_term, any
}.
"""
# Binary arithmetic operators of the `diff_term` theory mapped to Python
# integer semantics; `/` is floor division and `\` is modulo, matching
# clingo's own integer arithmetic.
_BOP = {"+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "*": lambda a, b: a * b,
        "**": lambda a, b: a ** b,
        "\\": lambda a, b: a % b,
        "/": lambda a, b: a // b}
def _evaluate(term: TheoryTerm) -> Symbol:
    '''
    Evaluates the operators in a theory term in the same fashion as clingo
    evaluates its arithmetic functions.
    '''
    kind = term.type
    # tuples: evaluate element-wise
    if kind == TheoryTermType.Tuple:
        return Tuple_([_evaluate(arg) for arg in term.arguments])
    # functions and arithmetic operations
    if kind == TheoryTermType.Function:
        args = term.arguments
        # binary operations
        if term.name in _BOP and len(args) == 2:
            lhs = _evaluate(args[0])
            rhs = _evaluate(args[1])
            if lhs.type != SymbolType.Number or rhs.type != SymbolType.Number:
                raise RuntimeError("Invalid Binary Operation")
            if term.name in ("/", "\\") and rhs.number == 0:
                raise RuntimeError("Division by Zero")
            return Number(_BOP[term.name](lhs.number, rhs.number))
        # unary minus: negate a number or flip the sign of a function symbol
        if term.name == "-" and len(args) == 1:
            operand = _evaluate(args[0])
            if operand.type == SymbolType.Number:
                return Number(-operand.number)
            if operand.type == SymbolType.Function and operand.name:
                return Function(operand.name, operand.arguments,
                                not operand.positive)
            raise RuntimeError("Invalid Unary Operation")
        # plain functions: evaluate arguments recursively
        return Function(term.name, [_evaluate(arg) for arg in args])
    # constants
    if kind == TheoryTermType.Symbol:
        return Function(term.name)
    # numbers
    if kind == TheoryTermType.Number:
        return Number(term.number)
    raise RuntimeError("Invalid Syntax")
class HeadBodyTransformer(Transformer):
    '''
    Transformer to tag head and body occurrences of `&diff` atoms.
    '''

    def visit_Literal(self, lit: AST, in_lit: bool = False) -> AST:
        '''
        Visit literal; any theory atom in a literal is a body literal.
        '''
        # Descend with in_lit=True so theory atoms below get tagged "body".
        children = self.visit_children(lit, True)
        return lit.update(**children)

    def visit_TheoryAtom(self, atom: AST, in_lit: bool = False) -> AST:
        '''
        Visit theory atom and tag as given by in_lit.
        '''
        # pylint: disable=invalid-name,no-self-use
        term = atom.term
        if term.name != "diff" or term.arguments:
            return atom
        tag = "body" if in_lit else "head"
        # Rewrite &diff into &diff(head) / &diff(body) in place.
        atom.term = ast.Function(
            term.location,
            term.name,
            [ast.Function(term.location, tag, [], False)], False)
        return atom
class Graph:
    '''
    This class captures a graph with weighted edges that can be extended
    incrementally.

    Adding an edge triggers a cycle check that will report negative cycles.
    '''
    # Invariant kept by add_edge: for every stored edge (u, v) with weight d,
    # _potential[u] + d - _potential[v] >= 0 (a feasible potential function).
    _potential: MapNodeWeight
    _graph: MutableMapping[Node, MapNodeWeight]
    # Scratch per-node potential deltas used during add_edge; reset to 0 after.
    _gamma: MapNodeWeight
    # Last edge that lowered a node's gamma; used to reconstruct cycles.
    _last_edges: MutableMapping[Node, WeightedEdge]
    # Undo logs per decision level for edges and potentials.
    _previous_edge: MutableMapping[Level, MutableMapping[Edge, Weight]]
    _previous_potential: MutableMapping[Level, MapNodeWeight]

    def __init__(self):
        self._potential = {} # {node: potential}
        self._graph = {} # {node: {node : weight}}
        self._gamma = {} # {node: gamma}
        self._last_edges = {} # {node: edge}
        self._previous_edge = {} # {level: {(node, node): weight}}
        self._previous_potential = {} # {level: {node: potential}}

    @staticmethod
    def _set(level, key, val, previous, get_current):
        '''
        Set `key` to `val` in the mapping selected by `get_current`, logging
        the overwritten value (None if the key was absent) under `level` so
        `_reset` can undo the change on backtracking.
        '''
        p = previous.setdefault(level, {})
        c, k = get_current(key)
        # Only the first change per level is logged: that is the value to
        # restore when the whole level is undone.
        if not key in p:
            p[key] = c[k] if k in c else None
        c[k] = val

    @staticmethod
    def _reset(level, previous, get_current):
        '''
        Undo all changes logged for `level` by `_set`, deleting keys that did
        not exist before and restoring previous values otherwise.
        '''
        if level in previous:
            for key, val in previous[level].items():
                c, k = get_current(key)
                if val is None:
                    del c[k]
                else:
                    c[k] = val
            del previous[level]

    def _reset_edge(self, level: Level):
        # Undo edge-weight changes recorded for the given decision level.
        self._reset(level, self._previous_edge, lambda key: (self._graph[key[0]], key[1]))

    def _reset_potential(self, level: Level):
        # Undo potential changes recorded for the given decision level.
        self._reset(level, self._previous_potential, lambda key: (self._potential, key))

    def _set_edge(self, level: Level, key: Edge, val: Weight):
        # Set an edge weight, remembering the old value for backtracking.
        self._set(level, key, val, self._previous_edge, lambda key: (self._graph[key[0]], key[1]))

    def _set_potential(self, level: Level, key: Node, val: Weight):
        # Set a node potential, remembering the old value for backtracking.
        self._set(level, key, val, self._previous_potential, lambda key: (self._potential, key))

    def add_edge(self, level: Level, edge: WeightedEdge) -> Optional[List[WeightedEdge]]:
        '''
        Add an edge to the graph and return a negative cycle (if there is one).
        '''
        u, v, d = edge
        # If edge already exists from u to v with lower weight, new edge is redundant
        if u in self._graph and v in self._graph[u] and self._graph[u][v] <= d:
            return None
        # Initialize potential and graph
        if u not in self._potential:
            self._set_potential(level, u, 0)
        if v not in self._potential:
            self._set_potential(level, v, 0)
        self._graph.setdefault(u, {})
        self._graph.setdefault(v, {})
        changed: Set[Node] = set() # Set of nodes for which potential has been changed
        min_gamma: List[Tuple[Weight, Node]] = []
        # Update potential change induced by new edge, 0 for other nodes
        self._gamma[u] = 0
        self._gamma[v] = self._potential[u] + d - self._potential[v]
        if self._gamma[v] < 0:
            heapq.heappush(min_gamma, (self._gamma[v], v))
            self._last_edges[v] = (u, v, d)
        # Propagate negative potential change
        # (stops early once gamma[u] goes negative: a negative cycle exists)
        while len(min_gamma) > 0 and self._gamma[u] == 0:
            _, s = heapq.heappop(min_gamma)
            if s not in changed:
                self._set_potential(level, s, self._potential[s] + self._gamma[s])
                self._gamma[s] = 0
                changed.add(s)
                for t in self._graph[s]:
                    if t not in changed:
                        gamma_t = self._potential[s] + self._graph[s][t] - self._potential[t]
                        if gamma_t < self._gamma[t]:
                            self._gamma[t] = gamma_t
                            heapq.heappush(min_gamma, (gamma_t, t))
                            self._last_edges[t] = (s, t, self._graph[s][t])
        cycle = None
        # Check if there is a negative cycle
        if self._gamma[u] < 0:
            # Walk the chain of last edges back from v to reconstruct it.
            cycle = []
            x, y, c = self._last_edges[v]
            cycle.append((x, y, c))
            while v != x:
                x, y, c = self._last_edges[x]
                cycle.append((x, y, c))
        else:
            # No cycle: commit the new edge (undoable at this level).
            self._set_edge(level, (u, v), d)
        # Ensure that all gamma values are zero
        self._gamma[v] = 0
        while len(min_gamma) > 0:
            _, s = heapq.heappop(min_gamma)
            self._gamma[s] = 0
        return cycle

    def get_assignment(self) -> List[Tuple[Node, Weight]]:
        '''
        Get the current assignment to integer variables.
        '''
        # Values derive from the potentials, shifted so that node `0` (if
        # present) is assigned zero.
        zero = Number(0)
        adjust = self._potential[zero] if zero in self._potential else 0
        return [(node, adjust - self._potential[node]) for node in self._potential if node != zero]

    def backtrack(self, level):
        '''
        Backtrack the given level.
        '''
        self._reset_edge(level)
        self._reset_potential(level)
class DLPropagator(Propagator):
    '''
    A propagator for difference constraints.
    '''
    # literal -> list of edges the literal entails
    _l2e: MutableMapping[int, List[WeightedEdge]]
    # edge -> list of literals entailing it (used to build nogoods)
    _e2l: MutableMapping[WeightedEdge, List[int]]
    # one Graph per solver thread
    _states: List[Graph]

    def __init__(self):
        self._l2e = {} # {literal: [(node, node, weight)]}
        self._e2l = {} # {(node, node, weight): [literal]}
        self._states = [] # [Graph]

    def _add_edge(self, init: PropagateInit, lit: int, u: Node, v: Node, w: Weight):
        '''
        Record that `lit` entails edge u -(w)-> v and watch the literal.
        '''
        edge = (u, v, w)
        self._l2e.setdefault(lit, []).append(edge)
        self._e2l.setdefault(edge, []).append(lit)
        init.add_watch(lit)

    def init(self, init: PropagateInit):
        '''
        Initialize the propagator extracting difference constraints from the
        theory data.
        '''
        for atom in init.theory_atoms:
            term = atom.term
            # Atoms were rewritten to &diff(head)/&diff(body) by
            # HeadBodyTransformer, so the term has exactly one argument.
            if term.name == "diff" and len(term.arguments) == 1:
                assert atom.guard is not None
                # Element is `u - v`; guard is `<= w`.
                u = _evaluate(atom.elements[0].terms[0].arguments[0])
                v = _evaluate(atom.elements[0].terms[0].arguments[1])
                w = _evaluate(atom.guard[1]).number
                lit = init.solver_literal(atom.literal)
                self._add_edge(init, lit, u, v, w)
                if term.arguments[0].name == "body":
                    # Body atoms may be assigned false; over the integers the
                    # negation of u - v <= w is v - u <= -w - 1.
                    self._add_edge(init, -lit, v, u, -w - 1)

    def propagate(self, control: PropagateControl, changes: Sequence[int]):
        '''
        Add edges that became true to the graph to check for negative cycles.
        '''
        state = self._state(control.thread_id)
        level = control.assignment.decision_level
        for lit in changes:
            for edge in self._l2e[lit]:
                cycle = state.add_edge(level, edge)
                if cycle is not None:
                    # Turn the cycle's edges back into the literals that entail
                    # them and report the set as a nogood.
                    c = [self._literal(control, e) for e in cycle]
                    if control.add_nogood(c):
                        control.propagate()
                    return

    def undo(self, thread_id: int, assign: Assignment, changes: Sequence[int]):
        '''
        Backtrack the last decision level propagated.
        '''
        # pylint: disable=unused-argument
        self._state(thread_id).backtrack(assign.decision_level)

    def on_model(self, model: Model):
        '''
        This function should be called when a model has been found to extend it
        with the integer variable assignments.
        '''
        assignment = self._state(model.thread_id).get_assignment()
        model.extend([Function("dl", [var, Number(value)]) for var, value in assignment])

    def _state(self, thread_id: int) -> Graph:
        # Lazily create one Graph per solver thread.
        while len(self._states) <= thread_id:
            self._states.append(Graph())
        return self._states[thread_id]

    def _literal(self, control, edge):
        # Return a currently true literal entailing the given edge.
        for lit in self._e2l[edge]:
            if control.assignment.is_true(lit):
                return lit
        raise RuntimeError('must not happen')
class DLApp(Application):
    '''
    Application extending clingo with difference constraints.
    '''
    program_name: str = "clingo-dl"
    version: str = "1.0"
    # propagator enforcing the difference constraints
    _propagator: DLPropagator
    # variable to minimize (None disables optimization)
    _minimize: Optional[Symbol]
    # best value found so far for the minimize variable
    _bound: Optional[int]

    def __init__(self):
        self._propagator = DLPropagator()
        self._minimize = None
        self._bound = None

    def _parse_minimize(self, val):
        # Option callback for --minimize-variable; plain numbers are rejected
        # since only symbolic variables make sense as minimization targets.
        var = parse_term(val)
        if var.type == SymbolType.Number:
            return False
        self._minimize = var
        return True

    def register_options(self, options: ApplicationOptions):
        '''
        Register application options.
        '''
        group = 'Clingo.DL Options'
        options.add(group, 'minimize-variable', 'Minimize the given variable', self._parse_minimize, argument="<var>")

    def _read(self, path: str):
        # Read a program, treating "-" as standard input.
        # NOTE(review): appears unused within this class (parsing goes through
        # parse_files in _rewrite) — confirm before removing.
        if path == "-":
            return sys.stdin.read()
        with open(path) as file_:
            return file_.read()

    def _rewrite(self, ctl: Control, files: Sequence[str]):
        # Parse the input files, tagging &diff atoms as head/body occurrences
        # before adding them to the program.
        with ProgramBuilder(ctl) as bld:
            hbt = HeadBodyTransformer()
            parse_files(
                files,
                lambda stm: bld.add(cast(AST, hbt.visit(stm))))

    def _on_model(self, model: Model):
        # Extend the model with dl/2 atoms, then record the current value of
        # the minimize variable as the new bound.
        self._propagator.on_model(model)
        for symbol in model.symbols(theory=True):
            if symbol.match("dl", 2):
                n, v = symbol.arguments
                if n == self._minimize:
                    self._bound = v.number
                    break

    def main(self, ctl: Control, files: Sequence[str]):
        '''
        Register the difference constraint propagator, and then ground and
        solve.
        '''
        ctl.register_propagator(self._propagator)
        ctl.add("base", [], THEORY)
        if not files:
            files = ["-"]
        self._rewrite(ctl, files)
        ctl.ground([("base", [])])
        if self._minimize is None:
            ctl.solve(on_model=self._propagator.on_model)
        else:
            # Branch-and-bound: after each model, constrain the minimize
            # variable below the last bound and solve again until UNSAT.
            ctl.add("bound", ["b", "v"], "&diff(head) { v-0 } <= b.")
            while cast(SolveResult, ctl.solve(on_model=self._on_model)).satisfiable:
                print("Found new bound: {}".format(self._bound))
                if self._bound is None:
                    break
                ctl.ground([("bound", [Number(cast(int, self._bound) - 1), self._minimize])])
            if self._bound is not None:
                print("Optimum found")
# Script entry point: run the clingo application with the difference-logic
# propagator and forward its exit code to the shell.
if __name__ == "__main__":
    sys.exit(int(clingo_main(DLApp(), sys.argv[1:])))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.