gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
#!/usr/bin/python
#
# Test suite for Optik. Supplied by Johannes Gijsbers
# (taradino@softhome.net) -- translated from the original Optik
# test suite to this PyUnit-based version.
#
# $Id: test_optparse.py,v 1.10 2004/10/27 02:43:25 tim_one Exp $
#
import sys
import os
import copy
import unittest
from cStringIO import StringIO
from pprint import pprint
from test import test_support
from optparse import make_option, Option, IndentedHelpFormatter, \
TitledHelpFormatter, OptionParser, OptionContainer, OptionGroup, \
SUPPRESS_HELP, SUPPRESS_USAGE, OptionError, OptionConflictError, \
BadOptionError, OptionValueError, Values, _match_abbrev
# Do the right thing with boolean values for all known Python versions.
try:
    True, False
except NameError:
    # Pre-2.3 interpreters have no bool literals; fall back to plain ints.
    (True, False) = (1, 0)
class InterceptedError(Exception):
    """Raised by InterceptingOptionParser in place of a real error/exit.

    Records the message given to OptionParser.error() and/or the status
    and message given to OptionParser.exit(), so tests can inspect what
    the parser tried to report instead of letting it terminate.
    """

    def __init__(self, error_message=None, exit_status=None, exit_message=None):
        self.error_message = error_message
        self.exit_status = exit_status
        self.exit_message = exit_message

    def __str__(self):
        # Prefer the error message, then the exit message, then a stub.
        for text in (self.error_message, self.exit_message):
            if text:
                return text
        return "intercepted error"
class InterceptingOptionParser(OptionParser):
    """OptionParser that raises InterceptedError rather than terminating.

    Overrides the two termination hooks optparse uses so that tests can
    catch and inspect failures instead of having the process exit.
    """

    def error(self, msg):
        raise InterceptedError(error_message=msg)

    def exit(self, status=0, msg=None):
        raise InterceptedError(exit_status=status, exit_message=msg)
class BaseTest(unittest.TestCase):
    """Common assertion helpers shared by all optparse test cases below."""

    def assertParseOK(self, args, expected_opts, expected_positional_args):
        """Assert the options are what we expected when parsing arguments.

        Otherwise, fail with a nicely formatted message.

        Keyword arguments:
        args -- A list of arguments to parse with OptionParser.
        expected_opts -- The options expected.
        expected_positional_args -- The positional arguments expected.

        Returns the options and positional args for further testing.
        """
        (options, positional_args) = self.parser.parse_args(args)
        optdict = vars(options)

        self.assertEqual(optdict, expected_opts,
                         """
Options are %(optdict)s.
Should be %(expected_opts)s.
Args were %(args)s.""" % locals())

        self.assertEqual(positional_args, expected_positional_args,
                         """
Positional arguments are %(positional_args)s.
Should be %(expected_positional_args)s.
Args were %(args)s.""" % locals ())

        return (options, positional_args)

    # NOTE: deliberately overrides unittest.TestCase.assertRaises with an
    # incompatible signature; all tests in this file must use this form.
    def assertRaises(self,
                     func,
                     args,
                     kwargs,
                     expected_exception,
                     expected_message):
        """
        Assert that the expected exception is raised when calling a
        function, and that the right error message is included with
        that exception.

        Arguments:
          func -- the function to call
          args -- positional arguments to `func` (None means "no args")
          kwargs -- keyword arguments to `func` (None means "no kwargs")
          expected_exception -- exception that should be raised
          expected_message -- message the exception should carry

        Returns the exception raised for further testing.
        """
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}

        try:
            func(*args, **kwargs)
        except expected_exception, err:
            actual_message = str(err)
            self.assertEqual(actual_message,
                             expected_message,
                             """\
expected exception message:
'''%(expected_message)s'''
actual exception message:
'''%(actual_message)s'''
""" % locals())
            return err
        else:
            self.fail("""expected exception %(expected_exception)s not raised
called %(func)r
with args %(args)r
and kwargs %(kwargs)r
""" % locals ())

    # -- Assertions used in more than one class --------------------

    def assertParseFail(self, cmdline_args, expected_output):
        """
        Assert the parser fails with the expected message.  Caller
        must ensure that self.parser is an InterceptingOptionParser.
        """
        try:
            self.parser.parse_args(cmdline_args)
        except InterceptedError, err:
            self.assertEqual(err.error_message, expected_output)
        else:
            self.assertFalse("expected parse failure")

    def assertOutput(self,
                     cmdline_args,
                     expected_output,
                     expected_status=0,
                     expected_error=None):
        """Assert the parser prints the expected output on stdout."""
        save_stdout = sys.stdout
        try:
            try:
                sys.stdout = StringIO()
                self.parser.parse_args(cmdline_args)
            finally:
                # Capture whatever was printed even if parse_args() raised,
                # and always restore the real stdout.
                output = sys.stdout.getvalue()
                sys.stdout = save_stdout
        except InterceptedError, err:
            self.assertEqual(output, expected_output)
            self.assertEqual(err.exit_status, expected_status)
            self.assertEqual(err.exit_message, expected_error)
        else:
            self.assertFalse("expected parser.exit()")

    def assertTypeError(self, func, expected_message, *args):
        """Assert that TypeError is raised when executing func."""
        self.assertRaises(func, args, None, TypeError, expected_message)

    def assertHelp(self, parser, expected_help):
        """Assert parser.format_help() produces exactly expected_help."""
        actual_help = parser.format_help()
        if actual_help != expected_help:
            raise self.failureException(
                'help text failure; expected:\n"' +
                expected_help + '"; got:\n"' +
                actual_help + '"\n')
# -- Test make_option() aka Option -------------------------------------
# It's not necessary to test correct options here. All the tests in the
# parser.parse_args() section deal with those, because they're needed
# there.
class TestOptionChecks(BaseTest):
    """make_option()/Option must reject ill-formed option definitions
    with precise OptionError / TypeError messages."""

    def setUp(self):
        self.parser = OptionParser(usage=SUPPRESS_USAGE)

    # Helper: expect make_option(*args, **kwargs) to raise OptionError
    # with exactly `expected_message`.  (The mutable defaults are never
    # mutated, so sharing them across calls is harmless here.)
    def assertOptionError(self, expected_message, args=[], kwargs={}):
        self.assertRaises(make_option, args, kwargs,
                          OptionError, expected_message)

    def test_opt_string_empty(self):
        self.assertTypeError(make_option,
                             "at least one option string must be supplied")

    def test_opt_string_too_short(self):
        self.assertOptionError(
            "invalid option string 'b': must be at least two characters long",
            ["b"])

    def test_opt_string_short_invalid(self):
        self.assertOptionError(
            "invalid short option string '--': must be "
            "of the form -x, (x any non-dash char)",
            ["--"])

    def test_opt_string_long_invalid(self):
        self.assertOptionError(
            "invalid long option string '---': "
            "must start with --, followed by non-dash",
            ["---"])

    def test_attr_invalid(self):
        self.assertOptionError(
            "option -b: invalid keyword arguments: foo, bar",
            ["-b"], {'foo': None, 'bar': None})

    def test_action_invalid(self):
        self.assertOptionError(
            "option -b: invalid action: 'foo'",
            ["-b"], {'action': 'foo'})

    def test_type_invalid(self):
        # Both an unknown type name and a non-aliased type object fail.
        self.assertOptionError(
            "option -b: invalid option type: 'foo'",
            ["-b"], {'type': 'foo'})
        self.assertOptionError(
            "option -b: invalid option type: 'tuple'",
            ["-b"], {'type': tuple})

    def test_no_type_for_action(self):
        self.assertOptionError(
            "option -b: must not supply a type for action 'count'",
            ["-b"], {'action': 'count', 'type': 'int'})

    def test_no_choices_list(self):
        self.assertOptionError(
            "option -b/--bad: must supply a list of "
            "choices for type 'choice'",
            ["-b", "--bad"], {'type': "choice"})

    def test_bad_choices_list(self):
        typename = type('').__name__
        self.assertOptionError(
            "option -b/--bad: choices must be a list of "
            "strings ('%s' supplied)" % typename,
            ["-b", "--bad"],
            {'type': "choice", 'choices':"bad choices"})

    def test_no_choices_for_type(self):
        self.assertOptionError(
            "option -b: must not supply choices for type 'int'",
            ["-b"], {'type': 'int', 'choices':"bad"})

    def test_no_const_for_action(self):
        self.assertOptionError(
            "option -b: 'const' must not be supplied for action 'store'",
            ["-b"], {'action': 'store', 'const': 1})

    def test_no_nargs_for_action(self):
        self.assertOptionError(
            "option -b: 'nargs' must not be supplied for action 'count'",
            ["-b"], {'action': 'count', 'nargs': 2})

    def test_callback_not_callable(self):
        self.assertOptionError(
            "option -b: callback not callable: 'foo'",
            ["-b"], {'action': 'callback',
                     'callback': 'foo'})

    # Dummy callable used as a syntactically valid callback below.
    def dummy(self):
        pass

    def test_callback_args_no_tuple(self):
        self.assertOptionError(
            "option -b: callback_args, if supplied, "
            "must be a tuple: not 'foo'",
            ["-b"], {'action': 'callback',
                     'callback': self.dummy,
                     'callback_args': 'foo'})

    def test_callback_kwargs_no_dict(self):
        self.assertOptionError(
            "option -b: callback_kwargs, if supplied, "
            "must be a dict: not 'foo'",
            ["-b"], {'action': 'callback',
                     'callback': self.dummy,
                     'callback_kwargs': 'foo'})

    def test_no_callback_for_action(self):
        self.assertOptionError(
            "option -b: callback supplied ('foo') for non-callback option",
            ["-b"], {'action': 'store',
                     'callback': 'foo'})

    def test_no_callback_args_for_action(self):
        self.assertOptionError(
            "option -b: callback_args supplied for non-callback option",
            ["-b"], {'action': 'store',
                     'callback_args': 'foo'})

    def test_no_callback_kwargs_for_action(self):
        self.assertOptionError(
            "option -b: callback_kwargs supplied for non-callback option",
            ["-b"], {'action': 'store',
                     'callback_kwargs': 'foo'})
class TestOptionParser(BaseTest):
    """Exercise OptionParser's add/get/has/remove_option bookkeeping,
    including options with several short and long aliases."""

    def setUp(self):
        self.parser = OptionParser()
        self.parser.add_option("-v", "--verbose", "-n", "--noisy",
                               action="store_true", dest="verbose")
        self.parser.add_option("-q", "--quiet", "--silent",
                               action="store_false", dest="verbose")

    def test_add_option_no_Option(self):
        self.assertTypeError(self.parser.add_option,
                             "not an Option instance: None", None)

    def test_add_option_invalid_arguments(self):
        self.assertTypeError(self.parser.add_option,
                             "invalid arguments", None, None)

    def test_get_option(self):
        opt1 = self.parser.get_option("-v")
        self.assert_(isinstance(opt1, Option))
        self.assertEqual(opt1._short_opts, ["-v", "-n"])
        self.assertEqual(opt1._long_opts, ["--verbose", "--noisy"])
        self.assertEqual(opt1.action, "store_true")
        self.assertEqual(opt1.dest, "verbose")

    def test_get_option_equals(self):
        # All four aliases must resolve to the very same Option object.
        opt1 = self.parser.get_option("-v")
        opt2 = self.parser.get_option("--verbose")
        opt3 = self.parser.get_option("-n")
        opt4 = self.parser.get_option("--noisy")
        self.assert_(opt1 is opt2 is opt3 is opt4)

    def test_has_option(self):
        self.assert_(self.parser.has_option("-v"))
        self.assert_(self.parser.has_option("--verbose"))

    # Helper: after removing any alias of -v, every alias must be gone
    # while the unrelated -q option survives.
    def assert_removed(self):
        self.assert_(self.parser.get_option("-v") is None)
        self.assert_(self.parser.get_option("--verbose") is None)
        self.assert_(self.parser.get_option("-n") is None)
        self.assert_(self.parser.get_option("--noisy") is None)

        self.failIf(self.parser.has_option("-v"))
        self.failIf(self.parser.has_option("--verbose"))
        self.failIf(self.parser.has_option("-n"))
        self.failIf(self.parser.has_option("--noisy"))

        self.assert_(self.parser.has_option("-q"))
        self.assert_(self.parser.has_option("--silent"))

    def test_remove_short_opt(self):
        self.parser.remove_option("-n")
        self.assert_removed()

    def test_remove_long_opt(self):
        self.parser.remove_option("--verbose")
        self.assert_removed()

    def test_remove_nonexistent(self):
        self.assertRaises(self.parser.remove_option, ('foo',), None,
                          ValueError, "no such option 'foo'")
class TestOptionValues(BaseTest):
    """Basic behaviour of optparse.Values: vars() and dict equality."""

    def setUp(self):
        pass

    def test_basics(self):
        values = Values()
        self.assertEqual(vars(values), {})
        self.assertEqual(values, {})
        self.assertNotEqual(values, {"foo": "bar"})
        self.assertNotEqual(values, "")

        # (Local name shadows the builtin `dict`; kept for fidelity.)
        dict = {"foo": "bar", "baz": 42}
        values = Values(defaults=dict)
        self.assertEqual(vars(values), dict)
        self.assertEqual(values, dict)
        self.assertNotEqual(values, {"foo": "bar"})
        self.assertNotEqual(values, {})
        self.assertNotEqual(values, "")
        self.assertNotEqual(values, [])
class TestTypeAliases(BaseTest):
    """Type objects (int, str) given as `type=` must be normalized to
    their optparse string names."""

    def setUp(self):
        self.parser = OptionParser()

    def test_type_aliases(self):
        self.parser.add_option("-x", type=int)
        self.parser.add_option("-s", type=str)
        self.parser.add_option("-t", type="str")
        self.assertEquals(self.parser.get_option("-x").type, "int")
        self.assertEquals(self.parser.get_option("-s").type, "string")
        self.assertEquals(self.parser.get_option("-t").type, "string")
# Custom type for testing processing of default values.
_time_units = { 's' : 1, 'm' : 60, 'h' : 60*60, 'd' : 60*60*24 }

def _check_duration(option, opt, value):
    """Type-checker for the custom 'duration' option type.

    `value` is either a bare integer string (seconds) or an integer with
    a one-letter unit suffix from _time_units ('s', 'm', 'h', 'd').
    Returns the duration in seconds; raises OptionValueError for
    anything else (including the empty string and unknown suffixes).
    """
    try:
        if value[-1].isdigit():
            return int(value)
        else:
            return int(value[:-1]) * _time_units[value[-1]]
    # BUG FIX: the original clause read "except ValueError, IndexError:",
    # which in Python 2 catches only ValueError and binds it to the name
    # IndexError.  Use a tuple so both are caught; KeyError is included
    # so an unknown unit suffix is also reported as an invalid duration.
    except (ValueError, IndexError, KeyError):
        raise OptionValueError(
            'option %s: invalid duration: %r' % (opt, value))
class DurationOption(Option):
    # Option subclass registering the custom "duration" type; the actual
    # validation/conversion is done by the _check_duration function.
    TYPES = Option.TYPES + ('duration',)
    TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER)
    TYPE_CHECKER['duration'] = _check_duration
class TestDefaultValues(BaseTest):
    """Default-value handling: per-option defaults, set_defaults(), and
    processing of string defaults through the option's type checker."""

    def setUp(self):
        self.parser = OptionParser()
        self.parser.add_option("-v", "--verbose", default=True)
        self.parser.add_option("-q", "--quiet", dest='verbose')
        self.parser.add_option("-n", type="int", default=37)
        self.parser.add_option("-m", type="int")
        self.parser.add_option("-s", default="foo")
        self.parser.add_option("-t")
        self.parser.add_option("-u", default=None)
        self.expected = { 'verbose': True,
                          'n': 37,
                          'm': None,
                          's': "foo",
                          't': None,
                          'u': None }

    def test_basic_defaults(self):
        self.assertEqual(self.parser.get_default_values(), self.expected)

    def test_mixed_defaults_post(self):
        # set_defaults() after add_option() wins.
        self.parser.set_defaults(n=42, m=-100)
        self.expected.update({'n': 42, 'm': -100})
        self.assertEqual(self.parser.get_default_values(), self.expected)

    def test_mixed_defaults_pre(self):
        # A per-option default= overrides an earlier set_defaults() value.
        self.parser.set_defaults(x="barf", y="blah")
        self.parser.add_option("-x", default="frob")
        self.parser.add_option("-y")
        self.expected.update({'x': "frob", 'y': "blah"})
        self.assertEqual(self.parser.get_default_values(), self.expected)

        self.parser.remove_option("-y")
        self.parser.add_option("-y", default=None)
        self.expected.update({'y': None})
        self.assertEqual(self.parser.get_default_values(), self.expected)

    def test_process_default(self):
        # String defaults are run through the type checker ("6m" -> 360)
        # unless set_process_default_values(False) turns that off.
        self.parser.option_class = DurationOption
        self.parser.add_option("-d", type="duration", default=300)
        self.parser.add_option("-e", type="duration", default="6m")
        self.parser.set_defaults(n="42")
        self.expected.update({'d': 300, 'e': 360, 'n': 42})
        self.assertEqual(self.parser.get_default_values(), self.expected)

        self.parser.set_process_default_values(False)
        self.expected.update({'d': 300, 'e': "6m", 'n': "42"})
        self.assertEqual(self.parser.get_default_values(), self.expected)
class TestProgName(BaseTest):
    """
    Test that %prog expands to the right thing in usage, version,
    and help strings.
    """

    def assertUsage(self, parser, expected_usage):
        self.assertEqual(parser.get_usage(), expected_usage)

    def assertVersion(self, parser, expected_version):
        self.assertEqual(parser.get_version(), expected_version)

    def test_default_progname(self):
        # Make sure that program name taken from sys.argv[0] by default.
        save_argv = sys.argv[:]
        try:
            sys.argv[0] = os.path.join("foo", "bar", "baz.py")
            parser = OptionParser("usage: %prog ...", version="%prog 1.2")
            expected_usage = "usage: baz.py ...\n"
            self.assertUsage(parser, expected_usage)
            self.assertVersion(parser, "baz.py 1.2")
            self.assertHelp(parser,
                            expected_usage + "\n" +
                            "options:\n"
                            "  --version   show program's version number and exit\n"
                            "  -h, --help  show this help message and exit\n")
        finally:
            # Always restore the real argv, even on assertion failure.
            sys.argv[:] = save_argv

    def test_custom_progname(self):
        parser = OptionParser(prog="thingy",
                              version="%prog 0.1",
                              usage="%prog arg arg")
        parser.remove_option("-h")
        parser.remove_option("--version")
        expected_usage = "usage: thingy arg arg\n"
        self.assertUsage(parser, expected_usage)
        self.assertVersion(parser, "thingy 0.1")
        self.assertHelp(parser, expected_usage + "\n")
class TestExpandDefaults(BaseTest):
    """Expansion of the %default marker (and custom default_tag) in
    option help strings."""

    def setUp(self):
        self.parser = OptionParser(prog="test")
        self.help_prefix = """\
usage: test [options]

options:
  -h, --help            show this help message and exit
"""
        self.file_help = "read from FILE [default: %default]"
        self.expected_help_file = self.help_prefix + \
            "  -f FILE, --file=FILE  read from FILE [default: foo.txt]\n"
        self.expected_help_none = self.help_prefix + \
            "  -f FILE, --file=FILE  read from FILE [default: none]\n"

    def test_option_default(self):
        self.parser.add_option("-f", "--file",
                               default="foo.txt",
                               help=self.file_help)
        self.assertHelp(self.parser, self.expected_help_file)

    def test_parser_default_1(self):
        self.parser.add_option("-f", "--file",
                               help=self.file_help)
        self.parser.set_default('file', "foo.txt")
        self.assertHelp(self.parser, self.expected_help_file)

    def test_parser_default_2(self):
        self.parser.add_option("-f", "--file",
                               help=self.file_help)
        self.parser.set_defaults(file="foo.txt")
        self.assertHelp(self.parser, self.expected_help_file)

    def test_no_default(self):
        # No default at all renders as "none" (lowercase).
        self.parser.add_option("-f", "--file",
                               help=self.file_help)
        self.assertHelp(self.parser, self.expected_help_none)

    def test_default_none_1(self):
        self.parser.add_option("-f", "--file",
                               default=None,
                               help=self.file_help)
        self.assertHelp(self.parser, self.expected_help_none)

    def test_default_none_2(self):
        self.parser.add_option("-f", "--file",
                               help=self.file_help)
        self.parser.set_defaults(file=None)
        self.assertHelp(self.parser, self.expected_help_none)

    def test_float_default(self):
        self.parser.add_option(
            "-p", "--prob",
            help="blow up with probability PROB [default: %default]")
        self.parser.set_defaults(prob=0.43)
        expected_help = self.help_prefix + \
            "  -p PROB, --prob=PROB  blow up with probability PROB [default: 0.43]\n"
        self.assertHelp(self.parser, expected_help)

    def test_alt_expand(self):
        # A custom default_tag is expanded just like %default.
        self.parser.add_option("-f", "--file",
                               default="foo.txt",
                               help="read from FILE [default: *DEFAULT*]")
        self.parser.formatter.default_tag = "*DEFAULT*"
        self.assertHelp(self.parser, self.expected_help_file)

    def test_no_expand(self):
        # default_tag = None disables expansion; %default passes through.
        self.parser.add_option("-f", "--file",
                               default="foo.txt",
                               help="read from %default file")
        self.parser.formatter.default_tag = None
        expected_help = self.help_prefix + \
            "  -f FILE, --file=FILE  read from %default file\n"
        self.assertHelp(self.parser, expected_help)
# -- Test parser.parse_args() ------------------------------------------
class TestStandard(BaseTest):
    """Core parse_args() behaviour with a string option (-a), an int
    option (-b/--boo) and an append option (--foo).  (Some methods below
    end with a stray trailing comma after assertParseOK(); it merely
    wraps the result in a tuple and is harmless, kept for fidelity.)"""

    def setUp(self):
        options = [make_option("-a", type="string"),
                   make_option("-b", "--boo", type="int", dest='boo'),
                   make_option("--foo", action="append")]

        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               option_list=options)

    def test_required_value(self):
        self.assertParseFail(["-a"], "-a option requires an argument")

    def test_invalid_integer(self):
        self.assertParseFail(["-b", "5x"],
                             "option -b: invalid integer value: '5x'")

    def test_no_such_option(self):
        self.assertParseFail(["--boo13"], "no such option: --boo13")

    def test_long_invalid_integer(self):
        self.assertParseFail(["--boo=x5"],
                             "option --boo: invalid integer value: 'x5'")

    def test_empty(self):
        self.assertParseOK([], {'a': None, 'boo': None, 'foo': None}, [])

    def test_shortopt_empty_longopt_append(self):
        self.assertParseOK(["-a", "", "--foo=blah", "--foo="],
                           {'a': "", 'boo': None, 'foo': ["blah", ""]},
                           [])

    def test_long_option_append(self):
        self.assertParseOK(["--foo", "bar", "--foo", "", "--foo=x"],
                           {'a': None,
                            'boo': None,
                            'foo': ["bar", "", "x"]},
                           [])

    def test_option_argument_joined(self):
        self.assertParseOK(["-abc"],
                           {'a': "bc", 'boo': None, 'foo': None},
                           [])

    def test_option_argument_split(self):
        self.assertParseOK(["-a", "34"],
                           {'a': "34", 'boo': None, 'foo': None},
                           [])

    def test_option_argument_joined_integer(self):
        self.assertParseOK(["-b34"],
                           {'a': None, 'boo': 34, 'foo': None},
                           [])

    def test_option_argument_split_negative_integer(self):
        self.assertParseOK(["-b", "-5"],
                           {'a': None, 'boo': -5, 'foo': None},
                           [])

    def test_long_option_argument_joined(self):
        self.assertParseOK(["--boo=13"],
                           {'a': None, 'boo': 13, 'foo': None},
                           [])

    def test_long_option_argument_split(self):
        self.assertParseOK(["--boo", "111"],
                           {'a': None, 'boo': 111, 'foo': None},
                           [])

    def test_long_option_short_option(self):
        self.assertParseOK(["--foo=bar", "-axyz"],
                           {'a': 'xyz', 'boo': None, 'foo': ["bar"]},
                           [])

    def test_abbrev_long_option(self):
        # Unambiguous long-option prefixes are accepted.
        self.assertParseOK(["--f=bar", "-axyz"],
                           {'a': 'xyz', 'boo': None, 'foo': ["bar"]},
                           [])

    def test_defaults(self):
        (options, args) = self.parser.parse_args([])
        defaults = self.parser.get_default_values()
        self.assertEqual(vars(defaults), vars(options))

    def test_ambiguous_option(self):
        self.parser.add_option("--foz", action="store",
                               type="string", dest="foo")
        # Build the candidate list the same way optparse does, so the
        # (unspecified) dict ordering matches the error message.
        possibilities = ", ".join({"--foz": None, "--foo": None}.keys())
        self.assertParseFail(["--f=bar"],
                             "ambiguous option: --f (%s?)" % possibilities)

    def test_short_and_long_option_split(self):
        self.assertParseOK(["-a", "xyz", "--foo", "bar"],
                           {'a': 'xyz', 'boo': None, 'foo': ["bar"]},
                           []),

    def test_short_option_split_long_option_append(self):
        self.assertParseOK(["--foo=bar", "-b", "123", "--foo", "baz"],
                           {'a': None, 'boo': 123, 'foo': ["bar", "baz"]},
                           [])

    def test_short_option_split_one_positional_arg(self):
        self.assertParseOK(["-a", "foo", "bar"],
                           {'a': "foo", 'boo': None, 'foo': None},
                           ["bar"]),

    def test_short_option_consumes_separator(self):
        # "--" directly after an option wanting a value becomes the value.
        self.assertParseOK(["-a", "--", "foo", "bar"],
                           {'a': "--", 'boo': None, 'foo': None},
                           ["foo", "bar"]),

    def test_short_option_joined_and_separator(self):
        self.assertParseOK(["-ab", "--", "--foo", "bar"],
                           {'a': "b", 'boo': None, 'foo': None},
                           ["--foo", "bar"]),

    def test_invalid_option_becomes_positional_arg(self):
        # A lone "-" is not an option; it is kept as a positional arg.
        self.assertParseOK(["-ab", "-", "--foo", "bar"],
                           {'a': "b", 'boo': None, 'foo': ["bar"]},
                           ["-"])

    def test_no_append_versus_append(self):
        self.assertParseOK(["-b3", "-b", "5", "--foo=bar", "--foo", "baz"],
                           {'a': None, 'boo': 5, 'foo': ["bar", "baz"]},
                           [])

    def test_option_consumes_optionlike_string(self):
        self.assertParseOK(["-a", "-b3"],
                           {'a': "-b3", 'boo': None, 'foo': None},
                           [])
class TestBool(BaseTest):
    """store_true/store_false actions sharing one dest, with a
    deliberately falsy non-bool default ('')."""

    def setUp(self):
        options = [make_option("-v",
                               "--verbose",
                               action="store_true",
                               dest="verbose",
                               default=''),
                   make_option("-q",
                               "--quiet",
                               action="store_false",
                               dest="verbose")]
        self.parser = OptionParser(option_list = options)

    def test_bool_default(self):
        self.assertParseOK([],
                           {'verbose': ''},
                           [])

    def test_bool_false(self):
        (options, args) = self.assertParseOK(["-q"],
                                             {'verbose': 0},
                                             [])
        # Only check identity on Pythons that actually have bool.
        if hasattr(__builtins__, 'False'):
            self.failUnless(options.verbose is False)

    def test_bool_true(self):
        (options, args) = self.assertParseOK(["-v"],
                                             {'verbose': 1},
                                             [])
        if hasattr(__builtins__, 'True'):
            self.failUnless(options.verbose is True)

    def test_bool_flicker_on_and_off(self):
        # The last occurrence wins.
        self.assertParseOK(["-qvq", "-q", "-v"],
                           {'verbose': 1},
                           [])
class TestChoice(BaseTest):
    """The "choice" type: valid/invalid values and implicit type
    inference when only choices= is supplied."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.parser.add_option("-c", action="store", type="choice",
                               dest="choice", choices=["one", "two", "three"])

    def test_valid_choice(self):
        self.assertParseOK(["-c", "one", "xyz"],
                           {'choice': 'one'},
                           ["xyz"])

    def test_invalid_choice(self):
        self.assertParseFail(["-c", "four", "abc"],
                             "option -c: invalid choice: 'four' "
                             "(choose from 'one', 'two', 'three')")

    def test_add_choice_option(self):
        # Supplying choices= alone implies type="choice", action="store".
        self.parser.add_option("-d", "--default",
                               choices=["four", "five", "six"])
        opt = self.parser.get_option("-d")
        self.assertEqual(opt.type, "choice")
        self.assertEqual(opt.action, "store")
class TestCount(BaseTest):
    """The "count" action, interacting with an int option and a
    store_const option that all share dest="verbose"."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.v_opt = make_option("-v", action="count", dest="verbose")
        self.parser.add_option(self.v_opt)
        self.parser.add_option("--verbose", type="int", dest="verbose")
        self.parser.add_option("-q", "--quiet",
                               action="store_const", dest="verbose", const=0)

    def test_empty(self):
        self.assertParseOK([], {'verbose': None}, [])

    def test_count_one(self):
        self.assertParseOK(["-v"], {'verbose': 1}, [])

    def test_count_three(self):
        self.assertParseOK(["-vvv"], {'verbose': 3}, [])

    def test_count_three_apart(self):
        self.assertParseOK(["-v", "-v", "-v"], {'verbose': 3}, [])

    def test_count_override_amount(self):
        self.assertParseOK(["-vvv", "--verbose=2"], {'verbose': 2}, [])

    def test_count_override_quiet(self):
        self.assertParseOK(["-vvv", "--verbose=2", "-q"], {'verbose': 0}, [])

    def test_count_overriding(self):
        # Counting resumes from whatever value the dest currently holds.
        self.assertParseOK(["-vvv", "--verbose=2", "-q", "-v"],
                           {'verbose': 1}, [])

    def test_count_interspersed_args(self):
        self.assertParseOK(["--quiet", "3", "-v"],
                           {'verbose': 1},
                           ["3"])

    def test_count_no_interspersed_args(self):
        # With interspersed args disabled, parsing stops at "3".
        self.parser.disable_interspersed_args()
        self.assertParseOK(["--quiet", "3", "-v"],
                           {'verbose': 0},
                           ["3", "-v"])

    def test_count_no_such_option(self):
        # "-q" takes no value, so the "3" in "-q3" is read as option -3.
        self.assertParseFail(["-q3", "-v"], "no such option: -3")

    def test_count_option_no_value(self):
        self.assertParseFail(["--quiet=3", "-v"],
                             "--quiet option does not take a value")

    def test_count_with_default(self):
        self.parser.set_default('verbose', 0)
        self.assertParseOK([], {'verbose':0}, [])

    def test_count_overriding_default(self):
        self.parser.set_default('verbose', 0)
        self.assertParseOK(["-vvv", "--verbose=2", "-q", "-v"],
                           {'verbose': 1}, [])
class TestMultipleArgs(BaseTest):
    """nargs=3: a single option consuming three float arguments."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.parser.add_option("-p", "--point",
                               action="store", nargs=3, type="float", dest="point")

    def test_nargs_with_positional_args(self):
        self.assertParseOK(["foo", "-p", "1", "2.5", "-4.3", "xyz"],
                           {'point': (1.0, 2.5, -4.3)},
                           ["foo", "xyz"])

    def test_nargs_long_opt(self):
        # Negative numbers are consumed as values, not mistaken for options.
        self.assertParseOK(["--point", "-1", "2.5", "-0", "xyz"],
                           {'point': (-1.0, 2.5, -0.0)},
                           ["xyz"])

    def test_nargs_invalid_float_value(self):
        self.assertParseFail(["-p", "1.0", "2x", "3.5"],
                             "option -p: "
                             "invalid floating-point value: '2x'")

    def test_nargs_required_values(self):
        self.assertParseFail(["--point", "1.0", "3.5"],
                             "--point option requires 3 arguments")
class TestMultipleArgsAppend(BaseTest):
    """nargs > 1 combined with action="append": each occurrence appends
    one tuple of converted values."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.parser.add_option("-p", "--point", action="store", nargs=3,
                               type="float", dest="point")
        self.parser.add_option("-f", "--foo", action="append", nargs=2,
                               type="int", dest="foo")

    def test_nargs_append(self):
        self.assertParseOK(["-f", "4", "-3", "blah", "--foo", "1", "666"],
                           {'point': None, 'foo': [(4, -3), (1, 666)]},
                           ["blah"])

    def test_nargs_append_required_values(self):
        # "-f4,3" supplies only one joined argument where two are needed.
        self.assertParseFail(["-f4,3"],
                             "-f option requires 2 arguments")

    def test_nargs_append_simple(self):
        self.assertParseOK(["--foo=3", "4"],
                           {'point': None, 'foo':[(3, 4)]},
                           [])
class TestVersion(BaseTest):
    """--version output (with %prog expansion) and its absence when no
    version string was given."""

    def test_version(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               version="%prog 0.1")
        save_argv = sys.argv[:]
        try:
            sys.argv[0] = os.path.join(os.curdir, "foo", "bar")
            self.assertOutput(["--version"], "bar 0.1\n")
        finally:
            # Restore argv no matter what the assertion did.
            sys.argv[:] = save_argv

    def test_no_version(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        self.assertParseFail(["--version"],
                             "no such option: --version")
# -- Test conflicting default values and parser.parse_args() -----------
class TestConflictingDefaults(BaseTest):
    """Conflicting default values: the last one should win."""

    def setUp(self):
        self.parser = OptionParser(option_list=[
            make_option("-v", action="store_true", dest="verbose", default=1)])

    def test_conflict_default(self):
        self.parser.add_option("-q", action="store_false", dest="verbose",
                               default=0)
        self.assertParseOK([], {'verbose': 0}, [])

    def test_conflict_default_none(self):
        # Even an explicit None overrides the earlier default of 1.
        self.parser.add_option("-q", action="store_false", dest="verbose",
                               default=None)
        self.assertParseOK([], {'verbose': None}, [])
class TestOptionGroup(BaseTest):
    """OptionGroup creation, validation, and manipulation."""

    def setUp(self):
        self.parser = OptionParser(usage=SUPPRESS_USAGE)

    def test_option_group_create_instance(self):
        group = OptionGroup(self.parser, "Spam")
        self.parser.add_option_group(group)
        group.add_option("--spam", action="store_true",
                         help="spam spam spam spam")
        self.assertParseOK(["--spam"], {'spam': 1}, [])

    def test_add_group_no_group(self):
        self.assertTypeError(self.parser.add_option_group,
                             "not an OptionGroup instance: None", None)

    def test_add_group_invalid_arguments(self):
        self.assertTypeError(self.parser.add_option_group,
                             "invalid arguments", None, None)

    def test_add_group_wrong_parser(self):
        group = OptionGroup(self.parser, "Spam")
        group.parser = OptionParser()
        self.assertRaises(self.parser.add_option_group, (group,), None,
                          ValueError, "invalid OptionGroup (wrong parser)")

    def test_group_manipulate(self):
        group = self.parser.add_option_group("Group 2",
                                             description="Some more options")
        group.set_title("Bacon")
        group.add_option("--bacon", type="int")
        # BUG FIX: the original called self.assert_(result, group), which
        # passes `group` as the failure *message* and only checks result
        # truthiness.  The intent is clearly to compare the looked-up
        # group with the one created above.
        self.assertEqual(self.parser.get_option_group("--bacon"), group)
# -- Test extending and parser.parse_args() ----------------------------
class TestExtendAddTypes(BaseTest):
    """Extending Option with a new "file" type via TYPES/TYPE_CHECKER."""

    def setUp(self):
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               option_class=self.MyOption)
        self.parser.add_option("-a", None, type="string", dest="a")
        self.parser.add_option("-f", "--file", type="file", dest="file")

    class MyOption (Option):
        # Checkers are stored in the TYPE_CHECKER dict and called as plain
        # functions with (option, opt, value) -- no `self` binding occurs.
        def check_file (option, opt, value):
            if not os.path.exists(value):
                raise OptionValueError("%s: file does not exist" % value)
            elif not os.path.isfile(value):
                raise OptionValueError("%s: not a regular file" % value)
            return value

        TYPES = Option.TYPES + ("file",)
        TYPE_CHECKER = copy.copy(Option.TYPE_CHECKER)
        TYPE_CHECKER["file"] = check_file

    def test_extend_file(self):
        # Create a real file so the "file" type check passes.
        open(test_support.TESTFN, "w").close()
        self.assertParseOK(["--file", test_support.TESTFN, "-afoo"],
                           {'file': test_support.TESTFN, 'a': 'foo'},
                           [])
        os.unlink(test_support.TESTFN)

    def test_extend_file_nonexistent(self):
        self.assertParseFail(["--file", test_support.TESTFN, "-afoo"],
                             "%s: file does not exist" %
                             test_support.TESTFN)

    def test_file_irregular(self):
        # A directory exists but is not a regular file.
        os.mkdir(test_support.TESTFN)
        self.assertParseFail(["--file", test_support.TESTFN, "-afoo"],
                             "%s: not a regular file" %
                             test_support.TESTFN)
        os.rmdir(test_support.TESTFN)
class TestExtendAddActions(BaseTest):
    """Extending Option with a new "extend" action that splits its
    argument on commas and appends the pieces to the destination list."""

    def setUp(self):
        options = [self.MyOption("-a", "--apple", action="extend",
                                 type="string", dest="apple")]
        self.parser = OptionParser(option_list=options)

    class MyOption(Option):
        # Register "extend" as a typed, storing action.
        ACTIONS = Option.ACTIONS + ("extend",)
        STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
        TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)

        def take_action(self, action, dest, opt, value, values, parser):
            if action == "extend":
                lvalue = value.split(",")
                values.ensure_value(dest, []).extend(lvalue)
            else:
                # BUG FIX: delegate with optparse's real argument order
                # (value, values, parser); the original passed
                # (parser, value, values), which would scramble any
                # non-"extend" action handled by the base class.
                Option.take_action(self, action, dest, opt, value,
                                   values, parser)

    def test_extend_add_action(self):
        self.assertParseOK(["-afoo,bar", "--apple=blah"],
                           {'apple': ["foo", "bar", "blah"]},
                           [])

    def test_extend_add_action_normal(self):
        self.assertParseOK(["-a", "foo", "-abar", "--apple=x,y"],
                           {'apple': ["foo", "bar", "x", "y"]},
                           [])
# -- Test callbacks and parser.parse_args() ----------------------------
class TestCallback(BaseTest):
    """The "callback" action: a valueless callback (-x) and a typed one
    (-f/--file), plus a help-formatting regression test."""

    def setUp(self):
        options = [make_option("-x",
                               None,
                               action="callback",
                               callback=self.process_opt),
                   make_option("-f",
                               "--file",
                               action="callback",
                               callback=self.process_opt,
                               type="string",
                               dest="filename")]
        self.parser = OptionParser(option_list=options)

    # Shared callback: asserts on the exact arguments optparse passes in,
    # then records results on parser_.values.
    def process_opt(self, option, opt, value, parser_):
        if opt == "-x":
            self.assertEqual(option._short_opts, ["-x"])
            self.assertEqual(option._long_opts, [])
            self.assert_(parser_ is self.parser)
            self.assert_(value is None)
            self.assertEqual(vars(parser_.values), {'filename': None})

            parser_.values.x = 42
        elif opt == "--file":
            self.assertEqual(option._short_opts, ["-f"])
            self.assertEqual(option._long_opts, ["--file"])
            self.assert_(parser_ is self.parser)
            self.assertEqual(value, "foo")
            self.assertEqual(vars(parser_.values), {'filename': None, 'x': 42})

            setattr(parser_.values, option.dest, value)
        else:
            self.fail("Unknown option %r in process_opt." % opt)

    def test_callback(self):
        self.assertParseOK(["-x", "--file=foo"],
                           {'filename': "foo", 'x': 42},
                           [])

    def test_callback_help(self):
        # This test was prompted by SF bug #960515 -- the point is
        # not to inspect the help text, just to make sure that
        # format_help() doesn't crash.
        parser = OptionParser(usage=SUPPRESS_USAGE)
        parser.remove_option("-h")
        parser.add_option("-t", "--test", action="callback",
                          callback=lambda: None, type="string",
                          help="foo")

        expected_help = ("options:\n"
                         "  -t TEST, --test=TEST  foo\n")
        self.assertHelp(parser, expected_help)
class TestCallbackExtraArgs(BaseTest):
    """callback_args: extra positional arguments forwarded to the
    callback after the standard four."""

    def setUp(self):
        options = [make_option("-p", "--point", action="callback",
                               callback=self.process_tuple,
                               callback_args=(3, int), type="string",
                               dest="points", default=[])]
        self.parser = OptionParser(option_list=options)

    # `len` and `type` shadow builtins here; they are just the names of
    # the extra callback_args (3 and int), asserted below.
    def process_tuple (self, option, opt, value, parser_, len, type):
        self.assertEqual(len, 3)
        self.assert_(type is int)
        if opt == "-p":
            self.assertEqual(value, "1,2,3")
        elif opt == "--point":
            self.assertEqual(value, "4,5,6")
        value = tuple(map(type, value.split(",")))
        getattr(parser_.values, option.dest).append(value)

    def test_callback_extra_args(self):
        self.assertParseOK(["-p1,2,3", "--point", "4,5,6"],
                           {'points': [(1,2,3), (4,5,6)]},
                           [])
class TestCallbackMeddleArgs(BaseTest):
    """Callbacks that consume extra tokens from parser.rargs and push
    onto parser.largs (options -1 through -5)."""

    def setUp(self):
        options = [make_option(str(x), action="callback",
                               callback=self.process_n, dest='things')
                   for x in range(-1, -6, -1)]
        self.parser = OptionParser(option_list=options)

    # Callback that meddles in rargs, largs
    def process_n (self, option, opt, value, parser_):
        # option is -3, -5, etc.
        nargs = int(opt[1:])
        rargs = parser_.rargs
        if len(rargs) < nargs:
            self.fail("Expected %d arguments for %s option." % (nargs, opt))
        dest = parser_.values.ensure_value(option.dest, [])
        dest.append(tuple(rargs[0:nargs]))
        parser_.largs.append(nargs)
        # Remove the consumed tokens so the parser doesn't see them again.
        del rargs[0:nargs]

    def test_callback_meddle_args(self):
        self.assertParseOK(["-1", "foo", "-3", "bar", "baz", "qux"],
                           {'things': [("foo",), ("bar", "baz", "qux")]},
                           [1, 3])

    def test_callback_meddle_args_separator(self):
        # The callback grabs "--" as an ordinary token before the parser
        # can treat it as the end-of-options separator.
        self.assertParseOK(["-2", "foo", "--"],
                           {'things': [('foo', '--')]},
                           [2])
class TestCallbackManyArgs(BaseTest):
    """With nargs > 1 the callback receives a tuple of converted values."""
    def setUp(self):
        options = [make_option("-a", "--apple", action="callback", nargs=2,
                               callback=self.process_many, type="string"),
                   make_option("-b", "--bob", action="callback", nargs=3,
                               callback=self.process_many, type="int")]
        self.parser = OptionParser(option_list=options)
    def process_many(self, option, opt, value, parser_):
        # Each option string should deliver its own converted tuple.
        expected = {"-a": ("foo", "bar"),
                    "--apple": ("ding", "dong"),
                    "-b": (1, 2, 3),
                    "--bob": (-666, 42, 0)}
        if opt in expected:
            self.assertEqual(value, expected[opt])
    def test_many_args(self):
        self.assertParseOK(["-a", "foo", "bar", "--apple", "ding", "dong",
                            "-b", "1", "2", "3", "--bob", "-666", "42",
                            "0"],
                           {"apple": None, "bob": None},
                           [])
class TestCallbackCheckAbbrev(BaseTest):
    """An abbreviated long option is expanded before the callback sees it."""
    def setUp(self):
        parser = OptionParser()
        parser.add_option("--foo-bar", action="callback",
                          callback=self.check_abbrev)
        self.parser = parser
    def check_abbrev(self, option, opt, value, parser):
        # "--foo" on the command line must arrive fully expanded.
        self.assertEqual(opt, "--foo-bar")
    def test_abbrev_callback_expansion(self):
        self.assertParseOK(["--foo"], {}, [])
class TestCallbackVarArgs(BaseTest):
    """A callback that greedily consumes a variable number of arguments.

    Fix: removed the local ``done = 0`` in variable_args(), which was
    assigned but never read.
    """
    def setUp(self):
        options = [make_option("-a", type="int", nargs=2, dest="a"),
                   make_option("-b", action="store_true", dest="b"),
                   make_option("-c", "--callback", action="callback",
                               callback=self.variable_args, dest="c")]
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               option_list=options)
    def variable_args(self, option, opt, value, parser):
        """Consume everything up to the next option-like argument."""
        self.assert_(value is None)
        value = []
        rargs = parser.rargs
        while rargs:
            arg = rargs[0]
            # Stop on "--foo" or "-x" style arguments; a bare "-" or "--"
            # fails both tests below and is consumed as a value.
            if ((arg[:2] == "--" and len(arg) > 2) or
                (arg[:1] == "-" and len(arg) > 1 and arg[1] != "-")):
                break
            else:
                value.append(arg)
                del rargs[0]
        setattr(parser.values, option.dest, value)
    def test_variable_args(self):
        self.assertParseOK(["-a3", "-5", "--callback", "foo", "bar"],
                           {'a': (3, -5), 'b': None, 'c': ["foo", "bar"]},
                           [])
    def test_consume_separator_stop_at_option(self):
        self.assertParseOK(["-c", "37", "--", "xxx", "-b", "hello"],
                           {'a': None,
                            'b': True,
                            'c': ["37", "--", "xxx"]},
                           ["hello"])
    def test_positional_arg_and_variable_args(self):
        self.assertParseOK(["hello", "-c", "foo", "-", "bar"],
                           {'a': None,
                            'b': None,
                            'c':["foo", "-", "bar"]},
                           ["hello"])
    def test_stop_at_option(self):
        self.assertParseOK(["-c", "foo", "-b"],
                           {'a': None, 'b': True, 'c': ["foo"]},
                           [])
    def test_stop_at_invalid_option(self):
        self.assertParseFail(["-c", "3", "-5", "-a"], "no such option: -5")
# -- Test conflict handling and parser.parse_args() --------------------
class ConflictBase(BaseTest):
    """Shared fixture: a parser that already owns -v/--verbose."""
    def setUp(self):
        verbose = make_option("-v", "--verbose", action="count",
                              dest="verbose", help="increment verbosity")
        self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE,
                                               option_list=[verbose])
    def show_version(self, option, opt, value, parser):
        # Callback used by subclasses when they register -v/--version.
        parser.values.show_version = 1
class TestConflict(ConflictBase):
    """Use the default conflict resolution for Optik 1.2: error."""
    def assert_conflict_error(self, func):
        # Re-registering "-v" must raise OptionConflictError regardless of
        # whether it is added to the parser or to a group.
        attrs = {'action': "callback",
                 'callback': self.show_version,
                 'help': "show version"}
        err = self.assertRaises(
            func, ("-v", "--version"), attrs,
            OptionConflictError,
            "option -v/--version: conflicting option string(s): -v")
        self.assertEqual(err.msg, "conflicting option string(s): -v")
        self.assertEqual(err.option_id, "-v/--version")
    def test_conflict_error(self):
        self.assert_conflict_error(self.parser.add_option)
    def test_conflict_error_group(self):
        group = OptionGroup(self.parser, "Group 1")
        self.assert_conflict_error(group.add_option)
    def test_no_such_conflict_handler(self):
        self.assertRaises(
            self.parser.set_conflict_handler, ('foo',), None,
            ValueError, "invalid conflict_resolution value 'foo'")
class TestConflictResolve(ConflictBase):
    """With handler "resolve", contested option strings move to the newcomer."""
    def setUp(self):
        ConflictBase.setUp(self)
        self.parser.set_conflict_handler("resolve")
        self.parser.add_option("-v", "--version", action="callback",
                               callback=self.show_version, help="show version")
    def test_conflict_resolve(self):
        get = self.parser.get_option
        v_opt = get("-v")
        verbose_opt = get("--verbose")
        version_opt = get("--version")
        # "-v" now belongs to the --version option...
        self.assert_(v_opt is version_opt)
        self.assert_(v_opt is not verbose_opt)
        self.assertEqual(v_opt._long_opts, ["--version"])
        self.assertEqual(version_opt._short_opts, ["-v"])
        self.assertEqual(version_opt._long_opts, ["--version"])
        # ...and --verbose has lost its short form.
        self.assertEqual(verbose_opt._short_opts, [])
        self.assertEqual(verbose_opt._long_opts, ["--verbose"])
    def test_conflict_resolve_help(self):
        self.assertOutput(["-h"], """\
options:
  --verbose increment verbosity
  -h, --help show this help message and exit
  -v, --version show version
""")
    def test_conflict_resolve_short_opt(self):
        self.assertParseOK(["-v"],
                           {'verbose': None, 'show_version': 1},
                           [])
    def test_conflict_resolve_long_opt(self):
        self.assertParseOK(["--verbose"],
                           {'verbose': 1},
                           [])
    def test_conflict_resolve_long_opts(self):
        self.assertParseOK(["--verbose", "--version"],
                           {'verbose': 1, 'show_version': 1},
                           [])
class TestConflictOverride(BaseTest):
    """Re-adding identical option strings under "resolve" replaces the option."""
    def setUp(self):
        parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
        parser.set_conflict_handler("resolve")
        parser.add_option("-n", "--dry-run",
                          action="store_true", dest="dry_run",
                          help="don't do anything")
        parser.add_option("--dry-run", "-n",
                          action="store_const", const=42, dest="dry_run",
                          help="dry run mode")
        self.parser = parser
    def test_conflict_override_opts(self):
        # Both strings were taken over by the second registration.
        opt = self.parser.get_option("--dry-run")
        self.assertEqual(opt._short_opts, ["-n"])
        self.assertEqual(opt._long_opts, ["--dry-run"])
    def test_conflict_override_help(self):
        self.assertOutput(["-h"], """\
options:
  -h, --help show this help message and exit
  -n, --dry-run dry run mode
""")
    def test_conflict_override_args(self):
        # The store_const action (const=42) wins over the old store_true.
        self.assertParseOK(["-n"], {'dry_run': 42}, [])
# -- Other testing. ----------------------------------------------------
_expected_help_basic = """\
usage: bar.py [options]
options:
-a APPLE throw APPLEs at basket
-b NUM, --boo=NUM shout "boo!" NUM times (in order to frighten away all the
evil spirits that cause trouble and mayhem)
--foo=FOO store FOO in the foo list for later fooing
-h, --help show this help message and exit
"""
_expected_help_long_opts_first = """\
usage: bar.py [options]
options:
-a APPLE throw APPLEs at basket
--boo=NUM, -b NUM shout "boo!" NUM times (in order to frighten away all the
evil spirits that cause trouble and mayhem)
--foo=FOO store FOO in the foo list for later fooing
--help, -h show this help message and exit
"""
_expected_help_title_formatter = """\
Usage
=====
bar.py [options]
options
=======
-a APPLE throw APPLEs at basket
--boo=NUM, -b NUM shout "boo!" NUM times (in order to frighten away all the
evil spirits that cause trouble and mayhem)
--foo=FOO store FOO in the foo list for later fooing
--help, -h show this help message and exit
"""
_expected_help_short_lines = """\
usage: bar.py [options]
options:
-a APPLE throw APPLEs at basket
-b NUM, --boo=NUM shout "boo!" NUM times (in order to
frighten away all the evil spirits
that cause trouble and mayhem)
--foo=FOO store FOO in the foo list for later
fooing
-h, --help show this help message and exit
"""
class TestHelp(BaseTest):
    """Checks of formatted --help output under various formatters and widths."""
    def setUp(self):
        # Default terminal width of 80 columns.
        self.parser = self.make_parser(80)
    def make_parser(self, columns):
        """Build a parser whose help output will be wrapped at *columns*."""
        options = [
            make_option("-a", type="string", dest='a',
                        metavar="APPLE", help="throw APPLEs at basket"),
            make_option("-b", "--boo", type="int", dest='boo',
                        metavar="NUM",
                        help=
                        "shout \"boo!\" NUM times (in order to frighten away "
                        "all the evil spirits that cause trouble and mayhem)"),
            make_option("--foo", action="append", type="string", dest='foo',
                        help="store FOO in the foo list for later fooing"),
            ]
        # optparse reads $COLUMNS when the parser is constructed.
        os.environ['COLUMNS'] = str(columns)
        return InterceptingOptionParser(option_list=options)
    def assertHelpEquals(self, expected_output):
        save_argv = sys.argv[:]
        try:
            # Make optparse believe bar.py is being executed.
            sys.argv[0] = os.path.join("foo", "bar.py")
            self.assertOutput(["-h"], expected_output)
        finally:
            # Restore argv so later tests see the real program name.
            sys.argv[:] = save_argv
    def test_help(self):
        self.assertHelpEquals(_expected_help_basic)
    def test_help_old_usage(self):
        # An explicit "usage: %prog ..." string yields the same basic help.
        self.parser.set_usage("usage: %prog [options]")
        self.assertHelpEquals(_expected_help_basic)
    def test_help_long_opts_first(self):
        self.parser.formatter.short_first = 0
        self.assertHelpEquals(_expected_help_long_opts_first)
    def test_help_title_formatter(self):
        self.parser.formatter = TitledHelpFormatter()
        self.assertHelpEquals(_expected_help_title_formatter)
    def test_wrap_columns(self):
        # Ensure that wrapping respects $COLUMNS environment variable.
        # Need to reconstruct the parser, since that's the only time
        # we look at $COLUMNS.
        self.parser = self.make_parser(60)
        self.assertHelpEquals(_expected_help_short_lines)
    def test_help_description_groups(self):
        self.parser.set_description(
            "This is the program description for %prog. %prog has "
            "an option group as well as single options.")
        group = OptionGroup(
            self.parser, "Dangerous Options",
            "Caution: use of these options is at your own risk. "
            "It is believed that some of them bite.")
        group.add_option("-g", action="store_true", help="Group option.")
        self.parser.add_option_group(group)
        self.assertHelpEquals("""\
usage: bar.py [options]
This is the program description for bar.py. bar.py has an option group as
well as single options.
options:
  -a APPLE throw APPLEs at basket
  -b NUM, --boo=NUM shout "boo!" NUM times (in order to frighten away all the
evil spirits that cause trouble and mayhem)
  --foo=FOO store FOO in the foo list for later fooing
  -h, --help show this help message and exit
Dangerous Options:
  Caution: use of these options is at your own risk. It is believed
  that some of them bite.
  -g Group option.
""")
class TestMatchAbbrev(BaseTest):
    """Tests for optparse's internal _match_abbrev() helper."""
    def test_match_abbrev(self):
        # An exact match wins even when other words share the prefix.
        wordmap = {"--foz": None,
                   "--foo": None,
                   "--fie": None,
                   "--f": None}
        self.assertEqual(_match_abbrev("--f", wordmap), "--f")
    def test_match_abbrev_error(self):
        # Without an exact match, an ambiguous prefix raises BadOptionError.
        s = "--f"
        wordmap = {"--foz": None, "--foo": None, "--fie": None}
        possibilities = ", ".join(wordmap.keys())
        self.assertRaises(
            _match_abbrev, (s, wordmap), None,
            BadOptionError, "ambiguous option: --f (%s?)" % possibilities)
def _testclasses():
    """Return every Test* class defined in this module."""
    this_module = sys.modules[__name__]
    return [getattr(this_module, name)
            for name in dir(this_module)
            if name.startswith('Test')]
def suite():
    """Collect every test class in this module into one TestSuite."""
    # Use a distinct local name; the original shadowed the function name.
    result = unittest.TestSuite()
    for testclass in _testclasses():
        result.addTest(unittest.makeSuite(testclass))
    return result
def test_main():
    # Entry point used by Python's regression-test framework.
    test_support.run_suite(suite())
# Running the file directly uses unittest's own discovery/CLI rather than
# the regression-test driver above.
if __name__ == '__main__':
    unittest.main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides infrastructure to verify the correctness of
the command stream produced.
Currently it will invoke Vela to generate a Vela-optimized TFLite file,
in which the command stream is contained as a custom operator.
This module includes methods to parse the custom operator, extract
the command stream, and perform an equivalency check for single-operator
test cases.
"""
from typing import List
import os
import struct
import numpy
import math
from enum import IntEnum
import tensorflow as tf
from ethosu.vela.register_command_stream_generator import CmdMode
from ethosu.vela.register_command_stream_generator import cmd0
from ethosu.vela.register_command_stream_generator import cmd1
import tvm
from tvm import relay
import tvm.relay.backend.contrib.ethosu.op as ethosu_ops
from tvm.topi.nn.utils import get_pad_tuple
from tests.python.relay.aot.aot_test_utils import (
AOTCompiledTestModel,
AOTDataLinkage,
AOTTestModel,
AOTTestRunner,
compile_models,
run_and_check,
)
class AttachType(IntEnum):
    """Integer codes for schedule attach types.

    NOTE(review): the values appear to mirror TVM's internal AttachType
    enumeration -- confirm against tvm's te.schedule before relying on them.
    """
    kGroupRoot = 1
    kInline = 2
    kInlinedAlready = 3
    kScope = 4
    kScanUpdate = 5
class VelaArtifacts:
    """Container for artifacts produced by a Vela compilation."""
    def __init__(self):
        # Per-operator command-stream / flash / SRAM blobs, plus the set
        # of NPU operator names encountered.
        self.cs = {}
        self.flash = {}
        self.sram = {}
        self.npu_ops = set()
def print_payload(payload):
    """Pretty-print a serialized command stream, one command per line."""
    for raw_cmd in deserialize_command_stream(payload):
        cmd, val = parse_cmd(raw_cmd)
        # Left-align the command name in a 40-character column.
        print(str(cmd).ljust(40) + str(val))
def parse_cmd(binary_cmd):
    """Decode one serialized command into a (command, value) pair."""
    word = binary_cmd[0]
    code = word & 0x0000FFFF  # lower 16 bits: command code
    param = word >> 16  # upper 16 bits: inline parameter
    if CmdMode(code & CmdMode.Mask) == CmdMode.Payload32:
        # Payload32 commands carry their value in the second word.
        return cmd1(code & CmdMode.CmdOpMask), binary_cmd[1]
    return cmd0(code & CmdMode.CmdOpMask), param
def check_cmms_equivalency(vela_cmd, vela_value, tvm_value, ignore_cmds=None):
    """Raise RuntimeError when the two values differ for a non-ignored command."""
    ignored = ignore_cmds if ignore_cmds is not None else []
    if vela_cmd in ignored or vela_value == tvm_value:
        return
    raise RuntimeError(
        "ValueMismatch :: vela={}, tvm={} for command:{}".format(
            vela_value, tvm_value, vela_cmd
        )
    )
def verify_cmms(cmms_tvm_blob, cmms_vela_blob):
    """Compare a TVM-generated command stream against Vela's, command by command.

    Raises RuntimeError on any mismatch that is not explicitly tolerated
    (absolute base addresses, and the primary IFM/OFM region/base special
    cases handled below).
    """
    vela_cmm = deserialize_command_stream(cmms_vela_blob)
    tvm_cmm = deserialize_command_stream(cmms_tvm_blob)
    cmms_zip = zip(vela_cmm, tvm_cmm)
    first_ifm_found = False
    # NOTE(review): last_ofm_found is never used below -- looks vestigial.
    last_ofm_found = False
    # Commands whose values legitimately differ between the two streams
    # (they hold absolute base addresses).
    ignore_commands = (
        cmd1.NPU_SET_DMA0_SRC,
        cmd1.NPU_SET_DMA0_DST,
        cmd1.NPU_SET_WEIGHT_BASE,
        cmd1.NPU_SET_OFM_BASE0,
        cmd1.NPU_SET_IFM_BASE0,
        cmd1.NPU_SET_SCALE_BASE,
    )
    ofm_region_params = []
    ofm_bases = []
    # NOTE(review): the loop variables shadow the outer vela_cmm/tvm_cmm lists.
    for vela_cmm, tvm_cmm in cmms_zip:
        vela_cmd, vela_value = parse_cmd(vela_cmm)
        tvm_cmd, tvm_value = parse_cmd(tvm_cmm)
        assert vela_cmd == tvm_cmd
        # The first IFM region could be different, but it needs to be 1 and 3.
        if vela_cmd == cmd0.NPU_SET_IFM_REGION and not first_ifm_found:
            if vela_value == 1 and tvm_value == 3:
                first_ifm_found = True
                continue
        if vela_cmd == cmd1.NPU_SET_IFM_BASE0 and not first_ifm_found:
            if tvm_value != 0:
                raise RuntimeError("ValueError :: tvm primary ifm base should be zero")
            continue
        # OFM regions should be cached to be checked later
        if vela_cmd == cmd0.NPU_SET_OFM_REGION:
            ofm_region_params.append((vela_value, tvm_value))
            continue
        # OFM bases should be cached to be checked later
        if vela_cmd == cmd1.NPU_SET_OFM_BASE0:
            ofm_bases.append((vela_value, tvm_value))
            continue
        check_cmms_equivalency(vela_cmd, vela_value, tvm_value, ignore_commands)
    # The last OFM region could be different but it should be 1 and 4.
    last_vela_ofm_region, last_tvm_ofm_region = ofm_region_params.pop(-1)
    if not (last_vela_ofm_region == 1 and last_tvm_ofm_region == 4):
        raise RuntimeError(
            "ValueMismatch :: vela={}, tvm={} for last ofm region it should be 1 and 4 respectively".format(
                last_vela_ofm_region, last_tvm_ofm_region
            )
        )
    # The rest of the OFM regions should be the same.
    # NOTE(review): vela_cmd is stale here -- it still holds the last command
    # seen in the loop above, so a mismatch message may name the wrong command.
    for vela_value, tvm_value in ofm_region_params:
        check_cmms_equivalency(vela_cmd, vela_value, tvm_value, ignore_commands)
    # The last OFM base should be zero for tvm
    _, last_tvm_ofm_base = ofm_bases.pop(-1)
    if not last_tvm_ofm_base == 0:
        raise RuntimeError("ValueError :: tvm primary ofm base should be zero")
def deserialize_command_stream(blob):
    """Unpack a little-endian u32 blob into [[code], [code, value], ...] commands."""
    assert isinstance(blob, bytes)
    words = struct.unpack("<{0}I".format(len(blob) // 4), blob)
    # Drop the 8-word header preceding the command words.
    words = words[8:]
    cmms = []
    idx = 0
    total = len(words)
    while idx < total:
        code = words[idx]
        idx += 1
        entry = [code]
        if CmdMode(code & CmdMode.Mask) == CmdMode.Payload32:
            # Payload32 commands occupy a second word holding the value.
            entry.append(words[idx])
            idx += 1
        cmms.append(entry)
    return cmms
def create_test_runner(accel="ethos-u55-256"):
    """Build an AOTTestRunner configured for the Corstone-300 reference system.

    The NPU MAC count is taken from the accelerator string's trailing
    "-<macs>" segment (e.g. "ethos-u55-256" -> "256").
    """
    file_dir = os.path.dirname(os.path.abspath(__file__))
    test_root = os.path.join(file_dir, "reference_system")
    ethosu_macs = accel[accel.rfind("-") + 1 :]
    return AOTTestRunner(
        makefile="corstone300",
        prologue="""
        uart_init();
        EthosuInit();
        struct ethosu_driver* ethos_u = ethosu_reserve_driver();
        """,
        epilogue="""
        ethosu_release_driver(ethos_u);
        """,
        includes=["uart.h", "ethosu_55.h", "ethosu_mod.h", "hard_fault.h"],
        parameters={"ETHOSU_TEST_ROOT": test_root, "NPU_VARIANT": ethosu_macs},
        pass_config={
            "relay.ext.ethos-u.options": {
                "accelerator_config": accel,
            }
        },
    )
def build_source(module, inputs, outputs, accel="ethos-u55-256", output_tolerance=0):
    """Compile *module* into AOT test sources targeting the given accelerator.

    Returns the result of compile_models() for a single AOTTestModel using
    the C interface API and 16-byte workspace alignment.
    """
    test_runner = create_test_runner(accel)
    return compile_models(
        models=AOTTestModel(
            module=module,
            inputs=inputs,
            outputs=outputs,
            output_tolerance=output_tolerance,
            extra_memory_in_bytes=0,
        ),
        interface_api="c",
        use_unpacked_api=True,
        workspace_byte_alignment=16,
        pass_config=test_runner.pass_config,
    )
def verify_source(
    models: List[AOTCompiledTestModel],
    accel="ethos-u55-256",
):
    """
    This method verifies the generated source from an NPU module by building it and running on an FVP.
    """
    interface_api = "c"
    test_runner = create_test_runner(accel)
    # Scratch data is placed in the dedicated "ethosu_scratch" section,
    # 16-byte aligned, matching the workspace alignment used at compile time.
    run_and_check(
        models,
        test_runner,
        interface_api,
        workspace_byte_alignment=16,
        data_linkage=AOTDataLinkage(section="ethosu_scratch", alignment=16),
    )
def flatten_numpy_data(data):
    """Flatten a numpy tensor into a single-dimensional array."""
    return numpy.reshape(data, [data.size])
class InputGenerator:
    """Generate deterministic random input tensors from a seeded RandomState."""
    def __init__(self, random_state):
        # numpy.random.RandomState seeded by the caller for reproducibility.
        self._random_state = random_state
    def generate(self, size, dtype):
        """Return a random tensor of the given shape and dtype.

        float32 values are drawn uniformly from [-1, 1); integer values span
        the full representable range of the dtype.
        """
        if dtype == numpy.float32:
            print("random float32")
            return self._random_state.uniform(-1, 1, size).astype(dtype)
        info = numpy.iinfo(dtype)
        # FIX: the message was passed C-style ("%d...", a, b) to print(),
        # which printed the raw format string; format it explicitly.
        print("random (u)int min=%d max=%d" % (info.min, info.max))
        # randint's upper bound is exclusive, hence max + 1.
        return self._random_state.randint(info.min, info.max + 1, size, dtype)
def generate_ref_data_tflite(model):
    """
    This method generates reference data by running the specified model on tflite with random input data.
    The random input data and generated output data are returned.
    """
    interpreter = tf.lite.Interpreter(model_content=model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Initialize random generators with a fixed seed to get deterministic results
    seed = 0
    random_state = numpy.random.RandomState(seed)
    inputgen = InputGenerator(random_state)
    # Generate input data
    input_data = {
        input_detail["name"]: inputgen.generate(
            input_detail["shape"],
            input_detail["dtype"],
        )
        for input_detail in input_details
    }
    # FIX: tensors must be addressed by the interpreter-assigned index from
    # get_input_details(); the previous enumerate() positions only coincide
    # with those indices by luck.
    for input_detail in input_details:
        interpreter.set_tensor(input_detail["index"], input_data[input_detail["name"]])
    interpreter.invoke()
    expected_output_data = [
        interpreter.get_tensor(output_detail["index"]) for output_detail in output_details
    ]
    return input_data, expected_output_data
def make_partitioned_function(relay_op):
    """Wrap *relay_op* as an ethos-u offloaded function inside an IRModule.

    The free variable of the expression becomes the input of both the
    partitioned function and the module's "main".
    """
    ifm0 = relay.analysis.free_vars(relay_op)
    ifm_shape = ifm0[0].type_annotation.shape
    ifm_dtype = ifm0[0].type_annotation.dtype
    ifm = relay.var("ifm", shape=ifm_shape, dtype=ifm_dtype)
    glb_ethosu = relay.GlobalVar("tvmgen_default_ethosu_main_0")
    # Attributes that mark the function as an external (ethos-u) kernel.
    func = (
        relay.Function(ifm0, relay_op)
        .with_attr("Inline", 1)
        .with_attr("Compiler", "ethos-u")
        .with_attr("global_symbol", "tvmgen_default_ethosu_main_0")
        .with_attr("Primitive", 1)
    )
    mod = tvm.IRModule()
    mod[glb_ethosu] = func
    mod = relay.transform.InferType()(mod)
    call = relay.Call(glb_ethosu, [ifm])
    mod["main"] = relay.Function([ifm], call)
    mod = relay.transform.InferType()(mod)
    return mod
def generate_weights_data(shape, dtype):
    """Deterministic fake weights: values cycle 0..254 across the tensor."""
    total_elements = 1
    for extent in shape:
        total_elements *= extent
    flat = numpy.arange(total_elements) % 255
    return flat.reshape(shape).astype(dtype)
def get_convolutional_args(call, include_buffers=False, remove_constants=False):
    """A method to extract the arguments from conv2d or depthwise_conv2d extern call."""
    args = call.args
    conv_args = []
    remove_indices = [0]
    # NOTE(review): indices 41, 42, 44, 45 appear to correspond to the
    # constant weight/scale-bias arguments of the extern call -- confirm
    # against the TIR signature of ethosu conv2d.
    if remove_constants:
        remove_indices += [41, 42, 44, 45]
    for i, arg in enumerate(args):
        if i in remove_indices:
            continue
        elif isinstance(arg, tvm.tir.expr.IntImm) or isinstance(arg, tvm.tir.expr.FloatImm):
            # Immediate scalars are reduced to their Python values.
            conv_args.append(arg.value)
        elif isinstance(arg, tvm.tir.expr.Load) and not include_buffers:
            # Loads are reduced to their index expression unless the caller
            # wants the buffer access itself.
            conv_args.append(arg.index)
        else:
            conv_args.append(arg)
    return conv_args
def compute_ofm_shape(ifm_shape, padding, kernel_shape, strides, dilation=(1, 1)):
    """Compute the NHWC output-feature-map shape of a 2D conv/pool operation.

    Args:
        ifm_shape: input shape as [batch, height, width, channels].
        padding: "VALID" or "SAME" (case-insensitive).
        kernel_shape: (kernel_h, kernel_w).
        strides: (stride_h, stride_w).
        dilation: (dilation_h, dilation_w); defaults to no dilation.
            (FIX: was a mutable list default.)

    Returns:
        [batch, out_h, out_w, channels].

    Raises:
        ValueError: for an unrecognized padding mode (previously this fell
            through to a NameError on the undefined ``h``).
    """
    assert len(strides) == 2
    assert len(dilation) == 2
    assert len(kernel_shape) == 2
    mode = padding.lower()
    if mode == "valid":
        h = math.ceil((ifm_shape[1] - (kernel_shape[0] - 1) * dilation[0]) / strides[0])
        w = math.ceil((ifm_shape[2] - (kernel_shape[1] - 1) * dilation[1]) / strides[1])
    elif mode == "same":
        h = math.ceil(ifm_shape[1] / strides[0])
        w = math.ceil(ifm_shape[2] / strides[1])
    else:
        raise ValueError("Unsupported padding: {}".format(padding))
    return [ifm_shape[0], h, w, ifm_shape[3]]
def compute_padding_shape(ifm_shape, ofm_shape, padding, kernel_shape, strides, dilation=(1, 1)):
    """Compute TensorFlow-style padding as [top, left, bottom, right].

    Args:
        ifm_shape: input shape as [batch, height, width, channels].
        ofm_shape: output shape as [batch, height, width, channels].
        padding: "VALID" or "SAME" (case-insensitive).
        kernel_shape: (kernel_h, kernel_w).
        strides: (stride_h, stride_w).
        dilation: (dilation_h, dilation_w); defaults to no dilation.
            (FIX: was a mutable list default.)

    Raises:
        ValueError: for an unrecognized padding mode (previously this fell
            through and returned None).
    """
    assert len(strides) == 2
    assert len(dilation) == 2
    assert len(kernel_shape) == 2
    mode = padding.lower()
    if mode == "valid":
        return [0, 0, 0, 0]
    if mode == "same":
        effective_kernel_shape = [
            dilation[0] * (kernel_shape[0] - 1) + 1,
            dilation[1] * (kernel_shape[1] - 1) + 1,
        ]
        pad_along_height = max(
            (ofm_shape[1] - 1) * strides[0] + effective_kernel_shape[0] - ifm_shape[1], 0
        )
        pad_along_width = max(
            (ofm_shape[2] - 1) * strides[1] + effective_kernel_shape[1] - ifm_shape[2], 0
        )
        # Any odd remainder goes on the bottom/right, as in TensorFlow.
        pad_top = pad_along_height // 2
        pad_bottom = pad_along_height - pad_top
        pad_left = pad_along_width // 2
        pad_right = pad_along_width - pad_left
        return [pad_top, pad_left, pad_bottom, pad_right]
    raise ValueError("Unsupported padding: {}".format(padding))
def make_ethosu_conv2d(
    ifm,
    ifm_channels,
    ofm_channels,
    kernel_shape,
    padding,
    strides,
    dilation,
    lut=relay.const([], dtype="int8"),
    activation="NONE",
    ifm_layout="NHWC",
    ofm_layout="NHWC",
    weight_dtype="int8",
    scale_bias_dtype="uint8",
    rounding_mode="TFL",
):
    """Build an ethosu_conv2d call with deterministic weights and scale-bias.

    Quantization parameters (scales and zero points) are fixed test values.
    """
    # conv params
    weight_shape = (ofm_channels, kernel_shape[0], kernel_shape[1], ifm_channels)
    padding = get_pad_tuple(padding, kernel_shape)
    # 10 bytes of scale+bias per output channel, generated deterministically.
    scale_bias_data = generate_weights_data((weight_shape[0], 10), scale_bias_dtype)
    scale_bias = relay.const(scale_bias_data, dtype=scale_bias_dtype)
    weight_data = generate_weights_data(weight_shape, weight_dtype)
    weight = relay.const(weight_data, dtype=weight_dtype)
    conv = ethosu_ops.ethosu_conv2d(
        ifm,
        weight,
        scale_bias,
        lut=lut,
        ifm_scale=0.5,
        ifm_zero_point=10,
        weight_zero_point=12,
        ofm_scale=0.25,
        ofm_zero_point=14,
        kernel_shape=kernel_shape,
        ofm_channels=ofm_channels,
        strides=strides,
        padding=padding,
        dilation=dilation,
        activation=activation,
        # Clip bounds only matter when the activation is "CLIP".
        clip_min=10 if activation == "CLIP" else 0,
        clip_max=100 if activation == "CLIP" else 0,
        rounding_mode=rounding_mode,
        upscale="NONE",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    return conv
def make_ethosu_depthwise_conv2d(
    ifm,
    channels,
    kernel_shape,
    padding,
    strides,
    dilation,
    activation="NONE",
    ifm_layout="NHWC",
    ofm_layout="NHWC",
    weight_dtype="int8",
    scale_bias_dtype="uint8",
    rounding_mode="TFL",
):
    """Build an ethosu_depthwise_conv2d call with deterministic weights.

    Quantization parameters (scales and zero points) are fixed test values.
    """
    # params
    # Depthwise: one filter per channel (last weight dim is 1).
    weight_shape = (channels, kernel_shape[0], kernel_shape[1], 1)
    padding = get_pad_tuple(padding, kernel_shape)
    # 10 bytes of scale+bias per channel, generated deterministically.
    scale_bias_data = generate_weights_data((weight_shape[0], 10), scale_bias_dtype)
    scale_bias = relay.const(scale_bias_data, dtype=scale_bias_dtype)
    weight_data = generate_weights_data(weight_shape, weight_dtype)
    weight = relay.const(weight_data, dtype=weight_dtype)
    depthwise = ethosu_ops.ethosu_depthwise_conv2d(
        ifm,
        weight,
        scale_bias,
        lut=relay.const([], dtype="int8"),
        ifm_scale=0.6,
        ifm_zero_point=11,
        weight_zero_point=13,
        ofm_scale=0.26,
        ofm_zero_point=15,
        kernel_shape=kernel_shape,
        ofm_channels=channels,
        strides=strides,
        padding=padding,
        dilation=dilation,
        activation=activation,
        # Clip bounds only matter when the activation is "CLIP".
        clip_min=15 if activation == "CLIP" else 0,
        clip_max=105 if activation == "CLIP" else 0,
        rounding_mode=rounding_mode,
        upscale="NONE",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    return depthwise
def get_pooling_args(call, include_buffers=False):
    """Extract the argument list from an ethosu pooling TIR extern call.

    IntImm/FloatImm arguments are reduced to their Python values; Load
    arguments are reduced to their index expression unless *include_buffers*
    is True; everything else is passed through unchanged.
    """
    pooling_args = []
    # FIX: the enumerate() index in the original was never used.
    for arg in call.args:
        if isinstance(arg, (tvm.tir.expr.IntImm, tvm.tir.expr.FloatImm)):
            pooling_args.append(arg.value)
        elif isinstance(arg, tvm.tir.expr.Load) and not include_buffers:
            pooling_args.append(arg.index)
        else:
            pooling_args.append(arg)
    return pooling_args
def make_ethosu_pooling(
    ifm,
    pooling_type,
    pool_shape,
    ofm_channels,
    strides,
    padding,
    activation="NONE",
    ifm_layout="NHWC",
    ofm_layout="NHWC",
    rounding_mode="TFL",
):
    """Build an ethosu_pooling call with identity quantization (scale 1, zp 0)."""
    pooling = ethosu_ops.ethosu_pooling(
        ifm,
        lut=relay.const([], dtype="int8"),
        pooling_type=pooling_type,
        ifm_scale=1,
        ifm_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        pool_shape=pool_shape,
        ofm_channels=ofm_channels,
        strides=strides,
        padding=padding,
        activation=activation,
        # Clip bounds only matter when the activation is "CLIP".
        clip_min=10 if activation == "CLIP" else 0,
        clip_max=100 if activation == "CLIP" else 0,
        rounding_mode=rounding_mode,
        upscale="NONE",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    return pooling
def get_binary_elementwise_args(call, include_buffers=False):
    """Extract the argument list from an ethosu binary-elementwise extern call.

    IntImm/FloatImm arguments are reduced to their Python values; Load
    arguments are reduced to their index expression unless *include_buffers*
    is True; everything else is passed through unchanged.
    """
    binary_elementwise_args = []
    # FIX: the enumerate() index in the original was never used.
    for arg in call.args:
        if isinstance(arg, (tvm.tir.expr.IntImm, tvm.tir.expr.FloatImm)):
            binary_elementwise_args.append(arg.value)
        elif isinstance(arg, tvm.tir.expr.Load) and not include_buffers:
            binary_elementwise_args.append(arg.index)
        else:
            binary_elementwise_args.append(arg)
    return binary_elementwise_args
def make_ethosu_binary_elementwise(
    ifm,
    ifm2,
    ifm_channels,
    ifm2_channels,
    operator_type,
    ofm_dtype,
    reversed_operands=False,
    activation="NONE",
    ifm_layout="NHWC",
    ifm2_layout="NHWC",
    ofm_layout="NHWC",
    rounding_mode="TFL",
):
    """Build an ethosu_binary_elementwise call with identity quantization."""
    ethosu_binary_elementwise = ethosu_ops.ethosu_binary_elementwise(
        ifm=ifm,
        ifm2=ifm2,
        lut=relay.const([], dtype="int8"),
        operator_type=operator_type,
        ifm_scale=1,
        ifm_zero_point=0,
        ifm2_scale=1,
        ifm2_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        ifm_channels=ifm_channels,
        ifm2_channels=ifm2_channels,
        reversed_operands=reversed_operands,
        activation=activation,
        ofm_dtype=ofm_dtype,
        # Clip bounds only matter when the activation is "CLIP".
        clip_min=10 if activation == "CLIP" else 0,
        clip_max=100 if activation == "CLIP" else 0,
        rounding_mode=rounding_mode,
        ifm_layout=ifm_layout,
        ifm2_layout=ifm2_layout,
        ofm_layout=ofm_layout,
    )
    return ethosu_binary_elementwise
def make_ethosu_identity(
    ifm,
    lut=relay.const([], dtype="int8"),
    ifm_scale=1,
    ifm_zero_point=0,
    ofm_scale=1,
    ofm_zero_point=0,
    activation="NONE",
):
    """Build an ethosu_identity call that forwards its quantization params.

    Note: the ``lut`` default is evaluated once at import time; relay.const
    values are not mutated here, so sharing it across calls is harmless.
    """
    identity = ethosu_ops.ethosu_identity(
        ifm,
        lut=lut,
        ifm_scale=ifm_scale,
        ifm_zero_point=ifm_zero_point,
        ofm_scale=ofm_scale,
        ofm_zero_point=ofm_zero_point,
        activation=activation,
    )
    return identity
def make_ethosu_unary_elementwise(
    ifm,
    ofm_channels,
    operator_type,
    activation="NONE",
    ifm_layout="NHWC",
    ofm_layout="NHWC",
    rounding_mode="TFL",
):
    """Build an ethosu_unary_elementwise call with identity quantization."""
    ethosu_unary_elementwise = ethosu_ops.ethosu_unary_elementwise(
        ifm=ifm,
        lut=relay.const([], dtype="int8"),
        operator_type=operator_type,
        ifm_scale=1,
        ifm_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        ofm_channels=ofm_channels,
        activation=activation,
        # Clip bounds only matter when the activation is "CLIP".
        clip_min=10 if activation == "CLIP" else 0,
        clip_max=100 if activation == "CLIP" else 0,
        rounding_mode=rounding_mode,
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    return ethosu_unary_elementwise
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import textwrap
from contextlib import closing
from xml.etree import ElementTree
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.subsystems.shader import Shader
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.targets.scalac_plugin import ScalacPlugin
from pants.backend.jvm.tasks.jvm_compile.analysis_tools import AnalysisTools
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis import ZincAnalysis
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis_parser import ZincAnalysisParser
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.hash_utils import hash_file
from pants.base.workunit import WorkUnitLabel
from pants.java.distribution.distribution import DistributionLocator
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_open
from pants.util.memo import memoized_property
# Well known metadata file required to register scalac plugins with nsc.
_PLUGIN_INFO_FILE = 'scalac-plugin.xml'
# Well known metadata file to register annotation processors with a java 1.6+ compiler
# (the JSR-269 service-loader entry).
_PROCESSOR_INFO_FILE = 'META-INF/services/javax.annotation.processing.Processor'
class BaseZincCompile(JvmCompile):
  """An abstract base class for zinc compilation tasks."""
  # Entry point of the shaded zinc tool jar registered in register_options().
  _ZINC_MAIN = 'org.pantsbuild.zinc.Main'
  # Opt in to concurrent execution of this task type.
  _supports_concurrent_execution = True
  @staticmethod
  def write_plugin_info(resources_dir, target):
    """Write the scalac-plugin.xml descriptor that registers *target* with nsc."""
    plugin_info_file = os.path.join(resources_dir, _PLUGIN_INFO_FILE)
    with safe_open(plugin_info_file, 'w') as f:
      # dedent + strip normalizes the template's leading indentation.
      f.write(textwrap.dedent("""
        <plugin>
          <name>{}</name>
          <classname>{}</classname>
        </plugin>
      """.format(target.plugin, target.classname)).strip())
@staticmethod
def validate_arguments(log, whitelisted_args, args):
"""Validate that all arguments match whitelisted regexes."""
valid_patterns = {re.compile(p): v for p, v in whitelisted_args.items()}
def validate(arg_index):
arg = args[arg_index]
for pattern, has_argument in valid_patterns.items():
if pattern.match(arg):
return 2 if has_argument else 1
log.warn("Zinc argument '{}' is not supported, and is subject to change/removal!".format(arg))
return 1
arg_index = 0
while arg_index < len(args):
arg_index += validate(arg_index)
  @classmethod
  def subsystem_dependencies(cls):
    # Beyond the base task's subsystems, zinc compiles need the scala
    # platform and a JVM distribution locator.
    return super(BaseZincCompile, cls).subsystem_dependencies() + (ScalaPlatform, DistributionLocator)
@property
def compiler_plugin_types(cls):
"""A tuple of target types which are compiler plugins."""
return (AnnotationProcessor, ScalacPlugin)
  @classmethod
  def get_jvm_options_default(cls, bootstrap_option_values):
    # JVM flags for the zinc tool process: UTF-8 file encoding, a bounded
    # analysis cache, headless AWT and a 2g heap.
    return ('-Dfile.encoding=UTF-8', '-Dzinc.analysis.cache.limit=1000',
            '-Djava.awt.headless=true', '-Xmx2g')
  @classmethod
  def get_args_default(cls, bootstrap_option_values):
    # Default compiler args. The '-C'/'-S' prefixes appear to route a flag
    # to javac and scalac respectively -- confirm against the zinc CLI docs.
    return ('-C-encoding', '-CUTF-8', '-S-encoding', '-SUTF-8', '-S-g:vars')
  @classmethod
  def get_warning_args_default(cls):
    # Args appended when compile warnings are enabled.
    return ('-C-deprecation', '-C-Xlint:all', '-C-Xlint:-serial', '-C-Xlint:-path',
            '-S-deprecation', '-S-unchecked', '-S-Xlint')
  @classmethod
  def get_no_warning_args_default(cls):
    # Args appended when compile warnings are suppressed.
    return ('-C-nowarn', '-C-Xlint:none', '-S-nowarn', '-S-Xlint:none', )
  @classmethod
  def get_fatal_warnings_enabled_args_default(cls):
    # Promote warnings to errors in both scalac and javac.
    return ('-S-Xfatal-warnings', '-C-Werror')
  @classmethod
  def get_fatal_warnings_disabled_args_default(cls):
    # No extra args are needed to leave warnings non-fatal.
    return ()
  @classmethod
  def register_options(cls, register):
    """Registers zinc-specific options and the zinc / sbt interface tool jars."""
    super(BaseZincCompile, cls).register_options(register)
    # TODO: disable by default because it breaks dependency parsing:
    # https://github.com/pantsbuild/pants/issues/2224
    # ...also, as of sbt 0.13.9, it is significantly slower for cold builds.
    register('--name-hashing', advanced=True, type=bool, fingerprint=True,
             help='Use zinc name hashing.')
    register('--whitelisted-args', advanced=True, type=dict,
             default={
               '-S.*': False,
               '-C.*': False,
               '-file-filter': True,
               '-msg-filter': True,
               },
             help='A dict of option regexes that make up pants\' supported API for zinc. '
                  'Options not listed here are subject to change/removal. The value of the dict '
                  'indicates that an option accepts an argument.')
    register('--incremental', advanced=True, type=bool, default=True,
             help='When set, zinc will use sub-target incremental compilation, which dramatically '
                  'improves compile performance while changing large targets. When unset, '
                  'changed targets will be compiled with an empty output directory, as if after '
                  'running clean-all.')
    # TODO: Defaulting to false due to a few upstream issues for which we haven't pulled down fixes:
    # https://github.com/sbt/sbt/pull/2085
    # https://github.com/sbt/sbt/pull/2160
    register('--incremental-caching', advanced=True, type=bool,
             help='When set, the results of incremental compiles will be written to the cache. '
                  'This is unset by default, because it is generally a good precaution to cache '
                  'only clean/cold builds.')
    cls.register_jvm_tool(register,
                          'zinc',
                          classpath=[
                            # NB: This is explicitly a `_2.10` JarDependency rather than a
                            # ScalaJarDependency. The latter would pick up the platform in a users'
                            # repo, whereas this binary is shaded and independent of the target
                            # platform version.
                            JarDependency('org.pantsbuild', 'zinc_2.10', '0.0.3')
                          ],
                          main=cls._ZINC_MAIN,
                          custom_rules=[
                            # The compiler-interface and sbt-interface tool jars carry xsbt and
                            # xsbti interfaces that are used across the shaded tool jar boundary so
                            # we preserve these root packages wholesale along with the core scala
                            # APIs.
                            Shader.exclude_package('scala', recursive=True),
                            Shader.exclude_package('xsbt', recursive=True),
                            Shader.exclude_package('xsbti', recursive=True),
                          ])

    # Helper pinning the sbt rev used for both interface jars below.
    def sbt_jar(name, **kwargs):
      return JarDependency(org='com.typesafe.sbt', name=name, rev='0.13.9', **kwargs)

    cls.register_jvm_tool(register,
                          'compiler-interface',
                          classpath=[
                            sbt_jar(name='compiler-interface',
                                    classifier='sources',
                                    # We just want the single compiler-interface jar and not its
                                    # dep on scala-lang
                                    intransitive=True)
                          ])
    cls.register_jvm_tool(register,
                          'sbt-interface',
                          classpath=[
                            sbt_jar(name='sbt-interface',
                                    # We just want the single sbt-interface jar and not its dep
                                    # on scala-lang
                                    intransitive=True)
                          ])
  @classmethod
  def prepare(cls, options, round_manager):
    # Beyond the base preparation, require the scala platform tools products.
    super(BaseZincCompile, cls).prepare(options, round_manager)
    ScalaPlatform.prepare_tools(round_manager)
  @property
  def incremental(self):
    """Zinc implements incremental compilation.

    Setting this property causes the task infrastructure to clone the previous
    results_dir for a target into the new results_dir for a target.
    """
    return self.get_options().incremental

  @property
  def cache_incremental(self):
    """Optionally write the results of incremental compiles to the cache."""
    return self.get_options().incremental_caching
def __init__(self, *args, **kwargs):
super(BaseZincCompile, self).__init__(*args, **kwargs)
# A directory to contain per-target subdirectories with apt processor info files.
self._processor_info_dir = os.path.join(self.workdir, 'apt-processor-info')
# Validate zinc options.
ZincCompile.validate_arguments(self.context.log, self.get_options().whitelisted_args,
self._args)
# A directory independent of any other classpath which can contain per-target
# plugin resource files.
self._plugin_info_dir = os.path.join(self.workdir, 'scalac-plugin-info')
  def select(self, target):
    # Abstract: subclasses decide which targets this task compiles.
    raise NotImplementedError()

  def select_source(self, source_file_path):
    # Abstract: subclasses decide which source files this task owns.
    raise NotImplementedError()

  def create_analysis_tools(self):
    # Builds the zinc analysis parse/port tooling rooted at the resolved JVM home.
    return AnalysisTools(DistributionLocator.cached().real_home, ZincAnalysisParser(), ZincAnalysis,
                         get_buildroot(), self.get_options().pants_workdir)
  def zinc_classpath(self):
    # Zinc takes advantage of tools.jar if it's presented in classpath.
    # For example com.sun.tools.javac.Main is used for in process java compilation.
    def locate_tools_jar():
      try:
        return DistributionLocator.cached(jdk=True).find_libs(['tools.jar'])
      except DistributionLocator.Error:
        # Best-effort: a missing JDK only costs performance, not correctness.
        self.context.log.info('Failed to locate tools.jar. '
                              'Install a JDK to increase performance of Zinc.')
        return []

    return self.tool_classpath('zinc') + locate_tools_jar()

  def compiler_classpath(self):
    # The scala compiler jars for the globally-configured scala platform.
    return ScalaPlatform.global_instance().compiler_classpath(self.context.products)
  def extra_compile_time_classpath_elements(self):
    # Classpath entries necessary for our compiler plugins.
    return self.plugin_jars

  @property
  def plugin_jars(self):
    """The classpath entries for jars containing code for enabled plugins."""
    raise NotImplementedError()

  @property
  def plugin_args(self):
    # Abstract: scalac plugin flags contributed to the zinc command line.
    raise NotImplementedError()
  def write_extra_resources(self, compile_context):
    """Override write_extra_resources to produce plugin and annotation processor files."""
    target = compile_context.target
    if isinstance(target, ScalacPlugin):
      self.write_plugin_info(compile_context.classes_dir, target)
    elif isinstance(target, AnnotationProcessor) and target.processors:
      processor_info_file = os.path.join(compile_context.classes_dir, _PROCESSOR_INFO_FILE)
      self._write_processor_info(processor_info_file, target.processors)

  def _write_processor_info(self, processor_info_file, processors):
    # One processor classname per line, the format javac's service discovery expects.
    with safe_open(processor_info_file, 'w') as f:
      for processor in processors:
        f.write('{}\n'.format(processor.strip()))
  def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file,
              log_file, settings, fatal_warnings):
    """Assembles the zinc command line for one compile and runs it in a JVM subprocess.

    Raises TaskError if the zinc process exits non-zero. NB: the order in which
    flags are appended below is load-bearing for zinc's CLI parsing.
    """
    # We add compiler_classpath to ensure the scala-library jar is on the classpath.
    # TODO: This also adds the compiler jar to the classpath, which compiled code shouldn't
    # usually need. Be more selective?
    # TODO(John Sirois): Do we need to do this at all? If adding scala-library to the classpath is
    # only intended to allow target authors to omit a scala-library dependency, then ScalaLibrary
    # already overrides traversable_dependency_specs to achieve the same end; arguably at a more
    # appropriate level and certainly at a more appropriate granularity.
    compile_classpath = self.compiler_classpath() + classpath
    self._verify_zinc_classpath(self.get_options().pants_workdir, compile_classpath)
    self._verify_zinc_classpath(self.get_options().pants_workdir, upstream_analysis.keys())

    zinc_args = []

    zinc_args.extend([
      '-log-level', self.get_options().level,
      '-analysis-cache', analysis_file,
      '-classpath', ':'.join(compile_classpath),
      '-d', classes_output_dir
    ])
    if not self.get_options().colors:
      zinc_args.append('-no-color')
    if not self.get_options().name_hashing:
      zinc_args.append('-no-name-hashing')
    if log_file:
      zinc_args.extend(['-capture-log', log_file])

    zinc_args.extend(['-compiler-interface', self.tool_jar('compiler-interface')])
    zinc_args.extend(['-sbt-interface', self.tool_jar('sbt-interface')])
    zinc_args.extend(['-scala-path', ':'.join(self.compiler_classpath())])

    zinc_args += self.plugin_args
    if upstream_analysis:
      # Map of upstream classes dir -> analysis file, so zinc can consult
      # dependency analysis without recompiling upstream sources.
      zinc_args.extend(['-analysis-map',
                        ','.join('{}:{}'.format(*kv) for kv in upstream_analysis.items())])

    zinc_args += args

    zinc_args.extend([
      '-C-source', '-C{}'.format(settings.source_level),
      '-C-target', '-C{}'.format(settings.target_level),
    ])
    zinc_args.extend(settings.args)

    if fatal_warnings:
      zinc_args.extend(self.get_options().fatal_warnings_enabled_args)
    else:
      zinc_args.extend(self.get_options().fatal_warnings_disabled_args)

    jvm_options = list(self._jvm_options)

    zinc_args.extend(sources)

    self.log_zinc_file(analysis_file)
    if self.runjava(classpath=self.zinc_classpath(),
                    main=self._ZINC_MAIN,
                    jvm_options=jvm_options,
                    args=zinc_args,
                    workunit_name='zinc',
                    workunit_labels=[WorkUnitLabel.COMPILER]):
      raise TaskError('Zinc compile failed.')
@staticmethod
def _verify_zinc_classpath(pants_workdir, classpath):
for path in classpath:
if not os.path.isabs(path):
raise TaskError('Classpath entries provided to zinc should be absolute. ' + path + ' is not.')
if os.path.relpath(path, pants_workdir).startswith(os.pardir):
raise TaskError('Classpath entries provided to zinc should be in working directory. ' +
path + ' is not.')
if path != os.path.normpath(path):
raise TaskError('Classpath entries provided to zinc should be normalised (i.e. without ".." and "."). ' +
path + ' is not.')
  def log_zinc_file(self, analysis_file):
    # Logs the analysis file path plus its content hash (or 'nonexistent'),
    # which is useful when debugging caching/invalidation behavior.
    self.context.log.debug('Calling zinc on: {} ({})'
                           .format(analysis_file,
                                   hash_file(analysis_file).upper()
                                   if os.path.exists(analysis_file)
                                   else 'nonexistent'))
class ZincCompile(BaseZincCompile):
  """Compile Scala and Java code using Zinc."""

  _name = 'zinc'

  @classmethod
  def register_options(cls, register):
    super(ZincCompile, cls).register_options(register)
    register('--scalac-plugins', advanced=True, type=list, fingerprint=True,
             help='Use these scalac plugins.')
    register('--scalac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
             help='Map from plugin name to list of arguments for that plugin.')
    # By default we expect no plugin-jars classpath_spec is filled in by the user, so we accept an
    # empty classpath.
    cls.register_jvm_tool(register, 'plugin-jars', classpath=[])

  @classmethod
  def product_types(cls):
    return ['runtime_classpath', 'classes_by_source', 'product_deps_by_src']

  def select(self, target):
    # Require that targets are marked for JVM compilation, to differentiate from
    # targets owned by the scalajs contrib module.
    if not isinstance(target, JvmTarget):
      return False
    return target.has_sources('.java') or target.has_sources('.scala')

  def select_source(self, source_file_path):
    # This task owns java and scala sources only.
    return source_file_path.endswith('.java') or source_file_path.endswith('.scala')

  @memoized_property
  def plugin_jars(self):
    """The classpath entries for jars containing code for enabled plugins."""
    if self.get_options().scalac_plugins:
      return self.tool_classpath('plugin-jars')
    else:
      return []

  @memoized_property
  def plugin_args(self):
    # scalac flags enabling each active plugin, plus any per-plugin arguments.
    if not self.get_options().scalac_plugins:
      return []

    plugin_args = self.get_options().scalac_plugin_args
    active_plugins = self._find_plugins()
    ret = []
    for name, jar in active_plugins.items():
      ret.append('-S-Xplugin:{}'.format(jar))
      for arg in plugin_args.get(name, []):
        ret.append('-S-P:{}:{}'.format(name, arg))
    return ret

  def _find_plugins(self):
    """Returns a map from plugin name to plugin jar."""
    # Allow multiple flags and also comma-separated values in a single flag.
    plugin_names = set([p for val in self.get_options().scalac_plugins for p in val.split(',')])
    plugins = {}
    buildroot = get_buildroot()
    for jar in self.plugin_jars:
      with open_zip(jar, 'r') as jarfile:
        try:
          with closing(jarfile.open(_PLUGIN_INFO_FILE, 'r')) as plugin_info_file:
            plugin_info = ElementTree.parse(plugin_info_file).getroot()
            if plugin_info.tag != 'plugin':
              raise TaskError(
                'File {} in {} is not a valid scalac plugin descriptor'.format(_PLUGIN_INFO_FILE,
                                                                               jar))
            name = plugin_info.find('name').text
            if name in plugin_names:
              if name in plugins:
                raise TaskError('Plugin {} defined in {} and in {}'.format(name, plugins[name], jar))
              # It's important to use relative paths, as the compiler flags get embedded in the zinc
              # analysis file, and we port those between systems via the artifact cache.
              plugins[name] = os.path.relpath(jar, buildroot)
        except KeyError:
          # Jars without a plugin descriptor are simply not plugins; skip them.
          pass

    unresolved_plugins = plugin_names - set(plugins.keys())
    if unresolved_plugins:
      raise TaskError('Could not find requested plugins: {}'.format(list(unresolved_plugins)))

    return plugins
|
|
# a python implementation of a local web server which
# ... recognizes web form data from post requests
# ... and stores web form data in a mysql database.
# to run from root dir: `python software/start_local_web_server.py`
# source(s):
# + http://georgik.sinusgear.com/2011/01/07/how-to-dump-post-request-with-python/
# + https://snipt.net/raw/f8ef141069c3e7ac7e0134c6b58c25bf/?nice
# + https://github.com/PyMySQL/PyMySQL#example
# + http://www.cs.sfu.ca/CourseCentral/165/common/guide/html/sec-cgi.html
# + https://wiki.python.org/moin/BaseHttpServer
import code # to debug: `code.interact(local=locals())`
import logging # to log: `logging.warning("MY MESSAGE")` or `logging.error("MY MESSAGE")`
import SimpleHTTPServer
import SocketServer
import cgi
import json
import pymysql.cursors
import os
from bs4 import BeautifulSoup
PORT = 8818  # local port the web server listens on

try:
    DB_ROOT_PASSWORD = os.environ["MYSQL_ROOT_PASSWORD"] # if your root user has a password, assign it to the "MYSQL_ROOT_PASSWORD" environment variable
except KeyError as e:
    DB_ROOT_PASSWORD = "" # most students' root user doesn't have a password
#
# DEFINE THE LOCAL WEB SERVER
#
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Request handler that serves static files and persists web form posts to MySQL.

    NOTE(review): this is Python 2 code (print statements, SimpleHTTPServer).
    """
    #
    # OVERWRITE BEHAVIOR OF "GET" REQUESTS
    #
    def do_GET(self):
        """Serves files; for /menu-items/index.html, injects DB rows into the page."""
        if ".html" in self.path: # only log messages for html pages, not images and scripts
            self.log_message("GETTING: " + self.path)
            self.log_message("HEADERS: " + json.dumps(dict(self.headers)))
        # IF GETTING THE MENU PATH, READ MENU ITEMS FROM DATABASE
        if self.path == "/menu-items/index.html":
            self.log_message("QUERYING THE DATABASE")
            menu_items = []
            # ESTABLISH DATABASE CONNECTION
            connection = pymysql.connect(
                host='localhost',
                port=3306,
                user='root',
                passwd= DB_ROOT_PASSWORD,
                db='salad_db',
                #charset='utf8mb4',
                cursorclass=pymysql.cursors.DictCursor
            )
            # EXECUTE DATABASE TRANSACTION
            try:
                # GET MENU ITEM RECORDS
                with connection.cursor() as cursor:
                    sql = "SELECT * FROM menu_items ORDER BY id DESC LIMIT 100"
                    cursor.execute(sql)
                    for row in cursor.fetchall():
                        print(row)
                        menu_items.append(row)
            finally:
                connection.close() # for performance
            # READ HTML FILE
            # NOTE(review): derives the html path by string-replacing the script's
            # relative path inside its absolute path — only works when run from
            # the repo root as documented at the top of the file.
            menu_dot_html = os.path.abspath(__file__).replace(os.path.relpath(__file__), "menu-items/index.html")
            print "READING HTML FILE -- %s" % menu_dot_html
            html_content = BeautifulSoup(open(menu_dot_html),"lxml")
            # MANIPULATE FILE CONTENTS
            ###menu_item_list = html_content.find(id="menu-item-list")
            ###print menu_item_list
            ###for menu_item in menu_items:
            ###    list_item = html_content.new_tag('li')
            ###    list_item.string = menu_item["title"]
            ###    menu_item_list.append(list_item)
            menu_item_table_body = html_content.find(id="menu-item-table-body")
            for menu_item in menu_items:
                # one <tr> per DB row, one <td> per selected column
                table_row = html_content.new_tag('tr')
                for attr_val in [
                    menu_item["id"],
                    menu_item["category"],
                    menu_item["title"],
                    menu_item["vegan_safe"],
                    menu_item["gluten_free"],
                    menu_item["description"]
                ]:
                    table_data = html_content.new_tag('td')
                    table_data.string = str(attr_val)
                    table_row.append(table_data)
                menu_item_table_body.append(table_row)
            # RETURN HTML CONTENT
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            # NOTE(review): relies on Python 2's implicit str() conversion of the
            # BeautifulSoup document when writing to the socket.
            self.wfile.write(html_content)
        else:
            SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
    #
    # OVERWRITE BEHAVIOR OF "POST" REQUESTS
    #
    def do_POST(self):
        """Parses form data; for /menu-items/new.html, inserts a row and redirects."""
        self.log_message("POSTING: " + self.path)
        self.log_message("HEADERS: " + json.dumps(dict(self.headers)))
        # READ FORM DATA
        form = cgi.FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={
                'REQUEST_METHOD': 'POST',
                'CONTENT_TYPE': self.headers['Content-Type'],
            }
        )
        # LOG FORM DATA
        form_dict = {}
        for attribute in form.list:
            form_dict[attribute.name] = attribute.value
        self.log_message("POSTED DATA: " + json.dumps(form_dict))
        # IF POSTING TO THE NEW MENU ITEMS PATH, CREATE A NEW MENU ITEM RECORD IN THE DATABASE
        if self.path == "/menu-items/new.html":
            self.log_message("STORING: " + json.dumps(form_dict))
            # TRANSFORM DATA
            category = form['category'].value
            title = form['title'].value
            calories = form['calories'].value
            description = form['description'].value
            # Checkbox semantics: the field is present only when checked; a
            # missing field raises KeyError and means unchecked.
            try:
                gluten_free = True if form['gluten_free'] else False
            except KeyError as e:
                gluten_free = False
            try:
                vegan_safe = True if form['vegan_safe'] else False
            except KeyError as e:
                vegan_safe = False
            calories = int(calories)
            gluten_free = int(gluten_free)
            vegan_safe = int(vegan_safe)
            # ESTABLISH DATABASE CONNECTION
            connection = pymysql.connect(
                host='localhost',
                port=3306,
                user='root',
                passwd= DB_ROOT_PASSWORD,
                db='salad_db',
                #charset='utf8mb4',
                cursorclass=pymysql.cursors.DictCursor
            )
            # EXECUTE DATABASE TRANSACTION
            try:
                # CREATE NEW RECORD (parameterized query -- no SQL injection)
                with connection.cursor() as cursor:
                    sql = "INSERT INTO `menu_items` (`category`,`title`,`calories`,`gluten_free`,`vegan_safe`,`description`) VALUES (%s, %s, %s, %s, %s, %s)"
                    cursor.execute(sql, (category, title, calories, gluten_free, vegan_safe, description) )
                    connection.commit() # to save the changes
                # PRINT NEW RECORD
                with connection.cursor() as cursor:
                    sql = "SELECT * FROM menu_items ORDER BY id DESC LIMIT 1"
                    cursor.execute(sql)
                    result = cursor.fetchone()
                    print(result)
            finally:
                connection.close() # for performance
            self.log_message("STORED")
        # REDIRECT TO MENU INDEX
        self.log_message("REDIRECTING")
        self.send_response(301)
        self.send_header('Location',"/menu-items/index.html")
        self.end_headers()
        SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
#
# RUN THE LOCAL WEB SERVER
#
# Bind the handler to a TCP server on all interfaces and serve until killed.
Handler = ServerHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "SERVING AT PORT:", PORT
httpd.serve_forever()
|
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
import collections
if is_silverlight==False:
from iptest.file_util import *
import sys
import imp
import operator
def get_builtins_dict():
    """Return the builtins namespace as a dict.

    Depending on context the interpreter exposes ``__builtins__`` as either the
    builtins module or its dict; normalize both forms to the dict.
    """
    namespace = __builtins__
    module_type = type(sys)
    if type(namespace) is module_type:
        return namespace.__dict__
    return namespace
def test_imp_new_module():
    """imp.new_module objects can be installed into sys.modules and imported."""
    x = imp.new_module('abc')
    sys.modules['abc'] = x
    x.foo = 'bar'
    import abc
    AreEqual(abc.foo, 'bar')

    # The module's own name need not match its sys.modules key.
    y = imp.new_module('\r\n')
    sys.modules['xyz'] = y
    y.foo = 'foo'
    import xyz
    AreEqual(xyz.foo, 'foo')

@skip("silverlight")
def test_imp_in_exec():
    """Package context (__name__/__path__) drives imports performed inside exec."""
    _imfp = 'impmodfrmpkg'
    _f_imfp_init = path_combine(testpath.public_testdir, _imfp, "__init__.py")
    _f_imfp_mod = path_combine(testpath.public_testdir, _imfp, "mod.py")
    _f_imfp_start = path_combine(testpath.public_testdir, "imfpstart.tpy")
    write_to_file(_f_imfp_init, "")
    write_to_file(_f_imfp_mod, "")
    write_to_file(_f_imfp_start, """
try:
    from impmodfrmpkg.mod import mod
except ImportError, e:
    pass
else:
    raise AssertionError("Import of mod from pkg.mod unexpectedly succeeded")
""")
    # import a package
    import impmodfrmpkg
    # create a dictionary like that package
    glb = {'__name__' : impmodfrmpkg.__name__, '__path__' : impmodfrmpkg.__path__}
    loc = {}
    exec('import mod', glb, loc)
    Assert('mod' in loc)
    glb = {'__name__' : impmodfrmpkg.__name__, '__path__' : impmodfrmpkg.__path__}
    loc = {}
    exec('from mod import *', glb, loc)
    #Assert('value' in loc) # TODO: Fix me
    if is_cli or is_silverlight:
        loc = {}
        exec('from System import *', globals(), loc)
        Assert('Int32' in loc)
        Assert('Int32' not in globals())
    if is_cli or is_silverlight:
        exec('from System import *')
        Assert('Int32' in dir())

def test_imp_basic():
    """Sanity checks for imp.get_magic and imp.get_suffixes."""
    magic = imp.get_magic()
    suffixes = imp.get_suffixes()
    Assert(isinstance(suffixes, list))
    for suffix in suffixes:
        Assert(isinstance(suffix, tuple))
        AreEqual(len(suffix), 3)
    Assert((".py", "U", 1) in suffixes)

# Shared fixture paths (Silverlight has no file-system access).
if is_silverlight==False:
    _testdir = "ImpTest"
    _imptestdir = path_combine(testpath.public_testdir, _testdir)
    _f_init = path_combine(_imptestdir, "__init__.py")
# Names probed by the is_frozen/init_frozen tests below.
temp_name = ["nt",
             "nt.P_WAIT",
             "nt.chmod",
             "sys.path",
             "xxxx"
            ]
@skip('silverlight')
def test_imp_package():
    """imp.find_module/load_module round-trip for a package directory."""
    write_to_file(_f_init, "my_name = 'imp package test'")
    pf, pp, (px, pm, pt) = imp.find_module(_testdir, [testpath.public_testdir])
    AreEqual(pt, imp.PKG_DIRECTORY)
    AreEqual(pf, None)
    AreEqual(px, "")
    AreEqual(pm, "")
    module = imp.load_module(_testdir, pf, pp, (px, pm, pt))
    Assert(_testdir in sys.modules)
    AreEqual(module.my_name, 'imp package test')

    # Repeat the lookup via sys.path rather than an explicit search list.
    save_sys_path = sys.path
    try:
        sys.path = list(sys.path)
        sys.path.append(testpath.public_testdir)
        fm = imp.find_module(_testdir)
    finally:
        sys.path = save_sys_path
    # unpack the result obtained above
    pf, pp, (px, pm, pt) = fm
    AreEqual(pt, imp.PKG_DIRECTORY)
    AreEqual(pf, None)
    AreEqual(px, "")
    AreEqual(pm, "")
    module = imp.load_module(_testdir, pf, pp, (px, pm, pt))
    AreEqual(module.my_name, 'imp package test')

if is_silverlight==False:
    _f_module = path_combine(_imptestdir, "imptestmod.py")

@skip('silverlight')
def test_imp_module():
    """imp.find_module/load_module round-trip for a plain source module."""
    write_to_file(_f_module, "value = 'imp test module'")
    pf, pp, (px, pm, pt) = imp.find_module("imptestmod", [_imptestdir])
    AreEqual(pt, imp.PY_SOURCE)
    Assert(pf != None)
    Assert(isinstance(pf, file))
    module = imp.load_module("imptestmod", pf, pp, (px, pm, pt))
    AreEqual(module.value, 'imp test module')
    pf.close()

    # Repeat the lookup via sys.path rather than an explicit search list.
    save_sys_path = sys.path
    try:
        sys.path = list(sys.path)
        sys.path.append(_imptestdir)
        fm = imp.find_module("imptestmod")
    finally:
        sys.path = save_sys_path
    # unpack the result obtained above
    pf, pp, (px, pm, pt) = fm
    AreEqual(pt, imp.PY_SOURCE)
    Assert(pf != None)
    Assert(isinstance(pf, file))
    AreEqual(px, ".py")
    AreEqual(pm, "U")
    module = imp.load_module("imptestmod", pf, pp, (px, pm, pt))
    AreEqual(module.value, 'imp test module')
    pf.close()
def test_direct_module_creation():
    """Module objects can be created and re-initialized directly via the module type."""
    import math
    for baseMod in math, sys:
        module = type(baseMod)
        x = module.__new__(module)
        AreEqual(repr(x), "<module '?' (built-in)>")
        #AreEqual(x.__dict__, None)
        x.__init__('abc', 'def')
        AreEqual(repr(x), "<module 'abc' (built-in)>")
        AreEqual(x.__doc__, 'def')
        x.__init__('aaa', 'zzz')
        AreEqual(repr(x), "<module 'aaa' (built-in)>")
        AreEqual(x.__doc__, 'zzz')
        # can't assign to module __dict__
        try:
            x.__dict__ = {}
        except TypeError: pass
        else: AssertUnreachable()
        # can't delete __dict__
        try:
            del(x.__dict__)
        except TypeError: pass
        else: AssertUnreachable()
        # init doesn't clobber dict, it just re-initializes values
        x.__dict__['foo'] = 'xyz'
        x.__init__('xyz', 'nnn')
        AreEqual(x.foo, 'xyz')
        # dict is lazily created on set
        x = module.__new__(module)
        x.foo = 23
        AreEqual(x.__dict__, {'foo':23})
        AreEqual(repr(x), "<module '?' (built-in)>")
        # can't pass wrong sub-type to new
        try:
            module.__new__(str)
        except TypeError: pass
        else: AssertUnreachable()
        # dir on non-initialized module raises TypeError
        x = module.__new__(module)
        x.__name__ = 'module_does_not_exist_in_sys_dot_modules'
        AssertError(ImportError, reload, x)

def test_redefine_import():
    # redefining global __import__ shouldn't change import semantics
    global __import__
    global called
    called = False
    def __import__(*args):
        global called
        called = True
    AreEqual(called, False)
    del __import__
    called = False
    AreEqual(called, False)

def test_module_dict():
    """A module's __dict__ is a genuine dict (and thus a Mapping)."""
    # NOTE(review): collections.Mapping moved to collections.abc in Python 3;
    # this file targets Python 2 / IronPython semantics.
    currentModule = sys.modules[__name__]
    AreEqual(isinstance(currentModule.__dict__, collections.Mapping), True)
    AreEqual(type({}), type(currentModule.__dict__))
    AreEqual(isinstance(currentModule.__dict__, dict), True)
#test release_lock,lock_held,acquire_lock
def test_lock():
    """Releasing an unheld import lock raises; acquire/release pair correctly."""
    i=0
    while i<5:
        i+=1
        if not imp.lock_held():
            AssertError(RuntimeError,imp.release_lock)
            imp.acquire_lock()
        else:
            imp.release_lock()

# test is_frozen
def test_is_frozen():
    # None of the probed names are frozen modules.
    for name in temp_name:
        f = imp.is_frozen(name)
        if f:
            Fail("result should be False")

# test init_frozen
def test_init_frozen():
    # init_frozen returns None for anything that is not a frozen module.
    for name in temp_name:
        f = imp.init_frozen(name)
        if f != None :
            Fail("return object should be None!")

# is_builtin
def test_is_builtin():
    """imp.is_builtin: 0 for non-builtins, 1 for builtins, -1 for non-reinitializable ones."""
    AreEqual(imp.is_builtin("xxx"),0)
    AreEqual(imp.is_builtin("12324"),0)
    AreEqual(imp.is_builtin("&*^^"),0)
    AreEqual(imp.is_builtin("dir"),0)
    AreEqual(imp.is_builtin("__doc__"),0)
    AreEqual(imp.is_builtin("__name__"),0)
    AreEqual(imp.is_builtin("_locle"),0)
    AreEqual(imp.is_builtin("cPickle"),1)
    AreEqual(imp.is_builtin("_random"),1)
    # nt module disabled in Silverlight
    if not is_silverlight:
        AreEqual(imp.is_builtin("nt"),1)
    AreEqual(imp.is_builtin("thread"),1)
    # there are a several differences between ironpython and cpython
    if is_cli or is_silverlight:
        AreEqual(imp.is_builtin("copy_reg"),1)
    else:
        AreEqual(imp.is_builtin("copy_reg"),0)
    # supposedly you can't re-init these
    AreEqual(imp.is_builtin("sys"), -1)
    AreEqual(imp.is_builtin("__builtin__"), -1)
    AreEqual(imp.is_builtin("exceptions"), -1)
    imp.init_builtin("sys")
    imp.init_builtin("__builtin__")
    imp.init_builtin("exceptions")
@skip("win32", "multiple_execute", "stdlib")
def test_sys_path_none_builtins():
    """Builtin modules import fine with None entries anywhere in sys.path."""
    prevPath = sys.path
    #import some builtin modules not previously imported
    try:
        sys.path = [None] + prevPath
        Assert('datetime' not in list(sys.modules.keys()))
        import datetime
        Assert('datetime' in list(sys.modules.keys()))
        sys.path = prevPath + [None]
        # NOTE(review): imports the py3 name 'copyreg' but asserts the py2 key
        # 'copy_reg' in sys.modules -- looks like a partial 2->3 port; confirm intent.
        if not imp.is_builtin('copy_reg'):
            Assert('copy_reg' not in list(sys.modules.keys()))
        import datetime
        import copyreg
        Assert('datetime' in list(sys.modules.keys()))
        Assert('copy_reg' in list(sys.modules.keys()))
        sys.path = [None]
        if not imp.is_builtin('binascii'):
            Assert('binascii' not in list(sys.modules.keys()))
        import datetime
        import copyreg
        import binascii
        Assert('datetime' in list(sys.modules.keys()))
        Assert('copy_reg' in list(sys.modules.keys()))
        Assert('binascii' in list(sys.modules.keys()))
    finally:
        sys.path = prevPath

@skip("silverlight")
def test_sys_path_none_userpy():
    """A user .py module imports fine with a None entry at the front of sys.path."""
    prevPath = sys.path
    #import a *.py file
    temp_syspath_none = path_combine(testpath.public_testdir, "temp_syspath_none.py")
    write_to_file(temp_syspath_none, "stuff = 3.14")
    try:
        sys.path = [None] + prevPath
        import temp_syspath_none
        AreEqual(temp_syspath_none.stuff, 3.14)
    finally:
        sys.path = prevPath
def test_sys_path_none_negative():
    """Importing a missing module fails regardless of None entries in sys.path."""
    prevPath = sys.path
    test_paths = [ [None] + prevPath,
                   prevPath + [None],
                   [None],
                 ]
    try:
        for temp_path in test_paths:
            sys.path = temp_path
            try:
                import does_not_exist
                # Fixed typo: was AssertUnerachable(), which would raise
                # NameError instead of reporting the real test failure.
                AssertUnreachable()
            except ImportError:
                pass
    finally:
        sys.path = prevPath
#init_builtin
def test_init_builtin():
    """init_builtin returns None for unknown names and a module for real builtins."""
    r = imp.init_builtin("c_Pickle")
    AreEqual(r,None)
    r = imp.init_builtin("2345")
    AreEqual(r,None)
    r = imp.init_builtin("xxxx")
    AreEqual(r,None)
    r = imp.init_builtin("^$%$#@")
    AreEqual(r,None)
    r = imp.init_builtin("_locale")
    Assert(r!=None)

#test SEARCH_ERROR, PY_SOURCE,PY_COMPILED,C_EXTENSION,PY_RESOURCE,PKG_DIRECTORY,C_BUILTIN,PY_FROZEN,PY_CODERESOURCE
def test_flags():
    # These constants are a documented, stable part of the imp API.
    AreEqual(imp.SEARCH_ERROR,0)
    AreEqual(imp.PY_SOURCE,1)
    AreEqual(imp.PY_COMPILED,2)
    AreEqual(imp.C_EXTENSION,3)
    AreEqual(imp.PY_RESOURCE,4)
    AreEqual(imp.PKG_DIRECTORY,5)
    AreEqual(imp.C_BUILTIN,6)
    AreEqual(imp.PY_FROZEN,7)
    AreEqual(imp.PY_CODERESOURCE,8)
def test_user_defined_modules():
    """test the importer using user-defined module types"""
    class MockModule(object):
        def __init__(self, name): self.__name__ = name
        def __repr__(self): return 'MockModule("' + self.__name__ + '")'

    TopModule = MockModule("TopModule")
    sys.modules["TopModule"] = TopModule
    SubModule = MockModule("SubModule")
    theObj = object()
    SubModule.Object = theObj
    TopModule.SubModule = SubModule
    sys.modules["TopModule.SubModule"] = SubModule
    # clear the existing names from our namespace...
    x, y = TopModule, SubModule
    del TopModule, SubModule
    # verify we can import TopModule w/ TopModule.SubModule name
    import TopModule.SubModule
    AreEqual(TopModule, x)
    Assert('SubModule' not in dir())
    # verify we can import Object from TopModule.SubModule
    from TopModule.SubModule import Object
    AreEqual(Object, theObj)
    # verify we short-circuit the lookup in TopModule if
    # we have a sys.modules entry...
    SubModule2 = MockModule("SubModule2")
    SubModule2.Object2 = theObj
    sys.modules["TopModule.SubModule"] = SubModule2
    from TopModule.SubModule import Object2
    AreEqual(Object2, theObj)
    del sys.modules['TopModule']
    del sys.modules['TopModule.SubModule']

def test_constructed_module():
    """verify that we don't load arbitrary modules from modules, only truly nested modules"""
    ModuleType = type(sys)
    TopModule = ModuleType("TopModule")
    sys.modules["TopModule"] = TopModule
    SubModule = ModuleType("SubModule")
    SubModule.Object = object()
    TopModule.SubModule = SubModule
    try:
        # No sys.modules entry for the submodule, so this must fail.
        import TopModule.SubModule
        AssertUnreachable()
    except ImportError:
        pass
    del sys.modules['TopModule']

@skip("multiple_execute")
def test_import_from_custom():
    """from-import is routed through a replaced builtins.__import__."""
    import builtins
    try:
        class foo(object):
            b = 'abc'
        def __import__(name, globals, locals, fromlist):
            global received
            received = name, fromlist
            return foo()
        saved = builtins.__import__
        builtins.__import__ = __import__
        from a import b
        AreEqual(received, ('a', ('b', )))
    finally:
        builtins.__import__ = saved

def test_module_name():
    """Module str() tracks __name__ and __file__ (invalid values display as '?')."""
    import imp
    m = imp.new_module('foo')
    AreEqual(m.__str__(), "<module 'foo' (built-in)>")
    m.__name__ = 'bar'
    AreEqual(m.__str__(), "<module 'bar' (built-in)>")
    m.__name__ = None
    AreEqual(m.__str__(), "<module '?' (built-in)>")
    m.__name__ = []
    AreEqual(m.__str__(), "<module '?' (built-in)>")
    m.__file__ = None
    AreEqual(m.__str__(), "<module '?' (built-in)>")
    m.__file__ = []
    AreEqual(m.__str__(), "<module '?' (built-in)>")
    m.__file__ = 'foo.py'
    AreEqual(m.__str__(), "<module '?' from 'foo.py'>")
@skip('silverlight')
def test_cp7007():
    """Modules with operator-like (strange) filenames can still be imported."""
    file_contents = '''
called = 3.14
'''
    strange_module_names = [ "+",
                             "+a",
                             "a+",
                             "++",
                             "+++",
                             "-",
                             "=",
                             "$",
                             "^",
                           ]
    strange_file_names = [ path_combine(testpath.public_testdir, "cp7007", x + ".py") for x in strange_module_names ]
    sys.path.append(testpath.public_testdir + "\\cp7007")
    for x in strange_file_names: write_to_file(x, file_contents)
    try:
        for x in strange_module_names:
            temp_mod = __import__(x)
            AreEqual(temp_mod.called, 3.14)
    finally:
        sys.path.remove(testpath.public_testdir + "\\cp7007")
        delete_files(strange_file_names)

def test_relative_control():
    """test various flavors of relative/absolute import and ensure the right
    arguments are delivered to __import__"""
    def myimport(*args):
        global importArgs
        importArgs = list(args)
        importArgs[1] = None # globals, we don't care about this
        importArgs[2] = None # locals, we don't care about this either
        # we'll pull values out of this class on success, but that's not
        # the important part
        class X:
            abc = 3
            absolute_import = 2
            bar = 5
        return X
    old_import = get_builtins_dict()['__import__']
    try:
        get_builtins_dict()['__import__'] = myimport
        # plain import: no fromlist, no level
        import abc
        AreEqual(importArgs, ['abc', None, None, None])
        # relative imports: empty name, fromlist, and ascending level
        from . import abc
        AreEqual(importArgs, ['', None, None, ('abc',), 1])
        from .. import abc
        AreEqual(importArgs, ['', None, None, ('abc',), 2])
        from ... import abc
        AreEqual(importArgs, ['', None, None, ('abc',), 3])
        from ...d import abc
        AreEqual(importArgs, ['d', None, None, ('abc',), 3])
        from ...d import (abc, bar)
        AreEqual(importArgs, ['d', None, None, ('abc', 'bar'), 3])
        from d import (
            abc,
            bar)
        AreEqual(importArgs, ['d', None, None, ('abc', 'bar')])
        # absolute_import future flag adds an explicit level of 0
        code = """from __future__ import absolute_import\nimport abc"""
        exec(code, globals(), locals())
        AreEqual(importArgs, ['abc', None, None, None, 0])
        def f():exec("from import abc")
        AssertError(SyntaxError, f)
    finally:
        get_builtins_dict()['__import__'] = old_import
@skip("multiple_execute") #http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=26829
def test_import_relative_error():
    # 'from . import *' executed at top level must raise ValueError
    def f(): exec('from . import *')
    AssertError(ValueError, f)

@skip("silverlight") #No access to CPython stdlib
def test_import_hooks_import_precence():
    """__import__ takes precedence over import hooks"""
    # NOTE(review): the function name misspells "precedence"; renaming would
    # change the externally-visible test id, so it is left as-is.
    global myimpCalled
    myimpCalled = None
    class myimp(object):
        def find_module(self, fullname, path=None):
            global myimpCalled
            myimpCalled = fullname, path
    def myimport(*args):
        return 'myimport'
    import idlelib
    import idlelib.idlever
    mi = myimp()
    sys.meta_path.append(mi)
    builtinimp = get_builtins_dict()['__import__']
    try:
        get_builtins_dict()['__import__'] = myimport
        import abc
        AreEqual(abc, 'myimport')
        AreEqual(myimpCalled, None)
        # reload on a built-in hits the loader protocol
        imp.reload(idlelib)
        AreEqual(myimpCalled, ('idlelib', None))
        imp.reload(idlelib.idlever)
        AreEqual(myimpCalled[0], 'idlelib.idlever')
        AreEqual(myimpCalled[1][0][-7:], 'idlelib')
    finally:
        get_builtins_dict()['__import__'] = builtinimp
        sys.meta_path.remove(mi)

def test_import_hooks_bad_importer():
    """Broken sys.path hook entries surface as ImportError, not crashes."""
    class bad_importer(object): pass

    mi = bad_importer()
    sys.path.append(mi)
    try:
        def f(): import does_not_exist
        AssertError(ImportError, f)
    finally:
        sys.path.remove(mi)
    sys.path.append(None)
    try:
        def f(): import does_not_exist
        AssertError(ImportError, f)
    finally:
        sys.path.remove(None)
    class inst_importer(object): pass

    mi = inst_importer()
    # A find_module that itself raises is swallowed into a plain ImportError.
    def f(*args): raise Exception()
    mi.find_module = f
    sys.path.append(mi)
    try:
        def f(): import does_not_exist
        AssertError(ImportError, f)
    finally:
        sys.path.remove(mi)
def test_import_hooks_importer():
    """importer tests - verify the importer gets passed correct values, handles
    errors coming back out correctly"""
    global myimpCalled
    myimpCalled = None
    class myimp(object):
        def find_module(self, fullname, path=None):
            global myimpCalled
            myimpCalled = fullname, path
            if fullname == 'does_not_exist_throw':
                raise Exception('hello')
    mi = myimp()
    sys.meta_path.append(mi)
    try:
        try:
            import does_not_exist
            AssertUnreachable()
        except ImportError: pass
        # top-level name: the finder is called with path=None
        AreEqual(myimpCalled, ('does_not_exist', None))
        try:
            from testpkg1 import blah
            AssertUnreachable()
        except ImportError:
            pass
        # submodule: the finder receives the dotted name and the package __path__
        AreEqual(type(myimpCalled[1]), list)
        AreEqual(myimpCalled[0], 'testpkg1.blah')
        AreEqual(myimpCalled[1][0][-8:], 'testpkg1')
        # exceptions raised by find_module propagate to the importer unchanged
        def f(): import does_not_exist_throw
        AssertErrorWithMessage(Exception, 'hello', f)
    finally:
        sys.meta_path.remove(mi)
@skip("multiple_execute")
def test_import_hooks_loader():
    """loader tests - verify the loader gets the right values, handles errors correctly"""
    global myimpCalled
    myimpCalled = None
    moduleType = type(sys)
    class myloader(object):
        # counts successful loads so each synthesized module gets a unique __file__
        loadcount = 0
        def __init__(self, fullname, path):
            self.fullname = fullname
            self.path = path
        def load_module(self, fullname):
            if fullname == 'does_not_exist_throw':
                raise Exception('hello again')
            elif fullname == 'does_not_exist_return_none':
                return None
            else:
                myloader.loadcount += 1
                module = sys.modules.setdefault(fullname, moduleType(fullname))
                module.__file__ = '<myloader file ' + str(myloader.loadcount) + '>'
                module.fullname = self.fullname
                module.path = self.path
                module.__loader__ = self
                if fullname[-3:] == 'pkg':
                    # create a package
                    module.__path__ = [fullname]
                return module
    class myimp(object):
        def find_module(self, fullname, path=None):
            return myloader(fullname, path)
    mi = myimp()
    sys.meta_path.append(mi)
    try:
        # exceptions raised by load_module propagate unchanged
        def f(): import does_not_exist_throw
        AssertErrorWithMessage(Exception, 'hello again', f)
        # a loader returning None is treated as an ImportError
        def f(): import does_not_exist_return_none
        AssertError(ImportError, f)
        import does_not_exist_create
        AreEqual(does_not_exist_create.__file__, '<myloader file 1>')
        AreEqual(does_not_exist_create.fullname, 'does_not_exist_create')
        AreEqual(does_not_exist_create.path, None)
        # reload goes back through the same loader and re-stamps __file__
        imp.reload(does_not_exist_create)
        AreEqual(does_not_exist_create.__file__, '<myloader file 2>')
        AreEqual(does_not_exist_create.fullname, 'does_not_exist_create')
        AreEqual(does_not_exist_create.path, None)
        import testpkg1.does_not_exist_create_sub
        AreEqual(testpkg1.does_not_exist_create_sub.__file__, '<myloader file 3>')
        AreEqual(testpkg1.does_not_exist_create_sub.fullname, 'testpkg1.does_not_exist_create_sub')
        AreEqual(testpkg1.does_not_exist_create_sub.path[0][-8:], 'testpkg1')
        imp.reload(testpkg1.does_not_exist_create_sub)
        AreEqual(testpkg1.does_not_exist_create_sub.__file__, '<myloader file 4>')
        AreEqual(testpkg1.does_not_exist_create_sub.fullname, 'testpkg1.does_not_exist_create_sub')
        AreEqual(testpkg1.does_not_exist_create_sub.path[0][-8:], 'testpkg1')
        # names ending in 'pkg' get a __path__, so subpackages import through them
        import does_not_exist_create_pkg.does_not_exist_create_subpkg
        AreEqual(does_not_exist_create_pkg.__file__, '<myloader file 5>')
        AreEqual(does_not_exist_create_pkg.fullname, 'does_not_exist_create_pkg')
    finally:
        sys.meta_path.remove(mi)
def test_path_hooks():
    import toimport
    def prepare(f):
        # reset the importer cache so the new hook is consulted for every path entry
        sys.path_importer_cache = {}
        sys.path_hooks = [f]
        if 'toimport' in sys.modules: del sys.modules['toimport']
    def hook(*args): raise Exception('hello')
    prepare(hook)
    def f(): import toimport
    AssertErrorWithMessage(Exception, 'hello', f)
    # ImportError shouldn't propagate out
    def hook(*args): raise ImportError('foo')
    prepare(hook)
    f()
    # returning none should be ok
    def hook(*args): pass
    prepare(hook)
    f()
    sys.path_hooks = []

class meta_loader(object):
    # Loader used by meta_importer: returns a canned value, or raises it when
    # the value is an Exception instance.
    def __init__(self, value):
        self.value = value
    def load_module(self, fullname):
        if type(self.value) is Exception: raise self.value
        return self.value

class meta_importer(object):
    # Finder whose behavior is keyed off the requested module name.
    def find_module(self, fullname, path=None):
        AreEqual(path, None)
        if fullname == 'does_not_exist_throw': raise Exception('hello')
        elif fullname == 'does_not_exist_abc': return meta_loader('abc')
        elif fullname == 'does_not_exist_loader_throw': return meta_loader(Exception('loader'))
        elif fullname == 'does_not_exist_None': return meta_loader(None)
        elif fullname == 'does_not_exist_X':
            class X(object):
                abc = 3
            return meta_loader(X)

def common_meta_import_tests():
    # Shared assertions exercised by both the path-hook and meta-path tests.
    def f(): import does_not_exist_throw
    AssertErrorWithMessage(Exception, 'hello', f)
    import does_not_exist_abc
    AreEqual(does_not_exist_abc, 'abc')
    def f(): import does_not_exist_loader_throw
    AssertErrorWithMessage(Exception, 'loader', f)
    def f(): import does_not_exist_loader_None
    AssertErrorWithMessage(ImportError, 'No module named does_not_exist_loader_None', f)
    from does_not_exist_X import abc
    AreEqual(abc, 3)
def test_path_hooks_importer_and_loader():
    # Install a path hook that recognizes the sentinel path entry '<myname>'
    # and hands back the shared meta_importer.
    path = list(sys.path)
    hooks = list(sys.path_hooks)
    try:
        sys.path.append('<myname>')
        def hook(name):
            if name == "<myname>":
                return meta_importer()
        sys.path_hooks.append(hook)
        common_meta_import_tests()
    finally:
        sys.path = path
        sys.path_hooks = hooks

def test_meta_path():
    # The same importer behaviors, installed via sys.meta_path instead.
    metapath = list(sys.meta_path)
    sys.meta_path.append(meta_importer())
    try:
        common_meta_import_tests()
    finally:
        sys.meta_path = metapath

def test_custom_meta_path():
    """most special methods invoked by the runtime from Python only invoke on the type, not the instance.
    the import methods will invoke on instances including using __getattribute__ for resolution or on
    old-style classes.  This test verifies we do a full member lookup to find these methods"""
    metapath = list(sys.meta_path)
    finder = None
    loader = None
    class K(object):
        def __init__(self):
            self.calls = []
        def __getattribute__(self, name):
            # record every attribute probe and dynamically serve the hooks
            if name != 'calls': self.calls.append(name)
            if name == 'find_module': return finder
            if name == 'load_module': return loader
            return object.__getattribute__(self, name)
    loaderInst = K()
    sys.meta_path.append(loaderInst)
    def ok_finder(name, path):
        loaderInst.calls.append( (name, path) )
        return loaderInst
    def ok_loader(name):
        loaderInst.calls.append(name)
        return 'abc'
    try:
        # dynamically resolve find_module to None
        try:
            import xyz
        except TypeError:
            AreEqual(loaderInst.calls[0], 'find_module')
        loaderInst.calls = []
        # dynamically resolve find_module to a function,
        # and load_module to None.
        finder = ok_finder
        try:
            import xyz
        except TypeError:
            AreEqual(loaderInst.calls[0], 'find_module')
            AreEqual(loaderInst.calls[1], ('xyz', None))
        loaderInst.calls = []
        loader = ok_loader
        import xyz
        AreEqual(xyz, 'abc')
        AreEqual(loaderInst.calls[0], 'find_module')
        AreEqual(loaderInst.calls[1], ('xyz', None))
        AreEqual(loaderInst.calls[2], 'load_module')
        AreEqual(loaderInst.calls[3], 'xyz')
    finally:
        sys.meta_path = metapath
def test_import_kw_args():
    # __import__ accepts all of its arguments by keyword
    AreEqual(__import__(name = 'sys', globals = globals(), locals = locals(), fromlist = [], level = -1), sys)

def test_import_list_empty_string():
    """importing w/ an empty string in the from list should be ignored"""
    x = __import__('testpkg1', {}, {}, [''])
    Assert(not '' in dir(x))

@skip("silverlight") #BUG?
def test_cp7050():
    '''
    This test case complements CPython's test_import.py
    '''
    # module names are case-sensitive and not path-like
    try:
        import Nt
        AssertUnreachable("Should not have been able to import 'Nt'")
    except:
        pass
    AssertError(ImportError, __import__, "Nt")
    AssertError(ImportError, __import__, "Lib")
    AssertError(ImportError, __import__, "iptest.Assert_Util")

def test_meta_path_before_builtins():
    """the meta path should be consulted before builtins are loaded"""
    class MyException(Exception): pass
    class K:
        def find_module(self, name, path):
            if name == "time": return self
            return None
        def load_module(self, name):
            raise MyException
    if 'time' in sys.modules:
        del sys.modules["time"]
    loader = K()
    sys.meta_path.append(loader)
    try:
        import time
        AssertUnreachable()
    except MyException:
        pass
    # once the hook is removed the built-in imports normally again
    sys.meta_path.remove(loader)
    import time
@skip("silverlight") # no nt module on silverlight
def test_file_coding():
    # a '# coding:' declaration controls how the module source is decoded
    try:
        import nt
        f = file('test_coding_mod.py', 'wb+')
        f.write("# coding: utf-8\nx = '\xe6ble'\n")
        f.close()
        import test_coding_mod
        AreEqual(test_coding_mod.x[0], '\xe6')
    finally:
        nt.unlink('test_coding_mod.py')
    try:
        # a UTF-8 BOM before the coding comment must also be honored
        f = file('test_coding_2.py', 'wb+')
        f.write("\xef\xbb\xbf# -*- coding: utf-8 -*-\n")
        f.write("x = u'ABCDE'\n")
        f.close()
        import test_coding_2
        AreEqual(test_coding_2.x, 'ABCDE')
    finally:
        nt.unlink('test_coding_2.py')
    try:
        # the coding line must not throw off reported traceback line numbers
        f = file('test_coding_3.py', 'wb+')
        f.write("# -*- coding: utf-8 -*-\n")
        f.write("raise Exception()")
        f.close()
        try:
            import test_coding_3
        except Exception as e:
            AreEqual(sys.exc_info()[2].tb_next.tb_lineno, 2)
    finally:
        nt.unlink('test_coding_3.py')
def test_module_subtype():
    # Subclasses of the module type may override attribute access.  The same
    # checks run against a raw module, a real *.py module, and a package:
    # __getattribute__ raising falls back to __getattr__; both raising yields
    # AttributeError; 'baz' is routed to the real instance storage.
    class x(type(sys)):
        def __init__(self): self.baz = 100
        def __getattr__(self, name):
            if name == 'qux': raise AttributeError
            return 42
        def __getattribute__(self, name):
            if name == 'foo' or name == 'qux': raise AttributeError
            if name == 'baz': return type(sys).__getattribute__(self, name)
            return 23
    a = x()
    AreEqual(a.foo, 42)
    AreEqual(a.bar, 23)
    AreEqual(a.baz, 100)
    AssertError(AttributeError, lambda : a.qux)
    #Real *.py file
    import testpkg1.mod1
    class x(type(testpkg1.mod1)):
        def __init__(self): self.baz = 100
        def __getattr__(self, name):
            if name == 'qux': raise AttributeError
            return 42
        def __getattribute__(self, name):
            if name == 'foo' or name == 'qux': raise AttributeError
            if name == 'baz': return type(sys).__getattribute__(self, name)
            return 23
    a = x()
    AreEqual(a.foo, 42)
    AreEqual(a.bar, 23)
    AreEqual(a.baz, 100)
    AssertError(AttributeError, lambda : a.qux)
    #Package
    import testpkg1
    class x(type(testpkg1)):
        def __init__(self): self.baz = 100
        def __getattr__(self, name):
            if name == 'qux': raise AttributeError
            return 42
        def __getattribute__(self, name):
            if name == 'foo' or name == 'qux': raise AttributeError
            if name == 'baz': return type(sys).__getattribute__(self, name)
            return 23
    a = x()
    AreEqual(a.foo, 42)
    AreEqual(a.bar, 23)
    AreEqual(a.baz, 100)
    AssertError(AttributeError, lambda : a.qux)
@runonly("stdlib")
def test_cp13736():
    # imp.find_module/load_module on a freshly written source file, then
    # subclass the loaded type via new.classobj
    _f_imp_cp13736 = path_combine(testpath.public_testdir, "impcp13736.py")
    shortName = _f_imp_cp13736.rsplit("\\", 1)[1].split(".")[0]
    write_to_file(_f_imp_cp13736, """
class Test(object):
    def a(self):
        return 34
""")
    import sys
    if sys.platform=="win32" and "." not in sys.path:
        sys.path.append(".")
    import new
    import imp
    moduleInfo = imp.find_module(shortName)
    module = imp.load_module(shortName, moduleInfo[0], moduleInfo[1], moduleInfo[2])
    t = new.classobj('Test1', (getattr(module, 'Test'),), {})
    i = t()
    AreEqual(i.a(), 34)

def test_import_path_seperator():
    """verify using the path seperator in a direct call will result in an ImportError"""
    # NOTE: 'seperator' is a historical misspelling kept in the test name
    AssertError(ImportError, __import__, 'iptest\\warning_util')
    __import__('iptest.warning_util')

def test_load_package():
    # imp.load_package registers the package in sys.modules, even when the
    # supplied path does not exist
    import testpkg1
    pkg = imp.load_package('libcopy', testpkg1.__path__[0])
    AreEqual(sys.modules['libcopy'], pkg)
    pkg = imp.load_package('some_new_pkg', 'some_path_that_does_not_and_never_will_exist')
    AreEqual(sys.modules['some_new_pkg'], pkg)

# NullImporter isn't used on Silverlight because we cannot detect the presence dirs
@skip("silverlight")
def test_NullImporter():
    # NullImporter cannot be subclassed
    def f():
        class x(imp.NullImporter): pass
    AssertError(TypeError, f)
    AreEqual(imp.NullImporter.__module__, 'imp')
    sys.path.append('directory_that_does_not_exist')
    try:
        import SomeFileThatDoesNotExist
    except ImportError:
        pass
    # a missing directory gets a NullImporter entry in the importer cache
    Assert(isinstance(sys.path_importer_cache['directory_that_does_not_exist'], imp.NullImporter))

def test_get_frozen_object():
    # frozen objects not supported, this always fails
    AssertError(ImportError, imp.get_frozen_object, 'foo')

def test_cp17459():
    AreEqual(imp.IMP_HOOK, 9)
def test_module_getattribute():
    # module.__getattribute__ must agree with getattr both for dict-injected
    # special attributes and for everything defined on the module type
    mymod = type(sys)('foo', 'bar')
    attrs = ['__delattr__', '__doc__', '__hash__', '__init__', '__new__', '__reduce__', '__reduce_ex__', '__str__']
    for attr in attrs:
        d = mymod.__dict__
        d[attr] = 42
        AreEqual(getattr(mymod, attr), 42)
        AreEqual(mymod.__getattribute__(attr), 42)
        AreEqual(mymod.__getattribute__(attr), getattr(mymod, attr))
        del d[attr]
    for x in dir(type(sys)):
        AreEqual(mymod.__getattribute__(x), getattr(mymod, x))

@skip("silverlight", "win32")
def test_import_lookup_after():
    # a module may replace its own sys.modules entry while importing; the
    # importer must hand the caller the replacement object
    import nt
    try:
        _x_mod = path_combine(testpath.public_testdir, "x.py")
        _y_mod = path_combine(testpath.public_testdir, "y.py")
        write_to_file(_x_mod, """
import sys
oldmod = sys.modules['y']
newmod = object()
sys.modules['y'] = newmod
""")
        write_to_file(_y_mod, "import x")
        import y
        AreEqual(type(y), object)
    finally:
        nt.unlink(_x_mod)
        nt.unlink(_y_mod)

@skip("silverlight", "win32")
def test_imp_load_source():
    # imp.load_source with an explicit open file object
    import nt
    try:
        _x_mod = path_combine(testpath.public_testdir, "x.py")
        write_to_file(_x_mod, """
'''some pydoc'''
X = 3.14
""")
        with open(_x_mod, "r") as f:
            x = imp.load_source("test_imp_load_source_x",
                                _x_mod,
                                f)
        AreEqual(x.__name__, "test_imp_load_source_x")
        AreEqual(x.X, 3.14)
        AreEqual(x.__doc__, '''some pydoc''')
    finally:
        nt.unlink(_x_mod)

@skip("silverlight")
def test_imp_load_compiled():
    #http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=17459
    # on IronPython load_compiled is stubbed out and returns None
    if not is_cpython:
        AreEqual(imp.load_compiled("", ""), None)
    try:
        _x_mod = path_combine(testpath.public_testdir, "x.py")
        write_to_file(_x_mod, "")
        with open(_x_mod, "r") as f:
            AreEqual(imp.load_compiled("", "", f), None)
    finally:
        nt.unlink(_x_mod)

@skip("silverlight")
def test_imp_load_dynamic():
    #http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=17459
    # on IronPython load_dynamic is stubbed out and returns None
    if not is_cpython:
        AreEqual(imp.load_dynamic("", ""), None)
    try:
        _x_mod = path_combine(testpath.public_testdir, "x.py")
        write_to_file(_x_mod, "")
        with open(_x_mod, "r") as f:
            AreEqual(imp.load_dynamic("", "", f), None)
    finally:
        nt.unlink(_x_mod)
def test_override_dict():
    # __dict__ can be overridden on module subclasses via properties or
    # descriptors; instance-dict entries named '__dict__' must be ignored
    class M(type(sys)):
        @property
        def __dict__(self):
            return 'not a dict'
        @__dict__.setter
        def __dict__(self, value):
            global setCalled
            setCalled = True
    a = M('foo')
    AreEqual(a.__dict__, 'not a dict')
    a.__dict__ = 42
    AreEqual(setCalled, True)
    class MyDesc(object):
        def __get__(self, instance, context):
            return 'abc'
    class M(type(sys)):
        __dict__ = MyDesc()
    a = M('foo')
    AreEqual(a.__dict__, 'abc')
    # instance members won't be found
    class M(type(sys)): pass
    a = M('foo')
    a.__dict__['__dict__'] = 42
    AreEqual(type(a.__dict__), dict)
    AreEqual(a.__getattribute__('__dict__'), a.__dict__)
    # properties/methods defined on the subtype behave like on any new-style class
    class M(type(sys)):
        def baz(self):
            return 'hello'
        @property
        def foo(self):
            return 'hello'
        @foo.setter
        def foo(self, value):
            self.bar = value
        @foo.deleter
        def foo(self):
            del self.bar
    a = M('hello')
    AreEqual(a.__getattribute__('baz'), a.baz)
    AreEqual(a.baz(), 'hello')
    a.__setattr__('foo', 42)
    AreEqual(a.__dict__['bar'], 42)
    a.__delattr__('foo')
    Assert('bar' not in a.__dict__)
    # mix-in an old-style class
    class old_class:
        def old_method(self):
            return 42
        @property
        def old_prop(self):
            return 'abc'
        @old_prop.setter
        def old_prop(self, value):
            self.op = value
        @old_prop.deleter
        def old_prop(self):
            del self.op
    M.__bases__ += (old_class, )
    AreEqual(a.old_method(), 42)
    a.__setattr__('old_prop', 42)
    AreEqual(a.__dict__['op'], 42)
    a.__delattr__('old_prop')
    Assert('op' not in a.__dict__)
    # change the class
    class M2(type(sys)): pass
    a.__setattr__('__class__', M2)
    AreEqual(type(a), M2)
    AssertErrorWithMessage(TypeError, "readonly attribute", a.__setattr__, '__dict__', int)
    AssertErrorWithMessage(TypeError, "readonly attribute", a.__delattr__, '__dict__')
    # __setattr__/__delattr__ no non-derived type
    m = type(sys)('foo')
    AssertErrorWithMessage(TypeError, "__class__ assignment: only for heap types", m.__setattr__, '__class__', int)
    AssertErrorWithMessage(TypeError, "readonly attribute", m.__setattr__, '__dict__', int)
    AssertErrorWithMessage(TypeError, "can't delete __class__ attribute", m.__delattr__, '__class__')
    AssertErrorWithMessage(TypeError, "readonly attribute", m.__delattr__, '__dict__')
@skip("silverlight")
def test_ximp_load_module():
    # imp.load_module with an explicit file object re-executes into the
    # module already registered in sys.modules
    mod = imp.new_module('my_module_test')
    mod.__file__ = 'does_not_exist.py'
    sys.modules['my_module_test'] = mod
    f = file('test.py', 'w+')
    f.write('x = 42')
    f.close()
    with file('test.py') as inp_file:
        imp.load_module('my_module_test', inp_file, 'does_not_exist.py', ('', 'U', 1))
    import nt
    nt.unlink('test.py')
    AreEqual(mod.x, 42)

@skip("silverlight") # no stdlib in silverlight
def test_import_string_from_list_cp26098():
    # a plain string (instead of a list) is accepted as the fromlist
    AreEqual(__import__('email.mime.application', globals(), locals(), 'MIMEApplication').__name__, 'email.mime.application')

@skip("win32", "silverlight")
def test_new_builtin_modules():
    import clr
    clr.AddReference('IronPythonTest')
    import test_new_module
    dir(test_new_module)
    # static members should still be accessible
    AreEqual(test_new_module.StaticMethod(), 42)
    AreEqual(test_new_module.StaticField, 42)
    AreEqual(test_new_module.StaticProperty, 42)
    # built-in functions shouldn't appear to be bound
    AreEqual(test_new_module.test_method.__doc__, 'test_method() -> object\r\n')
    AreEqual(test_new_module.test_method.__self__, None)
    # unassigned attributes should throw as if the callee failed to look them up
    AssertError(NameError, lambda : test_new_module.get_test_attr())
    # unassigned builtins should return the built-in as if the caller looked them up
    AreEqual(test_new_module.get_min(), min)
    # we should be able to assign to values
    test_new_module.test_attr = 42
    # and the built-in module should see them
    AreEqual(test_new_module.get_test_attr(), 42)
    AreEqual(test_new_module.test_attr, 42)
    # static members take precedence over things in globals
    AreEqual(test_new_module.test_overlap_method(), 42)
    AreEqual(type(test_new_module.test_overlap_type), type)
    test_new_module.inc_value()
    AreEqual(test_new_module.get_value(), 1)
    test_new_module.inc_value()
    AreEqual(test_new_module.get_value(), 2)
    # can't access private fields
    AssertError(AttributeError, lambda : test_new_module._value)

#------------------------------------------------------------------------------
run_test(__name__)
if is_silverlight==False:
    delete_all_f(__name__)
|
|
#
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# This is derived from gmsk2_pkt.py.
#
# Modified by: Thomas Schmid, Leslie Choong, Sanna Leidelof
#
# import Numeric
from gnuradio import gr, gru
from gnuradio.digital import packet_utils
from gnuradio import ucla
from gnuradio.ucla_blks import crc16
import gnuradio.gr.gr_threading as _threading
import ieee802_15_4
import struct
MAX_PKT_SIZE = 128
def make_ieee802_15_4_packet(FCF, seqNr, addressInfo, payload, pad_for_usrp=True, preambleLength=4, SFD=0xA7):
    """
    Build a 802_15_4 packet

    @param FCF: 2 bytes defining the type of frame.
    @type FCF: string
    @param seqNr: 1 byte sequence number.
    @type seqNr: byte
    @param addressInfo: 0 to 20 bytes of address information.
    @type addressInfo: string
    @param payload: The payload of the packet. The maximal size of the message
                    can not be larger than 128.
    @type payload: string
    @param pad_for_usrp: If we should add 0s at the end to pad for the USRP.
    @type pad_for_usrp: boolean
    @param preambleLength: Length of the preamble. Currently ignored.
    @type preambleLength: int
    @param SFD: Start of frame descriptor. This is by default set to the IEEE 802.15.4 standard,
                but can be changed if required.
    @type SFD: byte
    @raise ValueError: if any field exceeds its allowed size.
    """
    if len(FCF) != 2:
        raise ValueError("len(FCF) must be equal to 2")
    if seqNr > 255:
        # one-byte field: the check is seqNr > 255, so the limit is inclusive
        raise ValueError("seqNr must be <= 255")
    if len(addressInfo) > 20:
        raise ValueError("len(addressInfo) must be in [0, 20]")
    # MPDU overhead is 2 (FCF) + 1 (seqNr) + addressInfo + 2 (FCS) = 5 + addr
    max_payload = MAX_PKT_SIZE - 5 - len(addressInfo)
    if len(payload) > max_payload:
        # report the real limit (previously this message always printed MAX_PKT_SIZE)
        raise ValueError("len(payload) must be in [0, %d]" % (max_payload))
    # SHR: four zero preamble bytes followed by the start-of-frame delimiter
    SHR = struct.pack("BBBBB", 0, 0, 0, 0, SFD)
    # PHR: length of the MPDU including the 2-byte FCS
    PHR = struct.pack("B", 3 + len(addressInfo) + len(payload) + 2)
    MPDU = FCF + struct.pack("B", seqNr) + addressInfo + payload
    # FCS: CRC-16 computed over the whole MPDU
    crc = crc16.CRC16()
    crc.update(MPDU)
    FCS = struct.pack("H", crc.intchecksum())

    pkt = ''.join((SHR, PHR, MPDU, FCS))
    if pad_for_usrp:
        # note that we have 16 samples which go over the USB for each bit
        pkt = pkt + (_npadding_bytes(len(pkt), 8) * '\x00')
    return pkt
def _npadding_bytes(pkt_byte_len, spb):
"""
Generate sufficient padding such that each packet ultimately ends
up being a multiple of 512 bytes when sent across the USB. We
send 4-byte samples across the USB (16-bit I and 16-bit Q), thus
we want to pad so that after modulation the resulting packet
is a multiple of 128 samples.
@param ptk_byte_len: len in bytes of packet, not including padding.
@param spb: samples per baud == samples per bit (1 bit / baud with GMSK)
@type spb: int
@returns number of bytes of padding to append.
"""
modulus = 128
byte_modulus = gru.lcm(modulus/8, spb) / spb
r = pkt_byte_len % byte_modulus
if r == 0:
return 0
return byte_modulus - r
def make_FCF(frameType=1, securityEnabled=0, framePending=0, acknowledgeRequest=0, intraPAN=0, destinationAddressingMode=0, sourceAddressingMode=0):
    """
    Build the FCF for the 802_15_4 packet

    Packs the seven sub-fields into a 2-byte frame control field.
    @raise ValueError: if any sub-field exceeds its bit width.
    """
    # each message now names the offending parameter (they were all blank)
    if frameType >= 2**3:
        raise ValueError("frameType must be < 8")
    if securityEnabled >= 2**1:
        raise ValueError("securityEnabled must be < 2")
    if framePending >= 2**1:
        raise ValueError("framePending must be < 2")
    if acknowledgeRequest >= 2**1:
        raise ValueError("acknowledgeRequest must be < 2")
    if intraPAN >= 2**1:
        raise ValueError("intraPAN must be < 2")
    if destinationAddressingMode >= 2**2:
        raise ValueError("destinationAddressingMode must be < 4")
    if sourceAddressingMode >= 2**2:
        raise ValueError("sourceAddressingMode must be < 4")

    return struct.pack("H", frameType
                       + (securityEnabled << 3)
                       + (framePending << 4)
                       + (acknowledgeRequest << 5)
                       + (intraPAN << 6)
                       + (destinationAddressingMode << 10)
                       + (sourceAddressingMode << 14))
class ieee802_15_4_mod_pkts(gr.hier_block2):
    """
    IEEE 802.15.4 modulator that is a GNU Radio source.

    Send packets by calling send_pkt
    """
    def __init__(self, pad_for_usrp=True, *args, **kwargs):
        """
        Hierarchical block for the 802_15_4 O-QPSK modulation.

        Packets to be sent are enqueued by calling send_pkt.
        The output is the complex modulated signal at baseband.

        @param msgq_limit: maximum number of messages in message queue
        @type msgq_limit: int
        @param pad_for_usrp: If true, packets are padded such that they end up a multiple of 128 samples

        See 802_15_4_mod for remaining parameters
        """
        # NOTE(review): if 'msgq_limit' is absent the KeyError is silently
        # swallowed and self.msgq_limit is never set, which fails later with
        # AttributeError; 'log' is read with get() so it stays in kwargs and
        # is forwarded to ieee802_15_4_mod - confirm both are intended.
        try:
            self.msgq_limit = kwargs.pop('msgq_limit')
            self.log = kwargs.get('log')
        except KeyError:
            pass
        gr.hier_block2.__init__(self, "ieee802_15_4_mod_pkts",
                                gr.io_signature(0, 0, 0),  # Input
                                gr.io_signature(1, 1, gr.sizeof_gr_complex))  # Output
        self.pad_for_usrp = pad_for_usrp
        # accepts messages from the outside world
        self.pkt_input = gr.message_source(gr.sizeof_char, self.msgq_limit)
        self.ieee802_15_4_mod = ieee802_15_4.ieee802_15_4_mod(self, *args, **kwargs)
        self.connect(self.pkt_input, self.ieee802_15_4_mod, self)
        if self.log:
            # tap the raw byte stream before modulation for debugging
            self.connect(self.pkt_input, gr.file_sink(gr.sizeof_char, 'tx-input.dat'))

    def send_pkt(self, seqNr, addressInfo, payload='', eof=False):
        """
        Send the payload.

        Frames the payload into an 802.15.4 packet and enqueues it for
        modulation; with eof=True it signals end-of-stream instead.

        @param seqNr: sequence number of packet
        @type seqNr: byte
        @param addressInfo: address information for packet
        @type addressInfo: string
        @param payload: data to send
        @type payload: string
        """
        if eof:
            msg = gr.message(1) # tell self.pkt_input we're not sending any more packets
        else:
            FCF = make_FCF()
            pkt = make_ieee802_15_4_packet(FCF,
                                           seqNr,
                                           addressInfo,
                                           payload,
                                           self.pad_for_usrp)
            print "pkt =", packet_utils.string_to_hex_list(pkt), len(pkt)
            msg = gr.message_from_string(pkt)
        self.pkt_input.msgq().insert_tail(msg)
class ieee802_15_4_demod_pkts(gr.hier_block2):
    """
    802_15_4 demodulator that is a GNU Radio sink.

    The input is complex baseband.  When packets are demodulated, they are passed to the
    app via the callback.
    """
    def __init__(self, *args, **kwargs):
        """
        Hierarchical block for O-QPSK demodulation.

        The input is the complex modulated signal at baseband.
        Demodulated packets are sent to the handler.

        @param callback: function of two args: ok, payload
        @type callback: ok: bool; payload: string
        @param threshold: detect access_code with up to threshold bits wrong (-1 -> use default)
        @type threshold: int

        See ieee802_15_4_demod for remaining parameters.
        """
        # NOTE(review): if any of these keys is missing the KeyError is
        # swallowed and the corresponding attribute is never set, causing an
        # AttributeError further down - confirm all three are required.
        try:
            self.callback = kwargs.pop('callback')
            self.threshold = kwargs.pop('threshold')
            self.chan_num = kwargs.pop('channel')
        except KeyError:
            pass
        gr.hier_block2.__init__(self, "ieee802_15_4_demod_pkts",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex),  # Input
                                gr.io_signature(0, 0, 0))  # Output
        self._rcvd_pktq = gr.msg_queue()          # holds packets from the PHY
        self.ieee802_15_4_demod = ieee802_15_4.ieee802_15_4_demod(self, *args, **kwargs)
        self._packet_sink = ucla.ieee802_15_4_packet_sink(self._rcvd_pktq, self.threshold)
        self.connect(self, self.ieee802_15_4_demod, self._packet_sink)
        # background thread that CRC-checks queued packets and invokes callback
        self._watcher = _queue_watcher_thread(self._rcvd_pktq, self.callback, self.chan_num)

    def carrier_sensed(self):
        """
        Return True if we detect carrier.
        """
        return self._packet_sink.carrier_sensed()
class _queue_watcher_thread(_threading.Thread):
    # Daemon thread that pops demodulated packets off the PHY message queue,
    # verifies their CRC-16, and hands them to the user callback.
    def __init__(self, rcvd_pktq, callback, chan_num):
        _threading.Thread.__init__(self)
        self.setDaemon(1)
        self.rcvd_pktq = rcvd_pktq
        self.callback = callback
        self.chan_num = chan_num
        self.prev_crc = -1          # checksum of the previously delivered packet
        self.keep_running = True
        self.start()

    def run(self):
        while self.keep_running:
            print "802_15_4_pkt: waiting for packet"
            msg = self.rcvd_pktq.delete_head()
            ok = 0
            payload = msg.to_string()
            print "received packet "
            # need at least the LQI byte plus the 2-byte FCS to check anything
            if len(payload) > 2:
                crc = crc16.CRC16()
            else:
                print "too small:", len(payload)
                continue
            # Calculate CRC skipping over LQI and CRC
            crc.update(payload[1:-2])
            crc_check = crc.intchecksum()
            print "checksum: %s, received: %s" % (crc_check,
                                                  str(ord(payload[-2]) + ord(payload[-1])*256))
            # reassemble the received 16-bit checksum from the last two bytes
            # (low byte first) and compare
            ok = (crc_check == ord(payload[-2]) + ord(payload[-1])*256)
            msg_payload = payload
            # suppress back-to-back packets carrying an identical checksum
            if self.prev_crc != crc_check:
                self.prev_crc = crc_check
                if self.callback:
                    self.callback(ok, msg_payload, self.chan_num)
class chan_802_15_4:
    # IEEE 802.15.4 2.4 GHz channel table: channels 11-26 sit on a 5 MHz
    # grid starting at 2405 MHz; values are center frequencies in Hz.
    chan_map = dict((num, (2405 + 5 * (num - 11)) * 1e6)
                    for num in range(11, 27))
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import upgrade_summary
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.script import Script
from resource_management.core.resources.system import Execute, File
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.libraries.functions.format import format
from resource_management.core.logger import Logger
from resource_management.core import shell
from ranger_service import ranger_service
from setup_ranger_xml import setup_ranger_audit_solr, setup_ranger_admin_passwd_change, update_password_configs
from resource_management.libraries.functions import solr_cloud_util
from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
from resource_management.libraries.functions.constants import Direction
import os, errno
class RangerAdmin(Script):
def install(self, env):
    # Install packages and keep a one-time pristine copy of install.properties.
    self.install_packages(env)
    import params
    env.set_params(params)
    # taking backup of install.properties file (only once: skipped when the
    # backup already exists, and only when the original is present)
    Execute(('cp', '-f', format('{ranger_home}/install.properties'), format('{ranger_home}/install-backup.properties')),
        not_if = format('ls {ranger_home}/install-backup.properties'),
        only_if = format('ls {ranger_home}/install.properties'),
        sudo = True
    )
    # call config and setup db only in case of HDP version < 2.6
    if not params.stack_supports_ranger_setup_db_on_start:
        self.configure(env, setup_db=True)
def stop(self, env, upgrade_type=None):
    # Stop Ranger admin and clean up its pid file when the stack uses one.
    import params
    env.set_params(params)
    if upgrade_type == UPGRADE_TYPE_NON_ROLLING and params.upgrade_direction == Direction.UPGRADE:
        # during an express upgrade make sure the stop script is owned by the
        # ranger unix user before invoking it
        if params.stack_supports_rolling_upgrade and not params.stack_supports_config_versioning and os.path.isfile(format('{ranger_home}/ews/stop-ranger-admin.sh')):
            File(format('{ranger_home}/ews/stop-ranger-admin.sh'),
                owner=params.unix_user,
                group = params.unix_group
            )
    Execute(format('{params.ranger_stop}'), environment={'JAVA_HOME': params.java_home}, user=params.unix_user)
    if params.stack_supports_pid:
        # remove the stale pid file so status checks don't see a dead process
        File(params.ranger_admin_pid_file,
            action = "delete"
        )
def pre_upgrade_restart(self, env, upgrade_type=None):
    # Switch the stack packages to the target version and drop a marker file
    # so status() can tell an upgrade is in progress.
    import params
    env.set_params(params)
    stack_select.select_packages(params.version)
    self.set_ru_rangeradmin_in_progress(params.upgrade_marker_file)

def post_upgrade_restart(self,env, upgrade_type=None):
    # Remove the upgrade-in-progress marker created by pre_upgrade_restart.
    import params
    env.set_params(params)
    if os.path.isfile(params.upgrade_marker_file):
        os.remove(params.upgrade_marker_file)
def start(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    # setup db only if in case HDP version is > 2.6
    self.configure(env, upgrade_type=upgrade_type, setup_db=params.stack_supports_ranger_setup_db_on_start)

    if params.stack_supports_infra_client and params.audit_solr_enabled and params.is_solrCloud_enabled:
        # provision the SolrCloud collection used for Ranger audit before start
        solr_cloud_util.setup_solr_client(params.config, custom_log4j = params.custom_log4j)
        setup_ranger_audit_solr()

    update_password_configs()
    ranger_service('ranger_admin')
def status(self, env):
    import status_params
    env.set_params(status_params)

    if status_params.stack_supports_pid:
        # newer stacks write a pid file; delegate to the standard check
        check_process_status(status_params.ranger_admin_pid_file)
        return

    # older stacks: look for the admin process in the process table
    cmd = 'ps -ef | grep proc_rangeradmin | grep -v grep'
    code, output = shell.call(cmd, timeout=20)
    if code != 0:
        if self.is_ru_rangeradmin_in_progress(status_params.upgrade_marker_file):
            # don't fail status while a stack upgrade has the process down
            Logger.info('Ranger admin process not running - skipping as stack upgrade is in progress')
        else:
            Logger.debug('Ranger admin process not running')
            raise ComponentIsNotRunning()
    pass
def configure(self, env, upgrade_type=None, setup_db=False):
    import params
    env.set_params(params)
    # choose the XML-based or legacy properties-based setup implementation
    if params.xml_configurations_supported:
        from setup_ranger_xml import ranger
    else:
        from setup_ranger import ranger

    # set up db if we are not upgrading and setup_db is true
    if setup_db and upgrade_type is None:
        if params.xml_configurations_supported:
            from setup_ranger_xml import setup_ranger_db
            setup_ranger_db()

    ranger('ranger_admin', upgrade_type=upgrade_type)

    # set up java patches if we are not upgrading and setup_db is true
    if setup_db and upgrade_type is None:
        if params.xml_configurations_supported:
            from setup_ranger_xml import setup_java_patch
            setup_java_patch()

        if params.stack_supports_ranger_admin_password_change:
            setup_ranger_admin_passwd_change()
def set_ru_rangeradmin_in_progress(self, upgrade_marker_file):
    """Write a marker file indicating a rolling upgrade is in progress.

    Creates the parent directory when needed; a concurrent creation of that
    directory (EEXIST) is tolerated, other OS errors propagate.
    """
    config_dir = os.path.dirname(upgrade_marker_file)
    try:
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        # BUG FIX: use a context manager so the handle is closed even when
        # the write fails (the original leaked the file object on error).
        with open(upgrade_marker_file, 'w') as ofp:
            ofp.write("Starting Upgrade")
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(config_dir):
            pass
        else:
            raise
def is_ru_rangeradmin_in_progress(self, upgrade_marker_file):
    """Return True when the rolling-upgrade marker file is present."""
    marker_exists = os.path.isfile(upgrade_marker_file)
    return marker_exists
def setup_ranger_database(self, env):
    """Apply the Ranger DB schema for the target stack version during an upgrade."""
    import params
    env.set_params(params)
    upgrade_stack = stack_select._get_upgrade_stack()
    if upgrade_stack is None:
        raise Fail('Unable to determine the stack and stack version')
    stack_version = upgrade_stack[1]
    # schema setup only exists for xml-configured stacks, and only on UPGRADE
    if params.xml_configurations_supported and params.upgrade_direction == Direction.UPGRADE:
        Logger.info(format('Setting Ranger database schema, using version {stack_version}'))
        from setup_ranger_xml import setup_ranger_db
        setup_ranger_db(stack_version=stack_version)
def setup_ranger_java_patches(self, env):
    """Apply Ranger java patches for the target stack version during an upgrade."""
    import params
    env.set_params(params)
    upgrade_stack = stack_select._get_upgrade_stack()
    if upgrade_stack is None:
        raise Fail('Unable to determine the stack and stack version')
    stack_version = upgrade_stack[1]
    # java patches only exist for xml-configured stacks, and only on UPGRADE
    if params.xml_configurations_supported and params.upgrade_direction == Direction.UPGRADE:
        Logger.info(format('Applying Ranger java patches, using version {stack_version}'))
        from setup_ranger_xml import setup_java_patch
        setup_java_patch(stack_version=stack_version)
def set_pre_start(self, env):
    """Stack-select the RANGER_ADMIN packages before starting during an upgrade.

    Uses the upgrade summary, when available, to decide between STANDARD and
    PATCH orchestration scopes.
    """
    import params
    env.set_params(params)
    orchestration = stack_select.PACKAGE_SCOPE_STANDARD
    summary = upgrade_summary.get_upgrade_summary()
    if summary is not None:
        orchestration = summary.orchestration
        if orchestration is None:
            raise Fail("The upgrade summary does not contain an orchestration type")
        # patch/maint upgrades only stack-select a subset of packages
        if orchestration.upper() in stack_select._PARTIAL_ORCHESTRATION_SCOPES:
            orchestration = stack_select.PACKAGE_SCOPE_PATCH
    stack_select_packages = stack_select.get_packages(orchestration, service_name = "RANGER", component_name = "RANGER_ADMIN")
    if stack_select_packages is None:
        raise Fail("Unable to get packages for stack-select")
    Logger.info("RANGER_ADMIN component will be stack-selected to version {0} using a {1} orchestration".format(params.version, orchestration.upper()))
    for stack_select_package_name in stack_select_packages:
        stack_select.select(stack_select_package_name, params.version)
def get_log_folder(self):
    """Return the directory where Ranger Admin writes its logs."""
    from params import admin_log_dir
    return admin_log_dir
def get_user(self):
    """Return the unix user account that runs Ranger Admin."""
    from params import unix_user
    return unix_user
if __name__ == "__main__":
    # Ambari agent entry point: dispatch the requested lifecycle command.
    RangerAdmin().execute()
|
|
"""Create publication class and contain methods for data fetching."""
import time
import requests
import json
from snovault import (
collection,
load_schema,
calculated_property,
CONNECTION
)
from snovault.crud_views import (
collection_add,
item_edit,
)
from snovault.attachment import ItemWithAttachment
from snovault.util import debug_log
from .base import (
Item,
lab_award_attribution_embed_list
)
from pyramid.view import (
view_config
)
from html.parser import HTMLParser
from snovault.validators import (
validate_item_content_post,
validate_item_content_put,
validate_item_content_patch,
validate_item_content_in_place,
no_validate_item_content_post,
no_validate_item_content_put,
no_validate_item_content_patch
)
################################################
# Outside methods for online data fetch
################################################
def find_best_date(date_data):
    """Pick the best available publication date from raw PubMed date fields.

    Prefers DP (may yield a partial YYYY or YYYY-MM date), then DEP, then DA
    (both must yield a full YYYY-MM-DD). Returns None when nothing usable is
    found.
    """
    month_map = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
                 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
                 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
    if 'DP' in date_data:
        parts = [p.strip() for p in date_data['DP'].split(' ')]
        # only usable when the first token is a 4-digit year
        if parts and len(parts[0]) == 4:
            result = parts.pop(0)
            if parts:
                month = month_map.get(parts.pop(0))
                if month:
                    result += '-{}'.format(month)
                    if parts:
                        day = parts.pop(0)
                        if len(day) <= 2:
                            result += '-{}'.format(day.zfill(2))
            return result
    compact = None
    if 'DEP' in date_data:
        compact = date_data['DEP']
        if len(compact) != 8:  # DEP must be YYYYMMDD
            compact = None
    if not compact and 'DA' in date_data:
        compact = date_data['DA']
    if compact:
        formatted = compact[:4] + "-" + compact[4:6] + "-" + compact[6:8]
        if len(formatted) == 10:
            return formatted
    return None
def fetch_pubmed(PMID):
    """Fetch title, abstract, authors, journal and date for a PubMed record.

    Takes the numeric part of the PMID. Returns a dict keyed by 4DN
    publication property names, or {} when the record cannot be fetched.
    """
    field2prop = {'TI': 'title', 'AB': 'abstract', 'DP': 'date_published',
                  'DEP': 'date_published', 'DA': 'date_published', 'JT': 'journal',
                  'AU': 'authors', 'CN': 'authors'}
    pub_data = {v: None for v in field2prop.values()}
    pub_data['authors'] = []
    pub_data['date_published'] = {}
    NIHe = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
    NIHw = "https://www.ncbi.nlm.nih.gov/pubmed/"
    pub_data['url'] = NIHw + PMID
    www = "{NIH}efetch.fcgi?db=pubmed&id={id}&rettype=medline".format(NIH=NIHe, id=PMID)
    # try fetching data 5 times; back off briefly when rate-limited (429)
    resp = None
    for count in range(5):
        resp = requests.get(www)
        if resp.status_code == 200:
            break
        if resp.status_code == 429:
            time.sleep(5)
    if resp is None or resp.status_code != 200:
        # BUG FIX: the original fell through and parsed the failed response
        # when the final retry returned 429.
        return {}
    # medline continuation lines start with spaces; fold them into one line
    full_text = resp.text.replace('\n ', ' ')
    for line in full_text.split('\n'):
        # skip blank lines and lines without a "KEY - value" separator
        if not line.strip() or '-' not in line:
            continue
        key, val = [a.strip() for a in line.split('-', 1)]
        if key in field2prop:
            if key in ['DP', 'DEP', 'DA']:
                # collect all raw date fields; resolved below
                pub_data[field2prop[key]][key] = val
            elif key in ['AU', 'CN']:
                pub_data[field2prop[key]].append(val)
            else:
                pub_data[field2prop[key]] = val
    # collapse the collected raw date fields into a single date string
    if pub_data['date_published']:
        pub_data['date_published'] = find_best_date(pub_data['date_published'])
    return {k: v for k, v in pub_data.items() if v is not None}
def fetch_biorxiv(url, doi):
    """Takes url and doi, returns title abstract authors date url journal version"""
    prop_map = {
        'title': 'title',
        'abstract': 'abstract',
        'authors': 'authors',
        'date_published': 'date',
        'version': 'version'
    }
    api_endpoint = 'https://api.biorxiv.org/details/biorxiv/'
    # retry up to 5 times; give up (return None) if the API never responds OK
    response = None
    for attempt in range(5):
        response = requests.get(api_endpoint + doi)
        if response.status_code == 200:
            break
        if attempt == 4:
            return
    latest = response.json()['collection'][-1]  # last entry is the newest version
    pub_data = {}
    for prop, api_key in prop_map.items():
        value = latest.get(api_key)
        if value:
            pub_data[prop] = value
    if pub_data.get('authors'):
        # reformat "Last, F. M.; ..." style names into 4DN "Last FM" style
        formatted = []
        for author in pub_data['authors'].split(";"):
            if author:
                formatted.append(author.replace(" ", "").replace(".", "").replace(",", " "))
        pub_data['authors'] = formatted
    pub_data['url'] = url
    pub_data['journal'] = 'bioRxiv'
    return pub_data
def map_doi_pmid(doi):
    """Return the PMID a DOI maps to via the NCBI id-converter, or None."""
    NIHid = "https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/"
    www = "{NIH}?ids={id}&versions=no&format=json".format(NIH=NIHid, id=doi)
    # try fetching data 5 times
    resp = None
    for count in range(5):
        resp = requests.get(www)
        if resp.status_code == 200:
            break
    if resp is None or resp.status_code != 200:
        # all attempts failed; the original would have parsed a bad response
        return
    try:
        # BUG FIX: the original issued a second, redundant GET here and
        # parsed that response instead of the one obtained by the retry loop.
        res = json.loads(resp.text)
        return res['records'][0]['pmid']
    except Exception:
        # missing 'records'/'pmid' means the DOI has no PubMed mapping
        return
def map_doi_biox(doi):
    """Resolve a DOI's landing page; return it when it points at bioRxiv, else None."""
    DOIapi = "https://doi.org/api/handles/"
    www = "{DOIapi}{doi}".format(DOIapi=DOIapi, doi=doi)
    resp = None
    for count in range(5):
        resp = requests.get(www)
        if resp.status_code == 200:
            break
    if resp is None or resp.status_code != 200:
        # DOI handle service unreachable / unknown DOI
        return
    # BUG FIX: the original referenced landing_page without initializing it,
    # raising NameError when no URL-typed handle value was present.
    landing_page = None
    for value in resp.json().get('values', []):
        if value.get('type', '') == 'URL':
            landing_page = value['data']['value']
            break
    if landing_page and "biorxiv" in landing_page:
        return landing_page
    return
################################################
# End of outside methods for online data fetch
################################################
def _build_publication_embedded_list():
    """ Helper function intended to be used to create the embedded list for publication.
    All types should implement a function like this going forward.
    """
    publication_specific = [
        # ExperimentSet linkTo
        "exp_sets_prod_in_pub.accession",
        # ExperimentType linkTo
        "exp_sets_prod_in_pub.experimentset_type",
        "exp_sets_used_in_pub.experimentset_type",
        # ExperimentType linkTo
        "exp_sets_prod_in_pub.experiments_in_set.experiment_type.title",
    ]
    return Item.embedded_list + lab_award_attribution_embed_list + publication_specific
@collection(
    name='publications',
    properties={
        'title': 'Publications',
        'description': 'Publication pages',
    })
class Publication(Item, ItemWithAttachment):
    """Publication item; metadata is auto-fetched from PubMed/bioRxiv on update."""
    item_type = 'publication'
    schema = load_schema('encoded:schemas/publication.json')
    embedded_list = _build_publication_embedded_list()
    # the following fields are patched by the update method and should always be included in the invalidation diff
    default_diff = [
        'title',
        'abstract',
        'authors',
        'date_published',
        'journal',
        'version',
        'url'
    ]

    class Collection(Item.Collection):
        pass

    def _update(self, properties, sheets=None):
        """Overwrite stored properties with metadata fetched for the 'ID' field.

        A manually supplied date_published wins over the fetched one when it
        differs from the previously stored value.
        """
        # logic for determining whether to use manually-provided date_published
        try:
            prev_date_published = self.properties.get('date_published')
        except KeyError:  # if new user, previous properties do not exist
            prev_date_published = None
        new_date_published = properties.get('date_published')
        self.upgrade_properties()
        pub_data = {}
        p_id = properties['ID']
        # parse if id is from pubmed
        try:
            if p_id.startswith('PMID'):
                pubmed_id = p_id[5:]
                pub_data = fetch_pubmed(pubmed_id)
            # if id is doi, first check if it maps to pubmed id, else see where it goes
            elif p_id.startswith('doi'):
                doi_id = p_id[4:]
                pubmed_id = map_doi_pmid(doi_id)
                if pubmed_id:
                    pub_data = fetch_pubmed(pubmed_id)
                # if it goes to biorxiv fetch from biorxiv
                else:
                    biox_url = map_doi_biox(doi_id)
                    if biox_url:
                        pub_data = fetch_biorxiv(biox_url, doi_id)
                    else:
                        pass
        except Exception:
            # best effort: keep the user-supplied properties when any remote
            # fetch or parsing step fails
            pass
        if pub_data:
            for k, v in pub_data.items():
                properties[k] = v
        # allow override of date_published
        if new_date_published is not None and prev_date_published != new_date_published:
            properties['date_published'] = new_date_published
        super(Publication, self)._update(properties, sheets)
        return

    @calculated_property(schema={
        "title": "Short Attribution",
        "description": "Short string containing <= 2 authors & year published.",
        "type": "string"
    })
    def short_attribution(self, authors=None, date_published=None):
        # e.g. "Smith J et al. (2019)" or "Smith J and Doe A (2019)"
        minipub = ''
        if authors:
            minipub = authors[0]
            if len(authors) > 2:
                minipub = minipub + ' et al.'
            elif len(authors) == 2:
                minipub = minipub + ' and ' + authors[1]
        if date_published:
            # date_published begins with the 4-digit year
            minipub = minipub + ' (' + date_published[0:4] + ')'
        return minipub

    @calculated_property(schema={
        "title": "Display Title",
        "description": "Publication short attribution, year, and ID (if available).",
        "type": "string"
    })
    def display_title(self, ID, authors=None, date_published=None):
        minipub = self.short_attribution(authors, date_published)
        if minipub:
            return minipub + ' ' + ID
        return ID

    @calculated_property(schema={
        "title": "Number of Experiment Sets",
        "description": "The number of experiment sets produced by this publication.",
        "type": "integer"
    })
    def number_of_experiment_sets(self, request, exp_sets_prod_in_pub=None):
        # returns None (property omitted) when no experiment sets are linked
        if exp_sets_prod_in_pub:
            return len(exp_sets_prod_in_pub)
#### Add validator to ensure ID field is unique
def validate_unique_pub_id(context, request):
    '''validator to ensure publication 'ID' field is unique

    Adds a body error to the request (causing a 4xx) when another
    publication already holds the submitted ID.
    '''
    data = request.json
    # ID is required; validate_item_content_post/put/patch will handle missing field
    if 'ID' in data:
        lookup_res = request.registry[CONNECTION].storage.get_by_json('ID', data['ID'], 'publication')
        if lookup_res:
            # check_only + POST happens on GUI edit; we cannot confirm if found
            # item is the same item. Let the PATCH take care of validation
            if request.method == 'POST' and request.params.get('check_only', False):
                return
            # editing an item will cause it to find itself. That's okay
            if hasattr(context, 'uuid') and getattr(lookup_res, 'uuid', None) == context.uuid:
                return
            error_msg = ("publication %s already exists with ID '%s'. This field must be unique"
                         % (lookup_res.uuid, data['ID']))
            request.errors.add('body', 'Publication: non-unique ID', error_msg)
            return
@view_config(context=Publication.Collection, permission='add', request_method='POST',
             validators=[validate_item_content_post, validate_unique_pub_id])
@view_config(context=Publication.Collection, permission='add_unvalidated', request_method='POST',
             validators=[no_validate_item_content_post],
             request_param=['validate=false'])
@debug_log
def publication_add(context, request, render=None):
    """POST endpoint to create a publication; enforces unique ID unless validation is disabled."""
    return collection_add(context, request, render)
@view_config(context=Publication, permission='edit', request_method='PUT',
             validators=[validate_item_content_put, validate_unique_pub_id])
@view_config(context=Publication, permission='edit', request_method='PATCH',
             validators=[validate_item_content_patch, validate_unique_pub_id])
@view_config(context=Publication, permission='edit_unvalidated', request_method='PUT',
             validators=[no_validate_item_content_put],
             request_param=['validate=false'])
@view_config(context=Publication, permission='edit_unvalidated', request_method='PATCH',
             validators=[no_validate_item_content_patch],
             request_param=['validate=false'])
@view_config(context=Publication, permission='index', request_method='GET',
             validators=[validate_item_content_in_place, validate_unique_pub_id],
             request_param=['check_only=true'])
@debug_log
def publication_edit(context, request, render=None):
    """PUT/PATCH endpoint to edit a publication; enforces unique ID unless validation is disabled."""
    return item_edit(context, request, render)
|
|
#PiRoCon - 4Tronix Initio - Motor Controller
#Martin O'Hanlon
#www.stuffaboutcode.com
import sys
import time
import RPi.GPIO as GPIO
#motor pins
MOTORAFWRDPIN = 19
MOTORABWRDPIN = 21
MOTORBFWRDPIN = 24
MOTORBBWRDPIN = 26
#encoder pins
MOTORAENCODERPIN = 7
MOTORBENCODERPIN = 11
#motor speed equivalents
# use this if one motor is significant faster than the other
# to slow down one motor more than the other
#Settings when only powered by the Pi
#MOTORAMAX = 0.8
#MOTORBMAX = 1
MOTORAMAX = 1
MOTORBMAX = 1
#motor states
STATEFORWARD = 1
STATESTOPPED = 0
STATEBACKWARD = -1
#The controller class which manages the motors and encoders
class MotorController:
    """Manages the two drive motors (and their encoders) as a single unit."""
    def __init__(self,
                 motorAForwardPin = MOTORAFWRDPIN,
                 motorABackwardPin = MOTORABWRDPIN,
                 motorBForwardPin = MOTORBFWRDPIN,
                 motorBBackwardPin = MOTORBBWRDPIN,
                 motorAEncoderPin = MOTORAENCODERPIN,
                 motorBEncoderPin = MOTORBENCODERPIN,):
        # BUG FIX: the original assigned to self.motorA / self.motorB, which
        # collided with the read-only properties of the same names
        # (AttributeError on construction) and those properties returned
        # themselves (infinite recursion). Store in _motorA / _motorB.
        self._motorA = Motor(motorAForwardPin, motorABackwardPin, motorAEncoderPin)
        self._motorB = Motor(motorBForwardPin, motorBBackwardPin, motorBEncoderPin)

    #motor properties
    @property
    def motorA(self):
        return self._motorA

    @property
    def motorB(self):
        return self._motorB

    def start(self, powerA, powerB = None):
        """Run both motors; if powerB is omitted both get powerA (scaled per motor)."""
        #if a second power isnt passed in, both motors are set the same
        if powerB == None: powerB = powerA
        self._motorA.start(powerA * MOTORAMAX)
        self._motorB.start(powerB * MOTORBMAX)

    def stop(self):
        """Stop both motors."""
        self._motorA.stop()
        self._motorB.stop()

    def rotateLeft(self, power):
        """Spin in place to the left (A backward, B forward)."""
        self.start(power * -1, power)

    def rotateRight(self, power):
        """Spin in place to the right (A forward, B backward)."""
        self.start(power, power * -1)
#class for controlling a motor
class Motor:
    """Drives a single motor via PWM and counts encoder ticks."""
    def __init__(self, forwardPin, backwardPin, encoderPin):
        #persist values
        self.forwardPin = forwardPin
        self.backwardPin = backwardPin
        self.encoderPin = encoderPin
        #setup GPIO pins
        GPIO.setup(forwardPin, GPIO.OUT)
        GPIO.setup(backwardPin, GPIO.OUT)
        GPIO.setup(encoderPin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        #add encoder pin event
        GPIO.add_event_detect(encoderPin, GPIO.RISING, callback=self._encoderCallback, bouncetime=2)
        #setup pwm (50Hz base frequency)
        self.forwardPWM = GPIO.PWM(forwardPin, 50)
        self.backwardPWM = GPIO.PWM(backwardPin, 50)
        #setup encoder/speed ticks
        self.totalTicks = 0
        self.currentTicks = 0
        # BUG FIX: state is kept in _state; the original wrote to self.state,
        # which collided with the read-only property of the same name
        # (AttributeError) and the property returned itself (infinite
        # recursion on access).
        self._state = STATESTOPPED

    #motor state property
    @property
    def state(self):
        return self._state

    def start(self, power):
        """Run the motor: negative power = backward, positive = forward, 0 = stop."""
        # backward
        if(power < 0):
            if(self._state != STATEBACKWARD): self.stop()
            self._backward(power)
            self._state = STATEBACKWARD
        # forward
        if(power > 0):
            if(self._state != STATEFORWARD): self.stop()
            self._forward(power)
            self._state = STATEFORWARD
        # stop
        if(power == 0):
            self.stop()

    def stop(self):
        """Stop the motor and reset the per-run tick counter."""
        self.forwardPWM.stop()
        self.backwardPWM.stop()
        self._state = STATESTOPPED
        #reset ticks
        self.currentTicks = 0

    def resetTotalTicks(self):
        """Reset the lifetime encoder tick counter."""
        self.totalTicks = 0

    def _calcPowerAndFreq(self, power):
        """Clamp power to 0..100 and derive the PWM frequency (half of power, minimum 11)."""
        power = max(0, min(100, abs(power)))
        freq = max(11, abs(power / 2))
        return power, freq

    def _forward(self, power):
        #start forward motor
        power, freq = self._calcPowerAndFreq(power)
        self.forwardPWM.ChangeFrequency(freq)
        self.forwardPWM.start(power)

    def _backward(self, power):
        #start backward motor
        power, freq = self._calcPowerAndFreq(power)
        self.backwardPWM.ChangeFrequency(freq)
        self.backwardPWM.start(power)

    def _encoderCallback(self, pin):
        # invoked from the GPIO edge-detect thread on each rising edge
        self.totalTicks += 1
        self.currentTicks += 1
#tests - simple manual exercise of the motor controller
if __name__ == '__main__':
    try:
        #setup gpio
        GPIO.setmode(GPIO.BOARD)
        #create motor control
        motors = MotorController()
        #forward
        print("forward")
        motors.start(100)
        time.sleep(2)
        print("encoder ticks")
        print(motors.motorA.totalTicks)
        print(motors.motorB.totalTicks)
        #backward
        print("backward")
        motors.start(-50)
        time.sleep(2)
        #forward curve
        print("forward curve")
        motors.start(100, 50)
        time.sleep(2)
        #rotate left
        print("rotate left")
        motors.rotateLeft(50)
        time.sleep(2)
        #rotate right
        print("rotate right")
        motors.rotateRight(50)
        time.sleep(2)
        #stop
        print("stop")
        motors.stop()
    #Ctrl C
    except KeyboardInterrupt:
        # BUG FIX: these two handlers used Python 2 print statements, a
        # syntax error under Python 3 and inconsistent with the print()
        # calls used everywhere else in this script.
        print("User cancelled")
    #Error
    except:
        print("Unexpected error:", sys.exc_info()[0])
        raise
    finally:
        print("cleanup")
        #cleanup gpio
        GPIO.cleanup()
|
|
#!/usr/bin/python
import sys, os, re, platform
from os.path import exists, abspath, dirname, join, isdir
try:
# Allow use of setuptools so eggs can be built.
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
from distutils.extension import Extension
from distutils.errors import *
OFFICIAL_BUILD = 9999
def _print(s):
# Python 2/3 compatibility
sys.stdout.write(s + '\n')
class VersionCommand(Command):
    """Custom distutils command that prints the computed version string."""

    description = "prints the pyodbc version, determined from git"
    user_options = []

    def initialize_options(self):
        # Command requires this hook; nothing to configure beyond defaults.
        self.verbose = 0

    def finalize_options(self):
        pass

    def run(self):
        # Only the printable version string is needed here.
        version_str, _numbers = get_version()
        sys.stdout.write(version_str + '\n')
class TagsCommand(Command):
    """Custom distutils command that (re)builds an etags TAGS file for src/."""

    description = 'runs etags'
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Windows versions of etags do not expand wildcards (Unix shells do),
        # so collect the source files ourselves before invoking the tool.
        sources = []
        for fname in os.listdir('src'):
            if fname.endswith(('.h', '.cpp')):
                sources.append(join('src', fname))
        return os.system('etags %s' % ' '.join(sources))
def main():
    """Assemble distribution metadata and invoke distutils/setuptools setup()."""
    version_str, version = get_version()
    settings = get_compiler_settings(version_str)
    files = [ abspath(join('src', f)) for f in os.listdir('src') if f.endswith('.cpp') ]
    # a stale MANIFEST would make sdist reuse an outdated file list
    if exists('MANIFEST'):
        os.remove('MANIFEST')
    kwargs = {
        'name': "pyodbc",
        'version': version_str,
        'description': "DB API Module for ODBC",
        'long_description': ('A Python DB API 2 module for ODBC. This project provides an up-to-date, '
                             'convenient interface to ODBC using native data types like datetime and decimal.'),
        'maintainer': "Michael Kleehammer",
        'maintainer_email': "michael@kleehammer.com",
        'ext_modules': [Extension('pyodbc', files, **settings)],
        'license': 'MIT',
        'classifiers': ['Development Status :: 5 - Production/Stable',
                        'Intended Audience :: Developers',
                        'Intended Audience :: System Administrators',
                        'License :: OSI Approved :: MIT License',
                        'Operating System :: Microsoft :: Windows',
                        'Operating System :: POSIX',
                        'Programming Language :: Python',
                        'Programming Language :: Python :: 2',
                        'Programming Language :: Python :: 3',
                        'Topic :: Database',
                        ],
        'url': 'http://code.google.com/p/pyodbc',
        'download_url': 'http://code.google.com/p/pyodbc/downloads/list',
        'cmdclass': { 'version' : VersionCommand,
                      'tags' : TagsCommand }
    }
    # UAC elevation option only exists on Python 2.6+ Windows installers
    if sys.hexversion >= 0x02060000:
        kwargs['options'] = {
            'bdist_wininst': {'user_access_control' : 'auto'}
        }
    setup(**kwargs)
def get_compiler_settings(version_str):
    """Build the platform-specific keyword dict passed to distutils Extension."""
    settings = { 'libraries': [],
                 'define_macros' : [ ('PYODBC_VERSION', version_str) ] }
    # This isn't the best or right way to do this, but I don't see how someone is supposed to sanely subclass the build
    # command.
    # Consume our custom debug flags from argv before distutils sees them.
    for option in ['assert', 'trace', 'leak-check']:
        try:
            sys.argv.remove('--%s' % option)
            settings['define_macros'].append(('PYODBC_%s' % option.replace('-', '_').upper(), 1))
        except ValueError:
            pass
    if os.name == 'nt':
        settings['extra_compile_args'] = ['/Wall',
                                          '/wd4668',
                                          '/wd4820',
                                          '/wd4711', # function selected for automatic inline expansion
                                          '/wd4100', # unreferenced formal parameter
                                          '/wd4127', # "conditional expression is constant" testing compilation constants
                                          '/wd4191', # casts to PYCFunction which doesn't have the keywords parameter
                                          ]
        settings['libraries'].append('odbc32')
        settings['libraries'].append('advapi32')
        if '--debug' in sys.argv:
            sys.argv.remove('--debug')
            settings['extra_compile_args'].extend('/Od /Ge /GS /GZ /RTC1 /Wp64 /Yd'.split())
    elif os.environ.get("OS", '').lower().startswith('windows'):
        # Windows Cygwin (posix on windows)
        # OS name not windows, but still on Windows
        settings['libraries'].append('odbc32')
    elif sys.platform == 'darwin':
        if '--macports' in sys.argv:
            # OS/X using unixODBC via MacPorts.
            sys.argv.remove('--macports')
            settings['libraries'].append('odbc')
            settings['include_dirs'] = ['/opt/local/include']
            settings['library_dirs'] = ['/opt/local/lib']
        else:
            # OS/X now ships with iODBC.
            settings['libraries'].append('iodbc')
            # Apple has decided they won't maintain the iODBC system in OS/X and has added deprecation warnings in 10.8.
            # For now target 10.7 to eliminate the warnings.
            # Python functions take a lot of 'char *' that really should be const.  gcc complains about this *a lot*
            settings['extra_compile_args'] = ['-Wno-write-strings', '-Wno-deprecated-declarations']
            # NOTE(review): 1-tuple macro entry; define_macros normally takes
            # (name, value) pairs — confirm distutils accepts this form.
            settings['define_macros'].append( ('MAC_OS_X_VERSION_10_7',) )
    else:
        # Other posix-like: Linux, Solaris, etc.
        # Python functions take a lot of 'char *' that really should be const.  gcc complains about this *a lot*
        settings['extra_compile_args'] = ['-Wno-write-strings']
        # What is the proper way to detect iODBC, MyODBC, unixODBC, etc.?
        settings['libraries'].append('odbc')
    return settings
def add_to_path():
    """
    Prepends the build directory to the path so pyodbcconf can be imported without installing it.

    Raises SystemExit when no built pyodbcconf extension can be located.
    """
    import imp
    library_exts  = [ t[0] for t in imp.get_suffixes() if t[-1] == imp.C_EXTENSION ]
    library_names = [ 'pyodbcconf%s' % ext for ext in library_exts ]
    # Only go into directories that match our version number.
    dir_suffix = '-%s.%s' % (sys.version_info[0], sys.version_info[1])
    build = join(dirname(abspath(__file__)), 'build')
    for top, dirs, files in os.walk(build):
        # BUG FIX: os.walk only honors in-place modification of `dirs`;
        # rebinding the name (dirs = [...]) did not prune the traversal,
        # so every build directory was searched regardless of version.
        dirs[:] = [ d for d in dirs if d.endswith(dir_suffix) ]
        for name in library_names:
            if name in files:
                sys.path.insert(0, top)
                return
    raise SystemExit('Did not find pyodbcconf')
def get_version():
    """
    Returns the version of the product as (description, [major,minor,micro,beta]).

    If the release is official, `beta` will be 9999 (OFFICIAL_BUILD).

      1. If in a git repository, use the latest tag (git describe).
      2. If in an unzipped source directory (from setup.py sdist),
         read the version from the PKG-INFO file.
      3. Use 3.0.0.0 and complain a lot.
    """
    # Official releases are tagged major.minor.micro; unofficial builds are
    # betas of the *next* micro release, with the commit count as the beta
    # number. Official builds carry beta=9999 so betas sort before them in
    # the 4-number Windows DLL version, but the 9999 is never displayed.
    # Priority: PKG-INFO (source release) -> git describe -> fallback.
    name, numbers = _get_version_pkginfo()
    if not numbers:
        name, numbers = _get_version_git()
    if not numbers:
        _print('WARNING: Unable to determine version.  Using 3.0.0.0')
        name, numbers = '3.0.0-unsupported', [3, 0, 0, 0]
    return name, numbers
def _get_version_pkginfo():
filename = join(dirname(abspath(__file__)), 'PKG-INFO')
if exists(filename):
re_ver = re.compile(r'^Version: \s+ (\d+)\.(\d+)\.(\d+) (?: -beta(\d+))?', re.VERBOSE)
for line in open(filename):
match = re_ver.search(line)
if match:
name = line.split(':', 1)[1].strip()
numbers = [int(n or 0) for n in match.groups()[:3]]
numbers.append(int(match.group(4) or OFFICIAL_BUILD)) # don't use 0 as a default for build
return name, numbers
return None, None
def _get_version_git():
    """Derive the version from `git describe` against a 3.* tag.

    Returns (name, [major, minor, micro, beta]) or (None, None) when git is
    unavailable or no matching tag exists.
    """
    n, result = getoutput('git describe --tags --match 3.*')
    if n:
        _print('WARNING: git describe failed with: %s %s' % (n, result))
        return None, None
    # BUG FIX: the version-separator dots were unescaped (matched any
    # character); escape them so only real x.y.z tags are accepted.
    match = re.match(r'(\d+)\.(\d+)\.(\d+) (?: -(\d+)-g[0-9a-z]+)?', result, re.VERBOSE)
    if not match:
        return None, None
    numbers = [int(n or OFFICIAL_BUILD) for n in match.groups()]
    if numbers[-1] == OFFICIAL_BUILD:
        # exactly on a tag: official release
        name = '%s.%s.%s' % tuple(numbers[:3])
    if numbers[-1] != OFFICIAL_BUILD:
        # This is a beta of the next micro release, so increment the micro number to reflect this.
        numbers[-2] += 1
        name = '%s.%s.%s-beta%02d' % tuple(numbers)
    n, result = getoutput('git branch')
    # BUG FIX: guard against missing "* branch" marker (e.g. detached HEAD),
    # which previously made .group(1) raise AttributeError.
    branch_match = re.search(r'\* (\w+)', result)
    if branch_match:
        branch = branch_match.group(1)
        if branch != 'master' and not re.match(r'^v\d+$', branch):
            name = branch + '-' + name
    return name, numbers
def getoutput(cmd):
    """Run *cmd* through a shell pipe; return (exit_status, output_without_trailing_newline)."""
    stream = os.popen(cmd, 'r')
    output = stream.read().rstrip('\n')
    exit_status = stream.close() or 0
    return exit_status, output
if __name__ == '__main__':
    # Standard setup.py entry point.
    main()
|
|
from api_lib import APITest
from publish import DefaultSigningOptions
class SnapshotsAPITestCreateShowEmpty(APITest):
    """
    GET /api/snapshots/:name, POST /api/snapshots, GET /api/snapshots/:name/packages
    """
    def check(self):
        # Create an empty snapshot, fetch it back, list its (empty) package
        # set, and verify duplicate names and unknown names are rejected.
        snapshot_name = self.random_name()
        snapshot_desc = {u'Description': u'fun snapshot',
                         u'Name': snapshot_name}
        # create empty snapshot
        resp = self.post("/api/snapshots", json=snapshot_desc)
        self.check_subset(snapshot_desc, resp.json())
        self.check_equal(resp.status_code, 201)
        self.check_subset(snapshot_desc, self.get("/api/snapshots/" + snapshot_name).json())
        self.check_equal(self.get("/api/snapshots/" + snapshot_name).status_code, 200)
        resp = self.get("/api/snapshots/" + snapshot_name + "/packages")
        self.check_equal(resp.status_code, 200)
        # a freshly created empty snapshot carries no packages
        self.check_equal(resp.json(), [])
        # unknown snapshot name must 404
        self.check_equal(self.get("/api/snapshots/" + self.random_name()).status_code, 404)
        # create snapshot with duplicate name
        resp = self.post("/api/snapshots", json=snapshot_desc)
        self.check_equal(resp.status_code, 400)
class SnapshotsAPITestCreateFromRefs(APITest):
    """
    GET /api/snapshots/:name, POST /api/snapshots, GET /api/snapshots/:name/packages,
    GET /api/snapshots
    """
    def check(self):
        # Exercise snapshot creation from package refs + source snapshots,
        # including the missing-source and unknown-package error paths.
        snapshot_name = self.random_name()
        snapshot_desc = {u'Description': u'fun snapshot',
                         u'Name': snapshot_name,
                         u'SourceSnapshots': [self.random_name()]}
        # creating snapshot from missing source snapshot
        resp = self.post("/api/snapshots", json=snapshot_desc)
        self.check_equal(resp.status_code, 404)
        # create empty snapshot
        empty_snapshot_name = self.random_name()
        resp = self.post("/api/snapshots", json={"Name": empty_snapshot_name})
        self.check_equal(resp.status_code, 201)
        self.check_equal(resp.json()['Description'], 'Created as empty')
        # create and upload package to repo to register package in DB
        repo_name = self.random_name()
        self.check_equal(self.post("/api/repos", json={"Name": repo_name}).status_code, 201)
        d = self.random_name()
        self.check_equal(self.upload("/api/files/" + d,
                         "libboost-program-options-dev_1.49.0.1_i386.deb").status_code, 200)
        self.check_equal(self.post("/api/repos/" + repo_name + "/file/" + d).status_code, 200)
        # create snapshot with empty snapshot as source and package
        snapshot = snapshot_desc.copy()
        snapshot['PackageRefs'] = ["Pi386 libboost-program-options-dev 1.49.0.1 918d2f433384e378"]
        snapshot['SourceSnapshots'] = [empty_snapshot_name]
        resp = self.post("/api/snapshots", json=snapshot)
        self.check_equal(resp.status_code, 201)
        # the response echoes only the snapshot's own fields, not the inputs
        snapshot.pop('SourceSnapshots')
        snapshot.pop('PackageRefs')
        self.check_subset(snapshot, resp.json())
        self.check_subset(snapshot, self.get("/api/snapshots/" + snapshot_name).json())
        resp = self.get("/api/snapshots/" + snapshot_name + "/packages")
        self.check_equal(resp.status_code, 200)
        self.check_equal(resp.json(), ["Pi386 libboost-program-options-dev 1.49.0.1 918d2f433384e378"])
        # create snapshot with unreferenced package
        resp = self.post("/api/snapshots", json={
            "Name": self.random_name(),
            "PackageRefs": ["Pi386 libboost-program-options-dev 1.49.0.1 918d2f433384e378", "Pamd64 no-such-package 1.2 91"]})
        self.check_equal(resp.status_code, 404)
        # list snapshots, sorted by creation time
        resp = self.get("/api/snapshots", params={"sort": "time"})
        self.check_equal(resp.status_code, 200)
        self.check_equal([s["Name"] for s in resp.json() if s["Name"] in [empty_snapshot_name, snapshot_name]],
                         [empty_snapshot_name, snapshot_name])
class SnapshotsAPITestCreateFromRepo(APITest):
    """
    POST /api/repos, POST /api/repos/:name/snapshots, GET /api/snapshots/:name
    """
    def check(self):
        # Snapshotting an empty repo must fail; after uploading a package it
        # must succeed and expose the package details (with/without a query).
        repo_name = self.random_name()
        snapshot_name = self.random_name()
        self.check_equal(self.post("/api/repos", json={"Name": repo_name}).status_code, 201)
        # snapshot of an empty repo is rejected
        resp = self.post("/api/repos/" + repo_name + '/snapshots', json={'Name': snapshot_name})
        self.check_equal(resp.status_code, 400)
        d = self.random_name()
        self.check_equal(self.upload("/api/files/" + d,
                         "libboost-program-options-dev_1.49.0.1_i386.deb").status_code, 200)
        self.check_equal(self.post("/api/repos/" + repo_name + "/file/" + d).status_code, 200)
        resp = self.post("/api/repos/" + repo_name + '/snapshots', json={'Name': snapshot_name})
        self.check_equal(resp.status_code, 201)
        self.check_equal(self.get("/api/snapshots/" + snapshot_name).status_code, 200)
        # package listing in "details" format
        self.check_subset({u'Architecture': 'i386',
                           u'Package': 'libboost-program-options-dev',
                           u'Version': '1.49.0.1',
                           'FilesHash': '918d2f433384e378'},
                          self.get("/api/snapshots/" + snapshot_name + "/packages", params={"format": "details"}).json()[0])
        # same listing filtered with a package query
        self.check_subset({u'Architecture': 'i386',
                           u'Package': 'libboost-program-options-dev',
                           u'Version': '1.49.0.1',
                           'FilesHash': '918d2f433384e378'},
                          self.get("/api/snapshots/" + snapshot_name + "/packages",
                                   params={"format": "details", "q": "Version (> 0.6.1-1.4)"}).json()[0])
        # duplicate snapshot name
        resp = self.post("/api/repos/" + repo_name + '/snapshots', json={'Name': snapshot_name})
        self.check_equal(resp.status_code, 400)
class SnapshotsAPITestCreateUpdate(APITest):
    """
    POST /api/snapshots, PUT /api/snapshots/:name, GET /api/snapshots/:name
    """
    def check(self):
        # Rename/redescribe a snapshot, then verify conflicting and missing
        # names are rejected with 409 / 404 respectively.
        snapshot_name = self.random_name()
        snapshot_desc = {u'Description': u'fun snapshot',
                         u'Name': snapshot_name}
        resp = self.post("/api/snapshots", json=snapshot_desc)
        self.check_equal(resp.status_code, 201)
        new_snapshot_name = self.random_name()
        resp = self.put("/api/snapshots/" + snapshot_name, json={'Name': new_snapshot_name,
                                                                 'Description': 'New description'})
        self.check_equal(resp.status_code, 200)
        resp = self.get("/api/snapshots/" + new_snapshot_name)
        self.check_equal(resp.status_code, 200)
        self.check_subset({"Name": new_snapshot_name,
                           "Description": "New description"}, resp.json())
        # duplicate name
        resp = self.put("/api/snapshots/" + new_snapshot_name, json={'Name': new_snapshot_name,
                                                                     'Description': 'New description'})
        self.check_equal(resp.status_code, 409)
        # missing snapshot (the old name no longer exists after the rename)
        resp = self.put("/api/snapshots/" + snapshot_name, json={})
        self.check_equal(resp.status_code, 404)
class SnapshotsAPITestCreateDelete(APITest):
    """
    POST /api/snapshots, DELETE /api/snapshots/:name, GET /api/snapshots/:name
    """

    def check(self):
        # an unreferenced snapshot can be deleted outright
        name = self.random_name()
        self.check_equal(
            self.post("/api/snapshots",
                      json={u'Description': u'fun snapshot',
                            u'Name': name}).status_code,
            201)
        self.check_equal(self.delete("/api/snapshots/" + name).status_code, 200)
        self.check_equal(self.get("/api/snapshots/" + name).status_code, 404)

        # a snapshot referenced as a source of another one needs force=1
        snap1, snap2 = self.random_name(), self.random_name()
        self.check_equal(self.post("/api/snapshots", json={"Name": snap1}).status_code, 201)
        self.check_equal(
            self.post("/api/snapshots",
                      json={"Name": snap2, "SourceSnapshots": [snap1]}).status_code,
            201)
        self.check_equal(self.delete("/api/snapshots/" + snap1).status_code, 409)
        self.check_equal(self.get("/api/snapshots/" + snap1).status_code, 200)
        self.check_equal(
            self.delete("/api/snapshots/" + snap1, params={"force": "1"}).status_code, 200)
        self.check_equal(self.get("/api/snapshots/" + snap1).status_code, 404)

        # a published snapshot cannot be deleted, even with force=1
        self.check_equal(
            self.post("/api/publish",
                      json={
                          "SourceKind": "snapshot",
                          "Distribution": "trusty",
                          "Architectures": ["i386"],
                          "Sources": [{"Name": snap2}],
                          "Signing": DefaultSigningOptions,
                      }).status_code,
            201)
        self.check_equal(self.delete("/api/snapshots/" + snap2).status_code, 409)
        self.check_equal(
            self.delete("/api/snapshots/" + snap2, params={"force": "1"}).status_code, 409)
class SnapshotsAPITestSearch(APITest):
    """
    POST /api/snapshots, GET /api/snapshots?sort=name, GET /api/snapshots/:name
    """

    def check(self):
        repo = self.random_name()
        self.check_equal(self.post("/api/repos", json={"Name": repo}).status_code, 201)

        # upload a single package and snapshot the repo
        upload_dir = self.random_name()
        snapshot = self.random_name()
        self.check_equal(
            self.upload("/api/files/" + upload_dir,
                        "libboost-program-options-dev_1.49.0.1_i386.deb").status_code,
            200)
        self.check_equal(self.post("/api/repos/" + repo + "/file/" + upload_dir).status_code, 200)
        self.check_equal(
            self.post("/api/repos/" + repo + '/snapshots', json={'Name': snapshot}).status_code,
            201)

        # package query in "details" format finds exactly that package
        resp = self.get("/api/snapshots/" + snapshot + "/packages",
                        params={"q": "libboost-program-options-dev", "format": "details"})
        self.check_equal(resp.status_code, 200)
        self.check_equal(len(resp.json()), 1)
        self.check_equal(resp.json()[0]["Package"], "libboost-program-options-dev")

        # default (compact) format returns raw package keys
        resp = self.get("/api/snapshots/" + snapshot + "/packages")
        self.check_equal(resp.status_code, 200)
        self.check_equal(len(resp.json()), 1)
        self.check_equal(resp.json(), ["Pi386 libboost-program-options-dev 1.49.0.1 918d2f433384e378"])
class SnapshotsAPITestDiff(APITest):
    """
    GET /api/snapshot/:name/diff/:name2
    """

    def check(self):
        repos = [self.random_name() for x in xrange(2)]
        snapshots = [self.random_name() for x in xrange(2)]

        # create both repos, each seeded with the same single package
        for repo in repos:
            self.check_equal(self.post("/api/repos", json={"Name": repo}).status_code, 201)
            upload_dir = self.random_name()
            self.check_equal(
                self.upload("/api/files/" + upload_dir,
                            "libboost-program-options-dev_1.49.0.1_i386.deb").status_code,
                200)
            self.check_equal(
                self.post("/api/repos/" + repo + "/file/" + upload_dir).status_code, 200)

        # snapshots[0] is taken from the last repo (non-empty); snapshots[1] is empty
        self.check_equal(
            self.post("/api/repos/" + repos[-1] + '/snapshots',
                      json={'Name': snapshots[0]}).status_code,
            201)
        self.check_equal(self.post("/api/snapshots", json={'Name': snapshots[1]}).status_code, 201)

        pkg_key = 'Pi386 libboost-program-options-dev 1.49.0.1 918d2f433384e378'

        # non-empty vs empty: package appears only on the left
        resp = self.get("/api/snapshots/" + snapshots[0] + "/diff/" + snapshots[1])
        self.check_equal(resp.status_code, 200)
        self.check_equal(resp.json(), [{'Left': pkg_key, 'Right': None}])

        # empty vs non-empty: package appears only on the right
        resp = self.get("/api/snapshots/" + snapshots[1] + "/diff/" + snapshots[0])
        self.check_equal(resp.status_code, 200)
        self.check_equal(resp.json(), [{'Right': pkg_key, 'Left': None}])

        # diffing a snapshot against itself yields no differences
        for name in snapshots:
            resp = self.get("/api/snapshots/" + name + "/diff/" + name)
            self.check_equal(resp.status_code, 200)
            self.check_equal(resp.json(), [])
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions for computing robust statistics using
Tukey's biweight function.
"""
import numpy as np
from .funcs import _expand_dims, median_absolute_deviation
__all__ = ['biweight_location', 'biweight_scale', 'biweight_midvariance',
'biweight_midcovariance', 'biweight_midcorrelation']
def _stat_functions(data, ignore_nan=False):
if isinstance(data, np.ma.MaskedArray):
median_func = np.ma.median
sum_func = np.ma.sum
elif ignore_nan:
median_func = np.nanmedian
sum_func = np.nansum
else:
median_func = np.median
sum_func = np.sum
return median_func, sum_func
def biweight_location(data, c=6.0, M=None, axis=None, *, ignore_nan=False):
    r"""
    Compute the biweight location.

    The biweight location is a robust statistic for determining the
    central location of a distribution.  It is given by:

    .. math::

        \zeta_{biloc}= M + \frac{\sum_{|u_i|<1} \ (x_i - M) (1 - u_i^2)^2}
            {\sum_{|u_i|<1} \ (1 - u_i^2)^2}

    where :math:`x` is the input data, :math:`M` is the sample median
    (or the input initial location guess) and :math:`u_i` is given by:

    .. math::

        u_{i} = \frac{(x_i - M)}{c * MAD}

    where :math:`c` is the tuning constant and :math:`MAD` is the
    `median absolute deviation
    <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.  The
    biweight location tuning constant ``c`` is typically 6.0 (the
    default).

    Parameters
    ----------
    data : array_like
        Input array or object that can be converted to an array.
        ``data`` can be a `~numpy.ma.MaskedArray`.
    c : float, optional
        Tuning constant for the biweight estimator (default = 6.0).
    M : float or array_like, optional
        Initial guess for the location.  If ``M`` is a scalar value,
        then its value will be used for the entire array (or along each
        ``axis``, if specified).  If ``M`` is an array, then its must be
        an array containing the initial location estimate along each
        ``axis`` of the input array.  If `None` (default), then the
        median of the input array will be used (or along each ``axis``,
        if specified).
    axis : `None`, int, or tuple of ints, optional
        The axis or axes along which the biweight locations are
        computed.  If `None` (default), then the biweight location of
        the flattened input array will be computed.
    ignore_nan : bool, optional
        Whether to ignore NaN values in the input ``data``.

    Returns
    -------
    biweight_location : float or `~numpy.ndarray`
        The biweight location of the input data.  If ``axis`` is `None`
        then a scalar will be returned, otherwise a `~numpy.ndarray`
        will be returned.

    See Also
    --------
    biweight_scale, biweight_midvariance, biweight_midcovariance

    References
    ----------
    .. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (http://adsabs.harvard.edu/abs/1990AJ....100...32B)

    .. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwloc.htm

    Examples
    --------
    Generate random variates from a Gaussian distribution and return the
    biweight location of the distribution:

    >>> import numpy as np
    >>> from astropy.stats import biweight_location
    >>> rand = np.random.RandomState(12345)
    >>> biloc = biweight_location(rand.randn(1000))
    >>> print(biloc)    # doctest: +FLOAT_CMP
    -0.0175741540445
    """
    median_func, sum_func = _stat_functions(data, ignore_nan=ignore_nan)

    if isinstance(data, np.ma.MaskedArray) and ignore_nan:
        # also mask the NaNs so the np.ma functions skip them
        data = np.ma.masked_where(np.isnan(data), data, copy=True)

    data = np.asanyarray(data).astype(np.float64)

    if M is None:
        M = median_func(data, axis=axis)
    else:
        # Coerce a user-supplied location to an array; a plain Python
        # scalar would otherwise fail at the ``M.squeeze()`` call below
        # (floats have no ``squeeze`` method).
        M = np.asanyarray(M)
    if axis is not None:
        M = _expand_dims(M, axis=axis)  # NUMPY_LT_1_18

    # set up the differences
    d = data - M

    # set up the weighting
    mad = median_absolute_deviation(data, axis=axis, ignore_nan=ignore_nan)

    if axis is None and mad == 0.:
        return M  # return median if data is a constant array

    if axis is not None:
        mad = _expand_dims(mad, axis=axis)  # NUMPY_LT_1_18
        mad[mad == 0] = 1.  # prevent divide by zero

    u = d / (c * mad)

    # now remove the outlier points
    # ignore RuntimeWarnings for comparisons with NaN data values
    with np.errstate(invalid='ignore'):
        mask = np.abs(u) >= 1
    u = (1 - u ** 2) ** 2
    u[mask] = 0

    # Along the input axis if data is constant, d will be zero, thus
    # the median value will be returned along that axis.
    # Ignore RuntimeWarnings for divide by zero if all NaN along an axis
    with np.errstate(divide='ignore', invalid='ignore'):
        return M.squeeze() + (sum_func(d * u, axis=axis) /
                              sum_func(u, axis=axis))
def biweight_scale(data, c=9.0, M=None, axis=None, modify_sample_size=False,
                   *, ignore_nan=False):
    r"""
    Compute the biweight scale.

    The biweight scale is a robust estimator of the standard deviation
    of a distribution, defined as the square root of the
    `biweight midvariance
    <https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_.
    The biweight scale tuning constant ``c`` is typically 9.0 (the
    default).

    Parameters
    ----------
    data : array_like
        Input array or object that can be converted to an array.
        ``data`` can be a `~numpy.ma.MaskedArray`.
    c : float, optional
        Tuning constant for the biweight estimator (default = 9.0).
    M : float or array_like, optional
        The location estimate.  If `None` (default), the median of the
        input array is used (or along each ``axis``, if specified).
    axis : `None`, int, or tuple of ints, optional
        The axis or axes along which the biweight scales are computed.
        If `None` (default), the flattened input array is used.
    modify_sample_size : bool, optional
        If `False` (default), the sample size is the total number of
        elements (the standard definition).  If `True`, the sample size
        is reduced to the number of non-rejected values, which gives a
        value closer to the true standard deviation for small samples
        or many rejected values.  See `biweight_midvariance`.
    ignore_nan : bool, optional
        Whether to ignore NaN values in the input ``data``.

    Returns
    -------
    biweight_scale : float or `~numpy.ndarray`
        The biweight scale of the input data.  If ``axis`` is `None`
        then a scalar will be returned, otherwise a `~numpy.ndarray`
        will be returned.

    See Also
    --------
    biweight_midvariance, biweight_midcovariance, biweight_location, astropy.stats.mad_std, astropy.stats.median_absolute_deviation

    References
    ----------
    .. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (http://adsabs.harvard.edu/abs/1990AJ....100...32B)

    .. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwscale.htm

    Examples
    --------
    Generate random variates from a Gaussian distribution and return the
    biweight scale of the distribution:

    >>> import numpy as np
    >>> from astropy.stats import biweight_scale
    >>> rand = np.random.RandomState(12345)
    >>> biscl = biweight_scale(rand.randn(1000))
    >>> print(biscl)    # doctest: +FLOAT_CMP
    0.986726249291
    """
    # All the real work (masked/NaN handling, sample-size correction)
    # is delegated to biweight_midvariance; the scale is its square root.
    variance = biweight_midvariance(data, c=c, M=M, axis=axis,
                                    modify_sample_size=modify_sample_size,
                                    ignore_nan=ignore_nan)
    return np.sqrt(variance)
def biweight_midvariance(data, c=9.0, M=None, axis=None,
                         modify_sample_size=False, *, ignore_nan=False):
    r"""
    Compute the biweight midvariance.

    The biweight midvariance is a robust statistic for determining the
    variance of a distribution.  Its square root is a robust estimator
    of scale (i.e. standard deviation).  It is given by:

    .. math::

        \zeta_{bivar} = n \ \frac{\sum_{|u_i| < 1} \
            (x_i - M)^2 (1 - u_i^2)^4} {(\sum_{|u_i| < 1} \
            (1 - u_i^2) (1 - 5u_i^2))^2}

    where :math:`x` is the input data, :math:`M` is the sample median
    (or the input location) and :math:`u_i` is given by:

    .. math::

        u_{i} = \frac{(x_i - M)}{c * MAD}

    where :math:`c` is the tuning constant and :math:`MAD` is the
    `median absolute deviation
    <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.  The
    biweight midvariance tuning constant ``c`` is typically 9.0 (the
    default).

    For the standard definition of `biweight midvariance
    <https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_,
    :math:`n` is the total number of points in the array (or along the
    input ``axis``, if specified).  That definition is used if
    ``modify_sample_size`` is `False`, which is the default.

    However, if ``modify_sample_size = True``, then :math:`n` is the
    number of points for which :math:`|u_i| < 1` (i.e. the total number
    of non-rejected values), i.e.

    .. math::

        n = \sum_{|u_i| < 1} \ 1

    which results in a value closer to the true variance for small
    sample sizes or for a large number of rejected values.

    Parameters
    ----------
    data : array_like
        Input array or object that can be converted to an array.
        ``data`` can be a `~numpy.ma.MaskedArray`.
    c : float, optional
        Tuning constant for the biweight estimator (default = 9.0).
    M : float or array_like, optional
        The location estimate.  If ``M`` is a scalar value, then its
        value will be used for the entire array (or along each ``axis``,
        if specified).  If ``M`` is an array, then its must be an array
        containing the location estimate along each ``axis`` of the
        input array.  If `None` (default), then the median of the input
        array will be used (or along each ``axis``, if specified).
    axis : `None`, int, or tuple of ints, optional
        The axis or axes along which the biweight midvariances are
        computed.  If `None` (default), then the biweight midvariance of
        the flattened input array will be computed.
    modify_sample_size : bool, optional
        If `False` (default), then the sample size used is the total
        number of elements in the array (or along the input ``axis``, if
        specified), which follows the standard definition of biweight
        midvariance.  If `True`, then the sample size is reduced to
        correct for any rejected values (i.e. the sample size used
        includes only the non-rejected values), which results in a value
        closer to the true variance for small sample sizes or for a
        large number of rejected values.
    ignore_nan : bool, optional
        Whether to ignore NaN values in the input ``data``.

    Returns
    -------
    biweight_midvariance : float or `~numpy.ndarray`
        The biweight midvariance of the input data.  If ``axis`` is
        `None` then a scalar will be returned, otherwise a
        `~numpy.ndarray` will be returned.

    See Also
    --------
    biweight_midcovariance, biweight_midcorrelation, astropy.stats.mad_std, astropy.stats.median_absolute_deviation

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance

    .. [2] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (http://adsabs.harvard.edu/abs/1990AJ....100...32B)

    Examples
    --------
    Generate random variates from a Gaussian distribution and return the
    biweight midvariance of the distribution:

    >>> import numpy as np
    >>> from astropy.stats import biweight_midvariance
    >>> rand = np.random.RandomState(12345)
    >>> bivar = biweight_midvariance(rand.randn(1000))
    >>> print(bivar)    # doctest: +FLOAT_CMP
    0.97362869104
    """
    # pick median/sum implementations suited to masked/NaN data
    median_func, sum_func = _stat_functions(data, ignore_nan=ignore_nan)

    if isinstance(data, np.ma.MaskedArray) and ignore_nan:
        # also mask the NaNs so the np.ma functions skip them
        data = np.ma.masked_where(np.isnan(data), data, copy=True)

    data = np.asanyarray(data).astype(np.float64)

    if M is None:
        M = median_func(data, axis=axis)
    if axis is not None:
        # keep M broadcastable against data along the reduced axis/axes
        M = _expand_dims(M, axis=axis)  # NUMPY_LT_1_18

    # set up the differences
    d = data - M

    # set up the weighting
    mad = median_absolute_deviation(data, axis=axis, ignore_nan=ignore_nan)

    if axis is None and mad == 0.:
        return 0.  # return zero if data is a constant array

    if axis is not None:
        mad = _expand_dims(mad, axis=axis)  # NUMPY_LT_1_18
        mad[mad == 0] = 1.  # prevent divide by zero

    u = d / (c * mad)

    # now remove the outlier points
    # ignore RuntimeWarnings for comparisons with NaN data values
    with np.errstate(invalid='ignore'):
        mask = np.abs(u) < 1
    if isinstance(mask, np.ma.MaskedArray):
        mask = mask.filled(fill_value=False)  # exclude masked data values

    u = u ** 2

    if modify_sample_size:
        # n counts only the non-rejected values
        n = sum_func(mask, axis=axis)
    else:
        # set good values to 1, bad values to 0
        include_mask = np.ones(data.shape)
        if isinstance(data, np.ma.MaskedArray):
            include_mask[data.mask] = 0
        if ignore_nan:
            include_mask[np.isnan(data)] = 0
        n = np.sum(include_mask, axis=axis)

    # numerator and denominator sums of the midvariance formula;
    # rejected (and NaN/masked) entries are zeroed out before summing
    f1 = d * d * (1. - u)**4
    f1[~mask] = 0.
    f1 = sum_func(f1, axis=axis)
    f2 = (1. - u) * (1. - 5.*u)
    f2[~mask] = 0.
    # NOTE: np.sum dispatches to the MaskedArray sum for masked input,
    # and NaN entries were already zeroed via ~mask, so np.sum is safe.
    f2 = np.abs(np.sum(f2, axis=axis))**2

    # ignore RuntimeWarnings for divide by zero if all NaN along an axis
    with np.errstate(divide='ignore', invalid='ignore'):
        return n * f1 / f2
def biweight_midcovariance(data, c=9.0, M=None, modify_sample_size=False):
    r"""
    Compute the biweight midcovariance between pairs of multiple
    variables.

    The biweight midcovariance is a robust and resistant estimator of
    the covariance between two variables.

    This function computes the biweight midcovariance between all pairs
    of the input variables (rows) in the input data.  The output array
    will have a shape of (N_variables, N_variables).  The diagonal
    elements will be the biweight midvariances of each input variable
    (see :func:`biweight_midvariance`).  The off-diagonal elements will
    be the biweight midcovariances between each pair of input variables.

    For example, if the input array ``data`` contains three variables
    (rows) ``x``, ``y``, and ``z``, the output `~numpy.ndarray`
    midcovariance matrix will be:

    .. math::

         \begin{pmatrix}
         \zeta_{xx}  & \zeta_{xy}  & \zeta_{xz} \\
         \zeta_{yx}  & \zeta_{yy}  & \zeta_{yz} \\
         \zeta_{zx}  & \zeta_{zy}  & \zeta_{zz}
         \end{pmatrix}

    where :math:`\zeta_{xx}`, :math:`\zeta_{yy}`, and :math:`\zeta_{zz}`
    are the biweight midvariances of each variable.  The biweight
    midcovariance between :math:`x` and :math:`y` is :math:`\zeta_{xy}`
    (:math:`= \zeta_{yx}`).  The biweight midcovariance between
    :math:`x` and :math:`z` is :math:`\zeta_{xz}` (:math:`=
    \zeta_{zx}`).  The biweight midcovariance between :math:`y` and
    :math:`z` is :math:`\zeta_{yz}` (:math:`= \zeta_{zy}`).

    The biweight midcovariance between two variables :math:`x` and
    :math:`y` is given by:

    .. math::

        \zeta_{xy} = n_{xy} \ \frac{\sum_{|u_i| < 1, \ |v_i| < 1} \
            (x_i - M_x) (1 - u_i^2)^2 (y_i - M_y) (1 - v_i^2)^2}
            {(\sum_{|u_i| < 1} \ (1 - u_i^2) (1 - 5u_i^2))
            (\sum_{|v_i| < 1} \ (1 - v_i^2) (1 - 5v_i^2))}

    where :math:`M_x` and :math:`M_y` are the medians (or the input
    locations) of the two variables and :math:`u_i` and :math:`v_i` are
    given by:

    .. math::

        u_{i} = \frac{(x_i - M_x)}{c * MAD_x}

        v_{i} = \frac{(y_i - M_y)}{c * MAD_y}

    where :math:`c` is the biweight tuning constant and :math:`MAD_x`
    and :math:`MAD_y` are the `median absolute deviation
    <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ of the
    :math:`x` and :math:`y` variables.  The biweight midvariance tuning
    constant ``c`` is typically 9.0 (the default).

    For the standard definition of biweight midcovariance,
    :math:`n_{xy}` is the total number of observations of each variable.
    That definition is used if ``modify_sample_size`` is `False`, which
    is the default.

    However, if ``modify_sample_size = True``, then :math:`n_{xy}` is the
    number of observations for which :math:`|u_i| < 1` and/or :math:`|v_i|
    < 1`, i.e.

    .. math::

        n_{xx} = \sum_{|u_i| < 1} \ 1

    .. math::

        n_{xy} = n_{yx} = \sum_{|u_i| < 1, \ |v_i| < 1} \ 1

    .. math::

        n_{yy} = \sum_{|v_i| < 1} \ 1

    which results in a value closer to the true variance for small
    sample sizes or for a large number of rejected values.

    Parameters
    ----------
    data : 2D or 1D array_like
        Input data either as a 2D or 1D array.  For a 2D array, it
        should have a shape (N_variables, N_observations).  A 1D array
        may be input for observations of a single variable, in which
        case the biweight midvariance will be calculated (no
        covariance).  Each row of ``data`` represents a variable, and
        each column a single observation of all those variables (same as
        the `numpy.cov` convention).
    c : float, optional
        Tuning constant for the biweight estimator (default = 9.0).
    M : float or 1D array_like, optional
        The location estimate of each variable, either as a scalar or
        array.  If ``M`` is an array, then its must be a 1D array
        containing the location estimate of each row (i.e. ``a.ndim``
        elements).  If ``M`` is a scalar value, then its value will be
        used for each variable (row).  If `None` (default), then the
        median of each variable (row) will be used.
    modify_sample_size : bool, optional
        If `False` (default), then the sample size used is the total
        number of observations of each variable, which follows the
        standard definition of biweight midcovariance.  If `True`, then
        the sample size is reduced to correct for any rejected values
        (see formula above), which results in a value closer to the true
        covariance for small sample sizes or for a large number of
        rejected values.

    Returns
    -------
    biweight_midcovariance : `~numpy.ndarray`
        A 2D array representing the biweight midcovariances between each
        pair of the variables (rows) in the input array.  The output
        array will have a shape of (N_variables, N_variables).  The
        diagonal elements will be the biweight midvariances of each
        input variable.  The off-diagonal elements will be the biweight
        midcovariances between each pair of input variables.

    See Also
    --------
    biweight_midvariance, biweight_midcorrelation, biweight_scale, biweight_location

    References
    ----------
    .. [1] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwmidc.htm

    Examples
    --------
    Compute the biweight midcovariance between two random variables:

    >>> import numpy as np
    >>> from astropy.stats import biweight_midcovariance
    >>> # Generate two random variables x and y
    >>> rng = np.random.RandomState(1)
    >>> x = rng.normal(0, 1, 200)
    >>> y = rng.normal(0, 3, 200)
    >>> # Introduce an obvious outlier
    >>> x[0] = 30.0
    >>> # Calculate the biweight midcovariances between x and y
    >>> bicov = biweight_midcovariance([x, y])
    >>> print(bicov)    # doctest: +FLOAT_CMP
    [[ 0.82483155 -0.18961219]
     [-0.18961219  9.80265764]]
    >>> # Print standard deviation estimates
    >>> print(np.sqrt(bicov.diagonal()))    # doctest: +FLOAT_CMP
    [ 0.90820237  3.13091961]
    """
    data = np.asanyarray(data).astype(np.float64)

    # ensure data is 2D
    if data.ndim == 1:
        data = data[np.newaxis, :]
    if data.ndim != 2:
        raise ValueError('The input array must be 2D or 1D.')

    # estimate location if not given
    if M is None:
        M = np.median(data, axis=1)
    M = np.asanyarray(M)
    if M.ndim > 1:
        raise ValueError('M must be a scalar or 1D array.')

    # set up the differences
    # (transpose twice so a per-row M broadcasts across observations)
    d = (data.T - M).T

    # set up the weighting
    mad = median_absolute_deviation(data, axis=1)
    mad[mad == 0] = 1.  # prevent divide by zero
    u = (d.T / (c * mad)).T

    # now remove the outlier points
    mask = np.abs(u) < 1

    u = u ** 2

    if modify_sample_size:
        # pairwise counts of jointly non-rejected observations:
        # n[i, j] = number of observations with |u_i| < 1 and |u_j| < 1
        maskf = mask.astype(float)
        n = np.inner(maskf, maskf)
    else:
        # standard definition: total number of observations per variable
        n = data[0].size

    usub1 = (1. - u)
    usub5 = (1. - 5. * u)
    usub1[~mask] = 0.  # zero out rejected observations

    # numerator_matrix[i, j] is the weighted cross sum between rows i, j;
    # denominator_matrix is the outer product of the per-row weight sums
    numerator = d * usub1 ** 2
    denominator = (usub1 * usub5).sum(axis=1)[:, np.newaxis]
    numerator_matrix = np.dot(numerator, numerator.T)
    denominator_matrix = np.dot(denominator, denominator.T)

    return n * (numerator_matrix / denominator_matrix)
def biweight_midcorrelation(x, y, c=9.0, M=None, modify_sample_size=False):
    r"""
    Compute the biweight midcorrelation between two variables.

    The `biweight midcorrelation
    <https://en.wikipedia.org/wiki/Biweight_midcorrelation>`_ is a
    robust measure of similarity between samples, given by:

    .. math::

        r_{bicorr} = \frac{\zeta_{xy}}{\sqrt{\zeta_{xx} \ \zeta_{yy}}}

    where :math:`\zeta_{xx}` and :math:`\zeta_{yy}` are the biweight
    midvariances of :math:`x` and :math:`y`, and :math:`\zeta_{xy}` is
    their biweight midcovariance.

    Parameters
    ----------
    x, y : 1D array_like
        Input arrays for the two variables.  ``x`` and ``y`` must be 1D
        arrays and have the same number of elements.
    c : float, optional
        Tuning constant for the biweight estimator (default = 9.0).  See
        `biweight_midcovariance` for more details.
    M : float or array_like, optional
        The location estimate; if `None` (default) the median is used.
        See `biweight_midcovariance` for more details.
    modify_sample_size : bool, optional
        If `True`, the sample size is reduced to the number of
        non-rejected values.  See `biweight_midcovariance` for more
        details.

    Returns
    -------
    biweight_midcorrelation : float
        The biweight midcorrelation between ``x`` and ``y``.

    Raises
    ------
    ValueError
        If ``x`` or ``y`` is not 1D, or their shapes differ.

    See Also
    --------
    biweight_scale, biweight_midvariance, biweight_midcovariance, biweight_location

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Biweight_midcorrelation

    Examples
    --------
    Calculate the biweight midcorrelation between two variables:

    >>> import numpy as np
    >>> from astropy.stats import biweight_midcorrelation
    >>> rng = np.random.RandomState(12345)
    >>> x = rng.normal(0, 1, 200)
    >>> y = rng.normal(0, 3, 200)
    >>> # Introduce an obvious outlier
    >>> x[0] = 30.0
    >>> bicorr = biweight_midcorrelation(x, y)
    >>> print(bicorr)    # doctest: +FLOAT_CMP
    -0.0495780713907
    """
    x = np.asanyarray(x)
    y = np.asanyarray(y)

    # validate the inputs before delegating to the covariance routine
    if x.ndim != 1:
        raise ValueError('x must be a 1D array.')
    if y.ndim != 1:
        raise ValueError('y must be a 1D array.')
    if x.shape != y.shape:
        raise ValueError('x and y must have the same shape.')

    cov = biweight_midcovariance([x, y], c=c, M=M,
                                 modify_sample_size=modify_sample_size)
    # normalize the cross term by the two marginal biweight scales
    return cov[0, 1] / (np.sqrt(cov[0, 0] * cov[1, 1]))
|
|
#!/usr/bin/env python
"""Creates the Script menu.
To Do:
- add html help; note that this will have to be fed to ScriptWdg,
RO.ScriptWdg has no idea of TUI help
History:
2004-07-19 ROwen
2004-08-11 ROwen Modified for updated RO.Wdg.Toplevel.
2004-08-23 ROwen Added some diagnostic print statements (commented out).
2004-10-11 ROwen Modified to reject files whose names begin with ".".
2004-10-28 ROwen Bug fix: Open... was broken.
2005-09-22 ROwen Fix PR 272: standard scripts not available on Mac;
this was broken by the packaging overhaul for TUI 1.0.1.
Fix PR 132: Script menu may not load at first on MacOS X;
this was fixed via a hideous hack.
Modified to check/rebuild the entire menu when the root
menu is shown, instead of using lazy check/rebuild;
this simplified the hack for PR 132.
Modified to prebuild the menu at startup.
Modified test code to show a standard pull-down menu.
2011-06-16 ROwen Ditched obsolete "except (SystemExit, KeyboardInterrupt): raise" code
2012-07-18 ROwen Removed use of update_idletasks and an ugly Mac workaround that is no longer required.
2014-02-12 ROwen Moved some code to TUI.Base.ScriptLoader so other users could get to it more easily.
2015-03-18 ROwen Removed _RootNode.isAqua because it was not being used.
"""
import os
import Tkinter
import tkFileDialog
import RO.Alg
from TUI.Base.ScriptLoader import getScriptDirs, ScriptLoader
__all__ = ["getScriptMenu"]
def getScriptMenu(master):
    """Build and return the Scripts menu for master, pre-populated
    from all known script directories.
    """
    root = _RootNode(master=master, label="", pathList=getScriptDirs())
    # prebuild the whole menu tree so it is ready at startup
    root.checkMenu(recurse=True)
    return root.menu
class _MenuNode:
    """Menu and related information about sub-menu of the Scripts menu

    Each node represents one level of hiearchy in the various scripts directories.
    The contents of a given subdir are dynamically tested, but the existence
    of a particular subdirectory is not. This sounds like a mistake to me;
    if a given subdir exists in any scripts dir, it should be checked every time
    in all scripts dirs.
    """
    def __init__(self, parentNode, label, pathList):
        """Construct a _MenuNode

        Inputs:
        - parentNode: parent menu node
        - label: label of this sub-menu
        - pathList: list of paths to this subdirectory in the script hierarchy
            (one entry for each of the following, but only if the subdir exists:
            built-in scripts dir, local TUIAddtions/Scripts and shared TUIAdditions/Scripts)
        """
        # print "_MenuNode(%r, %r, %r)" % (parentNode, label, pathList)
        self.parentNode = parentNode
        self.label = label
        self.pathList = pathList
        # script label (file basename without extension) -> full path of .py file
        self.itemDict = {}
        # subdir basename -> list of full paths (one per scripts root that has it)
        self.subDict = RO.Alg.ListDict()
        # child _MenuNode instances, one per entry in subDict
        self.subNodeList = []
        self._setMenu()

    def _setMenu(self):
        """Create this node's Tkinter menu and attach it as a cascade
        of the parent node's menu.
        """
        self.menu = Tkinter.Menu(
            self.parentNode.menu,
            tearoff = False,
#            postcommand = self.checkMenu,
        )
        self.parentNode.menu.add_cascade(
            label = self.label,
            menu = self.menu,
        )

    def checkMenu(self, recurse=True):
        """Check contents of menu and rebuild if anything has changed.

        Rescans every directory in self.pathList, collecting .py files as
        menu items and subdirectories as sub-menus; rebuilds the menu only
        if the scan result differs from the cached state.

        Return True if anything rebuilt.
        """
        # print "%s checkMenu" % (self,)
        newItemDict = {}
        newSubDict = RO.Alg.ListDict()
        didRebuild = False

        for path in self.pathList:
            for baseName in os.listdir(path):
                # reject files that would be invisible on unix
                if baseName.startswith("."):
                    continue

                baseBody, baseExt = os.path.splitext(baseName)
                fullPath = os.path.normpath(os.path.join(path, baseName))
                if os.path.isfile(fullPath) and baseExt.lower() == ".py":
                    # print "checkMenu newItem[%r] = %r" % (baseBody, fullPath)
                    newItemDict[baseBody] = fullPath
                elif os.path.isdir(fullPath) and baseExt.lower() != ".py":
                    # print "checkMenu newSubDir[%r] = %r" % (baseBody, fullPath)
                    newSubDict[baseName] = fullPath
#                else:
#                    print "checkMenu ignoring %r = %r" % (baseName, fullPath)

        if (self.itemDict != newItemDict) or (self.subDict != newSubDict):
            didRebuild = True
            # rebuild contents
            # print "checkMenu rebuild contents"
            self.itemDict = newItemDict
            self.subDict = newSubDict
            self.menu.delete(0, "end")
            self.subNodeList = []
            self._fillMenu()
#        else:
#            print "checkMenu do not rebuild contents"

        if recurse:
            # note: sub-nodes were recreated by _fillMenu if we rebuilt
            for subNode in self.subNodeList:
                subRebuilt = subNode.checkMenu(recurse=True)
                didRebuild = didRebuild or subRebuilt

        return didRebuild

    def _fillMenu(self):
        """Fill the menu.

        Adds one command entry per script (sorted by label) followed by
        one cascade entry per subdirectory (sorted by name); creating a
        child _MenuNode attaches its cascade as a side effect.
        """
        # print "%s _fillMenu"
        itemKeys = self.itemDict.keys()
        itemKeys.sort()
        # print "%s found items: %s" % (self, itemKeys)

        for label in itemKeys:
            subPathList = list(self.getLabels()) + [label]
            fullPath = self.itemDict[label]
            # print "adding script %r: %r" % (label, fullPath)
            self.menu.add_command(
                label = label,
                command = ScriptLoader(subPathList=subPathList, fullPath=fullPath),
            )

        subdirList = self.subDict.keys()
        subdirList.sort()
        # print "%s found subdirs: %s" % (self, subdirList)
        for subdir in subdirList:
            pathList = self.subDict[subdir]
            # print "adding submenu %r: %r" % (subdir, pathList)
            self.subNodeList.append(_MenuNode(self, subdir, pathList))

    def getLabels(self):
        """Return a list of labels all the way up to, but not including, the root node.
        """
        retVal = self.parentNode.getLabels()
        retVal.append(self.label)
        return retVal

    def __str__(self):
        return "%s %s" % (self.__class__.__name__, ":".join(self.getLabels()))
class _RootNode(_MenuNode):
    """The main scripts menu and related information.

    Unlike _MenuNode, the root menu hangs directly off *master* (no
    cascade entry) and rescans the script hierarchy on every post
    (postcommand=checkMenu).
    """
    def __init__(self, master, label, pathList):
        """Construct the _RootNode
        Inputs:
        - master: parent widget for the root menu (not a _MenuNode)
        - label: label of this menu
        - pathList: list of paths to scripts, as returned by TUI.Base.ScriptLoader.getScriptDirs()
        """
        self.master = master
        _MenuNode.__init__(self, None, label, pathList)
    def _setMenu(self):
        # root menu: parented to master, refreshed each time it is posted
        self.menu = Tkinter.Menu(
            self.master,
            tearoff = False,
            postcommand = self.checkMenu,
        )
    def _fillMenu(self):
        """Fill the menu.

        Adds the fixed "Open..." item first, then the scanned items.
        """
        self.menu.add_command(label="Open...", command=self.doOpen)
        _MenuNode._fillMenu(self)
    def doOpen(self):
        """Handle Open... menu item: prompt for a .py file and run it."""
        initialDir = os.path.expanduser("~")
        # expanduser returns its argument unchanged if ~ cannot be resolved
        if initialDir == "~":
            initialDir = None
        # NOTE(review): 'master' is not a documented askopenfilename option
        # ('parent' is) — confirm it is accepted/ignored on all platforms.
        fullPath = tkFileDialog.askopenfilename(
            master = self.master,
            initialdir = initialDir,
            title="TUI Script",
            filetypes = [("Python", "*.py")],
        )
        if not fullPath:
            return
        # NOTE(review): os.path.split yields a (dir, filename) 2-tuple here,
        # whereas menu items pass a list of menu labels — confirm ScriptLoader
        # accepts both shapes.
        pathList = os.path.split(fullPath)
        ScriptLoader(subPathList=pathList, fullPath=fullPath)()
    def getLabels(self):
        """Return a list of labels all the way up to, but not including, the root node.

        The root contributes nothing, terminating the recursion in _MenuNode.getLabels.
        """
        return []
if __name__ == "__main__":
    # Manual test: show a bare window whose menu bar contains the Scripts menu.
    # NOTE(review): RO.Wdg appears unused here — possibly imported for side effects.
    import RO.Wdg
    root = Tkinter.Tk()
    menuBar = Tkinter.Menu(root)
    root["menu"] = menuBar
    scriptMenu = getScriptMenu(menuBar)
    menuBar.add_cascade(label="Scripts", menu=scriptMenu)
    root.mainloop()
|
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import click
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor, as_completed
from dateutil.parser import parse as parse_date
from datetime import datetime, timedelta
import jsonschema
from requests.exceptions import ConnectionError
import logging
import math
import os
import sqlite3
import time
import yaml
from c7n.credentials import assumed_session
from c7n.executor import MainThreadExecutor
from c7n.utils import chunks, dumps
from c7n_org.cli import CONFIG_SCHEMA
try:
from influxdb import InfluxDBClient
HAVE_INFLUXDB = True
except ImportError:
HAVE_INFLUXDB = False
log = logging.getLogger('metrics')
# Extend the c7n-org account config schema to allow an 'indexer' section.
CONFIG_SCHEMA['properties']['indexer'] = {'type': 'object'}
# Max datapoints requested per CloudWatch call; get_date_ranges splits longer
# windows so each request stays under this (mirrors the CloudWatch
# GetMetricStatistics 1,440-datapoint response limit).
MAX_RESULT_POINTS = 1440
class Resource:
    """Base class for CMDB-backed resource types.

    Subclasses define:
      - mid: column holding the resource's unique id
      - table: CMDB sqlite table name
      - namespace / type / metrics / get_dimensions(): CloudWatch metadata
    """

    @classmethod
    def id(cls, r):
        """Return the unique id of resource record *r*."""
        return r[cls.mid]

    @classmethod
    def get_resources(cls, cmdb, start, end, app, env):
        """Return resource records for app/env whose lifetime overlaps [start, end].

        Bug fix: this previously returned a one-shot ``map`` iterator, but
        callers (see cli) take ``len()`` of the result and iterate it more
        than once — so the rows are now materialized as a list of dicts
        (column name -> value).
        """
        with sqlite3.connect(cmdb) as conn:
            cursor = conn.cursor()
            # cls.table is a trusted class constant, never user input.
            cursor.execute(
                '''
                select *
                from %s
                where app = ?
                and env = ?
                and start < ?
                and (end > ? or end is null)
                ''' % cls.table,
                (app, env,
                 end.strftime('%Y-%m-%dT%H:%M'),
                 start.strftime('%Y-%m-%dT%H:%M')))
            columns = [v[0] for v in cursor.description]
            return [dict(zip(columns, row)) for row in cursor]

    @staticmethod
    def get_type(rtype):
        """Map a type name ('Instance', 'Volume', 'LoadBalancer') to its class."""
        return RESOURCE_INFO[rtype]
class EC2(Resource):
    """CloudWatch metadata for EC2 instances."""
    mid = 'instance_id'
    table = 'ec2'
    namespace = 'AWS/EC2'
    type = 'Instance'
    # No explicit 'statistic' key: consumers default to Average.
    metrics = [
        dict(name=metric_name) for metric_name in (
            'CPUUtilization',
            'NetworkIn',
            'NetworkOut',
            'DiskReadOps',
            'DiskWriteOps',
            'DiskReadBytes',
            'DiskWriteBytes',
        )]

    @staticmethod
    def get_dimensions(r):
        """CloudWatch dimensions selecting instance record *r*."""
        return [{'Name': 'InstanceId', 'Value': r['instance_id']}]
class ELB(Resource):
    """CloudWatch metadata for classic Elastic Load Balancers."""
    mid = 'name'
    # Bug fix: a stray trailing comma previously made this a 1-tuple
    # (table = 'elbs',), which rendered the SQL in Resource.get_resources
    # as "from ('elbs',)" and broke the query.
    table = 'elbs'
    namespace = 'AWS/ELB'
    type = 'LoadBalancer'
    metrics = [
        dict(name='HealthyHostCount'),
        dict(name='UnHealthyHostCount'),
        dict(name='BackendConnectionErrors', statistic='Sum'),
        dict(name='HTTPCode_Backend_2XX', statistic='Sum'),
        dict(name='HTTPCode_Backend_3XX', statistic='Sum'),
        dict(name='HTTPCode_Backend_4XX', statistic='Sum'),
        dict(name='HTTPCode_Backend_5XX', statistic='Sum'),
        dict(name='Latency', statistic='Average'),
        dict(name='RequestCount', statistic='Sum'),
        dict(name='SpilloverCount', statistic='Sum'),
        dict(name='SurgeQueueLength', statistic='Maximum')]

    @staticmethod
    def get_dimensions(r):
        """CloudWatch dimensions selecting load balancer record *r*."""
        return [{'Name': 'LoadBalancerName', 'Value': r['name']}]

    # NOTE(review): the CMDB may return the same ELB name for multiple rows;
    # a dedup override of get_resources existed here as commented-out code
    # and may still be needed.
class EBS(Resource):
    """CloudWatch metadata for EBS volumes."""
    mid = 'volume_id'
    table = 'ebs'
    namespace = 'AWS/EBS'
    type = 'Volume'
    # No explicit 'statistic' key: consumers default to Average.
    metrics = [
        dict(name=metric_name) for metric_name in (
            'VolumeReadBytes',
            'VolumeReadOps',
            'VolumeWriteBytes',
            'VolumeWriteOps',
            'VolumeTotalReadTime',
            'VolumeTotalWriteTime',
            'VolumeQueueLength',
        )]

    @staticmethod
    def get_dimensions(r):
        """CloudWatch dimensions selecting volume record *r*."""
        return [{'Name': 'VolumeId', 'Value': r['volume_id']}]
# Map CloudWatch-style type names (each class's .type value) to the class;
# used by Resource.get_type and by the CLI's --resources choices.
RESOURCE_INFO = {
    'Instance': EC2,
    'Volume': EBS,
    'LoadBalancer': ELB}
def get_indexer(config):
    """Instantiate the indexer named by config['indexer']['type'].

    Supported types: 'dir' (JSON files on disk) and 'influx' (InfluxDB).
    Raises ValueError for anything else (including a missing type).
    """
    kind = config['indexer'].get('type')
    if kind == 'dir':
        return DirIndexer(config)
    if kind == 'influx':
        return InfluxIndexer(config)
    raise ValueError("Unknown index type: %s" % kind)
class DirIndexer:
    """Indexer that writes each metric series to a JSON file on disk.

    Layout: <store-dir>/<account_id>/<resource_id>/<metric-name>.json
    """

    def __init__(self, config):
        self.config = config
        self.dir = config['indexer'].get('store-dir')

    def index(self, metrics_set):
        """Write each (resource, type-name, metric, points) tuple to disk.

        Returns the number of datapoints written — the caller accumulates
        a count (cli does ``metrics_count += indexer.index(...)``), which
        previously failed because this method returned None.
        """
        count = 0
        for r, rtype_name, m, point_set in metrics_set:
            # Bug fix: rtype arrives as a type *name* string (collect_metrics
            # stores resource_type.type); resolve it to the Resource subclass
            # before calling id(), as InfluxIndexer.index does.
            rtype = Resource.get_type(rtype_name)
            mdir = os.path.join(
                self.dir, r['account_id'], rtype.id(r))
            if not os.path.exists(mdir):
                os.makedirs(mdir)
            # Bug fix: the '%s.json' template was never filled in, so every
            # metric overwrote a single literal '%s.json' file.
            fname = '%s.json' % m['name']
            with open(os.path.join(mdir, fname), 'w') as fh:
                fh.write(dumps([r, rtype_name, m, point_set]))
            count += len(point_set)
        return count
class SQLIndexer:
    """Placeholder for a SQL-backed indexer.

    NOTE(review): unfinished — get_indexer() never returns this class, and
    `engine` holds the raw DSN string rather than an engine instance.
    """
    # metadata = rdb.MetaData()
    # table = rdb.Table(
    #     'resource_metrics',
    #     rdb.Column(),
    #     rdb.Column(),
    #     )
    def __init__(self, config):
        self.config = config
        self.engine = self.config['indexer']['dsn']
class InfluxIndexer:
    """Indexer that writes metric datapoints to InfluxDB.

    Measurements are named '<namespace-suffix>_<metricname>' lower-cased
    (e.g. 'ec2_cpuutilization'); resource identity/app/env go in as tags.
    """

    def __init__(self, config):
        self.config = config
        self.client = InfluxDBClient(
            username=self.config['indexer']['user'],
            password=self.config['indexer']['password'],
            host=self.config['indexer']['host'],
            database=self.config['indexer']['database'])

    def first(self, resource, resource_type, metric):
        """Return the timestamp of the oldest indexed point, or None.

        Bug fix: previously ordered 'desc' (identical to last()); the
        earliest point requires ascending time order.
        """
        mkey = ("%s_%s" % (
            resource_type.namespace.split('/')[-1],
            metric['name'])).lower()
        return self.get_resource_time(resource_type.id(resource), mkey, 'asc')

    def last(self, resource, resource_type, metric):
        """Return the timestamp of the newest indexed point, or None."""
        mkey = ("%s_%s" % (
            resource_type.namespace.split('/')[-1],
            metric['name'])).lower()
        return self.get_resource_time(resource_type.id(resource), mkey, 'desc')

    def get_resource_time(self, rid, mkey, direction='desc'):
        """Query the boundary timestamp for one resource/measurement."""
        result = self.client.query(
            '''select * from %s
            where ResourceId = '%s' order by time %s limit 1''' % (
                mkey, rid, direction))
        if len(result) == 0:
            return None
        return parse_date(list(result)[0][0]['time'])

    def index(self, metrics_set):
        """Convert (resource, type-name, metric, points) tuples to Influx
        points and write them in 10k batches. Returns the point count."""
        points = []
        for r, rtype_name, m, point_set in metrics_set:
            rtype = Resource.get_type(rtype_name)
            rtags = {
                'ResourceId': rtype.id(r),
                'ResourceType': rtype.__name__,
                'AccountId': r['account_id'],
                'Region': r['region'],
                'App': r['app'],
                'Env': r['env']}
            s = m.get('statistic', 'Average')
            for p in point_set:
                # copy so the caller's datapoint dicts are not mutated
                p = dict(p)
                p['fields'] = {}
                p['fields'][s] = p.pop(s)
                if 'Unit' in p:
                    pu = p.pop('Unit', None)
                    # CloudWatch reports the literal string 'None' for unitless data
                    if pu != 'None':
                        p['fields']['Unit'] = pu
                p['measurement'] = ("%s_%s" % (
                    rtype.namespace.split('/')[-1],
                    m['name'])).lower()
                p['time'] = p.pop('Timestamp')
                p['tags'] = rtags
                points.append(p)
        for point_set in chunks(points, 10000):
            errs = 0
            while True:
                # retry transient connection errors up to 3 times, 3s apart
                try:
                    self.client.write_points(point_set)
                except ConnectionError:
                    errs += 1
                    if errs > 3:
                        raise
                    time.sleep(3)
                    continue
                else:
                    break
        return len(points)
def get_sessions(accounts_config, account_ids):
    """Return {account_id: assumed-role session} for the requested accounts.

    Accounts in the config but not in *account_ids* are skipped.
    """
    return {
        account['account_id']: assumed_session(account['role'], 'app-metrics')
        for account in accounts_config.get('accounts', [])
        if account['account_id'] in account_ids
    }
def get_clients(accounts_config, account_ids, regions, service='cloudwatch'):
    """Return one boto client per (account, region), keyed '<account_id>-<region>'.

    One assumed-role session is created per matching account and reused
    across its regions.
    """
    clients = {}
    for account in accounts_config.get('accounts', []):
        account_id = account['account_id']
        if account_id not in account_ids:
            continue
        session = assumed_session(account['role'], 'app-metrics')
        for region in regions:
            key = '%s-%s' % (account_id, region)
            clients[key] = session.client(service, region_name=region)
    return clients
def get_date_ranges(start, end, period, r):
    """Yield (start, end) windows for CloudWatch queries on resource *r*.

    The requested [start, end] range is clipped to the resource's own
    lifetime (r['start'] / r['end'], the latter possibly None = still
    alive), then split into windows small enough that each returns at
    most MAX_RESULT_POINTS datapoints at the given *period* (seconds).
    Yields nothing if the resource died before the range starts.
    """
    r_start = parse_date(r['start']).replace(tzinfo=None)
    if r['end']:
        r_end = parse_date(r['end']).replace(tzinfo=None)
    else:
        # resource still alive: its lifetime extends through the query end
        r_end = end
    if r_start > start:
        start = r_start
    if r_end < end:
        end = r_end
    if r_end < start:
        return
    # NOTE(review): there is no symmetric r_start > end guard; a resource
    # created after the range end would yield a window with start > end.
    date_delta = (end - start)
    increments = date_delta.total_seconds() / float(period)
    if increments <= MAX_RESULT_POINTS:
        yield (start, end)
        return
    # too many points for one call: split into MAX_RESULT_POINTS-sized windows
    parts = date_delta.total_seconds() / (MAX_RESULT_POINTS * period)
    for i in range(int(math.ceil(parts))):
        max_period = timedelta(seconds=(MAX_RESULT_POINTS * period))
        p_start = start + max_period * i
        p_end = min(end, start + max_period * (i + 1))
        yield (p_start, p_end)
# Metric granularity tiers keyed by age: (min_days_ago, max_days_ago) ->
# period in seconds. Appears to mirror CloudWatch's retention schedule
# (1-minute data for 15 days, 5-minute for 63, 1-hour for 455).
# Consumed in order by get_metric_period.
RETENTION_PERIODS = OrderedDict([
    ((0, 15), 60),
    ((15, 63), 300),
    ((63, 455), 3600)
])
def get_metric_period(start, end):
    """Pick the finest metric period (seconds) still retained at *start*'s age.

    Scans RETENTION_PERIODS in order and returns the first tier whose upper
    bound (days ago) exceeds the age of *start*. Returns None when *start*
    is older than every tier (> 455 days). *end* is currently unused.
    """
    age = datetime.now() - start
    for (_, days_limit), period in RETENTION_PERIODS.items():
        if age.days < days_limit:
            return period
def get_metric_tasks(indexer, resource_type, resource_set, start, end):
    """Build CloudWatch get_metric_statistics request tuples.

    For each resource and each of its type's metrics, asks the indexer for
    the newest already-indexed timestamp (to resume incrementally) and
    splits the remaining range into query windows via get_date_ranges.
    Returns a list of (resource, type_name, metric, request_params) tuples
    consumed by collect_metrics.
    """
    tasks = []
    period = get_metric_period(start, end)
    for r in resource_set:
        dims = resource_type.get_dimensions(r)
        for m in resource_type.metrics:
            # TODO: incremental, needs more thought, this is barebones
            # but works for always forward mode, its also chatty
            # we should query out the values for the entire app's
            # resources.
            m_end = indexer.last(r, resource_type, m)
            if m_end is not None:
                m_end = m_end.replace(tzinfo=None)
                if m_end > start:
                    # NOTE(review): this rebinds the shared `start` for all
                    # subsequent metrics AND resources in the loop, not just
                    # this one — looks unintended; confirm.
                    start = m_end
            for (start_time, end_time) in get_date_ranges(
                    start, end, period, r):
                params = dict(
                    Namespace=resource_type.namespace,
                    MetricName=m['name'],
                    # metrics may override the statistic (e.g. ELB uses Sum/Maximum)
                    Statistics=[m.get('statistic', 'Average')],
                    StartTime=start_time,
                    EndTime=end_time,
                    Period=period,
                    Dimensions=dims)
                tasks.append((r, resource_type.type, m, params))
    return tasks
def collect_metrics(clients, tasks):
    """Execute CloudWatch requests for each task, dropping empty results.

    Each task is (resource, type_name, metric, request_params); each result
    is the same tuple with the params replaced by the datapoint list. The
    client is looked up by the '<account_id>-<region>' key used by
    get_clients.
    """
    results = []
    for resource, rtype_name, metric, params in tasks:
        key = '%s-%s' % (resource['account_id'], resource['region'])
        response = clients.get(key).get_metric_statistics(**params)
        datapoints = response.get('Datapoints', [])
        if datapoints:
            results.append((resource, rtype_name, metric, datapoints))
    return results
@click.command('load-app-metrics')
@click.option('--app', required=True)
@click.option('--env')
@click.option(
    '-r', '--resources', multiple=True,
    type=click.Choice(['Instance', 'LoadBalancer', 'Volume']))
@click.option('--cmdb', required=True, type=click.Path())
@click.option('--config', required=True, type=click.Path())
@click.option('--start', required=True)
@click.option('--end', required=True)
@click.option('--debug', is_flag=True)
def cli(app, env, resources, cmdb, config, start, end, debug):
    # Collect CloudWatch metrics for an app/env's CMDB resources over
    # [start, end] and feed them to the configured indexer.
    # (Deliberately no docstring: click would surface it as --help text.)
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
    logging.getLogger('botocore').setLevel(logging.WARNING)
    with open(config) as fh:
        accounts_config = yaml.safe_load(fh.read())
        jsonschema.validate(accounts_config, CONFIG_SCHEMA)
    start, end = parse_date(start), parse_date(end)
    log.info("Collecting app:%s env:%s metrics %s to %s", app, env, start, end)
    MainThreadExecutor.c7n_async = False
    # --debug runs everything inline on the main thread for pdb-ability
    executor = debug and MainThreadExecutor or ThreadPoolExecutor
    indexer = get_indexer(accounts_config)
    for rtype in resources:
        metrics_count = 0
        resource_type = RESOURCE_INFO[rtype]
        resource_set = resource_type.get_resources(cmdb, start, end, app, env)
        # NOTE(review): if get_resources returns a map iterator (Py3), the
        # two set-comprehensions below consume it and len() fails — confirm
        # it returns a list.
        clients = get_clients(
            accounts_config,
            {r['account_id'] for r in resource_set},
            {r['region'] for r in resource_set})
        log.info("Found %d %s resources", len(resource_set), rtype)
        tasks = get_metric_tasks(
            indexer, resource_type, resource_set, start, end)
        log.info("Collecting metrics across %d tasks", len(tasks))
        t = time.time()
        with executor(max_workers=6) as w:
            futures = []
            for task_set in chunks(tasks, 50):
                futures.append(w.submit(collect_metrics, clients, task_set))
            for f in as_completed(futures):
                if f.exception():
                    log.warning(
                        "error processing resource set %s" % f.exception())
                    continue
                metrics_count += indexer.index(f.result())
        log.info(
            "time:%0.2f app:%s resource_type:%s points:%d start:%s end:%s",
            time.time() - t, app, rtype, metrics_count, start, end)
if __name__ == '__main__':
    try:
        cli()
    except Exception:
        # Developer convenience: print the traceback, then drop into a
        # post-mortem debugger at the point of failure.
        import pdb, traceback, sys
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[-1])
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import numpy as np
import random
import types
import nose
from six import BytesIO
import pickle
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
from slicerator import Slicerator, pipeline, index_attr, propagate_attr
path, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(path, 'data')
def assert_letters_equal(actual, expected):
    """Assert two sequences have equal length and equal elements, in order."""
    assert_equal(len(actual), len(expected))
    for got, want in zip(actual, expected):
        assert_equal(got, want)
def compare_slice_to_list(actual, expected):
    """Exhaustively compare a sliceable object against a plain list.

    Exercises per-index access (positive, negative, reversed, shuffled),
    fancy (list) indexing with mixed signs, and a spread of slices.
    """
    assert_letters_equal(actual, expected)
    indices = list(range(len(actual)))
    for i in indices:
        # test positive indexing
        assert_letters_equal(actual[i], expected[i])
        # test negative indexing
        # NOTE(review): -i + 1 is an odd "negative" index (i=0 gives +1);
        # it is applied to both sides so the comparison is still valid,
        # but -i - 1 may have been intended.
        assert_letters_equal(actual[-i + 1], expected[-i + 1])
    # in reverse order
    for i in indices[::-1]:
        assert_letters_equal(actual[i], expected[i])
        assert_letters_equal(actual[-i + 1], expected[-i + 1])
    # in shuffled order (using a consistent random seed)
    r = random.Random(5)
    r.shuffle(indices)
    for i in indices:
        assert_letters_equal(actual[i], expected[i])
        assert_letters_equal(actual[-i + 1], expected[-i + 1])
    # test list indexing
    some_indices = [r.choice(indices) for _ in range(2)]
    assert_letters_equal([actual[i] for i in some_indices],
                         [expected[i] for i in some_indices])
    # mixing positive and negative indices
    some_indices = [r.choice(indices + [-i-1 for i in indices])
                    for _ in range(2)]
    assert_letters_equal([actual[i] for i in some_indices],
                         [expected[i] for i in some_indices])
    # test slices
    assert_letters_equal(actual[::2], expected[::2])
    assert_letters_equal(actual[1::2], expected[1::2])
    assert_letters_equal(actual[::3], expected[::3])
    assert_letters_equal(actual[1:], expected[1:])
    assert_letters_equal(actual[:], expected[:])
    assert_letters_equal(actual[:-1], expected[:-1])
# Module-level fixtures shared by the tests: letters ('a'..'j') and digits (0..9).
v = Slicerator(list('abcdefghij'))
n = Slicerator(list(range(10)))
def test_bool_mask():
    """A boolean numpy mask selects the True positions from a Slicerator."""
    mask = np.tile([True, False], 5)
    masked = v[mask]
    assert_letters_equal(masked, list('acegi'))
def test_slice_of_slice():
    """Slices and fancy indexes of an existing slice compose correctly,
    including negative steps and repeated/negative fancy indices."""
    slice1 = v[4:]
    compare_slice_to_list(slice1, list('efghij'))
    slice2 = slice1[-3:]
    compare_slice_to_list(slice2, list('hij'))
    slice1a = v[[3, 4, 5, 6, 7, 8, 9]]
    compare_slice_to_list(slice1a, list('defghij'))
    slice2a = slice1a[::2]
    compare_slice_to_list(slice2a, list('dfhj'))
    slice2b = slice1a[::-1]
    compare_slice_to_list(slice2b, list('jihgfed'))
    slice2c = slice1a[::-2]
    compare_slice_to_list(slice2c, list('jhfd'))
    slice2d = slice1a[:0:-1]
    compare_slice_to_list(slice2d, list('jihgfe'))
    slice2e = slice1a[-1:1:-1]
    compare_slice_to_list(slice2e, list('jihgf'))
    slice2f = slice1a[-2:1:-1]
    compare_slice_to_list(slice2f, list('ihgf'))
    slice2g = slice1a[::-3]
    compare_slice_to_list(slice2g, list('jgd'))
    slice2h = slice1a[[5, 6, 2, -1, 3, 3, 3, 0]]
    compare_slice_to_list(slice2h, list('ijfjgggd'))
def test_slice_of_slice_of_slice():
    """Three levels of slicing (including a fancy index) compose correctly."""
    slice1 = v[4:]
    compare_slice_to_list(slice1, list('efghij'))
    slice2 = slice1[1:-1]
    compare_slice_to_list(slice2, list('fghi'))
    slice2a = slice1[[2, 3, 4]]
    compare_slice_to_list(slice2a, list('ghi'))
    slice3 = slice2[1::2]
    compare_slice_to_list(slice3, list('gi'))
def test_slice_of_slice_of_slice_of_slice():
    """Four levels of slicing compose correctly, for two different chains."""
    # Take the red pill. It's slices all the way down!
    slice1 = v[4:]
    compare_slice_to_list(slice1, list('efghij'))
    slice2 = slice1[1:-1]
    compare_slice_to_list(slice2, list('fghi'))
    slice3 = slice2[1:]
    compare_slice_to_list(slice3, list('ghi'))
    slice4 = slice3[1:]
    compare_slice_to_list(slice4, list('hi'))
    # Give me another!
    slice1 = v[2:]
    compare_slice_to_list(slice1, list('cdefghij'))
    slice2 = slice1[0::2]
    compare_slice_to_list(slice2, list('cegi'))
    slice3 = slice2[:]
    compare_slice_to_list(slice3, list('cegi'))
    slice4 = slice3[:-1]
    compare_slice_to_list(slice4, list('ceg'))
    slice4a = slice3[::-1]
    compare_slice_to_list(slice4a, list('igec'))
def test_slice_with_generator():
    """Indexing a slice with a generator yields lazily (returns a generator)."""
    slice1 = v[1:]
    compare_slice_to_list(slice1, list('bcdefghij'))
    slice2 = slice1[(i for i in range(2, 5))]
    assert_letters_equal(list(slice2), list('def'))
    assert_true(isinstance(slice2, types.GeneratorType))
def test_no_len_raises():
    """Constructing a Slicerator from length-less generators raises ValueError."""
    with assert_raises(ValueError):
        Slicerator((i for i in range(5)), (i for i in range(5)))
def test_from_func():
    """Slicerator.from_func wraps an index->value function; slices compose."""
    v = Slicerator.from_func(lambda x: 'abcdefghij'[x], length=10)
    compare_slice_to_list(v, list('abcdefghij'))
    compare_slice_to_list(v[1:], list('bcdefghij'))
    compare_slice_to_list(v[1:][:4], list('bcde'))
def _capitalize(letter):
return letter.upper()
def _capitalize_if_equal(letter, other_letter):
if letter == other_letter:
return letter.upper()
else:
return letter
def _a_to_z(letter):
if letter == 'a':
return 'z'
else:
return letter
@pipeline
def append_zero_inplace(list_obj):
    """Pipeline stage that mutates its input element by appending 0.

    Used by test_inplace_pipeline to verify the pipeline operates on a
    copy, leaving the source Slicerator's elements unmodified.
    """
    list_obj.append(0)
    return list_obj
def test_inplace_pipeline():
    """A mutating pipeline stage must not alter the source elements."""
    n_mutable = Slicerator([list([i]) for i in range(10)])
    appended = append_zero_inplace(n_mutable)
    assert_equal(appended[5], [5, 0])  # execute the function
    assert_equal(n_mutable[5], [5])  # check the original
def test_pipeline_simple():
    """A pipeline applied to a slice transforms the sliced elements."""
    capitalize = pipeline(_capitalize)
    cap_v = capitalize(v[:1])
    assert_letters_equal([cap_v[0]], [_capitalize(v[0])])
def test_pipeline_propagation():
    """The pipeline transform survives repeated slicing of its output."""
    capitalize = pipeline(_capitalize)
    cap_v = capitalize(v)
    assert_letters_equal([cap_v[:1][0]], ['A'])
    assert_letters_equal([cap_v[:1][:2][0]], ['A'])
def test_pipeline_nesting():
    """Nested pipelines compose: a->z first, then capitalization."""
    capitalize = pipeline(_capitalize)
    a_to_z = pipeline(_a_to_z)
    nested_v = capitalize(a_to_z(v))
    assert_letters_equal([nested_v[0]], ['Z'])
    assert_letters_equal([nested_v[:1][0]], ['Z'])
def _add_one(number):
return number + 1
def test_pipeline_nesting_numeric():
    """Triple-nested numeric pipelines apply all three increments."""
    add_one = pipeline(_add_one)
    triple_nested = add_one(add_one(add_one(n)))
    assert_letters_equal([triple_nested[0]], [3])
    assert_letters_equal([triple_nested[:1][0]], [3])
def test_repr():
    """repr() of a Slicerator must not raise (smoke test only)."""
    repr(v)
def test_getattr():
    """Only attributes named in propagate_attrs survive slicing; indexed
    attributes (@index_attr) are sliced alongside the data, and the same
    holds after a pipeline is applied."""
    class MyList(list):
        attr1 = 'hello'
        attr2 = 'hello again'

        @index_attr
        def s(self, i):
            return list('ABCDEFGHIJ')[i]

        def close(self):
            pass

    a = Slicerator(MyList('abcdefghij'), propagate_attrs=['attr1', 's'])
    assert_letters_equal(a, list('abcdefghij'))
    assert_true(hasattr(a, 'attr1'))
    assert_false(hasattr(a, 'attr2'))
    assert_true(hasattr(a, 's'))
    assert_false(hasattr(a, 'close'))
    assert_equal(a.attr1, 'hello')
    with assert_raises(AttributeError):
        a[:5].nonexistent_attr

    # the indexed attribute is sliced in step with the data
    compare_slice_to_list(list(a.s), list('ABCDEFGHIJ'))
    compare_slice_to_list(list(a[::2].s), list('ACEGI'))
    compare_slice_to_list(list(a[::2][1:].s), list('CEGI'))

    # attribute propagation also survives a pipeline
    capitalize = pipeline(_capitalize)
    b = capitalize(a)
    assert_letters_equal(b, list('ABCDEFGHIJ'))
    assert_true(hasattr(b, 'attr1'))
    assert_false(hasattr(b, 'attr2'))
    assert_true(hasattr(b, 's'))
    assert_false(hasattr(b, 'close'))
    assert_equal(b.attr1, 'hello')
    with assert_raises(AttributeError):
        b[:5].nonexistent_attr

    compare_slice_to_list(list(b.s), list('ABCDEFGHIJ'))
    compare_slice_to_list(list(b[::2].s), list('ACEGI'))
    compare_slice_to_list(list(b[::2][1:].s), list('CEGI'))
def test_getattr_subclass():
    """A subclass's propagate_attrs list replaces the parent's; @propagate_attr
    -decorated attributes propagate in both the class and its subclasses."""
    @Slicerator.from_class
    class Dummy(object):
        propagate_attrs = ['attr1']

        def __init__(self):
            self.frame = list('abcdefghij')

        def __len__(self):
            return len(self.frame)

        def __getitem__(self, i):
            return self.frame[i]

        def attr1(self):
            # propagates through slices of Dummy
            return 'sliced'

        @propagate_attr
        def attr2(self):
            # propagates through slices of Dummy and subclasses
            return 'also in subclasses'

        def attr3(self):
            # does not propagate
            return 'only unsliced'

    class SubClass(Dummy):
        propagate_attrs = ['attr4']  # overwrites propagated attrs from Dummy

        def __len__(self):
            return len(self.frame)

        @property
        def attr4(self):
            # propagates through slices of SubClass
            return 'only subclass'

    dummy = Dummy()
    subclass = SubClass()
    # unsliced objects expose everything they define
    assert_true(hasattr(dummy, 'attr1'))
    assert_true(hasattr(dummy, 'attr2'))
    assert_true(hasattr(dummy, 'attr3'))
    assert_false(hasattr(dummy, 'attr4'))
    # slices of Dummy keep attr1 (propagate_attrs) and attr2 (@propagate_attr)
    assert_true(hasattr(dummy[1:], 'attr1'))
    assert_true(hasattr(dummy[1:], 'attr2'))
    assert_false(hasattr(dummy[1:], 'attr3'))
    assert_false(hasattr(dummy[1:], 'attr4'))
    assert_true(hasattr(dummy[1:][1:], 'attr1'))
    assert_true(hasattr(dummy[1:][1:], 'attr2'))
    assert_false(hasattr(dummy[1:][1:], 'attr3'))
    assert_false(hasattr(dummy[1:][1:], 'attr4'))
    assert_true(hasattr(subclass, 'attr1'))
    assert_true(hasattr(subclass, 'attr2'))
    assert_true(hasattr(subclass, 'attr3'))
    assert_true(hasattr(subclass, 'attr4'))
    # SubClass's propagate_attrs replaces Dummy's: attr4 propagates, attr1 no longer does
    assert_false(hasattr(subclass[1:], 'attr1'))
    assert_true(hasattr(subclass[1:], 'attr2'))
    assert_false(hasattr(subclass[1:], 'attr3'))
    assert_true(hasattr(subclass[1:], 'attr4'))
    assert_false(hasattr(subclass[1:][1:], 'attr1'))
    assert_true(hasattr(subclass[1:][1:], 'attr2'))
    assert_false(hasattr(subclass[1:][1:], 'attr3'))
    assert_true(hasattr(subclass[1:][1:], 'attr4'))
def test_pipeline_with_args():
    """Extra pipeline args bind per-instance; two pipelines from the same
    function with different args stay independent."""
    capitalize = pipeline(_capitalize_if_equal)
    cap_a = capitalize(v, 'a')
    cap_b = capitalize(v, 'b')
    assert_letters_equal(cap_a, 'Abcdefghij')
    assert_letters_equal(cap_b, 'aBcdefghij')
    assert_letters_equal([cap_a[0]], ['A'])
    assert_letters_equal([cap_b[0]], ['a'])
    # re-check cap_a: evaluating cap_b must not have disturbed its binding
    assert_letters_equal([cap_a[0]], ['A'])
def test_composed_pipelines():
    """Pipelines with and without extra args compose: a->z, then capitalize 'c'."""
    a_to_z = pipeline(_a_to_z)
    capitalize = pipeline(_capitalize_if_equal)
    composed = capitalize(a_to_z(v), 'c')
    assert_letters_equal(composed, 'zbCdefghij')
def test_serialize():
    """Slicerators, their slices, and pipeline outputs survive a pickle
    round-trip and remain sliceable afterwards."""
    # dump Slicerator
    stream = BytesIO()
    pickle.dump(v, stream)
    stream.seek(0)
    v2 = pickle.load(stream)
    stream.close()
    compare_slice_to_list(v2, list('abcdefghij'))
    compare_slice_to_list(v2[4:], list('efghij'))
    compare_slice_to_list(v2[4:][:-1], list('efghi'))

    # dump sliced Slicerator
    stream = BytesIO()
    pickle.dump(v[4:], stream)
    stream.seek(0)
    v2 = pickle.load(stream)
    stream.close()
    compare_slice_to_list(v2, list('efghij'))
    compare_slice_to_list(v2[2:], list('ghij'))
    compare_slice_to_list(v2[2:][:-1], list('ghi'))

    # dump sliced sliced Slicerator
    stream = BytesIO()
    pickle.dump(v[4:][:-1], stream)
    stream.seek(0)
    v2 = pickle.load(stream)
    stream.close()
    compare_slice_to_list(v2, list('efghi'))
    compare_slice_to_list(v2[2:], list('ghi'))
    compare_slice_to_list(v2[2:][:-1], list('gh'))

    # test pipeline
    capitalize = pipeline(_capitalize_if_equal)
    stream = BytesIO()
    pickle.dump(capitalize(v, 'a'), stream)
    stream.seek(0)
    v2 = pickle.load(stream)
    stream.close()
    compare_slice_to_list(v2, list('Abcdefghij'))
def test_from_class():
    """Slicerator.from_class builds a sliceable subclass without touching the
    original class, while preserving name/doc/module/repr."""
    class Dummy(object):
        """DocString"""
        def __init__(self):
            self.frame = list('abcdefghij')

        def __len__(self):
            return len(self.frame)

        def __getitem__(self, i):
            """Other Docstring"""
            return self.frame[i]  # actual code of get_frame

        def __repr__(self):
            return 'Repr'

    DummySli = Slicerator.from_class(Dummy)
    assert Dummy()[:2] == ['a', 'b']  # Dummy is unaffected

    # class slots propagate
    assert DummySli.__name__ == Dummy.__name__
    assert DummySli.__doc__ == Dummy.__doc__
    assert DummySli.__module__ == Dummy.__module__

    dummy = DummySli()
    assert isinstance(dummy, Dummy)  # still instance of Dummy
    assert repr(dummy) == 'Repr'  # repr propagates
    compare_slice_to_list(dummy, 'abcdefghij')
    compare_slice_to_list(dummy[1:], 'bcdefghij')
    compare_slice_to_list(dummy[1:][2:], 'defghij')
    # pipelines also work on from_class instances
    capitalize = pipeline(_capitalize_if_equal)
    cap_b = capitalize(dummy, 'b')
    assert_letters_equal(cap_b, 'aBcdefghij')
def test_lazy_hasattr():
    # this ensures that the Slicerator init does not evaluate all properties
    # (building DummySli *is* the assertion: if from_class evaluated
    # forbidden_property, the RuntimeError would fail the test)
    class Dummy(object):
        """DocString"""
        def __init__(self):
            self.frame = list('abcdefghij')

        def __len__(self):
            return len(self.frame)

        def __getitem__(self, i):
            """Other Docstring"""
            return self.frame[i]  # actual code of get_frame

        @property
        def forbidden_property(self):
            # must never be evaluated during from_class/instantiation
            raise RuntimeError()

    DummySli = Slicerator.from_class(Dummy)
if __name__ == '__main__':
    # Run this module's tests under nose, dropping into pdb on failures.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
|
|
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from modelcluster.fields import ParentalKey
from wagtail.admin.edit_handlers import (FieldPanel, InlinePanel,
ObjectList, PageChooserPanel,
TabbedInterface)
from wagtail.core.fields import RichTextField
from wagtail.core.models import Orderable, Page
from wagtail.images.edit_handlers import ImageChooserPanel
from themes.models import ThemeablePage
@python_2_unicode_compatible
class NewsletterListPage(ThemeablePage):
    """Index page listing its NewsletterPage children, newest issue first."""
    # Only newsletter issues may be created beneath this page.
    subpage_types = ['NewsletterPage']
    intro_text = RichTextField()
    body = RichTextField()
    @property
    def subpages(self):
        """Live NewsletterPage descendants, ordered by issue date, newest first."""
        subpages = NewsletterPage.objects.live().descendant_of(self).order_by('-issue_date')
        return subpages
    def __str__(self):
        return self.title
    content_panels = Page.content_panels + [
        FieldPanel('intro_text'),
        FieldPanel('body'),
    ]
    style_panels = ThemeablePage.style_panels
    edit_handler = TabbedInterface([
        ObjectList(content_panels, heading='Content'),
        ObjectList(style_panels, heading='Page Style Options'),
        ObjectList(Page.promote_panels, heading='Promote'),
        ObjectList(Page.settings_panels, heading='Settings', classname="settings"),
    ])
@python_2_unicode_compatible
class NewsletterPage(ThemeablePage):
    """A single newsletter issue: linked articles, external articles and events."""
    issue_date = models.DateField("Issue Date", default=now)

    @property
    def articles(self):
        """Linked internal articles, re-fetched as their most specific type.

        Each returned object carries the link's override_text/override_image.
        Links whose target was deleted (the FK is SET_NULL) are skipped —
        previously this raised AttributeError on a NULL article, unlike the
        guard already present in external_articles.
        """
        article_list = []
        for article_link in self.article_links.all():
            if article_link.article is None:
                # target deleted; the link row lingers with a NULL FK
                continue
            typed_article = article_link.article.content_type.get_object_for_this_type(
                id=article_link.article.id)
            typed_article.override_text = article_link.override_text
            typed_article.override_image = article_link.override_image
            article_list.append(typed_article)
        return article_list

    @property
    def external_articles(self):
        """Linked external articles (deleted targets skipped), with override text."""
        external_article_list = []
        for article_link in self.external_article_links.all():
            if article_link.external_article is not None:
                article_link.external_article.override_text = article_link.override_text
                external_article_list.append(article_link.external_article)
        return external_article_list

    @property
    def events(self):
        """Linked partner events (deleted targets skipped), with override text.

        Adds the same NULL-FK guard as external_articles; previously a
        deleted event raised AttributeError here.
        """
        event_list = []
        for event_link in self.event_links.all():
            if event_link.event is None:
                # target deleted; the link row lingers with a NULL FK
                continue
            event_link.event.override_text = event_link.override_text
            event_list.append(event_link.event)
        return event_list

    def __str__(self):
        return self.issue_date.strftime('%Y-%m-%d')

    content_panels = Page.content_panels + [
        FieldPanel('issue_date'),
        InlinePanel('article_links', label="Articles", help_text='The first article will be the newsletter feature story'),
        InlinePanel('external_article_links', label="External Articles"),
        InlinePanel('event_links', label="Partnered Events"),
    ]
    style_panels = ThemeablePage.style_panels
    edit_handler = TabbedInterface([
        ObjectList(content_panels, heading='Content'),
        ObjectList(style_panels, heading='Page Style Options'),
        ObjectList(Page.promote_panels, heading='Promote'),
        ObjectList(Page.settings_panels, heading='Settings', classname="settings"),
    ])
@python_2_unicode_compatible
class NewsletterArticleLink(Orderable, models.Model):
    """Orderable through-model linking a NewsletterPage to an internal article."""
    article = models.ForeignKey(
        "wagtailcore.Page",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text="Link to an internal article"
    )
    override_text = RichTextField(
        blank=True,
        default="",
        help_text="Text to describe article."
    )
    override_image = models.ForeignKey(
        'images.AttributedImage',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text="Circular Image to accompany article if article image not selected"
    )
    newsletter = ParentalKey(
        "NewsletterPage",
        related_name='article_links'
    )
    def __str__(self):
        # article is nullable (on_delete=SET_NULL): guard against a deleted
        # target instead of raising AttributeError in admin listings.
        if self.article is None:
            return "(no article)"
        return "{}".format(
            self.article.title
        )
    panels = [
        PageChooserPanel("article", ['articles.ArticlePage', 'articles.SeriesPage']),
        FieldPanel("override_text"),
        ImageChooserPanel("override_image"),
    ]
@python_2_unicode_compatible
class NewsletterExternalArticleLink(Orderable, models.Model):
    """Orderable through-model linking a NewsletterPage to an external article."""
    external_article = models.ForeignKey(
        "articles.ExternalArticlePage",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text="Link to an external article"
    )
    override_text = RichTextField(
        blank=True,
        default="",
        help_text="Text to describe article."
    )
    newsletter = ParentalKey(
        "NewsletterPage",
        related_name='external_article_links'
    )
    def __str__(self):
        # external_article is nullable (on_delete=SET_NULL): guard against a
        # deleted target instead of raising AttributeError in admin listings.
        if self.external_article is None:
            return "(no article)"
        return "{}".format(
            self.external_article.title
        )
    panels = [
        PageChooserPanel("external_article", 'articles.ExternalArticlePage'),
        FieldPanel("override_text"),
    ]
@python_2_unicode_compatible
class NewsletterEventLink(Orderable, models.Model):
    """Orderable through-model linking a NewsletterPage to a partnered event."""
    event = models.ForeignKey(
        "events.EventPage",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text="Link to an event"
    )
    override_text = RichTextField(
        blank=True,
        default="",
        help_text="Text to describe this event."
    )
    newsletter = ParentalKey(
        "NewsletterPage",
        related_name='event_links'
    )
    def __str__(self):
        # event is nullable (on_delete=SET_NULL): guard against a deleted
        # target instead of raising AttributeError in admin listings.
        if self.event is None:
            return "(no event)"
        return "{}".format(
            self.event.title
        )
    panels = [
        PageChooserPanel("event", 'events.EventPage'),
        FieldPanel("override_text"),
    ]
|
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from PyQt4 import QtCore, QtGui
import re
from vistrails.core import debug
from vistrails.core.collection import Collection
from vistrails.core.collection.vistrail import VistrailEntity
from vistrails.core.configuration import get_vistrails_configuration
from vistrails.core.data_structures.bijectivedict import Bidict
from vistrails.core.query.multiple import MultipleSearch
from vistrails.core.vistrail.pipeline import Pipeline
from vistrails.core.vistrail.vistrail import Vistrail
from vistrails.gui.base_view import BaseView
from vistrails.gui.common_widgets import QSearchBox
from vistrails.gui.modules.utils import get_query_widget_class
from vistrails.gui.pipeline_view import QPipelineView
from vistrails.gui.ports_pane import ParameterEntry
from vistrails.gui.theme import CurrentTheme
from vistrails.gui.version_view import QVersionTreeView
from vistrails.gui.vistrail_controller import VistrailController
class QueryController(object):
    """Drives text + visual (by-example) searches over vistrails.

    Mediates between the search UI (QQueryView / QQueryBox) and the
    MultipleSearch engine, tracking the current search scope (all open
    vistrails, the current vistrail, or the current workflow) and
    keeping the result views in sync with the latest results.
    """
    # Search scope constants.  run_search relies on their ordering
    # (LEVEL_ALL < LEVEL_VISTRAIL < LEVEL_WORKFLOW) when deciding
    # whether a cached search result can be reused.
    LEVEL_ALL = 0
    LEVEL_VISTRAIL = 1
    LEVEL_WORKFLOW = 2
    def __init__(self, query_view=None):
        self.query_view = query_view
        self.search = None           # current MultipleSearch, if any
        self.search_str = None       # text of the last executed search
        self.search_pipeline = None  # example pipeline of the last search
        # 3 is greater than every LEVEL_* constant, so the very first
        # run_search never tries to reuse a (nonexistent) cached result.
        self.search_level = 3
        self.use_regex = False
        self.vt_controller = None
        self.level = QueryController.LEVEL_VISTRAIL
        # version shown in the workflow result view; used by
        # update_results to avoid redundant scene rebuilds
        self.workflow_version = None
    def set_level(self, level):
        """Set the search scope and sync the radio buttons in the UI."""
        self.query_view.query_box.setLevel(level)
        self.level_changed(level)
    def set_use_regex(self, use_regex):
        """Toggle regular-expression interpretation of the search text."""
        self.use_regex = use_regex
    def level_changed(self, level):
        """React to a scope change: switch result view, update buttons."""
        self.query_view.set_result_level(level)
        # "Edit" only applies to vistrail/workflow results, and only once
        # a search has actually run (back button enabled).
        if level >= QueryController.LEVEL_VISTRAIL and \
           self.query_view.query_box.backButton.isEnabled():
            self.query_view.query_box.editButton.setEnabled(True)
        else:
            self.query_view.query_box.editButton.setEnabled(False)
        self.level = level
    def set_query_view(self, query_view=None):
        """Attach the QQueryView this controller manages."""
        self.query_view = query_view
    def set_vistrail_controller(self, vt_controller):
        """Attach the controller of the vistrail being searched."""
        self.vt_controller = vt_controller
    def set_search(self, search=None):
        """Install a search object on this controller and both result views."""
        self.search = search
        self.query_view.version_result_view.controller.search = search
        self.query_view.workflow_result_view.controller.search = search
    def run_search(self, search_str=None):
        """ run_search(search_str: str) -> None
        Execute a combined text + visual search at the current level,
        reusing the previous results when nothing relevant changed.
        """
        search_pipeline = \
            self.query_view.pipeline_view.scene().current_pipeline
        if search_str is None:
            search_str = self.query_view.query_box.getCurrentText()
        self.query_view.update_controller()
        # Re-run only when something actually changed (text, example
        # pipeline, regex flag, edited query pipeline) or when the new
        # scope is wider than the cached one.
        if self.search is None or \
                self.search.search_str != search_str or \
                self.search.queryPipeline != search_pipeline or \
                self.search.use_regex != self.use_regex or \
                self.query_view.p_controller.changed or \
                self.search_level > self.level:
            self.search_str = search_str
            self.search_pipeline = search_pipeline
            self.search_level = self.level
            # reset changed here
            self.query_view.p_controller.set_changed(False)
            vt_controller = self.query_view.vt_controller
            controllers = []
            def do_search(only_current_vistrail=False,
                          only_current_workflow=False):
                # Collect, per open vistrail entity, the set of versions
                # to examine, narrowed by the requested scope.
                entities_to_check = {}
                open_col = Collection.getInstance()
                for entity in open_col.get_current_entities():
                    if entity.type_id == VistrailEntity.type_id and \
                            entity.is_open:
                        controller = entity._window.controller
                        if only_current_vistrail and \
                                controller.vistrail != vt_controller.vistrail:
                            continue
                        controllers.append(controller)
                        if only_current_workflow:
                            versions_to_check = set([controller.current_version])
                        else:
                            graph = controller._current_terse_graph
                            versions_to_check = set(graph.vertices.iterkeys())
                        entities_to_check[entity] = versions_to_check
                self.set_search(MultipleSearch(search_str, search_pipeline,
                                               entities_to_check,
                                               self.use_regex))
                self.search.run()
                return self.search.getResultEntities()
            if self.level == QueryController.LEVEL_VISTRAIL:
                result_entities = do_search(True)
                self.show_vistrail_matches()
            elif self.level == QueryController.LEVEL_WORKFLOW:
                #self.search_level = QueryController.LEVEL_VISTRAIL
                result_entities = do_search(False, True)
                self.update_version_tree()
                self.show_workflow_matches()
            else: # self.level == QueryController.LEVEL_ALL
                result_entities = do_search()
                self.show_global_matches()
            from vistrails.gui.vistrails_window import _app
            _app.notify("search_changed", self.search, result_entities)
            # May need to update version trees
            # resultEntities make sure no update is created later
            for controller in controllers:
                controller.check_delayed_update()
        else:
            # Nothing changed: just flip back to the result page.
            self.query_view.set_to_result_mode()
    def set_refine(self, refine):
        """ set_refine(refine: bool) -> None
        Set the refine state to True or False
        """
        self.query_view.version_result_view.controller.set_refine(refine)
    def reset_search(self):
        """Clear the current search and return to the (empty) search page."""
        self.search = None
        self.search_pipeline = None
        self.query_view.pipeline_view.controller.change_selected_version(0)
        self.query_view.pipeline_view.scene().setupScene(
            self.query_view.pipeline_view.controller.current_pipeline)
        self.query_view.set_to_search_mode()
        self.query_view.query_box.searchBox.clearSearch()
        self.query_view.vistrailChanged()
        from vistrails.gui.vistrails_window import _app
        _app.notify("search_changed", None, None)
    def back_to_search(self):
        """Return to the search editor without discarding results."""
        self.query_view.set_to_search_mode()
    def goto_edit(self):
        """Open the selected result version for editing in the main window."""
        # get the version info and send it to open_vistrail call
        from vistrails.gui.vistrails_window import _app
        version = self.query_view.version_result_view.controller.current_version
        view = self.query_view.controller.vistrail_view
        if self.level == QueryController.LEVEL_VISTRAIL:
            view.version_selected(version, True)
            _app.qactions['history'].trigger()
        elif self.level == QueryController.LEVEL_WORKFLOW:
            view.version_selected(version, True, double_click=True)
    def update_results(self):
        """Rebuild the workflow result scene if the selected version moved."""
        if self.workflow_version != \
                self.query_view.vt_controller.current_version:
            result_view = self.query_view.workflow_result_view
            result_view.scene().setupScene(
                result_view.controller.current_pipeline)
            result_view.scene().fitToView(result_view, True)
            self.workflow_version = \
                self.query_view.vt_controller.current_version
    def update_version_tree(self):
        """Refresh the version-tree result view (honoring refine mode)."""
        result_view = self.query_view.version_result_view
        if result_view.controller.refine:
            result_view.controller.recompute_terse_graph()
        result_view.controller.invalidate_version_tree()
    def show_vistrail_matches(self, *args, **kwargs):
        """Switch to vistrail scope and display version-tree matches."""
        if self.level != QueryController.LEVEL_VISTRAIL:
            self.set_level(QueryController.LEVEL_VISTRAIL)
        self.query_view.set_to_result_mode()
        result_view = self.query_view.version_result_view
        if result_view.controller.refine:
            result_view.controller.recompute_terse_graph()
        result_view.controller.invalidate_version_tree(*args, **kwargs)
    def show_workflow_matches(self):
        """Switch to workflow scope and display pipeline matches."""
        if self.level != QueryController.LEVEL_WORKFLOW:
            self.set_level(QueryController.LEVEL_WORKFLOW)
        self.query_view.set_to_result_mode()
        result_view = self.query_view.workflow_result_view
        result_view.scene().setupScene(result_view.controller.current_pipeline)
        result_view.scene().fitToView(result_view, True)
    def show_global_matches(self):
        """Switch to global scope; results appear in the Workspace window."""
        if self.level != QueryController.LEVEL_ALL:
            self.set_level(QueryController.LEVEL_ALL)
        self.query_view.set_to_result_mode()
    # def invalidate_version_tree(self, *args, **kwargs):
    #     self.query_view.set_to_result_mode()
    #     result_view = self.query_view.version_result_view
    #     result_view.controller.search = self.search
    #     result_view.controller.search_str = self.search_str
    #     result_view.controller.invalidate_version_tree(*args, **kwargs)
    # def recompute_terse_graph(self, *args, **kwargs):
    #     self.query_view.set_to_result_mode()
    #     result_view = self.query_view.version_result_view
    #     result_view.controller.search = self.search
    #     result_view.controller.search_str = self.search_str
    #     result_view.controller.recompute_terse_graph(*args, **kwargs)
class QQueryPipelineView(QPipelineView):
    """Pipeline canvas on which the user composes a query by example."""
    def __init__(self, parent=None):
        QPipelineView.__init__(self, parent)
        self.query_controller = None
        # A distinct background tells query canvases apart from regular
        # pipeline views.
        self.setBackgroundBrush(CurrentTheme.QUERY_BACKGROUND_BRUSH)
        self.scene().current_pipeline = Pipeline()
    def set_query_controller(self, controller):
        """Attach the QueryController that executes searches."""
        self.query_controller = controller
    def execute(self):
        """Run the current search (hooked to the global execute action)."""
        self.query_controller.run_search(None)
class QQueryResultGlobalView(QtGui.QWidget):
    """Placeholder page for searches spanning all open vistrails.

    Global results are listed in the Workspace window, so this widget
    only shows a centered pointer to it.
    """
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        hint = QtGui.QLabel("See Workspace Window")
        hint.setAlignment(QtCore.Qt.AlignCenter)
        box = QtGui.QVBoxLayout()
        box.addWidget(hint, QtCore.Qt.AlignCenter)
        self.setLayout(box)
class QQueryResultVersionView(QVersionTreeView):
    """Version tree that highlights the versions matching a search."""
    def __init__(self, parent=None):
        QVersionTreeView.__init__(self, parent)
        # Result views share a distinct background color.
        self.setBackgroundBrush(CurrentTheme.QUERY_RESULT_BACKGROUND_BRUSH)
class QQueryResultWorkflowView(QPipelineView):
    """Read-only pipeline view showing the matches within one workflow."""
    def __init__(self, parent=None):
        QPipelineView.__init__(self, parent)
        # Result views share a distinct background; the scene is locked
        # so matches cannot be edited in place.
        self.setBackgroundBrush(CurrentTheme.QUERY_RESULT_BACKGROUND_BRUSH)
        self.scene().set_read_only_mode(True)
class QQueryBox(QtGui.QWidget):
    """Search bar of the query view.

    Bundles the free-text QSearchBox with the scope radio buttons
    (Open Vistrails / Current Vistrail / Current Workflow), a regex
    toggle, and the Edit / Back-to-Search buttons.  Forwards every user
    action to the attached QueryController.
    """
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.build_widget()
        self.controller = None
    def set_controller(self, controller=None):
        """Attach the QueryController that receives UI events."""
        self.controller = controller
    def build_widget(self):
        """Construct child widgets and wire up all signal connections."""
        layout = QtGui.QVBoxLayout()
        layout.setMargin(4)
        layout.setSpacing(2)
        self.searchBox = QSearchBox(True, False, self)
        layout.addWidget(self.searchBox)
        options_layout = QtGui.QHBoxLayout()
        options_layout.setSpacing(5)
        options_layout.setAlignment(QtCore.Qt.AlignLeft)
        options_layout.addWidget(QtGui.QLabel("Search:"))
        searchAll = QtGui.QRadioButton("Open Vistrails")
        searchCurrent = QtGui.QRadioButton("Current Vistrail")
        searchWorkflow = QtGui.QRadioButton("Current Workflow")
        useRegex = QtGui.QCheckBox("Regular expression")
        self.level_group = QtGui.QButtonGroup()
        self.level_group.addButton(searchAll)
        self.level_group.addButton(searchCurrent)
        self.level_group.addButton(searchWorkflow)
        # Bidict so levelChanged can map a clicked button back to its
        # QueryController.LEVEL_* constant via level_map.inverse.
        self.level_map = \
            Bidict([(QueryController.LEVEL_ALL, searchAll),
                    (QueryController.LEVEL_VISTRAIL, searchCurrent),
                    (QueryController.LEVEL_WORKFLOW, searchWorkflow)])
        options_layout.addWidget(searchAll)
        options_layout.addWidget(searchCurrent)
        options_layout.addWidget(searchWorkflow)
        options_layout.addWidget(useRegex)
        # Default scope matches QueryController's initial LEVEL_VISTRAIL.
        searchCurrent.setChecked(True)
        self.editButton = QtGui.QPushButton("Edit")
        self.editButton.setEnabled(False)
        self.backButton = QtGui.QPushButton("Back to Search")
        self.backButton.setEnabled(False)
        options_layout.addStretch(1)
        options_layout.addWidget(self.editButton, 0, QtCore.Qt.AlignRight)
        options_layout.addWidget(self.backButton, 0, QtCore.Qt.AlignRight)
        layout.addLayout(options_layout)
        self.setLayout(layout)
        # Old-style PyQt4 signal wiring.
        self.connect(self.searchBox, QtCore.SIGNAL('resetSearch()'),
                     self.resetSearch)
        self.connect(self.searchBox, QtCore.SIGNAL('executeSearch(QString)'),
                     self.executeSearch)
        self.connect(self.searchBox, QtCore.SIGNAL('refineMode(bool)'),
                     self.refineMode)
        self.connect(self.backButton, QtCore.SIGNAL('clicked()'),
                     self.backToSearch)
        self.connect(self.editButton, QtCore.SIGNAL('clicked()'),
                     self.doEdit)
        self.connect(self.level_group,
                     QtCore.SIGNAL('buttonClicked(QAbstractButton*)'),
                     self.levelChanged)
        self.connect(useRegex, QtCore.SIGNAL('stateChanged(int)'),
                     self.useRegexChanged)
    def resetSearch(self, emit_signal=True):
        """
        resetSearch() -> None
        Clear the search; with a controller and emit_signal, reset the
        whole query state, otherwise just clear the text box.
        """
        if self.controller and emit_signal:
            self.controller.reset_search()
            self.emit(QtCore.SIGNAL('textQueryChange(bool)'), False)
        else:
            self.searchBox.clearSearch()
    def backToSearch(self):
        """Return from the result page to the search editor."""
        if self.controller:
            self.controller.back_to_search()
    def doEdit(self):
        """Open the selected result for editing."""
        if self.controller:
            self.controller.goto_edit()
    def levelChanged(self, button):
        """Map the clicked radio button back to its LEVEL_* constant."""
        self.controller.set_level(self.level_map.inverse[button])
    def useRegexChanged(self, status):
        """Propagate the regex checkbox state to the controller."""
        self.controller.set_use_regex(status != QtCore.Qt.Unchecked)
    def setLevel(self, level):
        """Check the radio button corresponding to the given level."""
        self.level_map[level].setChecked(True)
    def executeSearch(self, text):
        """
        executeSearch(text: QString) -> None
        Run the search; report invalid regular expressions to the user.
        """
        s = str(text)
        if self.controller:
            try:
                self.controller.run_search(s)
            except re.error as e:
                debug.critical('Error in regular expression: %s' % str(e))
        # try:
        #     search = CombinedSearch(s,
        #     search = SearchCompiler(s).searchStmt
        # except SearchParseError, e:
        #     debug.warning("Search Parse Error", e)
        #     search = None
        # self.controller.set_search(search, s)
        # self.emit(QtCore.SIGNAL('textQueryChange(bool)'), s!='')
    def refineMode(self, on):
        """
        refineMode(on: bool) -> None
        Toggle refine mode on the version result view.
        """
        if self.controller:
            self.controller.set_refine(on)
    def getCurrentText(self):
        """Return the current search text as a Python string."""
        return self.searchBox.getCurrentText()
    def setManualResetEnabled(self, boolVal):
        """Enable/disable the manual reset control of the search box."""
        self.searchBox.setManualResetEnabled(boolVal)
class QQueryView(QtGui.QWidget, BaseView):
    """Main "Search" view: query-by-example editor plus result pages.

    Hosts a stacked widget with four pages: the visual search editor
    and one result page per search scope (global / vistrail / workflow).
    Maintains two private controllers: p_controller for the example
    pipeline being built, and vt_controller mirroring the vistrail
    being searched.
    """
    # Stacked-widget page indices; reassigned in build_widget to the
    # actual indices returned by QStackedWidget.addWidget.
    VISUAL_SEARCH_VIEW = 0
    GLOBAL_RESULT_VIEW = 1
    VERSION_RESULT_VIEW = 2
    WORKFLOW_RESULT_VIEW = 3
    # Maps a QueryController level to the page showing its results.
    RESULT_LEVEL_MAP = \
        Bidict([(QueryController.LEVEL_ALL, GLOBAL_RESULT_VIEW),
                (QueryController.LEVEL_VISTRAIL, VERSION_RESULT_VIEW),
                (QueryController.LEVEL_WORKFLOW, WORKFLOW_RESULT_VIEW)])
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        BaseView.__init__(self)
        self.build_widget()
        self.set_title("Search")
    def set_controller(self, controller=None):
        """Attach the vistrail controller whose contents are searched."""
        if self.controller:
            self.disconnect(self.controller,
                            QtCore.SIGNAL('stateChanged'),
                            self.update_controller)
        self.controller = controller
        if controller:
            self.connect(self.controller,
                         QtCore.SIGNAL('stateChanged'),
                         self.update_controller)
            self.vt_controller.vistrail_view = self.version_result_view
            self.vt_controller.current_pipeline_view = \
                self.workflow_result_view
            # self.vt_controller.vistrail_view.set_controller(self.vt_controller)
            # FIXME Need to figure out how to deal with this !!!
            self.vt_controller.set_vistrail(controller.vistrail, None,
                                            set_log_on_vt=False)
            # NOTE(review): update_controller and result_version_selected
            # read getattr(..., 'hideUpgrades', True) WITHOUT the leading
            # `not`; the negation here looks inconsistent -- confirm which
            # is intended.
            hide_upgrades = not getattr(get_vistrails_configuration(),
                                        'hideUpgrades', True)
            self.vt_controller.change_selected_version(controller.current_version,
                                                       hide_upgrades, hide_upgrades)
            self.version_result_view.set_controller(self.vt_controller)
            self.workflow_result_view.set_controller(self.vt_controller)
            self.query_controller.set_vistrail_controller(controller)
    def update_controller(self):
        """Re-sync the mirror controller after the main vistrail changed."""
        # FIXME Need to figure out how to deal with this !!!
        self.vt_controller.set_vistrail(self.controller.vistrail, None,
                                        set_log_on_vt=False)
        hide_upgrades = getattr(get_vistrails_configuration(),
                                'hideUpgrades', True)
        self.vt_controller.change_selected_version(self.controller.current_version,
                                                   hide_upgrades, hide_upgrades)
    def build_widget(self):
        """Create the query box, the four stacked pages and controllers."""
        layout = QtGui.QVBoxLayout()
        layout.setMargin(0)
        layout.setSpacing(0)
        self.query_controller = QueryController(self)
        # vt_controller mirrors the searched vistrail; p_controller owns
        # the example pipeline being drawn by the user.
        self.vt_controller = VistrailController(auto_save=False)
        self.p_controller = VistrailController(Vistrail(), auto_save=False)
        self.connect(self.p_controller,
                     QtCore.SIGNAL('vistrailChanged()'),
                     self.vistrailChanged)
        self.query_box = QQueryBox()
        self.query_box.set_controller(self.query_controller)
        layout.addWidget(self.query_box)
        self.stacked_widget = QtGui.QStackedWidget()
        self.pipeline_view = QQueryPipelineView()
        self.p_controller.current_pipeline_view = self.pipeline_view
        self.pipeline_view.set_controller(self.p_controller)
        self.pipeline_view.set_query_controller(self.query_controller)
        # Record the real page indices assigned by the stacked widget.
        QQueryView.VISUAL_SEARCH_VIEW = \
            self.stacked_widget.addWidget(self.pipeline_view)
        self.global_result_view = QQueryResultGlobalView()
        QQueryView.GLOBAL_RESULT_VIEW = \
            self.stacked_widget.addWidget(self.global_result_view)
        self.version_result_view = QQueryResultVersionView()
        self.connect(self.version_result_view.scene(),
                     QtCore.SIGNAL('versionSelected(int,bool,bool,bool,bool)'),
                     self.result_version_selected)
        # self.version_result_view.set_controller(self.vt_controller)
        QQueryView.VERSION_RESULT_VIEW = \
            self.stacked_widget.addWidget(self.version_result_view)
        self.workflow_result_view = QQueryResultWorkflowView()
        # self.workflow_result_view.set_controller(self.vt_controller)
        QQueryView.WORKFLOW_RESULT_VIEW = \
            self.stacked_widget.addWidget(self.workflow_result_view)
        self.stacked_widget.setCurrentWidget(self.pipeline_view)
        layout.addWidget(self.stacked_widget)
        self.setLayout(layout)
        self.current_display = QQueryView.VISUAL_SEARCH_VIEW
        self.current_result_view = QQueryView.VERSION_RESULT_VIEW
    def set_default_layout(self):
        """Declare the default dock-widget palettes for this view."""
        from vistrails.gui.module_palette import QModulePalette
        from vistrails.gui.module_info import QModuleInfo
        self.set_palette_layout(
            {QtCore.Qt.LeftDockWidgetArea: QModulePalette,
             QtCore.Qt.RightDockWidgetArea: QModuleInfo,
             })
    def set_action_links(self):
        """Wire global actions/notifications for this view."""
        self.action_links = \
            { 'execute': ('query_pipeline_changed', self.set_execute_action) }
        # also add other notification here...
        from vistrails.gui.vistrails_window import _app
        _app.register_notification('query_pipeline_changed',
                                   self.set_reset_button)
    def set_reset_button(self, pipeline):
        """Enable manual reset only when the example pipeline is non-empty."""
        self.query_box.setManualResetEnabled(self.pipeline_non_empty(pipeline))
    def set_result_level(self, level):
        """Remember (and, in result mode, show) the page for this level."""
        view_idx = QQueryView.RESULT_LEVEL_MAP[level]
        if self.current_display != QQueryView.VISUAL_SEARCH_VIEW:
            self.set_display_view(view_idx)
        self.current_result_view = view_idx
        self.query_controller.update_results()
    def set_to_search_mode(self):
        """Show the search editor page and disable result-only buttons."""
        self.set_display_view(QQueryView.VISUAL_SEARCH_VIEW)
        self.query_box.backButton.setEnabled(False)
        self.query_box.editButton.setEnabled(False)
        self.set_reset_button(self.p_controller.current_pipeline)
        from vistrails.gui.vistrails_window import _app
        _app.notify('query_pipeline_changed',
                    self.p_controller.current_pipeline)
    def set_to_result_mode(self):
        """Show the current result page and enable result-only buttons."""
        self.set_display_view(self.current_result_view)
        self.query_box.backButton.setEnabled(True)
        # Edit is meaningless for global (workspace-level) results.
        if self.query_controller.level >= QueryController.LEVEL_VISTRAIL:
            self.query_box.editButton.setEnabled(True)
        self.query_box.setManualResetEnabled(True)
        from vistrails.gui.vistrails_window import _app
        _app.notify('query_pipeline_changed',
                    self.p_controller.current_pipeline)
    def set_display_view(self, view_type):
        """Switch the stacked widget to the given page index."""
        self.current_display = view_type
        self.stacked_widget.setCurrentIndex(view_type)
    def get_current_view(self):
        """Return the widget currently shown in the stacked widget."""
        return self.stacked_widget.currentWidget()
    def set_action_defaults(self):
        """Declare default enabled-state/icon/tooltip for global actions."""
        self.action_defaults = \
            {
             'execute': [('setEnabled', True, self.set_execute_action),
                         ('setIcon', False, CurrentTheme.VISUAL_QUERY_ICON),
                         ('setToolTip', False, 'Execute a visual query')],
             'publishWeb': [('setEnabled', False, False)],
             'publishPaper': [('setEnabled', False, False)],
             }
    def set_execute_action(self, pipeline=None):
        """Return True when the execute action should be enabled."""
        if not self.vt_controller:
            return False
        if pipeline is None:
            pipeline = self.p_controller.current_pipeline
        if self.current_display == QQueryView.VISUAL_SEARCH_VIEW:
            return self.pipeline_non_empty(pipeline)
        return False
    def pipeline_non_empty(self, pipeline):
        """Return True when the pipeline exists and has modules."""
        return pipeline is not None and len(pipeline.modules) > 0
    def vistrailChanged(self):
        """Broadcast edits to the example pipeline."""
        from vistrails.gui.vistrails_window import _app
        self.p_controller.current_pipeline.ensure_connection_specs()
        _app.notify('query_pipeline_changed', self.p_controller.current_pipeline)
    def query_changed(self, query=None):
        """External hook: a None query resets the search."""
        if query is None:
            self.query_controller.reset_search()
        # FIXME add support for changing the query to something specific
    # DAK: removed this call as the query view maintains its own
    # "current version"
    # def version_changed(self, version_id):
    #     self.vt_controller.change_selected_version(version_id)
    #     self.version_result_view.select_current_version()
    #     self.query_controller.update_results()
    def result_version_selected(self, version_id, by_click, do_validate=True,
                                from_root=False, double_click=False):
        """Handle a version click in the result tree.

        Selects the version in the mirror controller and, on double
        click, drills down into workflow-level results.
        """
        if by_click:
            hide_upgrades = getattr(get_vistrails_configuration(),
                                    'hideUpgrades', True)
            self.query_controller.search.setCurrentController(
                self.vt_controller)
            self.vt_controller.change_selected_version(version_id, hide_upgrades,
                                                       hide_upgrades, from_root)
            if double_click:
                self.query_controller.set_level(QueryController.LEVEL_WORKFLOW)
                self.query_controller.show_workflow_matches()
        # set version prop directly
        from vistrails.gui.version_prop import QVersionProp
        prop = QVersionProp.instance()
        prop.set_visible(True)
        prop.updateController(self.vt_controller)
        prop.updateVersion(version_id)
class QueryEntry(ParameterEntry):
    """Parameter entry specialized for query editing.

    Identical to ParameterEntry except that its widgets come from the
    query widget registry instead of the standard one.
    """
    def __init__(self, port_spec, function=None, parent=None):
        ParameterEntry.__init__(self, port_spec, function, parent)
    def get_widget(self):
        """Build this entry's widget using query-specific widget classes."""
        return self.build_widget(get_query_widget_class, False)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mutators for enhancement factors and exchange-correlation functionals.
The EnhancementFactorMutator/XCFunctionalMutator class defined in this module
provides a mutate method, which takes an EnhancementFactor/XCFunctional
instance as input and returns a mutated EnhancementFactor/XCFunctional instance
as output. During the mutation process, EnhancementFactor/XCFunctional instance
are treated as immutable objects and no changes are made in-place.
"""
import copy
from absl import logging
import numpy as np
from symbolic_functionals.syfes.symbolic import enhancement_factors
from symbolic_functionals.syfes.symbolic import instructions
from symbolic_functionals.syfes.symbolic import xc_functionals
def _remove_small_probabilities(pool):
"""Remove small probabilities from instruction or mutation pool dictionary."""
keys_to_remove = [key for key, value in pool.items() if value < 1e-8]
for key in keys_to_remove:
pool.pop(key)
class EnhancementFactorMutator:
  """Mutator for enhancement factor.

  Mutations on an EnhancementFactor instance are performed by inserting,
  removing, replacing and changing arguments of instructions in the
  instruction_list. Probabilities for instructions and mutation rules
  are specified by instruction_pool and mutation_pool, respectively.

  New instructions generated during the mutations obey the convention in
  the definition of enhancement factor that input arguments can be any names
  known to the enhancement factor, while output argument are limited to
  variable names.
  """
  # Default: uniform probability over every registered instruction class.
  _default_instruction_pool = {
      instruction_name: 1. / len(instructions.INSTRUCTION_CLASSES)
      for instruction_name in instructions.INSTRUCTION_CLASSES.keys()
  }
  # Default: the four mutation rules with equal probability.  Each key
  # must be the name of a method defined on this class (checked in
  # __init__ via hasattr).
  _default_mutation_pool = {
      'insert_instruction': 0.25,
      'remove_instruction': 0.25,
      'replace_instruction': 0.25,
      'change_argument': 0.25,
  }
def __init__(self,
instruction_pool=None,
mutation_pool=None,
max_num_instructions=None,
max_num_bound_parameters=None,
num_fixed_instructions=0,
seed=None):
"""Initializes mutator.
Args:
instruction_pool: Dict {instruction_name: instruction_probability},
the pool of possible instructions for insertion and replacement.
mutation_pool: Dict {mutation_name: mutation_probability}, the pool
of possible mutation rules. Mutation rules are implemented as methods
of this class.
max_num_instructions: Integer, the maximum number of instructions.
No more instructions will be inserted to the instruction_list beyond
this number.
max_num_bound_parameters: Integer, the maximum number of bound parameters.
num_fixed_instructions: Integer, the number of fixed instructions.
Mutator will not mutate the first num_fixed_instructions instructions
of enhancement factors.
seed: Integer, the random seed.
"""
self.instruction_pool = instruction_pool or self._default_instruction_pool
for instruction_name in self.instruction_pool:
if instruction_name not in instructions.INSTRUCTION_CLASSES:
raise ValueError(f'Unknown instruction: {instruction_name}')
if abs(sum(self.instruction_pool.values()) - 1) > 1e-8:
raise ValueError('Instruction probabilities are not normalized to 1')
if any(probability < 0. for probability in self.instruction_pool.values()):
raise ValueError('Instruction pool contains negative probabilities')
_remove_small_probabilities(self.instruction_pool)
self.mutation_pool = mutation_pool or self._default_mutation_pool
for mutation_type in self.mutation_pool:
if not hasattr(self, mutation_type):
raise ValueError(f'Unknown mutation type: {mutation_type}')
if abs(sum(self.mutation_pool.values()) - 1) > 1e-8:
raise ValueError('Mutation probabilities are not normalized to 1')
if any(probability < 0. for probability in self.mutation_pool.values()):
raise ValueError('Mutation pool contains negative probabilities')
_remove_small_probabilities(self.mutation_pool)
if max_num_instructions is None:
self.max_num_instructions = float('inf')
else:
self.max_num_instructions = max_num_instructions
if max_num_bound_parameters is None:
self.max_num_bound_parameters = float('inf')
else:
self.max_num_bound_parameters = max_num_bound_parameters
self.num_fixed_instructions = num_fixed_instructions
self.random_state = np.random.RandomState(seed=seed)
@property
def instruction_names(self):
"""List of instruction names."""
return list(self.instruction_pool.keys())
@property
def instruction_probabilities(self):
"""List of probabilities for instructions."""
return list(self.instruction_pool.values())
@property
def mutation_types(self):
"""List of mutation types."""
return list(self.mutation_pool.keys())
@property
def mutation_probabilities(self):
"""List of probabilities for mutation types."""
return list(self.mutation_pool.values())
  def mutate(self, enhancement_factor, verbose=True):
    """Mutates a single instruction in the enhancement factor.

    Once the instruction_list become empty, only insert_instruction will be
    performed; once the instruction_list hits self.max_num_instructions limit,
    only mutations other than insert_instruction will be performed.

    Args:
      enhancement_factor: Instance of enhancement_factors.EnhancementFactor
        class, the enhancement factor to be mutated. enhancement_factor will
        not be modified in-place.
      verbose: Boolean, if True, prints the log of mutation.

    Returns:
      new_enhancement_factor: Instance of enhancement_factors.EnhancementFactor
        class, the mutated enhancement factor.
      mutation_type: String, the type of mutation performed.
      instruction_index: Integer, the index of mutated instruction.
      change: Tuple of two instances of instructions.Instruction.
        * (None, new_instruction) for insert_instruction
        * (old_instruction, None) for remove_instruction
        * (old_instruction, new_instruction) for replace_instruction or
          change_argument

    Raises:
      ValueError, if instruction_list is empty and probability of
        insert_instruction is zero, or the length of instruction_list hits
        self.max_num_instructions limit and only insert_instruction mutation
        is allowed.
    """
    # determine mutation type
    if enhancement_factor.num_instructions == self.num_fixed_instructions:
      # No mutable instructions left: insertion is the only option.
      if self.mutation_pool.get('insert_instruction', 0.) < 1e-8:
        raise ValueError('Mutation cannot proceed on empty instruction list '
                         'with zero insertion probability')
      mutation_type = 'insert_instruction'
    elif enhancement_factor.num_instructions > self.max_num_instructions:
      raise ValueError('Mutation cannot proceed with instruction_list longer '
                       'than max_num_instructions.')
    elif (enhancement_factor.num_bound_parameters
          > self.max_num_bound_parameters):
      raise ValueError('Mutation cannot proceed with number of bound '
                       'parameters greater than max_num_bound_parameters.')
    elif enhancement_factor.num_instructions == self.max_num_instructions:
      # At capacity: rejection-sample until a non-insertion rule is drawn.
      if abs(self.mutation_pool.get('insert_instruction', 0.) - 1.) < 1e-8:
        raise ValueError('Mutation cannot proceed on max_num_instructions '
                         'with only insertions allowed')
      mutation_type = 'insert_instruction'
      while mutation_type == 'insert_instruction':
        mutation_type = self.get_random_mutation_type()
    else:
      mutation_type = self.get_random_mutation_type()
    # execute mutation: mutation rules are methods on this class named
    # after their mutation_type, all sharing the same return signature.
    new_instruction_list, instruction_index, change, message = getattr(
        self, mutation_type)(enhancement_factor)
    if verbose:
      logging.info(message)
    return (enhancement_factors.EnhancementFactor(
        feature_names=enhancement_factor.feature_names,
        shared_parameter_names=enhancement_factor.shared_parameter_names,
        variable_names=enhancement_factor.variable_names,
        instruction_list=new_instruction_list), mutation_type,
            instruction_index, change)
def get_random_mutation_type(self):
"""Chooses a random mutation type based on mutation probabilities.
Returns:
String, the chosen mutation type.
"""
return self.random_state.choice(
self.mutation_types, p=self.mutation_probabilities)
def get_random_instruction_index(self,
enhancement_factor,
allow_last_index=False):
"""Chooses a random instruction index for mutation.
Args:
enhancement_factor: Instance of enhancement_factors.EnhancementFactor
class, the input enhancement factor.
allow_last_index: Boolean, if True, allow index to be
enhancement_factor.num_instructions, useful for inserting instructions.
Returns:
Integer, the chosen instruction index.
"""
return self.random_state.randint(
self.num_fixed_instructions,
enhancement_factor.num_instructions + (1 if allow_last_index else 0))
  def get_random_instruction_name(self,
                                  existing_bound_parameters,
                                  num_inputs=None):
    """Gets a random instruction class name from instruction pool.

    The instruction will be chosen such that the total number of bound
    parameters will not exceed self.max_num_bound_parameters. In order to
    guarantee this, the new instruction must satisfy at least one of the
    conditions:
    * It contains bound parameters in the existing_bound_parameters list, so
      it does not add new bound parameters.
    * The number of bound parameters of new instruction, when added to the
      number of existing bound parameters, is less than or equal to
      self.max_num_bound_parameters.

    Args:
      existing_bound_parameters: Sequence of strings, the names of existing
        bound parameters.
      num_inputs: Integer, if present, specifies the number of input arguments.
        Defaults to no constraint on number of input arguments.

    Returns:
      String, the class name of instruction.

    Raises:
      ValueError: if no instruction in the pool satisfies the num_inputs
        and bound-parameter constraints.
    """
    candidates = instructions.get_instruction_names_with_signature(
        num_inputs=num_inputs,
        max_num_bound_parameters=(
            self.max_num_bound_parameters - len(existing_bound_parameters)),
        instructions=self.instruction_pool)
    # find instructions with bound parameters in the existing_bound_parameters
    # list, these instructions do not add new bound parameters and only need
    # to satisfy num_inputs constraint
    for bound_parameter in existing_bound_parameters:
      instruction_name = instructions.BOUND_PARAMETER_ASSOCIATION[
          bound_parameter]
      if instruction_name not in self.instruction_pool:
        continue
      if num_inputs is not None and (
          num_inputs != instructions.INSTRUCTION_CLASSES[
              instruction_name].get_num_inputs()):
        continue
      candidates.append(instruction_name)
    # deduplicate: the two sources above may propose the same instruction
    candidates = list(set(candidates))
    if not candidates:
      raise ValueError(
          'No instruction in instruction pool satisfies conditions: '
          f'num_inputs = {num_inputs}, '
          f'existing_bound_parameters = {existing_bound_parameters}')
    # renormalize the pool probabilities over the surviving candidates
    probabilities = [self.instruction_pool[instruction_name]
                     for instruction_name in candidates]
    assert probabilities and np.sum(probabilities) > 1e-8
    probabilities /= np.sum(probabilities)
    return self.random_state.choice(candidates, p=probabilities)
def get_random_instruction(self,
enhancement_factor,
existing_bound_parameters=None):
"""Gets a random instruction with random arguments.
Args:
enhancement_factor: Instance of enhancement_factors.EnhancementFactor
class, the input enhancement factor.
existing_bound_parameters: Sequence of strings, the names of existing
bound parameters. Defaults to those of enhancement_factor.
Returns:
Instance of instructions.Instruction, the resulting random instruction.
"""
if existing_bound_parameters is None:
existing_bound_parameters = enhancement_factor.bound_parameter_names
instruction_class = instructions.INSTRUCTION_CLASSES[
self.get_random_instruction_name(
existing_bound_parameters=existing_bound_parameters)]
output = self.random_state.choice(enhancement_factor.variable_names)
inputs = [
self.random_state.choice(enhancement_factor.allowed_input_names)
for _ in range(instruction_class.get_num_inputs())]
return instruction_class(output, *inputs)
def insert_instruction(self, enhancement_factor):
"""One of mutation rules: inserts a random instruction.
Args:
enhancement_factor: Instance of enhancement_factors.EnhancementFactor
class, the input enhancement factor.
Returns:
new_instruction_list: List of instructions.Instruction instances, the
new instruction list after insertion.
instruction_index: Integer, the index of inserted instruction.
change: Tuple of (None, instructions.Instruction), change of instructions.
message: String, the log of the mutation.
"""
new_instruction_list = copy.deepcopy(enhancement_factor.instruction_list)
instruction_index = self.get_random_instruction_index(
enhancement_factor, allow_last_index=True)
new_instruction = self.get_random_instruction(enhancement_factor)
new_instruction_list.insert(instruction_index, new_instruction)
return (new_instruction_list, instruction_index, (None, new_instruction),
'EnhancementFactorMutator: inserted instruction at index '
f'{instruction_index}: {new_instruction}')
def remove_instruction(self, enhancement_factor):
"""One of mutation rules: removes a random instruction.
Args:
enhancement_factor: Instance of enhancement_factors.EnhancementFactor
class, the input enhancement factor.
Returns:
new_instruction_list: List of instructions.Instruction instances, the
new instruction list after removal.
instruction_index: Integer, the index of removed instruction.
change: Tuple of (instructions.Instruction, None), change of instructions.
message: String, the log of the mutation.
"""
new_instruction_list = copy.deepcopy(enhancement_factor.instruction_list)
instruction_index = self.get_random_instruction_index(enhancement_factor)
old_instruction = new_instruction_list.pop(instruction_index)
return (new_instruction_list, instruction_index, (old_instruction, None),
'EnhancementFactorMutator: removed instruction at index '
f'{instruction_index}')
def replace_instruction(self, enhancement_factor):
"""One of mutation rules: replace a random instruction.
Input and output arguments will not be changed. The new instruction may
carry different bound parameters from the old instruction.
Args:
enhancement_factor: Instance of enhancement_factors.EnhancementFactor
class, the input enhancement factor.
Returns:
new_instruction_list: List of instructions.Instruction instances, the
new instruction list after replacement.
instruction_index: Integer, the index of replaced instruction.
change: Tuple of 2 instructions.Instruction instances, old and new
instructions.
message: String, the log of the mutation.
"""
new_instruction_list = copy.deepcopy(enhancement_factor.instruction_list)
instruction_index = self.get_random_instruction_index(enhancement_factor)
old_instruction = new_instruction_list[instruction_index]
new_instruction_name = self.get_random_instruction_name(
existing_bound_parameters=enhancement_factor.bound_parameter_names,
num_inputs=old_instruction.get_num_inputs())
new_instruction = instructions.INSTRUCTION_CLASSES[new_instruction_name](
*old_instruction.args)
new_instruction_list[instruction_index] = new_instruction
return (new_instruction_list, instruction_index, (old_instruction,
new_instruction),
f'EnhancementFactorMutator: replaced instruction at index '
f'{instruction_index}. {old_instruction} -> {new_instruction}')
def change_argument(self, enhancement_factor):
"""One of mutation rules: change an argument for a random instruction.
The argument is randomly chosen and can be input or output argument.
Bound parameters will not be changed.
Args:
enhancement_factor: Instance of enhancement_factors.EnhancementFactor
class, the input enhancement factor.
Returns:
new_instruction_list: List of instructions.Instruction instances, the
new instruction list after change of argument.
instruction_index: Integer, the index of altered instruction.
change: Tuple of 2 instructions.Instruction instances, old and new
instructions.
message: String, the log of the mutation.
"""
new_instruction_list = copy.deepcopy(enhancement_factor.instruction_list)
instruction_index = self.get_random_instruction_index(enhancement_factor)
old_instruction = new_instruction_list[instruction_index]
new_arguments = list(old_instruction.args)
mutate_arg_index = self.random_state.randint(old_instruction.get_num_args())
if mutate_arg_index == 0:
# change output argument
new_arguments[0] = self.random_state.choice(
list(enhancement_factor.variable_names))
else:
# change input argument
new_arguments[mutate_arg_index] = self.random_state.choice(
list(enhancement_factor.allowed_input_names))
new_instruction = old_instruction.__class__(*new_arguments)
new_instruction_list[instruction_index] = new_instruction
return (new_instruction_list, instruction_index, (old_instruction,
new_instruction),
f'EnhancementFactorMutator: chaged argument {mutate_arg_index} of '
f'instruction at index {instruction_index}. '
f'{old_instruction} -> {new_instruction}')
def randomize_instruction_list(self,
enhancement_factor,
num_instructions=None):
"""One of mutation rules: randomize the entire instruction list.
Args:
enhancement_factor: Instance of enhancement_factors.EnhancementFactor
class, the input enhancement factor.
num_instructions: Integer, the number of instructions in the randomized
instruction list. If not specified, the new instruction list will
have the same length with existing instruction list.
Returns:
new_instruction_list: List of instructions.Instruction instances, the
new instruction list after change of argument.
instruction_index: Integer, the index of altered instruction.
change: Tuple of 2 instructions.Instruction instances, old and new
instructions.
message: String, the log of the mutation.
Raises:
ValueError: if self.num_fixed_instructions is nonzero.
"""
if self.num_fixed_instructions:
raise ValueError('randomize_instruction_list cannot be applied with '
'fixed instructions')
num_instructions = num_instructions or enhancement_factor.num_instructions
new_instruction_list = []
bound_parameters = set()
for _ in range(num_instructions):
instruction = self.get_random_instruction(
enhancement_factor,
existing_bound_parameters=bound_parameters)
for bound_parameter in instruction.get_bound_parameters():
bound_parameters.add(bound_parameter)
new_instruction_list.append(instruction)
return (new_instruction_list, None, (None, None),
'EnhancementFactorMutator: randomized instruction list. '
f'New instruction list: {new_instruction_list}')
class XCFunctionalMutator:
  """Mutator for exchange-correlation functional.

  Holds one EnhancementFactorMutator per functional component: exchange,
  same-spin correlation and opposite-spin correlation. mutate() samples one
  component at random and delegates to the corresponding mutator.

  The three EnhancementFactor instances are reused for the entire mutation
  process, with their instruction_list changed by the mutators. Every
  mutation yields a brand-new XCFunctional wrapping the same three
  enhancement factors; this ensures that when Evaluator evaluates the
  functional, the jitted methods will be recompiled.
  """

  # Uniform fallback when the caller gives no component probabilities.
  _default_component_mutation_probabilities = [1 / 3, 1 / 3, 1 / 3]

  def __init__(self,
               mutator_x,
               mutator_css,
               mutator_cos,
               component_mutation_probabilities=None,
               seed=None):
    """Initializes XCFunctionalMutator.

    Args:
      mutator_x: Instance of mutators.EnhancementFactorMutator, mutator for
        the exchange enhancement factor.
      mutator_css: Instance of mutators.EnhancementFactorMutator, mutator
        for the same-spin correlation enhancement factor.
      mutator_cos: Instance of mutators.EnhancementFactorMutator, mutator
        for the opposite-spin correlation enhancement factor.
      component_mutation_probabilities: Sequence of 3 floats, probabilities
        of mutating the exchange, same-spin or opposite-spin component of
        the functional. Defaults to a uniform distribution.
      seed: Integer, the random seed.

    Raises:
      ValueError: if component_mutation_probabilities does not have length
        3, is not normalized to 1, or contains negative values.
    """
    self.mutator_x = mutator_x
    self.mutator_css = mutator_css
    self.mutator_cos = mutator_cos
    if component_mutation_probabilities:
      self.component_mutation_probabilities = component_mutation_probabilities
    else:
      self.component_mutation_probabilities = (
          self._default_component_mutation_probabilities)
    probabilities = self.component_mutation_probabilities
    # Validation order (length, normalization, sign) is part of the
    # observable behavior: it determines which error a bad input raises.
    if len(probabilities) != 3:
      raise ValueError(
          'Wrong length for component_mutation_probabilities. '
          f'Expected 3, got {len(probabilities)}')
    if abs(sum(probabilities) - 1.) > 1e-8:
      raise ValueError(
          'component_mutation_probabilities not normalized to 1')
    if min(probabilities) < 0.:
      raise ValueError(
          'component_mutation_probabilities contains negative probabilities')
    self.random_state = np.random.RandomState(seed=seed)

  def get_random_component(self):
    """Samples which component (f_x, f_css or f_cos) to mutate next.

    Returns:
      String, the chosen component. 'f_x', 'f_css' or 'f_cos'.
    """
    component_names = ['f_x', 'f_css', 'f_cos']
    return self.random_state.choice(
        component_names, p=self.component_mutation_probabilities)

  def get_mutator_for_component(self, component):
    """Looks up the mutator responsible for a functional component.

    Args:
      component: String, the functional component: 'f_x', 'f_css' or 'f_cos'.

    Returns:
      Instance of EnhancementFactorMutator, one of self.mutator_x,
      self.mutator_css or self.mutator_cos.
    """
    component_to_mutator = {
        'f_x': self.mutator_x,
        'f_css': self.mutator_css,
        'f_cos': self.mutator_cos,
    }
    return component_to_mutator[component]

  def mutate(self, functional, verbose=True):
    """Mutates a random component (f_x, f_css or f_cos) of the functional.

    Args:
      functional: Instance of xc_functionals.XCFunctional, the exchange-
        correlation functional to be mutated.
      verbose: Boolean, if True, logs which component was chosen.

    Returns:
      Tuple of (new_functional, component, mutation_type, instruction_index,
      change): the new XCFunctional, the mutated component name, the mutation
      rule applied, the index of the mutated instruction, and a pair of
      instructions.Instruction (either element may be None depending on the
      mutation type).
    """
    component = self.get_random_component()
    if verbose:
      logging.info('XCFunctionalMutator: component %s is chosen.', component)
    enhancement_factors = {
        'f_x': functional.f_x,
        'f_css': functional.f_css,
        'f_cos': functional.f_cos
    }
    mutator = self.get_mutator_for_component(component)
    new_enhancement_factor, mutation_type, instruction_index, change = (
        mutator.mutate(enhancement_factors[component], verbose=verbose))
    enhancement_factors[component] = new_enhancement_factor
    new_functional = xc_functionals.XCFunctional(**enhancement_factors)
    return new_functional, component, mutation_type, instruction_index, change
|
|
# ParkingData represents the complete collection of parking lots,
# pre-populated with "static" data where possible. The goal is to
# reduce API breakage due to HTML template changes or field format
# discrepancies by limiting the number of fields pulled dynamically.
class ParkingData:
    """Static catalog of Madison city and UW campus parking facilities.

    Lot records are pre-populated with static data where possible; only
    'openSpots' is filled in dynamically by the scraper, which limits
    breakage from upstream HTML template changes.

    Attributes:
        city_data: dict with 'availability_url', 'special_events_url' and
            'lots' (list of lot dicts) for City of Madison facilities.
        campus_data: same structure for UW transportation facilities.
    """

    # Common URL prefixes for per-facility detail pages.
    _CITY_FACILITY_URL = ('http://www.cityofmadison.com/parkingUtility/'
                          'garagesLots/facilities/')
    _UW_LOT_URL = 'https://fpm-www1.fpm.wisc.edu/smomap/lot.aspx?lot='

    @staticmethod
    def _lot(name, operated_by, short_name, street, postal_code, lat, lng,
             entrances, total_spots, web_url):
        """Build a single lot record.

        Args:
            name: Full display name of the facility.
            operated_by: 'city' or 'uw'.
            short_name: Minimum reliable unique string identifying the lot.
            street: Street address (city/state are always Madison, WI).
            postal_code: ZIP code string.
            lat: Facility latitude.
            lng: Facility longitude.
            entrances: List of entrance street addresses (plain strings).
            total_spots: Total stall count.
            web_url: Facility detail page URL.

        Returns:
            dict describing the lot; 'openSpots' starts as None and is
            populated dynamically at scrape time.
        """
        return {
            'name': name,
            'operatedBy': operated_by,
            'shortName': short_name,
            'address': {
                'street': street,
                'city': 'Madison',
                'state': 'WI',
                'postalCode': postal_code
            },
            'coordinates': {
                'lat': lat,
                'lng': lng
            },
            'entrances': entrances,
            'totalSpots': total_spots,
            'openSpots': None,
            'webUrl': web_url
        }

    def __init__(self):
        city_url = self._CITY_FACILITY_URL
        self.city_data = {
            'availability_url':
                'http://www.cityofmadison.com/parkingUtility/garagesLots/availability/',
            'special_events_url':
                'http://www.cityofmadison.com/parkingUtility/calendar/index.cfm',
            'lots': [
                self._lot('State Street Campus Garage', 'city', 'campus',
                          '430 North Frances Street', '53703',
                          43.074067, -89.39624099999999,
                          ['400 North Frances Street',
                           '400 North Lake Street'],
                          1065, city_url + 'stateStCampus.cfm'),
                # Bug fix: Brayton's entrances previously held a dict
                # ({'street': ...}) while every other lot lists plain
                # address strings.
                self._lot('Brayton Lot', 'city', 'brayton',
                          '1 South Butler Street', '53703',
                          43.076728, -89.3802089,
                          ['10 South Butler Street'],
                          247, city_url + 'brayton.cfm'),
                self._lot('Capitol Square North Garage', 'city', 'north',
                          '218 East Mifflin Street', '53703',
                          43.077627, -89.38321499999999,
                          ['100 North Butler Street',
                           '200 East Mifflin Street',
                           '100 North Webster Street'],
                          613, city_url + 'capSquareNorth.cfm'),
                self._lot('Government East Garage', 'city', 'east',
                          '215 South Pinckney Street', '53703',
                          43.073934, -89.380245,
                          ['200 South Pinckney Street',
                           '100 East Wilson Street'],
                          516, city_url + 'govtEast.cfm'),
                self._lot('Overture Center Garage', 'city', 'overture',
                          '318 West Mifflin Street', '53703',
                          43.073353, -89.38928299999999,
                          ['300 West Dayton Street',
                           '300 West Mifflin Street'],
                          620, city_url + 'overture.cfm'),
                # Bug fix: removed stray trailing space from the Johnson
                # Street entrance address.
                self._lot('State Street Capitol Garage', 'city',
                          'state street capitol',
                          '214 North Carroll Street', '53703',
                          43.0753667, -89.388021,
                          ['200 North Carroll Street',
                           '100 West Dayton Street',
                           '100 West Johnson Street'],
                          850, city_url + 'stateStCapitol.cfm'),
            ]
        }
        uw_url = self._UW_LOT_URL
        self.campus_data = {
            'availability_url':
                'http://transportation.wisc.edu/parking/lotinfo_occupancy.aspx',
            'special_events_url':
                'http://transportation.wisc.edu/newsAndEvents/events.aspx',
            'lots': [
                self._lot('University Avenue Ramp', 'uw', '20',
                          '1390 University Avenue', '53706',
                          43.073397, -89.4088843, [], 220, uw_url + '20'),
                self._lot('Nancy Nicholas Hall Garage', 'uw', '27',
                          '1330 Linden Drive', '53706',
                          43.0751477, -89.4097714, [], 48, uw_url + '27'),
                self._lot('Observatory Drive Ramp', 'uw', '36',
                          '1645 Observatory Drive', '53706',
                          43.0764412, -89.4138189, [], 463, uw_url + '36'),
                self._lot('Helen C. White Garage Upper Level', 'uw', '6U',
                          '600 North Park Street', '53706',
                          43.0763396, -89.4007865, [], 95, uw_url + '06'),
                self._lot('Helen C. White Garage Lower Level', 'uw', '6L',
                          '600 North Park Street', '53706',
                          43.0767906197085, -89.4007865, [], 95,
                          uw_url + '06'),
                self._lot('Grainger Hall Garage', 'uw', '7',
                          '325 North Brooks Street', '53715',
                          43.07277759999999, -89.40241119999999, [], 412,
                          uw_url + '07'),
                self._lot('North Park Street Ramp', 'uw', '29',
                          '21 North Park Street', '53715',
                          43.0682501, -89.4000363, [], 340, uw_url + '029'),
                self._lot('Lake & Johnson Ramp', 'uw', '46',
                          '301 North Lake Street', '53715',
                          43.0723259, -89.396855, [], 733, uw_url + '046'),
                self._lot('Fluno Center Garage', 'uw', '83',
                          '314 North Frances Street', '53703',
                          43.0725931, -89.3958675, [], 296, uw_url + '83'),
                self._lot('Engineering Drive Ramp', 'uw', '17',
                          '1525 Engineering Drive', '53706',
                          43.07213400000001, -89.4122016, [], 822,
                          uw_url + '17'),
                self._lot('Union South Garage', 'uw', '80',
                          '1308 West Dayton Street', '53715',
                          43.0712481, -89.40804949999999, [], 168,
                          uw_url + '80'),
                self._lot('University Bay Drive Ramp', 'uw', '76',
                          '2501 University Bay Drive', '53705',
                          43.0813065, -89.4282504, [], 1290, uw_url + '76'),
            ]
        }
|
|
"""Base configuration implementation."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import ipaddress
import json
from collections import OrderedDict
class InvalidConfigError(Exception):
    """Raised when the supplied configuration file fails validation."""
def test_config_condition(cond, msg):
"""
Evaluate condition and raise InvalidConfigError if condition True.
Args:
cond (bool): Condition on which to raise an error if it is true
msg (str): Message for the error if the condition is true
"""
if cond:
raise InvalidConfigError(msg)
class Conf:
    """Base class for FAUCET configuration.

    Subclasses declare their schema through the ``defaults`` and
    ``defaults_types`` class attributes. Instances are populated from a
    parsed YAML dict and can later be frozen with finalize(), after which
    only ``dyn*`` attributes and names in ``mutable_attrs`` may be
    reassigned.
    """

    # Attribute names that may still be reassigned after finalize().
    mutable_attrs = frozenset() # type: frozenset
    # Default value for every configurable key (subclass-defined).
    defaults = {} # type: dict
    # Expected type(s) for every configurable key (subclass-defined).
    defaults_types = {} # type: dict
    # True once finalize() has run and the object is (mostly) immutable.
    dyn_finalized = False
    # Cached result of __hash__ once finalized.
    dyn_hash = None

    def __init__(self, _id, dp_id, conf=None):
        """Populate attributes from conf, apply defaults and sanity check.

        Args:
            _id: identifier for this config object (used in error messages).
            dp_id: datapath ID this configuration belongs to.
            conf: dict of configured values (None means all defaults).
        """
        self._id = _id
        self.dp_id = dp_id
        if conf is None:
            conf = {}
        if self.defaults is not None and self.defaults_types is not None:
            # defaults and defaults_types must declare exactly the same keys.
            diff = set(self.defaults.keys()).symmetric_difference(set(self.defaults_types.keys()))
            assert not diff, diff
        if isinstance(conf, dict):
            self.update(conf)
            self.set_defaults()
            self.check_config()
        # Snapshot of the post-defaults config; used by to_conf()/_conf_keys().
        self.orig_conf = {k: self.__dict__[k] for k in self.defaults}
        for k, conf_v in self.orig_conf.items():
            # Flatten nested Conf objects to their own snapshots.
            if isinstance(conf_v, Conf):
                self.orig_conf[k] = conf_v.orig_conf

    def __setattr__(self, name, value):
        # After finalize(), only dyn* state and declared mutable attributes
        # may be reassigned; anything else is a programming error.
        if not self.dyn_finalized or name.startswith('dyn') or name in self.mutable_attrs:
            super().__setattr__(name, value)
        else:
            raise ValueError('cannot update %s on finalized Conf object' % name)

    def _set_default(self, key, value, conf=None):
        """Set conf[key] to value, but only if it is currently None."""
        if conf is None:
            conf = self.__dict__
        assert key in conf, key
        if conf[key] is None:
            conf[key] = value

    def _set_conf_defaults(self, defaults, conf):
        """Apply every default to conf (None values only)."""
        for key, value in defaults.items():
            self._set_default(key, value, conf=conf)

    def set_defaults(self):
        """Set default values and run any basic sanity checks."""
        self._set_conf_defaults(self.defaults, self.__dict__)

    def _check_unknown_conf(self, conf):
        """Check that supplied conf dict doesn't specify keys not defined."""
        sub_conf_names = set(conf.keys())
        unknown_conf_names = sub_conf_names - set(self.defaults.keys())
        test_config_condition(unknown_conf_names, '%s fields unknown in %s' % (
            unknown_conf_names, self._id))

    def _check_conf_types(self, conf, conf_types):
        """Check that conf value is of the correct type."""
        test_config_condition(not isinstance(conf, dict), (
            'Conf object %s contents %s must be type %s not %s' % (
                self._id, conf, dict, type(conf))))
        for conf_key, conf_value in conf.items():
            test_config_condition(
                conf_key not in conf_types, '%s field unknown in %s (known types %s)' % (
                    conf_key, self._id, conf_types))
            # None always passes: it means "use the default for this key".
            if conf_value is not None:
                conf_type = conf_types[conf_key]
                test_config_condition(
                    not isinstance(conf_value, conf_type), '%s value %s must be %s not %s' % (
                        conf_key, conf_value,
                        conf_type, type(conf_value))) # pytype: disable=invalid-typevar

    @staticmethod
    def _set_unknown_conf(conf, conf_types):
        """Fill unset keys: list-typed keys get [], all others get None."""
        for conf_key, conf_type in conf_types.items():
            if conf_key not in conf:
                if conf_type == list:
                    conf[conf_key] = []
                else:
                    conf[conf_key] = None
        return conf

    def update(self, conf):
        """Parse supplied YAML config and sanity check."""
        self.__dict__.update(conf)
        self._check_unknown_conf(conf)
        self._check_conf_types(conf, self.defaults_types)

    @staticmethod
    def check_config():
        """Check config at instantiation time for errors, typically via assert."""
        return

    def _conf_keys(self, conf, subconf=True, ignore_keys=None):
        """Return a list of key/values of attributes with dyn/Conf attributes/filtered.

        Args:
            conf: Conf object whose orig_conf snapshot is walked.
            subconf: if False, skip values that are (sequences of) Conf.
            ignore_keys: optional collection of key names to exclude.
        """
        conf_keys = []
        for key, value in sorted(
                ((key, value) for key, value in conf.orig_conf.items()
                 if key in self.defaults)):
            if ignore_keys and key in ignore_keys:
                continue
            if not subconf and value:
                if isinstance(value, Conf):
                    continue
                # NOTE(review): value[0] assumes an indexable sequence; a
                # populated set here would raise TypeError on indexing -
                # confirm sets never reach this branch pre-finalize().
                if isinstance(value, (tuple, list, set)) and isinstance(value[0], Conf):
                    continue
            conf_keys.append((key, self._str_conf(value)))
        return conf_keys

    @staticmethod
    def _conf_dyn_keys(conf):
        """Return (key, value) pairs for all dynamic (dyn*) attributes."""
        return [(key, value) for key, value in conf.__dict__.items() if key.startswith('dyn')]

    def merge_dyn(self, other_conf):
        """Merge dynamic state from other conf object."""
        self.__dict__.update(self._conf_dyn_keys(other_conf))

    def _str_conf(self, conf_v):
        """Recursively convert a config value to a JSON-serializable form."""
        if isinstance(conf_v, (bool, str, int)):
            return conf_v
        if isinstance(conf_v, (
                ipaddress.IPv4Address, ipaddress.IPv4Interface, ipaddress.IPv4Network,
                ipaddress.IPv6Address, ipaddress.IPv6Interface, ipaddress.IPv6Network)):
            return str(conf_v)
        if isinstance(conf_v, (dict, OrderedDict)):
            # None values are dropped rather than serialized.
            return {str(i): self._str_conf(j) for i, j in conf_v.items() if j is not None}
        if isinstance(conf_v, (list, tuple, frozenset)):
            return tuple(self._str_conf(i) for i in conf_v if i is not None)
        if isinstance(conf_v, Conf):
            # Nested Conf objects are represented by name/_id, not recursed.
            for i in ('name', '_id'):
                if hasattr(conf_v, i):
                    return getattr(conf_v, i)
        # Any other type is unrepresentable and dropped.
        return None

    def to_conf(self):
        """Return configuration as a dict."""
        # NOTE(review): str(k) on a defaults key that is already a str is a
        # no-op; presumably guards against non-string keys - confirm.
        conf = {
            k: self.orig_conf[str(k)] for k in self.defaults if k != 'name'}
        return json.dumps(self._str_conf(conf), sort_keys=True, indent=4, separators=(',', ': '))

    def conf_diff(self, other):
        """Return text diff between two Confs."""
        differ = difflib.Differ()
        return '\n'.join(differ.compare(
            self.to_conf().splitlines(), other.to_conf().splitlines()))

    def conf_hash(self, subconf=True, ignore_keys=None):
        """Return hash of keys configurably filtering attributes."""
        return hash(frozenset(list(map(
            str, self._conf_keys(self, subconf=subconf, ignore_keys=ignore_keys)))))

    def __hash__(self):
        # Cache the hash once finalized (config can no longer change then).
        if self.dyn_hash is not None:
            return self.dyn_hash
        dyn_hash = self.conf_hash(subconf=True)
        if self.dyn_finalized:
            self.dyn_hash = dyn_hash
        return dyn_hash

    def _finalize_val(self, val):
        """Recursively convert mutable containers to immutable equivalents."""
        if isinstance(val, list):
            return tuple(
                self._finalize_val(v) for v in val)
        if isinstance(val, set):
            return frozenset(
                [self._finalize_val(v) for v in val])
        if isinstance(val, dict):
            # key=str gives a deterministic order even for mixed key types.
            return OrderedDict([
                (k, self._finalize_val(v)) for k, v in sorted(val.items(), key=str)])
        return val

    def finalize(self):
        """Configuration parsing marked complete."""
        self.__dict__.update(
            {k: self._finalize_val(v) for k, v in self.__dict__.items()
             if not k.startswith('dyn')})
        self.dyn_finalized = True

    def ignore_subconf(self, other, ignore_keys=None):
        """Return True if this config same as other, ignoring sub config."""
        return (self.conf_hash(
            subconf=False, ignore_keys=ignore_keys) == other.conf_hash(
                subconf=False, ignore_keys=ignore_keys))

    def __eq__(self, other):
        # Equality is defined purely by config hash, not identity.
        return self.__hash__() == other.__hash__()

    def __ne__(self, other):
        return not self.__eq__(other)

    @staticmethod
    def _check_ip_str(ip_str, ip_method=ipaddress.ip_address):
        """Parse ip_str via ip_method, raising InvalidConfigError on failure."""
        try:
            # bool type is deprecated as input by the ipaddress library
            # (bool is an int subclass), so reject it explicitly.
            if not isinstance(ip_str, bool):
                return ip_method(ip_str)
            raise InvalidConfigError('Invalid IP address %s: IP address of type bool' % (ip_str))
        except (ValueError, AttributeError, TypeError) as err:
            raise InvalidConfigError('Invalid IP address %s: %s' % (ip_str, err)) from err

    @staticmethod
    def _ipvs(ipas):
        """Return the set of IP versions present in ipas."""
        return frozenset([ipa.version for ipa in ipas])

    @staticmethod
    def _by_ipv(ipas, ipv):
        """Return only the addresses in ipas whose IP version is ipv."""
        return frozenset([ipa for ipa in ipas if ipa.version == ipv])
|
|
import itertools
import datetime
import slugify
from django.views import generic
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core import urlresolvers, paginator
from django import http
from django.db.models import Q
from django.utils import timezone
from taggit.models import Tag
from rest_framework import viewsets
from rest_framework import mixins
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.decorators import detail_route, list_route
from . import forms
from . import models
from . import serializers
from . import filters
from . import suggestions
class Log(LoginRequiredMixin, generic.TemplateView):
    """Render the event log page for authenticated users."""

    template_name = 'events/log.html'
class EventCreate(LoginRequiredMixin, generic.CreateView):
    """Create a new event owned by the requesting user."""

    form_class = forms.EventCreateForm
    template_name = 'events/events/create.html'
    success_url = urlresolvers.reverse_lazy('home')

    def form_valid(self, form):
        # Stamp ownership before saving, then attach a per-user config to
        # the freshly created event.
        form.instance.created_by = self.request.user
        response = super().form_valid(form)
        form.instance.configs.create(user=self.request.user)
        return response
class EntryCreate(LoginRequiredMixin, generic.TemplateView):
    """Render the entry creation page."""

    template_name = 'events/entries/create.html'

    def get_context_data(self):
        # Expose the model class so the template can read its metadata.
        context = super().get_context_data()
        context['Entry'] = models.Entry
        return context
class Search(APIView):
    """Generic search endpoint driven by a per-type configuration table."""

    # Maps the 'type' query parameter to a queryset plus search settings.
    config = {
        'config': {
            'qs': models.EventConfig.objects.select_related('event'),
            'user_attr': 'user',
            'search_fields': ['event__verbose_name'],
            'title_field': 'title',
        }
    }

    def get(self, request, format=None):
        search_type = request.GET['type']
        query = request.GET['q']
        matches = self.get_results(request, search_type, query)
        return Response({'results': self.serialize_results(matches)})

    def get_results(self, request, t, query):
        # Restrict to the requesting user's rows, then OR together an
        # icontains clause per configured search field.
        conf = self.config[t]
        lookups = {'{}'.format(conf['user_attr']): request.user}
        combined = None
        for field in conf['search_fields']:
            clause = Q(**{'{}__icontains'.format(field): query})
            combined = clause if combined is None else combined | clause
        return conf['qs'].filter(**lookups).filter(combined)

    def serialize_results(self, qs):
        return [{'value': item.pk, 'name': item.title, 'text': item.title}
                for item in qs]
class EntryViewSet(viewsets.ModelViewSet):
    """CRUD API over the requesting user's entries, plus list routes for
    by-day grouping, suggestions and per-day stats."""

    serializer_class = serializers.EntrySerializer
    # Entries are addressed by UUID rather than integer pk.
    lookup_field = 'uuid'
    lookup_value_regex = '[0-9a-f-]{36}'

    def get_queryset(self):
        # Restrict to the requesting user's entries and prefetch the
        # relations the serializers touch, avoiding per-row queries.
        return models.Entry.objects.filter(
            config__user=self.request.user
        ).select_related('config__event').prefetch_related('tags')

    def update(self, request, *args, **kwargs):
        # Mirrors DRF's UpdateModelMixin.update, except the response body
        # uses the nested serializer instead of the flat one.
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        if getattr(instance, '_prefetched_objects_cache', None):
            # If 'prefetch_related' has been applied to a queryset, we need to
            # forcibly invalidate the prefetch cache on the instance.
            instance._prefetched_objects_cache = {}
        return Response(serializers.EntryNestedSerializer(instance).data)

    @list_route(methods=['GET'])
    def byday(self, request, *args, **kwargs):
        """Group the (filtered) entries by day between start and end."""
        qs = self.get_queryset()
        form = forms.ByDayForm(request.GET)
        if not form.is_valid():
            return Response({'errors': form.errors.as_json()}, status=400)
        # Apply the user's additional filters before grouping.
        qs = filters.EntryFilter(request.GET, queryset=qs).qs
        data = {
            'start': form.cleaned_data['start'],
            'end': form.cleaned_data['end'],
            'days': qs.by_day(
                end=form.cleaned_data['end'],
                start=form.cleaned_data['start'],
                fill=False,
                serializer_class=serializers.EntryNestedSerializer,
            ),
        }
        return Response(data, status=200)

    @list_route(methods=['GET'])
    def suggestions(self, request, *args, **kwargs):
        """Return up to 10 entries ranked by closeness to a reference date."""
        form = forms.EntrySuggestionForm(request.GET)
        if not form.is_valid():
            return Response({'errors': form.errors.as_json()}, status=400)
        qs = self.get_queryset()
        # Fall back to "now" when no explicit reference date is supplied.
        ref = form.cleaned_data.get('date') or timezone.now()
        results = suggestions.rank_by_closest(qs, 'start', ref)
        results = [
            {
                'score': score,
                'entry': serializers.EntryNestedSerializer(entry).data}
            for score, entry in results
        ][:10]
        return Response(results, status=200)

    @list_route(methods=['GET'])
    def stats(self, request, *args, **kwargs):
        """Return per-day statistics for the user's entries."""
        qs = self.get_queryset()
        # NOTE(review): 'start' defaults to today while 'end' defaults to 14
        # days ago, so start > end by default - the two names look swapped;
        # confirm against qs.stats() expectations before changing.
        initial = {
            'start': timezone.now().date(),
            'end': (timezone.now() - datetime.timedelta(days=14)).date(),
            'fill': True,
            'ordering': 'date',
        }
        form = forms.StatsForm(request.GET, initial=initial)
        if not form.is_valid():
            return Response({'errors': form.errors.as_json()}, status=400)
        # Each cleaned value falls back to its initial when empty/falsy.
        data = {
            'fill': form.cleaned_data['fill'] or initial['fill'],
            'start': form.cleaned_data['start'] or initial['start'],
            'end': form.cleaned_data['end'] or initial['end'],
            'ordering': form.cleaned_data['ordering'] or initial['ordering'],
        }
        data['results'] = qs.stats(
            'day',
            **data
        )
        return Response(data, status=200)
class TagViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
    """Read-only tag listing with a case-insensitive slug search action."""

    serializer_class = serializers.TagSerializer

    def get_queryset(self):
        """All tags; filtering happens in the individual actions."""
        return Tag.objects.all()

    @list_route(methods=['get'])
    def search(self, request, pk=None):
        """Return tags whose slug contains the ``q`` GET parameter."""
        term = request.GET.get('q', '')
        matches = self.get_queryset().filter(slug__icontains=term)
        payload = {"results": self.serializer_class(matches, many=True).data}
        return Response(payload, status=200)
class ConfigViewSet(viewsets.ModelViewSet):
    """CRUD for the requesting user's per-event configuration objects."""

    serializer_class = serializers.ConfigSerializer

    def get_queryset(self):
        """Only configs belonging to the requesting user."""
        return self.request.user.event_configs.all()

    def create(self, request, *args, **kwargs):
        """Create (or reuse) the event named in the payload, then return
        the user's config for it, creating the config if needed."""
        event_serializer = serializers.EventSerializer(data=request.data)
        event_serializer.is_valid(raise_exception=True)
        slug = slugify.Slugify(
            to_lower=True,
            max_length=190,
        )(event_serializer.validated_data['verbose_name'])
        try:
            event = models.Event.objects.get(slug=slug)
        except models.Event.DoesNotExist:
            # No event with that slug yet: persist a new one owned by the
            # requesting user.
            self.perform_create(event_serializer)
            event = event_serializer.instance
            event.created_by = request.user
            event.save()
        config, _ = models.EventConfig.objects.get_or_create(
            event=event,
            user=request.user
        )
        config_serializer = self.get_serializer(instance=config)
        headers = self.get_success_headers(config_serializer.data)
        return Response(
            config_serializer.data,
            status=status.HTTP_201_CREATED,
            headers=headers,
        )
|
|
# coding: utf-8
"""A shortcut to deploy a fresh modoboa instance."""
import getpass
import os
import shutil
import subprocess
import sys
try:
import pip
except ImportError:
sys.stderr.write("Error: pip is required to install extensions.\n")
sys.exit(2)
import django
from django.core import management
from django.template import Context, Template
import dj_database_url
from modoboa.core.commands import Command
from modoboa.lib.api_client import ModoAPIClient
DBCONN_TPL = """
'{{ conn_name }}': {
'ENGINE': '{{ ENGINE }}',
'NAME': '{{ NAME }}',
'USER': '{% if USER %}{{ USER }}{% endif %}',
'PASSWORD': '{% if PASSWORD %}{{ PASSWORD }}{% endif %}',
'HOST': '{% if HOST %}{{ HOST }}{% endif %}',
'PORT': '{% if PORT %}{{ PORT }}{% endif %}',
'ATOMIC_REQUESTS': True,
{% if ENGINE == 'django.db.backends.mysql' %}'OPTIONS' : {
"init_command" : 'SET foreign_key_checks = 0;',
},{% endif %}
},
"""
class DeployCommand(Command):
"""The ``deploy`` command."""
help = (
"Create a fresh django project (calling startproject)"
" and apply Modoboa specific settings."
)
def __init__(self, *args, **kwargs):
super(DeployCommand, self).__init__(*args, **kwargs)
self._parser.add_argument('name', type=str,
help='The name of your Modoboa instance')
self._parser.add_argument(
'--collectstatic', action='store_true', default=False,
help='Run django collectstatic command'
)
self._parser.add_argument(
'--dburl', type=str, nargs="+", default=None,
help='A database-url with a name')
self._parser.add_argument(
'--domain', type=str, default=None,
help='The domain under which you want to deploy modoboa')
self._parser.add_argument(
'--lang', type=str, default="en-us",
help="Set the default language"
)
self._parser.add_argument(
'--timezone', type=str, default="UTC",
help="Set the local timezone"
)
self._parser.add_argument(
'--devel', action='store_true', default=False,
help='Create a development instance'
)
self._parser.add_argument(
'--extensions', type=str, nargs='*',
help="The list of extension to deploy"
)
self._parser.add_argument(
'--dont-install-extensions', action='store_true', default=False,
help='Do not install extensions using pip'
)
def _exec_django_command(self, name, cwd, *args):
"""Run a django command for the freshly created project
:param name: the command name
:param cwd: the directory where the command must be executed
"""
cmd = 'python manage.py %s %s' % (name, " ".join(args))
if not self._verbose:
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=True, cwd=cwd
)
output = p.communicate()
else:
p = subprocess.Popen(cmd, shell=True, cwd=cwd)
p.wait()
output = None
if p.returncode:
if output:
print >> sys.stderr, "\n".join(
[l for l in output if l is not None])
print >> sys.stderr, "%s failed, check your configuration" % cmd
def ask_db_info(self, name='default'):
"""Prompt the user for database information
Gather all information required to create a new database
connection (into settings.py).
:param name: the connection name
"""
print "Configuring database connection: %s" % name
info = {
'conn_name': name,
'ENGINE': raw_input('Database type (mysql, postgres or sqlite3): ')
}
if info['ENGINE'] not in ['mysql', 'postgres', 'sqlite3']:
raise RuntimeError('Unsupported database engine')
if info['ENGINE'] == 'sqlite3':
info['ENGINE'] = 'django.db.backends.sqlite3'
info['NAME'] = '%s.db' % name
return info
if info['ENGINE'] == 'postgres':
info['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
default_port = 5432
else:
info['ENGINE'] = 'django.db.backends.mysql'
default_port = 3306
info['HOST'] = raw_input("Database host (default: 'localhost'): ")
info['PORT'] = raw_input(
"Database port (default: '%s'): " % default_port)
# leave port setting empty, if default value is supplied and
# leave it to django
if info['PORT'] == default_port:
info['PORT'] = ''
info['NAME'] = raw_input('Database name: ')
info['USER'] = raw_input('Username: ')
info['PASSWORD'] = getpass.getpass('Password: ')
return info
def _get_extension_list(self):
"""Ask the API to get the list of all extensions.
We hardcode the API url here to avoid a loading of
django's settings since they are not available yet...
"""
url = "http://api.modoboa.org/"
official_exts = ModoAPIClient(url).list_extensions()
return [extension["name"] for extension in official_exts]
def install_extensions(self, extensions):
"""Install one or more extensions.
Return the list of extensions providing settings we must
include in the final configuration.
"""
pip_args = ["install"] + [extension[0] for extension in extensions]
pip.main(pip_args)
extra_settings = []
for extension in extensions:
module = __import__(extension[1], locals(), globals(), [])
basedir = os.path.dirname(module.__file__)
if not os.path.exists("{0}/settings.py".format(basedir)):
continue
extra_settings.append(extension[1])
return extra_settings
def handle(self, parsed_args):
import pdb
django.setup()
management.call_command(
'startproject', parsed_args.name, verbosity=False
)
path = "%(name)s/%(name)s" % {'name': parsed_args.name}
sys.path.append(parsed_args.name)
conn_tpl = Template(DBCONN_TPL)
connections = {}
if parsed_args.dburl:
for dburl in parsed_args.dburl:
conn_name, url = dburl.split(":", 1)
info = dj_database_url.config(default=url)
# In case the user fails to supply a valid database url,
# fallback to manual mode
if not info:
print "There was a problem with your database-url. \n"
info = self.ask_db_info(conn_name)
# If we set this earlier, our fallback method will never
# be triggered
info['conn_name'] = conn_name
connections[conn_name] = conn_tpl.render(Context(info))
else:
connections["default"] = conn_tpl.render(
Context(self.ask_db_info()))
if parsed_args.domain:
allowed_host = parsed_args.domain
else:
allowed_host = raw_input(
'What will be the hostname used to access Modoboa? '
)
if not allowed_host:
allowed_host = "localhost"
extra_settings = []
extensions = parsed_args.extensions
if extensions:
if "all" in extensions:
extensions = self._get_extension_list()
extensions = [(extension, extension.replace("-", "_"))
for extension in extensions]
if not parsed_args.dont_install_extensions:
extra_settings = self.install_extensions(extensions)
extensions = [extension[1] for extension in extensions]
bower_components_dir = os.path.realpath(
os.path.join(os.path.dirname(__file__), "../../bower_components")
)
mod = __import__(parsed_args.name, globals(), locals(), ['settings'])
tpl = self._render_template(
"%s/settings.py.tpl" % self._templates_dir, {
'db_connections': connections,
'secret_key': mod.settings.SECRET_KEY,
'name': parsed_args.name,
'allowed_host': allowed_host,
'lang': parsed_args.lang,
'timezone': parsed_args.timezone,
'bower_components_dir': bower_components_dir,
'devmode': parsed_args.devel,
'extensions': extensions,
'extra_settings': extra_settings
}
)
with open("%s/settings.py" % path, "w") as fp:
fp.write(tpl)
shutil.copyfile(
"%s/urls.py.tpl" % self._templates_dir, "%s/urls.py" % path
)
os.mkdir("%s/media" % parsed_args.name)
os.unlink("%s/settings.pyc" % path)
self._exec_django_command(
"migrate", parsed_args.name, '--noinput'
)
self._exec_django_command(
"load_initial_data", parsed_args.name
)
if parsed_args.collectstatic:
self._exec_django_command(
"collectstatic", parsed_args.name, '--noinput'
)
self._exec_django_command(
"set_default_site", parsed_args.name, allowed_host
)
|
|
import os
import commands
from pUtil import tolog
# NOTE(review): legacy Python 2 module -- uses the `commands` module,
# `dict.has_key` and `except Exception, e` syntax throughout.
class FileState:
    """
    This class is used to set and update the current output file state dictionary.
    When the job is running, the file fileState-<JobID>.pickle is created which contains
    the current state of the output file dictionary
    File state dictionary format:
    { file_name1 : state1, ..., file_nameN : stateN }
    where file_name does not contain file path since it will change for holding jobs (from work dir to data dir), and
    the state variables have the list form "file_state", "reg_state" (for output files) and "file_state", "transfer_mode" (input files).
    "file_state" can assume the following values for "output" files:
    "not_created" : initial value for all files at the beginning of the job
    "created" : file was created and is waiting to be transferred
    "not_transferred" : file has not been transferred
    "transferred" : file has already been transferred (no further action)
    "missing" : file was never created, the job failed (e.g. output file of a failed job; a log should never be missing)
    "file_state" can assume the following values for "input" files:
    "not_transferred" : file has not been transferred (can remain in this state for FileStager and directIO modes)
    "transferred" : file has already been transferred (no further action)
    "reg_state" can assume the following values (relevant for output files):
    "not_registered" : file was not registered in the LFC
    "registered" : file was already registered in the LFC (no further action)
    "transfer_mode" can assume the following values (relevant for input files)
    "copy_to_scratch" : default file transfer mode
    "remote_io" : direct access / remote IO tranfer mode
    "file_stager" : file stager tranfer mode
    "no_transfer" : input file has been skipped
    E.g. a file with state = "created", "not_registered" should first be transferred and then registered in the LFC.
    The file state dictionary should be created with "not_created" states as soon as the output files are known (pilot).
    The "created" states should be set after the payload has run and if the file in question were actually created.
    "transferred" should be set by the mover once the file in question has been transferred.
    "registered" should be added to the file state once the file has been registered.
    "copy_to_scratch" is to set for all input files by default. In case remote IO / FileStager instructions are found in copysetup[in]
    the state will be changed to "remote_io" / "file_stager". Brokerage can also decide that remote IO is to be used. In that case,
    "remote_io" will be set for the relevant input files (e.g. DBRelease and lib files are excluded, i.e. they will have "copy_to_scratch"
    transfer mode).
    """

    def __init__(self, workDir, jobId="0", mode="", type="output", fileName=""):
        """ Default init """
        self.fileStateDictionary = {} # file dictionary holding all objects
        self.mode = mode # test mode
        # use default filename unless specified by initiator
        if fileName == "" and type != "": # assume output files
            self.filename = os.path.join(workDir, "fileState-%s-%s.pickle" % (type, jobId))
        else:
            self.filename = os.path.join(workDir, fileName)
        # add mode variable if needed (e.g. mode="test")
        if self.mode != "":
            self.filename = self.filename.replace(".pickle", "-%s.pickle" % (self.mode))
        # load the dictionary from file if it exists
        if os.path.exists(self.filename):
            tolog("Using file state dictionary: %s" % (self.filename))
            # NOTE(review): the status returned by get() is ignored here;
            # a deserialization failure only leaves the dictionary empty.
            status = self.get()
        else:
            tolog("File does not exist: %s (will be created)" % (self.filename))

    def get(self):
        """ Read the file state dictionary from the pickle file.

        Returns True on success, False if the file could not be opened
        or deserialized (the dictionary is left untouched in that case).
        """
        status = False

        # De-serialize the file state file
        try:
            fp = open(self.filename, "r")
        except:
            tolog("FILESTATE FAILURE: get function could not open file: %s" % self.filename)
            pass
        else:
            from pickle import load
            try:
                # load the dictionary from file
                self.fileStateDictionary = load(fp)
            except:
                tolog("FILESTATE FAILURE: could not deserialize file: %s" % self.filename)
                pass
            else:
                status = True
            fp.close()

        return status

    def put(self):
        """
        Create/Update the file state file

        Serializes the whole dictionary; returns True on success.  On an
        open failure, runs a whoami/ls diagnostic and logs its output.
        """
        status = False

        # write pickle file
        from pickle import dump
        try:
            fp = open(self.filename, "w")
        except Exception, e:
            tolog("FILESTATE FAILURE: Could not open file state file: %s, %s" % (self.filename, str(e)))
            _cmd = "whoami; ls -lF %s" % (self.filename)
            tolog("Executing command: %s" % (_cmd))
            ec, rs = commands.getstatusoutput(_cmd)
            tolog("%d, %s" % (ec, rs))
        else:
            try:
                # write the dictionary to file
                dump(self.fileStateDictionary, fp)
            except Exception, e:
                tolog("FILESTATE FAILURE: Could not pickle data to file state file: %s, %s" % (self.filename, str(e)))
            else:
                status = True
            fp.close()

        return status

    def getNumberOfFiles(self):
        """ Get the number of files from the file state dictionary """
        return len(self.fileStateDictionary.keys())

    def getStateList(self, filename):
        """ Get the current state list for a file.

        Returns ["", ""] for a file that is not in the dictionary.
        """
        if self.fileStateDictionary.has_key(filename):
            state_list = self.fileStateDictionary[filename]
        else:
            state_list = ["", ""]
        return state_list

    def updateStateList(self, filename, state_list):
        """ Update the state list for a file (in memory only; see put()) """
        status = False
        try:
            self.fileStateDictionary[filename] = state_list
        except Exception, e:
            tolog("FILESTATE FAILURE: could not update state list for file: %s, %s" % (filename, str(e)))
        else:
            status = True
        return status

    def getFileState(self, filename):
        """ Return the current state of a given file.

        NOTE: this returns the full two-element state list
        [file_state, reg_state/transfer_mode], not a single state string.
        """
        # get current state list
        return self.getStateList(filename)

    def updateState(self, filename, mode="file_state", state="not_transferred"):
        """ Update the file or registration state for a file.

        Slot 0 of the state list holds "file_state"; slot 1 is shared by
        "reg_state" (output files) and "transfer_mode" (input files).
        Persists the dictionary with put() after a successful update and
        returns the combined status.
        """
        status = False

        # get current state list
        state_list = self.getStateList(filename)

        # update file state
        try:
            if mode == "file_state":
                state_list[0] = state
            elif mode == "reg_state" or mode == "transfer_mode":
                state_list[1] = state
            else:
                tolog("FILESTATE FAILURE: unknown state: %s" % (mode))
        except Exception, e:
            tolog("FILESTATE FAILURE: %s" % str(e))
        else:
            # update state list
            status = self.updateStateList(filename, state_list)
            # update the file state file for every update (necessary since a failed put operation can abort everything)
            if status:
                status = self.put()

        return status

    def resetStates(self, file_list, type="output"):
        """ Set all states in file list to not_created, not_registered """
        # note: file state will be reset completely

        tolog("Resetting file list: %s" % str(file_list))

        # initialize file state dictionary
        self.fileStateDictionary = {}
        if type == "output":
            for filename in file_list:
                self.fileStateDictionary[filename] = ['not_created', 'not_registered']
        else: # input
            for filename in file_list:
                self.fileStateDictionary[filename] = ['not_transferred', 'copy_to_scratch']

        # write to file
        # NOTE(review): the put() status is captured but never returned,
        # so callers cannot detect a failed reset.
        status = self.put()

    def hasOnlyCopyToScratch(self):
        """ Check if there are only copy_to_scratch transfer modes in the file dictionary """
        status = True

        # loop over all input files and see if there is any non-copy_to_scratch transfer mode
        for filename in self.fileStateDictionary.keys():
            # get the file states
            states = self.fileStateDictionary[filename]
            tolog("filename=%s states=%s"%(filename,str(states)))
            if states[1] != 'copy_to_scratch':
                status = False
                break

        return status

    def dumpFileStates(self, type="output"):
        """ Print all the files and their states """

        if type == "output":
            tolog("File name / File state / Registration state")
        else:
            tolog("File name / File state / Transfer mode")
        tolog("-"*100)
        n = self.getNumberOfFiles()
        i = 1
        if n > 0:
            # dump in sorted order for readable logs
            sorted_keys = self.fileStateDictionary.keys()
            sorted_keys.sort()
            for filename in sorted_keys:
                states = self.fileStateDictionary[filename]
                if len(states) == 2:
                    tolog("%d. %s\t%s\t%s" % (i, filename, states[0], states[1]))
                else:
                    tolog("%s\t-\t-" % (filename))
                i += 1
        else:
            tolog("(No files)")
|
|
## Compiled from /files/home/nholtz/work/git/structural-analysis/matrix-methods/frame2d/Frame2D/Frame2D_Output.ipynb on Sun Dec 10 13:13:42 2017
## DO NOT EDIT THIS FILE. YOUR CHANGES WILL BE LOST!!
## In [2]:
from salib import extend, NBImporter
from Tables import Table, DataSource
## In [3]:
from Frame2D_Base import Frame2D
import Frame2D_Input
## In [5]:
@extend
class Frame2D:

    def write_table(self,tablename,dsname=None,prefix=None,record=True,precision=None,args=(),makedir=False):
        """Write one table to the data source, building it on demand.

        The table is taken from ``self.rawdata`` when already cached there;
        otherwise it is built by calling the matching ``list_<tablename>``
        method with ``*args`` and the ``COLUMNS_<tablename>`` column list.
        When ``record`` is true the table is cached back on
        ``self.rawdata``.  Returns the table that was written.
        """
        table = getattr(self.rawdata, tablename, None)
        if table is None:
            builder = getattr(self, 'list_' + tablename, None)
            if callable(builder):
                table = Table(
                    data=builder(*args),
                    tablename=tablename,
                    columns=getattr(self, 'COLUMNS_' + tablename),
                )
        if table is None:
            raise ValueError("Unable to find table '{}'".format(tablename))
        DataSource.write_table(
            table, dsname=dsname, prefix=prefix, tablename=tablename,
            precision=precision, makedir=makedir,
        )
        if record:
            setattr(self.rawdata, tablename, table)
        return table
## In [8]:
@extend
class Frame2D:

    def list_nodes(self):
        """Return (id, x, y) rows for every node."""
        rows = []
        for node in self.nodes.values():
            rows.append((node.id, node.x, node.y))
        return rows
## In [13]:
@extend
class Frame2D:

    def list_supports(self):
        """Return (id, c0, c1, c2) rows for every constrained node, padding
        the constraint tuple with '' entries up to three columns."""
        rows = []
        for node in self.nodes.values():
            if not node.constraints:
                continue
            constraints = tuple(node.constraints)
            # negative repeat counts yield an empty tuple, so tuples of
            # three or more constraints are left untouched
            constraints += ('',) * (3 - len(constraints))
            rows.append((node.id,) + constraints)
        return rows
## In [19]:
@extend
class Frame2D:

    def list_members(self):
        """Return (id, j-node id, k-node id) rows for every member."""
        rows = []
        for member in self.members.values():
            rows.append((member.id, member.nodej.id, member.nodek.id))
        return rows
## In [22]:
@extend
class Frame2D:

    def list_releases(self):
        """Return (id, release...) rows for members that have releases."""
        return [
            (member.id,) + tuple(member.releases)
            for member in self.members.values()
            if member.releases
        ]
## In [25]:
@extend
class Frame2D:

    def list_properties(self):
        """Return (id, size, Ix, A) rows for every member."""
        rows = []
        for member in self.members.values():
            rows.append((member.id, member.size, member.Ix, member.A))
        return rows
## In [28]:
@extend
class Frame2D:

    def list_node_loads(self):
        """Return (loadid, node id, direction, value) rows, one per
        non-zero component of each node load."""
        rows = []
        directions = ['FX','FY','FZ']
        for loadid, node, components in self.nodeloads:
            for axis, label in enumerate(directions):
                if components[axis]:
                    rows.append((loadid, node.id, label, components[axis]))
        return rows
## In [31]:
@extend
class Frame2D:

    def list_support_displacements(self):
        """Return (loadid, node id, direction, value) rows, one per
        non-zero component of each imposed support displacement."""
        rows = []
        directions = ['DX','DY','RZ']
        for loadid, node, components in self.nodedeltas:
            for axis, label in enumerate(directions):
                if components[axis]:
                    rows.append((loadid, node.id, label, components[axis]))
        return rows
## In [34]:
from MemberLoads import unmakeMemberLoad
@extend
class Frame2D:

    def list_member_loads(self):
        """Return one record dict per member load, tagged with the member
        id (``MEMBERID``) and the load case id (``LOAD``)."""
        records = []
        for loadid, member, mload in self.memberloads:
            record = unmakeMemberLoad(mload)
            record['MEMBERID'] = member.id
            record['LOAD'] = loadid
            records.append(record)
        return records
## In [38]:
@extend
class Frame2D:

    def list_load_combinations(self):
        """Return (case, load, factor) rows for every load combination."""
        rows = []
        for case, load, factor in self.loadcombinations:
            rows.append((case, load, factor))
        return rows
## In [42]:
@extend
class Frame2D:

    # Column headings for the signatures table written by write_table().
    COLUMNS_signatures = ['NAME','PATH','SIGNATURE']

    def list_signatures(self):
        """Return the signature of every Table cached on self.rawdata."""
        rows = []
        for value in vars(self.rawdata).values():
            # exact type check (not isinstance), as in the original design
            if type(value) is Table:
                rows.append(value.signature())
        return rows
## In [45]:
import os, os.path
@extend
class Frame2D:

    def write_all(self,dsname,makedir=False):
        """Write every input table to data source *dsname*.

        Only the first write may create the directory (``makedir``); the
        signatures table is written last and is never cached back.
        """
        self.write_table('nodes', dsname=dsname, makedir=makedir)
        for tablename in ('supports', 'members', 'releases', 'properties',
                          'node_loads', 'support_displacements',
                          'member_loads', 'load_combinations'):
            self.write_table(tablename, dsname=dsname)
        self.write_table('signatures', dsname=dsname, record=False)
## In [49]:
@extend
class Frame2D:

    # Column headings for the node_displacements results table.
    COLUMNS_node_displacements = ['NODEID','DX','DY','RZ']

    def list_node_displacements(self,rs):
        """Return (node id, dx, dy, rz) rows from result set *rs*, or []
        when *rs* carries no node displacements."""
        if not hasattr(rs,'node_displacements'):
            return []
        displacements = rs.node_displacements
        rows = []
        for node in self.nodes.values():
            vec = displacements[node.dofnums]
            rows.append((node.id, vec[0,0], vec[1,0], vec[2,0]))
        return rows
## In [52]:
@extend
class Frame2D:

    # Column headings for the reaction_forces results table.
    COLUMNS_reaction_forces = ['NODEID','FX','FY','MZ']

    def list_reaction_forces(self,rs):
        """Return [node id, FX, FY, MZ] rows for every constrained node,
        with None in unconstrained directions; [] when *rs* carries no
        reaction forces."""
        if not hasattr(rs,'reaction_forces'):
            return []
        reactions = rs.reaction_forces
        rows = []
        for node in self.nodes.values():
            if not node.constraints:
                continue
            row = [node.id, None, None, None]
            for direction in node.constraints:
                axis = node.DIRECTIONS[direction]
                dof = node.dofnums[axis]
                # the reaction vector is indexed by dof number offset by
                # self.nfree
                row[axis+1] = reactions[dof-self.nfree,0]
            rows.append(row)
        return rows
## In [55]:
@extend
class Frame2D:

    # Column headings for the member_end_forces results table.
    COLUMNS_member_end_forces = 'MEMBERID,FXJ,FYJ,MZJ,FXK,FYK,MZK'.split(',')

    def list_member_end_forces(self,rs):
        """Return (member id, fxj, fyj, mzj, fxk, fyk, mzk) rows from the
        member end forces of result set *rs*, or [] when absent."""
        if not hasattr(rs,'member_efs'):
            return []
        end_forces = rs.member_efs
        rows = []
        for member in self.members.values():
            fefs = end_forces[member].fefs
            rows.append((member.id,) + tuple(fefs[i,0] for i in range(6)))
        return rows
## In [58]:
@extend
class Frame2D:

    def write_results(self,dsname,rs):
        """Write the result tables for the load case of result set *rs*
        under data source *dsname*, adding the P-Delta forces table when
        the analysis computed them."""
        tablenames = ['node_displacements', 'reaction_forces',
                      'member_end_forces']
        if rs.pdelta:
            tablenames.append('pdelta_forces')
        makedir = True  # only the first write may create the directory
        for tablename in tablenames:
            self.write_table(tablename, dsname=dsname, prefix=rs.loadcase,
                             record=False, precision=15, args=(rs,),
                             makedir=makedir)
            makedir = False
## In [ ]:
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Signature of the optional 'cls' response-transform callback accepted by
# the operations below.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Shared serializer for path/query/header parameters of this module's
# request builders; client-side validation is disabled.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_available_stacks_request(
    *,
    os_type_selected: Optional[Union[str, "_models.Enum4"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for /providers/Microsoft.Web/availableStacks.

    :keyword os_type_selected: optional filter; when given it is sent as
        the ``osTypeSelected`` query parameter.
    """
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/providers/Microsoft.Web/availableStacks')

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if os_type_selected is not None:
        query_parameters['osTypeSelected'] = _SERIALIZER.query("os_type_selected", os_type_selected, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_function_app_stacks_request(
    *,
    stack_os_type: Optional[Union[str, "_models.Enum5"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for /providers/Microsoft.Web/functionAppStacks.

    :keyword stack_os_type: optional stack OS filter; when given it is sent
        as the ``stackOsType`` query parameter.
    """
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/providers/Microsoft.Web/functionAppStacks')

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if stack_os_type is not None:
        query_parameters['stackOsType'] = _SERIALIZER.query("stack_os_type", stack_os_type, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_function_app_stacks_for_location_request(
    location: str,
    *,
    stack_os_type: Optional[Union[str, "_models.Enum6"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for
    /providers/Microsoft.Web/locations/{location}/functionAppStacks.

    :param location: substituted into the URL path.
    :keyword stack_os_type: optional stack OS filter; when given it is sent
        as the ``stackOsType`` query parameter.
    """
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/providers/Microsoft.Web/locations/{location}/functionAppStacks')
    path_format_arguments = {
        "location": _SERIALIZER.url("location", location, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if stack_os_type is not None:
        query_parameters['stackOsType'] = _SERIALIZER.query("stack_os_type", stack_os_type, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_web_app_stacks_for_location_request(
    location: str,
    *,
    stack_os_type: Optional[Union[str, "_models.Enum7"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for
    /providers/Microsoft.Web/locations/{location}/webAppStacks.

    :param location: substituted into the URL path.
    :keyword stack_os_type: optional stack OS filter; when given it is sent
        as the ``stackOsType`` query parameter.
    """
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/providers/Microsoft.Web/locations/{location}/webAppStacks')
    path_format_arguments = {
        "location": _SERIALIZER.url("location", location, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if stack_os_type is not None:
        query_parameters['stackOsType'] = _SERIALIZER.query("stack_os_type", stack_os_type, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_operations_request(
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for /providers/Microsoft.Web/operations."""
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/providers/Microsoft.Web/operations')

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_web_app_stacks_request(
    *,
    stack_os_type: Optional[Union[str, "_models.Enum8"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for /providers/Microsoft.Web/webAppStacks.

    :keyword stack_os_type: optional stack OS filter; when given it is sent
        as the ``stackOsType`` query parameter.
    """
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/providers/Microsoft.Web/webAppStacks')

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if stack_os_type is not None:
        query_parameters['stackOsType'] = _SERIALIZER.query("stack_os_type", stack_os_type, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_available_stacks_on_prem_request(
    subscription_id: str,
    *,
    os_type_selected: Optional[Union[str, "_models.Enum9"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for
    /subscriptions/{subscriptionId}/providers/Microsoft.Web/availableStacks.

    :param subscription_id: substituted into the URL path.
    :keyword os_type_selected: optional filter; when given it is sent as
        the ``osTypeSelected`` query parameter.
    """
    api_version = "2020-12-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Web/availableStacks')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if os_type_selected is not None:
        query_parameters['osTypeSelected'] = _SERIALIZER.query("os_type_selected", os_type_selected, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class ProviderOperations(object):
"""ProviderOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, configuration and (de)serializers
        used by the operation methods of this group."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def get_available_stacks(
        self,
        os_type_selected: Optional[Union[str, "_models.Enum4"]] = None,
        **kwargs: Any
    ) -> Iterable["_models.ApplicationStackCollection"]:
        """Get available application frameworks and their versions.

        Description for Get available application frameworks and their versions.

        :param os_type_selected:
        :type os_type_selected: str or ~azure.mgmt.web.v2020_12_01.models.Enum4
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ApplicationStackCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_12_01.models.ApplicationStackCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationStackCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # The first page uses the operation's templated URL; subsequent
            # pages reuse the service-provided next_link as the template.
            if not next_link:
                request = build_get_available_stacks_request(
                    os_type_selected=os_type_selected,
                    template_url=self.get_available_stacks.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_get_available_stacks_request(
                    os_type_selected=os_type_selected,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next link, item iterator),
            # applying the optional 'cls' transform to the element list.
            deserialized = self._deserialize("ApplicationStackCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Execute one page request; anything other than HTTP 200 is
            # mapped/raised as an ARM-formatted error.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    get_available_stacks.metadata = {'url': '/providers/Microsoft.Web/availableStacks'}  # type: ignore
@distributed_trace
def get_function_app_stacks(
    self,
    stack_os_type: Optional[Union[str, "_models.Enum5"]] = None,
    **kwargs: Any
) -> Iterable["_models.FunctionAppStackCollection"]:
    """Get available Function app frameworks and their versions.

    Description for Get available Function app frameworks and their versions.

    :param stack_os_type: Stack OS Type.
    :type stack_os_type: str or ~azure.mgmt.web.v2020_12_01.models.Enum5
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either FunctionAppStackCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_12_01.models.FunctionAppStackCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.FunctionAppStackCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def build_page_request(continuation=None):
        # First page is addressed by the operation's URL template; later
        # pages follow the server-supplied continuation link verbatim.
        request = build_get_function_app_stacks_request(
            stack_os_type=stack_os_type,
            template_url=continuation or self.get_function_app_stacks.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        if continuation:
            request.method = "GET"
        return request

    def unpack_page(pipeline_response):
        # Deserialize one page into (next-link-or-None, iterator of items).
        collection = self._deserialize("FunctionAppStackCollection", pipeline_response)
        items = collection.value
        if cls:
            items = cls(items)
        return collection.next_link or None, iter(items)

    def fetch_page(continuation=None):
        request = build_page_request(continuation)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(fetch_page, unpack_page)
get_function_app_stacks.metadata = {'url': '/providers/Microsoft.Web/functionAppStacks'}  # type: ignore
@distributed_trace
def get_function_app_stacks_for_location(
    self,
    location: str,
    stack_os_type: Optional[Union[str, "_models.Enum6"]] = None,
    **kwargs: Any
) -> Iterable["_models.FunctionAppStackCollection"]:
    """Get available Function app frameworks and their versions for location.

    Description for Get available Function app frameworks and their versions for location.

    :param location: Function App stack location.
    :type location: str
    :param stack_os_type: Stack OS Type.
    :type stack_os_type: str or ~azure.mgmt.web.v2020_12_01.models.Enum6
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either FunctionAppStackCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_12_01.models.FunctionAppStackCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.FunctionAppStackCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def build_page_request(continuation=None):
        # First page uses the operation template; later pages follow the
        # continuation link handed back by the service.
        request = build_get_function_app_stacks_for_location_request(
            location=location,
            stack_os_type=stack_os_type,
            template_url=continuation or self.get_function_app_stacks_for_location.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        if continuation:
            request.method = "GET"
        return request

    def unpack_page(pipeline_response):
        # Deserialize one page into (next-link-or-None, iterator of items).
        collection = self._deserialize("FunctionAppStackCollection", pipeline_response)
        items = collection.value
        if cls:
            items = cls(items)
        return collection.next_link or None, iter(items)

    def fetch_page(continuation=None):
        request = build_page_request(continuation)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(fetch_page, unpack_page)
get_function_app_stacks_for_location.metadata = {'url': '/providers/Microsoft.Web/locations/{location}/functionAppStacks'}  # type: ignore
@distributed_trace
def get_web_app_stacks_for_location(
    self,
    location: str,
    stack_os_type: Optional[Union[str, "_models.Enum7"]] = None,
    **kwargs: Any
) -> Iterable["_models.WebAppStackCollection"]:
    """Get available Web app frameworks and their versions for location.

    Description for Get available Web app frameworks and their versions for location.

    :param location: Web App stack location.
    :type location: str
    :param stack_os_type: Stack OS Type.
    :type stack_os_type: str or ~azure.mgmt.web.v2020_12_01.models.Enum7
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either WebAppStackCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_12_01.models.WebAppStackCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WebAppStackCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def build_page_request(continuation=None):
        # First page uses the operation template; later pages follow the
        # continuation link handed back by the service.
        request = build_get_web_app_stacks_for_location_request(
            location=location,
            stack_os_type=stack_os_type,
            template_url=continuation or self.get_web_app_stacks_for_location.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        if continuation:
            request.method = "GET"
        return request

    def unpack_page(pipeline_response):
        # Deserialize one page into (next-link-or-None, iterator of items).
        collection = self._deserialize("WebAppStackCollection", pipeline_response)
        items = collection.value
        if cls:
            items = cls(items)
        return collection.next_link or None, iter(items)

    def fetch_page(continuation=None):
        request = build_page_request(continuation)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(fetch_page, unpack_page)
get_web_app_stacks_for_location.metadata = {'url': '/providers/Microsoft.Web/locations/{location}/webAppStacks'}  # type: ignore
@distributed_trace
def list_operations(
    self,
    **kwargs: Any
) -> Iterable["_models.CsmOperationCollection"]:
    """Gets all available operations for the Microsoft.Web resource provider. Also exposes resource
    metric definitions.

    Description for Gets all available operations for the Microsoft.Web resource provider. Also
    exposes resource metric definitions.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CsmOperationCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_12_01.models.CsmOperationCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.CsmOperationCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def build_page_request(continuation=None):
        # First page uses the operation template; later pages follow the
        # continuation link handed back by the service.
        request = build_list_operations_request(
            template_url=continuation or self.list_operations.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        if continuation:
            request.method = "GET"
        return request

    def unpack_page(pipeline_response):
        # Deserialize one page into (next-link-or-None, iterator of items).
        collection = self._deserialize("CsmOperationCollection", pipeline_response)
        items = collection.value
        if cls:
            items = cls(items)
        return collection.next_link or None, iter(items)

    def fetch_page(continuation=None):
        request = build_page_request(continuation)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(fetch_page, unpack_page)
list_operations.metadata = {'url': '/providers/Microsoft.Web/operations'}  # type: ignore
@distributed_trace
def get_web_app_stacks(
    self,
    stack_os_type: Optional[Union[str, "_models.Enum8"]] = None,
    **kwargs: Any
) -> Iterable["_models.WebAppStackCollection"]:
    """Get available Web app frameworks and their versions.

    Description for Get available Web app frameworks and their versions.

    :param stack_os_type: Stack OS Type.
    :type stack_os_type: str or ~azure.mgmt.web.v2020_12_01.models.Enum8
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either WebAppStackCollection or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_12_01.models.WebAppStackCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WebAppStackCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def build_page_request(continuation=None):
        # First page uses the operation template; later pages follow the
        # continuation link handed back by the service.
        request = build_get_web_app_stacks_request(
            stack_os_type=stack_os_type,
            template_url=continuation or self.get_web_app_stacks.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        if continuation:
            request.method = "GET"
        return request

    def unpack_page(pipeline_response):
        # Deserialize one page into (next-link-or-None, iterator of items).
        collection = self._deserialize("WebAppStackCollection", pipeline_response)
        items = collection.value
        if cls:
            items = cls(items)
        return collection.next_link or None, iter(items)

    def fetch_page(continuation=None):
        request = build_page_request(continuation)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(fetch_page, unpack_page)
get_web_app_stacks.metadata = {'url': '/providers/Microsoft.Web/webAppStacks'}  # type: ignore
@distributed_trace
def get_available_stacks_on_prem(
    self,
    os_type_selected: Optional[Union[str, "_models.Enum9"]] = None,
    **kwargs: Any
) -> Iterable["_models.ApplicationStackCollection"]:
    """Get available application frameworks and their versions.

    Description for Get available application frameworks and their versions.

    :param os_type_selected:
    :type os_type_selected: str or ~azure.mgmt.web.v2020_12_01.models.Enum9
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ApplicationStackCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_12_01.models.ApplicationStackCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ApplicationStackCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def build_page_request(continuation=None):
        # First page uses the operation template; later pages follow the
        # continuation link handed back by the service.
        request = build_get_available_stacks_on_prem_request(
            subscription_id=self._config.subscription_id,
            os_type_selected=os_type_selected,
            template_url=continuation or self.get_available_stacks_on_prem.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        if continuation:
            request.method = "GET"
        return request

    def unpack_page(pipeline_response):
        # Deserialize one page into (next-link-or-None, iterator of items).
        collection = self._deserialize("ApplicationStackCollection", pipeline_response)
        items = collection.value
        if cls:
            items = cls(items)
        return collection.next_link or None, iter(items)

    def fetch_page(continuation=None):
        request = build_page_request(continuation)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(fetch_page, unpack_page)
get_available_stacks_on_prem.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/availableStacks'}  # type: ignore
|
|
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import mock
import pytest
from google.api_core import exceptions
from google.api_core.future import async_future
class AsyncFuture(async_future.AsyncFuture):
    """Minimal concrete future with canned poll answers for the tests."""

    async def cancel(self):
        return True

    async def cancelled(self):
        return False

    async def done(self):
        return False

    async def running(self):
        return True
@pytest.mark.asyncio
async def test_polling_future_constructor():
    """A fresh future is running, cancellable, and not done or cancelled."""
    future = AsyncFuture()
    assert await future.running()
    assert await future.cancel()
    assert not await future.done()
    assert not await future.cancelled()
@pytest.mark.asyncio
async def test_set_result():
    """set_result() resolves the future and fires done callbacks.

    Bug fix: the original assigned ``callback = mock.Mock()`` and then
    immediately shadowed it with the ``def callback`` below, so the mock was
    dead code; it has been removed.
    """
    future = AsyncFuture()
    future.set_result(1)
    assert await future.result() == 1

    callback_called = asyncio.Event()

    def callback(unused_future):
        callback_called.set()

    # Adding a callback to an already-resolved future must still fire it.
    future.add_done_callback(callback)
    await callback_called.wait()
@pytest.mark.asyncio
async def test_set_exception():
    """set_exception() surfaces via exception()/result() and fires callbacks."""
    future = AsyncFuture()
    err = ValueError("meep")
    future.set_exception(err)
    assert await future.exception() == err
    with pytest.raises(ValueError):
        await future.result()

    fired = asyncio.Event()

    def on_done(unused_future):
        fired.set()

    future.add_done_callback(on_done)
    await fired.wait()
@pytest.mark.asyncio
async def test_invoke_callback_exception():
    """A raising done-callback must not propagate out of the future."""
    future = AsyncFuture()
    future.set_result(42)

    fired = asyncio.Event()

    def exploding_callback(unused_future):
        fired.set()
        raise ValueError()

    # This should not raise, despite the callback causing an exception.
    future.add_done_callback(exploding_callback)
    await fired.wait()
class AsyncFutureWithPoll(AsyncFuture):
    """Future whose done() blocks on an event and counts poll attempts."""

    def __init__(self):
        super().__init__()
        # Number of times done() has been polled; inspected by the tests.
        self.poll_count = 0
        # Tests set() this event to let a pending done() poll complete.
        self.event = asyncio.Event()

    async def done(self):
        # Record the poll, block until the test releases us, then resolve.
        self.poll_count += 1
        await self.event.wait()
        self.set_result(42)
        return True
@pytest.mark.asyncio
async def test_result_with_polling():
    """result() polls done() exactly once and caches the outcome."""
    future = AsyncFutureWithPoll()
    future.event.set()
    first = await future.result()
    assert first == 42
    assert future.poll_count == 1
    # A second call must reuse the cached result without polling again.
    second = await future.result()
    assert second == first
    assert future.poll_count == 1
class AsyncFutureTimeout(AsyncFutureWithPoll):
    """Future whose poll is slow and never completes, to exercise timeouts."""

    async def done(self):
        # Simulate a slow poll that never reports completion.
        await asyncio.sleep(0.2)
        return False
@pytest.mark.asyncio
async def test_result_timeout():
    """result() gives up with TimeoutError once its deadline elapses."""
    future = AsyncFutureTimeout()
    with pytest.raises(asyncio.TimeoutError):
        await future.result(timeout=0.2)
@pytest.mark.asyncio
async def test_exception_timeout():
    """exception() gives up with TimeoutError once its deadline elapses."""
    future = AsyncFutureTimeout()
    with pytest.raises(asyncio.TimeoutError):
        await future.exception(timeout=0.2)
@pytest.mark.asyncio
async def test_result_timeout_with_retry():
    """result() also times out when the deadline spans multiple poll cycles.

    Bug fix: this test previously awaited ``future.exception(...)``, making it
    a duplicate of test_exception_timeout; per its name it exercises result().
    """
    future = AsyncFutureTimeout()
    with pytest.raises(asyncio.TimeoutError):
        await future.result(timeout=0.4)
class AsyncFutureTransient(AsyncFutureWithPoll):
    """Future whose done() raises a scripted sequence of errors before resolving."""

    def __init__(self, errors):
        super().__init__()
        # Remaining exception classes to raise, consumed front-to-back.
        self._errors = errors

    async def done(self):
        # While scripted errors remain, raise the next one instead of polling.
        if self._errors:
            error_cls = self._errors[0]
            self._errors = self._errors[1:]
            raise error_cls("testing")
        self.poll_count += 1
        self.set_result(42)
        return True
@mock.patch("asyncio.sleep", autospec=True)
@pytest.mark.asyncio
async def test_result_transient_error(unused_sleep):
    """Retryable API errors during polling are absorbed, not raised."""
    transient = (
        exceptions.TooManyRequests,
        exceptions.InternalServerError,
        exceptions.BadGateway,
    )
    future = AsyncFutureTransient(transient)
    assert await future.result() == 42
    assert future.poll_count == 1
    # The cached result is reused; no further polling happens.
    assert await future.result() == 42
    assert future.poll_count == 1
@pytest.mark.asyncio
async def test_callback_concurrency():
    """The background poll runs once while a done-callback awaits completion."""
    future = AsyncFutureWithPoll()

    fired = asyncio.Event()

    def on_done(unused_future):
        fired.set()

    future.add_done_callback(on_done)
    # Give the polling task a moment to start and block on the event.
    await asyncio.sleep(1)
    assert future.poll_count == 1
    future.event.set()
    await fired.wait()
@pytest.mark.asyncio
async def test_double_callback_concurrency():
    """Both registered callbacks fire after a single background poll."""
    future = AsyncFutureWithPoll()

    first_fired = asyncio.Event()

    def first_callback(unused_future):
        first_fired.set()

    second_fired = asyncio.Event()

    def second_callback(unused_future):
        second_fired.set()

    future.add_done_callback(first_callback)
    future.add_done_callback(second_callback)
    # Give the polling task a moment to start and block on the event.
    await asyncio.sleep(1)
    future.event.set()
    assert future.poll_count == 1
    await first_fired.wait()
    await second_fired.wait()
|
|
import talib
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier
import numpy as np
import pandas
def initialize(context):
    """Configure models, tunable parameters, scheduled jobs and state."""
    set_symbol_lookup_date('2010-01-01')

    # Parameters to be changed
    context.model1 = RandomForestClassifier(n_estimators=300, max_depth=6,
                                            max_features=None)
    context.model2 = RandomForestClassifier(n_estimators=300, max_depth=6,
                                            max_features=None)
    context.lookback = 14
    context.history_range = 1000
    context.beta_coefficient = 0.0
    context.percentage_change = 0.034
    context.maximum_leverage = 2.0
    context.number_of_stocks = 150
    context.maximum_pe_ratio = 8
    context.maximum_market_cap = 0.1e9
    # End of parameters

    # Retrain and rebalance monthly; evaluate trades daily at the open.
    schedule_function(create_model, date_rules.month_start(), time_rules.market_open())
    schedule_function(rebalance, date_rules.month_start(), time_rules.market_open())
    schedule_function(trade, date_rules.every_day(), time_rules.market_open())

    # Bookkeeping state used across the scheduled functions.
    context.algorithm_returns = []
    context.longs = []
    context.shorts = []
    context.training_stocks = symbols('SPY')
    context.trading_stocks = []
    context.beta = 1.0
    context.beta_list = []
    context.completed = False
def before_trading_start(context):
    """Select the tradable universe once per rebalance cycle.

    Picks up to ``number_of_stocks`` NASDAQ/NYSE names with a low P/E ratio
    and a small market cap; skipped until ``rebalance`` clears
    ``context.completed``.
    """
    if context.completed: return
    # Bug fix: the original joined the two exchange comparisons with Python's
    # `or`, which evaluates the truthiness of the first column expression and
    # therefore only ever filtered on 'NAS'. Use the query API's in_() so
    # NYSE listings are included as intended.
    fundamental_df = get_fundamentals(query(fundamentals.valuation.market_cap)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(
            ['NAS', 'NYSE']))
        .filter(fundamentals.valuation_ratios.pe_ratio < context.maximum_pe_ratio)
        .filter(fundamentals.valuation.market_cap < context.maximum_market_cap)
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(context.number_of_stocks))
    update_universe(fundamental_df.columns.values)
    context.trading_stocks = [stock for stock in fundamental_df]
    context.completed = True
def create_model(context, data):
    """Fit both classifiers on engineered features from the training symbols.

    Each sample is `lookback` days each of: price changes, ATR upside/downside
    breakout signals, and Bollinger band levels. Labels are +1/-1 when the
    future price change is amplified beyond `percentage_change`, else 0.
    """
    X = []
    Y = []
    for S in context.training_stocks:
        recent_prices = history(context.history_range, '1d', 'price')[S].values
        recent_lows = history(context.history_range, '1d', 'low')[S].values
        recent_highs = history(context.history_range, '1d', 'high')[S].values
        recent_closes = history(context.history_range, '1d', 'close_price')[S].values
        # 14-day Average True Range, used as a volatility band around closes.
        atr = talib.ATR(recent_highs, recent_lows, recent_closes, timeperiod=14)
        # NOTE(review): np.roll(..., 2) wraps the first two closes around from
        # the end of the series, so the earliest signals mix future data --
        # presumably a "2 days ago" close was intended. TODO confirm.
        prev_close = np.roll(recent_closes, 2)
        upside_signal = (recent_prices - (prev_close + atr)).tolist()
        downside_signal = (prev_close - (recent_prices + atr)).tolist()
        price_changes = np.diff(recent_prices).tolist()
        upper, middle, lower = talib.BBANDS(recent_prices,timeperiod=10,nbdevup=2,nbdevdn=2,matype=1)
        upper = upper.tolist()
        middle = middle.tolist()
        lower = lower.tolist()
        # Start at 15 so the ATR/BBANDS warm-up NaNs fall outside the window.
        for i in range(15, context.history_range-context.lookback-1):
            Z = price_changes[i:i+context.lookback] + upside_signal[i:i+context.lookback] + downside_signal[i:i+context.lookback] +\
                upper[i:i+context.lookback] + middle[i:i+context.lookback] + lower[i:i+context.lookback]
            # Skip any window containing NaN/inf feature values.
            if (np.any(np.isnan(Z)) or not np.all(np.isfinite(Z))): continue
            X.append(Z)
            # Label by whether the move after the window is amplified vs. the
            # move at the window start, and by its direction.
            if abs(price_changes[i+context.lookback]) > abs(price_changes[i]*(1+context.percentage_change)):
                if price_changes[i+context.lookback] > 0:
                    Y.append(+1)
                else:
                    Y.append(-1)
            else:
                Y.append(0)
    # Both models are trained on identical data; trades require agreement.
    context.model1.fit(X, Y)
    context.model2.fit(X, Y)
def rebalance(context, data):
    """Monthly hook: allow before_trading_start to rebuild the universe."""
    context.completed = False
def trade(context, data):
    """Daily: predict with both models and adjust positions where they agree.

    A stock goes long when both models predict +1, short when both predict -1,
    and is flattened when both predict 0; disagreements leave it untouched.
    Open slots are then sized equally and beta is hedged with SPY.

    Bug fixes vs. the original:
    - Items were removed from ``context.trading_stocks`` (and later from
      ``longs``/``shorts``) while iterating those same lists, which skips the
      element following each removal, and a stock matching two removal
      conditions raised ValueError on the second ``remove``. The lists are now
      rebuilt with filters.
    - Position sizing now uses a denominator computed once, instead of one
      that shrank as entries were removed mid-loop.
    """
    if (context.account.leverage > context.maximum_leverage): return
    if not context.model1: return

    # Keep only stocks that are present in today's data, still trading, and
    # not leveraged ETFs (filtering instead of removing while iterating).
    context.trading_stocks = [
        stock for stock in context.trading_stocks
        if stock in data
        and stock.security_end_date >= get_datetime()
        and stock not in security_lists.leveraged_etf_list
    ]

    for one_stock in context.trading_stocks:
        if get_open_orders(one_stock): continue
        recent_prices = history(context.lookback+30, '1d', 'price')[one_stock].values
        recent_lows = history(context.lookback+30, '1d', 'low')[one_stock].values
        recent_highs = history(context.lookback+30, '1d', 'high')[one_stock].values
        recent_closes = history(context.lookback+30, '1d', 'close_price')[one_stock].values
        if (np.any(np.isnan(recent_prices)) or not np.all(np.isfinite(recent_prices))): continue
        if (np.any(np.isnan(recent_lows)) or not np.all(np.isfinite(recent_lows))): continue
        if (np.any(np.isnan(recent_highs)) or not np.all(np.isfinite(recent_highs))): continue
        if (np.any(np.isnan(recent_closes)) or not np.all(np.isfinite(recent_closes))): continue
        # Rebuild the same feature vector used at training time.
        atr = talib.ATR(recent_highs, recent_lows, recent_closes, timeperiod=14)
        prev_close = np.roll(recent_closes, 2)
        upside_signal = (recent_prices - (prev_close + atr)).tolist()
        downside_signal = (prev_close - (recent_prices + atr)).tolist()
        price_changes = np.diff(recent_prices).tolist()
        upper, middle, lower = talib.BBANDS(recent_prices,timeperiod=10,nbdevup=2,nbdevdn=2,matype=1)
        upper = upper.tolist()
        middle = middle.tolist()
        lower = lower.tolist()
        L = context.lookback
        Z = price_changes[-L:] + upside_signal[-L:] + downside_signal[-L:] + upper[-L:] + middle[-L:] + lower[-L:]
        if (np.any(np.isnan(Z)) or not np.all(np.isfinite(Z))): continue
        prediction1 = context.model1.predict(Z)
        prediction2 = context.model2.predict(Z)
        # Trade only when the two independently trained models agree.
        if prediction1 == prediction2:
            if prediction1 > 0:
                if one_stock in context.shorts:
                    order_target_percent(one_stock, 0)
                    context.shorts.remove(one_stock)
                elif not one_stock in context.longs:
                    context.longs.append(one_stock)
            elif prediction1 < 0:
                if one_stock in context.longs:
                    order_target_percent(one_stock, 0)
                    context.longs.remove(one_stock)
                elif not one_stock in context.shorts:
                    context.shorts.append(one_stock)
            else:
                order_target_percent(one_stock, 0)
                if one_stock in context.longs: context.longs.remove(one_stock)
                elif one_stock in context.shorts: context.shorts.remove(one_stock)

    if get_open_orders(): return

    # Drop book entries that fell out of the tradable universe, then size all
    # remaining slots equally using a fixed denominator.
    context.longs = [s for s in context.longs if s in context.trading_stocks]
    context.shorts = [s for s in context.shorts if s in context.trading_stocks]
    open_slots = len(context.longs) + len(context.shorts)
    if open_slots > 0:
        for one_stock in context.longs:
            order_target_percent(one_stock, context.maximum_leverage/open_slots)
        for one_stock in context.shorts:
            order_target_percent(one_stock, (-1.0)*context.maximum_leverage/open_slots)
    # Hedge the portfolio's beta exposure with SPY.
    order_target_percent(symbol('SPY'), (-1.0)*context.maximum_leverage*(context.beta*context.beta_coefficient))
def estimateBeta(priceY,priceX):
    """Estimate the beta of the algorithm's returns against benchmark prices.

    :param priceY: pandas.Series of the algorithm's recent returns.
    :param priceX: pandas.Series of benchmark prices; converted to returns
        here via price ratios.
    :return: covariance(algo, benchmark) / variance(benchmark), or 1.00 when
        the two series cannot be aligned to a usable common length.
    """
    algorithm_returns = priceY
    # NOTE(review): np.roll wraps the last price around to the front, so the
    # first benchmark "return" is spurious and dropna() does not remove it;
    # kept as-is to preserve existing behavior -- TODO confirm intent.
    benchmark_returns = (priceX/np.roll(priceX,1)-1).dropna().values
    # Bug fix: replaced the Python-2-only `<>` operator with `!=` (identical
    # semantics on Python 2, and valid on Python 3).
    if len(algorithm_returns) != len(benchmark_returns):
        minlen = min(len(algorithm_returns), len(benchmark_returns))
        if minlen > 2:
            # Align on the most recent overlapping window.
            algorithm_returns = algorithm_returns[-minlen:]
            benchmark_returns = benchmark_returns[-minlen:]
        else:
            return 1.00
    returns_matrix = np.vstack([algorithm_returns, benchmark_returns])
    C = np.cov(returns_matrix, ddof=1)
    algorithm_covariance = C[0][1]
    benchmark_variance = C[1][1]
    beta = algorithm_covariance / benchmark_variance
    return beta
def handle_data(context, data):
    """Record diagnostics and maintain a rolling beta estimate each bar."""
    record(cash = context.portfolio.cash/(1000000))
    record(lev = context.account.leverage)
    context.algorithm_returns.append(context.portfolio.returns)
    # Need more than 30 observations before estimating beta.
    if len(context.algorithm_returns) <= 30:
        return
    spy_prices = history(len(context.algorithm_returns), '1d', 'price')[symbol('SPY')]
    context.beta_list.append(estimateBeta(pandas.Series(context.algorithm_returns[-30:]), spy_prices))
    # Keep a 7-entry window and smooth beta with its mean.
    if len(context.beta_list) > 7:
        context.beta_list.pop(0)
    context.beta = np.mean(context.beta_list)
    record(Beta=context.beta)
|
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy engine for neutron. Largely copied from nova.
"""
import collections
import logging as std_logging
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
from oslo_utils import importutils
import six
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.i18n import _LE, _LW
LOG = logging.getLogger(__name__)
_ENFORCER = None
ADMIN_CTX_POLICY = 'context_is_admin'
ADVSVC_CTX_POLICY = 'context_is_advsvc'
def reset():
    """Tear down the global Enforcer so the next init() builds a fresh one."""
    global _ENFORCER
    if _ENFORCER is not None:
        _ENFORCER.clear()
        _ENFORCER = None
def init(conf=cfg.CONF, policy_file=None):
    """Init an instance of the Enforcer class (idempotent).

    :param conf: oslo.config configuration object to build the Enforcer from.
    :param policy_file: Optional explicit policy file path.
    """
    global _ENFORCER
    if _ENFORCER is None:
        _ENFORCER = policy.Enforcer(conf, policy_file=policy_file)
        _ENFORCER.load_rules(True)
def refresh(policy_file=None):
    """Reset policy and init a new instance of Enforcer.

    :param policy_file: Optional policy file path forwarded to init(); falls
        back to the configured default when None.
    """
    reset()
    init(policy_file=policy_file)
def get_resource_and_action(action, pluralized=None):
    """Extract resource and action (write, read) from api operation.

    :param action: API operation name, e.g. 'create_network' or
        'get_port:fixed_ips'.
    :param pluralized: Optional explicit collection name overriding the
        naive "<resource>s" pluralization.
    :return: tuple of (collection name, True when the verb is a write).
    """
    verb_and_resource = action.split(':', 1)[0].split('_', 1)
    collection = pluralized if pluralized else "%ss" % verb_and_resource[-1]
    return (collection, verb_and_resource[0] != 'get')
def set_rules(policies, overwrite=True):
    """Set rules based on the provided dict of rules.

    :param policies: New policies to use. It should be an instance of dict.
    :param overwrite: Whether to overwrite current rules or update them
                      with the new rules.
    """
    # Bug fix: init() must run before the enforcer is dereferenced; the
    # original logged _ENFORCER.policy_path first, raising AttributeError
    # when set_rules() was the first policy call in the process.
    init()
    LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path)
    _ENFORCER.set_rules(policies, overwrite)
def _is_attribute_explicitly_set(attribute_name, resource, target, action):
    """Verify that an attribute is present and is explicitly set."""
    if 'update' in action:
        # In the case of update, the function should not pay attention to a
        # default value of an attribute, but check whether it was explicitly
        # marked as being updated instead.
        return (attribute_name in target[const.ATTRIBUTES_TO_UPDATE] and
                target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED)
    # Non-update actions: the attribute counts as explicitly set only when the
    # schema declares a default, the attribute appears in the request, it is a
    # real value (identity check against the ATTR_NOT_SPECIFIED sentinel), and
    # it differs from the schema default.
    return ('default' in resource[attribute_name] and
            attribute_name in target and
            target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and
            target[attribute_name] != resource[attribute_name]['default'])
def _should_validate_sub_attributes(attribute, sub_attr):
    """Verify that sub-attributes are iterable and should be validated.

    True only when the attribute carries a truthy dict-typed validator
    ('type:dict...') and the supplied value is iterable.
    """
    validate = attribute.get('validate')
    return (validate and isinstance(sub_attr, collections.Iterable) and
            any(key.startswith('type:dict') and descriptor
                for (key, descriptor) in six.iteritems(validate)))
def _build_subattr_match_rule(attr_name, attr, action, target):
    """Create the rule to match for sub-attribute policy checks."""
    # TODO(salv-orlando): Instead of relying on validator info, introduce
    # typing for API attributes
    # Expect a dict as type descriptor
    validate = attr['validate']
    dict_keys = [k for k in validate.keys() if k.startswith('type:dict')]
    if not dict_keys:
        LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
                 attr_name)
        return
    descriptor = validate[dict_keys[0]]
    if not isinstance(descriptor, dict):
        LOG.debug("Attribute type descriptor is not a dict. Unable to "
                  "generate any sub-attr policy rule for %s.",
                  attr_name)
        return
    # One rule per sub-attribute actually present in the request payload.
    sub_attr_rules = [
        policy.RuleCheck('rule', '%s:%s:%s' % (action, attr_name, sub_name))
        for sub_name in descriptor
        if sub_name in target[attr_name]
    ]
    return policy.AndCheck(sub_attr_rules)
def _process_rules_list(rules, match_rule):
    """Recursively walk a policy rule to extract a list of match entries.

    :param rules: accumulator list, mutated in place and also returned.
    :param match_rule: a RuleCheck leaf or an AndCheck composite to expand.
    """
    if isinstance(match_rule, policy.RuleCheck):
        rules.append(match_rule.match)
    elif isinstance(match_rule, policy.AndCheck):
        for nested_rule in match_rule.rules:
            _process_rules_list(rules, nested_rule)
    return rules
def _build_match_rule(action, target, pluralized):
    """Create the rule to match for a given action.

    The policy rule to be matched is built in the following way:
    1) add entries for matching permission on objects
    2) add an entry for the specific action (e.g.: create_network)
    3) add an entry for attributes of a resource for which the action
       is being executed (e.g.: create_network:shared)
    4) add an entry for sub-attributes of a resource for which the
       action is being executed
       (e.g.: create_router:external_gateway_info:network_id)
    """
    match_rule = policy.RuleCheck('rule', action)
    resource, is_write = get_resource_and_action(action, pluralized)
    # Attribute-based checks shall not be enforced on GETs
    if not is_write:
        return match_rule
    # assigning to variable with short name for improving readability
    res_map = attributes.RESOURCE_ATTRIBUTE_MAP
    if resource not in res_map:
        return match_rule
    for attr_name in res_map[resource]:
        if not _is_attribute_explicitly_set(attr_name,
                                            res_map[resource],
                                            target, action):
            continue
        attr = res_map[resource][attr_name]
        if 'enforce_policy' not in attr:
            continue
        attr_rule = policy.RuleCheck('rule', '%s:%s' %
                                     (action, attr_name))
        # Build match entries for sub-attributes when the attribute
        # declares a dict validator.
        if _should_validate_sub_attributes(attr, target[attr_name]):
            attr_rule = policy.AndCheck(
                [attr_rule, _build_subattr_match_rule(attr_name, attr,
                                                      action, target)])
        match_rule = policy.AndCheck([match_rule, attr_rule])
    return match_rule
# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handle backward compatibility
# for policy.json
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
    """Resource ownership check.
    This check verifies the owner of the current resource, or of another
    resource referenced by the one under analysis.
    In the former case it falls back to a regular GenericCheck, whereas
    in the latter case it leverages the plugin to load the referenced
    resource and perform the check.
    """
    def __init__(self, kind, match):
        """Extract the target field name from a '%(<field_name>)s' match.

        :param kind: the check kind this class is registered under
        :param match: raw match expression from the policy file
        :raises exceptions.PolicyInitError: if match is not of the form
            %(<field_name>)s
        """
        # Process the match
        try:
            self.target_field = re.findall(r'^\%\((.*)\)s$',
                                           match)[0]
        except IndexError:
            # findall returned no groups: the match expression does not
            # follow the expected %(<field_name>)s form.
            err_reason = (_("Unable to identify a target field from:%s. "
                            "Match should be in the form %%(<field_name>)s") %
                          match)
            LOG.exception(err_reason)
            raise exceptions.PolicyInitError(
                policy="%s:%s" % (kind, match),
                reason=err_reason)
        super(OwnerCheck, self).__init__(kind, match)
    def __call__(self, target, creds, enforcer):
        """Evaluate ownership of target against creds.

        When the target field is absent from target, the referenced parent
        resource is loaded through the core plugin (with admin context) to
        resolve it before comparing.
        :raises exceptions.PolicyCheckError: if the parent resource cannot
            be identified from the target field.
        """
        if self.target_field not in target:
            # policy needs a plugin check
            # target field is in the form resource:field
            # however if they're not separated by a colon, use an underscore
            # as a separator for backward compatibility
            def do_split(separator):
                parent_res, parent_field = self.target_field.split(
                    separator, 1)
                return parent_res, parent_field
            for separator in (':', '_'):
                try:
                    parent_res, parent_field = do_split(separator)
                    break
                except ValueError:
                    LOG.debug("Unable to find ':' as separator in %s.",
                              self.target_field)
            else:
                # If we are here split failed with both separators
                err_reason = (_("Unable to find resource name in %s") %
                              self.target_field)
                LOG.exception(err_reason)
                raise exceptions.PolicyCheckError(
                    policy="%s:%s" % (self.kind, self.match),
                    reason=err_reason)
            # Foreign keys are registered under the pluralized resource
            # name (hence the "%ss" suffix).
            parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
                "%ss" % parent_res, None)
            if not parent_foreign_key:
                err_reason = (_("Unable to verify match:%(match)s as the "
                                "parent resource: %(res)s was not found") %
                              {'match': self.match, 'res': parent_res})
                LOG.exception(err_reason)
                raise exceptions.PolicyCheckError(
                    policy="%s:%s" % (self.kind, self.match),
                    reason=err_reason)
            # NOTE(salv-orlando): This check currently assumes the parent
            # resource is handled by the core plugin. It might be worth
            # having a way to map resources to plugins so to make this
            # check more general
            # NOTE(ihrachys): if import is put in global, circular
            # import failure occurs
            manager = importutils.import_module('neutron.manager')
            f = getattr(manager.NeutronManager.get_instance().plugin,
                        'get_%s' % parent_res)
            # f *must* exist, if not found it is better to let neutron
            # explode. Check will be performed with admin context
            context = importutils.import_module('neutron.context')
            try:
                data = f(context.get_admin_context(),
                         target[parent_foreign_key],
                         fields=[parent_field])
                # Cache the resolved value so the interpolation below works.
                target[self.target_field] = data[parent_field]
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE('Policy check error while calling %s!'),
                                  f)
        # Interpolate the target into the match expression and compare with
        # the corresponding credential, when present.
        match = self.match % target
        if self.kind in creds:
            return match == six.text_type(creds[self.kind])
        return False
@policy.register('field')
class FieldCheck(policy.Check):
    """Check that a field of the target resource equals a literal value."""

    def __init__(self, kind, match):
        # match has the form "<resource>:<field>=<value>"
        resource, field_and_value = match.split(':', 1)
        field, value = field_and_value.split('=', 1)
        super(FieldCheck, self).__init__(
            kind, '%s:%s:%s' % (resource, field, value))
        # Value might need conversion - we need help from the attribute map;
        # fall back to identity when no converter is registered.
        try:
            conv_func = (
                attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
                ['convert_to'])
        except KeyError:
            conv_func = lambda x: x
        self.field = field
        self.value = conv_func(value)

    def __call__(self, target_dict, cred_dict, enforcer):
        """Return True only when the target carries the expected value."""
        target_value = target_dict.get(self.field)
        # target_value might be a boolean, explicitly compare with None
        if target_value is None:
            LOG.debug("Unable to find requested field: %(field)s in target: "
                      "%(target_dict)s",
                      {'field': self.field, 'target_dict': target_dict})
            return False
        return target_value == self.value
def _prepare_check(context, action, target, pluralized):
    """Prepare rule, target, and credentials for the policy engine."""
    # Explicit None check: an empty target dict ({}) must be preserved.
    target = {} if target is None else target
    return (_build_match_rule(action, target, pluralized),
            target,
            context.to_dict())
def log_rule_list(match_rule):
    """Debug-log the flattened match entries of match_rule."""
    if not LOG.isEnabledFor(std_logging.DEBUG):
        return
    LOG.debug("Enforcing rules: %s", _process_rules_list([], match_rule))
def check(context, action, target, plugin=None, might_not_exist=False,
          pluralized=None):
    """Verifies that the action is valid on the target in this context.

    :param context: neutron context
    :param action: string representing the action to be checked;
        this should be colon separated for clarity.
    :param target: dictionary representing the object of the action;
        for object creation this should be a dictionary representing the
        location of the object e.g. ``{'project_id': context.project_id}``
    :param plugin: currently unused and deprecated.
        Kept for backward compatibility.
    :param might_not_exist: If True the policy check is skipped (and the
        function returns True) if the specified policy does not exist.
        Defaults to false.
    :param pluralized: pluralized case of resource
        e.g. firewall_policy -> pluralized = "firewall_policies"
    :return: Returns True if access is permitted else False.
    """
    # Admin contexts are authorized without consulting the policy engine.
    if context.is_admin:
        return True
    if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules):
        return True
    match_rule, target, credentials = _prepare_check(
        context, action, target, pluralized)
    result = _ENFORCER.enforce(
        match_rule, target, credentials, pluralized=pluralized)
    if not result:
        # logging applied rules in case of failure
        log_rule_list(match_rule)
    return result
def enforce(context, action, target, plugin=None, pluralized=None):
    """Verifies that the action is valid on the target in this context.

    :param context: neutron context
    :param action: string representing the action to be checked;
        this should be colon separated for clarity.
    :param target: dictionary representing the object of the action;
        for object creation this should be a dictionary representing the
        location of the object e.g. ``{'project_id': context.project_id}``
    :param plugin: currently unused and deprecated.
        Kept for backward compatibility.
    :param pluralized: pluralized case of resource
        e.g. firewall_policy -> pluralized = "firewall_policies"
    :raises oslo_policy.policy.PolicyNotAuthorized:
        if verification fails.
    """
    # Admin contexts are authorized without consulting the policy engine.
    if context.is_admin:
        return True
    rule, target, credentials = _prepare_check(
        context, action, target, pluralized)
    try:
        result = _ENFORCER.enforce(rule, target, credentials, action=action,
                                   do_raise=True)
    except policy.PolicyNotAuthorized:
        # Log the rules that were applied, then re-raise for the caller.
        with excutils.save_and_reraise_exception():
            log_rule_list(rule)
            LOG.debug("Failed policy check for '%s'", action)
    return result
def check_is_admin(context):
    """Verify context has admin rights according to policy settings."""
    init()
    # the target is user-self
    credentials = context.to_dict()
    # Missing policy entry means "not admin"; otherwise ask the enforcer.
    return (ADMIN_CTX_POLICY in _ENFORCER.rules and
            _ENFORCER.enforce(ADMIN_CTX_POLICY, credentials, credentials))
def check_is_advsvc(context):
    """Verify context has advsvc rights according to policy settings."""
    init()
    # the target is user-self
    credentials = context.to_dict()
    # Missing policy entry means "not advsvc"; otherwise ask the enforcer.
    return (ADVSVC_CTX_POLICY in _ENFORCER.rules and
            _ENFORCER.enforce(ADVSVC_CTX_POLICY, credentials, credentials))
def _extract_roles(rule, roles):
    """Recursively collect lower-cased role names referenced by rule."""
    if isinstance(rule, policy.RoleCheck):
        roles.append(rule.match.lower())
    elif isinstance(rule, policy.RuleCheck):
        # Follow the indirection to the named rule.
        _extract_roles(_ENFORCER.rules[rule.match], roles)
    elif hasattr(rule, 'rules'):
        # Composite check (And/Or): recurse into each member.
        for nested_rule in rule.rules:
            _extract_roles(nested_rule, roles)
|
|
from typing import Any, List, Set, Tuple, Type
from google.appengine.ext import ndb
from backend.common.models.cached_model import TAffectedReferences
from backend.common.models.district_team import DistrictTeam
from backend.common.models.event import Event
from backend.common.models.event_team import EventTeam
from backend.common.queries import (
award_query,
district_query,
event_details_query,
event_query,
match_query,
media_query,
robot_query,
team_query,
)
from backend.common.queries.database_query import CachedDatabaseQuery
TCacheKeyAndQuery = Tuple[str, Type[CachedDatabaseQuery]]
def _queries_to_cache_keys_and_queries(
    queries: List[CachedDatabaseQuery],
) -> List[TCacheKeyAndQuery]:
    """Map each query instance to a (cache_key, query class) tuple."""
    # Comprehension replaces the manual append loop (same order, less code).
    return [(query.cache_key, type(query)) for query in queries]
def _filter(refs: Set[Any]) -> Set[Any]:
# Default filter() filters zeros, so we can't use it.
return {r for r in refs if r is not None}
def award_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return cache keys/queries to clear when Award models change."""
    event_keys = _filter(affected_refs["event"])
    team_keys = _filter(affected_refs["team_list"])
    years = _filter(affected_refs["year"])
    event_types = _filter(affected_refs["event_type_enum"])
    award_types = _filter(affected_refs["award_type_enum"])

    queries: List[CachedDatabaseQuery] = []
    for event_key in event_keys:
        queries.append(award_query.EventAwardsQuery(event_key.id()))
        queries.extend(
            award_query.TeamEventAwardsQuery(team_key.id(), event_key.id())
            for team_key in team_keys
        )
    for team_key in team_keys:
        queries.append(award_query.TeamAwardsQuery(team_key.id()))
        queries.extend(
            award_query.TeamYearAwardsQuery(team_key.id(), year)
            for year in years
        )
        queries.extend(
            award_query.TeamEventTypeAwardsQuery(
                team_key.id(), event_type, award_type
            )
            for event_type in event_types
            for award_type in award_types
        )
    return _queries_to_cache_keys_and_queries(queries)
def event_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return cache keys/queries to clear when Event models change."""
    event_keys = _filter(affected_refs["key"])
    years = _filter(affected_refs["year"])
    event_district_keys = _filter(affected_refs["district_key"])
    # Kick off the async datastore work up front so it overlaps the
    # query-building below.
    event_team_keys_future = EventTeam.query(
        EventTeam.event.IN([event_key for event_key in event_keys]) # pyre-ignore[16]
    ).fetch_async(None, keys_only=True)
    events_future = ndb.get_multi_async(event_keys)
    queries: List[CachedDatabaseQuery] = []
    for event_key in event_keys:
        queries.append(event_query.EventQuery(event_key.id()))
        queries.append(event_query.EventDivisionsQuery(event_key.id()))
    for year in years:
        queries.append(event_query.EventListQuery(year))
    for event_district_key in event_district_keys:
        queries.append(event_query.DistrictEventsQuery(event_district_key.id()))
    if event_keys:
        for et_key in event_team_keys_future.get_result():
            # EventTeam ids look like "<event>_<team>" with the event id
            # prefixed by its 4-digit year — presumably; verify against the
            # EventTeam model.
            team_key = et_key.id().split("_")[1]
            year = int(et_key.id()[:4])
            queries.append(event_query.TeamEventsQuery(team_key))
            queries.append(event_query.TeamYearEventsQuery(team_key, year))
            queries.append(event_query.TeamYearEventTeamsQuery(team_key, year))
    # Parent events cache a division list that includes these events, so
    # their EventDivisionsQuery entries must be cleared too.
    events_with_parents = filter(
        lambda e: e.get_result() is not None
        and e.get_result().parent_event is not None,
        events_future,
    )
    parent_keys = set([e.get_result().parent_event for e in events_with_parents])
    for parent_key in parent_keys:
        queries.append(event_query.EventDivisionsQuery(parent_key.id()))
    return _queries_to_cache_keys_and_queries(queries)
def event_details_updated(
    affected_refs: TAffectedReferences,
) -> List[TCacheKeyAndQuery]:
    """Return cache keys/queries to clear when EventDetails models change."""
    detail_keys = _filter(affected_refs["key"])
    queries: List[CachedDatabaseQuery] = [
        event_details_query.EventDetailsQuery(detail_key.id())
        for detail_key in detail_keys
    ]
    return _queries_to_cache_keys_and_queries(queries)
def match_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return cache keys/queries to clear when Match models change."""
    match_keys = _filter(affected_refs["key"])
    event_keys = _filter(affected_refs["event"])
    team_keys = _filter(affected_refs["team_keys"])
    years = _filter(affected_refs["year"])

    queries: List[CachedDatabaseQuery] = []
    queries.extend(match_query.MatchQuery(mk.id()) for mk in match_keys)
    # queries.append(match_query.MatchGdcvDataQuery(match_key.id()))
    for event_key in event_keys:
        queries.append(match_query.EventMatchesQuery(event_key.id()))
        # queries.append(match_query.EventMatchesGdcvDataQuery(event_key.id()))
        queries.extend(
            match_query.TeamEventMatchesQuery(tk.id(), event_key.id())
            for tk in team_keys
        )
    queries.extend(
        match_query.TeamYearMatchesQuery(tk.id(), year)
        for tk in team_keys
        for year in years
    )
    return _queries_to_cache_keys_and_queries(queries)
def media_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return cache keys/queries to clear when Media models change."""
    reference_keys = _filter(affected_refs["references"])
    years = _filter(affected_refs["year"])
    media_tags = _filter(affected_refs["media_tag_enum"])
    # Only Team references require the EventTeam lookup below.
    team_keys = list(filter(lambda x: x.kind() == "Team", reference_keys))
    # Start the async fetch early (or skip it entirely when no teams are
    # affected) so it overlaps the query-building loop.
    event_team_keys_future = (
        EventTeam.query(EventTeam.team.IN(team_keys)).fetch_async(  # pyre-ignore[16]
            None, keys_only=True
        )
        if team_keys
        else None
    )
    queries: List[CachedDatabaseQuery] = []
    for reference_key in reference_keys:
        if reference_key.kind() == "Team":
            for year in years:
                queries.append(media_query.TeamYearMediaQuery(reference_key.id(), year))
                for media_tag in media_tags:
                    queries.append(
                        media_query.TeamYearTagMediasQuery(
                            reference_key.id(), year, media_tag
                        )
                    )
            for media_tag in media_tags:
                queries.append(
                    media_query.TeamTagMediasQuery(reference_key.id(), media_tag)
                )
            queries.append(media_query.TeamSocialMediaQuery(reference_key.id()))
        if reference_key.kind() == "Event":
            queries.append(media_query.EventMediasQuery(reference_key.id()))
    if event_team_keys_future:
        for event_team_key in event_team_keys_future.get_result():
            # EventTeam ids look like "<event>_<team>"; the event id carries
            # a 4-digit year prefix — presumably; verify against the model.
            event_key = event_team_key.id().split("_")[0]
            year = int(event_key[:4])
            # Only clear event-team media caches for years actually affected.
            if year in years:
                queries.append(media_query.EventTeamsMediasQuery(event_key))
                queries.append(media_query.EventTeamsPreferredMediasQuery(event_key))
    return _queries_to_cache_keys_and_queries(queries)
def robot_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return cache keys/queries to clear when Robot models change."""
    queries: List[CachedDatabaseQuery] = [
        robot_query.TeamRobotsQuery(team_key.id())
        for team_key in _filter(affected_refs["team"])
    ]
    return _queries_to_cache_keys_and_queries(queries)
def team_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return cache keys/queries to clear when Team models change."""
    team_keys = _filter(affected_refs["key"])
    # Fire both association lookups asynchronously up front so they overlap
    # the per-team query building below.
    event_team_keys_future = EventTeam.query(
        EventTeam.team.IN([team_key for team_key in team_keys]) # pyre-ignore[16]
    ).fetch_async(None, keys_only=True)
    district_team_keys_future = DistrictTeam.query(
        DistrictTeam.team.IN([team_key for team_key in team_keys])
    ).fetch_async(None, keys_only=True)
    queries: List[CachedDatabaseQuery] = []
    for team_key in team_keys:
        queries.append(team_query.TeamQuery(team_key.id()))
        # Team list pages are paginated; clear the page this team sits on.
        page_num = team_query.get_team_page_num(team_key.id())
        queries.append(team_query.TeamListQuery(page_num))
    for et_key in event_team_keys_future.get_result():
        # EventTeam ids look like "<event>_<team>" with a 4-digit year
        # prefix on the event id — presumably; verify against the model.
        year = int(et_key.id()[:4])
        event_key = et_key.id().split("_")[0]
        page_num = team_query.get_team_page_num(et_key.id().split("_")[1])
        queries.append(team_query.TeamListYearQuery(year, page_num))
        queries.append(team_query.EventTeamsQuery(event_key))
        queries.append(team_query.EventEventTeamsQuery(event_key))
    for dt_key in district_team_keys_future.get_result():
        # DistrictTeam ids look like "<district>_<team>".
        district_key = dt_key.id().split("_")[0]
        queries.append(team_query.DistrictTeamsQuery(district_key))
    return _queries_to_cache_keys_and_queries(queries)
def eventteam_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return cache keys/queries to clear when EventTeam models change."""
    event_keys = _filter(affected_refs["event"])
    team_keys = _filter(affected_refs["team"])
    years = _filter(affected_refs["year"])

    queries: List[CachedDatabaseQuery] = []
    for team_key in team_keys:
        team_id = team_key.id()
        queries.append(event_query.TeamEventsQuery(team_id))
        queries.append(team_query.TeamParticipationQuery(team_id))
        # Paginated team lists: compute the page once per team.
        page_num = team_query.get_team_page_num(team_id)
        for year in years:
            queries.append(event_query.TeamYearEventsQuery(team_id, year))
            queries.append(event_query.TeamYearEventTeamsQuery(team_id, year))
            queries.append(team_query.TeamListYearQuery(year, page_num))
    for event_key in event_keys:
        event_id = event_key.id()
        queries.append(team_query.EventTeamsQuery(event_id))
        queries.append(team_query.EventEventTeamsQuery(event_id))
        queries.append(media_query.EventTeamsMediasQuery(event_id))
        queries.append(media_query.EventTeamsPreferredMediasQuery(event_id))
    return _queries_to_cache_keys_and_queries(queries)
def districtteam_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return cache keys/queries to clear when DistrictTeam models change."""
    district_keys = _filter(affected_refs["district_key"])
    team_keys = _filter(affected_refs["team"])
    queries: List[CachedDatabaseQuery] = [
        team_query.DistrictTeamsQuery(district_key.id())
        for district_key in district_keys
    ]
    queries.extend(
        district_query.TeamDistrictsQuery(team_key.id())
        for team_key in team_keys
    )
    return _queries_to_cache_keys_and_queries(queries)
def district_updated(affected_refs: TAffectedReferences) -> List[TCacheKeyAndQuery]:
    """Return cache keys/queries to clear when District models change."""
    years = _filter(affected_refs["year"])
    district_abbrevs = _filter(affected_refs["abbreviation"])
    district_keys = _filter(affected_refs["key"])
    # Kick off both association lookups asynchronously before building the
    # district-level queries.
    district_team_keys_future = DistrictTeam.query(
        DistrictTeam.district_key.IN(list(district_keys))
    ).fetch_async(None, keys_only=True)
    district_event_keys_future = Event.query(
        Event.district_key.IN(list(district_keys)) # pyre-ignore[16]
    ).fetch_async(keys_only=True)
    queries: List[CachedDatabaseQuery] = []
    for year in years:
        queries.append(district_query.DistrictsInYearQuery(year))
    for abbrev in district_abbrevs:
        queries.append(district_query.DistrictHistoryQuery(abbrev))
    for key in district_keys:
        queries.append(district_query.DistrictQuery(key.id()))
    for dt_key in district_team_keys_future.get_result():
        # DistrictTeam ids look like "<district>_<team>".
        team_key = dt_key.id().split("_")[1]
        queries.append(district_query.TeamDistrictsQuery(team_key))
    # Necessary because APIv3 Event models include the District model
    affected_event_refs = {
        "key": set(),
        "year": set(),
        "district_key": district_keys,
    }
    for event_key in district_event_keys_future.get_result():
        affected_event_refs["key"].add(event_key)
        # Event ids carry a 4-digit year prefix — presumably; verify.
        affected_event_refs["year"].add(int(event_key.id()[:4]))
    # Delegate to event_updated so the district's events are also cleared.
    return _queries_to_cache_keys_and_queries(queries) + event_updated(
        affected_event_refs
    )
|
|
#################################################################
###############written by dkudrow 08/2010########################
#Reads all of the images in a directory, sorts them by Strehl
#Ratio or consecutive order, and stacks the specified top percent
#either aligned to a guide star or not, weighting if requested.
#This version includes:
#detripling for compact binaries, drizzle algorithm for
#subpixel alignment and dynamic guide region.
#Type $python stacked.py -h for usage
####
#last modified 10/26/2010
#################################################################
# Python 2 script: announce start before the (slow) scientific imports below.
print 'Initializing stacking sequence...'
import sys,os,time
import pyfits as PF
from numpy import *
from scipy import *
from scipy.interpolate import interp1d
from scipy.fftpack import fftn, ifftn
from pylab import plt, axvline, savefig, subplot, figure, plot, legend, title, hist
from myutils import readconfig
import mysciutils
# Make the shared LIHSP utilities importable from the sibling directory.
sys.path.append("../LIHSPcommon")
print 'Loaded all packages'
# Detector saturation threshold in counts; frames whose search region peaks
# above this level are skipped during selection (see createstack).
satlevel = 1400000
def calccents(tmparr):
    """Return (x centroid, y centroid, sky-subtracted flux) of a 2-D array.

    Centroids are intensity-weighted mean pixel indices along each axis.
    The sky level is estimated from the border pixels and subtracted from
    the total flux before it is returned.
    """
    if shape(tmparr)==(0,0):
        print "empty array passed to calccents, returning (0,0,0)"
        return (0,0,0)
    # Collapse rows to get the flux profile along x.
    allx =sum(tmparr,axis=0)
    xl = len(allx)
    indx = arange(xl)
    # Total flux over the whole array.
    allf = sum(allx)
    #print 'allx.shape = ' + str(allx.shape)
    #print 'indx.shape = ' + str(indx.shape)
    # Intensity-weighted mean column index.
    mx = sum(allx*indx)/allf
    # Same along y: collapse columns, then weight by row index.
    ally = sum(tmparr,axis=1)
    yl = len(ally)
    indy = arange(yl)
    my = sum(ally*indy)/allf
    # Sky estimate from the four border rows/columns (note: the four corner
    # pixels are counted twice by this sum).
    sky =sum(tmparr[0,:])
    sky+=sum(tmparr[-1,:])
    sky+=sum(tmparr[:,0])
    sky+=sum(tmparr[:,-1])
    (lx,ly)=shape(tmparr)
    # Scale the per-border-pixel sky to the full array area:
    # lx*ly pixels total vs ~2*(lx+ly) border pixels sampled.
    sky*=(lx*ly)/(2.0*(lx+ly))
    allf=allf-sky
    #returning: weighted x centroid, weighted y centroid, sky-subtracted flux
    return (mx,my, allf)
def createstack(inpath,gsx,gsy,rad,select,pc,shift,detrip,minsep,outpath,coresz,follow,ps):
print "searching for files in %s"%inpath
files = sorted([x for x in os.listdir(inpath) if x.endswith('.fits')])
nfiles=len(files)
if type(pc) is float or type(pc) is int:
pc = [pc]
cutoff = zeros(len(pc), float)
for i,c in enumerate(pc):
cutoff[i] = int(c/100.0*nfiles)
if cutoff[i]==0:
cutoff[i]=1
##### HEADER INFO #####
image=PF.open(inpath+'/'+files[0])
header=image[0].header
image.close()
xsize=header['NAXIS1']
ysize=header['NAXIS2']
if 'HBIN' in header:
hbin =float(header['HBIN'])
vbin =float(header['VBIN'])
elif 'CCDXBIN' in header:
hbin =float(header['CCDXBIN'])
vbin =float(header['CCDYBIN'])
if 'EXPOSURE' in header:
exp = (header['EXPOSURE'])
elif 'EXPTIME' in header:
exp = (header['EXPTIME'])
else:
print "no exposure lenght recognizable key!"
return -1
psx = 2.0/(ps*hbin)
psy = 2.0/(ps*vbin)
#value in pixel of 2''*2'' to contain centroid update within 2'' , x and y
psxsq=psx*psx
psysq=psy*psy
align='UNALIGN'
if shift == 'align':
align='ALIGN'
if select=='lucky':
imtype='LUCKY'
if shift == 'none':
print """Will not perform lucky imaging without aligning frames
Changing parameter 'shift' to '1'"""
shift = 'align'
align='ALIGN'
elif select == 'coadded':
imtype='CONSEC'
elif select == 'weighted':
imtype='WEIGHTED_TIPTILT'
if shift == 'none':
print """Will not perform weighted tip/tilt imaging without
aligningframes
Changing parametes 'shift' to '1'"""
shift = 'align'
align='ALIGN'
elif select == 'corr' or select == 'correlate':
imtype='LUCKY_CORRELATED'
#NOTE: do I need to change shift and align?
# weights = []
# for coff in cutoff:
# weights.append(zeros(coff,float))
else:
print "invalid 'select' paramter (%s): use 'coadded' for standard stacking, 'lucky' for lucky, 'weighted' for weighted average exposures, 'corr' for lucky with phase correlation"%select
return -1
if gsx == 0:
gsx = int(xsize/2)
if gsy == 0:
gsy = int(ysize/2)
for pcn,coff in enumerate(cutoff):
coff=int(coff)
print "Generating ", imtype, " image using ",coff," images, using", align,"method"
if align == 'ALIGN': print "centroid: ",gsx," ",gsy
if detrip!='none': print "detripling core ",coresz, "min distance",minsep
sys.stdout.flush()
##### SELECTION #####
################creating name files that will be useful later
outname=files[0][:-9].replace('.fits','')
if detrip == 'v':
outfile=outpath+'/'+outname+'_%3.2f_%s_%s_%s_%s.fits' % (float(pc[pcn]),imtype,align,follow, 'dv')
elif detrip == 'h':
outfile=outpath+'/'+outname+'_%3.2f_%s_%s_%s_%s.fits' % (float(pc[pcn]),imtype,align,follow, 'dh')
else:
outfile=outpath+'/'+outname+'_%3.2f_%s_%s_%s.fits' % (float(pc[pcn]),imtype,align,follow)
histfile = outpath+'/'+outname+'_'+follow+'.hist.png'
listfilename = '%s/strehl_list_%s.dat' %(outpath,follow)
best=[]
if imtype == 'CONSEC' and align == 'UNALIGN':
for name in files:
best.append([name,
0,
0,
0,
gsx,
gsy,
gsx,gsy, 0])
else:
print '#Evaluating %s images...' % nfiles
sys.stdout.flush()
if os.path.isfile(listfilename) == True:
print listfilename + ' exists'
print 'reading list of strehl sorted files'
listfile = open(listfilename)
sys.stdout.flush()
lsplit=listfile.readline().split()
if len(lsplit)<9:
print 'old version of strehl ratio file. remove ', listfilename, 'first and rerun reduction.'
return -1
best.append([lsplit[0], int(lsplit[1]), int(lsplit[2]),
float(lsplit[3]),
int(lsplit[4]), int(lsplit[5]),
lsplit[6], lsplit[7],lsplit[8]])
for l in listfile:
lsplit=l.split()
best.append([lsplit[0], int(lsplit[1]), int(lsplit[2]),
float(lsplit[3]),
int(lsplit[4]), int(lsplit[5]),
float(lsplit[6]), float(lsplit[7]),
float(lsplit[8])])
else: #strehl list does not exist
bpx,bpy=gsx,gsy
nrejected = 0
name = files[0]
regfile = '%s/../%s_%s.reg' %(outpath,name,follow)
print "\n\n printing region file for fits ", name,"to ",regfile,"\n\n"
regf = open(regfile,"w")
tmpfile=PF.open(inpath+'/'+name)
reg=tmpfile[0].data[bpy-rad:bpy+rad+1,bpx-rad:bpx+rad+1]
print reg
print '[%d:%d,%d:%d]' %(bpy-rad,bpy+rad+1,bpx-rad,bpx+rad+1)
maxvalue =reg.max()
if maxvalue<satlevel:
maxindex =where(reg==reg.max())
if len(maxindex[0]) ==1:
(mx,my,apf) =\
calccents(tmpfile[0].data[bpy-rad+maxindex[0]-rad:\
bpy-rad+maxindex[0]+rad+1,\
bpx-rad+maxindex[1]-rad:\
bpx-rad+maxindex[1]+rad+1])
print >> regf, 'global color=green dashlist=8 3 width=1 font="helvetica 10 normal" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1'
print >>regf, "physical"
print >> regf, "box(",(maxindex[1])[0]+bpx-rad+1,",",(maxindex[0])[0]+bpy-rad+1,",",2*rad+1,",",2*rad+1,",0)"
print >> regf,"circle(",maxindex[1][0]+int(bpx)-rad+1, ",",maxindex[0][0]+int(bpy)-rad+1,",1)"
print >> regf,"circle(",bpx+maxindex[1][0]-2*rad+mx+1,",",bpy+maxindex[0][0]-2*rad+my+1,",1) # color=red"
regf.close()
best.append([name,
maxindex[1],
maxindex[0],
maxvalue,
int(bpx),
int(bpy),
bpx+maxindex[1][0]-2*rad+mx,
bpy+maxindex[0][0]-2*rad+my, apf])
if follow=='dynamic':
print "dynamic"
#only updating the centroid if the new brightest pixel is within 2'' of the old one
newbpx=bpx+maxindex[1][0]-rad
newbpy=bpy+maxindex[0][0]-rad
if (newbpx-bpx)*(newbpx-bpx)<2.0/psx and (newbpy-bpy)*(newbpy-bpy)<2.0/psy:
if newbpx-2*rad > 0 or newbpx+2*rad < xsize \
or newbpy-2*rad > 0 or newbpy+2*rad < ysize \
or (bpx-newbpx)*(bpx-newbpx) > rad*rad/4.0 \
or (bpy-newbpy)*(bpy-newbpy) > rad*rad/4.0 :
bpx=newbpx
bpy=newbpy
else:
print 'Cannot update centroid cause it is outside of the allowed region'
else:
print 'Repeat brightest pixel found in %s. Frame skipped.' % name
else:
print 'Saturated pixel found in %s. Frame skipped.' % name
tmpfile.close()
for name in files[1:]:
#print 'Running image %s ...' %name
tmpfile=PF.open(inpath+'/'+name)
reg=tmpfile[0].data[bpy-rad:bpy+rad+1,bpx-rad:bpx+rad+1]
maxvalue =reg.max()
if maxvalue<satlevel:
maxindex =where(reg==reg.max())
if len(maxindex[0]) ==1:
(mx,my,apf) =\
mysciutils.calccents(
tmpfile[0].data[bpy-rad+maxindex[0][0]-rad:
bpy-rad+maxindex[0][0]+rad+1,
bpx-rad+maxindex[1][0]-rad:
bpx-rad+maxindex[1][0]+rad+1])
best.append([name,
maxindex[1],
maxindex[0],
maxvalue,
int(bpx),
int(bpy),
bpx+maxindex[1][0]-2*rad+mx,
bpy+maxindex[0][0]-2*rad+my, apf])
l = best[-1]
#print '%s %d %d %f %d %d %f %f %f\n'\
#%(l[0],l[1],l[2],l[3],int(l[4]+l[1])-rad,
#int(l[5]+l[2])-rad,l[6],l[7],l[8])
#raw_input()
if follow=='dynamic':
newbpx=bpx+maxindex[1][0]-rad
newbpy=bpy+maxindex[0][0]-rad
# print newbpx, newbpy, bpx, bpy
if (newbpx-bpx)*(newbpx-bpx)<psxsq and (newbpy-bpy)*(newbpy-bpy)<psysq:
if newbpx-2*rad > 0 or newbpx+2*rad < xsize \
or newbpy-2*rad > 0 or newbpy+2*rad < ysize \
or (bpx-newbpx)*(bpx-newbpx) > rad*rad/4.0 \
or (bpy-newbpy)*(bpy-newbpy) > rad*rad/4.0 :
bpx=newbpx
bpy=newbpy
else:
print 'Cannot update centroid cause it is outside of the allowed region, if this is common choose another star or shrink the search region radius ', name
else:
print 'Repeat brightest pixel found in %s. Frame skipped.' % name
else:
print 'Saturated pixel found in %s. Frame skipped.' % name
tmpfile.close()
if imtype!='CONSEC':
best=sorted(best,key=lambda list:list[3] ,reverse=True)
else :
best=sorted(best,key=lambda list:list[0] ,reverse=False)
# print best[:10]
nrejected = nfiles-len(best)
if coff > nfiles-nrejected:
coff=nfiles-nrejected
luckies=best[:coff]
exposure = 0.0
##########PRINTING STREHL LIST######################
print '%d images selected.' % coff
sys.stdout.flush()
if imtype !='CONSEC':
if os.path.isfile(listfilename) != True:
print '#printing list of selected images to '+listfilename
sys.stdout.flush()
listout=open(listfilename,'w')
for l in best:
strhere = '%s %d %d %f %d %d %f %f %f\n'%(
l[0],l[1],
l[2],l[3],
int(l[4]+l[1])-rad,
int(l[5]+l[2])-rad,
l[6],l[7],l[8])
listout.write(strhere)
listout.close()
if select == 'weighted':
#########creating weights as AS^2.4 according to Robert Tubbs###
weights = array(zip(*luckies)[3])
strehlnorm = 0.3/weights[0]
weights = pow(weights*strehlnorm,2.4)
a = coff/sum(weights)
weights = weights*a
###########CREATING HISTOGRAM##################
if os.path.isfile(histfile) != 1:
bins = linspace(best[-1][3],best[0][3],50)
strehl=[i[3] for i in best]
events, edges, patches = hist(strehl,bins, normed=False)
lower = resize(edges, len(edges)-1)
binmid = lower + 0.5*diff(edges)
lenbins=len(bins)-2
allevents = sum(events)
nevents = 0
xsaved = 0
for i in range(lenbins):
nevents +=events[lenbins-i]
if nevents>allevents/100.0 and xsaved == 0:
xsaved = binmid[lenbins-i]
plt.xlabel('counts')
plt.ylabel('frequency')
axvline (xsaved)
savefig(histfile,dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1)
# show()
# sys.exit()
########################################
##### DETRIPLING #####
dtr='NONE'
if detrip!='none':
c=coresz/2
print 'Separating cores...'
sys.stdout.flush()
for i in xrange(coff):
core1=[0,0,0,0,0]
core2=[0,0,0,0,0]
name=luckies[i][0]
tmpfile=PF.getdata(inpath+'/'+name)
reg=tmpfile[gsy-rad:gsy+rad+1,gsx-rad:gsx+rad+1]
for y in xrange(1,2*rad-1):
for x in xrange(1,2*rad-1):
core=reg[y-c:y+c+1,x-c:x+c+1]
if core.sum()>=core1[0]:
core1=[core.sum(),x,y,argmax(core)%coresz,argmax(core)/coresz]
if core.sum()==core1[0]:
continue
elif core.sum()>=core2[0] and sqrt((1.*x-core1[1])**2+(1.*y-core1[2])**2)>=minsep:
core2=[core.sum(),x,y,argmax(core)%coresz,argmax(core)/coresz]
if detrip=='v':
d=2
dtr='VERTICAL'
elif detrip=='h':
d=1
dtr='HORIZONTAL'
else:
print 'wrong detripling flag'
return -1
if core1[d]>core2[d]:
luckies[i]=[name,core1[1]-c+core1[3],core1[2]-c+core1[4]]
else:
luckies[i]=[name,core2[1]-c+core2[3],core2[2]-c+core2[4]]
##### ALIGNMENT AND STACKING #####
print 'Compiling Lucky Image...'
sys.stdout.flush()
if select=='corr' or select=='correlate':
print 'Using phase correlation...'
ntotal = coff
print '\nlength of fits list: %d' %ntotal
mastername = luckies[0][0]
print 'masterfits: %s' %mastername
try: masterdata = PF.getdata(inpath+'/'+mastername)
except:
print """Cannot find image %s\nAborting...""" %(inpath+'/'+mastername)
return -1
masterdata = mysciutils.drizzle(masterdata, 0)
_PAD_ = 50
coaddedtmp = mysciutils.pad(masterdata, _PAD_)
############ TUKEY WINDOW MASTER #####
lrg = int(coaddedtmp.shape[0] < coaddedtmp.shape[1]) #longest edge
alpha = double(masterdata.shape[lrg])/coaddedtmp.shape[lrg]
print "alpha = %f" %alpha
tukey = mysciutils.tukey2d(tuple(int(x*alpha-50) for x in coaddedtmp.shape), alpha)
padding = int(round((float(coaddedtmp.shape[0])-tukey.shape[0])/2))
tukeypad = mysciutils.pad(tukey, padding)
masterfft = fftn(coaddedtmp*tukeypad)
for k in xrange(1,coff):
print '\nRunning %d/%d' %(k, coff-1)
name = luckies[k][0]
print '...' + name
fits = PF.getdata(inpath+'/'+name)
fits = mysciutils.drizzle(fits, 0)
fitspad = mysciutils.pad(fits, _PAD_)
lrg = int(fits.shape[0] < fits.shape[1]) #longest edge
alpha = double(fits.shape[lrg])/fitspad.shape[lrg]
tukey = mysciutils.tukey2d(tuple(int(x*alpha-50) for x in fitspad.shape), alpha)
padding = int(round((float(fitspad.shape[0])-tukey.shape[0])/2))
tukeypad = mysciutils.pad(tukey, padding)
fitsfft = fftn(fitspad*tukeypad)
############## FINDING PHASE ############
print 'Finding phase...'
axis2_shift, axis1_shift = mysciutils.correlate([masterfft, fitsfft])
if axis2_shift >= 0:
if axis2_shift == 0: axis2_shift = -fitspad.shape[0]
if axis1_shift >= 0:
if axis1_shift == 0: axis1_shift = -fitspad.shape[1]
coaddedtmp[axis2_shift:,axis1_shift:] += fitspad[:-axis2_shift,:-axis1_shift]
else: #axis1_shift < 0
coaddedtmp[axis2_shift:,:-abs(axis1_shift)] += fitspad[:-axis2_shift,abs(axis1_shift):]
else: #axis2_shift < 0
if axis1_shift >= 0:
if axis1_shift == 0: axis1_shift = -fitspad.shape[1]
coaddedtmp[:-abs(axis2_shift),axis1_shift:] += fitspad[abs(axis2_shift):,:-axis1_shift]
else: #axis1_shift < 0
coaddedtmp[:-abs(axis2_shift),:-abs(axis1_shift)] += fitspad[abs(axis2_shift):,abs(axis1_shift):]
stack = coaddedtmp
else:
dx,dy=rad,rad
stack=zeros((2*(ysize-2)-7*rad,2*(xsize-2)-7*rad))
for k in xrange(coff):
print 'Running %d/%d...' %(k, coff-1)
name=luckies[k][0]
tmp=PF.getdata(inpath+'/'+name)
frame = mysciutils.drizzle(tmp, 0)
if shift=='align':
subpix=[[]]*4
if follow=='dynamic':
# print luckies[k], gsx
dx=2*(luckies[k][4]-gsx+luckies[k][1])
dy=2*(luckies[k][5]-gsy+luckies[k][2])
x=luckies[k][4]-rad+luckies[k][1]
y=luckies[k][5]-rad+luckies[k][2]
else:
dx=2*luckies[k][1]
dy=2*luckies[k][2]
x=gsx-rad+luckies[k][1]
y=gsy-rad+luckies[k][2]
subpix[0]=[0,0,3*tmp[y,x]+tmp[y-1,x-1]+
tmp[y-1,x]+tmp[y,x-1]] # top left
subpix[1]=[0,1,3*tmp[y,x]+tmp[y-1,x]+
tmp[y-1,x+1]+tmp[y,x+1]] # top right
subpix[2]=[1,0,3*tmp[y,x]+tmp[y,x-1]+
tmp[y+1,x-1]+tmp[y+1,x]] # bot left
subpix[3]=[1,1,3*tmp[y,x]+tmp[y,x+1]+
tmp[y+1,x]+tmp[y+1,x+1]] # bot right
offset=sorted(subpix, key=lambda list:list[2], reverse=True)
dx+=offset[0][1]
dy+=offset[0][0]
# print dy,2*(ysize-2)-8*rad+dy,dx,2*(xsize-2)-8*rad+dx
crop=frame[dy:2*(ysize-2)-7*rad+dy,dx:2*(xsize-2)-7*rad+dx]
tmpcounter=0
if shape(crop)!=shape(stack):
print "skipped image %s(if too many images are skipped try running with static centroid) "%name
continue
if select == 'weighted':
crop = crop*weights[k]
exposure += exp*weights[k]
else:
exposure +=exp
stack+=crop
##### UPDATE HEADER #####
if 'EXPOSURE' in header: header['EXPOSURE']=exposure
elif 'EXPTIME' in header: header['EXPTIME']=exposure
header.update('IMAGTYPE', '%s' % imtype, 'Consecutive, Lucky, Weighted, or Lucky_Correlated')
header.update('SELECTED', '%s' % pc[pcn], 'Percentage of images selected')
header.update('IMGALIGN', '%s' % align, 'Image realignment around guide star')
header.update('DETRIPLE', '%s' % dtr, 'Axis of detripling')
if detrip!='none':
header.update('DETRCORE', '%s' % coresz, 'Size of detripling core')
header.update('DETRSEP', '%s' % minsep, 'Minimum core separation')
else:
header.update('DETRCORE', '0', 'Size of detripling core')
header.update('DETRSEP', '0', 'Minimum core separation')
header.update('ALIGNCX', '%d' %gsx, 'x-pixel initial centroid')
header.update('ALIGNCY', '%d' %gsy, 'y-pixel initial centroid')
header.update('ALIGNR', '%d' %rad, 'search region radius')
header.update('CENTROID', '%s' %follow, 'Dynamic or static centroid')
##### WRITE OUT #####
if os.path.isfile(outfile):
os.remove(outfile)
PF.writeto(outfile, stack, header)
print 'Composite image written to %s' % outfile
strg ='chmod 777 '+outpath+'/'+outname+'/*'
os.system(strg)
return 1
########################################################
########################################################
########################################################
########################################################
if __name__ == '__main__':
if len(sys.argv) != 1 or sys.argv[1].startswith('-h') or sys.argv[1] == 'h':
print """Usage. Requires:
**name of parameter file conatining :**
Directory containing images
dark file
dark method
Guide star x coordinate
Guide star y coordinate
region dimensions x
percentage of images to be selected (0-100)
lucky: \'lucky\',\'weighted\', \'coadded\', \'corr\'
shift: \'align\' or \'none\'
detripling: \'v\' or \'h\' or \'none\'
minimum separation or cores
core size for detripling (odd integer)
dynamic guide region; 1 or 0
"""
sys.exit()
##### DECLARE VARIABLES #####
pars = readconfig(sys.argv[1])
inpath=pars['impath']+'/'+pars['imgdir']
gsx=pars['x']
gsy=pars['y']
rad=pars['r']
select=pars['sel']
pc = pars['percent']
ps = pars['ps']
shift=pars['align']
detrip=pars['detrip']
minsep=float(pars['separation'])
outpath=pars['outpath']
coresz=pars['core']
follow=pars['centroid']
print inpath
for sel in select:
createstack(inpath,gsx,gsy,rad,sel,pc,shift,detrip,minsep,outpath,coresz,follow,ps)
|
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#*********************************************************#
# @@ScriptName: collectorfluentd.py
# @@Author: Fang.Li<surivlee@gmail.com>
# @@Create Date: 2013-12-05 14:21:57
# @@Modify Date: 2014-03-13 17:53:02
# @@Function:
#*********************************************************#
import os
import time
import signal
import glob
import random
import string
import pickle
import msgpack_pure
import daemonize
from shelljob import proc
from common import log
from dataparser import Dataparser
class CollectorFluentd(object):
    """The main collector-fluentd class.

    There are 3 steps in a full running circle:

    1. getPluginsOutput: executes all plugins and gathers their output
       lines as a list object; each item is one metric.
    2. write2Cache(outputs): writes all metrics to the local FS first.
       The content of the cache files is already msgpack encoded.
    3. sendAllCache: searches all cached files in the cache folder and
       sends them to the remote fluentd server in order.
    """

    def __init__(self, conf):
        self.conf = conf
        self.data_parser = Dataparser(conf)
        log("Collector-fluentd daemon started, PID: %i" % daemonize.getPid())

    def _executePlugins(self, files):
        """Run every plugin executable in parallel and return the list of
        stripped, non-empty output lines collected before the timeout."""
        if not files:
            return []
        procs = []
        outputs = []
        # Start all plugins in parallel (one process group each).
        g = proc.Group()
        for f in files:
            procs.append(g.run(f))
            time.sleep(0.05)
        # Read one line at a time until every plugin has finished or the
        # configured plugin timeout has elapsed.
        t0 = time.time()
        while time.time() - t0 <= self.conf.plugin_timeout:
            if g.is_pending():
                _lines = g.readlines(max_lines=1)
                if _lines and _lines[0][1].strip():
                    outputs.append(_lines[0][1].strip())
            else:
                break
        # Clean up: terminate any plugin still running after the timeout.
        # NOTE: narrowed from a bare ``except:`` so KeyboardInterrupt /
        # SystemExit are no longer swallowed here.
        for p in procs:
            try:
                os.killpg(p.pid, signal.SIGTERM)
            except Exception:
                # Process group already gone -- nothing to kill.
                pass
        g.get_exit_codes()
        g.clear_finished()
        return outputs

    def getPluginsOutput(self):
        """Discover executable plugin files and return their combined
        output lines."""
        log("Collecting metrics from plugins...", -1)
        log("Current plugin path: %s" % self.conf.plugin_path, -1)
        plugin_list = glob.glob(self.conf.plugin_path)
        # Only executable regular files count as plugins.
        plugin_list = [
            x for x in plugin_list if os.access(x, os.X_OK) and os.path.isfile(x)
        ]
        log("Found %i valid plugins: %s" % (
            len(plugin_list), str([os.path.basename(f) for f in plugin_list])), -1)
        log("Executing plugins...", -1)
        outputs = self._executePlugins(plugin_list)
        for output in outputs:
            log("Get valid plugin output: %s" % output.strip(), -1)
        return outputs

    def _getValidMetric(self, metric, prefix, addition=None):
        """Validate and encode one plugin output line.

        Expected line format: ``<name> <timestamp> <value> [k=v ...]``.
        A ``cf_datatype`` tag selects the RRD-style data type (one of
        gauge, counter or derive); every other ``k=v`` pair becomes a tag.

        Returns the msgpack-encoded metric on success, ``False`` for a
        malformed line, or ``None`` when a counter/derive metric has no
        history yet and must wait for the next cycle.
        """
        m = metric.split()
        if len(m) < 3:
            return False
        if not m[1].isdigit():
            return False
        try:
            v = float(m[2])
            # Store whole-number samples as ints.
            if v == int(v):
                v = int(v)
        except (ValueError, OverflowError):
            return False
        cf_datatype = "gauge"
        tags = {}
        for t in m[3:]:
            _t = t.split("=")
            if len(_t) != 2:
                return False
            if not _t[0].strip():
                return False
            if not _t[1].strip():
                return False
            if _t[0] == "cf_datatype":
                cf_datatype = _t[1].lower()
            else:
                tags[_t[0].strip()] = _t[1].strip()
        tags["_value"] = v
        # NOTE: ``addition`` was previously a mutable default argument
        # ({}); using None avoids state shared between calls.
        if addition:
            for k in addition.keys():
                tags[k] = addition[k]
        pack = [prefix + m[0], int(m[1]), tags]
        # Like RRD databases, the CF datatype could be one of GAUGE,
        # COUNTER and DERIVE.
        if cf_datatype == "gauge":
            pack = self.data_parser.gauge(pack)
        elif cf_datatype == "counter":
            pack = self.data_parser.counter(pack)
        elif cf_datatype == "derive":
            pack = self.data_parser.derive(pack)
        else:
            return False
        if pack is None:
            log("Metric %s does not have a valid history data, waiting for next circle..." % m[0], 1)
            return None
        else:
            log("Write validated metric %s to local FS..." % m[0], -1)
            return msgpack_pure.packs(pack)

    def write2Cache(self, outputs):
        """Validate all metric lines and pickle the encoded ones into a
        new uniquely-named cache file."""
        # Cache file name: <prefix><epoch>_<8 random chars>.dat, so the
        # files sort chronologically by creation time.
        fname = "".join((
            self.conf.cache_path, "/",
            self.conf.cache_file_prefix,
            str(int(time.time())),
            "_",
            ''.join(random.sample(string.ascii_letters + string.digits, 8)),
            ".dat",
        ))
        log("Writting current metrics to local FS...", -1)
        log("Open cache file %s" % fname, -1)
        valid_outputs = []
        for m in outputs:
            metric = self._getValidMetric(m, self.conf.metric_prefix, self.conf.tags)
            if metric:
                valid_outputs.append(metric)
            elif metric is False:
                log("Invalid metric string: %s (IGNORED)" % m, 1)
        if valid_outputs:
            # Context manager guarantees the handle is closed even if
            # pickling fails (the original left it open on error).
            with open(fname, "wb") as fcache:
                pickle.dump(valid_outputs, fcache)
        else:
            log("No new metrics generated, ignore.", 0)

    def logError(self, metric):
        """Record an internal error as a metric with value 1 at 'now'."""
        err_msg = [" ".join((
            metric,
            str(int(time.time())),
            "1"
        ))]
        self.write2Cache(err_msg)

    def _getCachedMsg(self):
        """Return (filename, metric list) for the oldest cache file, or
        (None, None) when the cache directory is empty."""
        fname = "".join((
            self.conf.cache_path, "/",
            self.conf.cache_file_prefix,
            "*.dat",
        ))
        cache_list = glob.glob(fname)
        cache_list.sort()
        if not cache_list:
            return None, None
        # File names embed the creation timestamp, so the first entry of
        # the sorted list is the oldest. Close the handle promptly (the
        # original ``pickle.load(open(...))`` leaked it).
        with open(cache_list[0], "rb") as fcache:
            return cache_list[0], pickle.load(fcache)

    def sendAllCache(self, sock):
        """Send every cached metric file to the fluentd server, deleting
        each file once all of its metrics are sent.

        Returns True when the cache is drained, False on a send failure
        (the failing file is kept for the next attempt).
        """
        log("Sending all cached message to remote server...", -1)
        while True:
            fname, msg = self._getCachedMsg()
            if fname:
                log("Sending cache file %s to server..." % os.path.basename(fname), -1)
                for _msg in msg:
                    if not sock.send(_msg):
                        # Record the failure itself as a metric; it will be
                        # delivered together with the rest next time.
                        self.logError("collector.error.send")
                        log("Could not connect to the fluentd server, metrics will be sent next time.", 2)
                        return False
                log("Successful sent metrics to server in cache file %s" % os.path.basename(fname), -1)
                os.remove(fname)
            else:
                return True
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Heat
"""
import logging as sys_logging
import os
from eventlet.green import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import wsgi
# Module-level logger for this configuration module.
LOG = logging.getLogger(__name__)
# [paste_deploy] group: controls which paste pipeline flavor and which
# paste config file are used to assemble the WSGI applications.
paste_deploy_group = cfg.OptGroup('paste_deploy')
paste_deploy_opts = [
    cfg.StrOpt('flavor',
               help=_("The flavor to use.")),
    cfg.StrOpt('api_paste_config', default="api-paste.ini",
               help=_("The API paste config file to use."))]
# DEFAULT-group service options: periodic-task cadence, endpoint URLs
# handed to instances, stack-user/domain identities and engine sizing.
service_opts = [
    cfg.IntOpt('periodic_interval',
               default=60,
               help=_('Seconds between running periodic tasks.')),
    cfg.StrOpt('heat_metadata_server_url',
               default="",
               help=_('URL of the Heat metadata server.')),
    cfg.StrOpt('heat_waitcondition_server_url',
               default="",
               help=_('URL of the Heat waitcondition server.')),
    cfg.StrOpt('heat_watch_server_url',
               default="",
               help=_('URL of the Heat CloudWatch server.')),
    cfg.StrOpt('instance_connection_is_secure',
               default="0",
               help=_('Instance connection to CFN/CW API via https.')),
    cfg.StrOpt('instance_connection_https_validate_certificates',
               default="1",
               help=_('Instance connection to CFN/CW API validate certs if '
                      'SSL is used.')),
    cfg.StrOpt('region_name_for_services',
               help=_('Default region name used to get services endpoints.')),
    cfg.StrOpt('heat_stack_user_role',
               default="heat_stack_user",
               help=_('Keystone role for heat template-defined users.')),
    cfg.StrOpt('stack_user_domain_id',
               deprecated_opts=[cfg.DeprecatedOpt('stack_user_domain',
                                                  group=None)],
               help=_('Keystone domain ID which contains heat '
                      'template-defined users. If this option is set, '
                      'stack_user_domain_name option will be ignored.')),
    cfg.StrOpt('stack_user_domain_name',
               help=_('Keystone domain name which contains heat '
                      'template-defined users. If `stack_user_domain_id` '
                      'option is set, this option is ignored.')),
    cfg.StrOpt('stack_domain_admin',
               help=_('Keystone username, a user with roles sufficient to '
                      'manage users and projects in the stack_user_domain.')),
    cfg.StrOpt('stack_domain_admin_password',
               secret=True,
               help=_('Keystone password for stack_domain_admin user.')),
    cfg.IntOpt('max_template_size',
               default=524288,
               help=_('Maximum raw byte size of any template.')),
    cfg.IntOpt('max_nested_stack_depth',
               default=5,
               help=_('Maximum depth allowed when using nested stacks.')),
    cfg.IntOpt('num_engine_workers',
               default=processutils.get_worker_count(),
               help=_('Number of heat-engine processes to fork and run.'))]
# DEFAULT-group engine options: behavioral knobs for the heat-engine
# service (plugins, quotas, timeouts, software-config transports, etc.).
engine_opts = [
    cfg.StrOpt('instance_user',
               default='ec2-user',
               help=_("The default user for new instances. This option "
                      "is deprecated and will be removed in the Juno release. "
                      "If it's empty, Heat will use the default user set up "
                      "with your cloud image (for OS::Nova::Server) or "
                      "'ec2-user' (for AWS::EC2::Instance).")),
    cfg.ListOpt('plugin_dirs',
                default=['/usr/lib64/heat', '/usr/lib/heat',
                         '/usr/local/lib/heat', '/usr/local/lib64/heat'],
                help=_('List of directories to search for plug-ins.')),
    cfg.StrOpt('environment_dir',
               default='/etc/heat/environment.d',
               help=_('The directory to search for environment files.')),
    cfg.StrOpt('deferred_auth_method',
               choices=['password', 'trusts'],
               default='trusts',
               help=_('Select deferred auth method, '
                      'stored password or trusts.')),
    cfg.ListOpt('trusts_delegated_roles',
                default=[],
                help=_('Subset of trustor roles to be delegated to heat.'
                       ' If left unset, all roles of a user will be'
                       ' delegated to heat when creating a stack.')),
    cfg.IntOpt('max_resources_per_stack',
               default=1000,
               help=_('Maximum resources allowed per top-level stack.')),
    cfg.IntOpt('max_stacks_per_tenant',
               default=100,
               help=_('Maximum number of stacks any one tenant may have'
                      ' active at one time.')),
    cfg.IntOpt('action_retry_limit',
               default=5,
               help=_('Number of times to retry to bring a '
                      'resource to a non-error state. Set to 0 to disable '
                      'retries.')),
    cfg.IntOpt('event_purge_batch_size',
               default=10,
               help=_("Controls how many events will be pruned whenever a "
                      "stack's events exceed max_events_per_stack. Set this "
                      "lower to keep more events at the expense of more "
                      "frequent purges.")),
    cfg.IntOpt('max_events_per_stack',
               default=1000,
               help=_('Maximum events that will be available per stack. Older'
                      ' events will be deleted when this is reached. Set to 0'
                      ' for unlimited events per stack.')),
    cfg.IntOpt('stack_action_timeout',
               default=3600,
               help=_('Timeout in seconds for stack action (ie. create or'
                      ' update).')),
    cfg.IntOpt('error_wait_time',
               default=240,
               help=_('Error wait time in seconds for stack action (ie. create'
                      ' or update).')),
    cfg.IntOpt('engine_life_check_timeout',
               default=2,
               help=_('RPC timeout for the engine liveness check that is used'
                      ' for stack locking.')),
    cfg.BoolOpt('enable_cloud_watch_lite',
                default=True,
                help=_('Enable the legacy OS::Heat::CWLiteAlarm resource.')),
    cfg.BoolOpt('enable_stack_abandon',
                default=False,
                help=_('Enable the preview Stack Abandon feature.')),
    cfg.BoolOpt('enable_stack_adopt',
                default=False,
                help=_('Enable the preview Stack Adopt feature.')),
    cfg.BoolOpt('convergence_engine',
                default=False,
                help=_('Enables engine with convergence architecture. All '
                       'stacks with this option will be created using '
                       'convergence engine .')),
    cfg.StrOpt('default_software_config_transport',
               choices=['POLL_SERVER_CFN',
                        'POLL_SERVER_HEAT',
                        'POLL_TEMP_URL'],
               default='POLL_SERVER_CFN',
               help=_('Template default for how the server should receive the '
                      'metadata required for software configuration. '
                      'POLL_SERVER_CFN will allow calls to the cfn API action '
                      'DescribeStackResource authenticated with the provided '
                      'keypair (requires enabled heat-api-cfn). '
                      'POLL_SERVER_HEAT will allow calls to the '
                      'Heat API resource-show using the provided keystone '
                      'credentials (requires keystone v3 API, and configured '
                      'stack_user_* config options). '
                      'POLL_TEMP_URL will create and populate a '
                      'Swift TempURL with metadata for polling (requires '
                      'object-store endpoint which supports TempURL).')),
    cfg.StrOpt('default_deployment_signal_transport',
               choices=['CFN_SIGNAL',
                        'TEMP_URL_SIGNAL',
                        'HEAT_SIGNAL'],
               default='CFN_SIGNAL',
               help=_('Template default for how the server should signal to '
                      'heat with the deployment output values. CFN_SIGNAL '
                      'will allow an HTTP POST to a CFN keypair signed URL '
                      '(requires enabled heat-api-cfn). '
                      'TEMP_URL_SIGNAL will create a Swift TempURL to be '
                      'signaled via HTTP PUT (requires object-store endpoint '
                      'which supports TempURL). '
                      'HEAT_SIGNAL will allow calls to the Heat API '
                      'resource-signal using the provided keystone '
                      'credentials')),
    cfg.ListOpt('hidden_stack_tags',
                default=[],
                help=_('Stacks containing these tag names will be hidden. '
                       'Multiple tags should be given in a comma-delimited '
                       'list (eg. hidden_stack_tags=hide_me,me_too).')),
    cfg.StrOpt('onready',
               help=_('Deprecated.')),
    cfg.BoolOpt('stack_scheduler_hints',
                default=False,
                help=_('When this feature is enabled, scheduler hints'
                       ' identifying the heat stack context of a server'
                       ' resource are passed to the configured schedulers in'
                       ' nova, for server creates done using heat resource'
                       ' types OS::Nova::Server and AWS::EC2::Instance.'
                       ' heat_root_stack_id will be set to the id of the root'
                       ' stack of the resource, heat_stack_id will be set to'
                       ' the id of the resource\'s parent stack,'
                       ' heat_stack_name will be set to the name of the'
                       ' resource\'s parent stack, heat_path_in_stack will be'
                       ' set to a list of tuples,'
                       ' (stackresourcename, stackname) with list[0] being'
                       ' (None, rootstackname), and heat_resource_name will'
                       ' be set to the resource\'s name.')),
    cfg.BoolOpt('encrypt_parameters_and_properties',
                default=False,
                help=_('Encrypt template parameters that were marked as'
                       ' hidden and also all the resource properties before'
                       ' storing them in database.'))]
# DEFAULT-group RPC options: identity of this engine node on the bus.
rpc_opts = [
    cfg.StrOpt('host',
               default=socket.gethostname(),
               help=_('Name of the engine node. '
                      'This can be an opaque identifier. '
                      'It is not necessarily a hostname, FQDN, '
                      'or IP address.'))]
# [profiler] group: OSprofiler tracing switches.
profiler_group = cfg.OptGroup('profiler')
profiler_opts = [
    cfg.BoolOpt("profiler_enabled", default=False,
                help=_('If False fully disable profiling feature.')),
    cfg.BoolOpt("trace_sqlalchemy", default=False,
                help=_("If False do not trace SQL requests."))
]
# [auth_password] group: multi-cloud password authentication.
auth_password_group = cfg.OptGroup('auth_password')
auth_password_opts = [
    cfg.BoolOpt('multi_cloud',
                default=False,
                help=_('Allow orchestration of multiple clouds.')),
    cfg.ListOpt('allowed_auth_uris',
                default=[],
                help=_('Allowed keystone endpoints for auth_uri when '
                       'multi_cloud is enabled. At least one endpoint needs '
                       'to be specified.'))]
# these options define baseline defaults that apply to all clients
default_clients_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the OpenStack service.')),
    cfg.StrOpt('ca_file',
               help=_('Optional CA cert file to use in SSL connections.')),
    cfg.StrOpt('cert_file',
               help=_('Optional PEM-formatted certificate chain file.')),
    cfg.StrOpt('key_file',
               help=_('Optional PEM-formatted file that contains the '
                      'private key.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate will not "
                       "be verified."))]
# these options can be defined for each client
# they must not specify defaults, since any options not defined in a client
# specific group is looked up on the generic group above
clients_opts = [
    cfg.StrOpt('endpoint_type',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the OpenStack service.')),
    cfg.StrOpt('ca_file',
               help=_('Optional CA cert file to use in SSL connections.')),
    cfg.StrOpt('cert_file',
               help=_('Optional PEM-formatted certificate chain file.')),
    cfg.StrOpt('key_file',
               help=_('Optional PEM-formatted file that contains the '
                      'private key.')),
    cfg.BoolOpt('insecure',
                help=_("If set, then the server's certificate will not "
                       "be verified."))]
# Extra option only meaningful for the [clients_heat] group.
heat_client_opts = [
    cfg.StrOpt('url',
               default='',
               help=_('Optional heat url in format like'
                      ' http://0.0.0.0:8004/v1/%(tenant_id)s.'))]
# Extra debug-logging switch shared by the nova and cinder client groups.
client_http_log_debug_opts = [
    cfg.BoolOpt('http_log_debug',
                default=False,
                help=_("Allow client's debug log output."))]
# [revision] group: build revision reported by the heat API.
revision_group = cfg.OptGroup('revision')
revision_opts = [
    cfg.StrOpt('heat_revision',
               default='unknown',
               help=_('Heat build revision. '
                      'If you would prefer to manage your build revision '
                      'separately, you can move this section to a different '
                      'file and add it as another config option.'))]
def startup_sanity_check():
    """Validate interdependent heat.conf options at service startup.

    Raises exception.Error when a stack-user domain is configured without
    domain-admin credentials, or when auth_encryption_key has an invalid
    length. Merely warns (legacy fallback) when no domain is configured.
    """
    domain_configured = (cfg.CONF.stack_user_domain_id or
                         cfg.CONF.stack_user_domain_name)
    if not domain_configured:
        # FIXME(shardy): Legacy fallback for folks using old heat.conf
        # files which lack domain configuration
        LOG.warn(_LW('stack_user_domain_id or stack_user_domain_name not '
                     'set in heat.conf falling back to using default'))
    elif not (cfg.CONF.stack_domain_admin and
              cfg.CONF.stack_domain_admin_password):
        raise exception.Error(_('heat.conf misconfigured, cannot '
                                'specify "stack_user_domain_id" or '
                                '"stack_user_domain_name" without '
                                '"stack_domain_admin" and '
                                '"stack_domain_admin_password"'))
    # AES key material must be exactly 128, 192 or 256 bits.
    if len(cfg.CONF.auth_encryption_key) not in (16, 24, 32):
        raise exception.Error(_('heat.conf misconfigured, auth_encryption_key '
                                'length must be 16, 24 or 32'))
def list_opts():
    """Yield (group name, option list) pairs for every option this module
    defines, in registration order."""
    # Options registered in the DEFAULT group.
    for default_group_opts in (rpc_opts, engine_opts, service_opts):
        yield None, default_group_opts
    # Named option groups.
    yield paste_deploy_group.name, paste_deploy_opts
    yield auth_password_group.name, auth_password_opts
    yield revision_group.name, revision_opts
    yield profiler_group.name, profiler_opts
    # Baseline client defaults, then one group per OpenStack client.
    yield 'clients', default_clients_opts
    for client in ('nova', 'swift', 'neutron', 'cinder',
                   'ceilometer', 'keystone', 'heat', 'glance', 'trove',
                   'sahara'):
        yield 'clients_' + client, clients_opts
    # Client-specific extras layered on top of the per-client groups.
    yield 'clients_heat', heat_client_opts
    yield 'clients_nova', client_http_log_debug_opts
    yield 'clients_cinder', client_http_log_debug_opts
# Register the named option groups and then every option declared above
# on the global configuration object so they can be read from heat.conf.
cfg.CONF.register_group(paste_deploy_group)
cfg.CONF.register_group(auth_password_group)
cfg.CONF.register_group(revision_group)
cfg.CONF.register_group(profiler_group)
for group, opts in list_opts():
    cfg.CONF.register_opts(opts, group=group)
def _get_deployment_flavor():
    """Return the paste_deploy.flavor config item formatted for appending
    to the application name ('' when unset, '-<flavor>' otherwise)."""
    flavor = cfg.CONF.paste_deploy.flavor
    if not flavor:
        return ''
    return '-' + flavor
def _get_deployment_config_file():
    """Return the absolute path of the configured paste config file, or
    None when oslo.config cannot locate it."""
    candidate = cfg.CONF.find_file(
        cfg.CONF.paste_deploy['api_paste_config'])
    return None if candidate is None else os.path.abspath(candidate)
def load_paste_app(app_name=None):
    """Build and return a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file.

    :param app_name: name of the application to load
    :raises RuntimeError when config file cannot be located or application
            cannot be loaded from config file
    """
    if app_name is None:
        app_name = cfg.CONF.prog
    # The deployment flavor is appended to the application name so the
    # appropriate paste pipeline is selected.
    app_name += _get_deployment_flavor()
    conf_file = _get_deployment_config_file()
    if conf_file is None:
        raise RuntimeError(_("Unable to locate config file"))
    try:
        wsgi_app = wsgi.paste_deploy_app(conf_file, app_name, cfg.CONF)
        # In debug mode, record the option values the service started with.
        if cfg.CONF.debug:
            cfg.CONF.log_opt_values(logging.getLogger(app_name),
                                    sys_logging.DEBUG)
        return wsgi_app
    except (LookupError, ImportError) as e:
        raise RuntimeError(_("Unable to load %(app_name)s from "
                             "configuration file %(conf_file)s."
                             "\nGot: %(e)r") % {'app_name': app_name,
                                                'conf_file': conf_file,
                                                'e': e})
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import random
import numpy as np
import os
import shutil
import logging
import paddle
import paddle.nn as nn
import paddle.utils as utils
import paddle.static as static
import paddle.nn.functional as F
import paddle.distributed.auto_parallel as auto
from paddle.fluid.initializer import NumpyArrayInitializer
from paddle.distributed.passes import new_pass, PassManager, PassContext
import paddle.distributed.fleet as fleet
from dist_pass_test_base import DistPassTestBase
# Emit INFO-level (and above) messages from the root logger during the run.
logging.getLogger().setLevel(logging.INFO)
# The whole test builds static-graph (declarative) programs.
paddle.enable_static()
# Filled in by get_model() according to the detected world size.
_global_parallel_strategy = None
_global_process_mesh = None
#np.set_printoptions(suppress=True)
class MLPLayer(nn.Layer):
    """Three stacked (LayerNorm -> Linear -> GELU -> Linear) sub-blocks.

    All three expand layers share one deterministically-seeded weight
    array, as do all three projection layers, so every run (and every
    rank) initializes identical parameters.
    """

    def __init__(self,
                 hidden_size=128,
                 intermediate_size=4 * 128,
                 initializer_range=0.02):
        super(MLPLayer, self).__init__()
        model_dim = hidden_size
        ffn_dim = intermediate_size
        # Fixed seed -> reproducible weights across processes.
        np.random.seed(2021)
        w_expand = np.random.normal(0, 0.02, size=(model_dim, ffn_dim))
        w_project = np.random.normal(0, 0.02, size=(ffn_dim, model_dim))
        attr_expand = paddle.ParamAttr(
            initializer=NumpyArrayInitializer(w_expand))
        attr_project = paddle.ParamAttr(
            initializer=NumpyArrayInitializer(w_project))
        bias_attr = None
        # Layer creation order is kept identical to preserve parameter
        # naming/registration order in the framework.
        self.linear0 = nn.Linear(
            model_dim, ffn_dim, attr_expand, bias_attr=bias_attr)
        self.linear1 = nn.Linear(
            ffn_dim, model_dim, attr_project, bias_attr=bias_attr)
        self.linear2 = nn.Linear(
            model_dim, ffn_dim, attr_expand, bias_attr=bias_attr)
        self.linear3 = nn.Linear(
            ffn_dim, model_dim, attr_project, bias_attr=bias_attr)
        self.linear4 = nn.Linear(
            model_dim, ffn_dim, attr_expand, bias_attr=bias_attr)
        self.linear5 = nn.Linear(
            ffn_dim, model_dim, attr_project, bias_attr=bias_attr)
        self.norm0 = nn.LayerNorm(model_dim, epsilon=1e-5)
        self.norm1 = nn.LayerNorm(model_dim, epsilon=1e-5)
        self.norm2 = nn.LayerNorm(model_dim, epsilon=1e-5)

    def forward(self, input):
        # Run the three identical sub-blocks in sequence.
        out = input
        for norm, expand, project in (
                (self.norm0, self.linear0, self.linear1),
                (self.norm1, self.linear2, self.linear3),
                (self.norm2, self.linear4, self.linear5)):
            out = norm(out)
            out = expand(out)
            out = F.gelu(out, approximate=True)
            out = project(out)
        return out
def mlp_forward(input, label, hidden_size):
    """Build the forward graph: shard the input under data parallelism,
    run the MLP and return the mean squared-error loss against label."""
    if _global_parallel_strategy == "dp":
        # Shard the batch dimension of the input across the process mesh.
        auto.shard_tensor(
            input,
            dist_attr={
                "process_mesh": _global_process_mesh,
                "dims_mapping": [0, -1]
            })
    model = MLPLayer(
        hidden_size=hidden_size,
        intermediate_size=4 * hidden_size,
        initializer_range=0.02)
    prediction = model(input)
    squared_err = paddle.nn.functional.square_error_cost(prediction, label)
    return paddle.mean(squared_err)
class TestGradientMergePass(DistPassTestBase):
    """Verifies the auto-parallel gradient-merge pass: accumulating k_steps=4
    micro-batches of size 8 should reproduce the losses of plain training
    with batch size 32 (up to rtol/atol tolerances)."""

    def init(self):
        # k_steps: micro-batches to accumulate before the update;
        # avg: average (rather than sum) the merged gradients.
        self._params_grads = None
        self._config = {"k_steps": 4, "avg": True}

    def apply_passes(self, main_prog, startup_prog):
        # params_grads is filled by optimizer.minimize() inside get_model().
        self._config["params_grads"] = self._params_grads
        pass_context = PassContext()
        auto_parallel_gradient_merge_pass = new_pass(
            "auto_parallel_gradient_merge_pass", self._config)
        auto_parallel_gradient_merge_pass.apply([main_prog], [startup_prog],
                                                pass_context)

    def test_result(self):
        # Baseline: 2 steps at batch size 32, no gradient merge.
        no_pass_rets = self._distributed_launch(
            model=None,
            apply_pass=False,
            gpus=[0],
            gradient_merge=False,
            batch_size=32,
            max_step=2)
        # Pass enabled: 8 steps at batch size 8; every 4 steps form one
        # effective batch of 32.
        pass_rets = self._distributed_launch(
            model=None,
            apply_pass=True,
            gpus=[0],
            gradient_merge=True,
            batch_size=8,
            max_step=8)
        # avg loss for gradient_merge pass
        avg_loss = 0
        pass_avg_ret_list = []
        for i, pass_ret in enumerate(pass_rets[0]):
            if (i + 1) % 4 == 0:
                avg_loss += pass_ret[0]
                pass_avg_ret_list.append([avg_loss / 4])
                avg_loss = 0
            else:
                avg_loss += pass_ret[0]
        # Each baseline loss must match the corresponding 4-step average.
        for no_pass_ret, pass_ret in zip(no_pass_rets[0], pass_avg_ret_list):
            print(f"no_pass_ret={no_pass_ret}, pass_ret={pass_ret}")
            self.assertTrue(
                np.isclose(
                    no_pass_ret,
                    pass_ret,
                    rtol=self.rtol,
                    atol=self.atol,
                    equal_nan=self.equal_nan))

    def get_model(self, place, gradient_merge, batch_size, max_step):
        """Build the serial program, let fleet's semi-auto planner derive the
        distributed programs, and return (main_prog, startup_prog, inputs,
        outputs, reader) as expected by DistPassTestBase."""
        # Fix every RNG so both launches see identical data and weights.
        paddle.seed(2021)
        random.seed(2021)
        np.random.seed(2021)
        hidden_size = 128

        global _global_parallel_strategy
        global _global_process_mesh
        world_size = paddle.distributed.get_world_size()
        if world_size == 1:
            _global_parallel_strategy = "dp"
            _global_process_mesh = auto.ProcessMesh([0])
        elif world_size == 2:
            _global_parallel_strategy = "dp"
            _global_process_mesh = auto.ProcessMesh([0, 1])

        train_program = static.Program()
        startup_program = static.Program()
        dist_strategy = fleet.DistributedStrategy()
        dist_strategy.semi_auto = True
        #if gradient_merge:
        #    dist_strategy.gradient_merge = True
        #    dist_strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
        fleet.init(is_collective=True, strategy=dist_strategy)

        with static.program_guard(train_program, startup_program), \
            utils.unique_name.guard():
            input = static.data(
                name="input", shape=[batch_size, hidden_size], dtype='float32')
            label = static.data(
                name="label", shape=[batch_size, 1], dtype='float32')
            input.stop_gradient = False
            loss = mlp_forward(input, label, hidden_size)
            optimizer = paddle.fluid.optimizer.SGDOptimizer(learning_rate=0.01)
            #optimizer = paddle.fluid.optimizer.Adam(learning_rate=0.01)
            optimizer = fleet.distributed_optimizer(optimizer)
            # minimize() also returns the distributed programs produced by
            # the semi-auto planner; params_grads feeds apply_passes().
            _, self._params_grads, dist_startup_prog, dist_main_prog = optimizer.minimize(
                loss, startup_program)

        # Fixed dataset of 128 samples, sliced into max_step mini-batches.
        input_data = np.random.random(size=(128, hidden_size)).astype('float32')
        label_data = np.random.random(size=(128, 1)).astype('float32')

        def reader():
            for i in range(max_step):
                x_data = input_data[i * batch_size:(i + 1) * batch_size, :]
                y_data = label_data[i * batch_size:(i + 1) * batch_size, :]
                yield x_data, y_data

        return dist_main_prog, dist_startup_prog, [input, label], [loss], reader
# Run the test suite directly; each test spawns its own distributed job.
if __name__ == "__main__":
    unittest.main()
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2015-2016 The Khronos Group Inc.
# Copyright (c) 2015-2016 Valve Corporation
# Copyright (c) 2015-2016 LunarG, Inc.
# Copyright (c) 2015-2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Chia-I Wu <olv@lunarg.com>
# Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
# Author: Jon Ashburn <jon@lunarg.com>
# Author: Gwan-gyeong Mun <kk.moon@samsung.com>
import sys
import vulkan
def generate_get_proc_addr_check(name):
    """Return C code that rejects a proc-address lookup unless the name
    pointer is non-NULL and starts with the "vk" prefix."""
    guard = " if (!{0} || {0}[0] != 'v' || {0}[1] != 'k')\n" \
            " return NULL;"
    return guard.format(name)
class Subcommand(object):
    """Base class for code-generation subcommands.

    Subclasses override the generate_* hooks; run() assembles their output
    and writes it either to ``self.outfile`` or to stdout.
    """

    def __init__(self, argv):
        self.argv = argv
        self.headers = vulkan.headers
        self.protos = vulkan.protos
        self.outfile = None

    def run(self):
        """Generate the output and write it to the chosen destination."""
        text = self.generate()
        if self.outfile:
            with open(self.outfile, "w") as fp:
                fp.write(text)
        else:
            print(text)

    def generate(self):
        """Join the non-empty sections with blank lines between them."""
        sections = [self.generate_copyright(),
                    self.generate_header(),
                    self.generate_body(),
                    self.generate_footer()]
        return "\n\n".join(s for s in sections if s)

    def generate_copyright(self):
        return """/* THIS FILE IS GENERATED. DO NOT EDIT. */
/*
 * Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 */"""

    def generate_header(self):
        """Emit one #include line per configured header."""
        lines = []
        for h in self.headers:
            lines.append("#include <" + h + ">")
        return "\n".join(lines)

    def generate_body(self):
        # Hook for subclasses; base produces no body section.
        return None

    def generate_footer(self):
        # Hook for subclasses; base produces no footer section.
        return None
class DispatchTableOpsSubcommand(Subcommand):
    """Generates the C helpers that fill a layer's device/instance dispatch
    tables by querying vkGetDeviceProcAddr / vkGetInstanceProcAddr for every
    known entry point, wrapping platform-specific (WSI) entry points in the
    matching #ifdef VK_USE_PLATFORM_* guards."""

    def __init__(self, argv):
        # Use the "all" proto/header lists so extension entry points are
        # included in the generated tables.
        self.argv = argv
        self.headers = vulkan.headers_all
        self.protos = vulkan.protos_all
        self.outfile = None

    def run(self):
        # Usage: <prefix> [outfile]; prefix names the generated functions.
        if len(self.argv) < 1:
            print("DispatchTableOpsSubcommand: <prefix> unspecified")
            return
        self.prefix = self.argv[0]
        if len(self.argv) > 2:
            print("DispatchTableOpsSubcommand: <prefix> [outfile]")
            return
        if len(self.argv) == 2:
            self.outfile = self.argv[1]
        super(DispatchTableOpsSubcommand, self).run()

    def generate_header(self):
        return "\n".join(["#include <vulkan/vulkan.h>",
                          "#include <vulkan/vk_layer.h>",
                          "#include <string.h>"])

    def _generate_init_dispatch(self, type):
        """Emit one static inline <prefix>_init_{device,instance}_dispatch_table()
        function for the given table ``type`` ("device" or anything else for
        instance).  The *_printed flags track which #ifdef platform guard is
        currently open so it can be closed before the next unguarded entry."""
        stmts = []
        func = []
        if type == "device":
            # GPA has to be first one and uses wrapped object
            stmts.append(" memset(table, 0, sizeof(*table));")
            stmts.append(" // Core device function pointers")
            stmts.append(" table->GetDeviceProcAddr = (PFN_vkGetDeviceProcAddr) gpa(device, \"vkGetDeviceProcAddr\");")
            KHR_printed = False
            EXT_printed = False
            Win32_printed = False
            XLIB_printed = False
            XCB_printed = False
            MIR_printed = False
            WAY_printed = False
            Android_printed = False
            for proto in self.protos:
                # Skip instance-level entry points; they belong in the
                # instance dispatch table below.
                if proto.name == "CreateInstance" or proto.name == "EnumerateInstanceExtensionProperties" or \
                  proto.name == "EnumerateInstanceLayerProperties" or proto.params[0].ty == "VkInstance" or \
                  proto.params[0].ty == "VkPhysicalDevice" or proto.name == "GetDeviceProcAddr":
                    continue
                # Close any platform guard that no longer applies.
                if Win32_printed and 'Win32' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_WIN32_KHR")
                    Win32_printed = False
                if XLIB_printed and 'Xlib' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_XLIB_KHR")
                    XLIB_printed = False
                if XCB_printed and 'Xcb' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_XCB_KHR")
                    XCB_printed = False
                if MIR_printed and 'Mir' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_MIR_KHR")
                    MIR_printed = False
                if WAY_printed and 'Wayland' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_WAYLAND_KHR")
                    WAY_printed = False
                if Android_printed and 'Android' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_ANDROID_KHR")
                    Android_printed = False
                # Open the guard required by this entry point, if any.
                if 'KHR' in proto.name and 'Win32' in proto.name:
                    if not Win32_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_WIN32_KHR")
                        Win32_printed = True
                if 'KHR' in proto.name and 'Xlib' in proto.name:
                    if not XLIB_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_XLIB_KHR")
                        XLIB_printed = True
                if 'KHR' in proto.name and 'Xcb' in proto.name:
                    if not XCB_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_XCB_KHR")
                        XCB_printed = True
                if 'KHR' in proto.name and 'Mir' in proto.name:
                    if not MIR_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_MIR_KHR")
                        MIR_printed = True
                if 'KHR' in proto.name and 'Wayland' in proto.name:
                    if not WAY_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_WAYLAND_KHR")
                        WAY_printed = True
                if 'KHR' in proto.name and 'Android' in proto.name:
                    if not Android_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_ANDROID_KHR")
                        Android_printed = True
                # One-time section banners for KHR/EXT extension pointers.
                if 'KHR' in proto.name and not KHR_printed:
                    stmts.append(" // KHR device extension function pointers")
                    KHR_printed = True
                if 'EXT' in proto.name and not EXT_printed:
                    stmts.append(" // EXT device extension function pointers")
                    EXT_printed = True
                stmts.append(" table->%s = (PFN_vk%s) gpa(device, \"vk%s\");" %
                  (proto.name, proto.name, proto.name))
            func.append("static inline void %s_init_device_dispatch_table(VkDevice device,"
                % self.prefix)
            func.append("%s VkLayerDispatchTable *table,"
                % (" " * len(self.prefix)))
            func.append("%s PFN_vkGetDeviceProcAddr gpa)"
                % (" " * len(self.prefix)))
        else:
            stmts.append(" memset(table, 0, sizeof(*table));")
            stmts.append(" // Core instance function pointers")
            stmts.append(" table->GetInstanceProcAddr = (PFN_vkGetInstanceProcAddr) gpa(instance, \"vkGetInstanceProcAddr\");")
            KHR_printed = False
            EXT_printed = False
            Win32_printed = False
            XLIB_printed = False
            XCB_printed = False
            MIR_printed = False
            WAY_printed = False
            Android_printed = False
            for proto in self.protos:
                # Keep only instance/physical-device entry points.
                if proto.params[0].ty != "VkInstance" and proto.params[0].ty != "VkPhysicalDevice" or \
                  proto.name == "CreateDevice" or proto.name == "GetInstanceProcAddr":
                    continue
                # Close any platform guard that no longer applies.
                if Win32_printed and 'Win32' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_WIN32_KHR")
                    Win32_printed = False
                if XLIB_printed and 'Xlib' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_XLIB_KHR")
                    XLIB_printed = False
                if XCB_printed and 'Xcb' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_XCB_KHR")
                    XCB_printed = False
                if MIR_printed and 'Mir' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_MIR_KHR")
                    MIR_printed = False
                if WAY_printed and 'Wayland' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_WAYLAND_KHR")
                    WAY_printed = False
                if Android_printed and 'Android' not in proto.name:
                    stmts.append("#endif // VK_USE_PLATFORM_ANDROID_KHR")
                    Android_printed = False
                # Open the guard required by this entry point, if any.
                if 'KHR' in proto.name and 'Win32' in proto.name:
                    if not Win32_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_WIN32_KHR")
                        Win32_printed = True
                if 'KHR' in proto.name and 'Xlib' in proto.name:
                    if not XLIB_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_XLIB_KHR")
                        XLIB_printed = True
                if 'KHR' in proto.name and 'Xcb' in proto.name:
                    if not XCB_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_XCB_KHR")
                        XCB_printed = True
                if 'KHR' in proto.name and 'Mir' in proto.name:
                    if not MIR_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_MIR_KHR")
                        MIR_printed = True
                if 'KHR' in proto.name and 'Wayland' in proto.name:
                    if not WAY_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_WAYLAND_KHR")
                        WAY_printed = True
                if 'KHR' in proto.name and 'Android' in proto.name:
                    if not Android_printed:
                        stmts.append("#ifdef VK_USE_PLATFORM_ANDROID_KHR")
                        Android_printed = True
                # One-time section banners for KHR/EXT extension pointers.
                if 'KHR' in proto.name and not KHR_printed:
                    stmts.append(" // KHR instance extension function pointers")
                    KHR_printed = True
                if 'EXT' in proto.name and not EXT_printed:
                    stmts.append(" // EXT instance extension function pointers")
                    EXT_printed = True
                stmts.append(" table->%s = (PFN_vk%s) gpa(instance, \"vk%s\");" %
                  (proto.name, proto.name, proto.name))
            func.append("static inline void %s_init_instance_dispatch_table(" % self.prefix)
            func.append("%s VkInstance instance," % (" " * len(self.prefix)))
            func.append("%s VkLayerInstanceDispatchTable *table," % (" " * len(self.prefix)))
            func.append("%s PFN_vkGetInstanceProcAddr gpa)" % (" " * len(self.prefix)))
        func.append("{")
        func.append("%s" % "\n".join(stmts))
        func.append("}")
        return "\n".join(func)

    def generate_body(self):
        body = [self._generate_init_dispatch("device"),
                self._generate_init_dispatch("instance")]
        return "\n\n".join(body)
class WinDefFileSubcommand(Subcommand):
    """Emits a Windows module-definition (.def) file listing the symbols a
    Vulkan loader/ICD/layer DLL must export."""

    def run(self):
        # Export sets keyed by library kind; "all" exports nothing extra.
        library_exports = {
            "all": [],
            "icd": [
                "vk_icdGetInstanceProcAddr",
            ],
            "layer": [
                "vkGetInstanceProcAddr",
                "vkGetDeviceProcAddr",
                "vkEnumerateInstanceLayerProperties",
                "vkEnumerateInstanceExtensionProperties"
            ],
            "layer_multi": [
                "multi2GetInstanceProcAddr",
                "multi1GetDeviceProcAddr"
            ]
        }
        argc = len(self.argv)
        if argc < 2 or argc > 3 or self.argv[1] not in library_exports:
            print("WinDefFileSubcommand: <library-name> {%s} [outfile]" %
                  "|".join(library_exports.keys()))
            return
        self.library = self.argv[0]
        # The multi-layer sample DLL has its own prefixed entry points.
        if self.library == "VkLayer_multi":
            self.exports = library_exports["layer_multi"]
        else:
            self.exports = library_exports[self.argv[1]]
        if argc == 3:
            self.outfile = self.argv[2]
        super(WinDefFileSubcommand, self).run()

    def generate_copyright(self):
        return """; THIS FILE IS GENERATED. DO NOT EDIT.
;;;; Begin Copyright Notice ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Vulkan
;
; Copyright (c) 2015-2016 The Khronos Group Inc.
; Copyright (c) 2015-2016 Valve Corporation
; Copyright (c) 2015-2016 LunarG, Inc.
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
;;;; End Copyright Notice ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"""

    def generate_header(self):
        return "; The following is required on Windows, for exporting symbols from the DLL"

    def generate_body(self):
        """Produce the LIBRARY/EXPORTS stanza.

        The swapchain layer must not export the two instance-enumeration
        entry points (equivalent to the original ``A or (B and C)``
        precedence of the combined condition).
        """
        swapchain_skips = ("vkEnumerateInstanceExtensionProperties",
                          "vkEnumerateInstanceLayerProperties")
        lines = ["LIBRARY " + self.library, "EXPORTS"]
        for proto in self.exports:
            if self.library != "VkLayerSwapchain" or proto not in swapchain_skips:
                lines.append(proto)
        return "\n".join(lines)
def main():
    """CLI entry point: validate <wsi> and dispatch to a subcommand.

    Usage: <script> <wsi> <subcommand> [subcommand options]
    The <wsi> argument is validated against the known platform names but is
    otherwise unused here; the remaining arguments go to the subcommand.
    Exits with status 1 on bad usage.
    """
    # Recognized window-system-integration platforms.
    wsi = {
        "Win32",
        "Android",
        "Xcb",
        "Xlib",
        "Wayland",
        "Mir",
        "Display",
        "AllPlatforms"
    }
    subcommands = {
        "dispatch-table-ops": DispatchTableOpsSubcommand,
        "win-def-file": WinDefFileSubcommand,
    }
    if len(sys.argv) < 3 or sys.argv[1] not in wsi or sys.argv[2] not in subcommands:
        print("Usage: %s <wsi> <subcommand> [options]" % sys.argv[0])
        # BUG FIX: this was a bare `print` statement left over from Python 2;
        # under Python 3 that expression does nothing. Call it so the blank
        # separator line is actually emitted.
        print()
        # Also fixed the "sucommands" typo in the user-facing message.
        print("Available subcommands are: %s" % " ".join(subcommands))
        # sys.exit instead of the site-provided exit() builtin, which is not
        # guaranteed to exist in every interpreter configuration.
        sys.exit(1)
    subcmd = subcommands[sys.argv[2]](sys.argv[3:])
    subcmd.run()

if __name__ == "__main__":
    main()
|
|
# -*- coding: utf-8 -*-
import numpy as np
from scipy import sparse
from pygsp import utils
_logger = utils.build_logger(__name__)
@utils.filterbank_handler
def compute_cheby_coeff(f, m=30, N=None, *args, **kwargs):
    r"""
    Compute Chebyshev coefficients for a Filterbank.

    Parameters
    ----------
    f : Filter
        Filterbank with at least 1 filter
    m : int
        Maximum order of Chebyshev coeff to compute
        (default = 30)
    N : int
        Grid order used to compute quadrature
        (default = m + 1)
    i : int
        Index of the Filterbank element to compute
        (default = 0)

    Returns
    -------
    c : ndarray
        Matrix of Chebyshev coefficients
    """
    G = f.G
    i = kwargs.pop('i', 0)
    if not N:
        N = m + 1

    # Affine map sending the Chebyshev interval [-1, 1] onto [0, lmax].
    scale = G.lmax / 2.
    shift = G.lmax / 2.

    # Chebyshev quadrature nodes on [-1, 1].
    grid = np.arange(N)
    nodes = np.cos(np.pi * (grid + 0.5) / N)

    coeffs = np.zeros(m + 1)
    for k in range(m + 1):
        coeffs[k] = 2. / N * np.dot(f._kernels[i](scale * nodes + shift),
                                    np.cos(np.pi * k * (grid + 0.5) / N))
    return coeffs
def cheby_op(G, c, signal, **kwargs):
    r"""
    Chebyshev polynomial of graph Laplacian applied to vector.

    Parameters
    ----------
    G : Graph
    c : ndarray or list of ndarrays
        Chebyshev coefficients for a Filter or a Filterbank
    signal : ndarray
        Signal to filter

    Returns
    -------
    r : ndarray
        Result of the filtering
    """
    # Accept a plain Filter's 1-D coefficients as a one-row Filterbank.
    coeffs = np.atleast_2d(np.asarray(c))
    Nscales, M = coeffs.shape
    if M < 2:
        raise TypeError("The coefficients have an invalid shape")

    # Allocate output; 1-D signals produce a 1-D result.
    try:
        Nv = np.shape(signal)[1]
        r = np.zeros((G.N * Nscales, Nv))
    except IndexError:
        r = np.zeros((G.N * Nscales))

    # Affine map of the spectrum [0, lmax] onto [-1, 1].
    lower, upper = 0, G.lmax
    a1 = float(upper - lower) / 2.
    a2 = float(upper + lower) / 2.

    # Three-term Chebyshev recurrence on the shifted Laplacian.
    twf_old = signal
    twf_cur = (G.L.dot(signal) - a2 * signal) / a1

    idx = np.arange(G.N, dtype=int)
    for s in range(Nscales):
        r[idx + G.N * s] = 0.5 * coeffs[s, 0] * twf_old + coeffs[s, 1] * twf_cur

    factor = 2 / a1 * (G.L - a2 * sparse.eye(G.N))
    for k in range(2, M):
        twf_new = factor.dot(twf_cur) - twf_old
        for s in range(Nscales):
            r[idx + G.N * s] += coeffs[s, k] * twf_new
        twf_old, twf_cur = twf_cur, twf_new

    return r
def cheby_rect(G, bounds, signal, **kwargs):
    r"""
    Fast filtering using a Chebyshev approximation of an ideal rectangle
    (band-pass) filter.

    Parameters
    ----------
    G : Graph
    bounds : array_like
        The two pass-band edges ``[a, b]`` of the filter.
    signal : array_like
        Signal to filter, shape ``(G.N,)`` or ``(G.N, Nv)``.
    order : int (optional)
        Order of the Chebyshev polynomial (default: 30)

    Returns
    -------
    r : array_like
        Result of the filtering
    """
    if not (isinstance(bounds, (list, np.ndarray)) and len(bounds) == 2):
        raise ValueError('Bounds of wrong shape.')

    bounds = np.array(bounds)
    n_terms = int(kwargs.pop('order', 30) + 1)

    # Pre-allocate output matching the signal's dimensionality.
    try:
        width = np.shape(signal)[1]
        r = np.zeros((G.N, width))
    except IndexError:
        r = np.zeros((G.N))

    # Angles of the band edges after mapping [0, lmax] onto [-1, 1].
    b1, b2 = np.arccos(2. * bounds / G.lmax - 1.)

    # Shifted/scaled Laplacian driving the Chebyshev recurrence.
    factor = 4. / G.lmax * G.L - 2. * sparse.eye(G.N)
    T_old = signal
    T_cur = factor.dot(signal) / 2.

    # Closed-form Chebyshev coefficients of the rectangle filter.
    r = (b1 - b2) / np.pi * signal + 2. / np.pi * (np.sin(b1) - np.sin(b2)) * T_cur
    for k in range(2, n_terms):
        T_new = factor.dot(T_cur) - T_old
        r += 2. / (k * np.pi) * (np.sin(k * b1) - np.sin(k * b2)) * T_new
        T_old, T_cur = T_cur, T_new

    return r
def compute_jackson_cheby_coeff(filter_bounds, delta_lambda, m):
    r"""
    Compute the m+1 coefficients of the polynomial approximation of an ideal
    band-pass between a and b, within a spectrum range delimited by
    lambda_min and lambda_max.

    Parameters
    ----------
    filter_bounds : list
        [a, b], the pass band of the ideal filter.
    delta_lambda : list
        [lambda_min, lambda_max], the spectrum support.
    m : int
        Polynomial order; m + 1 coefficients are returned.

    Returns
    -------
    ch : ndarray
        Chebyshev coefficients.
    jch : ndarray
        Jackson-damped Chebyshev coefficients.

    Raises
    ------
    ValueError
        If the filter bounds fall outside the lambda range, or if
        lambda_min > lambda_max.

    References
    ----------
    :cite:`tremblay2016compressive`
    """
    # Parameters check.  BUG FIX: the original executed `raise()`, which
    # raises a TypeError ("exceptions must derive from BaseException")
    # instead of a meaningful error.
    if delta_lambda[0] > filter_bounds[0] or delta_lambda[1] < filter_bounds[1]:
        _logger.error("Bounds of the filter are out of the lambda values")
        raise ValueError("Bounds of the filter are out of the lambda values")
    elif delta_lambda[0] > delta_lambda[1]:
        _logger.error("lambda_min is greater than lambda_max")
        raise ValueError("lambda_min is greater than lambda_max")

    # Scaling and translating to the standard Chebyshev interval [-1, 1].
    a1 = (delta_lambda[1] - delta_lambda[0]) / 2
    a2 = (delta_lambda[1] + delta_lambda[0]) / 2

    # Scale the band-pass edges accordingly.  BUG FIX: work on local copies
    # so the caller's `filter_bounds` list is no longer mutated in place.
    lower = (filter_bounds[0] - a2) / a1
    upper = (filter_bounds[1] - a2) / a1

    # First compute the Chebyshev coefficients of the ideal rectangle.
    ch = np.empty(m + 1, dtype=float)
    ch[0] = (2 / (np.pi)) * (np.arccos(lower) - np.arccos(upper))
    for i in range(1, len(ch)):
        ch[i] = (2 / (np.pi * i)) * \
            (np.sin(i * np.arccos(lower)) - np.sin(i * np.arccos(upper)))

    # Then compute the Jackson damping coefficients.
    jch = np.empty(m + 1, dtype=float)
    alpha = (np.pi / (m + 2))
    for i in range(len(jch)):
        jch[i] = (1 / np.sin(alpha)) * \
            ((1 - i / (m + 2)) * np.sin(alpha) * np.cos(i * alpha) +
             (1 / (m + 2)) * np.cos(alpha) * np.sin(i * alpha))

    # Combine Jackson and Chebyshev coefficients (elementwise product).
    jch = ch * jch

    return ch, jch
def lanczos_op(f, s, order=30):
    r"""
    Perform the Lanczos approximation of the signal s.

    Parameters
    ----------
    f: Filter
    s : ndarray
        Signal to approximate.
    order : int
        Degree of the lanczos approximation. (default = 30)

    Returns
    -------
    L : ndarray
        lanczos approximation of s
    """
    G = f.G
    Nf = len(f.g)

    # Output shape depends on whether the signal is 1-D or 2-D.
    try:
        Nv = np.shape(s)[1]
        is2d = True
        c = np.zeros((G.N * Nf, Nv))
    except IndexError:
        Nv = 1
        is2d = False
        c = np.zeros((G.N * Nf))

    offsets = np.arange(G.N, dtype=int)
    for j in range(Nv):
        column = s[:, j] if is2d else s
        # Tridiagonalize, diagonalize the small H, and clamp negative
        # eigenvalue estimates to zero before evaluating the filters.
        V, H, _ = lanczos(G.L.toarray(), order, column)
        Eh, Uh = np.linalg.eig(H)
        Eh[Eh < 0] = 0
        fe = f.evaluate(Eh)
        V = np.dot(V, Uh)
        for i in range(Nf):
            block = np.dot(V, fe[:][i] * np.dot(V.T, column))
            if is2d:
                c[offsets + i * G.N, j] = block
            else:
                c[offsets + i * G.N] = block

    return c
def lanczos(A, order, x):
    r"""
    Lanczos iteration of ``A`` run independently on each column of ``x``.

    Parameters
    ----------
    A: ndarray
        Square matrix (presumably symmetric -- TODO confirm with callers).
    order : int
        Number of Lanczos iterations.
    x : ndarray
        Starting vector(s), shape (N,) or (N, M).

    Returns
    -------
    V : ndarray
        Lanczos basis vectors, one block of `order` columns per input column.
    H : ndarray
        Projected (tridiagonal) coefficients.
    orth : ndarray
        Per-iteration orthogonality diagnostic of V.
    """
    try:
        N, M = np.shape(x)
    except ValueError:
        # 1-D input: promote to a single column.
        N = np.shape(x)[0]
        M = 1
        x = x[:, np.newaxis]
    # normalization
    q = np.divide(x, np.kron(np.ones((N, 1)), np.linalg.norm(x, axis=0)))
    # initialization; hiv holds the starting column index of each block.
    hiv = np.arange(0, order*M, order)
    V = np.zeros((N, M*order))
    V[:, hiv] = q
    H = np.zeros((order + 1, M*order))
    r = np.dot(A, q)
    H[0, hiv] = np.sum(q*r, axis=0)
    r -= np.kron(np.ones((N, 1)), H[0, hiv])*q
    H[1, hiv] = np.linalg.norm(r, axis=0)
    orth = np.zeros((order))
    orth[0] = np.linalg.norm(np.dot(V.T, V) - M)
    for k in range(1, order):
        # Early exit when the residual norm vanishes (invariant subspace hit);
        # return the truncated basis and coefficients.
        if np.sum(np.abs(H[k, hiv + k - 1])) <= np.spacing(1):
            H = H[:k - 1, _sum_ind(np.arange(k), hiv)]
            V = V[:, _sum_ind(np.arange(k), hiv)]
            orth = orth[:k]
            return V, H, orth
        # Mirror the sub-diagonal entry onto the super-diagonal.
        H[k - 1, hiv + k] = H[k, hiv + k - 1]
        v = q
        q = r/np.tile(H[k - 1, k + hiv], (N, 1))
        V[:, k + hiv] = q
        r = np.dot(A, q)
        r -= np.tile(H[k - 1, k + hiv], (N, 1))*v
        H[k, k + hiv] = np.sum(np.multiply(q, r), axis=0)
        r -= np.tile(H[k, k + hiv], (N, 1))*q
        # The next line has to be checked
        r -= np.dot(V, np.dot(V.T, r))  # full reorthogonalization
        H[k + 1, k + hiv] = np.linalg.norm(r, axis=0)
        orth[k] = np.linalg.norm(np.dot(V.T, V) - M)
    # Drop the extra row used during the recurrence.
    H = H[np.ix_(np.arange(order), np.arange(order))]
    return V, H, orth
def _sum_ind(ind1, ind2):
ind = np.tile(np.ravel(ind1), (np.size(ind2), 1)).T + np.ravel(ind2)
return np.ravel(ind)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import htpc
import requests
from json import dumps
import logging
from cherrypy.lib.auth2 import require, member_of
from htpc.helpers import fix_basepath, striphttp
class Deluge(object):
    """HTPC-Manager module proxying the Deluge WebUI JSON-RPC API."""

    # One shared HTTP session keeps Deluge's auth cookie between calls.
    session = requests.Session()

    def __init__(self):
        self.logger = logging.getLogger('modules.deluge')
        htpc.MODULES.append({
            'name': 'Deluge',
            'id': 'deluge',
            'test': htpc.WEBDIR + 'deluge/ping',
            'fields': [
                {'type': 'bool', 'label': 'Enable', 'name': 'deluge_enable'},
                {'type': 'text', 'label': 'Menu name', 'name': 'deluge_name'},
                {'type': 'text', 'label': 'IP / Host *', 'name': 'deluge_host'},
                {'type': 'text', 'label': 'Port *', 'name': 'deluge_port'},
                {'type': 'bool', 'label': 'Use SSL', 'name': 'deluge_ssl'},
                {'type': 'text', 'label': 'Basepath', 'name': 'deluge_basepath'},
                {'type': 'password', 'label': 'Password', 'name': 'deluge_password'},
                {"type": "text", "label": "Reverse proxy link", "placeholder": "", "desc": "Reverse proxy link ex: https://deluge.domain.com", "name": "deluge_reverse_proxy_link"}
            ]
        })

    @cherrypy.expose()
    @require()
    def index(self):
        """Render the Deluge page template."""
        return htpc.LOOKUP.get_template('deluge.html').render(scriptname='deluge', webinterface=self.webinterface())

    def webinterface(self):
        """Build the WebUI URL from settings; a reverse-proxy link wins."""
        host = striphttp(htpc.settings.get('deluge_host', ''))
        port = str(htpc.settings.get('deluge_port', ''))
        deluge_basepath = fix_basepath(htpc.settings.get('deluge_basepath', ''))
        ssl = 's' if htpc.settings.get('deluge_ssl') else ''
        url = 'http%s://%s:%s%s' % (ssl, host, port, deluge_basepath)
        if htpc.settings.get('deluge_reverse_proxy_link'):
            url = htpc.settings.get('deluge_reverse_proxy_link')
        return url

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def connected(self):
        """Whether the WebUI is connected to a deluged daemon."""
        return self.fetch('web.connected')

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def connect(self, hostid):
        """Connect the WebUI to the daemon identified by hostid."""
        return self.fetch('web.connect', [hostid])

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def get_hosts(self):
        """List the daemons known to the WebUI."""
        return self.fetch('web.get_hosts')

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def queue(self):
        """Return per-torrent status for the queue view."""
        fields = ['progress', 'is_finished', 'ratio', 'name', 'download_payload_rate',
                  'upload_payload_rate', 'eta', 'state', 'hash', 'total_size']
        return self.fetch('core.get_torrents_status', [[], fields])

    def q2(self):
        """ not in use atm, todo """
        par = ["queue", "name", "total_wanted", "state", "progress", "num_seeds",
               "total_seeds", "num_peers", "total_peers", "download_payload_rate",
               "upload_payload_rate", "eta", "ratio", "distributed_copies", "is_auto_managed",
               "time_added", "tracker_host", "save_path", "total_done", "total_uploaded",
               "max_download_speed", "max_upload_speed", "seeds_peers_ratio"]
        return self.fetch('web.update_ui', [par, {}])

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def status(self):
        ''' quick '''
        # BUG FIX: the last two field names were fused into the single
        # (invalid) string 'payload_download_rate, state', so Deluge never
        # returned those fields.
        results = self.fetch('web.update_ui', [['payload_upload_rate', 'payload_download_rate', 'state'], {}])
        if results['error'] is None:
            # py. 2.6..
            d = dict(tuple(results['result']['filters']['state']))
            results['result']['filters']['state'] = d
        return results

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def stats(self):
        """Session-wide transfer statistics."""
        # BUG FIX: same fused-string field list as in status().
        fields = ['payload_upload_rate', 'payload_download_rate', 'state']
        return self.fetch('core.get_session_status', [fields])

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def start(self, torrentId):
        """Resume a single torrent."""
        return self.fetch('core.resume_torrent', [[torrentId]])

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def stop(self, torrentId=None):
        """Pause a single torrent."""
        return self.fetch('core.pause_torrent', [[torrentId]])

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def do_all(self, status):
        """Resume or pause every torrent."""
        if status == 'resume':
            method = 'core.resume_all_torrents'
        else:
            method = 'core.pause_all_torrents'
        return self.fetch(method)

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def daemon(self, status, port):
        """Start or stop the deluged daemon on the given port."""
        if status == 'start':
            action = 'web.start_daemon'
        else:
            action = 'web.stop_daemon'
        return self.fetch(action, [int(port)])

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def set_dlspeed(self, speed):
        """Set the global download limit; '0' means unlimited (-1)."""
        self.logger.debug('Set download speed to %s' % speed)
        if speed == '0':
            speed = -1
        return self.fetch('core.set_config', [{'max_download_speed': int(speed)}])

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def set_ulspeed(self, speed):
        """Set the global upload limit; '0' means unlimited (-1)."""
        if speed == '0':
            speed = -1
        self.logger.debug('Set upload speed to %s' % speed)
        return self.fetch('core.set_config', [{'max_upload_speed': int(speed)}])

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def addtorrent(self, torrent, filename=''):
        """Add a torrent from an uploaded file."""
        result = self.fetch('core.add_torrent_file', [filename, torrent, {}])
        return result

    '''
    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def getconfig(self):
        #should be removed
        return self.fetch('core.get_config')
    '''

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def get_speed(self):
        ''' speed limit '''
        result = self.fetch('core.get_config')
        # Dunno why the f, core.get_config_values didnt work...
        d = {}
        if result:
            d['max_download_speed'] = result['result']['max_download_speed']
            d['max_upload_speed'] = result['result']['max_upload_speed']
            result['result'] = d
        return result

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def remove(self, torrentId, removeData):
        """Remove a torrent; removeData ('0'/'1') also deletes its files."""
        removeDataBool = bool(int(removeData))
        return self.fetch('core.remove_torrent', [torrentId, removeDataBool])

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def to_client(self, link='', torrentname='', **kwargs):
        """Send a magnet/URL to Deluge (best effort: failures are logged,
        not raised, and yield None)."""
        try:
            self.logger.info('Added %s to deluge' % torrentname)
            # Find download path
            download_path = self.fetch('core.get_config_value', ['download_location'])
            if link.startswith('magnet'):
                path = link
            else:
                # deluge doesnt like a named download...
                link = link.split('?title=')[0]
                get_url = self.fetch('web.download_torrent_from_url', [link])
                path = get_url['result']
            return self.fetch('web.add_torrents', [[{'path': path, 'options': {'download_location': download_path['result']}}]])
        except Exception as e:
            self.logger.debug('Failed adding %s to deluge %s %s' % (torrentname, link, e))

    def fetch(self, method, arguments=None):
        """ Do request to Deluge api """
        if arguments is None:
            arguments = []
        host = striphttp(htpc.settings.get('deluge_host', ''))
        port = htpc.settings.get('deluge_port', '')
        deluge_basepath = fix_basepath(htpc.settings.get('deluge_basepath', '/'))
        ssl = 's' if htpc.settings.get('deluge_ssl') else ''
        url = 'http%s://%s:%s%sjson' % (ssl, host, port, deluge_basepath)
        self.logger.debug("Request deluge method: %s arguments %s" % (method, arguments))
        try:
            # format post data
            data = {'id': 1, 'method': method, 'params': arguments}
            headers = {'Content-Type': 'application/json'}
            response = self.session.post(url, data=dumps(data), headers=headers, verify=False)
            result = response.json()
            if result and result['error']:
                # Session probably expired: authenticate and retry once.
                self.logger.debug('Authenticating')
                self.session.post(url, data=dumps({"method": "auth.login", "params": [htpc.settings.get('deluge_password', '')], "id": 1}), headers=headers, verify=False)
                response = self.session.post(url, data=dumps(data), headers=headers, verify=False)
                # BUG FIX: parse the retried response; the original returned
                # the stale errored result from before authentication.
                result = response.json()
            return result
        except Exception as e:
            self.logger.error('Failed to fetch method %s arguments %s %s' % (method, arguments, e))
|
|
from rsqueakvm import constants, storage_classes
from rsqueakvm.model.numeric import W_Float, W_SmallInteger, W_LargeIntegerWord, W_LargeIntegerBig
from rsqueakvm.model.variable import W_BytesObject
from .util import read_image, open_reader, copy_to_module, cleanup_module, InterpreterForTest, slow_test, very_slow_test
def setup_module():
    # Load the mini image once for the whole module and publish the test
    # fixtures (space, interp, w, perform) as module globals.
    # NOTE: the local variable names are significant -- copy_to_module
    # exports locals() under these exact names.
    space, interp, _, _ = read_image("mini.image")
    w = space.w
    def perform_wrapper(receiver, selector, *args):
        # Accept either a Python string selector or an already-wrapped one.
        w_selector = None if isinstance(selector, str) else selector
        return interp.perform(receiver, selector, w_selector, list(args))
    perform = perform_wrapper
    copy_to_module(locals(), __name__)
    space.simulate_numeric_primitives.activate()
def teardown_module():
    # Drop the module globals installed by setup_module's copy_to_module.
    cleanup_module(__name__)
# ------ tests ------------------------------------------
def test_load_image():
    # Succeeding at setup_module (i.e. loading mini.image) is the assertion.
    pass
@very_slow_test
def test_make_new_class():
    # Compile a helper on SmallInteger's class that defines a brand-new
    # Object subclass, then check the resulting class shadow.
    sourcecode = """makeNewClass
            ^ Object
                subclass: #MySubForm
                instanceVariableNames: 'clippingBox '
                classVariableNames: 'ScreenSave '
                poolDictionaries: ''
                category: 'Graphics-Display Objects'"""
    perform(w(0).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_res = perform(w(0), "makeNewClass")
    # One declared instance variable ('clippingBox') -> instance size 1.
    assert isinstance(w_res.strategy, storage_classes.ClassShadow)
    assert w_res.strategy.name == "MySubForm"
    assert w_res.strategy._instance_size == 1
@very_slow_test
def test_change_class_layout():
    # Redefine MessageSet with two instance variables and verify the class
    # shadow reflects the enlarged layout (15 slots asserted below).
    sourcecode = """makeChangedClass
            ^ MessageSet subclass: #ChangedMessageSet
                instanceVariableNames: 'changeSet uselessVar'
                classVariableNames: ''
                poolDictionaries: ''
                category: 'Interface-Browser'
    """
    perform(w(0).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_res = perform(w(0), "makeChangedClass")
    assert w_res.strategy.name == "ChangedMessageSet"
    assert w_res.strategy._instance_size == 15
@very_slow_test
def test_become_one_way():
    # Primitive 72 (objectsForwardIdentityTo:) forwards every reference to
    # oldthing so that it resolves to newthing afterwards.
    sourcecode = """objectsForwardIdentityTo: to
    <primitive: 72>"""
    perform(space.w_Array, "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """doIt
            | from to oldthing newthing |
            Object subclass: #OldThing
                instanceVariableNames: ''
                classVariableNames: ' '
                poolDictionaries: ''
                category: 'Pypy'.
            Object subclass: #NewThing
                instanceVariableNames: 'otherThing'
                classVariableNames: ''
                poolDictionaries: ''
                category: 'Pypy'.
            oldthing := (Smalltalk at: #OldThing) new.
            newthing := (Smalltalk at: #NewThing) new.
            newthing instVarAt: 1 put: oldthing.
            from := Array with: oldthing.
            to := Array with: newthing.
            from objectsForwardIdentityTo: to.
            ^ Array with: (from at: 1) with: (to at: 1) with: oldthing with: newthing
    """
    perform(w(0).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    res_w = space.unwrap_array(perform(w(0), "doIt"))
    # After the one-way become, every handle (including the old one and the
    # forwarded ivar) must resolve to the same NewThing instance.
    assert res_w[0].class_shadow(space).name == "NewThing"
    assert res_w[0].fetch(space, 0) is res_w[0]
    assert res_w[0] is res_w[1]
    assert res_w[0] is res_w[2]
    assert res_w[0] is res_w[3]
def test_compile_method():
    """Compile a recursive fib method onto SmallInteger and execute it: 10 fib = 89."""
    sourcecode = """fib
^self < 2
ifTrue: [ 1 ]
ifFalse: [ (self - 1) fib + (self - 2) fib ]"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    assert perform(w(10), "fib").is_same_object(w(89))
def test_allInstances_in_context():
    """`Fraction allInstances` must see an instance created in the running context."""
    sourcecode = """aFraction
| a |
a := 5 asInteger.
a := a / 42 asInteger.
^ Fraction allInstances"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "aFraction")
    result_w = space.unwrap_array(w_result)
    # The single 5/42 fraction created above is the only live Fraction.
    assert len(result_w) == 1
    pointers_w = result_w[0].fetch_all(space)
    # Fraction layout: numerator first, denominator second.
    assert pointers_w[0].value == 5
    assert pointers_w[1].value == 42
def test_become():
    """Two-way become: p1 and p2 swap identities in place.

    The Smalltalk method returns a distinct small-integer error code for each
    failed check and 42 on full success, so the single assertion below
    pinpoints which step broke.
    """
    sourcecode = """
testBecome
| p1 p2 a |
p1 := 1@2.
p2 := #(3 4 5).
a := p1 -> p2.
(1@2 = a key) ifFalse: [^1].
(#(3 4 5) = a value) ifFalse: [^2].
(p1 -> p2 = a) ifFalse: [^3].
(p1 == a key) ifFalse: [^4].
(p2 == a value) ifFalse: [^5].
p1 become: p2.
(1@2 = a value) ifFalse: [^6].
(3 = (a key at: 1)) ifFalse: [^7].
(4 = (a key at: 2)) ifFalse: [^8].
(5 = (a key at: 3)) ifFalse: [^9].
(p1 -> p2 = a) ifFalse: [^10].
(p1 == a key) ifFalse: [^11].
(p2 == a value) ifFalse: [^12].
^42"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "testBecome")
    assert space.unwrap_int(w_result) == 42
def test_cached_methoddict():
    """Recompiling a method must invalidate the method-dictionary cache.

    The first fib variant adds 1 per recursion (5 fib = 15); after installing
    the plain variant, lookups must see the new code (10 fib = 89).
    """
    sourcecode = """fib
^self < 2
ifTrue: [ 1 ]
ifFalse: [ ((self - 1) fib + (self - 2) fib) + 1 ]"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    assert perform(w(5), "fib").is_same_object(w(15))
    sourcecode = """fib
^self < 2
ifTrue: [ 1 ]
ifFalse: [ (self - 1) fib + (self - 2) fib ]"""
    # Same selector, new body: a stale cache would still answer the old result.
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    assert perform(w(10), "fib").is_same_object(w(89))
def test_compiling_float():
    """A float literal compiles to a W_Float carrying the exact value."""
    sourcecode = """aFloat
^ 1.1"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "aFloat")
    assert isinstance(w_result, W_Float)
    assert w_result.value == 1.1
def test_compiling_32bit_positive_integer():
    """16rFFFFFFFF overflows a SmallInteger only on 32-bit builds."""
    sourcecode = """aLargeInteger
^ 16rFFFFFFFF"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "aLargeInteger")
    if not constants.IS_64BIT:
        # Does not fit a tagged small int; needs a single-word large integer.
        assert isinstance(w_result, W_LargeIntegerWord)
    else:
        assert isinstance(w_result, W_SmallInteger)
def test_compiling_64bit_positive_integer():
    """16rFFFFFFFFFFFFFFFF needs a multi-word big int on 32-bit builds,
    a single-word large int on 64-bit builds; value survives either way."""
    sourcecode = """aLargeInteger
^ 16rFFFFFFFFFFFFFFFF"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "aLargeInteger")
    if not constants.IS_64BIT:
        assert isinstance(w_result, W_LargeIntegerBig)
    else:
        assert isinstance(w_result, W_LargeIntegerWord)
    assert w_result.unwrap_long_untranslated(space) == 0xFFFFFFFFFFFFFFFF
def test_compiling_128bit_positive_integer():
    """A 128-bit literal always needs the multi-word big-integer representation."""
    sourcecode = """aLargeInteger
^ 16rFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    w_result = perform(w(10), "aLargeInteger")
    assert isinstance(w_result, W_LargeIntegerBig)
    assert w_result.unwrap_long_untranslated(space) == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
def test_simulate_numericprim():
    """A numeric primitive absent from the VM is routed to the in-image simulation."""
    sourcecode = """absentPrimitive: anInt with: anotherInt
<primitive: 98>
^'numeric fallback for ', anInt asString, ' ', anotherInt asString"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """simulatePrimitive: aPrimitive args: args
^'numeric simulation for ', args first asString, ' ', args second asString"""
    w_sim = perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    # XXX the lookup for that selector is static so the simulation lookup would be failing
    interp.image.w_simulatePrimitive = w_sim
    w_result = perform(w(10), "absentPrimitive:with:", w(3), w(4))
    assert isinstance(w_result, W_BytesObject)
    # The simulation answered, not the Smalltalk fallback code.
    assert w_result.unwrap_string(space) == 'numeric simulation for 3 4'
def test_simulate_numericprim_fallback():
    """When the simulation itself signals failure, the Smalltalk fallback runs
    and receives the simulation's error code."""
    sourcecode = """absentPrimitive: anInt with: anotherInt
|errorCode|
<primitive: 98> "error: errorCode> is not implemented in the mini.image yet"
^'numeric fallback for ', anInt asString, ' ', anotherInt asString, ' because of ', errorCode asString"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    # Primitive 255 reports "simulation failed" back to the VM with a code.
    sourcecode = """metaPrimFailed: errorCode
<primitive: 255>"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """simulatePrimitive: aPrimitive args: args
^self metaPrimFailed: 123"""
    w_sim = perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    # XXX the lookup for that selector is static so the simulation lookup would be failing
    interp.image.w_simulatePrimitive = w_sim
    w_result = perform(w(10), "absentPrimitive:with:", w(3), w(4))
    assert isinstance(w_result, W_BytesObject)
    # The fallback ran and saw the error code set by metaPrimFailed:.
    assert w_result.unwrap_string(space) == 'numeric fallback for 3 4 because of 123'
def test_simulate_externalcall():
    """An external-call primitive (117) with an unknown plugin is routed to the
    in-image simulation as well."""
    sourcecode = """absentPrimitive: anInt with: anotherInt
| externalCallTarget |
"do not use <primitive: 'primitiveSimulation' module: 'MyPlugin'> as mini.image doesn't have that yet"
<primitive: 117>
externalCallTarget := #(MyPlugin primitiveSimulation).
^'externalcall fallback for ', anInt asString, ' ', anotherInt asString"""
    perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """simulatePrimitive: aPrimitive args: args
^'externalcall simulation for ', args first asString, ' ', args second asString"""
    w_sim = perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    # XXX the lookup for that selector is static so the simulation lookup would be failing
    interp.image.w_simulatePrimitive = w_sim
    w_result = perform(w(10), "absentPrimitive:with:", w(3), w(4))
    assert isinstance(w_result, W_BytesObject)
    assert w_result.unwrap_string(space) == 'externalcall simulation for 3 4'
def test_snapshotPrimitive(tmpdir):
    """Snapshot the image to a fresh file and reload it.

    Checks that the primitive answers false on the resuming side and that
    core globals (true/false/nil classes, special message selectors) survive
    the save/load round trip.
    """
    newname = str(tmpdir.join("test_snapshot.image"))
    space, interp, _, _ = read_image("mini.image")

    def perform(receiver, selector, *args):
        # Accept either a plain string selector or an already-wrapped one.
        w_selector = None if isinstance(selector, str) else selector
        return interp.perform(receiver, selector, w_selector, list(args))

    space.simulate_numeric_primitives.activate()
    space.set_system_attribute(constants.SYSTEM_ATTRIBUTE_IMAGE_NAME_INDEX, newname)
    w_result = perform(space.w_smalltalkdict, "snapshotPrimitive")
    # On the side that keeps running, the snapshot primitive answers false.
    assert w_result is space.w_false
    space2, interp2, image2, reader2 = read_image(newname)
    # BUG FIX: the reloaded object's class shadow must be resolved against the
    # reloaded space (space2), not the original one. The old dict's values
    # ('True', 'False', 'UndefinedObject') were never used, so iterate names.
    for f in ('w_true', 'w_false', 'w_nil'):
        assert (getattr(space, f).getclass(space).as_class_get_shadow(space).name
                == getattr(space2, f).getclass(space2).as_class_get_shadow(space2).name)
    for f in [
        'w_doesNotUnderstand',
        'w_mustBeBoolean'
    ]:
        assert space.unwrap_string(getattr(space, f)) == space2.unwrap_string(getattr(space2, f))
def test_convert_words_to_bytes():
    """Primitive 115 (changeClassTo:) can re-type a word object as bytes.

    Storing 16r01020304 in a Bitmap (word-addressed) and reinterpreting the
    same storage as a ByteArray exposes the byte order; the test image runs
    little-endian, so the first byte must be 4.
    """
    sourcecode = """primitiveChangeClassTo: anObject
<primitive: 115>
"""
    # The compiled-method object of this first compile was never used;
    # just install the primitive wrapper on Bitmap (dead `w_s =` removed).
    perform(space.w_Bitmap, "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    sourcecode = """calcEndianness
| wordThenBytes |
wordThenBytes := Bitmap with: 16r01020304.
wordThenBytes primitiveChangeClassTo: ByteArray basicNew.
wordThenBytes first = 4 ifTrue: [^ #little].
^ #big"""
    w_s = perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None))
    # Invoke via the compiled-method object returned by the compiler.
    assert perform(w(5), w_s).unwrap_string(space) == "little"
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for pytesmo.
This file was generated with PyScaffold 1.3, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import os
import sys
import inspect
from distutils.cmd import Command
import versioneer
import setuptools
from setuptools.command.test import test as TestCommand
from setuptools import setup
from distutils.extension import Extension
from distutils.command.build_ext import build_ext as _build_ext
import pkg_resources
class Cythonize(Command):
    """`setup.py cythonize`: regenerate the shipped C sources from the .pyx files."""
    # distutils requires this attribute even when there are no options.
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Make sure the compiled Cython files in the distribution are
        # up-to-date
        from Cython.Build import cythonize
        cythonize(['pytesmo/time_series/filters.pyx'])
class NumpyBuildExt(_build_ext):
    """build_ext variant that injects NumPy's C header directory into every
    extension at build time, so numpy need not be importable merely to load
    setup.py."""

    def build_extensions(self):
        numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
        for ext in self.extensions:
            # Idiom fix: `x not in y` instead of `not x in y`; avoid
            # appending the include dir twice on repeated builds.
            if hasattr(ext, 'include_dirs') and numpy_incl not in ext.include_dirs:
                ext.include_dirs.append(numpy_incl)
        _build_ext.build_extensions(self)
# Cython-generated C extension; its include dirs are filled in lazily by
# NumpyBuildExt during `build_ext`.
ext_modules = [Extension("pytesmo.time_series.filters",
                         ["pytesmo/time_series/filters.c"], include_dirs=[]), ]
# Absolute directory containing this setup.py, robust against being invoked
# from a different working directory.
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))
# Change these settings according to your needs
MAIN_PACKAGE = "pytesmo"
DESCRIPTION = "python Toolbox for the evaluation of soil moisture observations"
LICENSE = "BSD 3 Clause"
URL = "http://rs.geo.tuwien.ac.at/validation_tool/pytesmo/"
AUTHOR = "pytesmo Developers"
EMAIL = "christoph.paulik@geo.tuwien.ac.at"
# Toggles consumed by setup_package() to enable the `test` command's
# coverage / junit report generation.
COVERAGE_XML = False
COVERAGE_HTML = False
JUNIT_XML = False
# Add here all kinds of additional classifiers as defined under
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 4 - Beta',
               'Programming Language :: Python',
               'Programming Language :: Python :: 2.7',
               'Programming Language :: Python :: 3.3',
               'Programming Language :: Python :: 3.4']
# Add here console scripts like ['hello_world = pytesmo.module:function']
CONSOLE_SCRIPTS = []
# Versioneer configuration
versioneer.VCS = 'git'
versioneer.versionfile_source = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.versionfile_build = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.tag_prefix = 'v'  # tags are like v1.2.0
versioneer.parentdir_prefix = MAIN_PACKAGE + '-'
class PyTest(TestCommand):
    """`setup.py test`: run the suite with py.test, with optional coverage
    and junit-xml reporting."""
    user_options = [("cov=", None, "Run coverage"),
                    ("cov-xml=", None, "Generate junit xml report"),
                    ("cov-html=", None, "Generate junit html report"),
                    ("junitxml=", None, "Generate xml of test results")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.cov = None
        self.cov_xml = False
        self.cov_html = False
        self.junitxml = None

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Translate plain option values into py.test command-line arguments.
        if self.cov is not None:
            self.cov = ["--cov", self.cov, "--cov-report", "term-missing"]
            if self.cov_xml:
                self.cov.extend(["--cov-report", "xml"])
            if self.cov_html:
                self.cov.extend(["--cov-report", "html"])
        if self.junitxml is not None:
            self.junitxml = ["--junitxml", self.junitxml]

    def run_tests(self):
        try:
            import pytest
        except ImportError:
            # BUG FIX: the original bare `except:` also swallowed SystemExit
            # and KeyboardInterrupt; only a missing pytest should trigger this.
            raise RuntimeError("py.test is not installed, "
                               "run: pip install pytest")
        params = {"args": self.test_args}
        if self.cov:
            params["args"] += self.cov
            params["plugins"] = ["cov"]
        if self.junitxml:
            params["args"] += self.junitxml
        errno = pytest.main(**params)
        sys.exit(errno)
def sphinx_builder():
    """Return a distutils command class for building the docs with Sphinx.

    When Sphinx is not installed, a stub command is returned instead that
    raises at invocation time, keeping `setup.py` itself importable.
    """
    try:
        from sphinx.setup_command import BuildDoc
    except ImportError:
        class NoSphinx(Command):
            user_options = []

            def initialize_options(self):
                raise RuntimeError("Sphinx documentation is not installed, "
                                   "run: pip install sphinx")
        return NoSphinx

    class BuildSphinxDocs(BuildDoc):

        def run(self):
            if self.builder == "doctest":
                import sphinx.ext.doctest as doctest
                # Capture the DocTestBuilder class in order to return the total
                # number of failures when exiting
                ref = capture_objs(doctest.DocTestBuilder)
                BuildDoc.run(self)
                errno = ref[-1].total_failures
                sys.exit(errno)
            else:
                BuildDoc.run(self)
    return BuildSphinxDocs
class ObjKeeper(type):
    """Metaclass that records every instance its classes ever construct.

    Instances are logged in the shared class-level `instances` mapping,
    keyed by the constructing class.
    """
    instances = {}

    def __init__(cls, name, bases, dct):
        # Each class adopting this metaclass starts with an empty log.
        cls.instances[cls] = []

    def __call__(cls, *args, **kwargs):
        # Build the instance normally, then remember it before handing it out.
        obj = super(ObjKeeper, cls).__call__(*args, **kwargs)
        cls.instances[cls].append(obj)
        return obj
def capture_objs(cls):
    """Patch `cls` in its defining module with an instance-tracking clone.

    Re-creates the class under the ObjKeeper metaclass and installs it back
    into the class's home module, so instances constructed by third-party
    code afterwards are recorded. Returns the live list that accumulates
    those instances.
    """
    from six import add_metaclass
    module = inspect.getmodule(cls)
    name = cls.__name__
    keeper_class = add_metaclass(ObjKeeper)(cls)
    setattr(module, name, keeper_class)
    # Re-fetch from the module so we key the log on the patched class object.
    cls = getattr(module, name)
    return keeper_class.instances[cls]
def get_install_requirements(path):
    """Read a requirements file (path relative to setup.py) and return its
    non-empty lines as a list of requirement strings."""
    # Resource fix: close the file handle deterministically instead of
    # leaking it until garbage collection.
    with open(os.path.join(__location__, path)) as req_file:
        content = req_file.read()
    return [req for req in content.splitlines() if req != '']
def read(fname):
    """Return the contents of a file located next to setup.py (e.g. README.rst)."""
    # Resource fix: use a context manager instead of leaking the handle.
    with open(os.path.join(__location__, fname)) as f:
        return f.read()
def setup_package():
    """Assemble all metadata, commands and options, then call setuptools.setup()."""
    # Assemble additional setup commands
    cmdclass = versioneer.get_cmdclass()
    cmdclass['docs'] = sphinx_builder()
    cmdclass['doctest'] = sphinx_builder()
    cmdclass['test'] = PyTest
    cmdclass['cythonize'] = Cythonize
    cmdclass['build_ext'] = NumpyBuildExt
    # Some helper variables
    version = versioneer.get_version()
    docs_path = os.path.join(__location__, "docs")
    docs_build_path = os.path.join(docs_path, "_build")
    install_reqs = get_install_requirements("requirements.txt")
    # Per-command defaults; the 'doctest' entry reuses the docs settings but
    # forces the doctest builder.
    command_options = {
        'docs': {'project': ('setup.py', MAIN_PACKAGE),
                 'version': ('setup.py', version.split('-', 1)[0]),
                 'release': ('setup.py', version),
                 'build_dir': ('setup.py', docs_build_path),
                 'config_dir': ('setup.py', docs_path),
                 'source_dir': ('setup.py', docs_path)},
        'doctest': {'project': ('setup.py', MAIN_PACKAGE),
                    'version': ('setup.py', version.split('-', 1)[0]),
                    'release': ('setup.py', version),
                    'build_dir': ('setup.py', docs_build_path),
                    'config_dir': ('setup.py', docs_path),
                    'source_dir': ('setup.py', docs_path),
                    'builder': ('setup.py', 'doctest')},
        'test': {'test_suite': ('setup.py', 'tests'),
                 'cov': ('setup.py', 'pytesmo')}}
    # Module-level toggles switch on the optional reports.
    if JUNIT_XML:
        command_options['test']['junitxml'] = ('setup.py', 'junit.xml')
    if COVERAGE_XML:
        command_options['test']['cov_xml'] = ('setup.py', True)
    if COVERAGE_HTML:
        command_options['test']['cov_html'] = ('setup.py', True)
    setup(name=MAIN_PACKAGE,
          version=version,
          url=URL,
          description=DESCRIPTION,
          author=AUTHOR,
          author_email=EMAIL,
          license=LICENSE,
          long_description=read('README.rst'),
          classifiers=CLASSIFIERS,
          test_suite='tests',
          packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
          ext_modules=ext_modules,
          package_data={'pytesmo': [os.path.join('colormaps', '*.cmap')],
                        },
          install_requires=install_reqs,
          setup_requires=['six'],
          cmdclass=cmdclass,
          tests_require=['pytest-cov', 'pytest'],
          command_options=command_options,
          entry_points={'console_scripts': CONSOLE_SCRIPTS})


if __name__ == "__main__":
    setup_package()
|
|
from django.db.models import Q
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import gettext_lazy as _
from django_filters import ChoiceFilter, MultipleChoiceFilter
from mapentity.filters import PolygonFilter, PythonPolygonFilter
from geotrek.altimetry.filters import AltimetryPointFilterSet
from geotrek.core.models import Topology
from geotrek.authent.filters import StructureRelatedFilterSet
from geotrek.common.filters import OptionalRangeFilter, RightFilter
from geotrek.zoning.filters import (IntersectionFilterCity, IntersectionFilterDistrict,
IntersectionFilterRestrictedArea, IntersectionFilterRestrictedAreaType,
ZoningFilterSet)
from geotrek.zoning.models import City, District, RestrictedArea, RestrictedAreaType
from .models import Intervention, Project
if 'geotrek.signage' in settings.INSTALLED_APPS:
from geotrek.signage.models import Blade
if 'geotrek.outdoor' in settings.INSTALLED_APPS:
from geotrek.outdoor.models import Site, Course
class PolygonInterventionFilterMixin:
    """Filter interventions (or, via `lookup_queryset_in`, projects) by geometry.

    Collects every Topology / outdoor Site / outdoor Course whose geometry
    matches this filter's lookup expression against the given value(s),
    resolves the interventions targeting those objects (blades, sites and
    courses are handled through their own content types), then restricts the
    incoming queryset to the matching interventions.
    """

    def get_geom(self, value):
        # Subclasses override this to extract a geometry from the filter value.
        return value

    def filter(self, qs, values):
        if not values:
            return qs
        if not isinstance(values, list):
            values = [values]
        lookup = self.lookup_expr
        # Blade/Site/Course targets are resolved separately below, so their
        # content types are excluded from the generic topology pass.
        content_type_exclude = []
        if 'geotrek.signage' in settings.INSTALLED_APPS:
            blade_content_type = ContentType.objects.get_for_model(Blade)
            content_type_exclude.append(blade_content_type)
        if 'geotrek.outdoor' in settings.INSTALLED_APPS:
            site_content_type = ContentType.objects.get_for_model(Site)
            course_content_type = ContentType.objects.get_for_model(Course)
            content_type_exclude.append(site_content_type)
            content_type_exclude.append(course_content_type)
        # Gather ids of geometric objects matched by any of the values.
        topologies = []
        sites = []
        courses = []
        for value in values:
            topologies += Topology.objects.filter(**{'geom__%s' % lookup: self.get_geom(value)}).values_list('id', flat=True)
            if 'geotrek.outdoor' in settings.INSTALLED_APPS:
                sites += Site.objects.filter(**{'geom__%s' % lookup: self.get_geom(value)}).values_list('id', flat=True)
                courses += Course.objects.filter(**{'geom__%s' % lookup: self.get_geom(value)}).values_list('id', flat=True)
        topologies_intervention = Intervention.objects.existing().filter(target_id__in=topologies).exclude(
            target_type__in=content_type_exclude).distinct('pk').values_list('id', flat=True)
        interventions = list(topologies_intervention)
        if 'geotrek.signage' in settings.INSTALLED_APPS:
            # Blades hang off signage topologies; match them via their signage.
            blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))
            blades_intervention = Intervention.objects.existing().filter(target_id__in=blades,
                                                                         target_type=blade_content_type).values_list('id',
                                                                                                                     flat=True)
            interventions.extend(blades_intervention)
        if 'geotrek.outdoor' in settings.INSTALLED_APPS:
            sites_intervention = Intervention.objects.existing() \
                .filter(target_id__in=sites, target_type=site_content_type) \
                .values_list('id', flat=True)
            interventions.extend(sites_intervention)
            courses_intervention = Intervention.objects.existing() \
                .filter(target_id__in=courses, target_type=course_content_type) \
                .values_list('id', flat=True)
            interventions.extend(courses_intervention)
        # Project-side subclasses set lookup_queryset_in (e.g.
        # 'interventions__in') to filter projects through their interventions.
        if hasattr(self, 'lookup_queryset_in'):
            lookup_queryset = self.lookup_queryset_in
        else:
            lookup_queryset = 'pk__in'
        qs = qs.filter(**{'%s' % lookup_queryset: interventions})
        return qs
class InterventionIntersectionFilterRestrictedAreaType(PolygonInterventionFilterMixin,
                                                       IntersectionFilterRestrictedAreaType):
    """Interventions intersecting any restricted area of the selected type(s)."""

    def get_geom(self, value):
        return value.geom

    def filter(self, qs, values):
        # Expand the selected types into their concrete restricted areas.
        restricted_areas = RestrictedArea.objects.filter(area_type__in=values)
        if not restricted_areas and values:
            # Types were selected but contain no areas: nothing can match.
            return qs.none()
        return super().filter(qs, list(restricted_areas))
class InterventionIntersectionFilterRestrictedArea(PolygonInterventionFilterMixin,
                                                   IntersectionFilterRestrictedArea):
    """Interventions intersecting the selected restricted area(s)."""

    def get_geom(self, value):
        return value.geom
class InterventionIntersectionFilterCity(PolygonInterventionFilterMixin,
                                         IntersectionFilterCity):
    """Interventions intersecting the selected city geometry."""

    def get_geom(self, value):
        return value.geom
class InterventionIntersectionFilterDistrict(PolygonInterventionFilterMixin,
                                             IntersectionFilterDistrict):
    """Interventions intersecting the selected district geometry."""

    def get_geom(self, value):
        return value.geom
class PolygonTopologyFilter(PolygonInterventionFilterMixin, PolygonFilter):
    """Map-extent (bbox) filter resolving interventions through geometries."""
    pass
class ProjectIntersectionFilterCity(PolygonInterventionFilterMixin, RightFilter):
    """Projects having at least one intervention intersecting the selected city."""
    model = City

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Match geometries with `intersects` and restrict projects through
        # their interventions rather than by pk.
        self.lookup_expr = 'intersects'
        self.lookup_queryset_in = 'interventions__in'

    def get_geom(self, value):
        return value.geom
class ProjectIntersectionFilterDistrict(PolygonInterventionFilterMixin, RightFilter):
    """Projects having at least one intervention intersecting the selected district."""
    model = District

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.lookup_expr = 'intersects'
        self.lookup_queryset_in = 'interventions__in'

    def get_geom(self, value):
        return value.geom
class ProjectIntersectionFilterRestrictedArea(PolygonInterventionFilterMixin, RightFilter):
    """Projects having an intervention intersecting the selected restricted area."""
    model = RestrictedArea

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.lookup_expr = 'intersects'
        self.lookup_queryset_in = 'interventions__in'

    def get_geom(self, value):
        return value.geom
class ProjectIntersectionFilterRestrictedAreaType(PolygonInterventionFilterMixin, RightFilter):
    """Projects having an intervention intersecting any area of the selected type(s)."""
    model = RestrictedAreaType

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.lookup_expr = 'intersects'
        self.lookup_queryset_in = 'interventions__in'

    def filter(self, qs, values):
        # Expand the selected types into their concrete restricted areas.
        restricted_areas = RestrictedArea.objects.filter(area_type__in=values)
        if not restricted_areas and values:
            # Types were selected but contain no areas: nothing can match.
            return qs.none()
        return super().filter(qs, list(restricted_areas))

    def get_geom(self, value):
        return value.geom
class AltimetryInterventionFilterSet(AltimetryPointFilterSet):
    """Altimetry filters for interventions: 3D length plus ascent/descent/slope ranges."""
    length_3d = OptionalRangeFilter(field_name='length', label=_('length 3d'))
    ascent = OptionalRangeFilter(label=_('ascent'))
    descent = OptionalRangeFilter(label=_('descent'))
    slope = OptionalRangeFilter(label=_('slope'))
class InterventionFilterSet(AltimetryInterventionFilterSet, ZoningFilterSet, StructureRelatedFilterSet):
    """Filter set for interventions: altimetry, zoning, structure, year and target type."""
    # Model names an intervention can target; extended below when the
    # outdoor app is installed.
    ON_CHOICES = (('infrastructure', _("Infrastructure")), ('signage', _("Signage")), ('blade', _("Blade")),
                  ('topology', _("Path")), ('trek', _("Trek")), ('poi', _("POI")), ('service', _("Service")),
                  ('trail', _("Trail")))
    if 'geotrek.outdoor' in settings.INSTALLED_APPS:
        ON_CHOICES += (('course', _("Outdoor Course")), ('site', _("Outdoor Site")),)
    bbox = PolygonTopologyFilter(lookup_expr='intersects')
    year = MultipleChoiceFilter(choices=Intervention.objects.year_choices(),
                                field_name='date', lookup_expr='year', label=_("Year"))
    # Filters on the polymorphic target's model name.
    on = ChoiceFilter(field_name='target_type__model', choices=ON_CHOICES, label=_("On"), empty_label=_("On"))
    area_type = InterventionIntersectionFilterRestrictedAreaType(label=_('Restricted area type'), required=False,
                                                                 lookup_expr='intersects')
    area = InterventionIntersectionFilterRestrictedArea(label=_('Restricted area'), required=False,
                                                        lookup_expr='intersects')
    city = InterventionIntersectionFilterCity(label=_('City'), required=False, lookup_expr='intersects')
    district = InterventionIntersectionFilterDistrict(label=_('District'), required=False, lookup_expr='intersects')

    class Meta(StructureRelatedFilterSet.Meta):
        model = Intervention
        fields = StructureRelatedFilterSet.Meta.fields + [
            'status', 'type', 'stake', 'subcontracting', 'project', 'on',
        ]
class ProjectFilterSet(StructureRelatedFilterSet):
    """Filter set for projects: bbox, activity year and zoning intersections."""
    bbox = PythonPolygonFilter(field_name='geom')
    year = MultipleChoiceFilter(
        label=_("Year of activity"), method='filter_year',
        choices=lambda: Project.objects.year_choices()  # Could change over time
    )
    city = ProjectIntersectionFilterCity(label=_('City'), required=False)
    district = ProjectIntersectionFilterDistrict(label=_('District'), required=False)
    area_type = ProjectIntersectionFilterRestrictedAreaType(label=_('Restricted area type'), required=False)
    area = ProjectIntersectionFilterRestrictedArea(label=_('Restricted area'), required=False)

    class Meta(StructureRelatedFilterSet.Meta):
        model = Project
        fields = StructureRelatedFilterSet.Meta.fields + [
            'year', 'type', 'domain', 'contractors', 'project_owner',
            'project_manager', 'founders'
        ]

    def filter_year(self, qs, name, values):
        """Keep projects whose [begin_year, end_year] interval covers any selected year."""
        q = Q()
        for value in values:
            q |= Q(begin_year__lte=value, end_year__gte=value)
        return qs.filter(q)
|
|
import skimage.io as io
import numpy as np
import os
def pascal_segmentation_lut():
    """Build the class-number -> class-name look-up table for PASCAL VOC.

    Numbers 1..20 are the regular object categories, 0 is 'background',
    and 255 marks ambigious regions.

    Returns
    -------
    classes_lut : dict
        look-up table with number and correspondng class names
    """
    class_names = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
                   'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
                   'dog', 'horse', 'motorbike', 'person', 'potted-plant',
                   'sheep', 'sofa', 'train', 'tv/monitor', 'ambigious']
    # Regular classes keep their enumeration index ...
    classes_lut = {number: name for number, name in enumerate(class_names[:-1])}
    # ... while the special ambigious marker lives at index 255.
    classes_lut[255] = class_names[-1]
    return classes_lut
def get_pascal_segmentation_images_lists_txts(pascal_root):
    """Return the train/val/trainval image-list txt paths of a PASCAL VOC tree.

    Parameters
    ----------
    pascal_root : string
        Full path to the root of PASCAL VOC dataset.

    Returns
    -------
    full_filenames_txts : [string, string, string]
        Paths to train.txt, val.txt and trainval.txt, in that order.
    """
    # All three list files live in the segmentation image-sets folder.
    lists_folder = os.path.join(pascal_root, 'ImageSets/Segmentation')
    return [os.path.join(lists_folder, basename)
            for basename in ('train.txt', 'val.txt', 'trainval.txt')]
def readlines_with_strip(filename):
    """Read lines from the specified file with whitespace stripped on both sides.

    Each line has string.strip() applied, which also removes the trailing
    newline left behind by readlines().

    Parameters
    ----------
    filename : string
        Full path of the text file to read.

    Returns
    -------
    clean_lines : list of strings
        Stripped lines, in file order.
    """
    # Close the handle deterministically; also return a real list — under
    # Python 3 the old `map(...)` gave a one-shot iterator, which broke
    # callers that index or iterate the result more than once.
    with open(filename, 'r') as f:
        lines = f.readlines()
    return [line.strip() for line in lines]
def readlines_with_strip_array_version(filenames_array):
    """Apply readlines_with_strip() to each filename in an array.

    Parameters
    ----------
    filenames_array : array of strings
        Array of strings. Each specifies a path to a file.

    Returns
    -------
    clean_lines : list of (list of strings)
        Strings that were read from each file and cleaned up.
    """
    # Materialize a list so the result can be indexed and re-iterated on
    # Python 3 (the old `map(...)` returned a one-shot iterator there).
    return [readlines_with_strip(filename) for filename in filenames_array]
def add_full_path_and_extention_to_filenames(filenames_array, full_path, extention):
    """Concatenate a directory prefix and a file extention to each bare name.

    Turns names like 'cat' into 'full/path/to/somewhere/cat.jpg'.

    Parameters
    ----------
    filenames_array : array of strings
        Array of strings representing filenames
    full_path : string
        Full path string to be added on the left to each filename
    extention : string
        Extention string (without the dot) to be added on the right

    Returns
    -------
    full_filenames : list of strings
        Updated list of filenames.
    """
    # List comprehension instead of `map(lambda ...)`: returns a real list on
    # Python 3 (the old code gave a one-shot iterator there) and reads cleaner.
    return [os.path.join(full_path, name) + '.' + extention
            for name in filenames_array]
def add_full_path_and_extention_to_filenames_array_version(filenames_array_array, full_path, extention):
    """Array version of the add_full_path_and_extention_to_filenames() function.

    Applies add_full_path_and_extention_to_filenames() to each element of array.

    Parameters
    ----------
    filenames_array_array : array of array of strings
        Array of arrays of strings representing filenames
    full_path : string
        Full path string to be added on the left to each filename
    extention : string
        Extention string to be added on the right to each filename

    Returns
    -------
    full_filenames : list of (list of strings)
        Updated nested list of filenames.
    """
    # Materialize a list so the result is indexable/re-iterable on Python 3
    # (the old `map(...)` returned a one-shot iterator there).
    return [add_full_path_and_extention_to_filenames(names, full_path, extention)
            for names in filenames_array_array]
def get_pascal_segmentation_image_annotation_filenames_pairs(pascal_root):
    """Return (image, annotation) filenames pairs from PASCAL VOC segmentation dataset.

    Returns three dimensional array where first dimension represents the type
    of the dataset: train, val or trainval in the respective order. Second
    dimension represents the a pair of images in that belongs to a particular
    dataset. And third one is responsible for the first or second element in the
    dataset.

    Parameters
    ----------
    pascal_root : string
        Path to the PASCAL VOC dataset root that is usually named 'VOC2012'
        after being extracted from tar file.

    Returns
    -------
    image_annotation_filename_pairs :
        Array with filename pairs.
    """
    pascal_relative_images_folder = 'JPEGImages'
    pascal_relative_class_annotations_folder = 'SegmentationClass'
    images_extention = 'jpg'
    annotations_extention = 'png'
    pascal_images_folder = os.path.join(pascal_root, pascal_relative_images_folder)
    pascal_class_annotations_folder = os.path.join(pascal_root, pascal_relative_class_annotations_folder)
    pascal_images_lists_txts = get_pascal_segmentation_images_lists_txts(pascal_root)
    pascal_image_names = readlines_with_strip_array_version(pascal_images_lists_txts)
    images_full_names = add_full_path_and_extention_to_filenames_array_version(pascal_image_names,
                                                                               pascal_images_folder,
                                                                               images_extention)
    annotations_full_names = add_full_path_and_extention_to_filenames_array_version(pascal_image_names,
                                                                                    pascal_class_annotations_folder,
                                                                                    annotations_extention)
    # Combine so that we have [(images full filenames, annotation full names), .. ]
    # where each element in the array represent train, val, trainval sets.
    # Overall, we have 3 elements in the array.
    # NOTE(review): this code assumes Python 2 semantics where map/zip return
    # lists; under Python 3 these are one-shot iterators — confirm the
    # interpreter version before reusing the result more than once.
    temp = zip(images_full_names, annotations_full_names)
    # Now we should combine the elements of images full filenames annotation full names
    # so that we have pairs of respective image plus annotation
    # [[(pair_1), (pair_1), ..], [(pair_1), (pair_2), ..] ..]
    # Overall, we have 3 elements -- representing train/val/trainval datasets
    image_annotation_filename_pairs = map(lambda x: zip(*x), temp)
    return image_annotation_filename_pairs
def convert_pascal_berkeley_augmented_mat_annotations_to_png(pascal_berkeley_augmented_root):
    """ Creates a new folder in the root folder of the dataset with annotations stored in .png.

    The function accepts a full path to the root of Berkeley augmented Pascal VOC segmentation
    dataset and converts annotations that are stored in .mat files to .png files. It creates
    a new folder dataset/cls_png where all the converted files will be located. If this
    directory already exists the function does nothing. The Berkley augmented dataset
    can be downloaded from here:
    http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz

    Parameters
    ----------
    pascal_berkeley_augmented_root : string
        Full path to the root of augmented Berkley PASCAL VOC dataset.
    """
    # Imported lazily so the rest of the module works without scipy installed.
    import scipy.io

    def read_class_annotation_array_from_berkeley_mat(mat_filename, key='GTcls'):
        # Mat to png conversion for http://www.cs.berkeley.edu/~bharath2/codes/SBD/download.html
        # 'GTcls' key is for class segmentation
        # 'GTinst' key is for instance segmentation
        # Credit: https://github.com/martinkersner/train-DeepLab/blob/master/utils.py
        mat = scipy.io.loadmat(mat_filename, mat_dtype=True, squeeze_me=True, struct_as_record=False)
        return mat[key].Segmentation
    mat_file_extension_string = '.mat'
    png_file_extension_string = '.png'
    relative_path_to_annotation_mat_files = 'dataset/cls'
    relative_path_to_annotation_png_files = 'dataset/cls_png'
    mat_file_extension_string_length = len(mat_file_extension_string)
    annotation_mat_files_fullpath = os.path.join(pascal_berkeley_augmented_root,
                                                 relative_path_to_annotation_mat_files)
    annotation_png_save_fullpath = os.path.join(pascal_berkeley_augmented_root,
                                                relative_path_to_annotation_png_files)
    # Create the folder where all the converted png files will be placed
    # If the folder already exists, do nothing
    if not os.path.exists(annotation_png_save_fullpath):
        os.makedirs(annotation_png_save_fullpath)
    else:
        return
    mat_files_names = os.listdir(annotation_mat_files_fullpath)
    for current_mat_file_name in mat_files_names:
        # Strip the '.mat' suffix to reuse the base name for the .png output.
        current_file_name_without_extention = current_mat_file_name[:-mat_file_extension_string_length]
        current_mat_file_full_path = os.path.join(annotation_mat_files_fullpath,
                                                  current_mat_file_name)
        current_png_file_full_path_to_be_saved = os.path.join(annotation_png_save_fullpath,
                                                              current_file_name_without_extention)
        current_png_file_full_path_to_be_saved += png_file_extension_string
        annotation_array = read_class_annotation_array_from_berkeley_mat(current_mat_file_full_path)
        # TODO: hide 'low-contrast' image warning during saving.
        io.imsave(current_png_file_full_path_to_be_saved, annotation_array)
def get_pascal_berkeley_augmented_segmentation_images_lists_txts(pascal_berkeley_root):
    """Return full paths to the train/val image-list txt files of the Berkeley augmented PASCAL VOC.

    The returned files contain the names of images (and their respective
    annotations) used for segmentation in the Berkeley augmented PASCAL VOC.

    Parameters
    ----------
    pascal_berkeley_root : string
        Full path to the root of the PASCAL VOC Berkeley augmented dataset.

    Returns
    -------
    full_filenames_txts : [string, string]
        Paths of the train and val txt files with image names, in that order.
    """
    lists_folder = os.path.join(pascal_berkeley_root, 'dataset')
    # TODO: add function that will joing both train.txt and val.txt into trainval.txt
    return [os.path.join(lists_folder, txt_name)
            for txt_name in ('train.txt', 'val.txt')]
def get_pascal_berkeley_augmented_segmentation_image_annotation_filenames_pairs(pascal_berkeley_root):
    """Return (image, annotation) filename pairs from the PASCAL Berkeley VOC segmentation dataset.

    Returns a three dimensional array where the first dimension represents the
    dataset split (train, val -- in that order), the second dimension indexes
    the (image, annotation) pairs inside a split, and the third dimension
    selects either the image or the annotation filename of a pair.

    Parameters
    ----------
    pascal_berkeley_root : string
        Path to the PASCAL Berkeley VOC dataset root that is usually named
        'benchmark_RELEASE' after being extracted from the tar file.

    Returns
    -------
    image_annotation_filename_pairs : list
        One element per split; each element is a list of filename pairs.
    """
    pascal_relative_images_folder = 'dataset/img'
    pascal_relative_class_annotations_folder = 'dataset/cls_png'
    images_extention = 'jpg'
    annotations_extention = 'png'
    pascal_images_folder = os.path.join(pascal_berkeley_root, pascal_relative_images_folder)
    pascal_class_annotations_folder = os.path.join(pascal_berkeley_root, pascal_relative_class_annotations_folder)
    pascal_images_lists_txts = get_pascal_berkeley_augmented_segmentation_images_lists_txts(pascal_berkeley_root)
    pascal_image_names = readlines_with_strip_array_version(pascal_images_lists_txts)
    images_full_names = add_full_path_and_extention_to_filenames_array_version(pascal_image_names,
                                                                               pascal_images_folder,
                                                                               images_extention)
    annotations_full_names = add_full_path_and_extention_to_filenames_array_version(pascal_image_names,
                                                                                    pascal_class_annotations_folder,
                                                                                    annotations_extention)
    # For each split, pair the i-th image filename with the i-th annotation
    # filename. Materialize with list(): on Python 3 map()/zip() return
    # one-shot iterators, but callers index, re-iterate and concatenate
    # these results (e.g. with '+'), which requires real lists.
    image_annotation_filename_pairs = [list(zip(*split_filenames))
                                       for split_filenames in zip(images_full_names,
                                                                  annotations_full_names)]
    return image_annotation_filename_pairs
def get_pascal_berkeley_augmented_selected_image_annotation_filenames_pairs(pascal_berkeley_root, selected_names):
    """Return (image, annotation) filename pairs for selected names from the Berkeley PASCAL VOC dataset.

    Accepts selected file names from the PASCAL Berkeley VOC segmentation
    dataset and returns (image, annotation) pairs with full path and
    extension for those names.

    Parameters
    ----------
    pascal_berkeley_root : string
        Path to the PASCAL Berkeley VOC dataset root that is usually named
        'benchmark_RELEASE' after being extracted from the tar file.
    selected_names : array of strings
        Selected filenames from PASCAL VOC Berkeley that can be read from the
        txt files that come with the dataset.

    Returns
    -------
    image_annotation_pairs : list of (string, string)
        List of filename pairs with full names.
    """
    pascal_relative_images_folder = 'dataset/img'
    pascal_relative_class_annotations_folder = 'dataset/cls_png'
    images_extention = 'jpg'
    annotations_extention = 'png'
    pascal_images_folder = os.path.join(pascal_berkeley_root, pascal_relative_images_folder)
    pascal_class_annotations_folder = os.path.join(pascal_berkeley_root, pascal_relative_class_annotations_folder)
    images_full_names = add_full_path_and_extention_to_filenames(selected_names,
                                                                 pascal_images_folder,
                                                                 images_extention)
    annotations_full_names = add_full_path_and_extention_to_filenames(selected_names,
                                                                      pascal_class_annotations_folder,
                                                                      annotations_extention)
    # Materialize with list(): zip() returns a one-shot iterator on Python 3,
    # but callers index and concatenate these results with '+'.
    image_annotation_pairs = list(zip(images_full_names,
                                      annotations_full_names))
    return image_annotation_pairs
def get_pascal_selected_image_annotation_filenames_pairs(pascal_root, selected_names):
    """Return (image, annotation) filename pairs for selected names from the PASCAL VOC dataset.

    Accepts selected file names from the PASCAL VOC segmentation dataset and
    returns (image, annotation) pairs with full path and extension for those
    names.

    Parameters
    ----------
    pascal_root : string
        Path to the PASCAL VOC dataset root that is usually named 'VOC2012'
        after being extracted from the tar file.
    selected_names : array of strings
        Selected filenames from PASCAL VOC that can be read from the txt
        files that come with the dataset.

    Returns
    -------
    image_annotation_pairs : list of (string, string)
        List of filename pairs with full names.
    """
    pascal_relative_images_folder = 'JPEGImages'
    pascal_relative_class_annotations_folder = 'SegmentationClass'
    images_extention = 'jpg'
    annotations_extention = 'png'
    pascal_images_folder = os.path.join(pascal_root, pascal_relative_images_folder)
    pascal_class_annotations_folder = os.path.join(pascal_root, pascal_relative_class_annotations_folder)
    images_full_names = add_full_path_and_extention_to_filenames(selected_names,
                                                                 pascal_images_folder,
                                                                 images_extention)
    annotations_full_names = add_full_path_and_extention_to_filenames(selected_names,
                                                                      pascal_class_annotations_folder,
                                                                      annotations_extention)
    # Materialize with list(): zip() returns a one-shot iterator on Python 3,
    # but callers index and concatenate these results with '+'.
    image_annotation_pairs = list(zip(images_full_names,
                                      annotations_full_names))
    return image_annotation_pairs
def get_augmented_pascal_image_annotation_filename_pairs(pascal_root, pascal_berkeley_root, mode=2):
    """Return image/annotation filename pair train/val splits from the combined PASCAL VOC.

    Returns two arrays -- the train and validation splits respectively --
    each holding (image full filename, annotation full filename) pairs
    derived from PASCAL VOC and the PASCAL Berkeley augmented dataset.
    The Berkeley augmented dataset can be downloaded from:
    http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz
    Consider running convert_pascal_berkeley_augmented_mat_annotations_to_png() after extraction.
    The PASCAL VOC dataset can be downloaded from:
    http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
    Consider specifying root full names for both of them as arguments for
    this function after extracting them.

    The function has three types of train/val splits (credit matconvnet-fcn):
    Let BT, BV, PT, PV, and PX be the Berkeley training and validation
    sets and PASCAL segmentation challenge training, validation, and
    test sets. Let T, V, X be the final training, validation, and test
    sets.
    Mode 1::
        V = PV (same validation set as PASCAL)
    Mode 2:: (default)
        V = PV \\ BT (PASCAL val set that is not a Berkeley training
        image)
    Mode 3::
        V = PV \\ (BV + BT)
    In all cases:
        S = PT + PV + BT + BV
        X = PX (the test set is unchanged)
        T = (S \\ V) \\ X (the rest is training material)

    Parameters
    ----------
    pascal_root : string
        Path to the PASCAL VOC dataset root that is usually named 'VOC2012'
        after being extracted from the tar file.
    pascal_berkeley_root : string
        Path to the PASCAL Berkeley VOC dataset root that is usually named
        'benchmark_RELEASE' after being extracted from the tar file.
    mode : int
        The type of train/val data split (1, 2 or 3). Read the main function
        description for more info.

    Returns
    -------
    image_annotation_pairs : ([(string, string), ...], [(string, string), ...])
        Train and validation arrays of filename pairs with full names.

    Raises
    ------
    ValueError
        If ``mode`` is not 1, 2 or 3.
    """
    pascal_txts = get_pascal_segmentation_images_lists_txts(pascal_root=pascal_root)
    berkeley_txts = get_pascal_berkeley_augmented_segmentation_images_lists_txts(pascal_berkeley_root=pascal_berkeley_root)
    pascal_name_lists = readlines_with_strip_array_version(pascal_txts)
    berkeley_name_lists = readlines_with_strip_array_version(berkeley_txts)
    pascal_train_name_set, pascal_val_name_set, _ = map(set, pascal_name_lists)
    berkeley_train_name_set, berkeley_val_name_set = map(set, berkeley_name_lists)
    all_berkeley = berkeley_train_name_set | berkeley_val_name_set
    all_pascal = pascal_train_name_set | pascal_val_name_set
    everything = all_berkeley | all_pascal
    # Extract the validation subset based on the selected mode.
    # elif chain (instead of independent ifs) plus an explicit error for
    # unsupported modes -- previously an invalid mode left 'validation'
    # unbound and crashed with a NameError further down.
    if mode == 1:
        # 1449 validation images, 10582 training images
        validation = pascal_val_name_set
    elif mode == 2:
        # 904 validation images, 11127 training images
        validation = pascal_val_name_set - berkeley_train_name_set
    elif mode == 3:
        # 346 validation images, 11685 training images
        validation = pascal_val_name_set - all_berkeley
    else:
        raise ValueError("mode should be 1, 2 or 3; got {}".format(mode))
    # The rest of the dataset is for training
    train = everything - validation
    # Get the part that can be extracted from berkeley
    train_from_berkeley = train & all_berkeley
    # The rest of the data will be loaded from pascal
    train_from_pascal = train - train_from_berkeley
    train_from_berkeley_image_annotation_pairs = \
        get_pascal_berkeley_augmented_selected_image_annotation_filenames_pairs(pascal_berkeley_root,
                                                                                list(train_from_berkeley))
    train_from_pascal_image_annotation_pairs = \
        get_pascal_selected_image_annotation_filenames_pairs(pascal_root,
                                                             list(train_from_pascal))
    overall_train_image_annotation_filename_pairs = \
        train_from_berkeley_image_annotation_pairs + train_from_pascal_image_annotation_pairs
    # Pass a list for consistency with the training-set calls above.
    overall_val_image_annotation_filename_pairs = \
        get_pascal_selected_image_annotation_filenames_pairs(pascal_root,
                                                             list(validation))
    return overall_train_image_annotation_filename_pairs, overall_val_image_annotation_filename_pairs
|
|
import logging
import re
from threading import Event, Thread
from urllib.parse import unquote_plus, urlparse
import websocket
from streamlink import logger
from streamlink.buffers import RingBuffer
from streamlink.plugin import Plugin, PluginError
from streamlink.plugin.api import useragents, validate
from streamlink.stream.stream import Stream
from streamlink.stream.stream import StreamIO
log = logging.getLogger(__name__)
class TwitCasting(Plugin):
    """Streamlink plugin for live streams hosted on twitcasting.tv."""

    # NOTE: dots are escaped so the pattern cannot match look-alike hosts
    # (the previous pattern used bare '.', which matches any character).
    _url_re = re.compile(r"http(s)?://twitcasting\.tv/(?P<channel>[^/]+)", re.VERBOSE)
    _STREAM_INFO_URL = "https://twitcasting.tv/streamserver.php?target={channel}&mode=client"
    _STREAM_REAL_URL = "{proto}://{host}/ws.app/stream/{movie_id}/fmp4/bd/1/1500?mode={mode}"
    # Schema of the streamserver.php JSON response; validation guarantees
    # the keys used in _get_streams() are present and correctly typed.
    _STREAM_INFO_SCHEMA = validate.Schema({
        "movie": {
            "id": int,
            "live": bool
        },
        "fmp4": {
            "host": validate.text,
            "proto": validate.text,
            "source": bool,
            "mobilesource": bool
        }
    })

    def __init__(self, url):
        Plugin.__init__(self, url)
        match = self._url_re.match(url).groupdict()
        self.channel = match.get("channel")
        self.session.http.headers.update({'User-Agent': useragents.CHROME})

    @classmethod
    def can_handle_url(cls, url):
        """Return True when the URL points to a twitcasting.tv channel."""
        return cls._url_re.match(url) is not None

    def _get_streams(self):
        """Resolve the channel's live WebSocket stream.

        :raises PluginError: if the channel is offline or no stream info
            is available.
        """
        stream_info = self._get_stream_info()
        log.debug("Live stream info: {}".format(stream_info))

        if not stream_info["movie"]["live"]:
            raise PluginError("The live stream is offline")

        # Keys are already validated by schema above
        proto = stream_info["fmp4"]["proto"]
        host = stream_info["fmp4"]["host"]
        movie_id = stream_info["movie"]["id"]

        # Pick the best mode the server offers for this stream.
        if stream_info["fmp4"]["source"]:
            mode = "main"  # High quality
        elif stream_info["fmp4"]["mobilesource"]:
            mode = "mobilesource"  # Medium quality
        else:
            mode = "base"  # Low quality

        if (proto == '') or (host == '') or (not movie_id):
            raise PluginError("No stream available for user {}".format(self.channel))

        real_stream_url = self._STREAM_REAL_URL.format(proto=proto, host=host, movie_id=movie_id, mode=mode)
        log.debug("Real stream url: {}".format(real_stream_url))
        return {mode: TwitCastingStream(session=self.session, url=real_stream_url)}

    def _get_stream_info(self):
        """Fetch and schema-validate the stream server info for the channel."""
        url = self._STREAM_INFO_URL.format(channel=self.channel)
        res = self.session.http.get(url)
        return self.session.http.json(res, schema=self._STREAM_INFO_SCHEMA)
class TwitCastingWsClient(Thread):
    """
    Receive stream data from the TwitCasting server via WebSocket and
    write it into the supplied ring buffer.
    """

    def __init__(self, url, buffer, proxy=""):
        Thread.__init__(self)
        self.stopped = Event()
        self.url = url
        self.buffer = buffer
        self.proxy = proxy
        # Created lazily in run(); stays None until a connection is opened.
        self.ws = None

    @staticmethod
    def parse_proxy_url(purl):
        """Split a proxy URL into websocket-client keyword options.

        Credit: streamlink/plugins/ustreamtv.py:UHSClient:parse_proxy_url()
        """
        proxy_options = {}
        if purl:
            p = urlparse(purl)
            proxy_options['proxy_type'] = p.scheme
            proxy_options['http_proxy_host'] = p.hostname
            if p.port:
                proxy_options['http_proxy_port'] = p.port
            if p.username:
                proxy_options['http_proxy_auth'] = (unquote_plus(p.username), unquote_plus(p.password or ""))
        return proxy_options

    def stop(self):
        if not self.stopped.wait(0):
            log.debug("Stopping WebSocket client...")
            self.stopped.set()
            # self.ws is only assigned in run(); guard against stop() being
            # called before the connection was ever opened (previously an
            # AttributeError on NoneType).
            if self.ws:
                self.ws.close()

    def run(self):
        if self.stopped.wait(0):
            return

        def on_message(ws, data):
            if not self.stopped.wait(0):
                try:
                    self.buffer.write(data)
                except Exception as err:
                    log.error(err)
                    self.stop()

        def on_error(ws, error):
            log.error(error)

        def on_close(ws):
            log.debug("Disconnected from WebSocket server")

        # Parse proxy string for websocket-client
        proxy_options = self.parse_proxy_url(self.proxy)
        if proxy_options.get('http_proxy_host'):
            log.debug("Connecting to {0} via proxy ({1}://{2}:{3})".format(
                self.url,
                proxy_options.get('proxy_type') or "http",
                proxy_options.get('http_proxy_host'),
                proxy_options.get('http_proxy_port') or 80
            ))
        else:
            log.debug("Connecting to {0} without proxy".format(self.url))

        # Connect to WebSocket server
        self.ws = websocket.WebSocketApp(
            self.url,
            header=["User-Agent: {0}".format(useragents.CHROME)],
            on_message=on_message,
            on_error=on_error,
            on_close=on_close
        )
        self.ws.run_forever(origin="https://twitcasting.tv/", **proxy_options)
class TwitCastingReader(StreamIO):
    """File-like reader that exposes the WebSocket ring buffer as a stream."""

    def __init__(self, stream, timeout=None, **kwargs):
        StreamIO.__init__(self)
        self.stream = stream
        self.session = stream.session
        self.timeout = timeout if timeout else self.session.options.get("stream-timeout")
        self.buffer = None
        # Created in open(); initialized here so close() before open() is safe.
        self.client = None

        if logger.root.level <= logger.DEBUG:
            websocket.enableTrace(True, log)

    def open(self):
        # Prepare buffer
        buffer_size = self.session.get_option("ringbuffer-size")
        self.buffer = RingBuffer(buffer_size)

        log.debug("Starting WebSocket client")
        self.client = TwitCastingWsClient(
            self.stream.url,
            buffer=self.buffer,
            proxy=self.session.get_option("http-proxy")
        )
        # Thread.setDaemon() is deprecated since Python 3.10; assign the
        # attribute directly instead.
        self.client.daemon = True
        self.client.start()

    def close(self):
        # Guard both members: close() may be called before open() succeeded.
        if self.client:
            self.client.stop()
        if self.buffer:
            self.buffer.close()

    def read(self, size):
        if not self.buffer:
            return b""
        return self.buffer.read(size, block=(not self.client.stopped.wait(0)),
                                timeout=self.timeout)
class TwitCastingStream(Stream):
    """Stream wrapper around a TwitCasting fmp4 WebSocket URL."""

    def __init__(self, session, url):
        super().__init__(session)
        self.url = url

    def __repr__(self):
        return "<TwitCastingStream({0!r})>".format(self.url)

    def open(self):
        # Hand the stream to a reader and start its WebSocket client.
        stream_reader = TwitCastingReader(self)
        stream_reader.open()
        return stream_reader
__plugin__ = TwitCasting
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitPeeringsOperations(object):
"""ExpressRouteCircuitPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Send the initial DELETE request for a peering.

        Called by ``begin_delete``, which polls the resulting
        long-running operation. Returns ``None`` (or the result of the
        ``cls`` callback, if one was supplied).

        :raises ~azure.core.exceptions.HttpResponseError: on any status
            code other than 200, 202 or 204.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/not-found/conflict statuses to typed exceptions; callers
        # may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Construct URL from the metadata template plus serialized path arguments.
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified peering from the specified express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda x,y,z: x returns the raw PipelineResponse so the
            # polling method can read the LRO headers from it.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                peering_name=peering_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Already consumed by the initial call; must not reach the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # DELETE produces no body; only invoke the optional cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Deletion reports its final state via the 'location' header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ExpressRouteCircuitPeering"
        """Gets the specified peering for the express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteCircuitPeering, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_09_01.models.ExpressRouteCircuitPeering
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitPeering"]
        # Map auth/not-found/conflict statuses to typed exceptions; callers
        # may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Construct URL from the metadata template plus serialized path arguments.
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        peering_parameters,  # type: "_models.ExpressRouteCircuitPeering"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ExpressRouteCircuitPeering"
        """Send the initial PUT request that creates or updates a peering.

        Called by ``begin_create_or_update``, which polls the resulting
        long-running operation. Returns the deserialized
        ``ExpressRouteCircuitPeering`` (or the result of the ``cls``
        callback, if one was supplied).

        :raises ~azure.core.exceptions.HttpResponseError: on any status
            code other than 200 or 201.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitPeering"]
        # Map auth/not-found/conflict statuses to typed exceptions; callers
        # may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL from the metadata template plus serialized path arguments.
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body from the peering model.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry the peering in the body.
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        circuit_name,  # type: str
        peering_name,  # type: str
        peering_parameters,  # type: "_models.ExpressRouteCircuitPeering"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ExpressRouteCircuitPeering"]
        """Creates or updates a peering in the specified express route circuits.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param peering_parameters: Parameters supplied to the create or update express route circuit
         peering operation.
        :type peering_parameters: ~azure.mgmt.network.v2019_09_01.models.ExpressRouteCircuitPeering
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExpressRouteCircuitPeering or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.ExpressRouteCircuitPeering]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitPeering"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda x,y,z: x returns the raw PipelineResponse so the
            # polling method can read the LRO headers from it.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                peering_name=peering_name,
                peering_parameters=peering_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Already consumed by the initial call; must not reach the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final peering from the last polled response.
            deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Create/update reports its final state via the Azure-AsyncOperation header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}  # type: ignore
def list(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ExpressRouteCircuitPeeringListResult"]
    """Gets all peerings in a specified express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExpressRouteCircuitPeeringListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.ExpressRouteCircuitPeeringListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitPeeringListResult"]
    # Status-code -> exception mapping; callers may extend or override it
    # through the 'error_map' keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-09-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the
        # operation's templated URL, later pages use the server-supplied
        # 'next_link' verbatim.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # 'next_link' already contains every query parameter it needs.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page or None,
        # iterator over this page's elements).
        deserialized = self._deserialize('ExpressRouteCircuitPeeringListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page; anything other than HTTP 200 is raised as an
        # HttpResponseError (possibly specialized via error_map).
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    # ItemPaged drives get_next/extract_data lazily as the caller iterates.
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'}  # type: ignore
|
|
import tensorflow as tf
from networks.network import Network
from fcn.config import cfg
zero_out_module = tf.load_op_library('lib/triplet_flow_loss/triplet_flow_loss.so')
class custom_network(Network):
    """Siamese (weight-sharing) convolution network trained with a triplet
    flow loss at four resolutions (1x, 2x, 4x and 8x).

    The left and right images pass through identical towers (the right tower
    is built with ``reuse=True`` so it shares the left tower's variables).
    Skip branches tap the towers after each conv stage to produce feature
    maps per scale, and the ground-truth flow / occlusion / label maps are
    average-pooled down to the matching scales for the per-scale losses.
    """

    def __init__(self):
        self.inputs = cfg.INPUT
        # self.input_format = input_format
        self.num_output_dimensions = 2  # formerly num_classes
        self.num_units = cfg.TRAIN.NUM_UNITS
        self.scale = 1 / cfg.TRAIN.SCALES_BASE[0]
        self.vertex_reg = cfg.TRAIN.VERTEX_REG

        # Input placeholders: NHWC image pair plus per-pixel ground truth.
        self.data_left = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        self.data_right = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        self.gt_flow = tf.placeholder(tf.float32, shape=[None, None, None, self.num_output_dimensions])
        self.occluded = tf.placeholder(tf.int32, shape=[None, None, None, 1])
        self.labels_left = tf.placeholder(tf.int32, shape=[None, None, None, None])
        self.labels_right = tf.placeholder(tf.int32, shape=[None, None, None, None])
        self.keep_prob = tf.placeholder(tf.float32)
        self.queue_size = 20

        # define a queue -- decouples the data-feeding thread from the
        # training step; the dequeued tensors become the graph's named inputs.
        self.q = tf.FIFOQueue(self.queue_size, [tf.float32, tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.float32])
        self.enqueue_op = self.q.enqueue([self.data_left, self.data_right, self.gt_flow, self.occluded, self.labels_left, self.labels_right, self.keep_prob])
        data_left, data_right, gt_flow, occluded, left_labels, right_labels, self.keep_prob_queue = self.q.dequeue()
        self.layers = dict({'data_left': data_left, 'data_right': data_right, 'gt_flow': gt_flow, 'occluded': occluded,
                            'left_labels': left_labels, "right_labels": right_labels})
        self.close_queue_op = self.q.close(cancel_pending_enqueues=True)
        # NOTE(review): 'queue_size' is passed as the op *name* argument of
        # FIFOQueue.size() here -- confirm that is intended.
        self.queue_size_op = self.q.size('queue_size')
        self.trainable = cfg.TRAIN.TRAINABLE

        # Skip-link gates: multiplying a branch by 0.0 disables it, by 1.0
        # keeps it, as selected by the network configuration.
        if cfg.NET_CONF.CONV1_SKIP_LINK:
            self.skip_1_mult = tf.constant(1.0, tf.float32)
        else:
            self.skip_1_mult = tf.constant(0.0, tf.float32)
        if cfg.NET_CONF.CONV2_SKIP_LINK:
            self.skip_2_mult = tf.constant(1.0, tf.float32)
        else:
            self.skip_2_mult = tf.constant(0.0, tf.float32)
        if cfg.NET_CONF.CONV3_SKIP_LINK:
            self.skip_4_mult = tf.constant(1.0, tf.float32)
        else:
            self.skip_4_mult = tf.constant(0.0, tf.float32)

        # Build the actual graph.
        self.setup()

    def setup(self):
        """Construct the two feature towers and the multi-scale triplet losses."""
        trainable = self.trainable
        reuse = True

        with tf.device("/cpu:0"):
            # scaled versions of ground truth: each avg_pool halves the
            # resolution, and the flow is divided by 2 as well so the vectors
            # stay expressed in pixels at the new scale.
            (self.feed('gt_flow')
             .avg_pool(2, 2, 2, 2, name='flow_pool1')
             .div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_2x')
             .avg_pool(2, 2, 2, 2, name='flow_pool2')
             .div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_4x')
             .avg_pool(2, 2, 2, 2, name='flow_pool3')
             .div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_8x')
             .avg_pool(2, 2, 2, 2, name='flow_pool4')
             .div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_16x'))

            # Occlusion masks: pool in float, then round back to int masks at
            # each scale.
            (self.feed('occluded').cast(tf.float32)
             .avg_pool(2, 2, 2, 2, name='occluded_2x_avg')
             .avg_pool(2, 2, 2, 2, name='occluded_4x_avg')
             .avg_pool(2, 2, 2, 2, name='occluded_8x_avg')
             .avg_pool(2, 2, 2, 2, name='occluded_16x_avg'))
            self.feed('occluded_2x_avg').round().cast(tf.int32, name="occluded_2x")
            self.feed('occluded_4x_avg').round().cast(tf.int32, name="occluded_4x")
            self.feed('occluded_8x_avg').round().cast(tf.int32, name="occluded_8x")
            self.feed('occluded_16x_avg').round().cast(tf.int32, name="occluded_16x")

            # Left label maps, pooled and rounded the same way.
            (self.feed('left_labels').cast(tf.float32)
             .avg_pool(2, 2, 2, 2, name='left_labels_2x_avg')
             .avg_pool(2, 2, 2, 2, name='left_labels_4x_avg')
             .avg_pool(2, 2, 2, 2, name='left_labels_8x_avg')
             .avg_pool(2, 2, 2, 2, name='left_labels_16x_avg'))
            self.feed('left_labels_2x_avg').round().cast(tf.int32, name="left_labels_2x")
            self.feed('left_labels_4x_avg').round().cast(tf.int32, name="left_labels_4x")
            self.feed('left_labels_8x_avg').round().cast(tf.int32, name="left_labels_8x")
            self.feed('left_labels_16x_avg').round().cast(tf.int32, name="left_labels_16x")

            # Right label maps.
            (self.feed('right_labels').cast(tf.float32)
             .avg_pool(2, 2, 2, 2, name='right_labels_2x_avg')
             .avg_pool(2, 2, 2, 2, name='right_labels_4x_avg')
             .avg_pool(2, 2, 2, 2, name='right_labels_8x_avg')
             .avg_pool(2, 2, 2, 2, name='right_labels_16x_avg'))
            self.feed('right_labels_2x_avg').round().cast(tf.int32, name="right_labels_2x")
            self.feed('right_labels_4x_avg').round().cast(tf.int32, name="right_labels_4x")
            self.feed('right_labels_8x_avg').round().cast(tf.int32, name="right_labels_8x")
            self.feed('right_labels_16x_avg').round().cast(tf.int32, name="right_labels_16x")

        # left tower -- VGG-style conv stages; the add_immediate(0.0) calls are
        # identity "taps" that only give the intermediate tensors stable names.
        (self.feed('data_left')
         .add_immediate(tf.constant(0.0, tf.float32), name='data_left_tap')
         .conv(3, 3, 64, 1, 1, name='conv1_1', c_i=3, trainable=trainable)
         .conv(3, 3, 64, 1, 1, name='conv1_2', c_i=64, trainable=trainable)
         .add_immediate(tf.constant(0.0, tf.float32), name='conv1_l')
         .max_pool(2, 2, 2, 2, name='pool1')
         .conv(3, 3, 128, 1, 1, name='conv2_1', c_i=64, trainable=trainable)
         .conv(3, 3, 128, 1, 1, name='conv2_2', c_i=128, trainable=trainable)
         .add_immediate(tf.constant(0.0, tf.float32), name='conv2_l')
         .max_pool(2, 2, 2, 2, name='pool2')
         .conv(3, 3, 256, 1, 1, name='conv3_1', c_i=128, trainable=trainable)
         .conv(3, 3, 256, 1, 1, name='conv3_2', c_i=256, trainable=trainable)
         .conv(3, 3, 256, 1, 1, name='conv3_3', c_i=256, trainable=trainable)
         .add_immediate(tf.constant(0.0, tf.float32), name='conv3_l')
         .max_pool(2, 2, 2, 2, name='pool3')
         .conv(3, 3, 512, 1, 1, name='conv4_1', c_i=256, trainable=trainable)
         .conv(3, 3, 512, 1, 1, name='conv4_2', c_i=512, trainable=trainable)
         .conv(3, 3, 512, 1, 1, name='conv4_3', c_i=512, trainable=trainable)
         .add_immediate(tf.constant(0.0, tf.float32), name='conv4_3_l'))

        # 8x scaling input -- bottleneck the conv4 output down to 64 features.
        (self.feed('conv4_3_l')
         .conv(1, 1, 256, 1, 1, name='8x_skip_cov_1', c_i=512, elu=True)
         .conv(3, 3, 512, 1, 1, name='8x_skip_cov_2', c_i=256, elu=True)
         .conv(1, 1, 128, 1, 1, name='8x_skip_cov_3', c_i=512, elu=True)
         .conv(3, 3, 64, 1, 1, name='8x_skip_cov_4', c_i=128, elu=True)
         .add_immediate(tf.constant(0.0, tf.float32), name='features_8x_l'))

        # 4x scaling input
        (self.feed('conv3_l')
         .conv(3, 3, 96, 1, 1, name='4x_skip_conv_1', elu=True, c_i=256)
         # .conv(1, 1, 96, 1, 1, name='4x_skip_conv_2', elu=True, c_i=96)
         # .conv(3, 3, 64, 1, 1, name='4x_skip_conv_3', elu=True, c_i=96)
         .conv(1, 1, 96, 1, 1, name='4x_skip_conv_4', elu=True, c_i=96)
         .conv(3, 3, 32, 1, 1, name='4x_skip_conv_5', elu=True, c_i=96)
         .add_immediate(tf.constant(0.0, tf.float32), name='features_4x_l'))

        # 2x scaling input
        (self.feed('conv2_l')
         .conv(3, 3, 96, 1, 1, name='2x_skip_conv_1', elu=True, c_i=128)
         .conv(1, 1, 64, 1, 1, name='2x_skip_conv_2', elu=True, c_i=96)
         .conv(3, 3, 16, 1, 1, name='2x_skip_conv_3', c_i=64, elu=True)
         .add_immediate(tf.constant(0.0, tf.float32), name='features_2x_l'))

        # 1x scaling input
        (self.feed('conv1_l')
         .conv(3, 3, 32, 1, 1, name='1x_skip_conv_1', elu=True, c_i=64)
         .conv(3, 3, 8, 1, 1, name='1x_skip_conv_2', c_i=32, elu=True)
         .add_immediate(tf.constant(0.0, tf.float32), name='features_1x_l'))

        # right tower -- identical structure; reuse=True shares the left
        # tower's variables, making the two towers siamese.
        (self.feed('data_right')
         .add_immediate(tf.constant(0.0, tf.float32), name='data_right_tap')
         .conv(3, 3, 64, 1, 1, name='conv1_1', c_i=3, trainable=trainable, reuse=reuse)
         .conv(3, 3, 64, 1, 1, name='conv1_2', c_i=64, trainable=trainable, reuse=reuse)
         .add_immediate(tf.constant(0.0, tf.float32), name='conv1_r')
         .max_pool(2, 2, 2, 2, name='pool1')
         .conv(3, 3, 128, 1, 1, name='conv2_1', c_i=64, trainable=trainable, reuse=reuse)
         .conv(3, 3, 128, 1, 1, name='conv2_2', c_i=128, trainable=trainable, reuse=reuse)
         .add_immediate(tf.constant(0.0, tf.float32), name='conv2_r')
         .max_pool(2, 2, 2, 2, name='pool2')
         .conv(3, 3, 256, 1, 1, name='conv3_1', c_i=128, trainable=trainable, reuse=reuse)
         .conv(3, 3, 256, 1, 1, name='conv3_2', c_i=256, trainable=trainable, reuse=reuse)
         .conv(3, 3, 256, 1, 1, name='conv3_3', c_i=256, trainable=trainable, reuse=reuse)
         .add_immediate(tf.constant(0.0, tf.float32), name='conv3_r')
         .max_pool(2, 2, 2, 2, name='pool3')
         .conv(3, 3, 512, 1, 1, name='conv4_1', c_i=256, trainable=trainable, reuse=reuse)
         .conv(3, 3, 512, 1, 1, name='conv4_2', c_i=512, trainable=trainable, reuse=reuse)
         .conv(3, 3, 512, 1, 1, name='conv4_3', c_i=512, trainable=trainable, reuse=reuse)
         .add_immediate(tf.constant(0.0, tf.float32), name='conv4_3_r'))

        # 8x scaling input
        (self.feed('conv4_3_r')
         .conv(1, 1, 256, 1, 1, name='8x_skip_cov_1', c_i=512, elu=True, reuse=reuse)
         .conv(3, 3, 512, 1, 1, name='8x_skip_cov_2', c_i=256, elu=True, reuse=reuse)
         .conv(1, 1, 128, 1, 1, name='8x_skip_cov_3', c_i=512, elu=True, reuse=reuse)
         .conv(3, 3, 64, 1, 1, name='8x_skip_cov_4', c_i=128, elu=True, reuse=reuse)
         .add_immediate(tf.constant(0.0, tf.float32), name='features_8x_r'))

        # 4x scaling input
        (self.feed('conv3_r')
         .conv(3, 3, 96, 1, 1, name='4x_skip_conv_1', c_i=256, elu=True, reuse=reuse)
         # .conv(1, 1, 96, 1, 1, name='4x_skip_conv_2', c_i=96, elu=True, reuse=reuse)
         # .conv(3, 3, 64, 1, 1, name='4x_skip_conv_3', c_i=96, elu=True, reuse=reuse)
         .conv(1, 1, 96, 1, 1, name='4x_skip_conv_4', c_i=96, elu=True, reuse=reuse)
         .conv(3, 3, 32, 1, 1, name='4x_skip_conv_5', c_i=96, elu=True, reuse=reuse)
         .add_immediate(tf.constant(0.0, tf.float32), name='features_4x_r'))

        # 2x scaling input
        (self.feed('conv2_r')
         .conv(3, 3, 96, 1, 1, name='2x_skip_conv_1', c_i=128, elu=True, reuse=reuse)
         .conv(1, 1, 64, 1, 1, name='2x_skip_conv_2', c_i=96, elu=True, reuse=reuse)
         .conv(3, 3, 16, 1, 1, name='2x_skip_conv_3', c_i=64, elu=True, reuse=reuse)
         .add_immediate(tf.constant(0.0, tf.float32), name='features_2x_r'))

        # 1x scaling input
        (self.feed('conv1_r')
         .conv(3, 3, 32, 1, 1, name='1x_skip_conv_1', c_i=64, elu=True, reuse=reuse)
         .conv(3, 3, 8, 1, 1, name='1x_skip_conv_2', c_i=32, elu=True, reuse=reuse)
         .add_immediate(tf.constant(0.0, tf.float32), name='features_1x_r'))

        with tf.device("/cpu:0"):
            # triplet loss -- one loss per scale, each fed with the feature
            # pair plus the ground truth pooled to that scale.
            (self.feed(['features_1x_l', 'features_1x_r', 'gt_flow', 'occluded', 'left_labels', 'right_labels'])
             .triplet_flow_loss(margin=1.0, negative_radius=2, positive_radius=0, name="triplet_loss_1x"))

            (self.feed(['features_2x_l', 'features_2x_r', 'gt_flow_2x', 'occluded_2x', 'left_labels_2x', 'right_labels_2x'])
             .triplet_flow_loss(margin=1.0, negative_radius=3, positive_radius=0, name="triplet_loss_2x"))

            (self.feed(['features_4x_l', 'features_4x_r', 'gt_flow_4x', 'occluded_4x', 'left_labels_4x', 'right_labels_4x'])
             .triplet_flow_loss(margin=1.0, negative_radius=3, positive_radius=0, name="triplet_loss_4x"))

            (self.feed(['features_8x_l', 'features_8x_r', 'gt_flow_8x', 'occluded_8x', 'left_labels_8x', 'right_labels_8x'])
             .triplet_flow_loss(margin=1.0, negative_radius=3, positive_radius=0, name="triplet_loss_8x"))

            # Average the four per-scale losses into the single training loss.
            final_output = (self.get_output('triplet_loss_8x')[0] + self.get_output('triplet_loss_2x')[0] +
                            self.get_output('triplet_loss_4x')[0] + self.get_output('triplet_loss_1x')[0]) / 4.0
            self.layers["final_triplet_loss"] = [final_output]

        # (self.feed(['features_8x_l', 'features4x_l', 'features_2x_l', 'features_1x_l'])
        # .concat(axis=3, name="final_features_l_out"))
        #
        # (self.feed(['features_8x_r', 'features4x_r', 'features_2x_r', 'features_1x_r'])
        # .concat(axis=3, name="final_features_r_out"))
        pass
|
|
#!/usr/bin/env python
"""usage: AntogniniAeberli.py [options] [params] [cityfile]
options:
-h, --help Show this help
-n, --no-gui Hide the Graphical User Interface during the problem resolution
-v, --verbose Show verbose information during the generation of the solution on the command line.
params:
-m VALUE, --maxtime=VALUE Max. execution time of genetic algorithm.
Zero for infinite. Default: 0
(c) 2014 by Diego Antognini and Marco Aeberli
"""
import sys
import getopt
import os
import pygame
from math import hypot
from random import randint, shuffle, sample, choice
from time import clock
from copy import deepcopy
def equal_double(a, b, epsilon=1e-6):
    """Compare two floats for approximate equality.

    Returns True when ``a`` and ``b`` differ by less than ``epsilon``,
    False otherwise.
    """
    difference = a - b
    if difference < 0:
        difference = -difference
    return difference < epsilon
class Town:
    """
    Class which represents a town in the TSP.

    All pairwise distances are pre-computed once by
    ``compute_all_possible_distance`` and cached in the class-level matrix
    ``PRECALCULATE_LIST``, indexed by town id.
    """
    # Shared distance matrix: PRECALCULATE_LIST[id1][id2] is the Euclidean
    # distance between the towns with those ids.
    PRECALCULATE_LIST = []

    def __init__(self, id, name, x, y):
        self.id = id        # unique integer id, used as the matrix index
        self.name = name
        self.x = float(x)
        self.y = float(y)

    @staticmethod
    def compute_distance(t1, t2):
        """
        Computes the Euclidean distance between two towns (by their ids),
        using the pre-computed matrix.
        """
        return Town.PRECALCULATE_LIST[t1][t2]

    @staticmethod
    def compute_all_possible_distance(cities):
        """
        Pre-computes the symmetric distance matrix for all towns.

        ``cities`` is a list of Town instances whose ids are expected to be
        0..len(cities)-1.  The cached matrix is rebuilt from scratch, so the
        method is safe to call more than once in the same process.
        """
        # Clear in place: the previous implementation appended on every call,
        # so a second invocation grew the matrix and corrupted the indices.
        del Town.PRECALCULATE_LIST[:]
        for k1 in range(0, len(cities)):
            Town.PRECALCULATE_LIST.append([0] * len(cities))
        for k1 in range(0, len(cities)):
            for k2 in range(k1, len(cities)):
                # Fill both triangles at once (the distance is symmetric).
                c1 = cities[k1].id
                c2 = cities[k2].id
                Town.PRECALCULATE_LIST[c1][c2] = Town.PRECALCULATE_LIST[c2][c1] = hypot(cities[k1].x-cities[k2].x, cities[k1].y-cities[k2].y)
class Solution:
    """
    A candidate tour for the TSP.  Each gene of the chromosome is the unique
    id of a town; the tour is implicitly closed (last town -> first town).
    """

    def __init__(self, chromosome):
        self.chromosome = chromosome
        self.distance = 0

    def __repr__(self):
        genes = " ".join([str(gene) for gene in self.chromosome])
        return str(self.distance) + " : " + genes

    def __len__(self):
        return len(self.chromosome)

    def __getitem__(self, item):
        return self.chromosome[item]

    def __setitem__(self, key, value):
        self.chromosome[key] = value

    def index(self, value):
        return self.chromosome.index(value)

    def compute_distance(self):
        """
        Recomputes the total travelling distance of this solution.
        """
        total = 0.0
        # Sum over consecutive towns of the tour...
        for current, following in zip(self.chromosome, self.chromosome[1:]):
            total += Town.compute_distance(current, following)
        # ...and close the loop between the first and the last city.
        total += Town.compute_distance(self.chromosome[0], self.chromosome[-1])
        self.distance = total

    def mutate(self):
        """
        Mutates the solution by reversing the sub-path between two distinct
        random positions (2-opt style move).
        i.e.: [0,1,2,3,4,5,6,7,8,9,10,11] --> select random 5 and 8
              [0,1,2,3,4,8,7,6,5,9,10,11]
        """
        last = len(self.chromosome) - 1
        first_cut = randint(0, last)
        second_cut = first_cut
        while second_cut == first_cut:
            second_cut = randint(0, last)
        if first_cut > second_cut:
            first_cut, second_cut = second_cut, first_cut
        # Reverse the inclusive segment in one slice assignment.
        segment = self.chromosome[first_cut:second_cut + 1]
        self.chromosome[first_cut:second_cut + 1] = segment[::-1]
class Problem:
    """
    Class which represents the entire problem (without gui) for the TSP.
    """
    NB_POPULATION = 0  # Will be changed during the execution time, by FACTOR*len(cities)
    FACTOR = 1
    SIZE_TOURNAMENT_BATTLE = 10  # Size of the tournament battle with which we keep the best
    MUTATION_RATE = 0.3  # Probability to mutate
    CROSSOVER_FRACTION = 0.8  # Number of generated offsprings
    DELTA_GENERATION = 50  # Convergence criteria. If the best solution hasn't changed since DELTA_GENERATION => STOP

    def __init__(self, cities):
        """
        Initializes a problem, based on the cities passed as argument.
        The cities are expected in format [[name, pos_x, pos_y], ...]
        """
        Problem.NB_POPULATION = len(cities)*Problem.FACTOR
        self.cities_dict = {}
        # keys[c] == c, so a town's id doubles as its index in the distance matrix.
        self.keys = range(0, len(cities))
        self.best_solution = None
        self.population = []
        cities_id = []
        for c in xrange(0, len(cities)):
            town = Town(self.keys[c], cities[c][0], cities[c][1], cities[c][2])
            self.cities_dict[town.id] = town
            cities_id.append(town)
        # Pre-compute every pairwise distance once, up front.
        Town.compute_all_possible_distance(cities_id)

    def create_population(self):
        """
        Creates a population of NB_POPULATION random permutations of the town ids.
        """
        for i in xrange(0, Problem.NB_POPULATION):
            shuffle(self.keys)  # Use Fisher-Yates shuffle, O(n). Better than copying and removing
            self.population.append(Solution(self.keys[:]))

    def initialize(self):
        """
        Preparation for the execution of the algorithm.
        """
        # Sentinel best solution with infinite distance, so the first real
        # solution always replaces it.
        self.best_solution = Solution([])
        self.best_solution.distance = float('inf')
        self.create_population()
        self.compute_all_distances()

    def compute_all_distances(self):
        """
        Computes the distances for all the solutions available in the population.
        Determines also the best_solution in the population.
        """
        for p in self.population:
            p.compute_distance()
            # Strictly better beyond the float tolerance => new best; deepcopy
            # so later mutations of p cannot corrupt the recorded best.
            if p.distance < self.best_solution.distance and not equal_double(p.distance, self.best_solution.distance):
                self.best_solution = deepcopy(p)

    def generate(self):
        """
        Runs all the steps for the generation of a "good" solution.
        Returns the best solution obtained during the generation.
        """
        # One GA generation: tournament selection, crossover, mutation,
        # then re-evaluation of every solution.
        new_population = self.selection_process()
        new_population += self.crossover_process(new_population)
        self.mutation_process(new_population)
        self.population = new_population
        self.compute_all_distances()
        # If we don't have enough town to realize a crossover (eg 5)
        if len(self.population) > Problem.NB_POPULATION:
            self.population.sort(key=lambda p:p.distance)
            self.population = self.population[:Problem.NB_POPULATION]
        return self.best_solution

    def selection_process(self):
        """
        Runs the tournament with a specified size (defined as static).
        """
        new_population = []
        # If the number of cities is to small, we return the entire population and we'll cut it later
        if self.SIZE_TOURNAMENT_BATTLE >= len(self.population):
            return self.population
        else:
            # Keep (1-CROSSOVER_FRACTION)*NB_POPULATION tournament winners;
            # each winner is removed from the old population so it cannot win twice.
            for i in xrange(0, int(round((1-Problem.CROSSOVER_FRACTION)*Problem.NB_POPULATION))):
                solutions = sample(self.population, self.SIZE_TOURNAMENT_BATTLE)
                solutions.sort(key=lambda p: p.distance)
                self.population.remove(solutions[0])  # O(n) but if we want, we could do the tricks with swaping with the last element and then pop it. But the population is really small so not necessary
                new_population.append(solutions[0])
            return new_population

    def crossover_process(self, new_population):
        """
        Does the crossover of two random solutions
        """
        future_solution = []
        # Each iteration picks two distinct parents and produces two children
        # (one per parent ordering), totalling CROSSOVER_FRACTION*NB_POPULATION.
        for i in xrange(0, int(round(Problem.NB_POPULATION*Problem.CROSSOVER_FRACTION)/2)):
            solution1 = choice(new_population)
            solution2 = solution1
            while solution2 == solution1:
                solution2 = choice(new_population)
            future_solution.append(self.crossover(solution1, solution2))
            future_solution.append(self.crossover(solution2, solution1))
        return future_solution

    def crossover(self, ga, gb):
        """
        Greedy crossover: starting from a common town, grow the child by
        walking left through parent ``ga`` and right through parent ``gb``
        until a duplicate is met on each side; remaining towns are appended
        in random order.
        """
        fa, fb = True, True
        n = len(ga)
        town = choice(ga.chromosome)
        x = ga.index(town)
        y = gb.index(town)
        g = [town]
        while fa or fb:
            x = (x - 1) % n
            y = (y + 1) % n
            if fa:
                if ga[x] not in g:
                    g.insert(0, ga[x])
                else:
                    fa = False
            if fb:
                if gb[y] not in g:
                    g.append(gb[y])
                else:
                    fb = False
        # Collect the towns not yet present and append them shuffled.
        remaining_towns = []
        if len(g) < len(ga):
            while len(g)+len(remaining_towns) != n:
                x = (x - 1) % n
                if ga[x] not in g:
                    remaining_towns.append(ga[x])
            shuffle(remaining_towns)  # Use Fisher-Yates shuffle, O(n). Better than copying and removing
            while len(remaining_towns) > 0:
                g.append(remaining_towns.pop())
        return Solution(g)

    def mutation_process(self, new_population):
        """
        Mutates some of the solutions in the new_population passed as argument.
        """
        for s in sample(new_population, int(round(Problem.MUTATION_RATE*Problem.NB_POPULATION))):
            s.mutate()
class TS_GUI:
    """
    Class attached with Problem to represent the TSP.

    Draws the cities and the current best tour with pygame, and provides a
    text-only mode (display_text_only) that needs no window at all.
    """
    # Window geometry: the map area sits above an infobox of height offset_y.
    screen_x = 500
    screen_y = 600
    offset_y = 50
    offset_y_between_text = 20
    offset_x_y_city_name = 10
    # Colours and sizes used when drawing.
    city_color = [10, 10, 200]
    city_start_color = [255, 0, 0]
    city_end_color = [0, 255, 0]
    city_radius = 3
    cities_name = 'v'               # prefix used for auto-generated city names
    infobox_color = [128, 128, 128]
    font_color = [255, 255, 255]

    def __init__(self, gui=True):
        # Only initialize pygame and open a window when a GUI is requested;
        # the text-only mode works without any of it.
        if gui:
            pygame.init()
            self.window = pygame.display.set_mode((TS_GUI.screen_x, TS_GUI.screen_y))
            pygame.display.set_caption('Travelling Salesman Problem - Antognini Aeberli')
            self.screen = pygame.display.get_surface()
            self.font = pygame.font.Font(None, 18)
            self.font_city_name = pygame.font.Font(None, 12)
            pygame.display.flip()
        self.cities_dict = {}       # id -> Town, filled in by the caller

    def draw_one_city(self, name, x, y, color, color_font):
        """
        Draws one city to the pygame gui screen.
        """
        pygame.draw.circle(self.screen, color, (int(x), int(y)), TS_GUI.city_radius)
        text = self.font_city_name.render(name, True, color_font)
        self.screen.blit(text, (x-TS_GUI.offset_x_y_city_name, y-TS_GUI.offset_x_y_city_name))

    def draw_path(self, solution, nb_generation):
        """
        Draws the path (between cities) of a solution and the appropriate informations to the pygame gui screen.
        """
        self.screen.fill(0)
        cities_to_draw = []
        for c in xrange(0, len(solution)):
            # The tour's first city is drawn red, the last green, the rest blue.
            color, color_font = TS_GUI.city_color, TS_GUI.font_color
            if c == 0:
                color, color_font = TS_GUI.city_start_color, TS_GUI.city_start_color
            elif c == len(solution)-1:
                color, color_font = TS_GUI.city_end_color, TS_GUI.city_end_color
            town = self.cities_dict[solution[c]]
            self.draw_one_city(town.name, town.x, town.y, color, color_font)
            cities_to_draw.append((int(town.x), int(town.y)))
        pygame.draw.lines(self.screen, self.city_color, True, cities_to_draw)  # True close the polygon between the first and last point
        self.draw_infobox()
        text = self.font.render("Generation %i, Length %s" % (nb_generation, solution.distance), True, TS_GUI.font_color)
        self.screen.blit(text, (0, TS_GUI.screen_y - TS_GUI.offset_y + TS_GUI.offset_y_between_text))
        text = self.font.render("%i cities" % len(self.cities_dict), True, TS_GUI.font_color)
        self.screen.blit(text, (0, TS_GUI.screen_y - TS_GUI.offset_y))
        pygame.display.flip()

    def draw_infobox(self):
        """
        Draws the base style of the infobox at the bottom of the gui.
        """
        pygame.draw.rect(self.screen, TS_GUI.infobox_color, (0, TS_GUI.screen_y-TS_GUI.offset_y, TS_GUI.screen_x, TS_GUI.offset_y))

    def read_cities(self):
        """
        Proposes a gui for entering cities with mouse clicks on the map area
        and returns the entered cities.
        Returns a list with [NAME, POS_X, POS_Y] where the names are auto generated.
        """
        self.draw_infobox()
        text = self.font.render("Click with the mouse to create a city. Press Enter to continue.", True, TS_GUI.font_color)
        self.screen.blit(text, (0, TS_GUI.screen_y - TS_GUI.offset_y + TS_GUI.offset_y_between_text))
        pygame.display.flip()
        running = True
        cities = []
        i = 0
        while running:
            event = pygame.event.wait()
            if event.type == pygame.MOUSEBUTTONDOWN:
                x, y = pygame.mouse.get_pos()
                # Ignore clicks inside the infobox at the bottom.
                if y <= TS_GUI.screen_y-TS_GUI.offset_y:
                    cities.append([TS_GUI.cities_name + str(i), x, y])
                    self.draw_one_city(TS_GUI.cities_name + str(i), x, y, TS_GUI.city_color, TS_GUI.font_color)
                    pygame.display.flip()
                    i += 1
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
                running = False
            elif event.type == pygame.QUIT:
                exit(-1)
        return cities

    def wait_to_quit(self, i, best_solution):
        """
        Proposes a gui showing the best_solution to the user and waits for its confirmation to quit.
        """
        self.draw_infobox()
        text = self.font.render(str(len(self.cities_dict)) + " cities, Best : #" + str(i) + " generation, Distance : " + str(best_solution.distance), True, TS_GUI.font_color)
        self.screen.blit(text, (0, TS_GUI.screen_y - TS_GUI.offset_y))
        text = self.font.render("Press Enter to quit !", True, TS_GUI.font_color)
        self.screen.blit(text, (0, TS_GUI.screen_y - TS_GUI.offset_y + TS_GUI.offset_y_between_text))
        pygame.display.flip()
        # wait until the user closes the window or presses the return key.
        running = True
        while running:
            event = pygame.event.wait()
            if event.type == pygame.QUIT or event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
                running = False

    def display(self, problem, max_time=0):
        """
        Executes the problem resolving and visualizes the results on the pygame gui.
        """
        old_best_solution = problem.best_solution
        printVerbose("Generation 0 : " + str(old_best_solution))
        self.draw_path(old_best_solution, 0)
        running = True
        i = 1
        t0 = 0
        ith_best = 0                # generation at which the current best was found
        if max_time > 0:
            t0 = clock()
        while running:
            best_solution = problem.generate()
            # Redraw only when the best tour actually changed.
            if not equal_double(old_best_solution.distance, best_solution.distance):
                old_best_solution = best_solution
                self.draw_path(old_best_solution, i)
                printVerbose("Generation " + str(i) + " : " + str(best_solution))
                ith_best = i
            i += 1
            event = pygame.event.poll()
            # Verify if the user has request to quit the gui, or the maximum time has passed, or if the problem has converged.
            if event.type == pygame.QUIT or i-ith_best > Problem.DELTA_GENERATION and max_time <= 0 or (max_time > 0 and int(clock()-t0) > max_time):
                # Quit the loop if so.
                running = False
        self.wait_to_quit(ith_best, old_best_solution)
        # prepare the solution and return it
        return self.return_solution(problem.best_solution)

    def display_text_only(self, problem, max_time=0):
        """
        Executes the problem resolving and displays the results on the command line.
        """
        old_best_solution = problem.best_solution
        printVerbose("Generation 0 : " + str(old_best_solution))
        t0 = 0
        i = 1
        ith_best = 0
        if max_time > 0:
            t0 = clock()
        # Until no convergence appears or the maximum processing time reached, generate new solutions and keep the best.
        while i-ith_best <= Problem.DELTA_GENERATION and max_time <= 0 or (max_time > 0 and int(clock()-t0) < max_time):
            best_solution = problem.generate()
            if not equal_double(old_best_solution.distance, best_solution.distance):
                old_best_solution = best_solution
                printVerbose("Generation " + str(i) + " : " + str(best_solution))
                ith_best = i
            i += 1
        # prepare the best solution for returning.
        return self.return_solution(problem.best_solution)

    def return_solution(self, solution):
        """
        Creates the solution format requested by the laboratory and returns it.
        Returns the solution in format (distance, list(cities))
        """
        cities = []
        for c in xrange(0, len(solution)):
            cities.append(self.cities_dict[solution[c]].name)
        return solution.distance, cities

    def quit(self):
        """
        Closes and exits pygame.
        """
        pygame.quit()
def usage():
    """
    Prints the module usage instructions (the module docstring) to the console.
    """
    print(__doc__)
def get_argv_params():
    """
    Parses the command-line arguments of the program.

    Returns a (gui, max_time, filename, verbose) tuple, with the defaults
    gui=True, max_time=0, filename=None, verbose=False.
    """
    try:
        parsed_options = getopt.getopt(
            sys.argv[1:],
            "hnm:v",
            ["help", "no-gui", "maxtime=", "verbose"])[0]
    except getopt.GetoptError:
        usage()
        print("Wrong options or params.")
        exit(2)

    gui, verbose, max_time = True, False, 0
    for option, value in parsed_options:
        if option in ("-h", "--help"):
            usage()
            exit()
        elif option in ("-n", "--no-gui"):
            gui = False
        elif option in ("-v", "--verbose"):
            verbose = True
        elif option in ("-m", "--maxtime"):
            max_time = int(value)

    # The city file, when given, is the last positional argument.
    filename = None
    if len(sys.argv) > 1 and os.path.exists(sys.argv[-1]):
        filename = sys.argv[-1]

    return gui, max_time, filename, verbose
def ga_solve(filename=None, gui=True, max_time=0):
    """
    Solves the TSP with the genetic algorithm.

    filename -- path of a city file (one "name x y" entry per line); when
                None, the cities are entered interactively through the GUI.
    gui      -- when True, show the resolution progress in the pygame window.
    max_time -- maximum execution time in seconds (0 means unlimited).
    Returns a (distance, [city names]) tuple describing the best tour found.
    """
    cities = []
    g = None
    if filename is None:
        g = TS_GUI()
        cities = g.read_cities()
        # quit the gui here, when no gui to show the progress is necessary in future.
        if not gui:
            pygame.quit()
    else:
        # Open read-only: the file is only parsed, never written ('r+' in the
        # original failed needlessly on read-only files).
        with open(filename, 'r') as f:
            for l in f.readlines():
                cities.append(l.split())

    problem = Problem(cities)
    problem.initialize()

    if g is None:
        g = TS_GUI(gui)
    g.cities_dict = problem.cities_dict

    if gui:
        return g.display(problem, max_time)
    else:
        return g.display_text_only(problem, max_time)
def printVerbose(output):
    """Print *output* to the console, but only when verbose mode is on."""
    if not printVerbose.VERBOSE:
        return
    print(output)
# verbose output stays disabled until the command line parameters enable it
printVerbose.VERBOSE = False
if __name__ == "__main__":
    # Parse the command line, remember the verbosity flag, run the solver
    # and report the best tour found.
    (GUI, MAX_TIME, FILENAME, VERBOSE) = get_argv_params()
    print("arguments( gui: %s maxtime: %s filename: %s verbose: %s )" % (GUI, MAX_TIME, FILENAME, VERBOSE))
    printVerbose.VERBOSE = VERBOSE
    # ga_solve returns (distance, list_of_city_names)
    results = ga_solve(FILENAME, GUI, MAX_TIME)
    print("distance: %s" % results[0])
    print("cities: %s" % results[1])
|
|
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A context manager to perform a series of tasks on a set of resources.
:class:`TaskManager` is a context manager, created on-demand to allow
synchronized access to a node and its resources.
The :class:`TaskManager` will, by default, acquire an exclusive lock on
a node for the duration that the TaskManager instance exists. You may
create a TaskManager instance without locking by passing "shared=True"
when creating it, but certain operations on the resources held by such
an instance of TaskManager will not be possible. Requiring this exclusive
lock guards against parallel operations interfering with each other.
A shared lock is useful when performing non-interfering operations,
such as validating the driver interfaces.
An exclusive lock is stored in the database to coordinate between
:class:`ironic.conductor.manager` instances, that are typically deployed on
different hosts.
:class:`TaskManager` methods, as well as driver methods, may be decorated to
determine whether their invocation requires an exclusive lock.
The TaskManager instance exposes certain node resources and properties as
attributes that you may access:
task.context
The context passed to TaskManager()
task.shared
False if Node is locked, True if it is not locked. (The
'shared' kwarg arg of TaskManager())
task.node
The Node object
task.ports
Ports belonging to the Node
task.driver
The Driver for the Node, or the Driver based on the
'driver_name' kwarg of TaskManager().
Example usage:
::
with task_manager.acquire(context, node_id, purpose='power on') as task:
task.driver.power.power_on(task.node)
If you need to execute task-requiring code in a background thread, the
TaskManager instance provides an interface to handle this for you, making
sure to release resources when the thread finishes (successfully or if
an exception occurs). Common use of this is within the Manager like so:
::
with task_manager.acquire(context, node_id, purpose='some work') as task:
<do some work>
task.spawn_after(self._spawn_worker,
utils.node_power_action, task, new_state)
All exceptions that occur in the current GreenThread as part of the
spawn handling are re-raised. You can specify a hook to execute custom
code when such exceptions occur. For example, the hook is a more elegant
solution than wrapping the "with task_manager.acquire()" with a
try..exception block. (Note that this hook does not handle exceptions
raised in the background thread.):
::
def on_error(e):
if isinstance(e, Exception):
...
with task_manager.acquire(context, node_id, purpose='some work') as task:
<do some work>
task.set_spawn_error_hook(on_error)
task.spawn_after(self._spawn_worker,
utils.node_power_action, task, new_state)
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
import retrying
import six
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common.i18n import _LW
from ironic.common import states
from ironic import objects
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def require_exclusive_lock(f):
    """Decorator to require an exclusive lock.

    Decorated functions must take a :class:`TaskManager` as the first
    parameter. Decorated class methods should take a :class:`TaskManager`
    as the first parameter after "self".
    """
    @six.wraps(f)
    def wrapper(*args, **kwargs):
        # NOTE(dtantsur): this code could be written simpler, but then unit
        # testing decorated functions is pretty hard, as we usually pass a Mock
        # object instead of TaskManager there.
        task = args[0]
        if len(args) > 1 and isinstance(args[1], TaskManager):
            task = args[1]
        if task.shared:
            raise exception.ExclusiveLockRequired()
        return f(*args, **kwargs)
    return wrapper
def acquire(context, node_id, shared=False, driver_name=None,
            purpose='unspecified action'):
    """Shortcut for acquiring a lock on a Node.

    :param context: Request context.
    :param node_id: ID or UUID of node to lock.
    :param shared: Boolean indicating whether to take a shared or exclusive
                   lock. Default: False.
    :param driver_name: Name of Driver. Default: None.
    :param purpose: human-readable purpose to put to debug logs.
    :returns: An instance of :class:`TaskManager`.
    """
    task = TaskManager(context, node_id, shared=shared,
                       driver_name=driver_name, purpose=purpose)
    return task
class TaskManager(object):
    """Context manager for tasks.
    This class wraps the locking, driver loading, and acquisition
    of related resources (eg, Node and Ports) when beginning a unit of work.
    """
    def __init__(self, context, node_id, shared=False, driver_name=None,
                 purpose='unspecified action'):
        """Create a new TaskManager.
        Acquire a lock on a node. The lock can be either shared or
        exclusive. Shared locks may be used for read-only or
        non-disruptive actions only, and must be considerate to what
        other threads may be doing on the same node at the same time.
        :param context: request context
        :param node_id: ID or UUID of node to lock.
        :param shared: Boolean indicating whether to take a shared or exclusive
                       lock. Default: False.
        :param driver_name: The name of the driver to load, if different
                            from the Node's current driver.
        :param purpose: human-readable purpose to put to debug logs.
        :raises: DriverNotFound
        :raises: NodeNotFound
        :raises: NodeLocked
        """
        # Set later via spawn_after() / set_spawn_error_hook(); they drive
        # what __exit__() does when the context manager closes cleanly.
        self._spawn_method = None
        self._on_error_method = None
        self.context = context
        self.node = None
        self.node_id = node_id
        self.shared = shared
        # Private copy of the provision-state machine; advanced by
        # process_event() and kept in sync with the Node fields below.
        self.fsm = states.machine.copy()
        self._purpose = purpose
        # Stopwatch used only for the lock-timing debug log messages.
        self._debug_timer = timeutils.StopWatch()
        try:
            LOG.debug("Attempting to get %(type)s lock on node %(node)s (for "
                      "%(purpose)s)",
                      {'type': 'shared' if shared else 'exclusive',
                       'node': node_id, 'purpose': purpose})
            if not self.shared:
                # exclusive: reserve the node in the DB (sets self.node)
                self._lock()
            else:
                # shared: no reservation, just load the node
                self._debug_timer.restart()
                self.node = objects.Node.get(context, node_id)
            self.ports = objects.Port.list_by_node_id(context, self.node.id)
            self.driver = driver_factory.get_driver(driver_name or
                                                    self.node.driver)
            # NOTE(deva): this handles the Juno-era NOSTATE state
            # and should be deleted after Kilo is released
            if self.node.provision_state is states.NOSTATE:
                self.node.provision_state = states.AVAILABLE
                self.node.save()
            self.fsm.initialize(start_state=self.node.provision_state,
                                target_state=self.node.target_provision_state)
        except Exception:
            # never leave a dangling reservation if setup fails part-way
            with excutils.save_and_reraise_exception():
                self.release_resources()
    def _lock(self):
        """Reserve the node in the database, retrying on NodeLocked."""
        self._debug_timer.restart()
        # NodeLocked exceptions can be annoying. Let's try to alleviate
        # some of that pain by retrying our lock attempts. The retrying
        # module expects a wait_fixed value in milliseconds.
        @retrying.retry(
            retry_on_exception=lambda e: isinstance(e, exception.NodeLocked),
            stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
            wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
        def reserve_node():
            self.node = objects.Node.reserve(self.context, CONF.host,
                                             self.node_id)
            LOG.debug("Node %(node)s successfully reserved for %(purpose)s "
                      "(took %(time).2f seconds)",
                      {'node': self.node_id, 'purpose': self._purpose,
                       'time': self._debug_timer.elapsed()})
            self._debug_timer.restart()
        reserve_node()
    def upgrade_lock(self):
        """Upgrade a shared lock to an exclusive lock.
        Also reloads node object from the database.
        Does nothing if lock is already exclusive.
        """
        if self.shared:
            LOG.debug('Upgrading shared lock on node %(uuid)s for %(purpose)s '
                      'to an exclusive one (shared lock was held %(time).2f '
                      'seconds)',
                      {'uuid': self.node.uuid, 'purpose': self._purpose,
                       'time': self._debug_timer.elapsed()})
            self._lock()
            self.shared = False
    def spawn_after(self, _spawn_method, *args, **kwargs):
        """Call this to spawn a thread to complete the task.
        The specified method will be called when the TaskManager instance
        exits.
        :param _spawn_method: a method that returns a GreenThread object
        :param args: args passed to the method.
        :param kwargs: additional kwargs passed to the method.
        """
        self._spawn_method = _spawn_method
        self._spawn_args = args
        self._spawn_kwargs = kwargs
    def set_spawn_error_hook(self, _on_error_method, *args, **kwargs):
        """Create a hook to handle exceptions when spawning a task.
        Create a hook that gets called upon an exception being raised
        from spawning a background thread to do a task.
        :param _on_error_method: a callable object, it's first parameter
            should accept the Exception object that was raised.
        :param args: additional args passed to the callable object.
        :param kwargs: additional kwargs passed to the callable object.
        """
        self._on_error_method = _on_error_method
        self._on_error_args = args
        self._on_error_kwargs = kwargs
    def release_resources(self):
        """Unlock a node and release resources.
        If an exclusive lock is held, unlock the node. Reset attributes
        to make it clear that this instance of TaskManager should no
        longer be accessed.
        """
        if not self.shared:
            try:
                if self.node:
                    objects.Node.release(self.context, CONF.host, self.node.id)
            except exception.NodeNotFound:
                # squelch the exception if the node was deleted
                # within the task's context.
                pass
        if self.node:
            LOG.debug("Successfully released %(type)s lock for %(purpose)s "
                      "on node %(node)s (lock was held %(time).2f sec)",
                      {'type': 'shared' if self.shared else 'exclusive',
                       'purpose': self._purpose, 'node': self.node.uuid,
                       'time': self._debug_timer.elapsed()})
        # drop all references so further use of this instance fails loudly
        self.node = None
        self.driver = None
        self.ports = None
        self.fsm = None
    def _thread_release_resources(self, t):
        """Thread.link() callback to release resources."""
        self.release_resources()
    def process_event(self, event, callback=None, call_args=None,
                      call_kwargs=None, err_handler=None, target_state=None):
        r"""Process the given event for the task's current state.
        :param event: the name of the event to process
        :param callback: optional callback to invoke upon event transition
        :param call_args: optional \*args to pass to the callback method
        :param call_kwargs: optional \**kwargs to pass to the callback method
        :param err_handler: optional error handler to invoke if the
            callback fails, eg. because there are no workers available
            (err_handler should accept arguments node, prev_prov_state, and
            prev_target_state)
        :param target_state: if specified, the target provision state for the
            node. Otherwise, use the target state from the fsm
        :raises: InvalidState if the event is not allowed by the associated
            state machine
        """
        # Advance the state model for the given event. Note that this doesn't
        # alter the node in any way. This may raise InvalidState, if this event
        # is not allowed in the current state.
        self.fsm.process_event(event, target_state=target_state)
        # stash current states in the error handler if callback is set,
        # in case we fail to get a worker from the pool
        if err_handler and callback:
            self.set_spawn_error_hook(err_handler, self.node,
                                      self.node.provision_state,
                                      self.node.target_provision_state)
        self.node.provision_state = self.fsm.current_state
        self.node.target_provision_state = self.fsm.target_state
        # set up the async worker
        if callback:
            # clear the error if we're going to start work in a callback
            self.node.last_error = None
            if call_args is None:
                call_args = ()
            if call_kwargs is None:
                call_kwargs = {}
            self.spawn_after(callback, *call_args, **call_kwargs)
        # publish the state transition by saving the Node
        self.node.save()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None and self._spawn_method is not None:
            # Spawn a worker to complete the task
            # The linked callback below will be called whenever:
            #   - background task finished with no errors.
            #   - background task has crashed with exception.
            #   - callback was added after the background task has
            #     finished or crashed. While eventlet currently doesn't
            #     schedule the new thread until the current thread blocks
            #     for some reason, this is true.
            # All of the above are asserted in tests such that we'll
            # catch if eventlet ever changes this behavior.
            thread = None
            try:
                thread = self._spawn_method(*self._spawn_args,
                                            **self._spawn_kwargs)
                # NOTE(comstud): Trying to use a lambda here causes
                # the callback to not occur for some reason. This
                # also makes it easier to test.
                thread.link(self._thread_release_resources)
                # Don't unlock! The unlock will occur when the
                # thread finishes.
                return
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    try:
                        # Execute the on_error hook if set
                        if self._on_error_method:
                            self._on_error_method(e, *self._on_error_args,
                                                  **self._on_error_kwargs)
                    except Exception:
                        LOG.warning(_LW("Task's on_error hook failed to "
                                        "call %(method)s on node %(node)s"),
                                    {'method': self._on_error_method.__name__,
                                     'node': self.node.uuid})
                    if thread is not None:
                        # This means the link() failed for some
                        # reason. Nuke the thread.
                        thread.cancel()
                    self.release_resources()
        self.release_resources()
|
|
#!/usr/local/bin/python3
"""
Copyright (c) 2017-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
read filter log, limit by number of records or last received digest (md5 hash of row)
"""
import os
import sys
import re
import glob
from hashlib import md5
import argparse
import ujson
import subprocess
sys.path.insert(0, "/usr/local/opnsense/site-python")
from log_helper import reverse_log_reader, fetch_clog
from params import update_params
# define log layouts, every endpoint contains all options
# source : https://github.com/opnsense/ports/blob/master/opnsense/filterlog/files/description.txt
# fields shared by every filterlog record, regardless of ip version / protocol
fields_general = 'rulenr,subrulenr,anchorname,rid,interface,reason,action,dir,ipversion'.split(',')
# ipv4 field layout, plus the per-protocol extensions (udp / tcp / carp)
fields_ipv4 = fields_general + 'tos,ecn,ttl,id,offset,ipflags,protonum,protoname,length,src,dst'.split(',')
fields_ipv4_udp = fields_ipv4 + 'srcport,dstport,datalen'.split(',')
fields_ipv4_tcp = fields_ipv4 + 'srcport,dstport,datalen,tcpflags,seq,ack,urp,tcpopts'.split(',')
fields_ipv4_carp = fields_ipv4 + 'type,ttl,vhid,version,advskew,advbase'.split(',')
# ipv6 field layout, plus the per-protocol extensions (udp / tcp / carp)
fields_ipv6 = fields_general + 'class,flow,hoplimit,protoname,protonum,length,src,dst'.split(',')
fields_ipv6_udp = fields_ipv6 + 'srcport,dstport,datalen'.split(',')
fields_ipv6_tcp = fields_ipv6 + 'srcport,dstport,datalen,tcpflags,seq,ack,urp,tcpopts'.split(',')
fields_ipv6_carp = fields_ipv6 + 'type,hoplimit,vhid,version,advskew,advbase'.split(',')
# define hex digits
# (used to recognise 32-character md5 hashes in rule labels / ids)
HEX_DIGITS = set("0123456789abcdef")
def update_rule(target, metadata_target, ruleparts, spec):
    """ Fill *target* with the next fields from *spec*.
    Items are consumed from the front of *ruleparts*; each one is stored
    under the spec field name at the current position (the number of keys
    already set), until the spec is exhausted or no parts remain.
    :param target: target rule dict
    :param metadata_target: collected metadata dict
    :param ruleparts: list of raw rule items (consumed from the front)
    :param spec: full field specification, depending on protocol and version
    """
    while ruleparts and len(target) < len(spec):
        field_name = spec[len(target)]
        target[field_name] = ruleparts.pop(0)
    # remember which spec was applied to this rule
    metadata_target['__spec__'] = spec
def fetch_rule_details():
    """ Fetch rule descriptions from the current running config if available
        :return : dict with 'line_ids' (rule details per pf line number) and
                  'rule_map' (label text per md5 rule id)
    """
    line_id_map = dict()
    # rule_map must always be bound: the original only defined it inside the
    # isfile() branch, so the return statement raised a NameError whenever
    # /tmp/rules.debug did not exist.
    rule_map = dict()
    if os.path.isfile('/tmp/rules.debug'):
        # parse running config, fetch all md5 hashed labels
        with open('/tmp/rules.debug', "rt", encoding="utf-8") as f_in:
            for line in f_in:
                if line.find(' label ') > -1:
                    lbl = line.split(' label ')[-1]
                    if lbl.count('"') >= 2:
                        rule_md5 = lbl.split('"')[1]
                        if len(rule_md5) == 32 and set(rule_md5).issubset(HEX_DIGITS):
                            rule_map[rule_md5] = ''.join(lbl.split('"')[2:]).strip().strip('# : ')
        # use pfctl to create a list per rule number with the details found
        sp = subprocess.run(['/sbin/pfctl', '-vvPsr'], capture_output=True, text=True)
        for line in sp.stdout.strip().split('\n'):
            if line.startswith('@'):
                line_id = line.split()[0][1:]
                if line.find(' label ') > -1:
                    rid = ''.join(line.split(' label ')[-1:]).strip()[1:].split('"')[0]
                    if rid in rule_map:
                        line_id_map[line_id] = {'rid': rid, 'label': rule_map[rid]}
                    else:
                        line_id_map[line_id] = {'rid': None, 'label': rid}
    return {'line_ids': line_id_map, 'rule_map': rule_map}
if __name__ == '__main__':
    # read parameters
    parameters = {'limit': '0', 'digest': ''}
    update_params(parameters)
    parameters['limit'] = int(parameters['limit'])
    # parse current running config
    running_conf_descr = fetch_rule_details()
    result = list()
    # newest log file first: rotated logs sorted descending, legacy file last
    filter_logs = []
    if os.path.isdir('/var/log/filter'):
        filter_logs = list(sorted(glob.glob("/var/log/filter/filter_*.log"), reverse=True))
    if os.path.isfile('/var/log/filter.log'):
        filter_logs.append('/var/log/filter.log')
    for filter_log in filter_logs:
        do_exit = False
        try:
            filename = fetch_clog(filter_log)
        except Exception:
            # not a circular log, read the file as-is
            filename = filter_log
        for record in reverse_log_reader(filename):
            if record['line'].find('filterlog') > -1:
                rule = dict()
                metadata = dict()
                # rule metadata (unique hash, hostname, timestamp)
                # NOTE: raw strings here - '\[' / '\d' are invalid escape
                # sequences in a normal string literal.
                if re.search(r'filterlog\[\d*\]:', record['line']):
                    # rfc3164 format
                    log_ident = re.split(r'filterlog[^:]*:', record['line'])
                    tmp = log_ident[0].split()
                    metadata['__host__'] = tmp.pop()
                    metadata['__timestamp__'] = ' '.join(tmp)
                    rulep = log_ident[1].strip().split(',')
                else:
                    # rfc5424 format
                    tmp = record['line'].split()
                    metadata['__timestamp__'] = tmp[1].split('+')[0]
                    metadata['__host__'] = tmp[2]
                    rulep = tmp[-1].strip().split(',')
                metadata['__digest__'] = md5(record['line'].encode()).hexdigest()
                update_rule(rule, metadata, rulep, fields_general)
                if 'action' not in rule:
                    # not a filter log line, skip
                    continue
                elif 'ipversion' in rule:
                    if rule['ipversion'] == '4':
                        update_rule(rule, metadata, rulep, fields_ipv4)
                        if 'protonum' in rule:
                            if rule['protonum'] == '17':  # UDP
                                update_rule(rule, metadata, rulep, fields_ipv4_udp)
                            elif rule['protonum'] == '6':  # TCP
                                update_rule(rule, metadata, rulep, fields_ipv4_tcp)
                            elif rule['protonum'] == '112':  # CARP
                                update_rule(rule, metadata, rulep, fields_ipv4_carp)
                    elif rule['ipversion'] == '6':
                        update_rule(rule, metadata, rulep, fields_ipv6)
                        if 'protonum' in rule:
                            if rule['protonum'] == '17':  # UDP
                                update_rule(rule, metadata, rulep, fields_ipv6_udp)
                            elif rule['protonum'] == '6':  # TCP
                                update_rule(rule, metadata, rulep, fields_ipv6_tcp)
                            elif rule['protonum'] == '112':  # CARP
                                update_rule(rule, metadata, rulep, fields_ipv6_carp)
                rule.update(metadata)
                if rule['rid'] != '0':
                    # rule id in latest record format, don't use rule sequence number in that case
                    if rule['rid'] in running_conf_descr['rule_map']:
                        rule['label'] = running_conf_descr['rule_map'][rule['rid']]
                    else:
                        # obsolete md5 in log record
                        rule['label'] = ''
                elif rule['action'] not in ['pass', 'block']:
                    # no id for translation rules
                    rule['label'] = "%s rule" % rule['action']
                elif len(rulep) > 0 and len(rulep[-1]) == 32 and set(rulep[-1]).issubset(HEX_DIGITS):
                    # rule id appended in record format, don't use rule sequence number in that case either
                    rule['rid'] = rulep[-1]
                    if rulep[-1] in running_conf_descr['rule_map']:
                        rule['label'] = running_conf_descr['rule_map'][rulep[-1]]
                    else:
                        # obsolete md5 in log record
                        rule['label'] = ''
                elif 'rulenr' in rule and rule['rulenr'] in running_conf_descr['line_ids']:
                    rule['label'] = running_conf_descr['line_ids'][rule['rulenr']]['label']
                    rule['rid'] = running_conf_descr['line_ids'][rule['rulenr']]['rid']
                result.append(rule)
                # handle exit criteria, row limit or last digest
                if parameters['limit'] != 0 and len(result) >= parameters['limit']:
                    do_exit = True
                elif parameters['digest'].strip() != '' and parameters['digest'] == rule['__digest__']:
                    do_exit = True
                if do_exit:
                    break
        if do_exit:
            break
    print (ujson.dumps(result))
|
|
# Class definition:
# NordugridATLASExperiment
# This class is the ATLAS experiment class for Nordugrid inheriting from Experiment
# Instances are generated with ExperimentFactory via pUtil::getExperiment()
# import relevant python/pilot modules
from Experiment import Experiment # Main experiment class
from pUtil import tolog # Logging method that sends text to the pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from pUtil import isAnalysisJob # Is the current job a user analysis job or a production job?
from pUtil import verifyReleaseString # To verify the release string (move to Experiment later)
from pUtil import timedCommand # Standard time-out function
from PilotErrors import PilotErrors # Error codes
from ATLASExperiment import ATLASExperiment
# Standard python modules
import os
import re
import commands
from glob import glob
class NordugridATLASExperiment(ATLASExperiment):
    """ ATLAS experiment class for Nordugrid (singleton).
    Instances are generated with ExperimentFactory via pUtil::getExperiment().
    NOTE: this module is Python 2 code (uses commands, has_key, print stmt).
    """
    # private data members
    __experiment = "Nordugrid-ATLAS"
    __instance = None
    __warning = ""
    __analysisJob = False
    __job = None
    # Required methods
    def __init__(self):
        """ Default initialization """
        # not needed?
        # e.g. self.__errorLabel = errorLabel
        pass
    def __new__(cls, *args, **kwargs):
        """ Override the __new__ method to make the class a singleton """
        # NOTE(review): super(ATLASExperiment, cls) skips ATLASExperiment in
        # the MRO - presumably intentional for the singleton scheme; confirm.
        if not cls.__instance:
            cls.__instance = super(ATLASExperiment, cls).__new__(cls, *args, **kwargs)
        return cls.__instance
    def getExperiment(self):
        """ Return a string with the experiment name """
        return self.__experiment
    def setParameters(self, *args, **kwargs):
        """ Set any internally needed variables """
        # set initial values
        self.__job = kwargs.get('job', None)
        if self.__job:
            self.__analysisJob = isAnalysisJob(self.__job.trf)
        else:
            self.__warning = "setParameters found no job object"
    def getJobExecutionCommandObsolete(self, job, jobSite, pilot_initdir):
        """ Define and test the command(s) that will be used to execute the payload """
        # Input tuple: (method is called from RunJob*)
        #   job: Job object
        #   jobSite: Site object
        #   pilot_initdir: launch directory of pilot.py
        #
        # Return tuple:
        #   pilot_error_code, pilot_error_diagnostics, job_execution_command, special_setup_command, JEM, cmtconfig
        # where
        #   pilot_error_code       : self.__error.<PILOT ERROR CODE as defined in PilotErrors class> (value should be 0 for successful setup)
        #   pilot_error_diagnostics: any output from problematic command or explanatory error diagnostics
        #   job_execution_command  : command to execute payload, e.g. cmd = "source <path>/setup.sh; <path>/python trf.py [options]"
        #   special_setup_command  : any special setup command that can be inserted into job_execution_command and is sent to stage-in/out methods
        #   JEM                    : Job Execution Monitor activation state (default value "NO", meaning JEM is not to be used. See JEMstub.py)
        #   cmtconfig              : cmtconfig symbol from the job def or schedconfig, e.g. "x86_64-slc5-gcc43-opt" [NOT USED IN THIS CLASS]
        pilotErrorDiag = ""
        cmd = ""
        special_setup_cmd = ""
        pysiteroot = ""
        siteroot = ""
        JEM = "NO"
        cmtconfig = ""
        # Is it an analysis job or not?
        analysisJob = isAnalysisJob(job.trf)
        # Set the INDS env variable (used by runAthena)
        if analysisJob:
            self.setINDS(job.realDatasetsIn)
        # Command used to download runAthena or runGen
        wgetCommand = 'wget'
        # special setup for NG
        status, pilotErrorDiag, cmd = self.setupNordugridTrf(job, analysisJob, wgetCommand, pilot_initdir)
        if status != 0:
            return status, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
        # add FRONTIER debugging and RUCIO env variables
        cmd = self.addEnvVars2Cmd(cmd, job.jobId, job.taskID, job.processingType, jobSite.sitename, analysisJob)
        if readpar('cloud') == "DE":
            # Should JEM be used?
            metaOut = {}
            try:
                import sys
                from JEMstub import updateRunCommand4JEM
                # If JEM should be used, the command will get updated by the JEMstub automatically.
                cmd = updateRunCommand4JEM(cmd, job, jobSite, tolog, metaOut=metaOut)
            except:
                # On failure, cmd stays the same
                tolog("Failed to update run command for JEM - will run unmonitored.")
            # Is JEM to be used?
            if metaOut.has_key("JEMactive"):
                JEM = metaOut["JEMactive"]
            tolog("Use JEM: %s (dictionary = %s)" % (JEM, str(metaOut)))
        elif '--enable-jem' in cmd:
            tolog("!!WARNING!!1111!! JEM can currently only be used on certain sites in DE")
        # Pipe stdout/err for payload to files
        cmd += " 1>%s 2>%s" % (job.stdout, job.stderr)
        tolog("\nCommand to run the job is: \n%s" % (cmd))
        # NOTE(review): raises KeyError if ATLAS_PYTHON_PILOT is unset - confirm
        # the pilot always exports it before this point.
        tolog("ATLAS_PYTHON_PILOT = %s" % (os.environ['ATLAS_PYTHON_PILOT']))
        if special_setup_cmd != "":
            tolog("Special setup command: %s" % (special_setup_cmd))
        return 0, pilotErrorDiag, cmd, special_setup_cmd, JEM, cmtconfig
    def willDoFileLookups(self):
        """ Should (LFC) file lookups be done by the pilot or not? """
        return False
    def willDoFileRegistration(self):
        """ Should (LFC) file registration be done by the pilot or not? """
        return False
    # Additional optional methods
    def setupNordugridTrf(self, job, analysisJob, wgetCommand, pilot_initdir):
        """ perform the Nordugrid trf setup
        Returns a tuple (error_code, pilotErrorDiag, cmd) where error_code is 0
        on success.
        """
        error = PilotErrors()
        pilotErrorDiag = ""
        cmd = ""
        # assume that the runtime script has already been created
        if not os.environ.has_key('RUNTIME_CONFIG_DIR'):
            pilotErrorDiag = "Environment variable not set: RUNTIME_CONFIG_DIR"
            tolog("!!FAILED!!3000!! %s" % (pilotErrorDiag))
            return error.ERR_SETUPFAILURE, pilotErrorDiag, ""
        runtime_script = "%s/APPS/HEP/ATLAS-%s" % (os.environ['RUNTIME_CONFIG_DIR'], job.release)
        if os.path.exists(runtime_script):
            cmd = ". %s 1" % (runtime_script)
            if analysisJob:
                # try to download the analysis trf
                status, pilotErrorDiag, trfName = self.getAnalysisTrf(wgetCommand, job.trf, pilot_initdir)
                if status != 0:
                    return status, pilotErrorDiag, ""
                trfName = "./" + trfName
            else:
                trfName = job.trf
            cmd += '; export ATLAS_RELEASE=%s;export AtlasVersion=%s;export AtlasPatchVersion=%s' % (job.homePackage.split('/')[-1],job.homePackage.split('/')[-1],job.homePackage.split('/')[-1])
            cmd += "; %s %s" % (trfName, job.jobPars)
        elif verifyReleaseString(job.release) == "NULL":
            # releaseless payload: run the trf without a runtime setup script
            if analysisJob:
                # try to download the analysis trf
                status, pilotErrorDiag, trfName = self.getAnalysisTrf(wgetCommand, job.trf, pilot_initdir)
                if status != 0:
                    return status, pilotErrorDiag, ""
                trfName = "./" + trfName
            else:
                trfName = job.trf
            cmd = "%s %s" % (trfName, job.jobPars)
        else:
            pilotErrorDiag = "Could not locate runtime script: %s" % (runtime_script)
            tolog("!!FAILED!!3000!! %s" % (pilotErrorDiag))
            return error.ERR_SETUPFAILURE, pilotErrorDiag, ""
        # correct for multi-core if necessary (especially important in case coreCount=1 to limit parallel make)
        cmd = self.addMAKEFLAGS(job.coreCount, "") + cmd
        return 0, pilotErrorDiag, cmd
    def getWarning(self):
        """ Return any warning message passed to __warning """
        return self.__warning
    def getReleaseObsolete(self, release):
        """ Return a list of the software release id's """
        # Assuming 'release' is a string that separates release id's with '\n'
        # Used in the case of payload using multiple steps with different release versions
        # E.g. release = "19.0.0\n19.1.0" -> ['19.0.0', '19.1.0']
        if os.environ.has_key('Nordugrid_pilot') and os.environ.has_key('ATLAS_RELEASE'):
            return os.environ['ATLAS_RELEASE'].split(",")
        else:
            return release.split("\n")
    def checkSpecialEnvVars(self, sitename):
        """ Check special environment variables """
        # Set a special env variable that will be used to identify Nordugrid in other pilot classes
        os.environ['Nordugrid_pilot'] = ""
        # Call the method from the parent class
        ec = super(NordugridATLASExperiment, self).checkSpecialEnvVars(sitename)
        return ec
    # Optional
    def shouldExecuteUtility(self):
        """ Determine whether a memory utility monitor should be executed """
        # The RunJob class has the possibility to execute a memory utility monitor that can track the memory usage
        # of the payload. The monitor is executed if this method returns True. The monitor is expected to produce
        # a summary JSON file whose name is defined by the getMemoryMonitorJSONFilename() method. The contents of
        # this file (ie. the full JSON dictionary) will be added to the jobMetrics at the end of the job (see
        # PandaServerClient class).
        return True
    # Optional
    def getUtilityJSONFilename(self):
        """ Return the filename of the memory monitor JSON file """
        # For explanation, see shouldExecuteUtility()
        return "memory_monitor_summary.json"
    def getSetupPath(self, job_command, trf):
        """ Get the setup path from the job execution command """
        setup = ""
        # Trim the trf if necessary (i.e. remove any paths which are present in buildJob jobs)
        trf = self.trimTrfName(trf)
        # Take care of special cases, e.g. trf="buildJob-.." but job_command="..; ./buildJob-.."
        special_case = "./%s" % (trf)
        if special_case in job_command:
            trf = special_case
        # Strip the setup command at the location of the trf name
        l = job_command.find(trf)
        if l > 0:
            setup = job_command[:l]
        # Make sure to remove any unwanted white spaces as well
        return setup.strip()
    def trimTrfName(self, trfName):
        """ Remove any unwanted strings from the trfName """
        if "/" in trfName:
            trfName = os.path.basename(trfName)
        return trfName
    def updateSetupPathWithReleaseAndCmtconfig(self, setup_path, release, alt_release, patched_release, alt_patched_release, cmtconfig, alt_cmtconfig):
        """ Update the setup path with an alternative release, patched release and cmtconfig """
        # This method can be used to modify a setup path with an alternative release, patched release and cmtconfig
        # E.g. this can be used by a tool that might want to fall back to a preferred setup
        # Correct the release info
        if "-" in release: # the cmtconfig is appended, e.g. release='17.2.7-X86_64-SLC5-GCC43-OPT'
            cmtconfig = release[release.find('-')+1:]
            release = release[:release.find('-')]
        # Update the patched release with a tmp string
        if patched_release != "" and patched_release in setup_path:
            setup_path = setup_path.replace(patched_release, '__PATCHED_RELEASE__')
        # Update the release
        if release in setup_path:
            setup_path = setup_path.replace(release, alt_release)
        # Update the patched release
        if '__PATCHED_RELEASE__' in setup_path:
            setup_path = setup_path.replace('__PATCHED_RELEASE__', alt_patched_release)
        # Update the cmtconfig
        if cmtconfig != "" and cmtconfig in setup_path:
            setup_path = setup_path.replace(cmtconfig, alt_cmtconfig.upper())
        return setup_path
    # Optional
    def getUtilityCommand(self, **argdict):
        """ Prepare a utility command string """
        # This method can be used to prepare a setup string for an optional utility tool, e.g. a memory monitor,
        # that will be executed by the pilot in parallel with the payload.
        # The pilot will look for an output JSON file (summary.json) and will extract pre-determined fields
        # from it and report them with the job updates. Currently the pilot expects to find fields related
        # to memory information.
        pid = argdict.get('pid', 0)
        summary = self.getUtilityJSONFilename()
        workdir = argdict.get('workdir', '.')
        interval = 60
        default_release = "21.0.22" #"21.0.18" #"21.0.17" #"20.7.5" #"20.1.5"
        # default_patch_release = "20.7.5.8" #"20.1.5.2" #"20.1.4.1"
        # default_cmtconfig = "x86_64-slc6-gcc49-opt"
        default_cmtconfig = "x86_64-slc6-gcc62-opt"
        # default_swbase = "%s/atlas.cern.ch/repo/sw/software" % (self.getCVMFSPath())
        default_swbase = "%s/atlas.cern.ch/repo" % (self.getCVMFSPath())
        default_setup = self.getModernASetup() + " Athena," + default_release + " --platform " + default_cmtconfig
        tolog("Will use default (fallback) setup for MemoryMonitor")
        cmd = default_setup
        # Now add the MemoryMonitor command
        cmd += "; MemoryMonitor --pid %d --filename %s --json-summary %s --interval %d" % (pid, self.getUtilityOutputFilename(), summary, interval)
        cmd = "cd " + workdir + ";" + cmd
        return cmd
if __name__ == "__main__":
    # Placeholder entry point; no standalone self-tests exist for this module yet.
    print "Implement test cases here"
|
|
#! /bin/env python
"""
This is the BoccaCmd module
>>> b = BoccaCmd (".",package="my.package")
>>> b.full_name ("func")
'my.package.func'
>>> b.short_name ("my.package.func")
'func'
"""
import subprocess
import os
import sys
import shutil
import optparse
import tarfile
import commands
from types import *
# ANSI terminal escape sequences used to colorize output:
# green for pass, red for fail, with a reset sequence afterwards.
pass_pre = "\x1B[00;32m"
pass_post = "\x1B[00m"
fail_pre = "\x1B[00;31m"
fail_post = "\x1B[00m"
class BoccaObject (object):
    """Base class for Bocca entities (interfaces, classes, components, ...).

    All state lives in the self._vars dict so subclasses and callers can
    attach arbitrary named attributes via add_var/set_var/get_var.
    """
    def __init__ (self, name, package=None):
        # Fixed: the original used a mutable default argument (package=[]),
        # so all default-constructed instances shared ONE list which
        # add_var('package', ...) would then mutate for every instance.
        self._vars = {}
        self._vars['name'] = name
        self._vars['package'] = package if package is not None else []
        self._vars['extras'] = []
    def name (self):
        """Return the object name; 'name' may be stored as a one-element list."""
        name = self._vars['name']
        # isinstance(x, list) is equivalent to the original Py2-only
        # isinstance(x, ListType) (types.ListType IS list) and is portable.
        if isinstance (name, list):
            return name[0]
        else:
            return name
    def package (self):
        """Return the package name, or None when no package is set."""
        package = self._vars['package']
        if isinstance (package, list):
            if len (package) > 0:
                return package[0]
            else:
                return None
        else:
            return package
    def vars (self):
        """Return the variable names held by this object."""
        return self._vars.keys ()
    def has_var (self, var_name):
        """Return True if var_name is set on this object.

        Fixed: the original called self._vars.has_key() with NO argument,
        which always raised TypeError.
        """
        return var_name in self._vars
    def add_var (self, var_name, var):
        """Append var to the list stored under var_name (creating it if new)."""
        if var_name in self._vars:
            self._vars[var_name].append (var)
        else:
            self._vars[var_name] = [var]
    def get_var (self, var_name):
        """Return the value stored under var_name, or None if unset."""
        if var_name in self._vars:
            return self._vars[var_name]
        else:
            return None
    def set_var (self, var_name, var):
        """Overwrite the value stored under var_name."""
        self._vars[var_name] = var
    def full_name (self):
        """Return 'package.name', or just the name when packageless."""
        if self.package () is not None:
            return ".".join ([self.package (), self.name ()])
        else:
            return self.name ()
    def prepend_name (self, prefix):
        """Prefix the stored name with 'prefix.' (no-op when prefix is None)."""
        if prefix is not None:
            self._vars['name'] = '.'.join ([prefix, self.name ()])
    def root_dir (self):
        # Base objects have no directory layout; subclasses override.
        return None
    def impl_dir (self):
        return None
    def sidl_file (self):
        return None
    def valid_options (self):
        """Return the command-line options this entity type accepts."""
        return ['extras', 'no-merge-buildfiles']
class BoccaProject (BoccaObject):
    """A whole Bocca project; its name is never package-qualified."""
    def __init__ (self, name, package=None):
        self._vars = {'name': name,
                      'package': package,
                      'language': [],
                      'extras': []}
    def set_name (self, name):
        """Replace the project name."""
        self._vars['name'] = name
    def full_name (self):
        # Projects are not qualified by a package.
        return self.name ()
    def language (self):
        """Return the configured language list."""
        return self.get_var ('language')
    def root_dir (self, prefix=''):
        """The project root is the current directory under prefix."""
        return os.path.join (prefix, '.')
    def noun (self):
        return "project"
    def valid_options (self):
        return ['extras', 'no-merge-buildfiles', 'output-dir']
class BoccaInterface (BoccaObject):
    """A SIDL interface; its .sidl file lives under ports/sidl."""
    def __init__ (self, name, package=[]):
        self._vars = {'name': name,
                      'package': package,
                      'requires': [],
                      'extends': [],
                      'extras': []}
    def noun (self):
        return "interface"
    def root_dir (self, prefix=''):
        """Interfaces are kept in <prefix>/ports/sidl."""
        return os.path.join (prefix, "ports", "sidl")
    def sidl_file (self, prefix=""):
        """Full path of this interface's .sidl file."""
        return os.path.join (self.root_dir (prefix), self.full_name () + '.sidl')
    def valid_options (self):
        return ['extras', 'requires', 'extends', 'import-sidl',
                'no-merge-buildfiles']
class BoccaClass (BoccaObject):
    """A concrete SIDL class; sources live under components/."""
    def __init__ (self, name, package=[]):
        self._vars = {'name': name,
                      'package': package,
                      'language': [],
                      'requires': [],
                      'implements': [],
                      'extras': []}
    def noun (self):
        return "class"
    def sidl_file (self, prefix=""):
        """Full path of this class's .sidl file under components/sidl."""
        return os.path.join (prefix, "components", "sidl", self.full_name () + ".sidl")
    def root_dir (self, prefix=""):
        """Each class gets its own directory named after its full name."""
        return os.path.join (prefix, "components", self.full_name ())
    def impl_dir (self, prefix=""):
        # Implementation files live alongside the class itself.
        return self.root_dir (prefix)
    def valid_options (self):
        return ['extras', 'implements', 'requires', 'import-sidl', 'import-impl',
                'extends', 'no-merge-buildfiles']
class BoccaComponent (BoccaClass):
    """A component: a class that additionally provides and uses ports."""
    def __init__ (self, name, package=[]):
        self._vars = {'name': name,
                      'package': package,
                      'language': [],
                      'implements': [],
                      'requires': [],
                      'uses': [],
                      'provides': [],
                      'extras': []}
    def noun (self):
        return "component"
    def valid_options (self):
        return ['extras', 'implements', 'requires', 'import-sidl', 'import-impl',
                'extends', 'no-merge-buildfiles', 'provides', 'uses']
class BoccaPort (BoccaInterface):
    """A SIDL port: stored like an interface, with a restricted option set."""
    def noun (self):
        # Human-readable entity kind used in messages and dispatch.
        return "port"
    def valid_options (self):
        # Ports do not accept 'implements'; otherwise like interfaces.
        return ['extras', 'extends', 'requires', 'import-sidl',
                'no-merge-buildfiles']
class BoccaEnum (BoccaInterface):
    """A SIDL enumeration; stored like an interface but with fewer options."""
    def noun (self):
        return "enum"
    def valid_options (self):
        # Enums only support extra files and SIDL import.
        return ['extras', 'import-sidl', 'no-merge-buildfiles']
# Factory table: maps an entity kind name (as given on the command line)
# to the class implementing it; consumed by new_object() below.
object_creator = { 'interface': BoccaInterface,
                   'enum': BoccaEnum,
                   'port': BoccaPort,
                   'class': BoccaClass,
                   'component': BoccaComponent,
                   'project': BoccaProject}
def new_object (name, type):
    """Instantiate the Bocca entity class registered for the given kind.

    name -- name for the new entity
    type -- entity kind, a key of object_creator (note: 'type' shadows the
            builtin; the parameter name is kept for caller compatibility)
    Raises ObjectTypeError (defined elsewhere in this module) for an
    unknown kind.
    """
    try:
        return object_creator[type] (name)
    except KeyError:
        # The unused 'as e' binding was dropped; the KeyError carries no
        # information callers need beyond the bad kind name itself.
        raise ObjectTypeError ()
if __name__ == "__main__":
    # Run the doctest examples embedded in the module docstring above.
    import doctest
    doctest.testmod ()
|
|
"""
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce
dimensionality and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck
# License: BSD 3 clause
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")

# parse commandline arguments
op = OptionParser()
op.add_option(
    "--lsa",
    dest="n_components",
    type="int",
    help="Preprocess documents with latent semantic analysis.",
)
op.add_option(
    "--no-minibatch",
    action="store_false",
    dest="minibatch",
    default=True,
    help="Use ordinary k-means algorithm (in batch mode).",
)
op.add_option(
    "--no-idf",
    action="store_false",
    dest="use_idf",
    default=True,
    help="Disable Inverse Document Frequency feature weighting.",
)
op.add_option(
    "--use-hashing",
    action="store_true",
    default=False,
    help="Use a hashing feature vectorizer",
)
op.add_option(
    "--n-features",
    # Consistency fix: every other option spells the type as the string
    # "int"; optparse also accepts the builtin int (it converts it to its
    # name), but normalizing avoids the mixed style.
    type="int",
    default=10000,
    help="Maximum number of features (dimensions) to extract from text.",
)
op.add_option(
    "--verbose",
    action="store_true",
    dest="verbose",
    default=False,
    help="Print progress reports inside k-means algorithm.",
)

print(__doc__)
def is_interactive():
    """Return True when running without a script file (e.g. Jupyter/IPython),
    detected by the absence of __file__ on the __main__ module."""
    main_module = sys.modules["__main__"]
    return not hasattr(main_module, "__file__")
if not is_interactive():
    op.print_help()
    print()

# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
    # OptionParser.error() prints the message to stderr and exits with
    # status 2, so the sys.exit(1) that followed here was unreachable
    # dead code and has been removed.
    op.error("this script takes no arguments.")
# %%
# Load some categories from the training set
# ------------------------------------------
categories = [
    "alt.atheism",
    "talk.religion.misc",
    "comp.graphics",
    "sci.space",
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
# random_state fixed for reproducible shuffling of the corpus
dataset = fetch_20newsgroups(
    subset="all", categories=categories, shuffle=True, random_state=42
)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
# %%
# Feature Extraction
# ------------------
labels = dataset.target
# One cluster per ground-truth newsgroup category.
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    if opts.use_idf:
        # Perform an IDF normalization on the output of HashingVectorizer
        hasher = HashingVectorizer(
            n_features=opts.n_features,
            stop_words="english",
            alternate_sign=False,
            norm=None,
        )
        vectorizer = make_pipeline(hasher, TfidfTransformer())
    else:
        # Raw hashed counts, l2-normalized per document
        vectorizer = HashingVectorizer(
            n_features=opts.n_features,
            stop_words="english",
            alternate_sign=False,
            norm="l2",
        )
else:
    # In-memory vocabulary; drops very frequent (>50% of docs) and very
    # rare (<2 docs) terms
    vectorizer = TfidfVectorizer(
        max_df=0.5,
        max_features=opts.n_features,
        min_df=2,
        stop_words="english",
        use_idf=opts.use_idf,
    )
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
    print("Performing dimensionality reduction using LSA")
    t0 = time()
    # Vectorizer results are normalized, which makes KMeans behave as
    # spherical k-means for better results. Since LSA/SVD results are
    # not normalized, we have to redo the normalization.
    svd = TruncatedSVD(opts.n_components)
    normalizer = Normalizer(copy=False)
    lsa = make_pipeline(svd, normalizer)
    X = lsa.fit_transform(X)
    print("done in %fs" % (time() - t0))
    explained_variance = svd.explained_variance_ratio_.sum()
    print(
        "Explained variance of the SVD step: {}%".format(int(explained_variance * 100))
    )
    print()
# %%
# Clustering
# ----------
if opts.minibatch:
    km = MiniBatchKMeans(
        n_clusters=true_k,
        init="k-means++",
        n_init=1,
        init_size=1000,
        batch_size=1000,
        verbose=opts.verbose,
    )
else:
    km = KMeans(
        n_clusters=true_k,
        init="k-means++",
        max_iter=100,
        n_init=1,
        verbose=opts.verbose,
    )
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
# %%
# Performance metrics
# -------------------
# The first four metrics compare cluster assignments against the
# ground-truth newsgroup labels; the Silhouette Coefficient is unsupervised
# (sampled to keep it fast).
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f" % metrics.adjusted_rand_score(labels, km.labels_))
print(
    "Silhouette Coefficient: %0.3f"
    % metrics.silhouette_score(X, km.labels_, sample_size=1000)
)
print()
# %%
if not opts.use_hashing:
    # Hashing is not invertible, so per-cluster top terms are only
    # available for the vocabulary-based vectorizer.
    print("Top terms per cluster:")
    if opts.n_components:
        # Map the LSA-space centroids back to term space first.
        original_space_centroids = svd.inverse_transform(km.cluster_centers_)
        order_centroids = original_space_centroids.argsort()[:, ::-1]
    else:
        order_centroids = km.cluster_centers_.argsort()[:, ::-1]
    terms = vectorizer.get_feature_names_out()
    for i in range(true_k):
        print("Cluster %d:" % i, end="")
        for ind in order_centroids[i, :10]:
            print(" %s" % terms[ind], end="")
        print()
|
|
# Copyright (C) 2008 Jean-Michel Sizun <jm.sizun AT gmail>
#
# Copyright (C) 2008 Brent Woodruff
# http://www.fprimex.com
#
# Copyright (C) 2004 John Sutherland <garion@twcny.rr.com>
# http://garion.tzo.com/python/
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# Change log:
#
# 16-Nov-08 - Migrated from urllib to coherence infrastructure
#
# 04-Aug-08 - Added Gallery2 compatibility
# Changed fetch_albums and fetch_albums_prune to return dicts
# Added docstrings
# Created package and registered with Pypi
#
# 09-Jun-04 - Removed self.cookie='' from _doRequest to allow multiple
# transactions for each login.
# Fixed cut paste error in newAlbum.
# (both patches from Yuti Takhteyev
from coherence.upnp.core.utils import getPage
import StringIO
import string
class Gallery:
"""
The Gallery class implements the Gallery Remote protocol as documented
here:
http://codex.gallery2.org/Gallery_Remote:Protocol
The Gallery project is an open source web based photo album organizer
written in php. Gallery's web site is:
http://gallery.menalto.com/
This class is a 3rd party product which is not maintained by the
creators of the Gallery project.
Example usage:
from galleryremote import Gallery
my_gallery = Gallery('http://www.yoursite.com/gallery2', 2)
my_gallery.login('username','password')
albums = my_gallery.fetch_albums()
"""
def __init__(self, url, version=2):
"""
Create a Gallery for remote access.
url - base address of the gallery
version - version of the gallery being connected to (default 2),
either 1 for Gallery1 or 2 for Gallery2
"""
self.version = version # Gallery1 or Gallery2
if version == 1:
self.url = url + '/gallery_remote2.php'
else:
# default to G2
self.url = url + '/main.php'
self.auth_token = None
self.logged_in = 0
self.cookie = ''
self.protocol_version = '2.5'
def _do_request(self, request):
"""
Send a request, encoded as described in the Gallery Remote protocol.
request - a dictionary of protocol parameters and values
"""
if self.auth_token != None:
request['g2_authToken'] = self.auth_token
url = self.url
if (len(request) > 0) :
url += '?'
for key,value in request.iteritems():
url += '%s=%s&' % (key,value)
headers = None
if self.cookie != '':
headers = {'Cookie' : self.cookie}
def gotPage(result):
data,headers = result
response = self._parse_response( data )
if response['status'] != '0':
raise Exception(response['status_text'])
try:
self.auth_token = response['auth_token']
except:
pass
if headers.has_key('set-cookie'):
cookie_info = headers['set-cookie'][-1]
self.cookie = cookie_info.split(';')[0]
return response
def gotError(error):
print "Unable to process Gallery2 request: %s" % url
print "Error: %s" % error
return None
d = getPage(url, headers=headers)
d.addCallback(gotPage)
d.addErrback(gotError)
return d
def _parse_response(self, response):
"""
Decode the response from a request, returning a request dict
response - The response from a gallery request, encoded according
to the gallery remote protocol
"""
myStr = StringIO.StringIO(response)
for line in myStr:
if string.find( line, '#__GR2PROTO__' ) != -1:
break
# make sure the 1st line is #__GR2PROTO__
if string.find( line, '#__GR2PROTO__' ) == -1:
raise Exception("Bad response: %r" % response)
resDict = {}
for myS in myStr:
myS = myS.strip()
strList = string.split(myS, '=', 2)
try:
resDict[strList[0]] = strList[1]
except:
resDict[strList[0]] = ''
return resDict
def _get(self, response, kwd):
"""
"""
try:
retval = response[kwd]
except:
retval = ''
return retval
def login(self, username, password):
"""
Establish an authenticated session to the remote gallery.
username - A valid gallery user's username
password - That valid user's password
"""
if self.version == 1:
request = {
'protocol_version': self.protocol_version,
'cmd': 'login',
'uname': username,
'password': password
}
else:
request = {
'g2_controller' : 'remote:GalleryRemote',
'g2_form[protocol_version]' : self.protocol_version,
'g2_form[cmd]' : 'login',
'g2_form[uname]': username,
'g2_form[password]': password
}
def gotPage(result):
if result is None:
print "Unable to login as %s to gallery2 server (%s)" % (username, self.url)
return
self.logged_in = 1
d = self._do_request(request)
d.addCallbacks(gotPage)
return d
def fetch_albums(self):
"""
Obtain a dict of albums contained in the gallery keyed by
album name. In Gallery1, the name is alphanumeric. In Gallery2,
the name is the unique identifying number for that album.
"""
if self.version == 1:
request = {
'protocol_version' : self.protocol_version,
'cmd' : 'fetch-albums'
}
else:
request = {
'g2_controller' : 'remote:GalleryRemote',
'g2_form[protocol_version]' : self.protocol_version,
'g2_form[cmd]' : 'fetch-albums'
}
d = self._do_request(request)
def gotResponse(response):
if response is None:
print "Unable to retrieve list of albums!"
return None
albums = {}
for x in range(1, int(response['album_count']) + 1):
album = {}
album['name'] = self._get(response,'album.name.' + str(x))
album['title'] = self._get(response,'album.title.' + str(x))
album['summary'] = self._get(response,'album.summary.' + str(x))
album['parent'] = self._get(response,'album.parent.' + str(x))
album['resize_size'] = self._get(response,'album.resize_size.' + str(x))
album['perms.add'] = self._get(response,'album.perms.add.' + str(x))
album['perms.write'] = self._get(response,'album.perms.write.' + str(x))
album['perms.del_item'] = self._get(response,'album.perms.del_item.' + str(x))
album['perms.del_alb'] = self._get(response,'album.perms.del_alb.' + str(x))
album['perms.create_sub'] = self._get(response,'album.perms.create_sub.' + str(x))
album['perms.info.extrafields'] = self._get(response,'album.info.extrafields' + str(x))
albums[album['name']] = album
return albums
d.addCallback(gotResponse)
return d
def fetch_albums_prune(self):
"""
Obtain a dict of albums contained in the gallery keyed by
album name. In Gallery1, the name is alphanumeric. In Gallery2,
the name is the unique identifying number for that album.
From the protocol docs:
"The fetch_albums_prune command asks the server to return a list
of all albums that the user can either write to, or that are
visible to the user and contain a sub-album that is writable
(including sub-albums several times removed)."
"""
if self.version == 1:
request = {
'protocol_version' : self.protocol_version,
'cmd' : 'fetch-albums-prune'
}
else:
request = {
'g2_controller' : 'remote:GalleryRemote',
'g2_form[protocol_version]' : self.protocol_version,
'g2_form[cmd]' : 'fetch-albums-prune'
}
response = self._do_request(request)
def gotResponse(response):
# as long as it comes back here without an exception, we're ok.
albums = {}
for x in range(1, int(response['album_count']) + 1):
album = {}
album['name'] = self._get(response,'album.name.' + str(x))
album['title'] = self._get(response,'album.title.' + str(x))
album['summary'] = self._get(response,'album.summary.' + str(x))
album['parent'] = self._get(response,'album.parent.' + str(x))
album['resize_size'] = self._get(response,'album.resize_size.' + str(x))
album['perms.add'] = self._get(response,'album.perms.add.' + str(x))
album['perms.write'] = self._get(response,'album.perms.write.' + str(x))
album['perms.del_item'] = self._get(response,'album.perms.del_item.' + str(x))
album['perms.del_alb'] = self._get(response,'album.perms.del_alb.' + str(x))
album['perms.create_sub'] = self._get(response,'album.perms.create_sub.' + str(x))
album['perms.info.extrafields'] = self._get(response,'album.info.extrafields' + str(x))
albums[album['name']] = album
return albums
d.addCallback(gotResponse)
return d
def add_item(self, album, filename, caption, description):
"""
Add a photo to the specified album.
album - album name / identifier
filename - image to upload
caption - string caption to add to the image
description - string description to add to the image
"""
if self.version == 1:
request = {
'protocol_version' : self.protocol_version,
'cmd' : 'add-item',
'set_albumName' : album,
'userfile' : file,
'userfile_name' : filename,
'caption' : caption,
'extrafield.Description' : description
}
else:
request = {
'g2_form[protocol_version]' : self.protocol_version,
'g2_form[cmd]' : 'add-item',
'g2_form[set_albumName]' : album,
'g2_form[userfile]' : file,
'g2_form[userfile_name]' : filename,
'g2_form[caption]' : caption,
'g2_form[extrafield.Description]' : description
}
file = open(filename)
d = self._do_request(request)
# if we get here, everything went ok.
return d
def album_properties(self, album):
"""
Obtain album property information for the specified album.
album - the album name / identifier to obtain information for
"""
if self.version == 1:
request = {
'protocol_version' : self.protocol_version,
'cmd' : 'album-properties',
'set_albumName' : album
}
else:
request = {
'g2_controller' : 'remote:GalleryRemote',
'g2_form[protocol_version]' : self.protocol_version,
'g2_form[cmd]' : 'album-properties',
'g2_form[set_albumName]' : album
}
d = self._do_request(request)
def gotResponse(response):
res_dict = {}
if response.has_key('auto_resize'):
res_dict['auto_resize'] = response['auto_resize']
if response.has_key('add_to_beginning'):
res_dict['add_to_beginning'] = response['add_to_beginning']
return res_dict
d.addCallback(gotResponse)
return d
def new_album(self, parent, name=None, title=None, description=None):
"""
Add an album to the specified parent album.
parent - album name / identifier to contain the new album
name - unique string name of the new album
title - string title of the album
description - string description to add to the image
"""
if self.version == 1:
request = {
'g2_controller' : 'remote:GalleryRemote',
'protocol_version' : self.protocol_version,
'cmd' : 'new-album',
'set_albumName' : parent
}
if name != None:
request['newAlbumName'] = name
if title != None:
request['newAlbumTitle'] = title
if description != None:
request['newAlbumDesc'] = description
else:
request = {
'g2_controller' : 'remote:GalleryRemote',
'g2_form[protocol_version]' : self.protocol_version,
'g2_form[cmd]' : 'new-album',
'g2_form[set_albumName]' : parent
}
if name != None:
request['g2_form[newAlbumName]'] = name
if title != None:
request['g2_form[newAlbumTitle]'] = title
if description != None:
request['g2_form[newAlbumDesc]'] = description
d = self._do_request(request)
def gotResponse(response):
return response['album_name']
d.addCallback(d)
return d
def fetch_album_images(self, album):
"""
Get the image information for all images in the specified album.
album - specifies the album from which to obtain image information
"""
if self.version == 1:
request = {
'protocol_version' : self.protocol_version,
'cmd' : 'fetch-album-images',
'set_albumName' : album,
'albums_too' : 'no',
'extrafields' : 'yes'
}
else:
request = {
'g2_controller' : 'remote:GalleryRemote',
'g2_form[protocol_version]' : self.protocol_version,
'g2_form[cmd]' : 'fetch-album-images',
'g2_form[set_albumName]' : album,
'g2_form[albums_too]' : 'no',
'g2_form[extrafields]' : 'yes'
}
d = self._do_request(request)
def gotResponse (response):
if response is None:
print "Unable to retrieve list of item for album %s." % album
return None
images = []
for x in range(1, int(response['image_count']) + 1):
image = {}
image['name'] = self._get(response, 'image.name.' + str(x))
image['title'] = self._get(response, 'image.title.' + str(x))
image['raw_width'] = self._get(response, 'image.raw_width.' + str(x))
image['raw_height'] = self._get(response, 'image.raw_height.' + str(x))
image['resizedName'] = self._get(response, 'image.resizedName.' + str(x))
image['resized_width'] = self._get(response, 'image.resized_width.' + str(x))
image['resized_height'] = self._get(response, 'image.resized_height.' + str(x))
image['thumbName'] = self._get(response, 'image.thumbName.' + str(x))
image['thumb_width'] = self._get(response, 'image.thumb_width.' + str(x))
image['thumb_height'] = self._get(response, 'image.thumb_height.' + str(x))
image['raw_filesize'] = self._get(response, 'image.raw_filesize.' + str(x))
image['caption'] = self._get(response, 'image.caption.' + str(x))
image['clicks'] = self._get(response, 'image.clicks.' + str(x))
image['capturedate.year'] = self._get(response, 'image.capturedate.year' + str(x))
image['capturedate.mon'] = self._get(response, 'image.capturedate.mon' + str(x))
image['capturedate.mday'] = self._get(response, 'image.capturedate.mday' + str(x))
image['capturedate.hours'] = self._get(response, 'image.capturedate.hours' + str(x))
image['capturedate.minutes'] = self._get(response, 'image.capturedate.minutes' + str(x))
image['capturedate.seconds'] = self._get(response, 'image.capturedate.seconds' + str(x))
image['description'] = self._get(response, 'image.extrafield.Description.' + str(x))
images.append(image)
return images
d.addCallback(gotResponse)
return d
def get_URL_for_image(self, gallery2_id):
url = '%s/main.php?g2_view=core.DownloadItem&g2_itemId=%s' % (self.url, gallery2_id)
return url
|
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
#Authors Kyle Monson and Robert Lutes
import datetime
import logging
import sys
import time
import math
from zmq.utils import jsonapi
from dateutil import parser
from volttron.platform.agent import BaseAgent, PublishMixin
from volttron.platform.agent import green, utils, matching, sched
from volttron.platform.messaging import topics
from volttron.platform.messaging import headers as headers_mod
def DemandResponseAgent(config_path, **kwargs):
"""DR application for time of use pricing"""
config = utils.load_config(config_path)
agent_id = config['agentid']
rtu_path = dict((key, config[key])
for key in ['campus', 'building', 'unit'])
schedule = config.get('Schedule')
datefmt = '%Y-%m-%d %H:%M:%S'
damper_cpp = config.get('damper_cpp', 0.0)
fan_reduction = config.get('fan_reduction', 0.1)
max_precool_hours = config.get('max_precool_hours', 5)
cooling_stage_differential = config.get('cooling_stage_differential', 1.0)
'''cpp_end_hour = config.get('cpp_end_hour', 18)
'cpp_end_minute = config.get('cpp_end_minute', 0)'''
#point names for controller
cooling_stpt = config.get('cooling_stpt')
heating_stpt = config.get('heating_stpt')
min_damper_stpt = config.get('min_damper_stpt')
cooling_stage_diff = config.get('cooling_stage_diff')
cooling_fan_sp1 = config.get('cooling_fan_sp1')
cooling_fan_sp2 = config.get('cooling_fan_sp2')
override_command = config.get('override_command')
occupied_status = config.get('occupied_status')
space_temp = config.get('space_temp')
volttron_flag = config.get('volttron_flag')
class Agent(PublishMixin, BaseAgent):
    """Demand-response agent for a single rooftop unit.

    States observed in this module: STARTUP, IDLE, PRECOOL, DR_EVENT,
    RESTORE, CLEANUP and OVERRIDE.
    """
    def __init__(self, **kwargs):
        super(Agent, self).__init__(**kwargs)
        # Normal-operation settings restored after an event.
        self.normal_firststage_fanspeed = config.get('normal_firststage_fanspeed', 75.0)
        self.normal_secondstage_fanspeed = config.get('normal_secondstage_fanspeed', 90.0)
        self.normal_damper_stpt = config.get('normal_damper_stpt', 5.0)
        self.normal_coolingstpt = config.get('normal_coolingstpt', 74.0)
        self.normal_heatingstpt = config.get('normal_heatingstpt', 67.0)
        self.smap_path = config.get('smap_path')
        self.default_cooling_stage_differential = 0.5
        self.current_spacetemp = 0.0
        # Thermal drift rate used to size pre-cool / restore windows
        # (degrees per hour -- TODO confirm units).
        self.building_thermal_constant = config.get('building_thermal_constant', 4.0)
        self.timestep_length = config.get('timestep_length', 900)
        # Cooling setpoint during the event (csp_cpp) and pre-cool (csp_pre).
        self.csp_cpp = config.get('csp_cpp', 80.0)
        self.csp_pre = config.get('csp_pre', 67.0)
        # Seconds needed to drift back to the normal cooling setpoint.
        self.restore_window = int(((self.csp_cpp - self.normal_coolingstpt)/self.building_thermal_constant) *3600)
        self.state = 'STARTUP'
        self.e_start_msg = None
        # error_handler: closure that re-issues failed actuator writes;
        # actuator_handler: readback verification (see cancel_event).
        self.error_handler = None
        self.actuator_handler = None
        self.pre_cool_idle = None
        self.e_start = None
        self.e_end = None
        self.pre_stored_spacetemp =None
        # taskID -> schedule/event window bookkeeping.
        self.device_schedule = {}
        self.all_scheduled_events = {}
        self.currently_running_dr_event_handlers = []
        self.headers = {headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON, 'requesterID': agent_id}
        utils.setup_logging()
        self._log = logging.getLogger(__name__)
@matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
@matching.match_exact(topics.ACTUATOR_SCHEDULE_RESULT())
def schedule_result(self, topic, headers, message, match):
    # Handles the actuator agent's reply to a schedule request published
    # from _on_dr_event (or retried below).
    msg = jsonapi.loads(message[0])
    self._log.info('Schedule Request Acknowledged')
    # Stop re-sending the request now that a reply arrived.
    self.task_timer.cancel()
    task_id = headers.get('taskID', 0)
    response_type = headers.get('type', 0)
    # Look up the window recorded for this task when the event arrived.
    schedule_start = self.device_schedule[task_id]["schedule_start"]
    event_start = schedule_start + datetime.timedelta(minutes = 1)
    schedule_end = self.device_schedule[task_id]["schedule_end"]
    e_start = self.device_schedule[task_id]["event_start"]
    e_end = self.device_schedule[task_id]["event_end"]
    if response_type == 'NEW_SCHEDULE' and self.error_handler == None:
        if msg.get('result',0) == 'SUCCESS':
            # Device time slot granted: schedule the pre-cool kick-off.
            event = sched.Event(self.pre_cool_setup, args=[e_start, e_end])
            self.schedule(event_start, event)
            self.all_scheduled_events[e_start] = event
        elif msg.get('result',0) =='FAILURE' and schedule_start < schedule_end:
            # Slot refused: shrink the requested window by 10 minutes and
            # keep retrying every 20 seconds until acknowledged.
            schedule_start = schedule_start + datetime.timedelta(minutes = 10)
            headers = {
                'type': 'NEW_SCHEDULE',
                'requesterID': agent_id,
                'taskID': task_id,
                'priority': 'High'
            }
            self.task_timer = self.periodic_timer(20, self.publish_json,
                topics.ACTUATOR_SCHEDULE_REQUEST(), headers,
                [["{campus}/{building}/{unit}".format(**rtu_path),str(schedule_start),schedule_end]])
        elif schedule_start >= schedule_end:
            return
    if self.error_handler is not None:
        # NOTE(review): this re-runs the pending error-recovery closure on
        # every schedule result -- confirm this is intentional.
        self.error_handler()
@matching.match_headers({headers_mod.REQUESTER_ID: agent_id})
@matching.match_glob(topics.ACTUATOR_ERROR(point='*', **rtu_path))
def _on_error_result(self, topic, headers, message, match):
    """ERROR result.

    On a LockError re-request today's device schedule; for any other
    error, run the pending error-recovery closure if one is set.
    Fixes: removed a duplicated ``point = match.group(1)`` line, and
    guarded against a NameError when no schedule entry matches today
    (task_id/schedule_* were referenced unconditionally before).
    """
    point = match.group(1)
    msg = jsonapi.loads(message[0])
    today = datetime.datetime.now().date()
    # Find today's schedule entry (if any) so the lock can be re-requested.
    task_id = None
    schedule_start = None
    schedule_end = None
    for key, schedule_info in self.device_schedule.items():
        if schedule_info["date"] == today:
            schedule_start = schedule_info["schedule_start"]
            schedule_end = schedule_info["schedule_end"]
            task_id = key
            break
    self._log.info('Error Results: '+ str(point) + ' '+ str(msg))
    if msg.get('type',0) == 'LockError' and task_id is not None:
        headers = {
            'type': 'NEW_SCHEDULE',
            'requesterID': agent_id,
            'taskID': task_id,
            'priority': 'HIGH'
        }
        # Keep re-sending the schedule request until schedule_result()
        # cancels this timer.
        self.task_timer = self.periodic_timer(20, self.publish_json,
            topics.ACTUATOR_SCHEDULE_REQUEST(), headers,
            [["{campus}/{building}/{unit}".format(**rtu_path), str(schedule_start), str(schedule_end)]])
    elif self.error_handler is not None:
        self._log.info('Running error handler')
        self.error_handler()
@matching.match_exact(topics.DEVICES_VALUE(point='all', **rtu_path))
def _on_new_data(self, topic, headers, message, match):
    """watching for new data"""
    # Scrape the latest device publish for space temperature, the user
    # override flag, and occupancy status, then react to state changes.
    data = jsonapi.loads(message[0])
    self.current_spacetemp = float(data[space_temp])
    dr_override = bool(int(data[override_command]))
    occupied = bool(int(data[occupied_status]))
    if dr_override and self.state not in ('IDLE', 'CLEANUP', 'STARTUP'):
        # Occupant hit the override: abandon the running event until
        # midnight (see cancel_event's OVERRIDE handling).
        self._log.info('User Override Initiated')
        self.cancel_event(cancel_type='OVERRIDE')
    if not occupied and self.state in ('DR_EVENT', 'RESTORE'):
        # Building went unoccupied mid-event; stand down.
        self.cancel_event()
    if self.state == 'STARTUP':
        # First data arrival marks the end of startup.
        self._log.info('Finished Startup')
        self.state = 'IDLE'
@matching.match_exact(topics.OPENADR_EVENT())
def _on_dr_event(self, topic, headers, message, match):
    """handle openADR events"""
    # Fixes: the docstring above was previously a no-op string statement
    # after the startup check; removed the unused local ``e_id`` and a
    # commented-out strptime line.
    if self.state == 'STARTUP':
        self._log.info('DR event ignored because of startup.')
        return
    msg = jsonapi.loads(message[0])
    self._log.info('EVENT Received: ' + str(msg))
    e_status = msg['status']
    task_id = msg['id']
    today = datetime.datetime.now().date()
    # Start/end arrive as strings; parse leniently.
    e_end = parser.parse(msg['end_at'], fuzzy=True)
    e_start = parser.parse(msg['start_at'], fuzzy=True)
    dr_date = e_start.date()
    current_datetime = datetime.datetime.now()
    if current_datetime > e_end:
        self._log.info('Too Late Event is Over')
        return
    if e_status == 'cancelled':
        # Cancellation: drop any scheduled kick-off, and if the event is
        # today and already running, unwind it.
        if e_start in self.all_scheduled_events:
            self._log.info('Event Cancelled')
            self.all_scheduled_events[e_start].cancel()
            del self.all_scheduled_events[e_start]
        if e_start.date() == today and (self.state == 'PRECOOL' or self.state == 'DR_EVENT'):
            self.cancel_event()
        return
    if today > e_start.date():
        # Stale event from a previous day: just drop its handler.
        if e_start in self.all_scheduled_events:
            self.all_scheduled_events[e_start].cancel()
            del self.all_scheduled_events[e_start]
        return
    for item in self.all_scheduled_events:
        if e_start.date() == item.date():
            if e_start.time() != item.time():
                # Same day, new start time: replace the old schedule.
                self._log.info( 'Updating Event')
                self.all_scheduled_events[item].cancel()
                del self.all_scheduled_events[item]
                if e_start.date() == today and (self.state == 'PRECOOL' or self.state == 'DR_EVENT'):
                    self.cancel_event(cancel_type='UPDATING')
                break
            elif e_start.time() == item.time():
                self._log.info("same event")
                return
    #Don't schedule an event if we are currently in OVERRIDE state.
    if e_start.date() == today and (self.state == 'OVERRIDE'):
        return
    # Reserve the device from max_precool_hours before the event until
    # 10 minutes past the end of the restore window.
    schedule_start = e_start - datetime.timedelta(hours = max_precool_hours)
    schedule_end = e_end + datetime.timedelta(seconds = self.restore_window)
    schedule_end = schedule_end + datetime.timedelta(minutes = 10)
    self.device_schedule[task_id]={"date": dr_date,
                                   "schedule_start": schedule_start,
                                   "schedule_end": schedule_end,
                                   "event_start": e_start,
                                   "event_end": e_end}
    headers = {
        'type': 'NEW_SCHEDULE',
        'requesterID': agent_id,
        'taskID': task_id,
        'priority': 'HIGH'
    }
    # Retry the schedule request every 20 s until schedule_result()
    # cancels this timer.
    self.task_timer = self.periodic_timer(20, self.publish_json,
        topics.ACTUATOR_SCHEDULE_REQUEST(),
        headers,
        [["{campus}/{building}/{unit}".format(**rtu_path),str(schedule_start),str(schedule_end)]])
def pre_cool_setup(self, e_start, e_end):
    # Kick off (or re-check) pre-cooling for an event running from
    # e_start to e_end (datetimes). Re-schedules itself every 15 minutes.
    if self.state == 'OVERRIDE':
        self._log.info("Override today")
        return
    # pre_cool_idle is cleared by modify_temp_set_point while a setpoint
    # write is pending; skip this tick if so.
    if self.pre_cool_idle == False:
        return
    now = datetime.datetime.now()
    day=now.weekday()
    if not schedule[day]:
        # Building is unoccupied this weekday; nothing to do.
        self._log.info("Unoccupied today")
        return
    if self.state == 'PRECOOL' and self.pre_cool_idle == True:
        # Already pre-cooling: drop stale handlers before rebuilding the
        # schedule in schedule_builder below.
        for event in self.currently_running_dr_event_handlers:
            event.cancel()
        self.currently_running_dr_event_handlers = []
    self.state = 'PRECOOL'
    e_start_unix = time.mktime(e_start.timetuple())
    e_end_unix = time.mktime(e_end.timetuple())
    # Run this check again in 15 minutes.
    event_start = now + datetime.timedelta(minutes=15)
    event = sched.Event(self.pre_cool_setup, args=[e_start, e_end])
    self.schedule(event_start, event)
    self.all_scheduled_events[e_start] = event
    self.schedule_builder(e_start_unix, e_end_unix)
def modify_temp_set_point(self, csp, hsp):
    # Push one pre-cool/restore step to the controller: cooling setpoint
    # csp, heating setpoint hsp, plus normal damper and stage settings.
    self.publish(topics.ACTUATOR_SET(point=volttron_flag, **rtu_path), self.headers, str(3.0))
    self.publish(topics.ACTUATOR_SET(point=min_damper_stpt, **rtu_path), self.headers, str(self.normal_damper_stpt))
    self.publish(topics.ACTUATOR_SET(point=cooling_stage_diff, **rtu_path), self.headers, str(self.default_cooling_stage_differential))
    self.publish(topics.ACTUATOR_SET(point=cooling_stpt, **rtu_path), self.headers, str(csp))
    self.publish(topics.ACTUATOR_SET(point=heating_stpt, **rtu_path), self.headers, str(hsp))
    if self.pre_cool_idle == True:
        # Mark a write in flight so pre_cool_setup skips its next tick.
        self.pre_cool_idle = False
    def backup_run():
        # Retried by the error handler if an actuator write failed.
        self.modify_temp_set_point(csp, hsp)
        self.error_handler = None
    self.error_handler = backup_run
def start_dr_event(self):
    """Enter the DR (curtailment) state.

    Raises the cooling setpoint, slows both supply-fan stages, closes
    the outdoor-air damper, and logs the event start to sMAP.
    """
    self.state = 'DR_EVENT'
    self.publish(topics.ACTUATOR_SET(point=volttron_flag, **rtu_path), self.headers, str(3.0))
    self.publish(topics.ACTUATOR_SET(point=cooling_stpt, **rtu_path), self.headers, str(self.csp_cpp))
    new_fan_speed = self.normal_firststage_fanspeed - (self.normal_firststage_fanspeed*fan_reduction)
    new_fan_speed = max(new_fan_speed,0)
    self.publish(topics.ACTUATOR_SET(point=cooling_fan_sp1, **rtu_path), self.headers, str(new_fan_speed))
    # Fixed: reduce the second stage by a fraction of the SECOND-stage
    # speed; the original multiplied fan_reduction by the first-stage
    # speed (copy/paste error).
    new_fan_speed = self.normal_secondstage_fanspeed - (self.normal_secondstage_fanspeed*fan_reduction)
    new_fan_speed = max(new_fan_speed,0)
    self.publish(topics.ACTUATOR_SET(point=cooling_fan_sp2, **rtu_path), self.headers, str(new_fan_speed))
    self.publish(topics.ACTUATOR_SET(point=min_damper_stpt, **rtu_path), self.headers, str(damper_cpp))
    self.publish(topics.ACTUATOR_SET(point=cooling_stage_diff, **rtu_path), self.headers, str(cooling_stage_differential))
    # Record the event start (1.0) to sMAP.
    mytime = int(time.time())
    content = {
        "Demand Response Event": {
            "Readings": [[mytime, 1.0]],
            "Units": "TU",
            "data_type": "double"
        }
    }
    self.publish(self.smap_path, self.headers, jsonapi.dumps(content))
    def backup_run():
        # Retried by the error handler if an actuator write failed.
        self.start_dr_event()
        self.error_handler = None
    self.error_handler = backup_run
def start_restore_event(self, csp, hsp):
    # First restore step after the DR event: apply the given cooling and
    # heating setpoints and bring fans/damper/stage diff back to normal.
    self.state = 'RESTORE'
    self._log.info('Restore: Begin restoring normal operations')
    self.publish(topics.ACTUATOR_SET(point=cooling_stpt, **rtu_path), self.headers, str(csp))
    self.publish(topics.ACTUATOR_SET(point=heating_stpt, **rtu_path), self.headers, str(hsp)) #heating
    self.publish(topics.ACTUATOR_SET(point=cooling_fan_sp1, **rtu_path), self.headers, str(self.normal_firststage_fanspeed))
    self.publish(topics.ACTUATOR_SET(point=cooling_fan_sp2, **rtu_path), self.headers, str(self.normal_secondstage_fanspeed))
    self.publish(topics.ACTUATOR_SET(point=min_damper_stpt, **rtu_path), self.headers, str(self.normal_damper_stpt))
    self.publish(topics.ACTUATOR_SET(point=cooling_stage_diff, **rtu_path), self.headers, str(self.default_cooling_stage_differential))
    def backup_run():
        # Retried by the error handler if an actuator write failed.
        self.start_restore_event(csp, hsp)
        self.error_handler = None
    self.error_handler = backup_run
def cancel_event(self, cancel_type='NORMAL'):
    # Unwind a DR event and restore normal controller operation.
    # cancel_type is one of:
    #   'NORMAL'   -- scheduled cleanup at the end of the event
    #   'OVERRIDE' -- occupant override; lock out until midnight
    #   'UPDATING' -- event being rescheduled; skip sMAP logging and
    #                 readback verification
    if cancel_type == 'OVERRIDE':
        self.state = 'OVERRIDE'
        smap_input = 3.0
    elif cancel_type != 'UPDATING':
        self.state = 'CLEANUP'
        smap_input = 2.0
    # Push all normal-operation settings back to the controller.
    self.publish(topics.ACTUATOR_SET(point=cooling_stpt, **rtu_path), self.headers, str(self.normal_coolingstpt))
    self.publish(topics.ACTUATOR_SET(point=heating_stpt, **rtu_path), self.headers, str(self.normal_heatingstpt))
    self.publish(topics.ACTUATOR_SET(point=cooling_fan_sp1, **rtu_path), self.headers, str(self.normal_firststage_fanspeed))
    self.publish(topics.ACTUATOR_SET(point=cooling_fan_sp2, **rtu_path), self.headers, str(self.normal_secondstage_fanspeed))
    self.publish(topics.ACTUATOR_SET(point=min_damper_stpt, **rtu_path), self.headers, str(self.normal_damper_stpt))
    self.publish(topics.ACTUATOR_SET(point=cooling_stage_diff, **rtu_path), self.headers, str(self.default_cooling_stage_differential))
    self.publish(topics.ACTUATOR_SET(point=volttron_flag, **rtu_path), self.headers,str(0))
    # Drop every handler scheduled for the remainder of the event.
    for event in self.currently_running_dr_event_handlers:
        event.cancel()
    if cancel_type != 'UPDATING':
        # Record the cancellation reason to sMAP (2.0 cleanup, 3.0 override).
        mytime = int(time.time())
        content = {
            "Demand Response Event": {
                "Readings": [[mytime, smap_input]],
                "Units": "TU",
                "data_type": "double"
            }
        }
        self.publish(self.smap_path, self.headers, jsonapi.dumps(content))
    # Reset all event bookkeeping.
    self.device_schedule = {}
    self.all_scheduled_events = {}
    self.currently_running_dr_event_handlers = []
    def backup_run():
        # Retried by the error handler if an actuator write failed.
        self.cancel_event()
        self.error_handler = None
    self.error_handler = backup_run
    # Values we expect to read back from the device once the restore
    # writes above have taken effect.
    expected_values = {cooling_stpt: self.normal_coolingstpt,
                       heating_stpt: self.normal_heatingstpt,
                       cooling_fan_sp1: self.normal_firststage_fanspeed,
                       cooling_fan_sp2: self.normal_secondstage_fanspeed,
                       min_damper_stpt: self.normal_damper_stpt,
                       cooling_stage_diff: self.default_cooling_stage_differential}
    EPSILON = 0.5 #allowed difference from expected value
    def result_handler(point, value):
        # Tick off each restored point as its readback arrives; once all
        # are seen, return to IDLE (or remain in OVERRIDE).
        #print "actuator point being handled:", point, value
        expected_value = expected_values.pop(point, None)
        if expected_value is not None:
            diff = abs(expected_value-value)
            if diff > EPSILON:
                self._log.info( "Did not get back expected value for: " + str(point))
        if not expected_values:
            self.actuator_handler = None
            self.error_handler = None
            self.state = 'IDLE' if not cancel_type == 'OVERRIDE' else 'OVERRIDE'
    if cancel_type != 'UPDATING':
        self.actuator_handler = result_handler
    else:
        self.actuator_handler = None
    if cancel_type == 'OVERRIDE':
        # Stay locked out until the start of the next day.
        def on_reset():
            self.error_handler = None
            self.state = 'IDLE'
        today = datetime.datetime.now()
        reset_time = today + datetime.timedelta(days=1)
        reset_time = reset_time.replace(hour=0, minute =0, second = 0)
        event = sched.Event(on_reset)
        self.schedule(reset_time, event)
def schedule_builder(self,start_time, end_time):
    """schedule all events for a DR event.

    Builds the whole timeline from unix timestamps start_time/end_time:
    stepped pre-cooling before the event, the DR curtailment itself,
    stepped restore afterwards, and a final cleanup.
    Fixes: clamp num_cooling_timesteps to >= 1 BEFORE dividing by it
    (the original divided first and could raise ZeroDivisionError);
    same guard added for num_restore_timesteps; removed unused locals.
    """
    current_time = time.time()
    if current_time > end_time:
        # Event already over; nothing to schedule.
        return
    self._log.info('Scheduling all DR actions')
    pre_hsp = self.csp_pre - 5.0
    # Cooling window sized by how far the space temp sits above the
    # pre-cool setpoint, limited by the time left before the event.
    ideal_cooling_window = int(((self.current_spacetemp - self.csp_pre)/self.building_thermal_constant) *3600)
    max_cooling_window = start_time - current_time
    cooling_window = ideal_cooling_window if ideal_cooling_window < max_cooling_window else max_cooling_window
    pre_cool_step = 0
    if (max_cooling_window > 0):
        self._log.info('Schedule Pre Cooling')
        num_cooling_timesteps = int(math.ceil(float(cooling_window) / float(self.timestep_length)))
        if num_cooling_timesteps <= 0:
            num_cooling_timesteps=1
        cooling_step_delta = (self.normal_coolingstpt - self.csp_pre) / num_cooling_timesteps
        for step_index in range (1, num_cooling_timesteps):
            # First step is scheduled two timesteps before the event;
            # each later step one timestep earlier than the previous.
            if step_index == 1:
                pre_cool_step = 2*self.timestep_length
            else:
                pre_cool_step += self.timestep_length
            event_time = start_time - pre_cool_step
            csp = self.csp_pre + ((step_index-1) * cooling_step_delta)
            self._log.info('Precool step: '+ str(datetime.datetime.fromtimestamp(event_time)) + ' CSP: ' + str(csp))
            event = sched.Event(self.modify_temp_set_point, args = [csp, pre_hsp])
            self.schedule(event_time, event)
            self.currently_running_dr_event_handlers.append(event)
    else:
        self._log.info('Too late to pre-cool!')
    num_restore_timesteps = int(math.ceil(float(self.restore_window) / float(self.timestep_length)))
    if num_restore_timesteps <= 0:
        # Degenerate restore window from config; take a single step.
        num_restore_timesteps = 1
    restore_step_delta = (self.csp_pre - self.normal_coolingstpt) / num_restore_timesteps
    self._log.info('Schedule DR Event: ' + str(datetime.datetime.fromtimestamp(start_time)) +' CSP: ' + str(self.csp_cpp))
    event = sched.Event(self.start_dr_event)
    self.schedule(start_time, event)
    self.currently_running_dr_event_handlers.append(event)
    self._log.info('Schedule Restore Event: '+ str(datetime.datetime.fromtimestamp(end_time)) + ' CSP: ' + str(self.csp_pre-restore_step_delta))
    event = sched.Event(self.start_restore_event, args = [self.csp_pre-restore_step_delta, self.normal_heatingstpt])
    self.schedule(end_time, event)
    self.currently_running_dr_event_handlers.append(event)
    for step_index in range (1, num_restore_timesteps):
        event_time = end_time + (step_index * self.timestep_length)
        csp = self.csp_pre - ((step_index + 1) * restore_step_delta)
        self._log.info('Restore step: ' + str(datetime.datetime.fromtimestamp(event_time)) +' CSP: ' + str(csp))
        event = sched.Event(self.modify_temp_set_point, args = [csp, self.normal_heatingstpt])
        self.schedule(event_time, event)
        self.currently_running_dr_event_handlers.append(event)
    # Final cleanup once the last restore step has run.
    event_time = end_time + (num_restore_timesteps * self.timestep_length)
    self._log.info('Schedule Cleanup Event: ' + str(datetime.datetime.fromtimestamp(event_time)))
    event = sched.Event(self.cancel_event)
    self.schedule(event_time,event)
    self.currently_running_dr_event_handlers.append(event)
# Give the dynamically built class a stable public name, then return an
# instance wired with the caller-supplied keyword arguments.
Agent.__name__ = 'DemandResponseAgent'
return Agent(**kwargs)
def main(argv=None):
    '''Main method called by the eggsecutable.

    Fixed: the default was ``argv=sys.argv``, which binds the argument
    vector once at import time; use a None sentinel and read sys.argv
    at call time instead.
    '''
    if argv is None:
        argv = sys.argv
    utils.default_main(DemandResponseAgent,
                       description = 'VOLTTRON platform DR agent',
                       argv=argv)
if __name__ == "__main__":
    # Script entry point; swallow Ctrl-C for a clean exit.
    try:
        main()
    except KeyboardInterrupt:
        pass
|
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from ..framework import Parameter
from .layers import Layer
from .base import param_guard
__all__ = [
'Sequential',
'ParameterList',
'LayerList',
]
class Sequential(Layer):
    """Sequential container.

    Sublayers run in registration order: the output of each layer feeds
    the next. The constructor accepts either plain Layers (auto-named
    "0", "1", ...) or (name, Layer) pairs.

    Parameters:
        layers(Layer|list|tuple): Layer or list/tuple of iterable name Layer pair.
    """

    def __init__(self, *layers):
        super(Sequential, self).__init__()
        named_pairs = len(layers) > 0 and isinstance(layers[0], (list, tuple))
        if named_pairs:
            for name, layer in layers:
                self.add_sublayer(name, layer)
        else:
            for position, layer in enumerate(layers):
                self.add_sublayer(str(position), layer)

    def __getitem__(self, name):
        # Slices return a new Sequential; strings index by sublayer name;
        # integers index positionally with negative-index support.
        if isinstance(name, slice):
            return self.__class__(*(list(self._sub_layers.values())[name]))
        if isinstance(name, str):
            return self._sub_layers[name]
        size = len(self._sub_layers)
        if name >= size or name < -size:
            raise IndexError('index {} is out of range'.format(name))
        if name < 0:
            name += size
        return self._sub_layers[str(name)]

    def __setitem__(self, name, layer):
        assert isinstance(layer, Layer)
        setattr(self, str(name), layer)

    def __delitem__(self, name):
        key = str(name)
        assert key in self._sub_layers
        del self._sub_layers[key]

    def __len__(self):
        return len(self._sub_layers)

    def forward(self, input):
        # Thread the input through every registered sublayer in order.
        out = input
        for layer in self._sub_layers.values():
            out = layer(out)
        return out
class ParameterList(Layer):
    """ParameterList Container.

    Acts like a Python list of Parameters; every parameter it holds is
    registered on the layer (keys "0", "1", ...) so it is tracked
    properly by the framework.

    Parameters:
        parameters (iterable, optional): Iterable Parameters to be added
    """

    def __init__(self, parameters=None):
        super(ParameterList, self).__init__()
        if parameters is None:
            return
        for position, param in enumerate(parameters):
            assert isinstance(param, Parameter)
            self.add_parameter(str(position), param)

    def __getitem__(self, idx):
        with param_guard(self._parameters):
            return self._parameters[str(idx)]

    def __setitem__(self, idx, param):
        assert isinstance(param, Parameter)
        setattr(self, str(idx), param)

    def __len__(self):
        return len(self._parameters)

    def __iter__(self):
        with param_guard(self._parameters):
            return iter(self._parameters.values())

    def append(self, parameter):
        """Append a given parameter at the end of the list and return self.

        Parameters:
            parameter (Parameter): parameter to append
        """
        next_key = str(len(self._parameters))
        self.add_parameter(next_key, parameter)
        return self
class LayerList(Layer):
    """
    LayerList holds sublayers and registers each one properly, while
    supporting regular python-list style indexing (including negative
    indices and slices), iteration, append, insert and extend.

    Parameters:
        sublayers (iterable of Layer, optional): sublayers to hold
    """

    def __init__(self, sublayers=None):
        super(LayerList, self).__init__()
        if sublayers is None:
            return
        for position, layer in enumerate(sublayers):
            self.add_sublayer(str(position), layer)

    def _get_abs_idx(self, idx):
        # Normalize a possibly-negative integer index to [0, len);
        # non-integers pass through unchanged.
        if isinstance(idx, int):
            length = len(self)
            if not (-length <= idx < length):
                raise IndexError(
                    'index {} is out of range, should be an integer in range [{}, {})'.
                    format(idx, -length, length))
            return idx + length if idx < 0 else idx
        return idx

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            return self.__class__(list(self._sub_layers.values())[idx])
        return self._sub_layers[str(self._get_abs_idx(idx))]

    def __setitem__(self, idx, sublayer):
        return setattr(self, str(self._get_abs_idx(idx)), sublayer)

    def __delitem__(self, idx):
        if isinstance(idx, slice):
            for key in range(len(self._sub_layers))[idx]:
                delattr(self, str(key))
        else:
            delattr(self, str(self._get_abs_idx(idx)))
        # Re-number the surviving sublayers so keys remain "0".."n-1".
        survivors = list(self._sub_layers.values())
        self._sub_layers = OrderedDict(
            (str(i), layer) for i, layer in enumerate(survivors))

    def __len__(self):
        return len(self._sub_layers)

    def __iter__(self):
        return iter(self._sub_layers.values())

    def append(self, sublayer):
        """
        Appends a sublayer to the end of the list and returns self.

        Parameters:
            sublayer (Layer): sublayer to append
        """
        self.add_sublayer(str(len(self)), sublayer)
        return self

    def insert(self, index, sublayer):
        """
        Insert a sublayer before a given index in the list; negative
        indices are supported.

        Parameters:
            index (int): index to insert.
            sublayer (Layer): sublayer to insert
        """
        assert isinstance(index, int) and \
            -len(self._sub_layers) <= index < len(self._sub_layers), \
            "index should be an integer in range [{}, {})".format(-len(self), len(self))
        index = self._get_abs_idx(index)
        # Shift everything at or after index one slot to the right.
        for pos in range(len(self._sub_layers), index, -1):
            self._sub_layers[str(pos)] = self._sub_layers[str(pos - 1)]
        self._sub_layers[str(index)] = sublayer

    def extend(self, sublayers):
        """
        Appends every layer in sublayers to the end of the list and
        returns self.

        Parameters:
            sublayers (iterable of Layer): iterable of sublayers to append
        """
        base = len(self)
        for shift, layer in enumerate(sublayers):
            self.add_sublayer(str(base + shift), layer)
        return self
|
|
"""
The MIT License (MIT)
Copyright (c) 2016 Stratos Goudelis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from tornado.websocket import WebSocketHandler, WebSocketClosedError
import tornado.websocket
import tornado.web
import tornado.httpserver
import tornado.options
import brukva
import logging
import json
import redis
import os
import uuid
import re
from tornado import gen
# Server / redis / rate-limiting options, overridable on the command line.
tornado.options.define("address", default="0.0.0.0", help="address to listen on", type=str)
tornado.options.define("port", default=5000, help="port to listen on", type=int)
tornado.options.define("redishost", default="127.0.0.1", help="redis server", type=str)
tornado.options.define("redisport", default=6379, help="redis port", type=int)
tornado.options.define("redisdb", default=0, help="redis database", type=int)
tornado.options.define("redispassword", default="", help="redis server password", type=str)
tornado.options.define("channelttl", default=3600, help="redis hash key ttl", type=int)
tornado.options.define("clientlimit", default=10, help="client keys limit per ip", type=int)
tornado.options.define("requestlimit", default=50, help="request limit per marker key", type=int)
tornado.options.define("historylength", default=50, help="list of last N http requests", type=int)
tornado.options.define("debug", default=True, help="set Tornado to debug", type=bool)
tornado.options.define("origin", default='', help="regex to match, do not set for any origin", type=str)
# Redis key prefixes / names shared by the views below.
hash_set_prefix = "client#"
client_ip_prefix = "client_ip#"
channel_name_prefix = 'httprequests#'
client_history = 'client_history#'
def generate_marker_key():
    """Generate a short random marker key.

    :return: the first 10 hex characters of a random UUID4.
    """
    return uuid.uuid4().hex[:10]
class BaseHashViewRequestHandler(tornado.web.RequestHandler):
    # Shared redis connection slots for HTTP views; set per-request.
    redis_async_connection = None
    redis_sync_connection = None
class BaseLogWebSocket(WebSocketHandler):
    # Shared redis connection slots for websocket handlers; set on open().
    redis_async_connection = None
    redis_sync_connection = None
class CatchAllView(tornado.web.RequestHandler):
    """
    RequestHandler view for catch all: swallow requests to any unmatched
    path with an empty response.
    """
    def get(self):
        self.finish()

    def post(self):
        self.finish()

    def put(self):
        # Fixed: Tornado dispatches PUT requests to put(); the original
        # method was named update(), which Tornado never calls, so PUT
        # requests returned 405.
        self.finish()

    def delete(self):
        self.finish()
class GenerateHashView(BaseHashViewRequestHandler):
    """
    View for generating hash
    """
    def get(self):
        # Create a fresh marker key for the caller, subject to a
        # per-client-IP quota, and store it in redis with a TTL.
        unique_hash = generate_marker_key()
        self.redis_sync_connection = redis.StrictRedis(host=tornado.options.options.redishost,
                                                       port=tornado.options.options.redisport,
                                                       password=tornado.options.options.redispassword,
                                                       db=tornado.options.options.redisdb)
        # get real ip address (trust proxy headers when present)
        client_ip = self.request.headers.get('X-Forwarded-For', self.request.headers.get('X-Real-Ip',
                                             self.request.remote_ip))
        # check how many keys are created from the same ip address
        try:
            client_hits = int(self.redis_sync_connection.get(client_ip_prefix+str(client_ip)))
        except TypeError:
            # get() returned None: no counter yet for this ip.
            client_hits = 0
        if client_hits > tornado.options.options.clientlimit:
            self.finish(json.dumps({'error': "limit reached"}))
            return
        # create a value for the key
        value = {'ip': client_ip}
        # set the key in redis
        self.redis_sync_connection.setex(hash_set_prefix+unique_hash, tornado.options.options.channelttl,
                                         json.dumps(value))
        # set the ip as key to keep track how many key there are for that ip
        # NOTE(review): a brand-new ip is setex'd to 1 and then immediately
        # incremented to 2, so the first key counts double -- confirm
        # whether the incrby should be in an else branch.
        if not self.redis_sync_connection.exists(client_ip_prefix+str(client_ip)):
            self.redis_sync_connection.setex(client_ip_prefix+str(client_ip), tornado.options.options.channelttl, 1)
        self.redis_sync_connection.incrby(client_ip_prefix+str(client_ip), 1)
        # finish the request
        self.finish(json.dumps({'key': unique_hash}))
class LogView(tornado.web.RequestHandler):
    """
    Render the live log page for a specific marker key (bucket), where
    the user will observe HTTP calls.
    """
    def get(self, bucket):
        context = {'bucket': bucket,
                   'request_limit': tornado.options.options.requestlimit}
        self.render("templates/log.html", title="Logger", items=context)
class HomeView(tornado.web.RequestHandler):
    """
    Render the landing page (no marker key selected yet), where the user
    will observe HTTP calls.
    """
    def get(self):
        context = {'bucket': "",
                   'request_limit': tornado.options.options.requestlimit}
        self.render("templates/index.html", title="Logger", items=context)
class LogWebSocket(BaseLogWebSocket):
    """
    Websockets interface
    """
    # Redis pub/sub channel this socket subscribes to.
    channel_name = None
    @gen.engine
    def open(self, bucket='root'):
        # New viewer: connect to redis, enforce the per-key request
        # limit, replay stored history, then subscribe for live messages.
        self.channel_name = str(channel_name_prefix+bucket)
        self.redis_async_connection = brukva.Client(host=tornado.options.options.redishost,
                                                    port=tornado.options.options.redisport,
                                                    password=tornado.options.options.redispassword,
                                                    selected_db=tornado.options.options.redisdb)
        # connect to redis
        self.redis_async_connection.connect()
        # check for limits first
        self.redis_async_connection.get('counter#'+bucket, self.close_connection_on_limit)
        # replay the stored request history for this bucket
        self.redis_async_connection.lrange('client_history#'+bucket, 0, tornado.options.options.historylength,
                                           self.send_request_history)
        # subscribe
        self.redis_async_connection.subscribe(self.channel_name)
        self.redis_async_connection.listen(self.on_message)
        logging.info('New viewer connected to observe flow for channel: %s' % self.channel_name)
    def send_request_history(self, request_list):
        """
        Send the last N HTTP requests made, oldest first.
        :param request_list: redis list entries (newest first)
        :return:
        """
        for request in reversed(request_list):
            self.write_message(request)
    def close_connection_on_limit(self, counter):
        """
        Warn the viewer when the marker key is missing/expired or its
        request quota has been used up.
        :param counter: current request count for the bucket, or None
        :return:
        """
        if counter is None or int(counter) >= int(tornado.options.options.requestlimit):
            message = {'type': 'alert', 'message': 'this marker key is expired or does not exist',
                       'request_limit': tornado.options.options.requestlimit,
                       'request_count': counter}
            self.write_message(message)
            return
    def on_message(self, message):
        # Forward a pub/sub payload to the browser; errors are logged,
        # plain strings and brukva message objects both handled.
        try:
            if type(message) == brukva.exceptions.ResponseError:
                logging.error(message)
            elif type(message) == unicode:
                self.write_message(message)
            else:
                self.write_message(message.body)
        except WebSocketClosedError, e:
            logging.warn('WebsocketClosedError occured %s' % e.message)
    def on_close(self):
        logging.info('Websocket closed')
    def check_origin(self, origin):
        # Allow any origin unless an origin regex option was supplied.
        if tornado.options.options.origin == '':
            return True
        else:
            return bool(re.match(tornado.options.options.origin, origin))
class Application(tornado.web.Application):
    """
    Top-level Tornado application: wires URL routes to their handlers and
    applies the application-wide settings.
    """
    def __init__(self):
        # Route table: URL pattern -> handler class (catch-all last).
        routes = [
            (r'/', HomeView),
            (r'/generatekey', GenerateHashView),
            (r'/log/([a-zA-Z0-9]*)$', LogView),
            (r'/log/([a-zA-Z0-9]*)/ws', LogWebSocket),
            (r'/.*', CatchAllView),
        ]
        # Application settings; debug mode comes from the command line.
        options = {
            'auto_reload': True,
            'debug': tornado.options.options.debug,
            'static_path': os.path.join(os.path.dirname(__file__), "static"),
        }
        tornado.web.Application.__init__(self, routes, **options)
if __name__ == "__main__":
    # Entry point: parse command-line options (port, address, redis settings),
    # bind the application and run the IOLoop until interrupted.
    tornado.options.parse_command_line()
    application = Application()
    application.listen(tornado.options.options.port, address=tornado.options.options.address)
    tornado.ioloop.IOLoop.instance().start()
|
|
# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
##
# Copyright (c) 2015-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.enterprise.dal.syntax import Max, Select, Parameter, Delete, Insert, \
Update, ColumnSyntax, TableSyntax, Upper, utcNowSQL
from twext.python.clsprop import classproperty
from twext.python.log import Logger
from twisted.internet.defer import succeed, inlineCallbacks, returnValue
from txdav.base.datastore.util import normalizeUUIDOrNot
from txdav.common.datastore.sql_tables import schema
from txdav.common.icommondatastore import SyncTokenValidException, \
ENOTIFICATIONTYPE, ECALENDARTYPE, EADDRESSBOOKTYPE
import time
from uuid import UUID
# Module-level logger for the SQL store helpers below.
log = Logger()
"""
Classes and methods for the SQL store.
"""
class _EmptyCacher(object):
    """
    Stand-in cache used when no real cacher is configured: every operation
    completes immediately and nothing is ever stored.
    """
    def set(self, key, value):
        # Pretend the value was stored successfully.
        return succeed(True)
    def get(self, key, withIdentifier=False):
        # Nothing is ever cached, so every lookup is a miss.
        return succeed(None)
    def delete(self, key):
        # Pretend the key was removed successfully.
        return succeed(True)
class _SharedSyncLogic(object):
    """
    Logic for maintaining sync-token shared between notification collections and
    shared collections.
    """
    @classproperty
    def _childSyncTokenQuery(cls):
        """
        DAL query for retrieving the sync token of a L{CommonHomeChild} based on
        its resource ID.
        """
        rev = cls._revisionsSchema
        return Select([Max(rev.REVISION)], From=rev,
                      Where=rev.RESOURCE_ID == Parameter("resourceID"))
    @classmethod
    def _revisionsForResourceIDs(cls, resourceIDs):
        """
        DAL query returning (resource-id, max revision) pairs for the given
        resource IDs, skipping rows that are unnamed tombstones.
        """
        rev = cls._revisionsSchema
        return Select(
            [rev.RESOURCE_ID, Max(rev.REVISION)],
            From=rev,
            Where=rev.RESOURCE_ID.In(Parameter("resourceIDs", len(resourceIDs))).And(
                (rev.RESOURCE_NAME != None).Or(rev.DELETED == False)),
            GroupBy=rev.RESOURCE_ID
        )
    def revisionFromToken(self, token):
        """
        Extract the integer revision from a sync token.  Accepts C{None}
        (treated as revision 0), a "<uuid>_<revision>" string, or an already
        numeric revision which is passed through unchanged.
        """
        if token is None:
            return 0
        elif isinstance(token, str) or isinstance(token, unicode):
            _ignore_uuid, revision = token.split("_", 1)
            return int(revision)
        else:
            return token
    @inlineCallbacks
    def syncToken(self):
        """
        Return the current sync token "<resource-id>_<revision>" for this
        collection, lazily loading and caching the revision.
        """
        if self._syncTokenRevision is None:
            self._syncTokenRevision = yield self.syncTokenRevision()
        returnValue(("%s_%s" % (self._resourceID, self._syncTokenRevision,)))
    @inlineCallbacks
    def syncTokenRevision(self):
        """
        Look up this collection's highest revision; when no revision row
        exists, fall back to the store-wide minimum valid revision.
        """
        revision = (yield self._childSyncTokenQuery.on(self._txn, resourceID=self._resourceID))[0][0]
        if revision is None:
            revision = int((yield self._txn.calendarserverValue("MIN-VALID-REVISION")))
        returnValue(revision)
    @classmethod
    @inlineCallbacks
    def childSyncTokenRevisions(cls, home, childResourceIDs):
        """
        Return a dict mapping each of C{childResourceIDs} to its current sync
        revision, defaulting missing entries to the minimum valid revision.
        """
        rows = (yield cls._revisionsForResourceIDs(childResourceIDs).on(home._txn, resourceIDs=childResourceIDs))
        revisions = dict(rows)
        # Add in any that were missing - this assumes that childResourceIDs were all valid to begin with
        missingIDs = set(childResourceIDs) - set(revisions.keys())
        if missingIDs:
            min_revision = int((yield home._txn.calendarserverValue("MIN-VALID-REVISION")))
            for resourceID in missingIDs:
                revisions[resourceID] = min_revision
        returnValue(revisions)
    def objectResourcesSinceToken(self, token):
        """
        Per-object change enumeration; concrete collections must override.
        """
        raise NotImplementedError()
    @classmethod
    def _objectNamesSinceRevisionQuery(cls, deleted=True):
        """
        DAL query for (resource, deleted-flag)
        """
        rev = cls._revisionsSchema
        where = (rev.REVISION > Parameter("revision")).And(rev.RESOURCE_ID == Parameter("resourceID"))
        if not deleted:
            where = where.And(rev.DELETED == False)
        return Select(
            [rev.RESOURCE_NAME, rev.DELETED],
            From=rev,
            Where=where,
        )
    def resourceNamesSinceToken(self, token):
        """
        Return the changed and deleted resources since a particular sync-token. This simply extracts
        the revision from the token then calls L{resourceNamesSinceRevision}.
        @param token: the sync token to determine changes since
        @type token: C{str}
        """
        return self.resourceNamesSinceRevision(self.revisionFromToken(token))
    @inlineCallbacks
    def resourceNamesSinceRevision(self, revision):
        """
        Return the changed and deleted resources since a particular revision.
        @param revision: the revision to determine changes since
        @type revision: C{int}
        """
        changed = []
        deleted = []
        invalid = []
        if revision:
            # A token older than the store's minimum valid revision cannot be
            # answered incrementally; the client must perform a full resync.
            minValidRevision = yield self._txn.calendarserverValue("MIN-VALID-REVISION")
            if revision < int(minValidRevision):
                raise SyncTokenValidException
            results = [
                (name if name else "", removed) for name, removed in (
                    yield self._objectNamesSinceRevisionQuery().on(
                        self._txn, revision=revision, resourceID=self._resourceID)
                )
            ]
            results.sort(key=lambda x: x[1])
            for name, wasdeleted in results:
                if name:
                    if wasdeleted:
                        deleted.append(name)
                    else:
                        changed.append(name)
        else:
            # No baseline revision: report every existing resource as changed.
            changed = yield self.listObjectResources()
        returnValue((changed, deleted, invalid))
    @classproperty
    def _removeDeletedRevision(cls):
        # DAL query removing the tombstone row left by a previously-deleted
        # collection of the same name in this home.
        rev = cls._revisionsSchema
        return Delete(From=rev,
                      Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
                          rev.COLLECTION_NAME == Parameter("collectionName")))
    @classproperty
    def _addNewRevision(cls):
        # DAL query inserting the initial revision row for a new collection.
        rev = cls._revisionsSchema
        return Insert(
            {
                rev.HOME_RESOURCE_ID: Parameter("homeID"),
                rev.RESOURCE_ID: Parameter("resourceID"),
                rev.COLLECTION_NAME: Parameter("collectionName"),
                rev.RESOURCE_NAME: None,
                # Always starts false; may be updated to be a tombstone
                # later.
                rev.DELETED: False
            },
            Return=[rev.REVISION]
        )
    @inlineCallbacks
    def _initSyncToken(self):
        """
        Create this collection's first revision row (clearing any stale
        tombstone with the same name) and record the change on the txn.
        """
        yield self._removeDeletedRevision.on(
            self._txn, homeID=self._home._resourceID, collectionName=self._name
        )
        self._syncTokenRevision = (yield (
            self._addNewRevision.on(self._txn, homeID=self._home._resourceID,
                                    resourceID=self._resourceID,
                                    collectionName=self._name)))[0][0]
        self._txn.bumpRevisionForObject(self)
    @classproperty
    def _renameSyncTokenQuery(cls):
        """
        DAL query to change sync token for a rename (increment and adjust
        resource name).
        """
        rev = cls._revisionsSchema
        return Update(
            {
                rev.REVISION: schema.REVISION_SEQ,
                rev.COLLECTION_NAME: Parameter("name"),
                rev.MODIFIED: utcNowSQL,
            },
            Where=(rev.RESOURCE_ID == Parameter("resourceID")).And
            (rev.RESOURCE_NAME == None),
            Return=rev.REVISION
        )
    @inlineCallbacks
    def _renameSyncToken(self):
        """
        Bump and re-label this collection's revision after a rename, creating
        the revision row if it does not exist yet.
        """
        rows = yield self._renameSyncTokenQuery.on(
            self._txn, name=self._name, resourceID=self._resourceID)
        if rows:
            self._syncTokenRevision = rows[0][0]
            self._txn.bumpRevisionForObject(self)
        else:
            yield self._initSyncToken()
    @classproperty
    def _bumpSyncTokenQuery(cls):
        """
        DAL query to change collection sync token. Note this can impact multiple rows if the
        collection is shared.
        """
        rev = cls._revisionsSchema
        return Update(
            {
                rev.REVISION: schema.REVISION_SEQ,
                rev.MODIFIED: utcNowSQL,
            },
            Where=(rev.RESOURCE_ID == Parameter("resourceID")).And
            (rev.RESOURCE_NAME == None)
        )
    @inlineCallbacks
    def _bumpSyncToken(self):
        """
        Bump this collection's revision at most once per transaction; the
        cached revision is cleared so the next syncToken() re-reads it.
        """
        if not self._txn.isRevisionBumpedAlready(self):
            self._txn.bumpRevisionForObject(self)
            yield self._bumpSyncTokenQuery.on(
                self._txn,
                resourceID=self._resourceID,
            )
            self._syncTokenRevision = None
    @classproperty
    def _deleteSyncTokenQuery(cls):
        """
        DAL query to remove all child revision information. The revision for the collection
        itself is not touched.
        """
        rev = cls._revisionsSchema
        return Delete(
            From=rev,
            Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And
            (rev.RESOURCE_ID == Parameter("resourceID")).And
            (rev.COLLECTION_NAME == None)
        )
    @classproperty
    def _sharedRemovalQuery(cls):
        """
        DAL query to indicate a shared collection has been deleted.
        """
        rev = cls._revisionsSchema
        return Update(
            {
                rev.RESOURCE_ID: None,
                rev.REVISION: schema.REVISION_SEQ,
                rev.DELETED: True,
                rev.MODIFIED: utcNowSQL,
            },
            Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
                rev.RESOURCE_ID == Parameter("resourceID")).And(
                rev.RESOURCE_NAME == None)
        )
    @classproperty
    def _unsharedRemovalQuery(cls):
        """
        DAL query to indicate an owned collection has been deleted.
        """
        rev = cls._revisionsSchema
        return Update(
            {
                rev.RESOURCE_ID: None,
                rev.REVISION: schema.REVISION_SEQ,
                rev.DELETED: True,
                rev.MODIFIED: utcNowSQL,
            },
            Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
                rev.RESOURCE_NAME == None),
        )
    @inlineCallbacks
    def _deletedSyncToken(self, sharedRemoval=False):
        """
        When a collection is deleted we remove all the revision information for its child resources.
        We update the collection's sync token to indicate it has been deleted - that way a sync on
        the home collection can report the deletion of the collection.
        @param sharedRemoval: indicates whether the collection being removed is shared
        @type sharedRemoval: L{bool}
        """
        # Remove all child entries
        yield self._deleteSyncTokenQuery.on(self._txn,
                                            homeID=self._home._resourceID,
                                            resourceID=self._resourceID)
        # If this is a share being removed then we only mark this one specific
        # home/resource-id as being deleted. On the other hand, if it is a
        # non-shared collection, then we need to mark all collections
        # with the resource-id as being deleted to account for direct shares.
        if sharedRemoval:
            yield self._sharedRemovalQuery.on(self._txn,
                                              homeID=self._home._resourceID,
                                              resourceID=self._resourceID)
        else:
            yield self._unsharedRemovalQuery.on(self._txn,
                                                resourceID=self._resourceID)
        self._syncTokenRevision = None
    def _insertRevision(self, name):
        # Record that child resource C{name} was created.
        return self._changeRevision("insert", name)
    def _updateRevision(self, name):
        # Record that child resource C{name} was modified.
        return self._changeRevision("update", name)
    def _deleteRevision(self, name):
        # Record that child resource C{name} was removed (tombstoned).
        return self._changeRevision("delete", name)
    @classproperty
    def _deleteBumpTokenQuery(cls):
        # DAL query: tombstone a named child's revision row and bump it.
        rev = cls._revisionsSchema
        return Update(
            {
                rev.REVISION: schema.REVISION_SEQ,
                rev.DELETED: True,
                rev.MODIFIED: utcNowSQL,
            },
            Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
                rev.RESOURCE_NAME == Parameter("name")),
            Return=rev.REVISION
        )
    @classproperty
    def _updateBumpTokenQuery(cls):
        # DAL query: bump the revision of a named child's existing row.
        rev = cls._revisionsSchema
        return Update(
            {
                rev.REVISION: schema.REVISION_SEQ,
                rev.MODIFIED: utcNowSQL,
            },
            Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
                rev.RESOURCE_NAME == Parameter("name")),
            Return=rev.REVISION
        )
    @classproperty
    def _insertFindPreviouslyNamedQuery(cls):
        # DAL query: does a (possibly tombstoned) row already exist for this
        # child name?
        rev = cls._revisionsSchema
        return Select(
            [rev.RESOURCE_ID],
            From=rev,
            Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
                rev.RESOURCE_NAME == Parameter("name"))
        )
    @classproperty
    def _updatePreviouslyNamedQuery(cls):
        # DAL query: resurrect a tombstoned row for a re-created child name.
        rev = cls._revisionsSchema
        return Update(
            {
                rev.REVISION: schema.REVISION_SEQ,
                rev.DELETED: False,
                rev.MODIFIED: utcNowSQL,
            },
            Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
                rev.RESOURCE_NAME == Parameter("name")),
            Return=rev.REVISION
        )
    @classproperty
    def _completelyNewRevisionQuery(cls):
        # DAL query: insert a brand new (live) revision row for a child.
        rev = cls._revisionsSchema
        return Insert(
            {
                rev.HOME_RESOURCE_ID: Parameter("homeID"),
                rev.RESOURCE_ID: Parameter("resourceID"),
                rev.RESOURCE_NAME: Parameter("name"),
                rev.REVISION: schema.REVISION_SEQ,
                rev.DELETED: False
            },
            Return=rev.REVISION
        )
    @classproperty
    def _completelyNewDeletedRevisionQuery(cls):
        # DAL query: insert a brand new tombstone row for a child deleted
        # without ever having had a revision row.
        rev = cls._revisionsSchema
        return Insert(
            {
                rev.HOME_RESOURCE_ID: Parameter("homeID"),
                rev.RESOURCE_ID: Parameter("resourceID"),
                rev.RESOURCE_NAME: Parameter("name"),
                rev.REVISION: schema.REVISION_SEQ,
                rev.DELETED: True
            },
            Return=rev.REVISION
        )
    @inlineCallbacks
    def _changeRevision(self, action, name):
        """
        Bump the revision for child C{name} according to C{action} ("insert",
        "update" or "delete"), creating the revision row when it is missing,
        then fire any change notification via L{_maybeNotify}.
        """
        # Need to handle the case where for some reason the revision entry is
        # actually missing. For a "delete" we don't care, for an "update" we
        # will turn it into an "insert".
        if action == "delete":
            rows = (
                yield self._deleteBumpTokenQuery.on(
                    self._txn, resourceID=self._resourceID, name=name))
            if rows:
                self._syncTokenRevision = rows[0][0]
            else:
                self._syncTokenRevision = (
                    yield self._completelyNewDeletedRevisionQuery.on(
                        self._txn, homeID=self.ownerHome()._resourceID,
                        resourceID=self._resourceID, name=name)
                )[0][0]
        elif action == "update":
            rows = (
                yield self._updateBumpTokenQuery.on(
                    self._txn, resourceID=self._resourceID, name=name))
            if rows:
                self._syncTokenRevision = rows[0][0]
            else:
                self._syncTokenRevision = (
                    yield self._completelyNewRevisionQuery.on(
                        self._txn, homeID=self.ownerHome()._resourceID,
                        resourceID=self._resourceID, name=name)
                )[0][0]
        elif action == "insert":
            # Note that an "insert" may happen for a resource that previously
            # existed and then was deleted. In that case an entry in the
            # REVISIONS table still exists so we have to detect that and do db
            # INSERT or UPDATE as appropriate
            found = bool((
                yield self._insertFindPreviouslyNamedQuery.on(
                    self._txn, resourceID=self._resourceID, name=name)))
            if found:
                self._syncTokenRevision = (
                    yield self._updatePreviouslyNamedQuery.on(
                        self._txn, resourceID=self._resourceID, name=name)
                )[0][0]
            else:
                self._syncTokenRevision = (
                    yield self._completelyNewRevisionQuery.on(
                        self._txn, homeID=self.ownerHome()._resourceID,
                        resourceID=self._resourceID, name=name)
                )[0][0]
        yield self._maybeNotify()
        returnValue(self._syncTokenRevision)
    def _maybeNotify(self):
        """
        Maybe notify changed. (Overridden in NotificationCollection.)
        """
        return succeed(None)
def determineNewest(uid, homeType):
    """
    Construct a query to determine the modification time of the newest object
    in a given home.
    @param uid: the UID of the home to scan.
    @type uid: C{str}
    @param homeType: The type of home to scan; C{ECALENDARTYPE},
        C{ENOTIFICATIONTYPE}, or C{EADDRESSBOOKTYPE}.
    @type homeType: C{int}
    @return: A select query that will return a single row containing a single
        column which is the maximum value.
    @rtype: L{Select}
    """
    # Notification homes have their own, simpler schema layout.
    if homeType == ENOTIFICATIONTYPE:
        return Select(
            [Max(schema.NOTIFICATION.MODIFIED)],
            From=schema.NOTIFICATION_HOME.join(
                schema.NOTIFICATION,
                on=schema.NOTIFICATION_HOME.RESOURCE_ID ==
                schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID),
            Where=schema.NOTIFICATION_HOME.OWNER_UID == uid
        )
    # Calendar and addressbook schemas share the same table-name pattern, so
    # the four tables can be located by prefix.
    prefix = {ECALENDARTYPE: "CALENDAR",
              EADDRESSBOOKTYPE: "ADDRESSBOOK"}[homeType]
    homeTable = getattr(schema, prefix + "_HOME")
    bindTable = getattr(schema, prefix + "_BIND")
    childTable = getattr(schema, prefix)
    objectTable = getattr(schema, prefix + "_OBJECT")
    return Select(
        [Max(objectTable.MODIFIED)],
        From=homeTable.join(
            bindTable, on=bindTable.HOME_RESOURCE_ID == homeTable.RESOURCE_ID
        ).join(
            childTable, on=childTable.RESOURCE_ID == bindTable.RESOURCE_ID
        ).join(
            objectTable, on=objectTable.PARENT_RESOURCE_ID == childTable.RESOURCE_ID
        ),
        Where=(bindTable.BIND_MODE == 0).And(homeTable.OWNER_UID == uid)
    )
@inlineCallbacks
def mergeHomes(sqlTxn, one, other, homeType):
    """
    Merge two homes together.  This determines which of C{one} or C{other} is
    newer - that is, has been modified more recently - and pulls all the data
    from the older into the newer home.  Then, it changes the UID of the old
    home to its UID, normalized and prefixed with "old.", and then re-names the
    new home to its name, normalized.
    Because the UIDs of both homes have changed, B{both one and other will be
    invalid to all other callers from the start of the invocation of this
    function}.
    @param sqlTxn: the transaction to use
    @type sqlTxn: A L{CommonTransaction}
    @param one: A calendar home.
    @type one: L{ICalendarHome}
    @param other: Another, different calendar home.
    @type other: L{ICalendarHome}
    @param homeType: The type of home to scan; L{ECALENDARTYPE} or
        L{EADDRESSBOOKTYPE}.
    @type homeType: C{int}
    @return: a L{Deferred} which fires with the newer of C{one} or C{other},
        into which the data from the other home has been merged, when the merge
        is complete.
    """
    from txdav.caldav.datastore.util import migrateHome as migrateCalendarHome
    from txdav.carddav.datastore.util import migrateHome as migrateABHome
    migrateHome = {EADDRESSBOOKTYPE: migrateABHome,
                   ECALENDARTYPE: migrateCalendarHome,
                   ENOTIFICATIONTYPE: _dontBotherWithNotifications}[homeType]
    homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
                 ECALENDARTYPE: schema.CALENDAR_HOME,
                 ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
    # Pair each home with the modification time of its newest object, then
    # sort so that both[0] is the older home and both[1] the newer.
    both = []
    both.append([one,
                 (yield determineNewest(one.uid(), homeType).on(sqlTxn))])
    both.append([other,
                 (yield determineNewest(other.uid(), homeType).on(sqlTxn))])
    both.sort(key=lambda x: x[1])
    older = both[0][0]
    newer = both[1][0]
    yield migrateHome(older, newer, merge=True)
    # Rename the old one to 'old.<correct-guid>'
    newNormalized = normalizeUUIDOrNot(newer.uid())
    oldNormalized = normalizeUUIDOrNot(older.uid())
    yield _renameHome(sqlTxn, homeTable, older.uid(), "old." + oldNormalized)
    # Rename the new one to '<correct-guid>'
    if newer.uid() != newNormalized:
        yield _renameHome(sqlTxn, homeTable, newer.uid(), newNormalized)
    yield returnValue(newer)
def _renameHome(txn, table, oldUID, newUID):
    """
    Rename a calendar, addressbook, or notification home.  Only safe in
    transactions that have had caching disabled, and really only intended for
    use during upgrades: running it in a normal transaction has unpredictable
    consequences, especially with respect to memcache.
    @param txn: an SQL transaction to use for this update
    @type txn: L{twext.enterprise.ienterprise.IAsyncTransaction}
    @param table: the storage table of the desired home type
    @type table: L{TableSyntax}
    @param oldUID: the old UID, the existing home's UID
    @type oldUID: L{str}
    @param newUID: the new UID, to change the UID to
    @type newUID: L{str}
    @return: a L{Deferred} which fires when the home is renamed.
    """
    statement = Update(
        {table.OWNER_UID: newUID},
        Where=table.OWNER_UID == oldUID,
    )
    return statement.on(txn)
def _dontBotherWithNotifications(older, newer, merge):
"""
Notifications are more transient and can be easily worked around; don't
bother to migrate all of them when there is a UUID case mismatch.
"""
pass
@inlineCallbacks
def _normalizeHomeUUIDsIn(t, homeType):
    """
    Normalize the UUIDs in the given L{txdav.common.datastore.CommonStore}.
    This changes the case of the UUIDs in the calendar home.
    @param t: the transaction to normalize all the UUIDs in.
    @type t: L{CommonStoreTransaction}
    @param homeType: The type of home to scan, L{ECALENDARTYPE},
        L{EADDRESSBOOKTYPE}, or L{ENOTIFICATIONTYPE}.
    @type homeType: C{int}
    @return: a L{Deferred} which fires with C{None} when the UUID normalization
        is complete.
    """
    from txdav.caldav.datastore.util import fixOneCalendarHome
    homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
                 ECALENDARTYPE: schema.CALENDAR_HOME,
                 ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
    homeTypeName = homeTable.model.name.split("_")[0]
    allUIDs = yield Select([homeTable.OWNER_UID],
                           From=homeTable,
                           OrderBy=homeTable.OWNER_UID).on(t)
    total = len(allUIDs)
    allElapsed = []
    for n, [UID] in enumerate(allUIDs):
        start = time.time()
        if allElapsed:
            # Estimated seconds remaining: average per-home time multiplied by
            # the number of homes still to process.  (Fixed: the original
            # computed "average * total - n", subtracting the loop index from
            # the product instead of from the total.)
            estimate = "%0.3d" % ((sum(allElapsed) / len(allElapsed)) *
                                  (total - n))
        else:
            estimate = "unknown"
        log.info(
            "Scanning UID {uid} [{homeType}] "
            # Fixed: "{pct:0.2d}" is an invalid format spec for a float
            # (precision is not allowed with 'd') and would make this log
            # call raise; use a float presentation type instead.
            "({pct:0.2f}%, {estimate} seconds remaining)...",
            uid=UID, pct=(n / float(total)) * 100, estimate=estimate,
            homeType=homeTypeName
        )
        other = None
        this = yield _getHome(t, homeType, UID)
        if homeType == ECALENDARTYPE:
            fixedThisHome = yield fixOneCalendarHome(this)
        else:
            fixedThisHome = 0
        fixedOtherHome = 0
        if this is None:
            log.info(
                "{uid!r} appears to be missing, already processed", uid=UID
            )
        try:
            uuidobj = UUID(UID)
        except ValueError:
            # Not a UUID-shaped owner UID at all; nothing to normalize.
            pass
        else:
            newname = str(uuidobj).upper()
            if UID != newname:
                log.info(
                    "Detected case variance: {uid} {newuid}[{homeType}]",
                    uid=UID, newuid=newname, homeType=homeTypeName
                )
                other = yield _getHome(t, homeType, newname)
                if other is None:
                    # No duplicate: just fix the name.
                    yield _renameHome(t, homeTable, UID, newname)
                else:
                    if homeType == ECALENDARTYPE:
                        fixedOtherHome = yield fixOneCalendarHome(other)
                    this = yield mergeHomes(t, this, other, homeType)
                # NOTE: WE MUST NOT TOUCH EITHER HOME OBJECT AFTER THIS POINT.
                # THE UIDS HAVE CHANGED AND ALL OPERATIONS WILL FAIL.
        end = time.time()
        elapsed = end - start
        allElapsed.append(elapsed)
        log.info(
            "Scanned UID {uid}; {elapsed} seconds elapsed,"
            " {fixes} properties fixed ({duplicate} fixes in duplicate).",
            uid=UID, elapsed=elapsed, fixes=fixedThisHome,
            duplicate=fixedOtherHome
        )
    returnValue(None)
def _getHome(txn, homeType, uid):
    """
    Like L{CommonHome.homeWithUID} but also honoring ENOTIFICATIONTYPE which
    isn't I{really} a type of home.
    @param txn: the transaction to retrieve the home from
    @type txn: L{CommonStoreTransaction}
    @param homeType: L{ENOTIFICATIONTYPE}, L{ECALENDARTYPE}, or
        L{EADDRESSBOOKTYPE}.
    @param uid: the UID of the home to retrieve.
    @type uid: L{str}
    @return: a L{Deferred} that fires with the L{CommonHome} or
        L{NotificationHome} when it has been retrieved.
    """
    if homeType != ENOTIFICATIONTYPE:
        return txn.homeWithUID(homeType, uid)
    return txn.notificationsWithUID(uid)
@inlineCallbacks
def _normalizeColumnUUIDs(txn, column):
    """
    Upper-case the UUIDs in the given SQL DAL column.
    @param txn: The transaction.
    @type txn: L{CommonStoreTransaction}
    @param column: the column, which may contain UIDs, to normalize.
    @type column: L{ColumnSyntax}
    @return: A L{Deferred} that will fire when the UUID normalization of the
        given column has completed.
    """
    tableModel = column.model.table
    # Get a primary key made of column syntax objects for querying and
    # comparison later.
    pkey = [ColumnSyntax(columnModel)
            for columnModel in tableModel.primaryKey]
    for row in (yield Select([column] + pkey,
                             From=TableSyntax(tableModel)).on(txn)):
        before = row[0]
        pkeyparts = row[1:]
        after = normalizeUUIDOrNot(before)
        if after != before:
            where = _AndNothing
            # Build a where clause out of the primary key columns and the
            # values fetched for them.  Fixed: the original zipped
            # (pkeyparts, pkey), so "pkeycol" was actually the fetched value
            # and the comparison only produced correct SQL via Python's
            # reflected __eq__; zip in the right order and compare
            # column == value explicitly.
            for pkeycol, pkeypart in zip(pkey, pkeyparts):
                where = where.And(pkeycol == pkeypart)
            yield Update({column: after}, Where=where).on(txn)
class _AndNothing(object):
"""
Simple placeholder for iteratively generating a 'Where' clause; the 'And'
just returns its argument, so it can be used at the start of the loop.
"""
@staticmethod
def And(self):
"""
Return the argument.
"""
return self
@inlineCallbacks
def _needsNormalizationUpgrade(txn):
    """
    Determine whether a given store requires a UUID normalization data upgrade.
    @param txn: the transaction to use
    @type txn: L{CommonStoreTransaction}
    @return: a L{Deferred} that fires with C{True} or C{False} depending on
        whether we need the normalization upgrade or not.
    """
    homeTables = (schema.CALENDAR_HOME, schema.ADDRESSBOOK_HOME,
                  schema.NOTIFICATION_HOME)
    for homeTable in homeTables:
        # Cheap SQL-side pre-filter: owner UIDs that are not already
        # upper-case are the only candidates.
        query = Select([homeTable.OWNER_UID], From=homeTable,
                       Where=homeTable.OWNER_UID != Upper(homeTable.OWNER_UID))
        rows = yield query.on(txn)
        if rows:
            for [uid] in rows:
                # Confirm with the full normalization rule before deciding.
                if normalizeUUIDOrNot(uid) != uid:
                    returnValue(True)
    returnValue(False)
@inlineCallbacks
def fixUUIDNormalization(store):
    """
    Fix all UUIDs in the given SQL store to be in a canonical form;
    00000000-0000-0000-0000-000000000000 format and upper-case.
    """
    t = store.newTransaction(label="fixUUIDNormalization", disableCache=True)
    # First, let's see if there are any calendar, addressbook, or notification
    # homes that have a de-normalized OWNER_UID. If there are none, then we can
    # early-out and avoid the tedious and potentially expensive inspection of
    # oodles of calendar data.
    if not (yield _needsNormalizationUpgrade(t)):
        log.info("No potentially denormalized UUIDs detected, "
                 "skipping normalization upgrade.")
        yield t.abort()
        returnValue(None)
    try:
        yield _normalizeHomeUUIDsIn(t, ECALENDARTYPE)
        yield _normalizeHomeUUIDsIn(t, EADDRESSBOOKTYPE)
        yield _normalizeHomeUUIDsIn(t, ENOTIFICATIONTYPE)
        yield _normalizeColumnUUIDs(t, schema.RESOURCE_PROPERTY.VIEWER_UID)
        yield _normalizeColumnUUIDs(t, schema.APN_SUBSCRIPTIONS.SUBSCRIBER_GUID)
    except:
        log.failure("Unable to normalize UUIDs")
        yield t.abort()
        # There's a lot of possible problems here which are very hard to test
        # for individually; unexpected data that might cause constraint
        # violations under one of the manipulations done by
        # normalizeHomeUUIDsIn. Since this upgrade does not come along with a
        # schema version bump and may be re- attempted at any time, just raise
        # the exception and log it so that we can try again later, and the
        # service will survive for everyone _not_ affected by this somewhat
        # obscure bug.
        # NOTE(review): contrary to the comment above, nothing is actually
        # re-raised here -- the failure is only logged and the transaction
        # aborted; confirm whether the exception was meant to propagate.
    else:
        yield t.commit()
|
|
"""
Copyright (c) 2012, Thomas M. Farrelly
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class META :
  # Global configuration for the code generator: output file base NAME and
  # verbatim HEADER/DECL text emitted into the generated .c/.h files.
  NAME = 'unamed'
  HEADER = ''
  DECL = ''
  TYPES = []
  # Emit <NAME>.h and <NAME>.c from every class registered in T.classes and
  # every word identifier in T.wids.
  # NOTE(review): BUILD takes no 'self' and is not a @staticmethod; under
  # Python 2 (this file uses dict.iteritems) calling META.BUILD() would fail
  # with an unbound-method error -- confirm how it is actually invoked.
  def BUILD() :
    result_h = ''
    result_c = META.HEADER
    # Forward declarations: one struct + typedef pair per generated class.
    result_h += ( ''.join( [
      '\nstruct ' + c.name + '_struct ;' +
      '\ntypedef struct ' + c.name + '_struct * ' + c.name + ' ;'
        for c in T.classes ] ) )
    result_h += META.DECL
    # One TYPE global per class.
    result_h += ( ''.join( [
      '\nTYPE ' + c.name + '_type ;'
        for c in T.classes ] ) )
    # One WID global per word identifier.
    result_h += ( ''.join( [
      '\nWID WI_' + stringtocid( w ) + ' ;'
        for w in T.wids ] ) )
    # Constructor prototypes and struct definitions.
    result_h += ( ''.join( [
      '\ninline ' + c.name + ' new' + c.name + '( ' +
      ( ', '.join( [
        a.t + ' ' + a.n
          for a in c.attributes ] ) ) + ' ) ;' +
      '\nstruct ' + c.name + '_struct {' +
      '\n n_void (*objective)( TASK ) ;'
      '\n n_string (*debug)( ANY ) ;' +
      ( ''.join( [
        '\n ' + a.t + ' ' + a.n + ' ;'
          for a in c.attributes ] ) ) + '\n} ;'
        for c in T.classes ] ) )
    # Implementation file: objective function (wrapped in per-class macros),
    # debug formatter and inline constructor for each class.
    result_c += ( ''.join( [
      '\n#define THIS c(' + c.name +',task->action->value)' +
      '\n#define THIS_R newREFERENCE( ' + c.name + '_type, task->context->this->value, any(THIS) )' +
      '\n#define DO_TYPE_ID_TEST TYPE_RESPONSE(' + c.name + '_type )'
      '\n#define PSTHIS ' + ( ( 'C(' + c.t1 + ',' ) if c.t1 else 'any(' ) + 'task->context->this->svalue)' +
      '\n#define PSTHAT ' + ( ( 'C(' + c.t2 + ',' ) if c.t2 else 'any(' ) + 'task->context->that->svalue)' +
      '\nn_void ' + c.name + '_objective( TASK task ) {' + c.objective + '}' +
      '\n#undef PSTHAT' +
      '\n#undef PSTHIS' +
      '\n#undef DO_TYPE_ID_TEST' +
      '\n#undef THIS_R' +
      '\n#undef THIS' +
      '\nn_string debug' + c.name + '( ANY o ) {' +
      '\n  char * s ;' +
      '\n  asprintf( &s, "[%04zx:%s' + c.debug.f + ']", c( size_t, o ) >> 4 & 0xfff, "' + c.name + '"' +
      ( ''.join( [
        ', ' + d
          for d in c.debug.d ] ) ) + ' ) ;' +
      '\n  return s ;' +
      '\n}' +
      (
      '\ninline ' + c.name + ' new' + c.name + '( ' +
      ( ', '.join( [
        a.t + ' ' + a.n
          for a in c.attributes ] ) ) + ' ) {' +
      '\n  ' + c.name + ' new_object = ALLOCATE( struct ' + c.name + '_struct ) ;' +
      '\n  new_object->objective = ' + c.name + '_objective ;' +
      '\n  new_object->debug = debug' + c.name + ' ;' +
      ( ''.join( [
        '\n  new_object->' + a.n + ' = ' + a.n + ' ;'
          for a in c.attributes ] ) ) +
      '\n  return new_object ;' +
      '\n}'
      )
        for c in T.classes ] ) + '' )
    # Registration functions that create the TYPE and WID globals at startup.
    result_h += '\nn_void INITIALIZE_' + META.NAME + '_TYPES() ;'
    result_c += ( '\nn_void INITIALIZE_' + META.NAME + '_TYPES() {' + ( ''.join( [
      '\n  ' + c.name + '_type = newTYPE( newTID(), ' + c.name + '_objective, any(NONE), any(NONE) ) ; '
        for c in T.classes ] ) ) + '\n}' )
    result_h += '\nn_void INITIALIZE_' + META.NAME + '_WIDS() ;'
    result_c += ( '\nn_void INITIALIZE_' + META.NAME + '_WIDS() {' +
      '\n  WIDS = listNEW ; ' + ( ''.join( [
      '\n  WI_' + stringtocid( w ) + ' = widNEW( "' + w + '" ) ; '
        for w in T.wids ] ) ) + '\n}' )
    result_h += '\n\n'
    result_c += '\n\n'
    # Write both generated files next to the current working directory.
    open( META.NAME + '.h', 'w' ).write( result_h ) ;
    open( META.NAME + '.c', 'w' ).write( result_c ) ;
class D:
    """Debug-format descriptor: printf fragment *f* plus the C argument
    expressions *d* interpolated into it."""
    def __init__(self, f='', *d):
        self.f = f
        self.d = d
class A:
    """Attribute descriptor for a generated struct member: C type *t* and
    member name *n*."""
    def __init__(self, t, n):
        self.n = n
        self.t = t
class T:
    """
    One generated C class.  Instantiating a T registers it in the class-level
    T.classes list, which META.BUILD() later walks to emit code; T.wids holds
    the word identifiers set via W().
    """
    classes = []
    wids = []
    def __init__(self, name, t1=None, t2=None, attributes=(), objective="", debug=D()):
        # Record the per-class code-generation inputs verbatim.
        self.name = name
        self.attributes = attributes
        self.objective = objective
        self.debug = debug
        self.t1 = t1
        self.t2 = t2
        # Register globally so META.BUILD() will emit this class.
        T.classes.append(self)
def stringtocid(s):
    """
    Replace punctuation characters in *s* with C-identifier-safe mnemonics
    (e.g. '.' -> 'DOT', '+' -> 'ADD') so the result can be embedded in a
    generated C symbol name.
    """
    replacements = {
        '.': 'DOT', '!': 'EXCLAIM', ':': 'COLON',
        '+': 'ADD', '-': 'SUB', '*': 'MUL', '/': 'DIV',
        '<': 'LT', '=': 'EQ', '>': 'GT', '%': 'PERC',
    }
    # .items() instead of the Python-2-only .iteritems(): behaviour is
    # identical here and the function now also runs under Python 3.
    # Iteration order is irrelevant: no mnemonic contains a key character.
    for old, new in replacements.items():
        s = s.replace(old, new)
    return s
def W(*wids):
    """Record the word-identifier strings for which META.BUILD() will emit
    WID globals."""
    T.wids = wids
class X:
    """Typed-parameter descriptor: parameter name *n* and its class name *c*."""
    def __init__(self, n, c):
        self.c = c
        self.n = n
def P( name, *parameters ) :
  # Register the family of generated classes implementing a parameter
  # assortment for 'name': PARAM<name>_assort evaluates the incoming tuple,
  # PARAM<name>_0 checks its arity and declares per-parameter references,
  # PARAM<name>_1 waits for all of them to resolve, and PARAM<name>_struct
  # is the resulting value object.  'parameters' are X(name, class) pairs.
  # Stage: evaluate 'that' into a TUPLE, then hand off to stage 0.
  T( 'PARAM' + name + '_assort', None, None, (), """
    REFERENCE tuple = refNEW( TUPLE_type, any(NONE) ) ;
    REFERENCE tuple_ref = ref(tuple) ;
    task->next = newTASK( ref(newPARAM%(name)s_0( tuple )), task->context, task->result, task->next, task->exit ) ;
    CONTEXT c0 = newCONTEXT( task->context->closure, tuple_ref, ref(newNOUN( task->context->that )) ) ;
    task->next = newTASK( tuple_ref, c0, ref(NONE), task->next, task->next ) ;
  """ % { 'name':name } )
  # Stage 0: arity check plus one typed REFERENCE declaration per parameter;
  # each tuple element is scheduled to resolve into its reference.
  T( 'PARAM' + name + '_0', None, None, (
    A( 'REFERENCE', 'tuple' ),
  ), """
    // OUT( assort 0 ) ;
    // %(dbg)s
    if ( NOTNONE( THIS->tuple->value ) ) {
      if ( C(TUPLE,THIS->tuple->value)->list->length == %(len)s ) {
        // OUT( assort 0.1 ) ;
        %(decl)s
        task->next = newTASK( ref(newPARAM%(name)s_1( %(attr)s )), task->context, task->result, task->next, task->exit ) ;
        %(check)s
      }
    }
  """ % { 'name':name, 'len':len(parameters),
    'dbg': ( ''.join( [ 'LOG( C(TUPLE,THIS->tuple->value)->list->data[%s]->value ) ;' % str( i ) for i in range( len(parameters) ) ] ) ),
    'attr': ( ', '.join( [ p.n + '_ref' for p in parameters ] ) ),
    'decl': ( ''.join( [ """
      REFERENCE %(name)s_ref = refNEW( %(cls)s_type, any(NONE) ) ;
      REFERENCE %(name)s_ref_ref = ref(%(name)s_ref) ;
    """ % { 'name':p.n, 'cls':p.c } for p in parameters ] ) ),
    'check': ( ''.join( [ """
      // LOG( %(name)s_ref_ref ) ;
      CONTEXT c%(name)s = newCONTEXT( task->context->closure, %(name)s_ref_ref, ref(newNOUN( C(TUPLE,THIS->tuple->value)->list->data[%(i)s] )) ) ;
      task->next = newTASK( %(name)s_ref_ref, c%(name)s, ref(NONE), task->next, task->next ) ;
    """ % { 'i':i, 'name':parameters[i].n } for i in range( len(parameters) ) ] ) )
  } )
  # Stage 1: once every parameter reference has a value, build the struct.
  T( 'PARAM' + name + '_1', None, None, [
    A( 'REFERENCE', p.n + '_ref' ) for p in parameters
  ], """
    // OUT( assort 1 ) ;
    // %(dbg)s
    if ( %(test)s ) {
      // OUT( assort 1.1 ) ;
      RETURN( newPARAM%(name)s_struct( %(pass)s ) ) ;
    }
  """ % { 'name':name,
    'dbg': ( ''.join( [ 'LOG( THIS->%s_ref->value ) ;' % p.n for p in parameters ] ) ),
    'test': ( ' && '.join( [ 'NOTNONE( THIS->%s_ref->value )' % p.n for p in parameters ] ) ),
    'pass': ( ', '.join( [ 'THIS->%s_ref' % p.n for p in parameters ] ) )
  } )
  # Result object: answers the type test and exposes one WID per parameter.
  T( 'PARAM' + name + '_struct', None, None, [
    A( 'REFERENCE', p.n + '_ref' ) for p in parameters
  ], """
    DO_TYPE ;
    %(attr)s
  """ % { 'attr': ( ''.join( [ """
    ONWID( WI_%(p)s, THIS->%(p)s_ref->value ) ;
  """ % { 'p':p.n } for p in parameters ] ) ) } )
|
|
# coding: utf-8
"""
flask_oauthlib.provider.oauth1
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Implements OAuth1 provider support for Flask.
:copyright: (c) 2013 - 2014 by Hsiaoming Yang.
"""
import logging
from functools import wraps
from werkzeug import cached_property
from flask import request, redirect, url_for
from flask import make_response, abort
from oauthlib.oauth1 import RequestValidator
from oauthlib.oauth1 import WebApplicationServer as Server
from oauthlib.oauth1 import SIGNATURE_HMAC, SIGNATURE_RSA
from oauthlib.common import to_unicode, add_params_to_uri, urlencode
from oauthlib.oauth1.rfc5849 import errors
from ..utils import extract_params, create_response
SIGNATURE_METHODS = (SIGNATURE_HMAC, SIGNATURE_RSA)
__all__ = ('OAuth1Provider', 'OAuth1RequestValidator')
log = logging.getLogger('flask_oauthlib')
class OAuth1Provider(object):
    """Provide secure services using OAuth1.

    Like many other Flask extensions, there are two usage modes. One is
    binding the Flask app instance::

        app = Flask(__name__)
        oauth = OAuth1Provider(app)

    The second possibility is to bind the Flask app later::

        oauth = OAuth1Provider()

        def create_app():
            app = Flask(__name__)
            oauth.init_app(app)
            return app

    And now you can protect the resource with realms::

        @app.route('/api/user')
        @oauth.require_oauth('email', 'username')
        def user():
            return jsonify(request.oauth.user)
    """

    def __init__(self, app=None):
        self._before_request_funcs = []
        self._after_request_funcs = []
        if app:
            self.init_app(app)

    def init_app(self, app):
        """
        This callback can be used to initialize an application for the
        oauth provider instance.
        """
        self.app = app
        app.extensions = getattr(app, 'extensions', {})
        app.extensions['oauthlib.provider.oauth1'] = self

    @cached_property
    def error_uri(self):
        """The error page URI.

        When something turns error, it will redirect to this error page.
        You can configure the error page URI with Flask config::

            OAUTH1_PROVIDER_ERROR_URI = '/error'

        You can also define the error page by a named endpoint::

            OAUTH1_PROVIDER_ERROR_ENDPOINT = 'oauth.error'
        """
        error_uri = self.app.config.get('OAUTH1_PROVIDER_ERROR_URI')
        if error_uri:
            return error_uri
        error_endpoint = self.app.config.get('OAUTH1_PROVIDER_ERROR_ENDPOINT')
        if error_endpoint:
            return url_for(error_endpoint)
        return '/oauth/errors'

    @cached_property
    def server(self):
        """
        All in one endpoints. This property is created automatically
        if you have implemented all the getters and setters.
        """
        if hasattr(self, '_validator'):
            return Server(self._validator)

        if hasattr(self, '_clientgetter') and \
                hasattr(self, '_tokengetter') and \
                hasattr(self, '_tokensetter') and \
                hasattr(self, '_noncegetter') and \
                hasattr(self, '_noncesetter') and \
                hasattr(self, '_grantgetter') and \
                hasattr(self, '_grantsetter') and \
                hasattr(self, '_verifiergetter') and \
                hasattr(self, '_verifiersetter'):
            validator = OAuth1RequestValidator(
                clientgetter=self._clientgetter,
                tokengetter=self._tokengetter,
                tokensetter=self._tokensetter,
                grantgetter=self._grantgetter,
                grantsetter=self._grantsetter,
                noncegetter=self._noncegetter,
                noncesetter=self._noncesetter,
                verifiergetter=self._verifiergetter,
                verifiersetter=self._verifiersetter,
                config=self.app.config,
            )

            self._validator = validator
            server = Server(validator)
            if self.app.testing:
                # It will always be false, since the redirect_uri
                # didn't match when doing the testing
                server._check_signature = lambda *args, **kwargs: True
            return server
        raise RuntimeError(
            'application not bound to required getters and setters'
        )

    def before_request(self, f):
        """Register functions to be invoked before accessing the resource.

        The function accepts nothing as parameters, but you can get
        information from `Flask.request` object. It is usually useful
        for setting limitation on the client request::

            @oauth.before_request
            def limit_client_request():
                client_key = request.values.get('client_key')
                if not client_key:
                    return
                client = Client.get(client_key)
                if over_limit(client):
                    return abort(403)
                track_request(client)
        """
        self._before_request_funcs.append(f)
        return f

    def after_request(self, f):
        """Register functions to be invoked after accessing the resource.

        The function accepts ``valid`` and ``request`` as parameters,
        and it should return a tuple of them::

            @oauth.after_request
            def valid_after_request(valid, oauth):
                if oauth.user in black_list:
                    return False, oauth
                return valid, oauth
        """
        self._after_request_funcs.append(f)
        return f

    def clientgetter(self, f):
        """Register a function as the client getter.

        The function accepts one parameter `client_key`, and it returns
        a client object with at least these information:

            - client_key: A random string
            - client_secret: A random string
            - redirect_uris: A list of redirect uris
            - default_realms: Default scopes of the client

        The client may contain more information, which is suggested:

            - default_redirect_uri: One of the redirect uris

        Implement the client getter::

            @oauth.clientgetter
            def get_client(client_key):
                client = get_client_model(client_key)
                # Client is an object
                return client
        """
        self._clientgetter = f
        return f

    def tokengetter(self, f):
        """Register a function as the access token getter.

        The function accepts `client_key` and `token` parameters, and it
        returns an access token object contains:

            - client: Client associated with this token
            - user: User associated with this token
            - token: Access token
            - secret: Access token secret
            - realms: Realms with this access token

        Implement the token getter::

            @oauth.tokengetter
            def get_access_token(client_key, token):
                return AccessToken.get(client_key=client_key, token=token)
        """
        self._tokengetter = f
        return f

    def tokensetter(self, f):
        """Register a function as the access token setter.

        The setter accepts two parameters at least, one is token,
        the other is request::

            @oauth.tokensetter
            def save_access_token(token, request):
                access_token = AccessToken(
                    client=request.client,
                    user=request.user,
                    token=token['oauth_token'],
                    secret=token['oauth_token_secret'],
                    realms=token['oauth_authorized_realms'].split(' '),
                )
                return access_token.save()

        The parameter token is a dict, that looks like::

            {
                u'oauth_token': u'arandomstringoftoken',
                u'oauth_token_secret': u'arandomstringofsecret',
                u'oauth_authorized_realms': u'email address'
            }

        The `request` object would provide these information (at least)::

            - client: Client object associated with this token
            - user: User object associated with this token
            - request_token: Request token for exchanging this access token
        """
        self._tokensetter = f
        return f

    def grantgetter(self, f):
        """Register a function as the request token getter.

        The function accepts a `token` parameter, and it returns an
        request token object contains:

            - client: Client associated with this token
            - token: Access token
            - secret: Access token secret
            - realms: Realms with this access token
            - redirect_uri: A URI for redirecting

        Implement the token getter::

            @oauth.grantgetter
            def get_request_token(token):
                return RequestToken.get(token=token)
        """
        self._grantgetter = f
        return f

    def grantsetter(self, f):
        """Register a function as the request token setter.

        The setter accepts a token and request parameters::

            @oauth.grantsetter
            def save_request_token(token, request):
                data = RequestToken(
                    token=token['oauth_token'],
                    secret=token['oauth_token_secret'],
                    client=request.client,
                    redirect_uri=oauth.redirect_uri,
                    realms=request.realms,
                )
                return data.save()
        """
        self._grantsetter = f
        return f

    def noncegetter(self, f):
        """Register a function as the nonce and timestamp getter.

        The function accepts parameters:

            - client_key: The client/consumer key
            - timestamp: The ``oauth_timestamp`` parameter
            - nonce: The ``oauth_nonce`` parameter
            - request_token: Request token string, if any
            - access_token: Access token string, if any

        A nonce and timestamp make each request unique. The implementation::

            @oauth.noncegetter
            def get_nonce(client_key, timestamp, nonce, request_token,
                          access_token):
                return Nonce.get("...")
        """
        self._noncegetter = f
        return f

    def noncesetter(self, f):
        """Register a function as the nonce and timestamp setter.

        The parameters are the same with :meth:`noncegetter`::

            @oauth.noncesetter
            def save_nonce(client_key, timestamp, nonce, request_token,
                           access_token):
                data = Nonce("...")
                return data.save()

        The timestamp will be expired in 60s, it would be a better design
        if you put timestamp and nonce object in a cache.
        """
        self._noncesetter = f
        return f

    def verifiergetter(self, f):
        """Register a function as the verifier getter.

        The return verifier object should at least contain a user object
        which is the current user.

        The implemented code looks like::

            @oauth.verifiergetter
            def load_verifier(verifier, token):
                data = Verifier.get(verifier)
                if data.request_token == token:
                    # check verifier for safety
                    return data
                return data
        """
        self._verifiergetter = f
        return f

    def verifiersetter(self, f):
        """Register a function as the verifier setter.

        A verifier is better together with request token, but it is not
        required. A verifier is used together with request token for
        exchanging access token, it has an expire time, in this case, it
        would be a better design if you put them in a cache.

        The implemented code looks like::

            @oauth.verifiersetter
            def save_verifier(verifier, token, *args, **kwargs):
                data = Verifier(
                    verifier=verifier['oauth_verifier'],
                    request_token=token,
                    user=get_current_user()
                )
                return data.save()
        """
        self._verifiersetter = f
        return f

    def authorize_handler(self, f):
        """Authorization handler decorator.

        This decorator will sort the parameters and headers out, and
        pre validate everything::

            @app.route('/oauth/authorize', methods=['GET', 'POST'])
            @oauth.authorize_handler
            def authorize(*args, **kwargs):
                if request.method == 'GET':
                    # render a page for user to confirm the authorization
                    return render_template('oauthorize.html')
                confirm = request.form.get('confirm', 'no')
                return confirm == 'yes'
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            if request.method == 'POST':
                if not f(*args, **kwargs):
                    # the view declined the authorization request
                    uri = add_params_to_uri(
                        self.error_uri, [('error', 'denied')]
                    )
                    return redirect(uri)
                return self.confirm_authorization_request()

            server = self.server
            uri, http_method, body, headers = extract_params()
            try:
                realms, credentials = server.get_realms_and_credentials(
                    uri, http_method=http_method, body=body, headers=headers
                )
                kwargs['realms'] = realms
                kwargs.update(credentials)
                return f(*args, **kwargs)
            except errors.OAuth1Error as e:
                # InvalidClientError subclasses OAuth1Error, so this single
                # handler covers both (a second, identical handler for
                # InvalidClientError was unreachable and has been removed).
                return redirect(e.in_uri(self.error_uri))
        return decorated

    def confirm_authorization_request(self):
        """When consumer confirm the authorization."""
        server = self.server
        uri, http_method, body, headers = extract_params()
        try:
            realms, credentials = server.get_realms_and_credentials(
                uri, http_method=http_method, body=body, headers=headers
            )
            ret = server.create_authorization_response(
                uri, http_method, body, headers, realms, credentials
            )
            log.debug('Authorization successful.')
            return create_response(*ret)
        except errors.OAuth1Error as e:
            # covers InvalidClientError as well (it is a subclass)
            return redirect(e.in_uri(self.error_uri))

    def request_token_handler(self, f):
        """Request token handler decorator.

        The decorated function should return an dictionary or None as
        the extra credentials for creating the token response.

        If you don't need to add any extra credentials, it could be as
        simple as::

            @app.route('/oauth/request_token')
            @oauth.request_token_handler
            def request_token():
                return {}
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            server = self.server
            uri, http_method, body, headers = extract_params()
            credentials = f(*args, **kwargs)
            try:
                ret = server.create_request_token_response(
                    uri, http_method, body, headers, credentials)
                return create_response(*ret)
            except errors.OAuth1Error as e:
                return _error_response(e)
        return decorated

    def access_token_handler(self, f):
        """Access token handler decorator.

        The decorated function should return an dictionary or None as
        the extra credentials for creating the token response.

        If you don't need to add any extra credentials, it could be as
        simple as::

            @app.route('/oauth/access_token')
            @oauth.access_token_handler
            def access_token():
                return {}
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            server = self.server
            uri, http_method, body, headers = extract_params()
            credentials = f(*args, **kwargs)
            try:
                ret = server.create_access_token_response(
                    uri, http_method, body, headers, credentials)
                return create_response(*ret)
            except errors.OAuth1Error as e:
                return _error_response(e)
        return decorated

    def require_oauth(self, *realms, **kwargs):
        """Protect resource with specified scopes."""
        def wrapper(f):
            @wraps(f)
            def decorated(*args, **kwargs):
                for func in self._before_request_funcs:
                    func()

                if hasattr(request, 'oauth') and request.oauth:
                    # already validated by an enclosing require_oauth
                    return f(*args, **kwargs)

                server = self.server
                uri, http_method, body, headers = extract_params()
                try:
                    valid, req = server.validate_protected_resource_request(
                        uri, http_method, body, headers, realms
                    )
                except Exception as e:
                    # `warn` is a deprecated alias of `warning`
                    log.warning('Exception: %r', e)
                    e.urlencoded = urlencode([('error', 'unknown')])
                    e.status_code = 400
                    return _error_response(e)

                for func in self._after_request_funcs:
                    valid, req = func(valid, req)

                if not valid:
                    return abort(401)

                # alias user for convenience
                req.user = req.access_token.user
                request.oauth = req
                return f(*args, **kwargs)
            return decorated
        return wrapper
class OAuth1RequestValidator(RequestValidator):
    """Subclass of Request Validator.

    :param clientgetter: a function to get client object
    :param tokengetter: a function to get access token
    :param tokensetter: a function to save access token
    :param grantgetter: a function to get request token
    :param grantsetter: a function to save request token
    :param noncegetter: a function to get nonce and timestamp
    :param noncesetter: a function to save nonce and timestamp
    :param verifiergetter: a function to get verifier
    :param verifiersetter: a function to save verifier
    """

    def __init__(self, clientgetter, tokengetter, tokensetter,
                 grantgetter, grantsetter, noncegetter, noncesetter,
                 verifiergetter, verifiersetter, config=None):
        self._clientgetter = clientgetter
        # access token getter and setter
        self._tokengetter = tokengetter
        self._tokensetter = tokensetter
        # request token getter and setter
        self._grantgetter = grantgetter
        self._grantsetter = grantsetter
        # nonce and timestamp
        self._noncegetter = noncegetter
        self._noncesetter = noncesetter
        # verifier getter and setter
        self._verifiergetter = verifiergetter
        self._verifiersetter = verifiersetter
        self._config = config or {}

    @property
    def allowed_signature_methods(self):
        """Allowed signature methods.

        Default value: SIGNATURE_HMAC and SIGNATURE_RSA.

        You can customize with Flask Config:

            - OAUTH1_PROVIDER_SIGNATURE_METHODS
        """
        return self._config.get(
            'OAUTH1_PROVIDER_SIGNATURE_METHODS',
            SIGNATURE_METHODS,
        )

    @property
    def client_key_length(self):
        return self._config.get(
            'OAUTH1_PROVIDER_KEY_LENGTH',
            (20, 30)
        )

    @property
    def request_token_length(self):
        # BUG FIX: this property was previously misspelled
        # ``reqeust_token_length`` and therefore never overrode the
        # oauthlib base class attribute of the same (correct) name.
        return self._config.get(
            'OAUTH1_PROVIDER_KEY_LENGTH',
            (20, 30)
        )

    # Deprecated misspelled alias kept for backward compatibility.
    reqeust_token_length = request_token_length

    @property
    def access_token_length(self):
        return self._config.get(
            'OAUTH1_PROVIDER_KEY_LENGTH',
            (20, 30)
        )

    @property
    def nonce_length(self):
        return self._config.get(
            'OAUTH1_PROVIDER_KEY_LENGTH',
            (20, 30)
        )

    @property
    def verifier_length(self):
        return self._config.get(
            'OAUTH1_PROVIDER_KEY_LENGTH',
            (20, 30)
        )

    @property
    def realms(self):
        return self._config.get('OAUTH1_PROVIDER_REALMS', [])

    @property
    def enforce_ssl(self):
        """Enforce SSL request.

        Default is True. You can customize with:

            - OAUTH1_PROVIDER_ENFORCE_SSL
        """
        return self._config.get('OAUTH1_PROVIDER_ENFORCE_SSL', True)

    @property
    def dummy_client(self):
        return to_unicode('dummy_client', 'utf-8')

    @property
    def dummy_request_token(self):
        return to_unicode('dummy_request_token', 'utf-8')

    @property
    def dummy_access_token(self):
        return to_unicode('dummy_access_token', 'utf-8')

    def get_client_secret(self, client_key, request):
        """Get client secret.

        The client object must have a ``client_secret`` attribute.
        """
        log.debug('Get client secret of %r', client_key)
        if not request.client:
            request.client = self._clientgetter(client_key=client_key)
        if request.client:
            return request.client.client_secret
        return None

    def get_request_token_secret(self, client_key, token, request):
        """Get request token secret.

        The request token object should have a ``secret`` attribute.
        """
        log.debug('Get request token secret of %r for %r',
                  token, client_key)
        tok = request.request_token or self._grantgetter(token=token)
        if tok and tok.client_key == client_key:
            request.request_token = tok
            return tok.secret
        return None

    def get_access_token_secret(self, client_key, token, request):
        """Get access token secret.

        The access token object should have a ``secret`` attribute.
        """
        log.debug('Get access token secret of %r for %r',
                  token, client_key)
        tok = request.access_token or self._tokengetter(
            client_key=client_key,
            token=token,
        )
        if tok:
            request.access_token = tok
            return tok.secret
        return None

    def get_default_realms(self, client_key, request):
        """Default realms of the client."""
        log.debug('Get realms for %r', client_key)
        if not request.client:
            request.client = self._clientgetter(client_key=client_key)

        client = request.client
        if hasattr(client, 'default_realms'):
            return client.default_realms
        return []

    def get_realms(self, token, request):
        """Realms for this request token."""
        log.debug('Get realms of %r', token)
        tok = request.request_token or self._grantgetter(token=token)
        if not tok:
            return []
        request.request_token = tok
        if hasattr(tok, 'realms'):
            return tok.realms or []
        return []

    def get_redirect_uri(self, token, request):
        """Redirect uri for this request token."""
        log.debug('Get redirect uri of %r', token)
        tok = request.request_token or self._grantgetter(token=token)
        # oauthlib only calls this after the request token has been
        # validated, so ``tok`` is expected to be non-None here.
        return tok.redirect_uri

    def get_rsa_key(self, client_key, request):
        """Retrieves a previously stored client provided RSA key."""
        if not request.client:
            request.client = self._clientgetter(client_key=client_key)
        if hasattr(request.client, 'rsa_key'):
            return request.client.rsa_key
        return None

    def invalidate_request_token(self, client_key, request_token, request):
        """Invalidates a used request token."""
        # TODO: not implemented; request tokens are currently reusable

    def validate_client_key(self, client_key, request):
        """Validates that supplied client key."""
        log.debug('Validate client key for %r', client_key)
        if not request.client:
            request.client = self._clientgetter(client_key=client_key)
        if request.client:
            return True
        return False

    def validate_request_token(self, client_key, token, request):
        """Validates request token is available for client."""
        log.debug('Validate request token %r for %r',
                  token, client_key)
        tok = request.request_token or self._grantgetter(token=token)
        if tok and tok.client_key == client_key:
            request.request_token = tok
            return True
        return False

    def validate_access_token(self, client_key, token, request):
        """Validates access token is available for client."""
        log.debug('Validate access token %r for %r',
                  token, client_key)
        tok = request.access_token or self._tokengetter(
            client_key=client_key,
            token=token,
        )
        if tok:
            request.access_token = tok
            return True
        return False

    def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
                                     request, request_token=None,
                                     access_token=None):
        """Validate the timestamp and nonce is used or not."""
        log.debug('Validate timestamp and nonce %r', client_key)
        nonce_exists = self._noncegetter(
            client_key=client_key, timestamp=timestamp,
            nonce=nonce, request_token=request_token,
            access_token=access_token
        )
        if nonce_exists:
            # replayed request: reject it
            return False
        self._noncesetter(
            client_key=client_key, timestamp=timestamp,
            nonce=nonce, request_token=request_token,
            access_token=access_token
        )
        return True

    def validate_redirect_uri(self, client_key, redirect_uri, request):
        """Validate if the redirect_uri is allowed by the client."""
        log.debug('Validate redirect_uri %r for %r', redirect_uri, client_key)
        if not request.client:
            request.client = self._clientgetter(client_key=client_key)
        if not request.client:
            return False
        if not request.client.redirect_uris and redirect_uri is None:
            # out-of-band clients need no registered redirect uri
            return True
        request.redirect_uri = redirect_uri
        return redirect_uri in request.client.redirect_uris

    def validate_requested_realms(self, client_key, realms, request):
        """Validate that the requested realms are allowed for the client."""
        log.debug('Validate requested realms %r for %r', realms, client_key)
        if not request.client:
            request.client = self._clientgetter(client_key=client_key)

        client = request.client
        if not client:
            return False

        if hasattr(client, 'validate_realms'):
            return client.validate_realms(realms)
        # BUG FIX: this previously fell through to an unconditional
        # ``return True``, which made the default_realms check dead code
        # and accepted any requested realm.
        return set(client.default_realms).issuperset(set(realms))

    def validate_realms(self, client_key, token, request, uri=None,
                        realms=None):
        """Check if the token has permission on those realms."""
        log.debug('Validate realms %r for %r', realms, client_key)
        if request.access_token:
            tok = request.access_token
        else:
            tok = self._tokengetter(client_key=client_key, token=token)
            request.access_token = tok
        if not tok:
            return False
        # NOTE(review): assumes ``realms`` is iterable here; oauthlib
        # appears to always pass a list — confirm before hardening.
        return set(tok.realms).issuperset(set(realms))

    def validate_verifier(self, client_key, token, verifier, request):
        """Validate verifier exists."""
        log.debug('Validate verifier %r for %r', verifier, client_key)
        data = self._verifiergetter(verifier=verifier, token=token)
        if not data:
            return False
        if not hasattr(data, 'user'):
            log.debug('Verifier should have a user attribute')
            return False
        request.user = data.user
        if hasattr(data, 'client_key'):
            return data.client_key == client_key
        return True

    def verify_request_token(self, token, request):
        """Verify if the request token is existed."""
        log.debug('Verify request token %r', token)
        tok = request.request_token or self._grantgetter(token=token)
        if tok:
            request.request_token = tok
            return True
        return False

    def verify_realms(self, token, realms, request):
        """Verify if the realms match the requested realms."""
        log.debug('Verify realms %r', realms)
        tok = request.request_token or self._grantgetter(token=token)
        if not tok:
            return False
        request.request_token = tok
        if not hasattr(tok, 'realms'):
            # realms not enabled
            return True
        return set(tok.realms) == set(realms)

    def save_access_token(self, token, request):
        """Save access token to database.

        A tokensetter is required, which accepts a token and request
        parameters::

            def tokensetter(token, request):
                access_token = Token(
                    client=request.client,
                    user=request.user,
                    token=token['oauth_token'],
                    secret=token['oauth_token_secret'],
                    realms=token['oauth_authorized_realms'],
                )
                return access_token.save()
        """
        log.debug('Save access token %r', token)
        self._tokensetter(token, request)

    def save_request_token(self, token, request):
        """Save request token to database.

        A grantsetter is required, which accepts a token and request
        parameters::

            def grantsetter(token, request):
                grant = Grant(
                    token=token['oauth_token'],
                    secret=token['oauth_token_secret'],
                    client=request.client,
                    redirect_uri=oauth.redirect_uri,
                    realms=request.realms,
                )
                return grant.save()
        """
        log.debug('Save request token %r', token)
        self._grantsetter(token, request)

    def save_verifier(self, token, verifier, request):
        """Save verifier to database.

        A verifiersetter is required. It would be better to combine request
        token and verifier together::

            def verifiersetter(token, verifier, request):
                tok = Grant.query.filter_by(token=token).first()
                tok.verifier = verifier['oauth_verifier']
                tok.user = get_current_user()
                return tok.save()

        .. admonition:: Note:

            A user is required on verifier, remember to attach current
            user to verifier.
        """
        log.debug('Save verifier %r for %r', verifier, token)
        self._verifiersetter(
            token=token, verifier=verifier, request=request
        )
def _error_response(e):
    """Translate an OAuth1 error into a form-urlencoded Flask response."""
    body, status = e.urlencoded, e.status_code
    response = make_response(body, status)
    response.headers['Content-Type'] = 'application/x-www-form-urlencoded'
    return response
|
|
'''runstest
formulas for mean and var of runs taken from SAS manual NPAR tests, also idea
for runstest_1samp and runstest_2samp
Description in NIST handbook and dataplot doesn't explain their expected
values, or variance
Note:
There are (at least) two definitions of runs used in literature. The classical
definition which is also used here, is that runs are sequences of identical
observations separated by observations with different realizations.
The second definition allows for overlapping runs, or runs where counting a
run is also started after a run of a fixed length of the same kind.
TODO
* add one-sided tests where possible or where it makes sense
'''
from __future__ import print_function
import numpy as np
from scipy import stats
from scipy.misc import comb
import warnings
class Runs(object):
    '''class for runs in a binary sequence

    Parameters
    ----------
    x : array_like, 1d
        data array, expected to contain zeros and ones

    Notes
    -----
    This was written as a more general class for runs. This has some redundant
    calculations when only the runs_test is used.

    TODO: make it lazy

    The runs test could be generalized to more than 1d if there is a use case
    for it.

    This should be extended once I figure out what the distribution of runs
    of any length k is.

    The exact distribution for the runs test is also available but not yet
    verified.
    '''

    def __init__(self, x):
        # coerce first so plain Python lists can be fancy-indexed below
        # (previously ``x[runstart[:-1]]`` raised TypeError for lists)
        self.x = x = np.asarray(x)

        # boundaries of runs: positions where the value changes, with
        # sentinels so the first and last runs are counted too
        self.runstart = runstart = np.nonzero(np.diff(np.r_[[-np.inf], x, [np.inf]]))[0]
        self.runs = runs = np.diff(runstart)
        self.runs_sign = runs_sign = x[runstart[:-1]]
        self.runs_pos = runs[runs_sign == 1]
        self.runs_neg = runs[runs_sign == 0]
        self.runs_freqs = np.bincount(runs)
        self.n_runs = len(self.runs)
        self.n_pos = (x == 1).sum()

    def runs_test(self, correction=True):
        '''basic version of runs test

        Parameters
        ----------
        correction : bool
            Following the SAS manual, for samplesize below 50, the test
            statistic is corrected by 0.5. This can be turned off with
            correction=False, and was included to match R, tseries, which
            does not use any correction.

        Returns
        -------
        z : float
            test statistic, asymptotically normally distributed
        pval : float
            two-sided p-value based on the normal distribution, with
            integer (continuity) correction
        '''
        self.npo = npo = (self.runs_pos).sum()
        self.nne = nne = (self.runs_neg).sum()

        # mean and variance of the number of runs under the null
        n = npo + nne
        npn = npo * nne
        rmean = 2. * npn / n + 1
        rvar = 2. * npn * (2.*npn - n) / n**2. / (n-1.)
        rstd = np.sqrt(rvar)
        rdemean = self.n_runs - rmean
        if n >= 50 or not correction:
            z = rdemean
        else:
            # continuity correction pulls the statistic toward zero;
            # BUG FIX: the second branch used ``rdemean < 0.5`` which
            # moved small positive deviations *away* from zero and made
            # the ``z = 0`` branch almost unreachable
            if rdemean > 0.5:
                z = rdemean - 0.5
            elif rdemean < -0.5:
                z = rdemean + 0.5
            else:
                z = 0.

        z /= rstd
        pval = 2 * stats.norm.sf(np.abs(z))
        return z, pval
def runstest_1samp(x, cutoff='mean', correction=True):
    '''use runs test on binary discretized data above/below cutoff

    Parameters
    ----------
    x : array_like
        data, numeric
    cutoff : {'mean', 'median'} or number
        This specifies the cutoff to split the data into large and small
        values.
    correction : bool
        Following the SAS manual, for samplesize below 50, the test
        statistic is corrected by 0.5. This can be turned off with
        correction=False, and was included to match R, tseries, which
        does not use any correction.

    Returns
    -------
    z_stat : float
        test statistic, asymptotically normally distributed
    p-value : float
        p-value, reject the null hypothesis if it is below an type 1 error
        level, alpha .
    '''
    # coerce so the elementwise comparison below works for plain lists
    x = np.asarray(x)
    if cutoff == 'mean':
        cutoff = np.mean(x)
    elif cutoff == 'median':
        cutoff = np.median(x)
    xindicator = (x >= cutoff).astype(int)
    return Runs(xindicator).runs_test(correction=correction)
def runstest_2samp(x, y=None, groups=None, correction=True):
    '''Wald-Wolfowitz runstest for two samples

    This tests whether two samples come from the same distribution.

    Parameters
    ----------
    x : array_like
        data, numeric, contains either one group, if y is also given, or
        both groups, if additionally a group indicator is provided
    y : array_like (optional)
        data, numeric
    groups : array_like
        group labels or indicator the data for both groups is given in a
        single 1-dimensional array, x.
    correction : bool
        Following the SAS manual, for samplesize below 50, the test
        statistic is corrected by 0.5. This can be turned off with
        correction=False, and was included to match R, tseries, which
        does not use any correction.

    Returns
    -------
    z_stat : float
        test statistic, asymptotically normally distributed
    p-value : float
        p-value, reject the null hypothesis if it is below an type 1 error
        level, alpha .

    Notes
    -----
    Wald-Wolfowitz runs test.

    If there are ties, then the test statistic and p-value that is
    reported, is based on the higher p-value between sorting all tied
    observations of the same group.

    This test is intended for continuous distributions.
    SAS has treatment for ties, but not clear, and sounds more complicated
    (minimum and maximum possible runs prevent use of argsort)
    (maybe it's not so difficult, idea: add small positive noise to first
    one, run test, then to the other, run test, take max(?) p-value - DONE
    This gives not the minimum and maximum of the number of runs, but should
    be close. Not true, this is close to minimum but far away from maximum.
    maximum number of runs would use alternating groups in the ties.)
    Maybe adding random noise would be the better approach.

    SAS has exact distribution for sample size <=30, doesn't look standard
    but should be easy to add.

    currently two-sided test only

    This has not been verified against a reference implementation. In a short
    Monte Carlo simulation where both samples are normally distribute, the test
    seems to be correctly sized for larger number of observations (30 or
    larger), but conservative (i.e. reject less often than nominal) with a
    sample size of 10 in each group.

    See Also
    --------
    runs_test_1samp
    Runs
    RunsProb
    '''
    x = np.asarray(x)
    if y is not None:
        y = np.asarray(y)
        groups = np.concatenate((np.zeros(len(x)), np.ones(len(y))))
        # note: x is reassigned to the stacked data
        x = np.concatenate((x, y))
        gruni = np.arange(2)
    elif groups is not None:
        # coerce so boolean masking and fancy indexing below work for lists
        groups = np.asarray(groups)
        gruni = np.unique(groups)
        if gruni.size != 2:  # pylint: disable=E1103
            raise ValueError('not exactly two groups specified')
        #require groups to be numeric ???
    else:
        raise ValueError('either y or groups is necessary')

    xargsort = np.argsort(x)
    #check for ties
    x_sorted = x[xargsort]
    x_diff = np.diff(x_sorted)  # used for detecting and handling ties
    if x_diff.min() == 0:
        # ties: break them in favor of each group in turn and report the
        # more conservative (larger) of the two p-values
        warnings.warn('ties detected in runstest_2samp')
        x_mindiff = x_diff[x_diff > 0].min()
        eps = x_mindiff/2.
        # float copy: the original used ``x.copy()``, which kept an integer
        # dtype and made the in-place ``+= eps`` below fail for int input
        xx = x.astype(float)  # don't change original, just in case

        xx[groups == gruni[0]] += eps
        xargsort = np.argsort(xx)
        xindicator = groups[xargsort]
        z0, p0 = Runs(xindicator).runs_test(correction=correction)

        xx[groups == gruni[0]] -= eps  # restore xx = x
        xx[groups == gruni[1]] += eps
        xargsort = np.argsort(xx)
        xindicator = groups[xargsort]
        z1, p1 = Runs(xindicator).runs_test(correction=correction)

        idx = np.argmax([p0, p1])
        return [z0, z1][idx], [p0, p1][idx]
    else:
        xindicator = groups[xargsort]
        return Runs(xindicator).runs_test(correction=correction)
class TotalRunsProb(object):
    '''class for the probability distribution of total runs

    This is the exact probability distribution for the (Wald-Wolfowitz)
    runs test. The random variable is the total number of runs if the
    sample has (n0, n1) observations of groups 0 and 1.

    Notes
    -----
    Written as a class so I can store temporary calculations, but I don't
    think it matters much.

    Formulas taken from SAS manual for one-sided significance level.

    Could be converted to a full univariate distribution, subclassing
    scipy.stats.distributions.

    *Status*
    Not verified yet except for mean.
    '''

    def __init__(self, n0, n1):
        # group sizes and total number of observations
        self.n0 = n0
        self.n1 = n1
        self.n = n0 + n1
        # number of arrangements of the two groups, the normalizing constant
        self.comball = comb(self.n, n1)

    def runs_prob_even(self, r):
        # P(R = r) when the total number of runs r is even:
        # both groups contribute r/2 runs each, in either order
        half = r // 2
        return (comb(self.n0 - 1, half - 1)
                * comb(self.n1 - 1, half - 1)
                * 2. / self.comball)

    def runs_prob_odd(self, r):
        # P(R = r) when r is odd: one group has one more run than the other
        k = (r + 1) // 2
        first = comb(self.n0 - 1, k - 1) * comb(self.n1 - 1, k - 2)
        second = comb(self.n0 - 1, k - 2) * comb(self.n1 - 1, k - 1)
        return (first + second) / self.comball

    def pdf(self, r):
        # vectorized pmf: dispatch each value to the odd/even formula
        r = np.asarray(r)
        odd_mask = np.mod(r, 2) > 0
        probs = np.zeros(r.shape)
        probs[odd_mask] = self.runs_prob_odd(r[odd_mask])
        probs[~odd_mask] = self.runs_prob_even(r[~odd_mask])
        return probs

    def cdf(self, r):
        # accumulate pmf over the support 2..r
        support = np.arange(2, r + 1)
        total = self.runs_prob_even(support[::2]).sum()
        total += self.runs_prob_odd(support[1::2]).sum()
        return total
class RunsProb(object):
    '''distribution of success runs of length k or more (classical definition)

    The underlying process is assumed to be a sequence of Bernoulli trials
    of a given length n.

    not sure yet, how to interpret or use the distribution for runs
    of length k or more.

    Musseli also has longest success run, and waiting time distribution
    negative binomial of order k and geometric of order k

    need to compare with Godpole

    need a MonteCarlo function to do some quick tests before doing more
    '''

    def pdf(self, x, k, n, p):
        '''distribution of success runs of length k or more

        Parameters
        ----------
        x : float
            count of runs of length n
        k : int
            length of runs
        n : int
            total number of observations or trials
        p : float
            probability of success in each Bernoulli trial

        Returns
        -------
        pdf : float
            probability that x runs of length of k are observed

        Notes
        -----
        not yet vectorized

        References
        ----------
        Muselli 1996, theorem 3
        '''
        fail = 1 - p
        # alternating inclusion-exclusion sum over m = x .. (n+1)//(k+1)
        mm = np.arange(x, (n + 1) // (k + 1) + 1)[:, None]
        alt_sign = (-1)**(mm - x)
        terms = (alt_sign * comb(mm, x) * p**(mm * k) * fail**(mm - 1)
                 * (comb(n - mm * k, mm - 1) + fail * comb(n - mm * k, mm)))
        return terms.sum(0)

    def pdf_nb(self, x, k, n, p):
        # negative binomial of order k — not implemented yet
        pass
        #y = np.arange(m-1, n-mk+1
# NOTE(review): ad-hoc doctest-style scratch notes kept as a bare module-level
# string so they never execute; the printed values come from a previous
# interactive session and are not verified by any test suite.
'''
>>> [np.sum([RunsProb().pdf(xi, k, 16, 10/16.) for xi in range(0,16)]) for k in range(16)]
[0.99999332193894064, 0.99999999999999367, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> [(np.arange(0,16) * [RunsProb().pdf(xi, k, 16, 10/16.) for xi in range(0,16)]).sum() for k in range(16)]
[6.9998931510341809, 4.1406249999999929, 2.4414062500000075, 1.4343261718749996, 0.83923339843749856, 0.48875808715820324, 0.28312206268310569, 0.1629814505577086, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
>>> np.array([(np.arange(0,16) * [RunsProb().pdf(xi, k, 16, 10/16.) for xi in range(0,16)]).sum() for k in range(16)])/11
array([ 0.63635392,  0.37642045,  0.22194602,  0.13039329,  0.07629395,
        0.04443255,  0.02573837,  0.0148165 ,  0.        ,  0.        ,
        0.        ,  0.        ,  0.        ,  0.        ,  0.        ,  0.        ])
>>> np.diff([(np.arange(0,16) * [RunsProb().pdf(xi, k, 16, 10/16.) for xi in range(0,16)]).sum() for k in range(16)][::-1])
array([ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
        0.        ,  0.        ,  0.16298145,  0.12014061,  0.20563602,
        0.35047531,  0.59509277,  1.00708008,  1.69921875,  2.85926815])
'''
def median_test_ksample(x, groups):
    '''chisquare test for equality of median/location

    This tests whether all groups have the same fraction of observations
    above the median.

    Parameters
    ----------
    x : array_like
        data values stacked for all groups
    groups : array_like
        group labels or indicator

    Returns
    -------
    stat : float
        test statistic
    pvalue : float
        pvalue from the chisquare distribution

    others ????
       currently some test output, table and expected

    '''
    x = np.asarray(x)
    gruni = np.unique(groups)
    # split the pooled sample by group label
    xli = [x[groups == group] for group in gruni]
    # location is measured relative to the *pooled* median
    xmedian = np.median(x)
    counts_larger = np.array([(xg > xmedian).sum() for xg in xli])
    counts = np.array([len(xg) for xg in xli])
    counts_smaller = counts - counts_larger
    nobs = counts.sum()
    n_larger = (x > xmedian).sum()
    n_smaller = nobs - n_larger
    # 2 x k contingency table: row 0 = at-or-below median, row 1 = above median
    table = np.vstack((counts_smaller, counts_larger))

    # the following should be replaced by chisquare_contingency table
    expected = np.vstack((counts * 1. / nobs * n_smaller,
                          counts * 1. / nobs * n_larger))

    if (expected < 5).any():
        # BUG FIX: the implicit string concatenation was missing separating
        # spaces, producing "expectedobservations" / "goodapproximation".
        print('Warning: There are cells with less than 5 expected '
              'observations. The chisquare distribution might not be a good '
              'approximation for the true distribution.')

    # check ddof
    return stats.chisquare(table.ravel(), expected.ravel(), ddof=1), table, expected
def cochrans_q(x):
    '''Cochran's Q test for identical effect of k treatments.

    Cochran's Q is a k-sample extension of the McNemar test; with only two
    treatments the two tests are equivalent. The Null hypothesis is that the
    probability of success is the same for each treatment; the alternative
    is that at least two treatments differ.

    Parameters
    ----------
    x : array_like, 2d (N,k)
        data with N cases and k variables

    Returns
    -------
    q_stat : float
        test statistic
    pvalue : float
        pvalue from the chisquare distribution

    Notes
    -----
    In Wikipedia terminology, rows are blocks and columns are treatments.
    N should be large for the chisquare distribution to be a good
    approximation.

    References
    ----------
    http://en.wikipedia.org/wiki/Cochran_test
    SAS Manual for NPAR TESTS
    '''
    warnings.warn("Deprecated, use stats.cochrans_q instead", DeprecationWarning)
    data = np.asarray(x)
    levels = np.unique(data)
    n_blocks, k = data.shape
    # "success" is the largest observed level
    success = (data == levels[-1])
    count_row_success = success.sum(1, float)
    count_col_success = success.sum(0, float)
    count_row_ss = count_row_success.sum()
    count_col_ss = count_col_success.sum()
    assert count_row_ss == count_col_ss  # both count every success once

    # SAS manual formulation; the denominator is k times the column variance.
    # Wikipedia uses a different but equivalent expression:
    ##  q_stat = (k-1) * (k * np.sum(count_row_success**2) - count_row_ss**2) \
    ##           / (k * count_col_ss - np.sum(count_col_success**2))
    q_stat = (k - 1) * (k * np.sum(count_col_success ** 2) - count_col_ss ** 2) \
             / (k * count_row_ss - np.sum(count_row_success ** 2))
    return q_stat, stats.chi2.sf(q_stat, k - 1)
def mcnemar(x, y=None, exact=True, correction=True):
    '''McNemar test

    Parameters
    ----------
    x, y : array_like
        two paired data samples. If y is None, then x can be a 2 by 2
        contingency table. x and y can have more than one dimension, then
        the results are calculated under the assumption that axis zero
        contains the observation for the samples.
    exact : bool
        If exact is true, then the binomial distribution will be used.
        If exact is false, then the chisquare distribution will be used, which
        is the approximation to the distribution of the test statistic for
        large sample sizes.
    correction : bool
        If true, then a continuity correction is used for the chisquare
        distribution (if exact is false.)

    Returns
    -------
    stat : float or int, array
        The test statistic is the chisquare statistic if exact is false. If the
        exact binomial distribution is used, then this contains the min(n1, n2),
        where n1, n2 are cases that are zero in one sample but one in the other
        sample.
    pvalue : float or array
        p-value of the null hypothesis of equal effects.

    Raises
    ------
    ValueError
        If y is None and x is not a square 2 by 2 contingency table.

    Notes
    -----
    This is a special case of Cochran's Q test. The results when the chisquare
    distribution is used are identical, except for continuity correction.
    '''
    warnings.warn("Deprecated, use stats.TableSymmetry instead", DeprecationWarning)
    x = np.asarray(x)
    if y is None:
        # x is interpreted as a contingency table.
        # BUG FIX: previously a non-square (or 1-d) x with y=None fell
        # through to the paired-sample branch and was compared against
        # ``None``; fail loudly with a clear message instead.
        if x.ndim != 2 or x.shape[0] != x.shape[1]:
            raise ValueError('when y is None, x needs to be a square contingency table')
        if x.shape[0] != 2:
            raise ValueError('table needs to be 2 by 2')
        # discordant pairs: off-diagonal cells
        n1, n2 = x[1, 0], x[0, 1]
    else:
        # I'm not checking here whether x and y are binary,
        # isn't this also paired sign test
        n1 = np.sum(x < y, 0)
        n2 = np.sum(x > y, 0)

    if exact:
        stat = np.minimum(n1, n2)
        # binom is symmetric with p=0.5
        pval = stats.binom.cdf(stat, n1 + n2, 0.5) * 2
        pval = np.minimum(pval, 1)  # limit to 1 if n1==n2
    else:
        corr = int(correction)  # convert bool to 0 or 1
        stat = (np.abs(n1 - n2) - corr)**2 / (1. * (n1 + n2))
        df = 1
        pval = stats.chi2.sf(stat, df)
    return stat, pval
def symmetry_bowker(table):
    '''Test for symmetry of a (k, k) square contingency table.

    This extends the McNemar test to the Null hypothesis that the table is
    symmetric around the main diagonal, i.e. n_{i, j} = n_{j, i} for all i, j.

    Parameters
    ----------
    table : array_like, 2d, (k, k)
        a square contingency table with counts for k categories in rows
        and columns

    Returns
    -------
    statistic : float
        chisquare test statistic
    p-value : float
        p-value of the test statistic based on chisquare distribution
    df : int
        degrees of freedom of the chisquare distribution

    Notes
    -----
    Based on the SAS documentation; R folds this into `mcnemar.test` when
    the table is larger than 2 by 2. The chisquare p-value requires a
    sample that is not very small; for 2x2 tables the exact distribution
    is available via `mcnemar`.

    See Also
    --------
    mcnemar
    '''
    warnings.warn("Deprecated, use stats.TableSymmetry instead", DeprecationWarning)
    table = np.asarray(table)
    nrows, ncols = table.shape
    if nrows != ncols:
        raise ValueError('table needs to be square')

    # pair each upper-triangle cell with its mirrored lower-triangle cell
    # (np.tril_indices would not give the matching Fortran order)
    upper = np.triu_indices(nrows, 1)
    above = table[upper]     # upper triangle, row order
    below = table.T[upper]   # lower triangle, column order
    # tiny constant guards against 0/0 for empty symmetric pairs
    stat = ((below - above) ** 2 / (below + above + 1e-20)).sum()
    df = nrows * (nrows - 1) / 2.
    pval = stats.chi2.sf(stat, df)
    return stat, pval, df
if __name__ == '__main__':
    # Ad-hoc smoke tests: exercise the runs tests and helpers defined above.
    # The last two calls use random data, so their output varies per run.
    x1 = np.array([1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1])
    # one-sample Wald-Wolfowitz runs tests on the binary sequence
    print(Runs(x1).runs_test())
    print(runstest_1samp(x1, cutoff='mean'))
    # two-sample runs test: x1 serves as the group indicator
    print(runstest_2samp(np.arange(16,0,-1), groups=x1))
    # exact CDF of the total-runs distribution for (n0, n1) = (7, 9)
    print(TotalRunsProb(7,9).cdf(11))
    print(median_test_ksample(np.random.randn(100), np.random.randint(0,2,100)))
    print(cochrans_q(np.random.randint(0,2,(100,8))))
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import argparse
from oslo_config import cfg
from st2common import log as logging
from st2actions import config
from st2actions.runners.pythonrunner import Action
from st2actions.runners.utils import get_logger_for_python_runner_action
from st2actions.runners.utils import get_action_class_instance
from st2common.util import loader as action_loader
from st2common.util.config_loader import ContentPackConfigLoader
from st2common.constants.action import ACTION_OUTPUT_RESULT_DELIMITER
from st2common.constants.keyvalue import SYSTEM_SCOPE
from st2common.constants.runners import PYTHON_RUNNER_INVALID_ACTION_STATUS_EXIT_CODE
from st2common.service_setup import db_setup
from st2common.services.datastore import DatastoreService
# Public API of this module.
__all__ = [
    'PythonActionWrapper',
    'ActionService'
]

LOG = logging.getLogger(__name__)

# Written to stderr (verbatim) when an action's run() returns a 2-tuple whose
# first item is not a boolean status flag; see the status handling in
# PythonActionWrapper.run().
INVALID_STATUS_ERROR_MESSAGE = """
If this is an existing action which returns a tuple with two items, it needs to be updated to
either:
1. Return a list instead of a tuple
2. Return a tuple where a first items is a status flag - (True, ('item1', 'item2'))
For more information, please see: https://docs.stackstorm.com/upgrade_notes.html#st2-v1-6
""".strip()
class ActionService(object):
    """
    Instance of this class is passed to the action instance and exposes "public"
    methods which can be called by the action.

    All datastore methods delegate to an underlying DatastoreService which is
    scoped to the wrapper's pack and action class name.
    """

    def __init__(self, action_wrapper):
        """
        :param action_wrapper: PythonActionWrapper which owns the action; its
                               pack and class name scope the datastore access.
        """
        logger = get_logger_for_python_runner_action(action_name=action_wrapper._class_name)

        self._action_wrapper = action_wrapper
        self._datastore_service = DatastoreService(logger=logger,
                                                   pack_name=self._action_wrapper._pack,
                                                   class_name=self._action_wrapper._class_name,
                                                   api_username='action_service')

    ##################################
    # Methods for datastore management
    ##################################

    def list_values(self, local=True, prefix=None):
        """List datastore values, optionally filtered by key prefix."""
        return self._datastore_service.list_values(local, prefix)

    def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):
        """Retrieve a single datastore value by name."""
        return self._datastore_service.get_value(name, local, scope=scope, decrypt=decrypt)

    def set_value(self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False):
        """Store a datastore value, optionally with a TTL and encryption."""
        return self._datastore_service.set_value(name, value, ttl, local, scope=scope,
                                                 encrypt=encrypt)

    def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):
        """Delete a datastore value.

        BUG FIX: ``scope`` was previously accepted but silently dropped when
        delegating, so deletes always ran against the service's default scope.
        It is now forwarded like in get_value / set_value.
        """
        return self._datastore_service.delete_value(name, local, scope=scope)
class PythonActionWrapper(object):
    """
    Process-level wrapper which loads a Python action module, runs the action
    with the provided parameters and writes the JSON-serialized result to
    stdout between ACTION_OUTPUT_RESULT_DELIMITER markers so the parent
    runner process can parse it.
    """

    def __init__(self, pack, file_path, parameters=None, user=None, parent_args=None):
        """
        :param pack: Name of the pack this action belongs to.
        :type pack: ``str``

        :param file_path: Path to the action module.
        :type file_path: ``str``

        :param parameters: action parameters.
        :type parameters: ``dict`` or ``None``

        :param user: Name of the user who triggered this action execution.
        :type user: ``str``

        :param parent_args: Command line arguments passed to the parent process.
        :type parent_args: ``list``
        """
        self._pack = pack
        self._file_path = file_path
        self._parameters = parameters or {}
        self._user = user
        self._parent_args = parent_args or []
        # NOTE(review): stays None here; ActionService reads it when building
        # its logger — presumably populated elsewhere, confirm.
        self._class_name = None
        self._logger = logging.getLogger('PythonActionWrapper')

        try:
            # Best-effort: parent args may contain flags the oslo config
            # registry doesn't know about, so failures are ignored.
            config.parse_args(args=self._parent_args)
        except Exception:
            pass

        # Establish the DB connection before any datastore access happens.
        db_setup()

        # Note: We can only set a default user value if one is not provided after parsing the
        # config
        if not self._user:
            self._user = cfg.CONF.system_user.user

    def run(self):
        """Instantiate and run the action, then emit the result protocol.

        Exits the process with PYTHON_RUNNER_INVALID_ACTION_STATUS_EXIT_CODE
        when the action returns a 2-tuple whose first item is not a boolean.
        """
        action = self._get_action_instance()
        output = action.run(**self._parameters)

        if isinstance(output, tuple) and len(output) == 2:
            # run() method returned status and data - (status, data)
            action_status = output[0]
            action_result = output[1]
        else:
            # run() method returned only data, no status (pre StackStorm v1.6)
            action_status = None
            action_result = output

        action_output = {
            'result': action_result,
            'status': None
        }

        if action_status is not None and not isinstance(action_status, bool):
            # 2-tuple with a non-boolean first item: ambiguous legacy return
            # value — refuse it with a dedicated exit code.
            sys.stderr.write('Status returned from the action run() method must either be '
                             'True or False, got: %s\n' % (action_status))
            sys.stderr.write(INVALID_STATUS_ERROR_MESSAGE)
            sys.exit(PYTHON_RUNNER_INVALID_ACTION_STATUS_EXIT_CODE)

        if action_status is not None and isinstance(action_status, bool):
            action_output['status'] = action_status

        try:
            print_output = json.dumps(action_output)
        except Exception:
            # Result is not JSON-serializable; fall back to repr-style output.
            print_output = str(action_output)

        # Print output to stdout so the parent can capture it
        sys.stdout.write(ACTION_OUTPUT_RESULT_DELIMITER)
        sys.stdout.write(print_output + '\n')
        sys.stdout.write(ACTION_OUTPUT_RESULT_DELIMITER)

    def _get_action_instance(self):
        """Load the action class from self._file_path and instantiate it
        with its pack config and an ActionService bound to this wrapper.

        :raises Exception: if the file doesn't exist or defines no action class.
        """
        actions_cls = action_loader.register_plugin(Action, self._file_path)
        # The loader may register multiple classes; only the first one is used.
        action_cls = actions_cls[0] if actions_cls and len(actions_cls) > 0 else None

        if not action_cls:
            raise Exception('File "%s" has no action or the file doesn\'t exist.' %
                            (self._file_path))

        config_loader = ContentPackConfigLoader(pack_name=self._pack, user=self._user)
        config = config_loader.get_config()

        if config:
            LOG.info('Found config for action "%s"' % (self._file_path))
        else:
            LOG.info('No config found for action "%s"' % (self._file_path))
            # Normalize empty config (e.g. {}) to None for the action.
            config = None

        action_service = ActionService(action_wrapper=self)
        action_instance = get_action_class_instance(action_cls=action_cls,
                                                    config=config,
                                                    action_service=action_service)
        return action_instance
if __name__ == '__main__':
    # Build the CLI; every option mirrors what the parent runner process passes.
    arg_parser = argparse.ArgumentParser(description='Python action runner process wrapper')
    for flag, is_required, help_text in [
            ('--pack', True, 'Name of the pack this action belongs to'),
            ('--file-path', True, 'Path to the action module'),
            ('--parameters', False, 'Serialized action parameters'),
            ('--user', False, 'User who triggered the action execution'),
            ('--parent-args', False, 'Command line arguments passed to the parent process')]:
        arg_parser.add_argument(flag, required=is_required, help=help_text)
    namespace = arg_parser.parse_args()

    # Both parameters and parent args arrive JSON-serialized (or not at all).
    action_parameters = json.loads(namespace.parameters) if namespace.parameters else {}
    forwarded_args = json.loads(namespace.parent_args) if namespace.parent_args else []
    assert isinstance(forwarded_args, list)

    wrapper = PythonActionWrapper(pack=namespace.pack,
                                  file_path=namespace.file_path,
                                  parameters=action_parameters,
                                  user=namespace.user,
                                  parent_args=forwarded_args)
    wrapper.run()
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Public API of this module.
__all__ = ["setup_ranger_plugin", "get_audit_configs", "generate_ranger_service_config"]
import os
import ambari_simplejson as json
from datetime import datetime
from resource_management.libraries.functions.ranger_functions import Rangeradmin
from resource_management.core.resources import File, Directory, Execute
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.core.logger import Logger
from resource_management.core.source import DownloadSource, InlineTemplate
from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
from resource_management.core.utils import PasswordString
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.default import default
def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
                        component_downloaded_custom_connector, component_driver_curl_source,
                        component_driver_curl_target, java_home,
                        repo_name, plugin_repo_dict,
                        ranger_env_properties, plugin_properties,
                        policy_user, policymgr_mgr_url,
                        plugin_enabled, conf_dict, component_user, component_group,
                        cache_service_list, plugin_audit_properties, plugin_audit_attributes,
                        plugin_security_properties, plugin_security_attributes,
                        plugin_policymgr_ssl_properties, plugin_policymgr_ssl_attributes,
                        component_list, audit_db_is_enabled, credential_file,
                        xa_audit_db_password, ssl_truststore_password,
                        ssl_keystore_password, api_version=None, stack_version_override = None, skip_if_rangeradmin_down = True,
                        is_security_enabled = False, is_stack_supports_ranger_kerberos = False,
                        component_user_principal = None, component_user_keytab = None, cred_lib_path_override = None, cred_setup_prefix_override = None):
  """
  Configure a Ranger plugin for a component.

  When plugin_enabled is truthy: stage the audit-DB JDBC driver (if audit DB
  is enabled), register the service repository in Ranger Admin (unless a
  policy cache file already shows it exists), write the plugin's
  audit/security/policymgr-SSL XML configs with passwords scrubbed, symlink
  the plugin jars and populate the JCEKS credential keystore. When disabled,
  only the ranger-security.xml enablement marker is removed.

  NOTE(review): this module's format() comes from resource_management and
  interpolates from the *caller's local variables*, so local names here are
  part of the behavior and must not be renamed.
  """
  # Stage the JDBC driver jar needed for DB-based auditing; a source URL
  # ending in "/None" means no custom connector was supplied.
  if audit_db_is_enabled and component_driver_curl_source is not None and not component_driver_curl_source.endswith("/None"):
    if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
      # Drop the jar left behind by a previously configured driver.
      File(previous_jdbc_jar, action='delete')

    File(component_downloaded_custom_connector,
      content = DownloadSource(component_driver_curl_source),
      mode = 0644
    )

    Execute(('cp', '--remove-destination', component_downloaded_custom_connector, component_driver_curl_target),
            path=["/bin", "/usr/bin/"],
            sudo=True
    )

    File(component_driver_curl_target, mode=0644)

  # Normalize the Ranger Admin URL: no trailing slash.
  if policymgr_mgr_url.endswith('/'):
    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')

  stack_version = stack_version_override
  component_conf_dir = conf_dict

  if plugin_enabled:
    # A non-empty policy cache file naming this repo means the repository was
    # already created in Ranger Admin, so the REST calls can be skipped.
    service_name_exist = False
    policycache_path = os.path.join('/etc', 'ranger', repo_name, 'policycache')
    try:
      for cache_service in cache_service_list:
        policycache_json_file = format('{policycache_path}/{cache_service}_{repo_name}.json')
        if os.path.isfile(policycache_json_file) and os.path.getsize(policycache_json_file) > 0:
          with open(policycache_json_file) as json_file:
            json_data = json.load(json_file)
            if 'serviceName' in json_data and json_data['serviceName'] == repo_name:
              service_name_exist = True
              Logger.info("Skipping Ranger API calls, as policy cache file exists for {0}".format(service_name))
              Logger.warning("If service name for {0} is not created on Ranger Admin UI, then to re-create it delete policy cache file: {1}".format(service_name, policycache_json_file))
              break
    except Exception, err:
      # Best-effort check: a corrupt cache file only costs extra API calls.
      Logger.error("Error occurred while fetching service name from policy cache file.\nError: {0}".format(err))

    if not service_name_exist:
      # Create the repository via the v2 or legacy Ranger Admin REST client.
      if api_version is not None and api_version == 'v2':
        ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url, skip_if_rangeradmin_down=skip_if_rangeradmin_down)
        ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
                                                ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'],
                                                ranger_env_properties['admin_username'], ranger_env_properties['admin_password'],
                                                policy_user, is_security_enabled, is_stack_supports_ranger_kerberos, component_user,
                                                component_user_principal, component_user_keytab)
      else:
        ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url, skip_if_rangeradmin_down=skip_if_rangeradmin_down)
        ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
                                                ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'],
                                                ranger_env_properties['admin_username'], ranger_env_properties['admin_password'],
                                                policy_user)

    # Marker file recording when the plugin was enabled.
    current_datetime = datetime.now()

    File(format('{component_conf_dir}/ranger-security.xml'),
      owner = component_user,
      group = component_group,
      mode = 0644,
      content = InlineTemplate(format('<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'))
    )

    # Policy cache directory and one cache file per cache service, owned by
    # the component so the plugin can persist downloaded policies.
    Directory([os.path.join('/etc', 'ranger', repo_name), os.path.join('/etc', 'ranger', repo_name, 'policycache')],
      owner = component_user,
      group = component_group,
      mode=0775,
      create_parents = True,
      cd_access = 'a'
    )

    for cache_service in cache_service_list:
      File(os.path.join('/etc', 'ranger', repo_name, 'policycache', format('{cache_service}_{repo_name}.json')),
        owner = component_user,
        group = component_group,
        mode = 0644
      )

    # remove plain-text password from xml configs
    plugin_audit_password_property = 'xasecure.audit.destination.db.password'
    plugin_audit_properties_copy = {}
    plugin_audit_properties_copy.update(plugin_audit_properties)
    if plugin_audit_password_property in plugin_audit_properties_copy:
      plugin_audit_properties_copy[plugin_audit_password_property] = "crypted"

    XmlConfig(format('ranger-{service_name}-audit.xml'),
              conf_dir=component_conf_dir,
              configurations=plugin_audit_properties_copy,
              configuration_attributes=plugin_audit_attributes,
              owner = component_user,
              group = component_group,
              mode=0744)

    XmlConfig(format('ranger-{service_name}-security.xml'),
              conf_dir=component_conf_dir,
              configurations=plugin_security_properties,
              configuration_attributes=plugin_security_attributes,
              owner = component_user,
              group = component_group,
              mode=0744)

    # remove plain-text password from xml configs
    plugin_password_properties = ['xasecure.policymgr.clientssl.keystore.password', 'xasecure.policymgr.clientssl.truststore.password']
    plugin_policymgr_ssl_properties_copy = {}
    plugin_policymgr_ssl_properties_copy.update(plugin_policymgr_ssl_properties)
    for prop in plugin_password_properties:
      if prop in plugin_policymgr_ssl_properties_copy:
        plugin_policymgr_ssl_properties_copy[prop] = "crypted"

    # YARN's SSL config file carries a different name.
    if str(service_name).lower() == 'yarn' :
      XmlConfig("ranger-policymgr-ssl-yarn.xml",
                conf_dir=component_conf_dir,
                configurations=plugin_policymgr_ssl_properties_copy,
                configuration_attributes=plugin_policymgr_ssl_attributes,
                owner = component_user,
                group = component_group,
                mode=0744)
    else:
      XmlConfig("ranger-policymgr-ssl.xml",
                conf_dir=component_conf_dir,
                configurations=plugin_policymgr_ssl_properties_copy,
                configuration_attributes=plugin_policymgr_ssl_attributes,
                owner = component_user,
                group = component_group,
                mode=0744)

    # creating symblink should be done by rpm package
    setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)

    setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file,
                                 xa_audit_db_password, ssl_truststore_password, ssl_keystore_password,
                                 component_user, component_group, java_home, cred_lib_path_override, cred_setup_prefix_override)
  else:
    # Plugin disabled: remove the enablement marker file.
    File(format('{component_conf_dir}/ranger-security.xml'),
      action="delete"
    )
def setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list):
  """
  Symlink every jar from the Ranger plugin's lib directory into each listed
  component's library directory so the plugin classes reach the component
  classpath. Kafka and Atlas use non-standard library locations.

  NOTE(review): format() interpolates from local variable names here
  (stack_root, jar_file, component, ...), so they must not be renamed.
  """
  if service_name == 'nifi':
    # NiFi is explicitly skipped — presumably handled elsewhere; confirm.
    return None

  stack_root = '/usr/lib'
  jar_files = os.listdir(format('{stack_root}/ranger-{service_name}-plugin/lib'))

  for jar_file in jar_files:
    for component in component_list:
      if component == "kafka":
        # Kafka keeps its jars under libs/ (plural).
        Execute(('ln','-sf',format('{stack_root}/ranger-{service_name}-plugin/lib/{jar_file}'),format('{stack_root}/{component}/libs/{jar_file}')),
                not_if=format('ls {stack_root}/lib/{jar_file}'),
                only_if=format('ls {stack_root}/ranger-{service_name}-plugin/lib/{jar_file}'),
                sudo=True)
      elif component == "atlas" or component == "atlas-server":
        # Atlas loads jars from its webapp WEB-INF/lib directory; note this
        # branch has no not_if/only_if guards, unlike the others.
        Execute(('ln','-sf',format('{stack_root}/ranger-{service_name}-plugin/lib/{jar_file}'),format('{stack_root}/{component}/server/webapp/atlas/WEB-INF/lib/{jar_file}')),
                sudo=True)
      else:
        Execute(('ln','-sf',format('{stack_root}/ranger-{service_name}-plugin/lib/{jar_file}'),format('{stack_root}/{component}/lib/{jar_file}')),
                not_if=format('ls {stack_root}/lib/{jar_file}'),
                only_if=format('ls {stack_root}/ranger-{service_name}-plugin/lib/{jar_file}'),
                sudo=True)
def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file, xa_audit_db_password,
                                 ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home, cred_lib_path_override = None, cred_setup_prefix_override = None):
  """
  Store the audit-DB and SSL keystore/truststore passwords in the plugin's
  credential file via the Ranger credential helper script, then restrict the
  file's permissions. PasswordString keeps the secrets out of logged output.
  """
  stack_root = Script.get_stack_root()
  service_name = str(service_name).lower()

  if cred_lib_path_override is not None:
    cred_lib_path = cred_lib_path_override
  else:
    cred_lib_path = format('{stack_root}/ranger-{service_name}-plugin/install/lib/*')

  if cred_setup_prefix_override is not None:
    cred_setup_prefix = cred_setup_prefix_override
  else:
    # Base command tuple; each invocation below appends its own key/value.
    cred_setup_prefix = (format('{stack_root}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)

  if audit_db_is_enabled:
    # The audit DB credential is only needed when DB auditing is enabled.
    cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'auditDBCred', '-v', PasswordString(xa_audit_db_password), '-c', '1')
    Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslKeyStore', '-v', PasswordString(ssl_keystore_password), '-c', '1')
  Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslTrustStore', '-v', PasswordString(ssl_truststore_password), '-c', '1')
  Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  # Credential store must not be world-readable.
  File(credential_file,
    owner = component_user,
    group = component_group,
    mode = 0640
  )
def setup_core_site_for_required_plugins(component_user, component_group, create_core_site_path, config):
  """
  Write core-site.xml into the given directory from the cluster's 'core-site'
  configuration section, owned by the component user/group.
  """
  XmlConfig('core-site.xml',
            conf_dir=create_core_site_path,
            configurations=config['configurations']['core-site'],
            configuration_attributes=config['configuration_attributes']['core-site'],
            owner=component_user,
            group=component_group,
            mode=0644
  )
def get_audit_configs(config):
  """
  Derive JDBC connection settings for the Ranger audit database from the
  cluster configuration ('admin-properties' section).

  :return: tuple of (jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url,
           jdbc_driver) for the configured DB_FLAVOR
  :raises ValueError: if DB_FLAVOR is not mysql/oracle/postgres/mssql/sqla

  NOTE(review): format() interpolates from local names (xa_db_host,
  xa_audit_db_name), so those must not be renamed.
  """
  xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR'].lower()
  xa_db_host = config['configurations']['admin-properties']['db_host']
  xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')

  if xa_audit_db_flavor == 'mysql':
    jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
    previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
    audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
    jdbc_driver = "com.mysql.jdbc.Driver"
  elif xa_audit_db_flavor == 'oracle':
    jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
    previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
    # 0 or 2 colons in the host means a full TNS/SID style address; otherwise
    # the host:port form needs the '//' prefix.
    colon_count = xa_db_host.count(':')
    if colon_count == 2 or colon_count == 0:
      audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
    else:
      audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
    jdbc_driver = "oracle.jdbc.OracleDriver"
  elif xa_audit_db_flavor == 'postgres':
    jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
    previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
    audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
    jdbc_driver = "org.postgresql.Driver"
  elif xa_audit_db_flavor == 'mssql':
    jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
    previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
    audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
    jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
  elif xa_audit_db_flavor == 'sqla':
    jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
    previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
    audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
    jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
  else:
    # BUG FIX: an unsupported flavor previously fell through and crashed with
    # an UnboundLocalError at the return statement; fail with a clear message.
    raise ValueError("Unsupported Ranger audit DB_FLAVOR: {0}".format(xa_audit_db_flavor))

  return jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver
def generate_ranger_service_config(ranger_plugin_properties):
  """
  Extract custom Ranger service-config parameters from plugin properties.

  Keys prefixed with ``ranger.service.config.param.`` are returned with the
  prefix stripped; all other keys are ignored.

  :param ranger_plugin_properties: dict of plugin property name -> value
  :return: dict of the de-prefixed custom service config parameters

  Fixes: replaced Python-2-only ``iteritems()`` (and the needless dict copy)
  with ``items()``, which behaves identically on both Python 2 and 3; the
  prefix is now removed by slicing instead of ``str.replace``, which also
  stripped occurrences of the prefix *inside* the remaining key.
  """
  prefix = "ranger.service.config.param."
  custom_service_config_dict = {}
  for key, value in ranger_plugin_properties.items():
    if key.startswith(prefix):
      custom_service_config_dict[key[len(prefix):]] = value
  return custom_service_config_dict
|
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from os import urandom
from json import (
dumps,
loads,
)
from collections import OrderedDict
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.padding import PKCS7
from azure.core.exceptions import HttpResponseError
from .._version import VERSION
from . import encode_base64, decode_base64_to_bytes
# Version identifier of the client-side encryption protocol this module writes.
_ENCRYPTION_PROTOCOL_V1 = '1.0'
# Error template used when a key-encryption-key object is missing a required
# callable attribute; {0} = object description, {1} = attribute name.
_ERROR_OBJECT_INVALID = \
    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
def _validate_not_none(param_name, param):
if param is None:
raise ValueError('{0} should not be None.'.format(param_name))
def _validate_key_encryption_key_wrap(kek):
# Note that None is not callable and so will fail the second clause of each check.
if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
class _EncryptionAlgorithm(object):
'''
Specifies which client encryption algorithm is used.
'''
AES_CBC_256 = 'AES_CBC_256'
class _WrappedContentKey:
    '''
    Envelope key details as stored on the service: the wrapped (encrypted)
    content key plus the algorithm and key identifier used to wrap it.
    '''

    def __init__(self, algorithm, encrypted_key, key_id):
        '''
        :param str algorithm:
            The algorithm used for wrapping.
        :param bytes encrypted_key:
            The encrypted content-encryption-key.
        :param str key_id:
            The key-encryption-key identifier string.
        '''
        # All three fields are mandatory; reject None early.
        for arg_name, arg_value in (('algorithm', algorithm),
                                    ('encrypted_key', encrypted_key),
                                    ('key_id', key_id)):
            _validate_not_none(arg_name, arg_value)

        self.algorithm = algorithm
        self.encrypted_key = encrypted_key
        self.key_id = key_id
class _EncryptionAgent:
    '''
    Encryption agent metadata stored on the service: the protocol version
    and the encryption algorithm used.
    '''

    def __init__(self, encryption_algorithm, protocol):
        '''
        :param _EncryptionAlgorithm encryption_algorithm:
            The algorithm used for encrypting the message contents.
        :param str protocol:
            The protocol version used for encryption.
        '''
        for arg_name, arg_value in (('encryption_algorithm', encryption_algorithm),
                                    ('protocol', protocol)):
            _validate_not_none(arg_name, arg_value)

        self.protocol = protocol
        # Normalize the algorithm to its string form for serialization.
        self.encryption_algorithm = str(encryption_algorithm)
class _EncryptionData:
    '''
    Represents the encryption data that is stored on the service: the IV,
    the agent (protocol + algorithm), the wrapped content key, and any
    key-wrapping metadata.
    '''

    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
                 key_wrapping_metadata):
        '''
        :param bytes content_encryption_IV:
            The content encryption initialization vector.
        :param _EncryptionAgent encryption_agent:
            The encryption agent.
        :param _WrappedContentKey wrapped_content_key:
            An object that stores the wrapping algorithm, the key identifier,
            and the encrypted key bytes.
        :param dict key_wrapping_metadata:
            A dict containing metadata related to the key wrapping.
        '''
        # key_wrapping_metadata is deliberately not validated: it is optional.
        _validate_not_none('content_encryption_IV', content_encryption_IV)
        _validate_not_none('encryption_agent', encryption_agent)
        _validate_not_none('wrapped_content_key', wrapped_content_key)
        self.key_wrapping_metadata = key_wrapping_metadata
        self.wrapped_content_key = wrapped_content_key
        self.encryption_agent = encryption_agent
        self.content_encryption_IV = content_encryption_IV
def _generate_encryption_data_dict(kek, cek, iv):
    '''
    Generates and returns the encryption metadata as a dict.

    :param object kek: The key encryption key. See calling functions for more information.
    :param bytes cek: The content encryption key.
    :param bytes iv: The initialization vector.
    :return: A dict containing all the encryption metadata.
    :rtype: dict
    '''
    # Wrap (encrypt) the content-encryption-key with the user's kek.
    wrapped_cek = kek.wrap_key(cek)
    # OrderedDicts built from pair lists keep the key order Java clients expect.
    wrapped_content_key = OrderedDict(
        [('KeyId', kek.get_kid()),
         ('EncryptedKey', encode_base64(wrapped_cek)),
         ('Algorithm', kek.get_key_wrap_algorithm())])
    encryption_agent = OrderedDict(
        [('Protocol', _ENCRYPTION_PROTOCOL_V1),
         ('EncryptionAlgorithm', _EncryptionAlgorithm.AES_CBC_256)])
    return OrderedDict(
        [('WrappedContentKey', wrapped_content_key),
         ('EncryptionAgent', encryption_agent),
         ('ContentEncryptionIV', encode_base64(iv)),
         ('KeyWrappingMetadata', {'EncryptionLibrary': 'Python ' + VERSION})])
def _dict_to_encryption_data(encryption_data_dict):
    '''
    Converts the specified dictionary to an _EncryptionData object for
    eventual use in decryption.

    :param dict encryption_data_dict:
        The dictionary containing the encryption data.
    :return: an _EncryptionData object built from the dictionary.
    :rtype: _EncryptionData
    :raises ValueError: if the protocol field is absent or not version 1.0.
    '''
    # A missing protocol field and an unrecognized version are rejected alike.
    try:
        protocol = encryption_data_dict['EncryptionAgent']['Protocol']
    except KeyError:
        raise ValueError("Unsupported encryption version.")
    if protocol != _ENCRYPTION_PROTOCOL_V1:
        raise ValueError("Unsupported encryption version.")
    wck_dict = encryption_data_dict['WrappedContentKey']
    wrapped_content_key = _WrappedContentKey(
        wck_dict['Algorithm'],
        decode_base64_to_bytes(wck_dict['EncryptedKey']),
        wck_dict['KeyId'])
    agent_dict = encryption_data_dict['EncryptionAgent']
    encryption_agent = _EncryptionAgent(
        agent_dict['EncryptionAlgorithm'],
        agent_dict['Protocol'])
    # KeyWrappingMetadata is optional; default to None when absent.
    key_wrapping_metadata = encryption_data_dict.get('KeyWrappingMetadata')
    return _EncryptionData(
        decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
        encryption_agent,
        wrapped_content_key,
        key_wrapping_metadata)
def _generate_AES_CBC_cipher(cek, iv):
    '''
    Generates and returns an encryption cipher for AES CBC using the given cek and iv.

    :param bytes[] cek: The content encryption key for the cipher.
    :param bytes[] iv: The initialization vector for the cipher.
    :return: A cipher for encrypting in AES256 CBC.
    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
    '''
    return Cipher(AES(cek), CBC(iv), default_backend())
def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
    '''
    Extracts and returns the content_encryption_key stored in the encryption_data object
    and performs necessary validation on all parameters.
    :param _EncryptionData encryption_data:
        The encryption metadata of the retrieved value.
    :param obj key_encryption_key:
        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
        instance variables for more details.
    :param func key_resolver:
        A function used that, given a key_id, will return a key_encryption_key. Please refer
        to high-level service object instance variables for more details.
    :return: the content_encryption_key stored in the encryption_data object.
    :rtype: bytes[]
    :raises ValueError: if the protocol version is unsupported, the key ids do not
        match, or a required value is None.
    :raises AttributeError: if the (resolved) key_encryption_key does not expose
        callable get_kid/unwrap_key methods.
    '''
    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
    if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
        raise ValueError('Encryption version is not supported.')
    content_encryption_key = None
    # If the resolver exists, give priority to the key it finds.
    if key_resolver is not None:
        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
    # A kek must be available at this point, either passed in or resolved,
    # and it must expose the unwrap-side interface.
    _validate_not_none('key_encryption_key', key_encryption_key)
    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
    # The stored key id must match the kek's id, proving it is the right key.
    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
    # Will throw an exception if the specified algorithm is not supported.
    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
                                                           encryption_data.wrapped_content_key.algorithm)
    _validate_not_none('content_encryption_key', content_encryption_key)
    return content_encryption_key
def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
    '''
    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
    Returns the original plaintext.
    :param str message:
        The ciphertext to be decrypted.
    :param _EncryptionData encryption_data:
        The metadata associated with this ciphertext.
    :param object key_encryption_key:
        The user-provided key-encryption-key. Must implement the following methods:
        unwrap_key(key, algorithm)
            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
        get_kid()
            - returns a string key id for this key-encryption-key.
    :param function resolver(kid):
        The user-provided key resolver. Uses the kid string to return a key-encryption-key
        implementing the interface defined above.
    :return: The decrypted plaintext.
    :rtype: str
    '''
    _validate_not_none('message', message)
    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
        raise ValueError('Specified encryption algorithm is not supported.')
    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
    # Decrypt, then strip the PKCS7 (128-bit block) padding that was applied
    # at encryption time.
    decryptor = cipher.decryptor()
    decrypted = decryptor.update(message) + decryptor.finalize()
    unpadder = PKCS7(128).unpadder()
    return unpadder.update(decrypted) + unpadder.finalize()
def encrypt_blob(blob, key_encryption_key):
    '''
    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
    Returns a json-formatted string containing the encryption metadata. This method should
    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
    is done as a part of the upload_data_chunks method.
    :param bytes blob:
        The blob to be encrypted.
    :param object key_encryption_key:
        The user-provided key-encryption-key. Must implement the following methods:
        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
        get_kid()--returns a string key id for this key-encryption-key.
    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
    :rtype: (str, bytes)
    '''
    _validate_not_none('blob', blob)
    _validate_not_none('key_encryption_key', key_encryption_key)
    _validate_key_encryption_key_wrap(key_encryption_key)
    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
    # NOTE(review): bare urandom here (vs os.urandom in encrypt_queue_message)
    # assumes 'from os import urandom' at module scope -- confirm upstream imports.
    content_encryption_key = urandom(32)
    initialization_vector = urandom(16)
    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
    # PKCS7 with 16 byte blocks ensures compatibility with AES.
    padder = PKCS7(128).padder()
    padded_data = padder.update(blob) + padder.finalize()
    # Encrypt the data.
    encryptor = cipher.encryptor()
    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
                                                     initialization_vector)
    # Single-shot upload always encrypts the whole blob.
    encryption_data['EncryptionMode'] = 'FullBlob'
    return dumps(encryption_data), encrypted_data
def generate_blob_encryption_data(key_encryption_key):
    '''
    Generates the encryption_metadata for the blob.
    :param bytes key_encryption_key:
        The key-encryption-key used to wrap the cek associate with this blob.
    :return: A tuple containing the cek and iv for this blob as well as the
        serialized encryption metadata for the blob.
    :rtype: (bytes, bytes, str)
    '''
    # When no kek is supplied, encryption is disabled and all three results
    # remain None.
    encryption_data = None
    content_encryption_key = None
    initialization_vector = None
    if key_encryption_key:
        _validate_key_encryption_key_wrap(key_encryption_key)
        # AES256 key (32 bytes) plus a 16-byte CBC initialization vector.
        content_encryption_key = urandom(32)
        initialization_vector = urandom(16)
        encryption_data = _generate_encryption_data_dict(key_encryption_key,
                                                         content_encryption_key,
                                                         initialization_vector)
        encryption_data['EncryptionMode'] = 'FullBlob'
        encryption_data = dumps(encryption_data)
    return content_encryption_key, initialization_vector, encryption_data
def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
                 content, start_offset, end_offset, response_headers):
    '''
    Decrypts the given blob contents and returns only the requested range.

    :param bool require_encryption:
        Whether or not the calling blob service requires objects to be decrypted.
    :param object key_encryption_key:
        The user-provided key-encryption-key. Must implement the following methods:
        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
        get_kid()--returns a string key id for this key-encryption-key.
    :param key_resolver(kid):
        The user-provided key resolver. Uses the kid string to return a key-encryption-key
        implementing the interface defined above.
    :param bytes content:
        The downloaded (encrypted) blob content.
    :param int start_offset:
        Number of extra bytes at the start of the decrypted content to discard
        (downloads are expanded to cipher-block boundaries).
    :param int end_offset:
        Number of extra bytes at the end of the decrypted content to discard.
    :param dict response_headers:
        Response headers of the download; supplies the encryption metadata
        ('x-ms-meta-encryptiondata'), the blob type, and the content range.
    :return: The decrypted blob content.
    :rtype: bytes
    '''
    try:
        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
    except:  # pylint: disable=bare-except
        if require_encryption:
            # Fix: corrected the 'metatadata' typo and added the missing space
            # between the two concatenated sentences.
            raise ValueError(
                'Encryption required, but received data does not contain appropriate metadata. ' + \
                'Data was either not encrypted or metadata has been lost.')
        # No usable metadata and encryption not required: pass content through.
        return content
    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
        raise ValueError('Specified encryption algorithm is not supported.')
    blob_type = response_headers['x-ms-blob-type']
    iv = None
    unpad = False
    if 'content-range' in response_headers:
        content_range = response_headers['content-range']
        # Format: 'bytes x-y/size'
        # Ignore the word 'bytes'
        content_range = content_range.split(' ')
        content_range = content_range[1].split('-')
        content_range = content_range[1].split('/')
        end_range = int(content_range[0])
        blob_size = int(content_range[1])
        if start_offset >= 16:
            # The preceding cipher block was included in the download; it
            # serves as the IV for CBC decryption of this range.
            iv = content[:16]
            content = content[16:]
            start_offset -= 16
        else:
            # The range starts inside the first block, so the stored IV applies.
            iv = encryption_data.content_encryption_IV
        if end_range == blob_size - 1:
            # The range reaches the end of the blob, so PKCS7 padding is present.
            unpad = True
    else:
        # Full-blob download: use the stored IV and strip the padding.
        unpad = True
        iv = encryption_data.content_encryption_IV
    if blob_type == 'PageBlob':
        # Page blobs are written in fixed-size pages and carry no padding.
        unpad = False
    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
    decryptor = cipher.decryptor()
    content = decryptor.update(content) + decryptor.finalize()
    if unpad:
        unpadder = PKCS7(128).unpadder()
        content = unpadder.update(content) + unpadder.finalize()
    # Trim the block-alignment bytes that were added around the requested range.
    return content[start_offset: len(content) - end_offset]
def get_blob_encryptor_and_padder(cek, iv, should_pad):
    """Return an (encryptor, padder) pair for chunked blob upload encryption.

    Both are None when no content-encryption-key/IV is supplied (encryption
    disabled); the padder is None when *should_pad* is False (non-final chunks).
    """
    if cek is None or iv is None:
        return None, None
    cipher = _generate_AES_CBC_cipher(cek, iv)
    if should_pad:
        return cipher.encryptor(), PKCS7(128).padder()
    return cipher.encryptor(), None
def encrypt_queue_message(message, key_encryption_key):
    '''
    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
    Returns a json-formatted string containing the encrypted message and the encryption metadata.
    :param object message:
        The plain text message to be encrypted.
    :param object key_encryption_key:
        The user-provided key-encryption-key. Must implement the following methods:
        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
        get_kid()--returns a string key id for this key-encryption-key.
    :return: A json-formatted string containing the encrypted message and the encryption metadata.
    :rtype: str
    '''
    _validate_not_none('message', message)
    _validate_not_none('key_encryption_key', key_encryption_key)
    _validate_key_encryption_key_wrap(key_encryption_key)
    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
    content_encryption_key = os.urandom(32)
    initialization_vector = os.urandom(16)
    # Queue encoding functions all return unicode strings, and encryption should
    # operate on binary strings.
    message = message.encode('utf-8')
    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
    # PKCS7 with 16 byte blocks ensures compatibility with AES.
    padder = PKCS7(128).padder()
    padded_data = padder.update(message) + padder.finalize()
    # Encrypt the data.
    encryptor = cipher.encryptor()
    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
    # Build the dictionary structure.
    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
                                                                      content_encryption_key,
                                                                      initialization_vector)}
    return dumps(queue_message)
def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
    '''
    Returns the decrypted message contents from an EncryptedQueueMessage.
    If no encryption metadata is present, will return the unaltered message.
    :param str message:
        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
    :param response:
        The pipeline response object; its http_response is attached to any
        decryption error that is raised.
    :param bool require_encryption:
        If set, will enforce that the retrieved messages are encrypted and decrypt them.
    :param object key_encryption_key:
        The user-provided key-encryption-key. Must implement the following methods:
        unwrap_key(key, algorithm)
            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
        get_kid()
            - returns a string key id for this key-encryption-key.
    :param function resolver(kid):
        The user-provided key resolver. Uses the kid string to return a key-encryption-key
        implementing the interface defined above.
    :return: The plain text message from the queue message.
    :rtype: str
    '''
    response = response.http_response
    try:
        message = loads(message)
        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
    except (KeyError, ValueError):
        # Message was not json formatted and so was not encrypted
        # or the user provided a json formatted message.
        if require_encryption:
            raise ValueError('Message was not encrypted.')
        return message
    try:
        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
    except Exception as error:
        # Surface decryption failures as HttpResponseError with the original
        # response attached for diagnostics.
        raise HttpResponseError(
            message="Decryption failed.",
            response=response,
            error=error)
|
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Various utilities
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname(os.path.realpath(__file__))
>>> datadir = os.path.realpath(os.path.join(filepath,
... '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import os
import re
import numpy as np
import nibabel as nb
from ..base import (traits, TraitedSpec, DynamicTraitedSpec, File,
Undefined, isdefined, OutputMultiPath, InputMultiPath,
BaseInterface, BaseInterfaceInputSpec, Str)
from ..io import IOBase, add_traits
from ...utils.filemanip import filename_to_list, copyfile, split_filename
class IdentityInterface(IOBase):
    """Basic interface class generates identity mappings
    Examples
    --------
    >>> from nipype.interfaces.utility import IdentityInterface
    >>> ii = IdentityInterface(fields=['a', 'b'], mandatory_inputs=False)
    >>> ii.inputs.a
    <undefined>
    >>> ii.inputs.a = 'foo'
    >>> out = ii._outputs()
    >>> out.a
    <undefined>
    >>> out = ii.run()
    >>> out.outputs.a # doctest: +ALLOW_UNICODE
    'foo'
    >>> ii2 = IdentityInterface(fields=['a', 'b'], mandatory_inputs=True)
    >>> ii2.inputs.a = 'foo'
    >>> out = ii2.run() # doctest: +SKIP
    ValueError: IdentityInterface requires a value for input 'b' because it was listed in 'fields' Interface IdentityInterface failed to run.
    """
    # Inputs and outputs are created dynamically from the 'fields' argument.
    input_spec = DynamicTraitedSpec
    output_spec = DynamicTraitedSpec

    def __init__(self, fields=None, mandatory_inputs=True, **inputs):
        super(IdentityInterface, self).__init__(**inputs)
        if fields is None or not fields:
            raise ValueError('Identity Interface fields must be a non-empty list')
        # Each input must be in the fields.
        for in_field in inputs:
            if in_field not in fields:
                raise ValueError('Identity Interface input is not in the fields: %s' % in_field)
        self._fields = fields
        self._mandatory_inputs = mandatory_inputs
        add_traits(self.inputs, fields)
        # Adding any traits wipes out all input values set in superclass initialization,
        # even if the trait is not in the add_traits argument. The work-around is to reset
        # the values after adding the traits.
        self.inputs.trait_set(**inputs)

    def _add_output_traits(self, base):
        # Outputs mirror the declared input fields one-to-one.
        return add_traits(base, self._fields)

    def _list_outputs(self):
        # manual mandatory inputs check
        if self._fields and self._mandatory_inputs:
            for key in self._fields:
                value = getattr(self.inputs, key)
                if not isdefined(value):
                    msg = "%s requires a value for input '%s' because it was listed in 'fields'. \
You can turn off mandatory inputs checking by passing mandatory_inputs = False to the constructor." % \
                        (self.__class__.__name__, key)
                    raise ValueError(msg)
        # Copy every defined input value straight through to the output of
        # the same name (the identity mapping).
        outputs = self._outputs().get()
        for key in self._fields:
            val = getattr(self.inputs, key)
            if isdefined(val):
                outputs[key] = val
        return outputs
class MergeInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
    """Input specification for the Merge interface (in1..inN are added dynamically)."""
    axis = traits.Enum('vstack', 'hstack', usedefault=True,
                       desc='direction in which to merge, hstack requires same number of elements in each input')
    no_flatten = traits.Bool(False, usedefault=True,
                             desc='append to outlist instead of extending in vstack mode')
    ravel_inputs = traits.Bool(False, usedefault=True,
                               desc='ravel inputs when no_flatten is False')
class MergeOutputSpec(TraitedSpec):
    """Output specification for the Merge interface."""
    out = traits.List(desc='Merged output')
def _ravel(in_val):
if not isinstance(in_val, list):
return in_val
flat_list = []
for val in in_val:
raveled_val = _ravel(val)
if isinstance(raveled_val, list):
flat_list.extend(raveled_val)
else:
flat_list.append(raveled_val)
return flat_list
class Merge(IOBase):
    """Basic interface class to merge inputs into a single list
    ``Merge(1)`` will merge a list of lists
    Examples
    --------
    >>> from nipype.interfaces.utility import Merge
    >>> mi = Merge(3)
    >>> mi.inputs.in1 = 1
    >>> mi.inputs.in2 = [2, 5]
    >>> mi.inputs.in3 = 3
    >>> out = mi.run()
    >>> out.outputs.out
    [1, 2, 5, 3]
    >>> merge = Merge(1)
    >>> merge.inputs.in1 = [1, [2, 5], 3]
    >>> out = merge.run()
    >>> out.outputs.out
    [1, [2, 5], 3]
    >>> merge = Merge(1)
    >>> merge.inputs.in1 = [1, [2, 5], 3]
    >>> merge.inputs.ravel_inputs = True
    >>> out = merge.run()
    >>> out.outputs.out
    [1, 2, 5, 3]
    >>> merge = Merge(1)
    >>> merge.inputs.in1 = [1, [2, 5], 3]
    >>> merge.inputs.no_flatten = True
    >>> out = merge.run()
    >>> out.outputs.out
    [[1, [2, 5], 3]]
    """
    input_spec = MergeInputSpec
    output_spec = MergeOutputSpec

    def __init__(self, numinputs=0, **inputs):
        # Dynamically create one input trait per requested slot: in1..inN.
        super(Merge, self).__init__(**inputs)
        self._numinputs = numinputs
        if numinputs >= 1:
            input_names = ['in%d' % (i + 1) for i in range(numinputs)]
        else:
            input_names = []
        add_traits(self.inputs, input_names)

    def _list_outputs(self):
        outputs = self._outputs().get()
        out = []
        if self._numinputs < 1:
            return outputs
        else:
            # Collect only the inputs that were actually set.
            getval = lambda idx: getattr(self.inputs, 'in%d' % (idx + 1))
            values = [getval(idx) for idx in range(self._numinputs)
                      if isdefined(getval(idx))]
            if self.inputs.axis == 'vstack':
                # vstack: concatenate list inputs (optionally raveled) and
                # append scalars, unless no_flatten keeps lists intact.
                for value in values:
                    if isinstance(value, list) and not self.inputs.no_flatten:
                        out.extend(_ravel(value) if self.inputs.ravel_inputs else
                                   value)
                    else:
                        out.append(value)
            else:
                # hstack: transpose the inputs element-wise; all inputs are
                # assumed to have the same length (see axis trait desc).
                lists = [filename_to_list(val) for val in values]
                out = [[val[i] for val in lists] for i in range(len(lists[0]))]
            outputs['out'] = out
        return outputs
class RenameInputSpec(DynamicTraitedSpec):
    """Input specification for Rename (extra fields come from the format string)."""
    in_file = File(exists=True, mandatory=True, desc="file to rename")
    keep_ext = traits.Bool(desc=("Keep in_file extension, replace "
                                 "non-extension component of name"))
    format_string = Str(mandatory=True,
                        desc="Python formatting string for output template")
    parse_string = Str(desc="Python regexp parse string to define "
                            "replacement inputs")
    use_fullpath = traits.Bool(False, usedefault=True,
                               desc="Use full path as input to regex parser")
class RenameOutputSpec(TraitedSpec):
    """Output specification for Rename."""
    # NOTE(review): uses traits.File here while RenameInputSpec uses the File
    # class imported from ..base -- presumably equivalent; confirm upstream.
    out_file = traits.File(exists=True, desc="softlink to original file with new name")
class Rename(IOBase):
    r"""Change the name of a file based on a mapped format string.
    To use additional inputs that will be defined at run-time, the class
    constructor must be called with the format template, and the fields
    identified will become inputs to the interface.
    Additionally, you may set the parse_string input, which will be run
    over the input filename with a regular expressions search, and will
    fill in additional input fields from matched groups. Fields set with
    inputs have precedence over fields filled in with the regexp match.
    Examples
    --------
    >>> from nipype.interfaces.utility import Rename
    >>> rename1 = Rename()
    >>> rename1.inputs.in_file = "zstat1.nii.gz"
    >>> rename1.inputs.format_string = "Faces-Scenes.nii.gz"
    >>> res = rename1.run()          # doctest: +SKIP
    >>> res.outputs.out_file         # doctest: +SKIP
    'Faces-Scenes.nii.gz'            # doctest: +SKIP
    >>> rename2 = Rename(format_string="%(subject_id)s_func_run%(run)02d")
    >>> rename2.inputs.in_file = "functional.nii"
    >>> rename2.inputs.keep_ext = True
    >>> rename2.inputs.subject_id = "subj_201"
    >>> rename2.inputs.run = 2
    >>> res = rename2.run()          # doctest: +SKIP
    >>> res.outputs.out_file         # doctest: +SKIP
    'subj_201_func_run02.nii'        # doctest: +SKIP
    >>> rename3 = Rename(format_string="%(subject_id)s_%(seq)s_run%(run)02d.nii")
    >>> rename3.inputs.in_file = "func_epi_1_1.nii"
    >>> rename3.inputs.parse_string = "func_(?P<seq>\w*)_.*"
    >>> rename3.inputs.subject_id = "subj_201"
    >>> rename3.inputs.run = 2
    >>> res = rename3.run()          # doctest: +SKIP
    >>> res.outputs.out_file         # doctest: +SKIP
    'subj_201_epi_run02.nii'         # doctest: +SKIP
    """
    input_spec = RenameInputSpec
    output_spec = RenameOutputSpec

    def __init__(self, format_string=None, **inputs):
        super(Rename, self).__init__(**inputs)
        if format_string is not None:
            self.inputs.format_string = format_string
            # Every %(field)s placeholder in the template becomes a dynamic
            # input trait of the same name.
            self.fmt_fields = re.findall(r"%\((.+?)\)", format_string)
            add_traits(self.inputs, self.fmt_fields)
        else:
            self.fmt_fields = []

    def _rename(self):
        # Build the substitution dict: regexp named groups first, then any
        # explicitly-set inputs (inputs take precedence).
        fmt_dict = dict()
        if isdefined(self.inputs.parse_string):
            if isdefined(self.inputs.use_fullpath) and self.inputs.use_fullpath:
                m = re.search(self.inputs.parse_string,
                              self.inputs.in_file)
            else:
                m = re.search(self.inputs.parse_string,
                              os.path.split(self.inputs.in_file)[1])
            if m:
                fmt_dict.update(m.groupdict())
        for field in self.fmt_fields:
            val = getattr(self.inputs, field)
            if isdefined(val):
                fmt_dict[field] = getattr(self.inputs, field)
        if self.inputs.keep_ext:
            # Re-attach the original extension (split_filename handles .nii.gz).
            fmt_string = "".join([self.inputs.format_string,
                                  split_filename(self.inputs.in_file)[2]])
        else:
            fmt_string = self.inputs.format_string
        return fmt_string % fmt_dict

    def _run_interface(self, runtime):
        runtime.returncode = 0
        # Copy (not move) the input file to the new name in the working dir.
        _ = copyfile(self.inputs.in_file, os.path.join(os.getcwd(),
                                                       self._rename()))
        return runtime

    def _list_outputs(self):
        outputs = self._outputs().get()
        outputs["out_file"] = os.path.join(os.getcwd(), self._rename())
        return outputs
class SplitInputSpec(BaseInterfaceInputSpec):
    """Input specification for the Split interface."""
    inlist = traits.List(traits.Any, mandatory=True,
                         desc='list of values to split')
    splits = traits.List(traits.Int, mandatory=True,
                         desc='Number of outputs in each split - should add to number of inputs')
    squeeze = traits.Bool(False, usedefault=True,
                          desc='unfold one-element splits removing the list')
class Split(IOBase):
    """Basic interface class to split lists into multiple outputs
    Examples
    --------
    >>> from nipype.interfaces.utility import Split
    >>> sp = Split()
    >>> _ = sp.inputs.trait_set(inlist=[1, 2, 3], splits=[2, 1])
    >>> out = sp.run()
    >>> out.outputs.out1
    [1, 2]
    """
    input_spec = SplitInputSpec
    output_spec = DynamicTraitedSpec

    def _add_output_traits(self, base):
        # Create one dynamic 'out<i>' output trait per requested split.
        undefined_traits = {}
        for i in range(len(self.inputs.splits)):
            key = 'out%d' % (i + 1)
            base.add_trait(key, traits.Any)
            undefined_traits[key] = Undefined
        base.trait_set(trait_change_notify=False, **undefined_traits)
        return base

    def _list_outputs(self):
        outputs = self._outputs().get()
        if isdefined(self.inputs.splits):
            if sum(self.inputs.splits) != len(self.inputs.inlist):
                raise RuntimeError('sum of splits != num of list elements')
            # Cumulative boundaries: split i covers inlist[splits[i]:splits[i+1]].
            splits = [0]
            splits.extend(self.inputs.splits)
            splits = np.cumsum(splits)
            for i in range(len(splits) - 1):
                val = np.array(self.inputs.inlist)[splits[i]:splits[i + 1]].tolist()
                # Optionally unwrap single-element splits to the bare value.
                if self.inputs.squeeze and len(val) == 1:
                    val = val[0]
                outputs['out%d' % (i + 1)] = val
        return outputs
class SelectInputSpec(BaseInterfaceInputSpec):
    """Input specification for the Select interface."""
    inlist = InputMultiPath(traits.Any, mandatory=True,
                            desc='list of values to choose from')
    index = InputMultiPath(traits.Int, mandatory=True,
                           desc='0-based indices of values to choose')
class SelectOutputSpec(TraitedSpec):
    """Output specification for the Select interface."""
    out = OutputMultiPath(traits.Any, desc='list of selected values')
class Select(IOBase):
    """Basic interface class to select specific elements from a list
    Examples
    --------
    >>> from nipype.interfaces.utility import Select
    >>> sl = Select()
    >>> _ = sl.inputs.trait_set(inlist=[1, 2, 3, 4, 5], index=[3])
    >>> out = sl.run()
    >>> out.outputs.out
    4
    >>> _ = sl.inputs.trait_set(inlist=[1, 2, 3, 4, 5], index=[3, 4])
    >>> out = sl.run()
    >>> out.outputs.out
    [4, 5]
    """
    input_spec = SelectInputSpec
    output_spec = SelectOutputSpec

    def _list_outputs(self):
        outputs = self._outputs().get()
        # Fancy-index the list via numpy, then convert back to a plain list.
        out = np.array(self.inputs.inlist)[np.array(self.inputs.index)].tolist()
        outputs['out'] = out
        return outputs
class AssertEqualInputSpec(BaseInterfaceInputSpec):
    """Input specification for AssertEqual: the two volumes to compare."""
    volume1 = File(exists=True, mandatory=True)
    volume2 = File(exists=True, mandatory=True)
class AssertEqual(BaseInterface):
    """Interface that fails unless two image volumes contain identical data."""
    input_spec = AssertEqualInputSpec

    def _run_interface(self, runtime):
        # NOTE(review): get_data() is deprecated in newer nibabel in favor of
        # get_fdata(); switching would change dtype semantics, so it is kept.
        data1 = nb.load(self.inputs.volume1).get_data()
        data2 = nb.load(self.inputs.volume2).get_data()
        if not np.all(data1 == data2):
            raise RuntimeError('Input images are not exactly equal')
        return runtime
|
|
import logging
from django.http import QueryDict
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
from rest_framework import status
from rest_framework import generics
from rest_framework import serializers
from hs_core import hydroshare
from hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \
ExternalProfileLink, Format, FundingAgency, Identifier, Subject, Source, Relation
from hs_core.views import utils as view_utils
from hs_core.views.utils import ACTION_TO_AUTHORIZE
logger = logging.getLogger(__name__)
class ExternalProfileLinkSerializer(serializers.Serializer):
    """Serializes an external profile link attached to a party (creator/contributor)."""
    type = serializers.CharField(required=False)
    url = serializers.URLField(required=False)
    object_id = serializers.IntegerField(required=False)
    # content_type = models.ForeignKey(ContentType)
    # content_object = GenericForeignKey('content_type', 'object_id')

    class Meta:
        model = ExternalProfileLink
class PartySerializer(serializers.Serializer):
    """Serializes common party (person/organization) metadata fields.

    Used directly for contributors and subclassed by CreatorSerializer.
    """
    name = serializers.CharField()
    description = serializers.URLField(required=False)
    organization = serializers.CharField(required=False)
    email = serializers.EmailField(required=False)
    address = serializers.CharField(required=False)
    phone = serializers.CharField(required=False)
    homepage = serializers.URLField(required=False)
    # Bug fix: the original chained assignment
    # ('external_links = serializers = ExternalProfileLinkSerializer(...)')
    # also bound a class attribute named 'serializers', shadowing the
    # rest_framework.serializers module within this class body.
    external_links = ExternalProfileLinkSerializer(required=False, many=True)

    class Meta:
        model = Creator
        # NOTE(review): a set literal is unordered; DRF convention is a
        # list/tuple, though plain Serializer subclasses ignore Meta.fields.
        fields = {'name', 'description', 'organization', 'email',
                  'address', 'phone', 'homepage', 'external_links'}
class CreatorSerializer(PartySerializer):
    """Extends PartySerializer with the creator's display order."""
    order = serializers.IntegerField(required=False)

    class Meta:
        # NOTE(review): model is Contributor although this serializer
        # represents creators -- looks like a copy-paste; plain Serializer
        # subclasses do not consult Meta.model, so behavior is unaffected.
        model = Contributor
class DateSerializer(serializers.Serializer):
    """Serializes a resource Date element (type plus start/end datetimes)."""
    # term = 'Date'
    type = serializers.CharField(required=False)
    start_date = serializers.DateTimeField(required=False)
    end_date = serializers.DateTimeField(required=False)

    class Meta:
        model = Date
class CoverageSerializer(serializers.Serializer):
    """Serializes a Coverage element; 'value' is read from the model instance."""
    type = serializers.CharField(required=False)
    value = serializers.SerializerMethodField(required=False)

    class Meta:
        model = Coverage

    def get_value(self, obj):
        # SerializerMethodField hook: expose the model's value attribute as-is.
        return obj.value
class FormatSerializer(serializers.Serializer):
    """Serializes a Format element (a single MIME-type-like value)."""
    value = serializers.CharField(required=False)

    class Meta:
        model = Format
class FundingAgencySerializer(serializers.Serializer):
    """Serializes a FundingAgency element (agency plus optional award details)."""
    agency_name = serializers.CharField()
    award_title = serializers.CharField(required=False)
    award_number = serializers.CharField(required=False)
    agency_url = serializers.URLField(required=False)

    class Meta:
        model = FundingAgency
class IdentifierSerializer(serializers.Serializer):
    """Serializes an Identifier element (name plus its URL)."""
    name = serializers.CharField(required=False)
    url = serializers.URLField(required=False)

    class Meta:
        model = Identifier
class SubjectSerializer(serializers.Serializer):
    """Serializes a Subject (keyword) element."""
    value = serializers.CharField(required=False)

    class Meta:
        model = Subject
class SourceSerializer(serializers.Serializer):
    """Serializes a Source element (what the resource was derived from)."""
    derived_from = serializers.CharField(required=False)

    class Meta:
        model = Source
class RelationSerializer(serializers.Serializer):
    """Serializes a Relation element (relation type plus related value)."""
    type = serializers.CharField(required=False)
    value = serializers.CharField(required=False)

    class Meta:
        model = Relation
class CoreMetaDataSerializer(serializers.Serializer):
    """Top-level serializer for a resource's science (Dublin Core) metadata.

    Aggregates the per-element serializers above. Every field is declared
    ``required=False`` so that partial metadata documents are representable.
    """
    title = serializers.CharField(required=False)
    creators = CreatorSerializer(required=False, many=True)
    contributors = PartySerializer(required=False, many=True)
    coverages = CoverageSerializer(required=False, many=True)
    dates = DateSerializer(required=False, many=True)
    description = serializers.CharField(required=False)
    formats = FormatSerializer(required=False, many=True)
    funding_agencies = FundingAgencySerializer(required=False, many=True)
    identifiers = IdentifierSerializer(required=False, many=True)
    language = serializers.CharField(required=False)
    rights = serializers.CharField(required=False)
    type = serializers.CharField(required=False)
    publisher = serializers.CharField(required=False)
    sources = SourceSerializer(required=False, many=True)
    subjects = SubjectSerializer(required=False, many=True)
    relations = RelationSerializer(required=False, many=True)

    class Meta:
        model = CoreMetaData
class MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView):
    """
    Retrieve resource science (Dublin Core) metadata

    REST URL: /hsapi/resource/{pk}/scimeta/elements/
    HTTP method: GET

    :type pk: str
    :param pk: id of the resource
    :return: resource science metadata as JSON document
    :rtype: str
    :raises:
    NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}
    PermissionDenied: return json format: {'detail': 'You do not have permission to perform
    this action.'}

    REST URL: /hsapi/resource/{pk}/scimeta/elements/
    HTTP method: PUT

    :type pk: str
    :param pk: id of the resource
    :type request: JSON formatted string
    :param request: resource metadata
    :return: updated resource science metadata as JSON document
    :rtype: str
    :raises:
    NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}
    PermissionDenied: return json format: {'detail': 'You do not have permission to perform
    this action.'}
    ValidationError: return json format: {'parameter-1': ['error message-1'],
    'parameter-2': ['error message-2'], .. }
    """
    ACCEPT_FORMATS = ('application/json',)

    allowed_methods = ('GET', 'PUT')

    serializer_class = CoreMetaDataSerializer

    def get(self, request, pk):
        # Viewing metadata only requires VIEW_METADATA permission.
        view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)
        resource = hydroshare.get_resource_by_shortkey(shortkey=pk)
        serializer = resource.metadata.serializer
        return Response(data=serializer.data, status=status.HTTP_200_OK)

    def put(self, request, pk):
        # Update science metadata
        resource, _, _ = view_utils.authorize(
            request, pk,
            needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)

        metadata = []
        put_data = request.data.copy()

        # convert the QueryDict to dict
        if isinstance(put_data, QueryDict):
            put_data = put_data.dict()
        try:
            resource.metadata.parse_for_bulk_update(put_data, metadata)
            hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)
        except Exception as ex:
            # Any parse/update failure is surfaced to the client as a
            # ValidationError (HTTP 400).
            # NOTE(review): ``ex.message`` is Python-2-only; under Python 3
            # this would need str(ex) -- confirm the target interpreter.
            error_msg = {
                'resource': "Resource metadata update failed: %s, %s"
                            % (ex.__class__, ex.message)
            }
            raise ValidationError(detail=error_msg)

        # Re-fetch so the response reflects the freshly updated metadata.
        resource = hydroshare.get_resource_by_shortkey(shortkey=pk)
        serializer = resource.metadata.serializer
        return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)
|
|
#!/usr/bin/env python3
import os
import sys
import re
import math
import platform
import xml.etree.ElementTree as ET
################################################################################
# Config #
################################################################################
# Runtime configuration: short flag -> current value.
flags = {
    # Disable colors by default on Windows, since we use ANSI escape codes.
    # BUG FIX: this previously tested ``platform.platform() != 'Windows'``;
    # platform.platform() returns a full string such as 'Windows-10-10.0...'
    # and therefore never equals 'Windows', so colors were never disabled.
    # platform.system() returns exactly 'Windows' on Windows.
    'c': platform.system() != 'Windows',
    'b': False,
    'g': False,
    's': False,
    'u': False,
    'h': False,
    'p': False,
    'o': True,
    'i': False,
    'a': True,
}
# Help text for each short flag (must stay in sync with `flags`).
flag_descriptions = {
    'c': 'Toggle colors when outputting.',
    'b': 'Toggle showing only not fully described classes.',
    'g': 'Toggle showing only completed classes.',
    's': 'Toggle showing comments about the status.',
    'u': 'Toggle URLs to docs.',
    'h': 'Show help and exit.',
    'p': 'Toggle showing percentage as well as counts.',
    'o': 'Toggle overall column.',
    'i': 'Toggle collapse of class items columns.',
    'a': 'Toggle showing all items.',
}
# Long option spelling -> short flag it toggles.
long_flags = {
    'colors': 'c',
    'use-colors': 'c',
    'bad': 'b',
    'only-bad': 'b',
    'good': 'g',
    'only-good': 'g',
    'comments': 's',
    'status': 's',
    'urls': 'u',
    'gen-url': 'u',
    'help': 'h',
    'percent': 'p',
    'use-percentages': 'p',
    'overall': 'o',
    'use-overall': 'o',
    'items': 'i',
    'collapse': 'i',
    'all': 'a',
}
# Output-dict keys and their display headers, index-aligned.
table_columns = ['name', 'brief_description', 'description', 'methods', 'constants', 'members', 'signals']
table_column_names = ['Name', 'Brief Desc.', 'Desc.', 'Methods', 'Constants', 'Members', 'Signals']
# Logical color name -> list of ANSI SGR codes.
colors = {
    'name': [36], # cyan
    'part_big_problem': [4, 31], # underline, red
    'part_problem': [31], # red
    'part_mostly_good': [33], # yellow
    'part_good': [32], # green
    'url': [4, 34], # underline, blue
    'section': [1, 4], # bold, underline
    'state_off': [36], # cyan
    'state_on': [1, 35], # bold, magenta/plum
}
# Weight of each description flag in the overall score.
# NOTE: 'weigth' is a typo, but the name is referenced elsewhere in this
# script, so it is kept for compatibility.
overall_progress_description_weigth = 10
################################################################################
# Utils #
################################################################################
def validate_tag(elem, tag):
    """Abort the whole script (exit code 255) unless *elem* is a <tag> element."""
    if elem.tag == tag:
        return
    print('Tag mismatch, expected "' + tag + '", got ' + elem.tag)
    sys.exit(255)
def color(color, string):
    """Wrap *string* in the ANSI codes registered for *color*, honouring the -c flag."""
    if not flags['c']:
        return string
    prefix = ''.join('\033[' + str(code) + 'm' for code in colors[color])
    return prefix + string + '\033[0m'


# Matches a single ANSI escape sequence (ESC up to and including the final 'm').
ansi_escape = re.compile(r'\x1b[^m]*m')


def nonescape_len(s):
    """Length of *s* once all ANSI escape sequences are stripped."""
    return len(ansi_escape.sub('', s))
################################################################################
# Classes #
################################################################################
class ClassStatusProgress:
    """Tracks how many items in one category are described vs. how many exist."""

    def __init__(self, described=0, total=0):
        self.described = described
        self.total = total

    def __add__(self, other):
        return ClassStatusProgress(
            self.described + other.described,
            self.total + other.total
        )

    def increment(self, described):
        """Count one more item; *described* says whether it has documentation."""
        if described:
            self.described += 1
        self.total += 1

    def is_ok(self):
        """True when every item is described."""
        return self.described >= self.total

    def to_configured_colored_string(self):
        """Render with or without percentages, depending on the -p flag."""
        if not flags['p']:
            return self.to_colored_string()
        return self.to_colored_string(
            '{percent}% ({has}/{total})',
            '{pad_percent}{pad_described}{s}{pad_total}'
        )

    def to_colored_string(self, format='{has}/{total}', pad_format='{pad_described}{s}{pad_total}'):
        """Render the counters as a padded, severity-colored cell string."""
        ratio = 1 if self.total == 0 else self.described / self.total
        percent = round(100 * ratio)
        body = format.format(has=str(self.described), total=str(self.total), percent=str(percent))
        # Color by completeness: done / >= 75% / started / untouched.
        if self.described >= self.total:
            body = color('part_good', body)
        elif self.described >= self.total / 4 * 3:
            body = color('part_mostly_good', body)
        elif self.described > 0:
            body = color('part_problem', body)
        else:
            body = color('part_big_problem', body)
        width = max(len(str(self.described)), len(str(self.total)))
        return pad_format.format(
            pad_described=''.ljust(width - len(str(self.described))),
            pad_total=''.ljust(width - len(str(self.total))),
            pad_percent=''.ljust(3 - len(str(percent))),
            s=body
        )
class ClassStatus:
    """Documentation status of a single class: description flags plus a
    described/total progress counter per item category."""

    def __init__(self, name=''):
        self.name = name
        self.has_brief_description = True
        self.has_description = True
        self.progresses = {
            'methods': ClassStatusProgress(),
            'constants': ClassStatusProgress(),
            'members': ClassStatusProgress(),
            'signals': ClassStatusProgress()
        }

    def __add__(self, other):
        # Merge two statuses; used to accumulate the 'Total' row.
        new_status = ClassStatus()
        new_status.name = self.name
        new_status.has_brief_description = self.has_brief_description and other.has_brief_description
        new_status.has_description = self.has_description and other.has_description
        for k in self.progresses:
            new_status.progresses[k] = self.progresses[k] + other.progresses[k]
        return new_status

    def is_ok(self):
        # Fully documented only if both descriptions and every category are complete.
        ok = True
        ok = ok and self.has_brief_description
        ok = ok and self.has_description
        for k in self.progresses:
            ok = ok and self.progresses[k].is_ok()
        return ok

    def make_output(self):
        """Build the dict of (possibly colorized) table cells for this class."""
        output = {}
        output['name'] = color('name', self.name)

        ok_string = color('part_good', 'OK')
        missing_string = color('part_big_problem', 'MISSING')

        output['brief_description'] = ok_string if self.has_brief_description else missing_string
        output['description'] = ok_string if self.has_description else missing_string

        # Weight the two description booleans so they influence the overall score.
        description_progress = ClassStatusProgress(
            (self.has_brief_description + self.has_description) * overall_progress_description_weigth,
            2 * overall_progress_description_weigth
        )
        items_progress = ClassStatusProgress()

        for k in ['methods', 'constants', 'members', 'signals']:
            items_progress += self.progresses[k]
            output[k] = self.progresses[k].to_configured_colored_string()

        output['items'] = items_progress.to_configured_colored_string()

        output['overall'] = (description_progress + items_progress).to_colored_string('{percent}%', '{pad_percent}{s}')

        if self.name.startswith('Total'):
            # The synthetic 'Total' row links to the docs index instead of a class page.
            output['url'] = color('url', 'http://docs.godotengine.org/en/latest/classes/')
            if flags['s']:
                output['comment'] = color('part_good', 'ALL OK')
        else:
            output['url'] = color('url', 'http://docs.godotengine.org/en/latest/classes/class_{name}.html'.format(name=self.name.lower()))

            if flags['s'] and not flags['g'] and self.is_ok():
                output['comment'] = color('part_good', 'ALL OK')

        return output

    def generate_for_class(c):
        # NOTE: no `self` -- intended to be called as ClassStatus.generate_for_class(doc).
        status = ClassStatus()
        status.name = c.attrib['name']

        # setgets do not count
        methods = []
        for tag in list(c):
            if tag.tag in ['methods']:
                for sub_tag in list(tag):
                    methods.append(sub_tag.find('name'))
            if tag.tag in ['members']:
                for sub_tag in list(tag):
                    # Drop this member's setter/getter from the method list,
                    # if present; absent ones raise and are ignored.
                    try:
                        methods.remove(sub_tag.find('setter'))
                        methods.remove(sub_tag.find('getter'))
                    except:
                        pass

        for tag in list(c):
            if tag.tag == 'brief_description':
                status.has_brief_description = len(tag.text.strip()) > 0
            elif tag.tag == 'description':
                status.has_description = len(tag.text.strip()) > 0
            elif tag.tag in ['methods', 'signals']:
                for sub_tag in list(tag):
                    # Setters/getters (removed above) are not counted; all signals are.
                    if sub_tag.find('name') in methods or tag.tag == 'signals':
                        descr = sub_tag.find('description')
                        status.progresses[tag.tag].increment(len(descr.text.strip()) > 0)
            elif tag.tag in ['constants', 'members']:
                for sub_tag in list(tag):
                    status.progresses[tag.tag].increment(len(sub_tag.text.strip()) > 0)
            elif tag.tag in ['tutorials', 'demos']:
                pass # Ignore those tags for now
            elif tag.tag in ['theme_items']:
                pass # Ignore those tags, since they seem to lack description at all
            else:
                # Unknown tag: print it so it can be handled in the future.
                print(tag.tag, tag.attrib)

        return status
################################################################################
# Arguments #
################################################################################
# Collect XML files and class names from the command line, toggle flags,
# and adjust the table layout according to the -i/-o/-u flags.
input_file_list = []
input_class_list = []
merged_file = ""
for arg in sys.argv[1:]:
    if arg.startswith('--'):
        # Long option: toggle the mapped short flag (KeyError on unknown option).
        flags[long_flags[arg[2:]]] = not flags[long_flags[arg[2:]]]
    elif arg.startswith('-'):
        # Short options may be bundled, e.g. -bp.
        for f in arg[1:]:
            flags[f] = not flags[f]
    elif os.path.isdir(arg):
        # A directory argument contributes all of its .xml files.
        for f in os.listdir(arg):
            if f.endswith('.xml'):
                input_file_list.append(os.path.join(arg, f));
    else:
        # Anything else is treated as a class name to report on.
        input_class_list.append(arg)
if flags['i']:
    # Collapse the four per-category columns into a single 'Items' column.
    for r in ['methods', 'constants', 'members', 'signals']:
        index = table_columns.index(r)
        del table_column_names[index]
        del table_columns[index]
    table_column_names.append('Items')
    table_columns.append('items')
if flags['o'] == (not flags['i']):
    table_column_names.append('Overall')
    table_columns.append('overall')
if flags['u']:
    table_column_names.append('Docs URL')
    table_columns.append('url')
################################################################################
# Help #
################################################################################
# Print usage/help and exit when no classes directory was given or -h was set.
if len(input_file_list) < 1 or flags['h']:
    if not flags['h']:
        print(color('section', 'Invalid usage') + ': Please specify a classes directory')
    print(color('section', 'Usage') + ': doc_status.py [flags] <classes_dir> [class names]')
    print('\t< and > signify required parameters, while [ and ] signify optional parameters.')
    print(color('section', 'Available flags') + ':')
    possible_synonym_list = list(long_flags)
    possible_synonym_list.sort()
    flag_list = list(flags)
    flag_list.sort()
    for flag in flag_list:
        # Show the short flag, all its long synonyms, and the current state.
        synonyms = [color('name', '-' + flag)]
        for synonym in possible_synonym_list:
            if long_flags[synonym] == flag:
                synonyms.append(color('name', '--' + synonym))
        print(('{synonyms} (Currently ' + color('state_' + ('on' if flags[flag] else 'off'), '{value}') + ')\n\t{description}').format(
            synonyms=', '.join(synonyms),
            value=('on' if flags[flag] else 'off'),
            description=flag_descriptions[flag]
        ))
    sys.exit(0)
################################################################################
# Parse class list #
################################################################################
# Parse every XML file into an ElementTree root, keyed by class name.
class_names = []
classes = {}
for file in input_file_list:
    tree = ET.parse(file)
    doc = tree.getroot()
    if 'version' not in doc.attrib:
        print('Version missing from "doc"')
        sys.exit(255)
    version = doc.attrib['version']
    # The first definition of a class wins; duplicates are skipped.
    if doc.attrib['name'] in class_names:
        continue
    class_names.append(doc.attrib['name'])
    classes[doc.attrib['name']] = doc
class_names.sort()
# Without explicit class names on the command line, report on everything.
if len(input_class_list) < 1:
    input_class_list = class_names
################################################################################
# Make output table #
################################################################################
# Build the output table: one row per (unfiltered) class, plus a running total.
table = [table_column_names]
table_row_chars = '| - '
table_column_chars = '|'
total_status = ClassStatus('Total')
for cn in input_class_list:
    if not cn in classes:
        print('Cannot find class ' + cn + '!')
        sys.exit(255)
    c = classes[cn]
    validate_tag(c, 'class')
    status = ClassStatus.generate_for_class(c)
    # The total accumulates every class, even ones filtered from display.
    total_status = total_status + status
    # Honour the filter flags: -b (only bad), -g (only good), -a (all).
    if (flags['b'] and status.is_ok()) or (flags['g'] and not status.is_ok()) or (not flags['a']):
        continue
    out = status.make_output()
    row = []
    for column in table_columns:
        if column in out:
            row.append(out[column])
        else:
            row.append('')
    if 'comment' in out and out['comment'] != '':
        row.append(out['comment'])
    table.append(row)
################################################################################
# Print output table #
################################################################################
# Render the table with box-drawing characters and color-aware padding.
if len(table) == 1 and flags['a']:
    print(color('part_big_problem', 'No classes suitable for printing!'))
    sys.exit(0)
# Append the aggregated 'Total' row when more than one class row was produced.
if len(table) > 2 or not flags['a']:
    total_status.name = 'Total = {0}'.format(len(table) - 1)
    out = total_status.make_output()
    row = []
    for column in table_columns:
        if column in out:
            row.append(out[column])
        else:
            row.append('')
    table.append(row)
# Column widths are computed on visible text (ANSI escapes stripped).
table_column_sizes = []
for row in table:
    for cell_i, cell in enumerate(row):
        if cell_i >= len(table_column_sizes):
            table_column_sizes.append(0)
        table_column_sizes[cell_i] = max(nonescape_len(cell), table_column_sizes[cell_i])
divider_string = table_row_chars[0]
for cell_i in range(len(table[0])):
    divider_string += table_row_chars[1] + table_row_chars[2] * (table_column_sizes[cell_i]) + table_row_chars[1] + table_row_chars[0]
print(divider_string)
for row_i, row in enumerate(table):
    row_string = table_column_chars
    for cell_i, cell in enumerate(row):
        padding_needed = table_column_sizes[cell_i] - nonescape_len(cell) + 2
        if cell_i == 0:
            # First column (class name) is left-aligned; the rest are centred.
            row_string += table_row_chars[3] + cell + table_row_chars[3] * (padding_needed - 1)
        else:
            row_string += table_row_chars[3] * math.floor(padding_needed / 2) + cell + table_row_chars[3] * math.ceil((padding_needed / 2))
        row_string += table_column_chars
    print(row_string)
    # Divider after the header row and before the 'Total' row.
    if row_i == 0 or row_i == len(table) - 2:
        print(divider_string)
print(divider_string)
if total_status.is_ok() and not flags['g']:
    print('All listed classes are ' + color('part_good', 'OK') + '!')
|
|
import os
from copy import deepcopy
################################################################################
############################# IDENTIFICATION TREES #############################
################################################################################
class Classifier:
    """Wraps a named classification function over data points (dicts).

    ``classify(point)`` applies the wrapped function to *point*; a missing
    attribute (KeyError) is reported as a ClassifierError.
    """
    def __init__(self, name, classify_fn):
        self.name = str(name)
        self._classify_fn = classify_fn

    def classify(self, point):
        """Apply the wrapped function to *point*."""
        try:
            return self._classify_fn(point)
        # FIX: 'except E as name' works on Python 2.6+ and Python 3; the
        # original 'except KeyError, key' is Python-2-only syntax.
        except KeyError as key:
            raise ClassifierError("point has no attribute " + str(key) + ": " + str(point))

    def copy(self):
        """Return an independent deep copy of this classifier."""
        return deepcopy(self)

    def __eq__(self, other):
        # Two classifiers are equal when their names match and their wrapped
        # functions compile to identical bytecode.
        try:
            return (self.name == other.name
                    and self._classify_fn.__code__.co_code == other._classify_fn.__code__.co_code)
        # FIX: narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are not swallowed; any non-comparable object is simply unequal.
        except Exception:
            return False

    def __str__(self):
        return "Classifier<" + str(self.name) + ">"

    __repr__ = __str__
## HELPER FUNCTIONS FOR CREATING CLASSIFIERS
def maybe_number(x):
    """Return float(x) when x looks numeric; otherwise return x unchanged."""
    try:
        return float(x)
    except (ValueError, TypeError):
        return x
def feature_test(key):
    """Build a Classifier that looks up *key* in a point, coercing to number when possible."""
    def _lookup(pt):
        return maybe_number(pt[key])
    return Classifier(key, _lookup)
def threshold_test(feature, threshold):
    """Build a Classifier answering "Yes"/"No" to: point[feature] > threshold."""
    label = feature + " > " + str(threshold)
    def _test(pt):
        return "Yes" if (maybe_number(pt.get(feature)) > threshold) else "No"
    return Classifier(label, _test)
## CUSTOM ERROR CLASSES
class NoGoodClassifiersError(ValueError):
    """Raised when no candidate classifier is useful for splitting the data."""
    def __init__(self, value=""):
        self.value = value
    def __str__(self):
        return repr(self.value)
class ClassifierError(RuntimeError):
    """Raised when a classifier cannot be applied to a data point."""
    def __init__(self, value=""):
        self.value = value
    def __str__(self):
        return repr(self.value)
class IdentificationTreeNode:
    """One node of an identification (decision) tree.

    A node is either a leaf carrying a classification value, or an internal
    node carrying a Classifier and one child node per observed feature value.
    NOTE(review): this class uses Python-2-only constructs (print statements,
    dict.has_key, the builtin reduce) and will not run under Python 3 as-is.
    """
    def __init__(self, target_classifier, parent_branch_name=None):
        self.target_classifier = target_classifier
        self._parent_branch_name = parent_branch_name
        self._classification = None #value, if leaf node
        self._classifier = None #Classifier, if tree continues
        self._children = {} #dict mapping feature to node, if tree continues
        self._data = [] #only used temporarily for printing with data
    def get_parent_branch_name(self):
        # Human-readable label of the branch leading to this node.
        return self._parent_branch_name if self._parent_branch_name else "(Root node: no parent branch)"
    def is_leaf(self):
        # A node without a classifier is a leaf.
        return not self._classifier
    def set_node_classification(self, classification):
        # Turn this node into a leaf; any existing subtree is discarded.
        self._classification = classification
        if self._classifier:
            print "Warning: Setting the classification", classification, "converts this node from a subtree to a leaf, overwriting its previous classifier:", self._classifier
        self._classifier = None
        self._children = {}
        return self
    def get_node_classification(self):
        return self._classification
    def set_classifier_and_expand(self, classifier, features):
        # Turn this node into an internal node with one child per feature value.
        if classifier is None:
            raise TypeError("Cannot set classifier to None")
        if not isinstance_Classifier(classifier):
            raise TypeError("classifier must be Classifier-type object: " + str(classifier))
        self._classifier = classifier
        try:
            self._children = {feature:IdentificationTreeNode(self.target_classifier, parent_branch_name=str(feature))
                              for feature in features}
        except TypeError:
            raise TypeError("Expected list of feature names, got: " + str(features))
        if len(self._children) == 1:
            print "Warning: The classifier", classifier.name, "has only one relevant feature, which means it's not a useful test!"
        if self._classification:
            print "Warning: Setting the classifier", classifier.name, "converts this node from a leaf to a subtree, overwriting its previous classification:", self._classification
        self._classification = None
        return self
    def get_classifier(self):
        return self._classifier
    def apply_classifier(self, point):
        # Follow the branch selected by classifying *point* at this node.
        if self._classifier is None:
            raise ClassifierError("Cannot apply classifier at leaf node")
        return self._children[self._classifier.classify(point)]
    def get_branches(self):
        return self._children
    def copy(self):
        return deepcopy(self)
    def print_with_data(self, data):
        # Render a copy of the tree with *data* distributed onto its nodes.
        tree = self.copy()
        tree._assign_data(data)
        print tree.__str__(with_data=True)
    def _assign_data(self, data):
        # Recursively push each point down the branch its features select.
        if not self._classifier:
            self._data = deepcopy(data)
            return self
        try:
            pairs = self._soc(data, self._classifier).items()
        except KeyError: #one of the points is missing a feature
            raise ClassifierError("One or more points cannot be classified by " + str(self._classifier))
        for (feature, branch_data) in pairs:
            if self._children.has_key(feature):
                self._children[feature]._assign_data(branch_data)
            else: #feature branch doesn't exist
                self._data.extend(branch_data)
        return self
    # _ssc: set classifier and expand, using the branches observed in data d.
    _ssc=lambda self,c,d:self.set_classifier_and_expand(c,self._soc(d,c))
    # _soc: "split on classifier" -- bucket the points d into a dict keyed by
    # c.classify(point), built up via reduce.
    _soc=lambda self,d,c:reduce(lambda b,p:b.__setitem__(c.classify(p),b.get(c.classify(p),[])+[p]) or b,d,{})
    def __eq__(self, other):
        try:
            return (self.target_classifier == other.target_classifier
                    and self._parent_branch_name == other._parent_branch_name
                    and self._classification == other._classification
                    and self._classifier == other._classifier
                    and self._children == other._children
                    and self._data == other._data)
        except:
            return False
    def __str__(self, indent=0, with_data=False):
        # Recursive pretty-printer; *indent* is the depth of this node.
        newline = os.linesep
        ret = ''
        if indent == 0:
            ret += (newline + "IdentificationTreeNode classifying by "
                    + self.target_classifier.name + ":" + newline)
        ret += " "*indent + (self._parent_branch_name + ": " if self._parent_branch_name else '')
        if self._classifier:
            ret += self._classifier.name
            if with_data and self._data:
                ret += self._render_points()
            for (feature, node) in sorted(self._children.items()):
                ret += newline + node.__str__(indent+1, with_data)
        else: #leaf
            ret += str(self._classification)
            if with_data and self._data:
                ret += self._render_points()
        return ret
    def _render_points(self):
        # Format this node's assigned points as "(name: target, ...)".
        ret = ' ('
        first_point = True
        for point in self._data:
            if first_point:
                first_point = False
            else:
                ret += ', '
            ret += str(point.get("name","datapoint")) + ": "
            try:
                ret += str(self.target_classifier.classify(point))
            except ClassifierError:
                ret += '(unknown)'
        ret += ')'
        return ret
    __repr__ = __str__
def is_class_instance(obj, class_name):
    """Duck-typed instance check: does obj's class carry this exact name?"""
    cls = getattr(obj, '__class__', None)
    return cls is not None and cls.__name__ == class_name
# Name-based type checks, so reloaded modules still compare correctly.
def isinstance_Classifier(obj):
    return is_class_instance(obj, 'Classifier')

def isinstance_IdentificationTreeNode(obj):
    return is_class_instance(obj, 'IdentificationTreeNode')

def isinstance_Point(obj):
    return is_class_instance(obj, 'Point')
|
|
# Copyright 2017 Ravi Sojitra. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
from numpy.testing import assert_allclose
from plda import plda
from plda.plda.model import (
transform_D_to_X,
transform_X_to_U,
transform_U_to_U_model,
transform_U_model_to_U,
transform_U_to_X,
transform_X_to_D
)
from plda.plda.optimizer import optimize_maximum_likelihood
from sklearn.decomposition import PCA
from plda.tests.utils import generate_data
@pytest.fixture(scope='module')
def data_dict():
    """Seeded synthetic dataset (K=10 classes, 1000 points each, 50-D), built once per module.

    FIX: the scope was previously passed positionally (pytest.fixture('module')),
    which modern pytest rejects; it must be the ``scope`` keyword argument.
    """
    np.random.seed(1234)

    return generate_data(n_k=1000, K=10, dimensionality=50)
@pytest.fixture(scope='module')
def model(data_dict):
    """PLDA model fit once per module on the shared dataset.

    FIX: scope is now passed via the ``scope`` keyword (positional scope
    arguments are rejected by modern pytest).
    """
    return plda.Model(data_dict['data'], data_dict['labels'])
@pytest.fixture(scope='module')
def expected_parameters(data_dict):
    """Ground-truth parameters from running the optimizer directly on the data.

    FIX: scope is now passed via the ``scope`` keyword (positional scope
    arguments are rejected by modern pytest).
    """
    X = data_dict['data']
    Y = data_dict['labels']
    params = optimize_maximum_likelihood(X, Y)

    # optimize_maximum_likelihood returns the parameters positionally;
    # name them for readable assertions downstream.
    params_dict = dict()
    params_dict['m'] = params[0]
    params_dict['A'] = params[1]
    params_dict['Psi'] = params[2]
    params_dict['relevant_U_dims'] = params[3]
    params_dict['inv_A'] = params[4]

    return params_dict
def test_maximum_likelihood_optimized_parameters(model, expected_parameters):
    """Model parameters must match a direct run of the optimizer.

    BUG FIX: the original version only *defined* nested functions
    (test_m, test_Psi, ...) and never called them, so none of these
    assertions ever executed. They now run directly.
    """
    assert_allclose(model.m, expected_parameters['m'])
    assert_allclose(model.Psi, expected_parameters['Psi'])
    assert_allclose(model.A, expected_parameters['A'])
    assert_allclose(model.inv_A, expected_parameters['inv_A'])
    assert_allclose(model.relevant_U_dims,
                    expected_parameters['relevant_U_dims'])
def test_pca(data_dict, model):
    """model.pca must be fitted only when the data's covariance is rank-deficient."""
    # When data has a full rank covariance matrix.
    assert model.pca is None

    # When the data does NOT have a full rank covariance matrix.
    # Padding with 10 constant-zero columns makes the covariance singular.
    shape = data_dict['data'].shape
    data = np.zeros((shape[0], shape[1] + 10))
    data[:, :shape[1]] = data_dict['data']

    actual = plda.Model(data, data_dict['labels'])

    assert actual.pca is not None
    assert isinstance(actual.pca, PCA)
    # PCA should map the padded dimensionality back down to the true rank.
    assert actual.pca.n_features_ == data.shape[1]
    assert actual.pca.n_components == shape[1]
def test_get_dimensionality(data_dict, model):
    """Dimensionality per space: D is the raw width, X/U the (PCA-reduced)
    width, and U_model is K - 1 for K classes."""
    # When data has a full rank covariance matrix.
    dim = data_dict['data'].shape[1]
    K = len(data_dict['means'])

    assert model.get_dimensionality('D') == dim
    assert model.get_dimensionality('X') == dim
    assert model.get_dimensionality('U') == dim
    assert model.get_dimensionality('U_model') == K - 1

    # When the data does NOT have a full rank covariance matrix.
    # Zero-padding widens D while the effective rank stays `dim`.
    shape = data_dict['data'].shape
    data = np.zeros((shape[0], shape[1] + 10))
    data[:, :shape[1]] = data_dict['data']

    actual = plda.Model(data, data_dict['labels'])

    assert actual.get_dimensionality('D') == data.shape[-1]
    assert actual.get_dimensionality('X') == dim
    assert actual.get_dimensionality('U') == dim
    assert actual.get_dimensionality('U_model') == K - 1
def test_transform(data_dict, model):
    """model.transform must match composing the individual helper transforms,
    in both directions, for full-rank and rank-deficient training data."""
    # When training data does have a full rank covariance matrix.
    # D to U_model.
    X = data_dict['data']

    expected = transform_D_to_X(X, model.pca)
    expected = transform_X_to_U(expected, model.inv_A, model.m)
    expected = transform_U_to_U_model(expected, model.relevant_U_dims)

    actual = model.transform(X, from_space='D', to_space='U_model')
    assert_allclose(actual, expected)

    # U_model to D.
    dim = model.get_dimensionality('U')
    expected = transform_U_model_to_U(actual, model.relevant_U_dims, dim)
    expected = transform_U_to_X(expected, model.A, model.m)
    expected = transform_X_to_D(expected, model.pca)

    actual = model.transform(actual, from_space='U_model', to_space='D')
    # BUG FIX: the original computed `actual` here but never compared it.
    assert_allclose(actual, expected)

    # When training data does not have a full rank covariance matrix.
    # D to U_model.
    shape = data_dict['data'].shape
    data = np.zeros((shape[0], shape[1] + 10))
    data[:, :shape[1]] = data_dict['data']
    tmp_model = plda.Model(data, data_dict['labels'])

    expected = transform_D_to_X(data, tmp_model.pca)
    expected = transform_X_to_U(expected, tmp_model.inv_A, tmp_model.m)
    expected = transform_U_to_U_model(expected, tmp_model.relevant_U_dims)

    actual = tmp_model.transform(data, from_space='D', to_space='U_model')
    assert_allclose(actual, expected)

    # U_model to D.
    dim = tmp_model.get_dimensionality('U')
    expected = transform_U_model_to_U(actual, tmp_model.relevant_U_dims, dim)
    expected = transform_U_to_X(expected, tmp_model.A, tmp_model.m)
    expected = transform_X_to_D(expected, tmp_model.pca)

    # BUG FIX: this must go through tmp_model (the rank-deficient model),
    # not `model`, and the result must actually be checked.
    actual = tmp_model.transform(actual, from_space='U_model', to_space='D')
    assert_allclose(actual, expected)
# The functions below are deliberate placeholders: the behaviour they name
# is covered by the test modules referenced in their docstrings. They are
# kept so this module documents the full public surface of the model.
def test_prior_params(model):
    """
    Implemented in `tests/test_optimizer/test_optimizer_units.py`.
    Also implicitly tested in `tests/test_model/test_model_inference.py`.
    """
    pass


def test_posterior_params(model):
    """
    Implemented in `tests/test_optimizer/test_optimizer_units.py`.
    Also implicitly tested in `tests/test_model/test_model_inference.py`.
    """
    pass


def test_posterior_predictive_params(model):
    """
    Implemented in `tests/test_optimizer/test_optimizer_units.py`.
    Also implicitly tested in `tests/test_model/test_model_inference.py`.
    """
    pass


def test_calc_logp_marginal_likelihood():
    """ Implemented in `tests/test_model/test_model_inference.py`. """
    pass


def test_calc_logp_prior():
    """ Implemented in `tests/test_model/test_model_inference.py`. """
    pass


def test_calc_logp_posterior():
    """ Implemented in `tests/test_model/test_model_inference.py`. """
    pass


def test_calc_logp_posterior_predictive():
    """ Implemented in `tests/test_model/test_model_inference.py`. """
    pass
|
|
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
"""Synchronized queues.
The :mod:`gevent.queue` module implements multi-producer, multi-consumer queues
that work across greenlets, with the API similar to the classes found in the
standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>` modules.
The classes in this module implement the iterator protocol. Iterating over a queue
means repeatedly calling :meth:`get <Queue.get>` until :meth:`get <Queue.get>` returns ``StopIteration``.
>>> queue = gevent.queue.Queue()
>>> queue.put(1)
>>> queue.put(2)
>>> queue.put(StopIteration)
>>> for item in queue:
... print(item)
1
2
.. versionchanged:: 1.0
``Queue(0)`` now means queue of infinite size, not a channel. A :exc:`DeprecationWarning`
will be issued with this argument.
"""
from __future__ import absolute_import
import sys
import heapq
import collections
if sys.version_info[0] == 2:
import Queue as __queue__
else:
import queue as __queue__
Full = __queue__.Full
Empty = __queue__.Empty
from gevent.timeout import Timeout
from gevent.hub import get_hub, Waiter, getcurrent, PY3
from gevent.hub import InvalidSwitchError
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue', 'Channel']
def _safe_remove(deq, item):
# For when the item may have been removed by
# Queue._unlock
try:
deq.remove(item)
except ValueError:
pass
class Queue(object):
"""
Create a queue object with a given maximum size.
If *maxsize* is less than or equal to zero or ``None``, the queue
size is infinite.
.. versionchanged:: 1.1b3
Queues now support :func:`len`; it behaves the same as :meth:`qsize`.
.. versionchanged:: 1.1b3
Multiple greenlets that block on a call to :meth:`put` for a full queue
will now be woken up to put their items into the queue in the order in which
they arrived. Likewise, multiple greenlets that block on a call to :meth:`get` for
an empty queue will now receive items in the order in which they blocked. An
implementation quirk under CPython *usually* ensured this was roughly the case
previously anyway, but that wasn't the case for PyPy.
"""
    def __init__(self, maxsize=None, items=None):
        """Create the queue.

        :param maxsize: capacity bound; ``None`` or <= 0 means unbounded.
            ``Queue(0)`` is deprecated and treated as unbounded.
        :param items: optional initial items handed to :meth:`_init`.
        """
        if maxsize is not None and maxsize <= 0:
            # Normalise all "unbounded" spellings to None.
            self.maxsize = None
            if maxsize == 0:
                import warnings
                warnings.warn('Queue(0) now equivalent to Queue(None); if you want a channel, use Channel',
                              DeprecationWarning, stacklevel=2)
        else:
            self.maxsize = maxsize
        # Explicitly maintain order for getters and putters that block
        # so that callers can consistently rely on getting things out
        # in the apparent order they went in. This was once required by
        # imap_unordered. Previously these were set() objects, and the
        # items put in the set have default hash() and eq() methods;
        # under CPython, since new objects tend to have increasing
        # hash values, this tended to roughly maintain order anyway,
        # but that's not true under PyPy. An alternative to a deque
        # (to avoid the linear scan of remove()) might be an
        # OrderedDict, but it's 2.7 only; we don't expect to have so
        # many waiters that removing an arbitrary element is a
        # bottleneck, though.
        self.getters = collections.deque()
        self.putters = collections.deque()
        self.hub = get_hub()
        # Callback handle used to wake blocked waiters; created lazily.
        self._event_unlock = None
        if items:
            self._init(maxsize, items)
        else:
            self._init(maxsize)
# QQQ make maxsize into a property with setter that schedules unlock if necessary
    def copy(self):
        """Return a new queue of the same type with the same bound and items."""
        return type(self)(self.maxsize, self.queue)

    def _init(self, maxsize, items=None):
        # FIXME: Why is maxsize unused or even passed?
        # pylint:disable=unused-argument
        if items:
            self.queue = collections.deque(items)
        else:
            self.queue = collections.deque()

    def _get(self):
        # Dequeue from the left end (FIFO); subclasses override for other orders.
        return self.queue.popleft()

    def _peek(self):
        # The item _get would return next, without removing it.
        return self.queue[0]

    def _put(self, item):
        # Enqueue at the right end (FIFO).
        self.queue.append(item)
    def __repr__(self):
        return '<%s at %s%s>' % (type(self).__name__, hex(id(self)), self._format())

    def __str__(self):
        return '<%s%s>' % (type(self).__name__, self._format())

    def _format(self):
        # Build the human-readable suffix shared by __repr__ and __str__.
        result = []
        if self.maxsize is not None:
            result.append('maxsize=%r' % (self.maxsize, ))
        # getattr: _format may run before _init has created self.queue.
        if getattr(self, 'queue', None):
            result.append('queue=%r' % (self.queue, ))
        if self.getters:
            result.append('getters[%s]' % len(self.getters))
        if self.putters:
            result.append('putters[%s]' % len(self.putters))
        if result:
            return ' ' + ' '.join(result)
        else:
            return ''
def qsize(self):
"""Return the size of the queue."""
return len(self.queue)
def __len__(self):
    """
    Return the size of the queue. This is the same as :meth:`qsize`.

    .. versionadded: 1.1b3

        Previously, getting len() of a queue would raise a TypeError.
    """
    return self.qsize()
def __bool__(self):
    """
    A queue object is always True.

    .. versionadded: 1.1b3

        Now that queues support len(), they need to implement ``__bool__``
        to return True for backwards compatibility.
    """
    return True

# Python 2 spelling of the truth-value protocol method.
__nonzero__ = __bool__
def empty(self):
    """Return ``True`` if the queue is empty, ``False`` otherwise."""
    return self.qsize() == 0
def full(self):
    """Return ``True`` if the queue is full, ``False`` otherwise.

    ``Queue(None)`` is never full.
    """
    if self.maxsize is None:
        return False
    return self.qsize() >= self.maxsize
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.

    If optional arg *block* is true and *timeout* is ``None`` (the default),
    block if necessary until a free slot is available. If *timeout* is
    a positive number, it blocks at most *timeout* seconds and raises
    the :class:`Full` exception if no free slot was available within that time.
    Otherwise (*block* is false), put an item on the queue if a free slot
    is immediately available, else raise the :class:`Full` exception (*timeout*
    is ignored in that case).
    """
    if self.maxsize is None or self.qsize() < self.maxsize:
        # there's a free slot, put an item right away
        self._put(item)
        if self.getters:
            # wake a blocked getter via a hub callback
            self._schedule_unlock()
    elif self.hub is getcurrent():
        # We're in the mainloop, so we cannot wait; we can switch to other greenlets though.
        # Check if possible to get a free slot in the queue.
        while self.getters and self.qsize() and self.qsize() >= self.maxsize:
            getter = self.getters.popleft()
            getter.switch(getter)
        if self.qsize() < self.maxsize:
            self._put(item)
            return
        raise Full
    elif block:
        # Queue is full: park this greenlet as a putter carrying the item
        # (ItemWaiter) until _unlock() delivers it or the timeout fires.
        waiter = ItemWaiter(item, self)
        self.putters.append(waiter)
        timeout = Timeout._start_new_or_dummy(timeout, Full)
        try:
            if self.getters:
                self._schedule_unlock()
            result = waiter.get()
            if result is not waiter:
                # something switched into us with an unexpected value
                raise InvalidSwitchError("Invalid switch into Queue.put: %r" % (result, ))
        finally:
            timeout.cancel()
            _safe_remove(self.putters, waiter)
    else:
        raise Full
def put_nowait(self, item):
    """Put an item into the queue without blocking.

    Only enqueue the item if a free slot is immediately available.
    Otherwise raise the :class:`Full` exception.
    """
    self.put(item, block=False)
def __get_or_peek(self, method, block, timeout):
    # Internal helper method. The `method` should be either
    # self._get when called from self.get() or self._peek when
    # called from self.peek(). Call this after the initial check
    # to see if there are items in the queue.

    if self.hub is getcurrent():
        # special case to make get_nowait() or peek_nowait() runnable in the mainloop greenlet
        # there are no items in the queue; try to fix the situation by unlocking putters
        while self.putters:
            # Note: get() used popleft(), peek used pop(); popleft
            # is almost certainly correct.
            self.putters.popleft().put_and_switch()
            if self.qsize():
                return method()
        raise Empty()

    if not block:
        # We can't block, we're not the hub, and we have nothing
        # to return. No choice...
        raise Empty()

    # Park this greenlet as a getter until a putter (via _unlock)
    # switches back to us, or the timeout fires.
    waiter = Waiter()
    timeout = Timeout._start_new_or_dummy(timeout, Empty)
    try:
        self.getters.append(waiter)
        if self.putters:
            self._schedule_unlock()
        result = waiter.get()
        if result is not waiter:
            raise InvalidSwitchError('Invalid switch into Queue.get: %r' % (result, ))
        return method()
    finally:
        timeout.cancel()
        _safe_remove(self.getters, waiter)
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.

    If optional args *block* is true and *timeout* is ``None`` (the default),
    block if necessary until an item is available. If *timeout* is a positive number,
    it blocks at most *timeout* seconds and raises the :class:`Empty` exception
    if no item was available within that time. Otherwise (*block* is false), return
    an item if one is immediately available, else raise the :class:`Empty` exception
    (*timeout* is ignored in that case).
    """
    if self.qsize():
        if self.putters:
            # Taking an item frees a slot; wake a blocked putter.
            self._schedule_unlock()
        return self._get()

    return self.__get_or_peek(self._get, block, timeout)
def get_nowait(self):
    """Remove and return an item from the queue without blocking.

    Only get an item if one is immediately available. Otherwise
    raise the :class:`Empty` exception.
    """
    return self.get(block=False)
def peek(self, block=True, timeout=None):
    """Return an item from the queue without removing it.

    If optional args *block* is true and *timeout* is ``None`` (the default),
    block if necessary until an item is available. If *timeout* is a positive number,
    it blocks at most *timeout* seconds and raises the :class:`Empty` exception
    if no item was available within that time. Otherwise (*block* is false), return
    an item if one is immediately available, else raise the :class:`Empty` exception
    (*timeout* is ignored in that case).
    """
    if self.qsize():
        # XXX: Why doesn't this schedule an unlock like get() does?
        return self._peek()

    return self.__get_or_peek(self._peek, block, timeout)
def peek_nowait(self):
    """Return an item from the queue without blocking.

    Only return an item if one is immediately available. Otherwise
    raise the :class:`Empty` exception.
    """
    return self.peek(block=False)
def _unlock(self):
    # Runs as a hub callback: wake blocked putters while there is free
    # capacity and blocked getters while there are items. Loops because
    # each wake-up can change both conditions.
    while True:
        repeat = False
        if self.putters and (self.maxsize is None or self.qsize() < self.maxsize):
            repeat = True
            try:
                putter = self.putters.popleft()
                self._put(putter.item)
            except: # pylint:disable=bare-except
                # _put may be overridden and fail; deliver the failure to
                # the blocked putter instead of losing it in the hub.
                putter.throw(*sys.exc_info())
            else:
                putter.switch(putter)
        if self.getters and self.qsize():
            repeat = True
            getter = self.getters.popleft()
            getter.switch(getter)
        if not repeat:
            return
def _schedule_unlock(self):
    # Coalesce wake-up requests: at most one pending _unlock callback.
    # NOTE(review): the truthiness test (rather than `is None`) appears to
    # rely on a spent callback watcher becoming falsy so a new one can be
    # scheduled -- confirm against the hub's run_callback semantics.
    if not self._event_unlock:
        self._event_unlock = self.hub.loop.run_callback(self._unlock)
def __iter__(self):
    # Queues are their own iterator; see next()/__next__.
    return self
def next(self):
    """Iterate by repeatedly calling :meth:`get`.

    A producer must put the ``StopIteration`` class/instance into the
    queue to terminate the consuming loop.
    """
    result = self.get()
    if result is StopIteration:
        raise result
    return result

if PY3:
    # Python 3 spells the iterator-protocol method __next__.
    __next__ = next
    del next
class ItemWaiter(Waiter):
    # A Waiter that carries the pending item for a blocked Queue.put(),
    # so the hub (or __get_or_peek) can deliver it once a slot frees up.
    __slots__ = ['item', 'queue']

    def __init__(self, item, queue):
        Waiter.__init__(self)
        self.item = item    # the value the blocked put() wants to enqueue
        self.queue = queue  # the owning queue; cleared after delivery

    def put_and_switch(self):
        # Deliver the stored item, drop references to help GC,
        # then resume the greenlet blocked in put().
        self.queue._put(self.item)
        self.queue = None
        self.item = None
        return self.switch(self)
class PriorityQueue(Queue):
    '''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first).

    Entries are typically tuples of the form: ``(priority number, data)``.
    '''

    def _init(self, maxsize, items=None):
        # *maxsize* is unused here; accepted for hook-signature compatibility.
        # pylint:disable=unused-argument
        if items:
            self.queue = list(items)
            # Bug fix: initial items may arrive in arbitrary order, but
            # _get() relies on the heap invariant -- establish it up front,
            # otherwise heappop() returns items in the wrong order.
            heapq.heapify(self.queue)
        else:
            self.queue = []

    def _put(self, item, heappush=heapq.heappush):
        # pylint:disable=arguments-differ
        heappush(self.queue, item)

    def _get(self, heappop=heapq.heappop):
        # pylint:disable=arguments-differ
        return heappop(self.queue)
class LifoQueue(Queue):
    '''A subclass of :class:`Queue` that retrieves most recently added entries first
    (stack order).'''

    def _init(self, maxsize, items=None):
        # A plain list used as a stack; *maxsize* is enforced by the base class.
        self.queue = list(items) if items else []

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        return self.queue.pop()

    def _peek(self):
        return self.queue[-1]
class JoinableQueue(Queue):
    """
    A subclass of :class:`Queue` that additionally has
    :meth:`task_done` and :meth:`join` methods.
    """

    def __init__(self, maxsize=None, items=None, unfinished_tasks=None):
        """
        .. versionchanged:: 1.1a1
           If *unfinished_tasks* is not given, then all the given *items*
           (if any) will be considered unfinished.
        """
        from gevent.event import Event
        Queue.__init__(self, maxsize, items)
        # _cond is set exactly when unfinished_tasks == 0; join() waits on it.
        self._cond = Event()
        self._cond.set()

        if unfinished_tasks:
            self.unfinished_tasks = unfinished_tasks
        elif items:
            self.unfinished_tasks = len(items)
        else:
            self.unfinished_tasks = 0

        if self.unfinished_tasks:
            self._cond.clear()

    def copy(self):
        # Unlike Queue.copy(), also propagate the outstanding-task count.
        return type(self)(self.maxsize, self.queue, self.unfinished_tasks)

    def _format(self):
        result = Queue._format(self)
        if self.unfinished_tasks:
            result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)
        return result

    def _put(self, item):
        # Every stored item adds one unfinished task and blocks join().
        Queue._put(self, item)
        self.unfinished_tasks += 1
        self._cond.clear()

    def task_done(self):
        '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
        that the processing on the task is complete.

        If a :meth:`join` is currently blocking, it will resume when all items have been processed
        (meaning that a :meth:`task_done` call was received for every item that had been
        :meth:`put <Queue.put>` into the queue).

        Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
        '''
        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if self.unfinished_tasks == 0:
            self._cond.set()

    def join(self, timeout=None):
        '''
        Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the queue.
        The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
        that the item was retrieved and all work on it is complete. When the count of
        unfinished tasks drops to zero, :meth:`join` unblocks.

        :param float timeout: If not ``None``, then wait no more than this time in seconds
            for all tasks to finish.
        :return: ``True`` if all tasks have finished; if ``timeout`` was given and expired before
            all tasks finished, ``False``.

        .. versionchanged:: 1.1a1
           Add the *timeout* parameter.
        '''
        return self._cond.wait(timeout=timeout)
class Channel(object):
    # A zero-capacity ("rendezvous") channel: every put() must pair with a
    # get(). Presents a Queue-like API, but qsize() is always 0.

    def __init__(self):
        self.getters = collections.deque()  # Waiters blocked in get()
        self.putters = collections.deque()  # (item, Waiter) pairs blocked in put()
        self.hub = get_hub()
        self._event_unlock = None           # pending hub callback, if any

    def __repr__(self):
        return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._format())

    def _format(self):
        # Summarize blocked greenlets for the repr/str output.
        result = ''
        if self.getters:
            result += ' getters[%s]' % len(self.getters)
        if self.putters:
            result += ' putters[%s]' % len(self.putters)
        return result

    @property
    def balance(self):
        # Positive: more blocked putters; negative: more blocked getters.
        return len(self.putters) - len(self.getters)

    def qsize(self):
        # A channel stores nothing itself.
        return 0

    def empty(self):
        return True

    def full(self):
        return True

    def put(self, item, block=True, timeout=None):
        """Hand *item* to a getter, blocking until one is available."""
        if self.hub is getcurrent():
            # In the hub we cannot block: deliver immediately or fail.
            if self.getters:
                getter = self.getters.popleft()
                getter.switch(item)
                return
            raise Full

        if not block:
            # A zero timeout makes the wait below raise Full immediately.
            timeout = 0

        # Park as a putter; _unlock() (or a getter) resumes us.
        waiter = Waiter()
        item = (item, waiter)
        self.putters.append(item)
        timeout = Timeout._start_new_or_dummy(timeout, Full)
        try:
            if self.getters:
                self._schedule_unlock()
            result = waiter.get()
            if result is not waiter:
                raise InvalidSwitchError("Invalid switch into Channel.put: %r" % (result, ))
        except:
            # Covers both the Timeout(Full) and unexpected switches.
            _safe_remove(self.putters, item)
            raise
        finally:
            timeout.cancel()

    def put_nowait(self, item):
        self.put(item, False)

    def get(self, block=True, timeout=None):
        """Take an item from a putter, blocking until one arrives."""
        if self.hub is getcurrent():
            # In the hub: take a pending item if any and resume its putter
            # on the next loop iteration.
            if self.putters:
                item, putter = self.putters.popleft()
                self.hub.loop.run_callback(putter.switch, putter)
                return item
            # NOTE(review): with no putters this falls through to the
            # blocking path below even in the hub -- confirm intended.

        if not block:
            timeout = 0

        waiter = Waiter()
        timeout = Timeout._start_new_or_dummy(timeout, Empty)
        try:
            self.getters.append(waiter)
            if self.putters:
                self._schedule_unlock()
            # The pairing in _unlock() switches the item directly to us.
            return waiter.get()
        except:
            self.getters.remove(waiter)
            raise
        finally:
            timeout.cancel()

    def get_nowait(self):
        return self.get(False)

    def _unlock(self):
        # Runs as a hub callback: pair up blocked putters and getters.
        while self.putters and self.getters:
            getter = self.getters.popleft()
            item, putter = self.putters.popleft()
            getter.switch(item)      # deliver the item to the getter...
            putter.switch(putter)    # ...then wake the putter

    def _schedule_unlock(self):
        # Coalesce wake-ups: at most one pending _unlock callback.
        if not self._event_unlock:
            self._event_unlock = self.hub.loop.run_callback(self._unlock)

    def __iter__(self):
        return self

    def next(self):
        # A producer puts StopIteration into the channel to end iteration.
        result = self.get()
        if result is StopIteration:
            raise result
        return result

    __next__ = next # py3
|
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/colors.py
__version__=''' $Id: colors.py 3208 2008-02-13 11:23:55Z rgbecker $ '''
import string, math
from types import StringType, ListType, TupleType
from reportlab.lib.utils import fp_str
_SeqTypes = (ListType,TupleType)
class Color:
    """This class is used to represent color. Components red, green, blue
    are in the range 0 (dark) to 1 (full intensity)."""

    def __init__(self, red=0, green=0, blue=0):
        "Initialize with red, green, blue in range [0-1]."
        self.red = red
        self.green = green
        self.blue = blue

    def __repr__(self):
        return "Color(%s)" % fp_str(self.red, self.green, self.blue).replace(' ', ',')

    def __hash__(self):
        return hash(self.rgb())

    def __cmp__(self, other):
        # Weighted channel comparison; anything not color-like sorts 'less'.
        try:
            dsum = 4*self.red-4*other.red + 2*self.green-2*other.green + self.blue-other.blue
        except:
            return -1
        if dsum > 0:
            return 1
        if dsum < 0:
            return -1
        return 0

    def rgb(self):
        "Returns a three-tuple of components"
        return (self.red, self.green, self.blue)

    def bitmap_rgb(self):
        # Scale each component to an 8-bit integer channel value.
        return tuple([int(component * 255) & 255 for component in self.rgb()])

    def hexval(self):
        return '0x%02x%02x%02x' % self.bitmap_rgb()
class CMYKColor(Color):
    """This represents colors using the CMYK (cyan, magenta, yellow, black)
    model commonly used in professional printing. This is implemented
    as a derived class so that renderers which only know about RGB "see it"
    as an RGB color through its 'red','green' and 'blue' attributes, according
    to an approximate function.

    The RGB approximation is worked out when the object is constructed, so
    the color attributes should not be changed afterwards.

    Extra attributes may be attached to the class to support specific ink models,
    and renderers may look for these."""

    def __init__(self, cyan=0, magenta=0, yellow=0, black=0,
                 spotName=None, density=1, knockout=None):
        """
        Initialize with four colors in range [0-1]. the optional
        spotName, density & knockout may be of use to specific renderers.
        spotName is intended for use as an identifier to the renderer not client programs.
        density is used to modify the overall amount of ink.
        knockout is a renderer dependent option that determines whether the applied colour
        knocksout (removes) existing colour; None means use the global default.
        """
        self.cyan = cyan
        self.magenta = magenta
        self.yellow = yellow
        self.black = black
        self.spotName = spotName
        self.density = max(min(density, 1), 0)  # force into right range
        self.knockout = knockout

        # Pre-compute the RGB approximation; see the class docstring.
        self.red, self.green, self.blue = cmyk2rgb((cyan, magenta, yellow, black))

        if density < 1:
            # density adjustment of rgb approximants, effectively mix with white
            self.red = density * (self.red - 1) + 1
            self.green = density * (self.green - 1) + 1
            self.blue = density * (self.blue - 1) + 1

    def __repr__(self):
        extras = ''
        if self.spotName:
            extras += ',spotName=' + repr(self.spotName)
        if self.density != 1:
            extras += ',density=' + fp_str(self.density)
        if self.knockout is not None:
            extras += ',knockout=%d' % self.knockout
        return "CMYKColor(%s%s)" % (
            fp_str(self.cyan, self.magenta, self.yellow, self.black).replace(' ', ','),
            extras)

    def __hash__(self):
        return hash((self.cyan, self.magenta, self.yellow, self.black, self.density, self.spotName))

    def __cmp__(self, other):
        """Partial ordering of colors according to a notion of distance.

        Comparing across the two color models is of limited use."""
        if isinstance(other, CMYKColor):
            # Weighted ink-channel distance, density, then spot name.
            dsum = (((( (self.cyan-other.cyan)*2 +
                        (self.magenta-other.magenta))*2+
                      (self.yellow-other.yellow))*2+
                    (self.black-other.black))*2+
                  (self.density-other.density))*2 + cmp(self.spotName or '',other.spotName or '')
        else:  # do the RGB comparison
            try:
                dsum = ((self.red-other.red)*2+(self.green-other.green))*2+(self.blue-other.blue)
            except:  # or just return 'not equal' if not a color
                return -1
        if dsum >= 0:
            return dsum > 0
        return -1

    def cmyk(self):
        "Returns a tuple of four color components - syntactic sugar"
        return (self.cyan, self.magenta, self.yellow, self.black)

    def _density_str(self):
        return fp_str(self.density)
class PCMYKColor(CMYKColor):
    '''100 based CMYKColor with density and a spotName; just like Rimas uses'''

    def __init__(self, cyan, magenta, yellow, black, density=100, spotName=None, knockout=None):
        # Scale the 0-100 based channel and density values down to the
        # 0-1 based CMYKColor representation.
        CMYKColor.__init__(self, cyan/100., magenta/100., yellow/100., black/100.,
                           spotName, density/100., knockout=knockout)

    def __repr__(self):
        extras = ''
        if self.spotName:
            extras += ',spotName=' + repr(self.spotName)
        if self.density != 1:
            extras += ',density=' + fp_str(self.density*100)
        if self.knockout is not None:
            extras += ',knockout=%d' % self.knockout
        return "PCMYKColor(%s%s)" % (
            fp_str(self.cyan*100, self.magenta*100, self.yellow*100, self.black*100).replace(' ', ','),
            extras)
def cmyk2rgb(cmyk, density=1):
    """Convert from a CMYK color tuple to an RGB color tuple.

    *cmyk* is a (cyan, magenta, yellow, black) sequence in [0-1].
    *density* is accepted for API compatibility but is (historically) unused.

    The original tuple-parameter signature ``def cmyk2rgb((c,m,y,k), ...)``
    was removed from the language by PEP 3113; unpacking in the body keeps
    the call signature identical for existing callers.
    """
    c, m, y, k = cmyk
    # From the Adobe Postscript Ref. Manual 2nd ed.
    r = 1.0 - min(1.0, c + k)
    g = 1.0 - min(1.0, m + k)
    b = 1.0 - min(1.0, y + k)
    return (r, g, b)
def rgb2cmyk(r, g, b):
    '''one way to get cmyk from rgb: subtract the common grey component
    (the key/black ink) from the complementary channels, clamping to [0-1]'''
    cyan, magenta, yel = 1 - r, 1 - g, 1 - b
    key = min(cyan, magenta, yel)

    def _clamp(v):
        return min(1, max(0, v))

    return (_clamp(cyan - key), _clamp(magenta - key), _clamp(yel - key), _clamp(key))
def color2bw(colorRGB):
    "Transform an RGB color to a black and white equivalent."
    # Equal-weight average of the three channels gives the grey level.
    gray_level = (colorRGB.red + colorRGB.green + colorRGB.blue) / 3.0
    return Color(gray_level, gray_level, gray_level)
def HexColor(val, htmlOnly=False):
    """This function converts a hex string, or an actual integer number,
    into the corresponding color. E.g., in "#AABBCC" or 0xAABBCC,
    AA is the red, BB is the green, and CC is the blue (00-FF).

    For completeness I assume that #aabbcc or 0xaabbcc are hex numbers
    otherwise a pure integer is converted as decimal rgb. If htmlOnly is true,
    only the #aabbcc form is allowed.

    >>> HexColor('#ffffff')
    Color(1,1,1)
    >>> HexColor('#FFFFFF')
    Color(1,1,1)
    >>> HexColor('0xffffff')
    Color(1,1,1)
    >>> HexColor('16777215')
    Color(1,1,1)

    An '0x' or '#' prefix is required for hex (as opposed to decimal):

    >>> HexColor('ffffff')
    Traceback (most recent call last):
    ValueError: invalid literal for int() with base 10: 'ffffff'

    >>> HexColor('#FFFFFF', htmlOnly=True)
    Color(1,1,1)
    >>> HexColor('0xffffff', htmlOnly=True)
    Traceback (most recent call last):
    ValueError: not a hex string
    >>> HexColor('16777215', htmlOnly=True)
    Traceback (most recent call last):
    ValueError: not a hex string
    """ #" for emacs
    if isinstance(val, str):
        b = 10
        if val[:1] == '#':
            val = val[1:]
            b = 16
        else:
            if htmlOnly:
                raise ValueError('not a hex string')
            if val[:2].lower() == '0x':
                b = 16
                val = val[2:]
        # int() accepts the same strings string.atoi() did; the string-module
        # function helpers were removed in Python 3.
        val = int(val, b)
    # Split the 24-bit value into 8-bit channels, scaled to [0-1].
    return Color(((val >> 16) & 0xFF) / 255.0, ((val >> 8) & 0xFF) / 255.0, (val & 0xFF) / 255.0)
def linearlyInterpolatedColor(c0, c1, x0, x1, x):
    """
    Linearly interpolates colors. Can handle RGB, CMYK and PCMYK
    colors - raises ValueError if colours aren't the same class.
    Doesn't currently handle 'Spot Color Interpolation'.
    """
    if c0.__class__ != c1.__class__:
        raise ValueError("Color classes must be the same for interpolation!")
    if x1 < x0:
        x0, x1, c0, c1 = x1, x0, c1, c0  # normalized so x1>x0
    if x < x0 - 1e-8 or x > x1 + 1e-8:   # fudge factor for numerical problems
        raise ValueError("Can't interpolate: x=%f is not between %f and %f!" % (x, x0, x1))
    if x <= x0:
        return c0
    elif x >= x1:
        return c1

    cname = c0.__class__.__name__
    dx = float(x1 - x0)
    x = x - x0

    if cname == 'Color':  # RGB
        r = c0.red+x*(c1.red - c0.red)/dx
        g = c0.green+x*(c1.green- c0.green)/dx
        b = c0.blue+x*(c1.blue - c0.blue)/dx
        return Color(r,g,b)
    elif cname == 'CMYKColor':
        c = c0.cyan+x*(c1.cyan - c0.cyan)/dx
        m = c0.magenta+x*(c1.magenta - c0.magenta)/dx
        y = c0.yellow+x*(c1.yellow - c0.yellow)/dx
        k = c0.black+x*(c1.black - c0.black)/dx
        d = c0.density+x*(c1.density - c0.density)/dx
        return CMYKColor(c,m,y,k, density=d)
    elif cname == 'PCMYKColor':
        if cmykDistance(c0,c1)<1e-8:
            # colors same: interpolate density only and preserve spotName if any
            assert c0.spotName == c1.spotName, "Identical cmyk, but different spotName"
            c = c0.cyan
            m = c0.magenta
            y = c0.yellow
            k = c0.black
            d = c0.density+x*(c1.density - c0.density)/dx
            return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, spotName=c0.spotName)
        elif cmykDistance(c0,_CMYK_white)<1e-8:
            # special case: c0 is white -- fade c1 in
            c = c1.cyan
            m = c1.magenta
            y = c1.yellow
            k = c1.black
            d = x*c1.density/dx
            return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, spotName=c1.spotName)
        elif cmykDistance(c1,_CMYK_white)<1e-8:
            # special case: c1 is white -- fade c0 out.
            # (A dead assignment `d = x*c0.density/dx` that was immediately
            # overwritten has been removed; this is the value that was used.)
            c = c0.cyan
            m = c0.magenta
            y = c0.yellow
            k = c0.black
            d = c0.density*(1-x/dx)
            return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, spotName=c0.spotName)
        else:
            c = c0.cyan+x*(c1.cyan - c0.cyan)/dx
            m = c0.magenta+x*(c1.magenta - c0.magenta)/dx
            y = c0.yellow+x*(c1.yellow - c0.yellow)/dx
            k = c0.black+x*(c1.black - c0.black)/dx
            d = c0.density+x*(c1.density - c0.density)/dx
            return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100)
    else:
        raise ValueError("Can't interpolate: Unknown color class %s!" % cname)
# special case -- indicates no drawing should be done
# this is a hangover from PIDDLE - suggest we ditch it since it is not used anywhere
#transparent = Color(-1, -1, -1)

# Pure-ink endpoint colors used by Whiter()/Blacker() interpolation.
_CMYK_white=CMYKColor(0,0,0,0)
_PCMYK_white=PCMYKColor(0,0,0,0)
_CMYK_black=CMYKColor(0,0,0,1)
_PCMYK_black=PCMYKColor(0,0,0,100)

# Special colors
ReportLabBlueOLD = HexColor(0x4e5688)
ReportLabBlue = HexColor(0x00337f)
ReportLabBluePCMYK = PCMYKColor(100,65,0,30,spotName='Pantone 288U')
ReportLabLightBlue = HexColor(0xb7b9d3)
ReportLabFidBlue=HexColor(0x3366cc)
ReportLabFidRed=HexColor(0xcc0033)
ReportLabGreen = HexColor(0x336600)
ReportLabLightGreen = HexColor(0x339933)

# color constants -- mostly from HTML standard
aliceblue = HexColor(0xF0F8FF)
antiquewhite = HexColor(0xFAEBD7)
aqua = HexColor(0x00FFFF)
aquamarine = HexColor(0x7FFFD4)
azure = HexColor(0xF0FFFF)
beige = HexColor(0xF5F5DC)
bisque = HexColor(0xFFE4C4)
black = HexColor(0x000000)
blanchedalmond = HexColor(0xFFEBCD)
blue = HexColor(0x0000FF)
blueviolet = HexColor(0x8A2BE2)
brown = HexColor(0xA52A2A)
burlywood = HexColor(0xDEB887)
cadetblue = HexColor(0x5F9EA0)
chartreuse = HexColor(0x7FFF00)
chocolate = HexColor(0xD2691E)
coral = HexColor(0xFF7F50)
cornflowerblue = cornflower = HexColor(0x6495ED)
cornsilk = HexColor(0xFFF8DC)
crimson = HexColor(0xDC143C)
cyan = HexColor(0x00FFFF)
darkblue = HexColor(0x00008B)
darkcyan = HexColor(0x008B8B)
darkgoldenrod = HexColor(0xB8860B)
darkgray = HexColor(0xA9A9A9)
darkgreen = HexColor(0x006400)
darkkhaki = HexColor(0xBDB76B)
darkmagenta = HexColor(0x8B008B)
darkolivegreen = HexColor(0x556B2F)
darkorange = HexColor(0xFF8C00)
darkorchid = HexColor(0x9932CC)
darkred = HexColor(0x8B0000)
darksalmon = HexColor(0xE9967A)
darkseagreen = HexColor(0x8FBC8B)
darkslateblue = HexColor(0x483D8B)
darkslategray = HexColor(0x2F4F4F)
darkturquoise = HexColor(0x00CED1)
darkviolet = HexColor(0x9400D3)
deeppink = HexColor(0xFF1493)
deepskyblue = HexColor(0x00BFFF)
dimgray = HexColor(0x696969)
dodgerblue = HexColor(0x1E90FF)
firebrick = HexColor(0xB22222)
floralwhite = HexColor(0xFFFAF0)
forestgreen = HexColor(0x228B22)
fuchsia = HexColor(0xFF00FF)
gainsboro = HexColor(0xDCDCDC)
ghostwhite = HexColor(0xF8F8FF)
gold = HexColor(0xFFD700)
goldenrod = HexColor(0xDAA520)
gray = HexColor(0x808080)
grey = gray
green = HexColor(0x008000)
greenyellow = HexColor(0xADFF2F)
honeydew = HexColor(0xF0FFF0)
hotpink = HexColor(0xFF69B4)
indianred = HexColor(0xCD5C5C)
indigo = HexColor(0x4B0082)
ivory = HexColor(0xFFFFF0)
khaki = HexColor(0xF0E68C)
lavender = HexColor(0xE6E6FA)
lavenderblush = HexColor(0xFFF0F5)
lawngreen = HexColor(0x7CFC00)
lemonchiffon = HexColor(0xFFFACD)
lightblue = HexColor(0xADD8E6)
lightcoral = HexColor(0xF08080)
lightcyan = HexColor(0xE0FFFF)
lightgoldenrodyellow = HexColor(0xFAFAD2)
lightgreen = HexColor(0x90EE90)
lightgrey = HexColor(0xD3D3D3)
lightpink = HexColor(0xFFB6C1)
lightsalmon = HexColor(0xFFA07A)
lightseagreen = HexColor(0x20B2AA)
lightskyblue = HexColor(0x87CEFA)
lightslategray = HexColor(0x778899)
lightsteelblue = HexColor(0xB0C4DE)
lightyellow = HexColor(0xFFFFE0)
lime = HexColor(0x00FF00)
limegreen = HexColor(0x32CD32)
linen = HexColor(0xFAF0E6)
magenta = HexColor(0xFF00FF)
maroon = HexColor(0x800000)
mediumaquamarine = HexColor(0x66CDAA)
mediumblue = HexColor(0x0000CD)
mediumorchid = HexColor(0xBA55D3)
mediumpurple = HexColor(0x9370DB)
mediumseagreen = HexColor(0x3CB371)
mediumslateblue = HexColor(0x7B68EE)
mediumspringgreen = HexColor(0x00FA9A)
mediumturquoise = HexColor(0x48D1CC)
mediumvioletred = HexColor(0xC71585)
midnightblue = HexColor(0x191970)
mintcream = HexColor(0xF5FFFA)
mistyrose = HexColor(0xFFE4E1)
moccasin = HexColor(0xFFE4B5)
navajowhite = HexColor(0xFFDEAD)
navy = HexColor(0x000080)
oldlace = HexColor(0xFDF5E6)
olive = HexColor(0x808000)
olivedrab = HexColor(0x6B8E23)
orange = HexColor(0xFFA500)
orangered = HexColor(0xFF4500)
orchid = HexColor(0xDA70D6)
palegoldenrod = HexColor(0xEEE8AA)
palegreen = HexColor(0x98FB98)
paleturquoise = HexColor(0xAFEEEE)
palevioletred = HexColor(0xDB7093)
papayawhip = HexColor(0xFFEFD5)
peachpuff = HexColor(0xFFDAB9)
peru = HexColor(0xCD853F)
pink = HexColor(0xFFC0CB)
plum = HexColor(0xDDA0DD)
powderblue = HexColor(0xB0E0E6)
purple = HexColor(0x800080)
red = HexColor(0xFF0000)
rosybrown = HexColor(0xBC8F8F)
royalblue = HexColor(0x4169E1)
saddlebrown = HexColor(0x8B4513)
salmon = HexColor(0xFA8072)
sandybrown = HexColor(0xF4A460)
seagreen = HexColor(0x2E8B57)
seashell = HexColor(0xFFF5EE)
sienna = HexColor(0xA0522D)
silver = HexColor(0xC0C0C0)
skyblue = HexColor(0x87CEEB)
slateblue = HexColor(0x6A5ACD)
slategray = HexColor(0x708090)
snow = HexColor(0xFFFAFA)
springgreen = HexColor(0x00FF7F)
steelblue = HexColor(0x4682B4)
tan = HexColor(0xD2B48C)
teal = HexColor(0x008080)
thistle = HexColor(0xD8BFD8)
tomato = HexColor(0xFF6347)
turquoise = HexColor(0x40E0D0)
violet = HexColor(0xEE82EE)
wheat = HexColor(0xF5DEB3)
white = HexColor(0xFFFFFF)
whitesmoke = HexColor(0xF5F5F5)
yellow = HexColor(0xFFFF00)
yellowgreen = HexColor(0x9ACD32)
fidblue=HexColor(0x3366cc)
fidred=HexColor(0xcc0033)
fidlightblue=HexColor("#d6e0f5")

# Convenience alias for isinstance()-style checks against RGB colors.
ColorType=type(black)
################################################################
#
# Helper functions for dealing with colors. These tell you
# which are predefined, so you can print color charts;
# and can give the nearest match to an arbitrary color object
#
#################################################################
def colorDistance(col1, col2):
    """Returns a number between 0 and root(3) stating how similar
    two colours are - distance in r,g,b, space. Only used to find
    names for things."""
    dr = col1.red - col2.red
    dg = col1.green - col2.green
    db = col1.blue - col2.blue
    return math.sqrt(dr ** 2 + dg ** 2 + db ** 2)
def cmykDistance(col1, col2):
    """Returns a number between 0 and root(4) stating how similar
    two colours are - distance in c,m,y,k space. Only used to find
    names for things."""
    dc = col1.cyan - col2.cyan
    dm = col1.magenta - col2.magenta
    dy = col1.yellow - col2.yellow
    dk = col1.black - col2.black
    return math.sqrt(dc ** 2 + dm ** 2 + dy ** 2 + dk ** 2)
_namedColors = None  # lazily-built cache used by getAllNamedColors()

def getAllNamedColors():
    # returns a dictionary of all the named ones in the module
    # uses a singleton for efficiency
    global _namedColors
    if _namedColors is not None: return _namedColors
    # Python 2 implicit relative import of this very module, giving us its
    # namespace so we can scan it for Color instances.
    import colors
    _namedColors = {}
    for (name, value) in colors.__dict__.items():
        if isinstance(value, Color):
            _namedColors[name] = value
    return _namedColors
def describe(aColor, mode=0):
    '''finds nearest colour match to aColor.
    mode=0 print a string description
    mode=1 return a string description
    mode=2 return (name, distance) of the closest named color
    '''
    namedColors = getAllNamedColors()
    closest = (10, None, None)  # big number, name, color
    for (name, color) in namedColors.items():
        distance = colorDistance(aColor, color)
        if distance < closest[0]:
            closest = (distance, name, color)
    if mode <= 1:
        s = 'best match is %s, distance %0.4f' % (closest[1], closest[0])
        if mode == 0:
            # parenthesized form works on both Python 2 and 3
            print(s)
        else:
            return s
    elif mode == 2:
        # NB: the return order is (name, distance) -- kept for callers,
        # the docstring above now matches the code.
        return (closest[1], closest[0])
    else:
        raise ValueError("Illegal value for mode " + str(mode))
def toColor(arg, default=None):
    '''try to map an arbitrary arg to a color instance.

    Accepts Color instances (returned unchanged), 3- or 4-sequences of
    [0-1] components (RGB resp. CMYK), named colors, python expressions
    and hex strings. Falls back to *default* if given, else raises
    ValueError.
    '''
    if isinstance(arg, Color):
        return arg
    tArg = type(arg)
    if tArg in _SeqTypes:
        assert 3 <= len(arg) <= 4, 'Can only convert 3 and 4 sequences to color'
        assert 0 <= min(arg) and max(arg) <= 1
        return len(arg) == 3 and Color(arg[0], arg[1], arg[2]) or CMYKColor(arg[0], arg[1], arg[2], arg[3])
    elif tArg == StringType:
        C = getAllNamedColors()
        s = arg.lower()
        # dict.has_key() was removed in Python 3; `in` works on both.
        if s in C:
            return C[s]
        try:
            # SECURITY: eval() of an arbitrary string is dangerous if *arg*
            # can come from untrusted input -- it will execute any expression.
            return toColor(eval(arg))
        except:
            pass
        try:
            return HexColor(arg)
        except:
            if default is None:
                raise ValueError('Invalid color value %r' % arg)
            return default
def toColorOrNone(arg, default=None):
    '''as above but allows None as a legal value'''
    return None if arg is None else toColor(arg, default)
def setColors(**kw):
    # Bulk-assign named colors into this module's globals. Values may be
    # anything toColor() accepts, or lists/tuples thereof. Repeated passes
    # let later definitions reference names assigned in earlier passes.
    UNDEF = []  # unique sentinel marking values not yet convertible
    progress = 1
    assigned = {}
    while kw and progress:
        progress = 0
        for k, v in kw.items():
            if type(v) in (type(()),type([])):
                # convert element-wise, preserving tuple-ness
                c = map(lambda x,UNDEF=UNDEF: toColor(x,UNDEF),v)
                if type(v) is type(()): c = tuple(c)
                ok = UNDEF not in c
            else:
                c = toColor(v,UNDEF)
                ok = c is not UNDEF
            if ok:
                assigned[k] = c
                # NOTE: deleting while iterating items() is only safe on
                # Python 2, where items() returns a list snapshot.
                del kw[k]
                progress = 1
    if kw: raise ValueError("Can't convert\n%s" % str(kw))
    getAllNamedColors()
    for k, c in assigned.items():
        globals()[k] = c
        # keep the named-color cache in sync with the new globals
        if isinstance(c,Color): _namedColors[k] = c
def Whiter(c, f):
    '''given a color combine with white as c*f w*(1-f) 0<=f<=1'''
    c = toColor(c)
    # pick the white that matches the color model of c
    if isinstance(c, PCMYKColor):
        base = _PCMYK_white
    elif isinstance(c, CMYKColor):
        base = _CMYK_white
    else:
        base = white
    return linearlyInterpolatedColor(base, c, 0, 1, f)
def Blacker(c, f):
    '''given a color combine with black as c*f+b*(1-f) 0<=f<=1'''
    c = toColor(c)
    # pick the black that matches the color model of c
    if isinstance(c, PCMYKColor):
        base = _PCMYK_black
    elif isinstance(c, CMYKColor):
        base = _CMYK_black
    else:
        base = black
    return linearlyInterpolatedColor(base, c, 0, 1, f)
if __name__ == "__main__":
    # Run the doctests embedded in this module (e.g. HexColor's examples).
    import doctest
    doctest.testmod()
|
|
# Copyright 2016 Suzy M. Stiegelmeyer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
intervalTree.py is an adaptation of the interval tree algorithm based on
red-black trees from Introduction to Algorithms by Cormen, Leiserson, Rivest
and Stein (2001) 2nd Edition, The MIT Press
This makes a nice self-balancing tree. The algorithm may be more efficient
if items are randomly selected for insertion, instead of in sort order.
I've modified the CLRS algorithm to report all overlapping nodes instead of
only the first node. This involved adding a min value instead of only a max
value in order to speed up the search by checking if the subtree min,max
overlaps with the search interval. The search routine is also recursive.
'''
#
# 2010-05-31 S. Stiegelmeyer Created
# 2014-08-21 S. stiegelmeyer Added closestNode method to find closest interval
# to non-overlapping interval
# 2014-09-29 S. Stiegelmeyer Fixed bugs in closestNode method and removed
# rightChild method
# 2017-07-15 S. Stiegelmeyer Add more document strings
import sys
# Node colours for the red-black tree invariants.
RED = 0
BLACK = 1
def overlap(x1, y1, x2, y2):
    """ Calculates if two closed intervals overlap with one another.
    Input:
       x1 - min value of interval 1
       y1 - max value of interval 1
       x2 - min value of interval 2
       y2 - max value of interval 2
    Output:
       Boolean indicating overlap (True) or no overlap (False)
    """
    # Two intervals intersect iff each starts no later than the other ends.
    return x1 <= y2 and x2 <= y1
def traverseInOrder(node):
    """ In Order traversal of tree. Should see list from smallest to largest
    by x value.
    Input:
       node - node to begin traversal
    Output:
       intervals are printed out.
    """
    if node.eiTree is not None:
        traverseInOrder(node.eiTree)
    if node.left is not None:
        traverseInOrder(node.left)
    # Bug fix: file.write() requires a string; passing the node object
    # raised TypeError. Convert explicitly via str().
    sys.stdout.write(str(node))
    if node.right is not None:
        traverseInOrder(node.right)
def searchTSS(node, pt):
    """Routine for closest transcription start site (TSS).
    Don't use. Not finished.

    Greedy walk: at each node, move toward the child whose tss is closer to
    pt than the current node's tss, and stop when the current node is the
    local minimum.  NOTE(review): a greedy descent like this is not
    guaranteed to find the global closest TSS in the tree -- presumably why
    the author marked it unfinished.

    Input:
        node - node to begin the walk (must not be None)
        pt   - query coordinate
    Output:
        (node.name, minval) for the node where the walk stopped, where
        minval is the absolute distance from pt to that node's tss.
    """
    found = False
    while not found:
        if node.left is not None and node.right is not None:
            # Distances from pt to the left child, right child and current
            # node; list.index() returns the FIRST minimum, so ties prefer
            # the left child, then the right child.
            l = abs(node.left.tss-pt)
            r = abs(node.right.tss-pt)
            p = abs(node.tss-pt)
            minlist = [l, r, p]
            minval = min(l, r, p)
            index = minlist.index(minval)
            if index == 0:
                node = node.left
            elif index == 1:
                node = node.right
            else:
                found = True
        elif node.left is not None:
            # Only a left child: descend only if it is strictly closer.
            l = abs(node.left.tss-pt)
            p = abs(node.tss-pt)
            minval = min(l, p)
            if l < p:
                node = node.left
            else:
                found = True
        elif node.right is not None:
            # Only a right child: descend only if it is strictly closer.
            r = abs(node.right.tss-pt)
            p = abs(node.tss-pt)
            minval = min(r, p)
            if r < p:
                node = node.right
            else:
                found = True
        else:
            # Leaf node: the walk ends here.
            minval = abs(node.tss-pt)
            found = True
    return (node.name, minval)
def deleteTree(node):
    '''
    Given a root node, recursively delete the interval tree.
    Input:
        node - initially the root node
    Output:
        none
    '''
    # Recurse into the nested exon/intron tree first, then both children,
    # mirroring the original traversal order.
    for child in (node.eiTree, node.left, node.right):
        if child is not None:
            deleteTree(child)
    # Unbind the local reference; actual reclamation is up to the GC.
    del node
def searchTree(node, mi, ma):
    """ Find all nodes which overlap with the input interval.
    Input:
        node - node to begin traversal, usually the root
        mi - minimum value of interval
        ma - maximum value of interval
    Output:
        list of nodes which overlap with input interval.
    """
    hits = []
    # Only descend into a subtree when its (min, max) envelope intersects
    # the query interval; the overlap test is inlined here.
    left = node.left
    if left is not None and mi <= left.max and left.min <= ma:
        hits.extend(searchTree(left, mi, ma))
    if mi <= node.y and node.x <= ma:
        hits.append(node)
    right = node.right
    if right is not None and mi <= right.max and right.min <= ma:
        hits.extend(searchTree(right, mi, ma))
    return hits
def closestNode(node, mi, ma):
    """ Find the node that is closest to the input interval
    Input:
        node - node to begin traversal, usually the root
        mi - minimum value of interval
        ma - maximum value of interval
    Output:
        closest node, None if nodes overlap

    NOTE(review): if called with node=None, the loop body never runs and
    `prev` is unbound below -- callers apparently always pass a real root;
    confirm before relying on this with an empty tree.
    """
    ret = None
    # Phase 1: walk down toward the query interval.  Stop (prev=None) as
    # soon as any node overlaps the query; otherwise remember the last
    # visited node in `prev` before falling off the tree.
    while node is not None:
        if overlap(mi, ma, node.x, node.y):
            node = None
            prev = None
        elif node.left is not None and \
                overlap(mi, ma, node.left.min, node.left.max):
            prev = node
            node = node.left
        elif node.right is not None and \
                overlap(mi, ma, node.right.min, node.right.max):
            prev = node
            node = node.right
        else:
            prev = node
            node = None
    # Phase 2: `prev` is the nearest non-overlapping ancestor found; decide
    # between `prev` itself and the best candidate in the subtree on the
    # query's side, comparing gap distances a (to prev) and b (to candidate).
    if prev is not None:
        if prev.x > ma:
            # Query lies entirely left of prev: get max node in left tree
            if prev.left is not None:
                if ma < prev.left.min:
                    node = prev.left.minimumNode()
                else:
                    node = prev.left.maximumNode()
                a = prev.x - ma    # gap from query end to prev's start
                b = mi - node.y    # gap from candidate's end to query start
                if a < b:
                    ret = prev
                else:
                    ret = node
            else:
                ret = prev
        else:  # Query lies right of prev: get min node in right tree
            if prev.right is not None:
                if mi > prev.right.max:
                    node = prev.right.maximumNode()
                else:
                    node = prev.right.minimumNode()
                a = mi-prev.y      # gap from prev's end to query start
                b = node.x-ma      # gap from query end to candidate's start
                if a < b:
                    ret = prev
                else:
                    ret = node
            else:
                ret = prev
    return ret
def searchGeneName(node, name):
    """ Find node or nodes which match input gene name.
    Input:
        node - node to begin traversal, usually the root
        name - name to match with node.name
    Output:
        list of all nodes which match name
    """
    # Exhaustive pre-order scan: the tree is ordered by coordinates, not by
    # name, so every node must be visited.
    matches = [node] if node.name == name else []
    for child in (node.left, node.right):
        if child is not None:
            matches.extend(searchGeneName(child, name))
    return matches
class Tree:
    """Interval-tree node implemented on a red-black tree (CLRS ch. 13/14).

    Each node stores a closed interval [x, y] plus the min/max interval
    bounds of its whole subtree, which lets searches prune subtrees.  The
    remaining attributes carry gene-annotation payload (tss/tes, CDS bounds,
    names, strand, optional nested exon/intron tree).
    """
    def __init__(self,
                 mi=0,
                 ma=0,
                 tss=0,
                 tes=0,
                 strand="-",
                 name="",
                 name2="",
                 cdsS=0,
                 cdsE=0,
                 eTree=None,
                 eType=0,
                 misc=None):
        self.x = mi            # interval start
        self.y = ma            # interval end
        self.max = ma          # max interval end in this subtree
        self.min = mi          # min interval start in this subtree
        self.strand = strand
        self.name = name
        self.name2 = name2
        self.tss = tss         # transcription start site
        self.tes = tes         # transcription end site
        self.cdsStart = cdsS
        self.cdsEnd = cdsE
        self.exontype = eType
        self.eiTree = eTree    # optional nested exon/intron subtree
        self.color = RED       # new nodes start red per red-black insertion
        self.left = None
        self.right = None
        self.p = None          # parent pointer
        # Bug fix: the old default `misc=[]` was a shared mutable default --
        # every node created without an explicit misc argument shared the
        # SAME list object.  Passing an explicit list behaves as before.
        self.misc = misc if misc is not None else []
    def __repr__(self):
        return "({0.x!r},{0.y!r}),({0.cdsStart!r},{0.cdsEnd!r}),\
{0.exontype!r}".format(self)
    def __str__(self):
        return "({0.x!r}, {0.y!r}, {0.max!r}) {0.color!r} {0.name!r} \
{0.strand!r} {0.exontype!r}".format(self)
    def insertTree(self, root):
        """ Insert node in tree given the root node
        Input:
            root - root of tree
        Output:
            root
        """
        nodesave = None
        node = root
        # Standard BST descent ordered by x, with ties broken so that equal
        # x / larger-or-equal y goes left.
        while node is not None:
            nodesave = node
            if self.x < node.x or (self.x == node.x and self.y >= node.y):
                node = node.left
            else:
                node = node.right
        self.p = nodesave
        if nodesave is None:
            root = self
        elif self.x < nodesave.x or \
                (self.x == nodesave.x and self.y >= nodesave.y):
            nodesave.left = self
        else:
            nodesave.right = self
        self.left = None
        self.right = None
        self.color = RED
        # Propagate subtree min/max bounds up to the root, then rebalance.
        self.updateMax()
        self.updateMin()
        root = self.insertFixup(root)
        return root
    def insertFixup(self, root):
        """ Routine to ensure the tree is balanced
        Input:
            root - root of tree
        Output:
            root
        """
        # CLRS RB-INSERT-FIXUP: repair red-red violations up the tree.
        z = self
        while z is not None and z.p is not None and z.p.color == RED:
            if z.p.p is not None and z.p == z.p.p.left:
                y = z.p.p.right
                if y is not None and y.color == RED:
                    # Case 1: red uncle -- recolor and move up.
                    z.p.color = BLACK
                    y.color = BLACK
                    z.p.p.color = RED
                    z = z.p.p
                else:
                    if z == z.p.right:
                        # Case 2: transform into case 3 via left rotation.
                        z = z.p
                        root = z.leftRotate(root)
                    # Case 3: recolor and rotate grandparent right.
                    z.p.color = BLACK
                    z.p.p.color = RED
                    root = z.p.p.rightRotate(root)
            elif z.p.p is not None and z.p == z.p.p.right:
                y = z.p.p.left
                # sentinal pointer is always black
                if y is not None and y.color == RED:
                    z.p.color = BLACK
                    y.color = BLACK
                    z.p.p.color = RED
                    z = z.p.p
                else:
                    if z == z.p.left:
                        z = z.p
                        root = z.rightRotate(root)
                    z.p.color = BLACK
                    z.p.p.color = RED
                    root = z.p.p.leftRotate(root)
        root.color = BLACK
        return root
    def maximum(self):
        """ Calculate the maximum of (self.y, self.left.max, self.right.max)

        NOTE(review): a missing child contributes 0, which assumes all
        coordinates are non-negative -- confirm for the intended input data.
        """
        t1 = self.y
        t2 = self.left.max if self.left is not None else 0
        t3 = self.right.max if self.right is not None else 0
        if t2 > t1:
            t1 = t2
        if t3 > t1:
            t1 = t3
        self.max = t1
    def minimum(self):
        """ Calculate the minimum of (self.x, self.left.min, self.right.min)
        """
        t1 = self.x
        t2 = self.left.min if self.left is not None else self.x
        t3 = self.right.min if self.right is not None else self.x
        if t2 < t1:
            t1 = t2
        if t3 < t1:
            t1 = t3
        self.min = t1
    def updateMax(self):
        """ Updates the max value for all parent nodes
        """
        tmp = self
        while tmp is not None:
            tmp.maximum()
            tmp = tmp.p
    def updateMin(self):
        """ Updates the min value for all parent nodes
        """
        tmp = self
        while tmp is not None:
            tmp.minimum()
            tmp = tmp.p
    def leftRotate(self, root):
        """ Routine to help maintain tree balance by performing a left rotation
        Input:
            root - root of tree
        Output:
            root
        """
        node = self.right           # x = self; y=x.right
        self.right = node.left      # x.right=y.left
        if node.left is not None:
            node.left.p = self      # parent pointers y.left.p=x
        node.p = self.p             # y.p = x.p
        if self.p is None:
            root = node             # root = y
        elif self == self.p.left:   # x == x.p.left
            self.p.left = node      # x.p.left = y
        else:
            self.p.right = node     # x.p.right = y
        node.left = self            # y.left = x
        self.p = node               # x.p = y
        # Recompute interval bounds bottom-up: first the demoted node
        # (self), then its new parent.
        self.maximum()
        self.minimum()
        node.maximum()
        node.minimum()
        return root
    def rightRotate(self, root):
        """ Routine to help maintain tree balance by performing a right rotation
        Input:
            root - root of tree
        Output:
            root
        """
        node = self.left
        self.left = node.right
        if node.right is not None:
            node.right.p = self
        node.p = self.p
        if self.p is None:
            root = node
        elif self.p.left == self:
            self.p.left = node
        else:
            self.p.right = node
        node.right = self
        self.p = node
        # Recompute interval bounds bottom-up (mirror of leftRotate).
        self.maximum()
        self.minimum()
        node.maximum()
        node.minimum()
        return root
    def maximumNode(self):
        '''
        Find the node with the maximum value
        Input:
            self - tree node
        Output:
            maximum node
        '''
        x = self
        while x.right is not None:
            x = x.right
        return x
    def minimumNode(self):
        '''
        Find the node with the minimum value
        Input:
            self - tree node
        Output:
            minimum node
        '''
        x = self
        while x.left is not None:
            x = x.left
        return x
    def successorNode(self):
        '''Find the successor node'''
        x = self
        if x.right is not None:
            return x.right.minimumNode()
        # No right subtree: climb until we come up from a left child.
        y = x.p
        while y is not None and x == y.right:
            x = y
            y = y.p
        return y
    def deleteNode(self, root):
        """ Delete node from tree given the root node
        ***This does not work***
        Input:
            self: node to delete
            root: root of tree
        Output:
            root
        """
        # NOTE(review): kept as-is -- the author flags this as broken above;
        # do not rely on it without fixing and testing deletion.
        z = self
        if z.left is None or z.right is None:
            y = z
        else:
            y = z.successorNode()
        if y.left is not None:
            x = y.left
        else:
            x = y.right
        if x is not None:
            x.p = y.p
        if y.p == z:
            u = y
        else:
            u = y.p
        if y.p is None:
            root = x
        else:
            if y == y.p.left:
                y.p.left = x
                y.p.minimum()
                y.p.maximum()
            else:
                y.p.right = x
                y.p.minimum()
                y.p.maximum()
        savcolor = y.color
        if y != z:
            if z.p is None:
                root = y
            if z.p is not None and z.p.left == z:
                z.p.left = y
            elif z.p is not None and z.p.right == z:
                z.p.right = y
            y.p = z.p
            y.color = z.color
            y.left = z.left
            if y.left is not None:
                y.left.p = y
            y.right = z.right
            if y.right is not None:
                y.right.p = y
            y.minimum()
            y.maximum()
        if savcolor == BLACK:
            if x is not None:
                root = x.deleteFixup(root, False)
            # if u is none then there are no nodes in the tree
            elif u is not None:
                # x's "parent" is y's old parent
                root = u.deleteFixup(root, True)
        del(z)
        if u is not None:
            u.updateMax()
            u.updateMin()
        return root
    def deleteFixup(self, root, sentinel):
        '''Routine to ensure the tree is balanced after removing a node

        When `sentinel` is True, the removed child position is a nil
        (None) node and `self` is its parent; otherwise `self` is the
        replacement node itself.  Follows CLRS RB-DELETE-FIXUP with None
        standing in for the black sentinel.
        '''
        if sentinel:
            x = None
            parent = self
        else:
            x = self
            parent = x.p
        while x != root and (x is None or x.color == BLACK):
            if x == parent.left:
                w = parent.right
                if w is not None and w.color == RED:
                    # Case 1: red sibling -- rotate to get a black sibling.
                    w.color = BLACK
                    parent.color = RED
                    root = parent.leftRotate(root)
                    w = parent.right
                if (w is not None) and \
                        (w.left is None or w.left.color == BLACK) and \
                        (w.right is None or w.right.color == BLACK):
                    # Case 2: sibling with two black children -- recolor.
                    w.color = RED
                    x = parent
                    parent = x.p
                else:
                    if (w is not None) and \
                            (w.right is None or w.right.color == BLACK):
                        # Case 3: convert to case 4.
                        if w.left is not None:
                            w.left.color = BLACK
                        w.color = RED
                        root = w.rightRotate(root)
                        w = parent.right
                    # Case 4: final recolor + rotation terminates the loop.
                    if w is not None:
                        w.color = parent.color
                    parent.color = BLACK
                    if w is not None:
                        w.right.color = BLACK
                    root = parent.leftRotate(root)
                    x = root
            elif x == parent.right:
                # Mirror image of the branch above.
                w = parent.left
                if w is not None and w.color == RED:
                    w.color = BLACK
                    parent.color = RED
                    root = parent.rightRotate(root)
                    w = parent.left
                if (w is not None) and \
                        (w.left is None or w.left.color == BLACK) and \
                        (w.right is None or w.right.color == BLACK):
                    w.color = RED
                    x = parent
                    parent = x.p
                else:
                    if (w is not None) and \
                            (w.left is None or w.left.color == BLACK):
                        if w.right is not None:
                            w.right.color = BLACK
                        w.color = RED
                        root = w.leftRotate(root)
                        w = parent.left
                    if w is not None:
                        w.color = parent.color
                    parent.color = BLACK
                    if w is not None:
                        w.left.color = BLACK
                    root = parent.rightRotate(root)
                    x = root
        if x is not None:
            x.color = BLACK
        return root
    def prevNode(self):
        '''Previous node in tree

        NOTE(review): the two elif/else parent branches assign the same
        value; the trailing while-loop then climbs past ancestors with a
        larger x.  Looks intentional but worth verifying against an
        in-order traversal.
        '''
        node = None
        if self.left is not None:
            node = self.left.maximumNode()
        if self.left is None and self.p is not None and self.p.right == self:
            node = self.p
        elif self.left is None and self.p is not None and self.p.left == self:
            node = self.p
            while node is not None and node.x > self.x:
                node = node.p
        return node
    def nextNode(self):
        '''Next node in tree

        NOTE(review): mirror of prevNode; see the caveat there.
        '''
        node = None
        if self.right is not None:
            node = self.right.minimumNode()
        if self.right is None and self.p is not None and self.p.left == self:
            node = self.p
        elif self.right is None and \
                self.p is not None and self.p.right == self:
            node = self.p
            while node is not None and node.y < self.y:
                node = node.p
        return node
|
|
"""Testing configuration from CLI, env-vars and YAML files."""
from __future__ import absolute_import
import unittest
from collections import OrderedDict
import yaml
import mock
import pathlib2
from future.builtins import str
from rotest.common.config import Option, get_configuration, search_config_file
class CommandLineTest(unittest.TestCase):
    """Tests for options supplied on the command line."""

    def test_command_line_option_with_space(self):
        """Test a command line option of the form '--option value'."""
        schema = {"target": Option(command_line_options=["--option"])}
        argv = ["script.py", "--option value"]
        result = get_configuration(configuration_schema=schema,
                                   command_line_options=argv)
        self.assertEqual(result, {"target": "value"})

    def test_command_line_option_with_equation(self):
        """Test a command line option of the form '--option=value'."""
        schema = {"target": Option(command_line_options=["--option"])}
        argv = ["script.py", "--option=value"]
        result = get_configuration(configuration_schema=schema,
                                   command_line_options=argv)
        self.assertEqual(result, {"target": "value"})

    def test_command_line_options_overriding(self):
        """Assert that the last used command line option takes precedence."""
        option = Option(command_line_options=["--option1", "--option2"])
        schema = {"target": option}
        argv = ["script.py", "--option1=value1", "--option2", "value2"]
        result = get_configuration(configuration_schema=schema,
                                   command_line_options=argv)
        self.assertEqual(result, {"target": "value2"})
class EnvironmentVariableTest(unittest.TestCase):
    """Tests for options supplied via environment variables."""

    def test_environment_variables(self):
        """Test two targets derived from two different env-vars."""
        schema = {"target1": Option(environment_variables=["ENVIRON1"]),
                  "target2": Option(environment_variables=["ENVIRON2"])}
        configuration = get_configuration(
            configuration_schema=schema,
            environment_variables={"ENVIRON1": "value1",
                                   "ENVIRON2": "value2"})
        self.assertEqual(configuration, {"target1": "value1",
                                         "target2": "value2"})

    def test_command_line_prioritized_over_environment_variable(self):
        """Assert that a command line option takes precedence over env-var."""
        # Bug fix: the option is now registered as "--option" (matching
        # every other CLI test in this module) and the two sources supply
        # *different* values -- previously both supplied "value", so the
        # assertion could not fail even if the precedence order was wrong.
        schema = {"target": Option(command_line_options=["--option"],
                                   environment_variables=["ENVIRON"])}
        configuration = get_configuration(
            configuration_schema=schema,
            command_line_options=["script.py", "--option=cli-value"],
            environment_variables={"ENVIRON": "env-value"})
        self.assertEqual(configuration, {"target": "cli-value"})

    def test_first_environment_variable_wins(self):
        """Assert that the first defined source env-var takes precedence."""
        schema = {"target": Option(environment_variables=["ENVIRON1",
                                                          "ENVIRON2"])}
        configuration = get_configuration(
            configuration_schema=schema,
            environment_variables=OrderedDict([("ENVIRON2", "value2"),
                                               ("ENVIRON1", "value1")]))
        self.assertEqual(configuration, {"target": "value1"})
class ConfigFileTest(unittest.TestCase):
    """Tests for options read from the YAML config file and for locating
    the config file on disk."""

    def test_configuration_file(self):
        """Test target derived from the configuration file."""
        schema = {"target": Option(config_file_options=["option"])}
        configuration = get_configuration(
            configuration_schema=schema,
            config_content="rotest:\n    option: value")
        self.assertEqual(configuration, {"target": "value"})
    def test_environment_variable_prioritized_over_configuration_file(self):
        """Assert that an env-var takes precedence over configuration file."""
        schema = {"target": Option(environment_variables=["ENVIRON"],
                                   config_file_options=["option"])}
        configuration = get_configuration(
            configuration_schema=schema,
            environment_variables={"ENVIRON": "value1"},
            config_content="rotest:\n    option: value2")
        self.assertEqual(configuration, {"target": "value1"})
    def test_file_configuration_prioritized_over_default_value(self):
        """Assert that the config file takes precedence over default value."""
        schema = {"target": Option(config_file_options=["option"],
                                   default_value="default")}
        configuration = get_configuration(
            configuration_schema=schema,
            config_content="rotest:\n    option: value")
        self.assertEqual(configuration, {"target": "value"})
    def test_first_configuration_file_option_wins(self):
        """Assert the first option in the config file takes precedence."""
        schema = {"target": Option(config_file_options=["option1", "option2"])}
        config_content = "rotest:\n    option2: value2\n    option1: value1"
        configuration = get_configuration(
            configuration_schema=schema,
            config_content=config_content)
        self.assertEqual(configuration, {"target": "value1"})
    def test_configuration_parsing_error(self):
        """Assert error when the configuration file isn't in YAML format."""
        schema = {"target": Option()}
        with self.assertRaises(yaml.YAMLError):
            get_configuration(configuration_schema=schema,
                              config_content="][")
    def test_rotest_section_not_preset(self):
        """Assert not using the config file if there is no 'rotest' section."""
        schema = {"target": Option(config_file_options=["option"],
                                   default_value="default")}
        configuration = get_configuration(
            configuration_schema=schema,
            config_content="other:\n    key: value")
        self.assertEqual(configuration, {"target": "default"})
    # The following tests patch os.path so search_config_file() sees a fake
    # filesystem; pathlib2 keeps the fake paths platform-correct.
    @mock.patch("os.path.abspath",
                return_value=str(pathlib2.Path("/home/user/project/")))
    @mock.patch("os.path.isfile", return_value=False)
    def test_config_file_cannot_be_found(self, *_args):
        """Test non-existing config file scenario."""
        self.assertEqual(search_config_file(), None)
    @mock.patch("os.path.abspath",
                return_value=str(pathlib2.Path("/home/user/project/")))
    @mock.patch(
        "os.path.isfile",
        side_effect=lambda path:
        path == str(pathlib2.Path("/home/user/project/.rotest.yml")))
    def test_finding_configuration_file_on_current_directory(self, *_args):
        """Test finding the config file in the direct ancestor."""
        self.assertEqual(search_config_file(),
                         str(pathlib2.Path("/home/user/project/.rotest.yml")))
    @mock.patch(
        "os.path.abspath",
        return_value=str(pathlib2.Path("/home/user/project/sub1/sub2/")))
    @mock.patch(
        "os.path.isfile",
        side_effect=lambda path:
        path == str(pathlib2.Path("/home/user/project/.rotest.yml")))
    def test_finding_configuration_file_on_ancestor_directories(self, *_args):
        """Test finding the config file in the non-direct ancestor."""
        self.assertEqual(search_config_file(),
                         str(pathlib2.Path("/home/user/project/.rotest.yml")))
class EdgeCaseTest(unittest.TestCase):
    """Tests for fallback behavior when no configuration source matches."""

    def test_default_value(self):
        """Assert a default value is chosen if there is no other option."""
        schema = {"target": Option(default_value="value")}
        result = get_configuration(configuration_schema=schema,
                                   command_line_options=["script.py"])
        self.assertEqual(result, {"target": "value"})

    def test_no_value_given(self):
        """Assert that None is received if there is no default value."""
        schema = {"target": Option(command_line_options=["value"])}
        result = get_configuration(configuration_schema=schema,
                                   command_line_options=["script.py"])
        self.assertEqual(result, {"target": None})
|
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CLI tests."""
from __future__ import absolute_import, print_function
import uuid
from click.testing import CliRunner
from flask.cli import ScriptInfo
from invenio_pidstore.cli import pid as cmd
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
def test_pid_creation(app, db):
    """Test pid creation."""
    runner = CliRunner()
    script_info = ScriptInfo(create_app=lambda info: app)

    def invoke(args):
        # Shorthand: every invocation targets the same command and context.
        return runner.invoke(cmd, args, obj=script_info)

    with runner.isolated_filesystem():
        with app.app_context():
            assert PersistentIdentifier.query.count() == 0

        result = invoke(['create', 'doi', '10.1234/foo'])
        assert result.exit_code == 0

        with app.app_context():
            assert PersistentIdentifier.query.count() == 1
            pid = PersistentIdentifier.get('doi', '10.1234/foo')
            assert pid.pid_type == 'doi'
            assert pid.pid_value == '10.1234/foo'
            assert pid.pid_provider is None
            assert pid.status == PIDStatus.NEW
            assert pid.object_type is None
            assert pid.object_uuid is None

        rec_uuid = uuid.uuid4()
        # Bad parameter status:
        result = invoke(['create', 'recid', '2', '--status', 'BADPARAMETER',
                         '--type', 'rec', '--uuid', str(rec_uuid)])
        assert result.exit_code == 2
        # Any or both type and uuid must be defined:
        result = invoke(['create', 'recid', '2', '--type', 'rec'])
        assert result.exit_code == 2
        result = invoke(['create', 'recid', '2', '--uuid', str(rec_uuid)])
        assert result.exit_code == 2
        # Everything should be fine now:
        result = invoke(['create', 'recid', '2', '--status', 'REGISTERED',
                         '--type', 'rec', '--uuid', str(rec_uuid)])
        assert result.exit_code == 0

        with app.app_context():
            assert PersistentIdentifier.query.count() == 2
            pid = PersistentIdentifier.get('recid', '2')
            assert pid.pid_type == 'recid'
            assert pid.pid_value == '2'
            assert pid.pid_provider is None
            assert pid.status == PIDStatus.REGISTERED
            assert pid.object_type == 'rec'
            assert pid.object_uuid == rec_uuid

        # Can't duplicate an existing persistent identifier.
        result = invoke(['create', 'recid', '2'])
        assert result.exit_code == -1
def test_pid_assign(app, db):
    """Test pid object assignment."""
    runner = CliRunner()
    script_info = ScriptInfo(create_app=lambda info: app)

    def invoke(args):
        # Shorthand: every invocation targets the same command and context.
        return runner.invoke(cmd, args, obj=script_info)

    with runner.isolated_filesystem():
        # No assigned object yet.
        result = invoke(['create', 'doi', '10.1234/foo'])
        assert result.exit_code == 0
        with app.app_context():
            pid = PersistentIdentifier.get('doi', '10.1234/foo')
            assert not pid.has_object()
            assert pid.get_assigned_object() is None
            assert pid.get_assigned_object('rec') is None

        # Assign an object.
        rec_uuid = uuid.uuid4()
        result = invoke(['assign', 'doi', '10.1234/foo',
                         '-t', 'rec', '-i', str(rec_uuid)])
        assert result.exit_code == 0
        with app.app_context():
            pid = PersistentIdentifier.get('doi', '10.1234/foo')
            assert pid.has_object()
            assert pid.get_assigned_object() == rec_uuid
            assert pid.get_assigned_object('rec') == rec_uuid
            assert pid.get_assigned_object('oth') is None

        # Re-assigning the same object doesn't raise.
        result = invoke(['assign', 'doi', '10.1234/foo',
                         '-t', 'rec', '-i', str(rec_uuid)])
        assert result.exit_code == 0

        # Missing type or uuid is a usage error (exit code 2).
        for bad_args in (['assign', 'doi', '10.1234/foo'],
                         ['assign', 'doi', '10.1234/foo', '-t', 'rec'],
                         ['assign', 'doi', '10.1234/foo',
                          '-i', str(rec_uuid)]):
            assert invoke(bad_args).exit_code == 2

        # Assign without overwrite (uuid as str and uuid) fails.
        new_uuid = uuid.uuid4()
        result = invoke(['assign', 'doi', '10.1234/foo',
                         '-t', 'rec', '-i', str(new_uuid)])
        assert result.exit_code == -1

        # Assign with overwrite succeeds and updates status/object.
        result = invoke(['assign', 'doi', '10.1234/foo',
                         '-s', 'REGISTERED',
                         '-t', 'rec', '-i', str(new_uuid),
                         '--overwrite'])
        assert result.exit_code == 0
        with app.app_context():
            pid = PersistentIdentifier.get('doi', '10.1234/foo')
            assert pid.has_object()
            assert pid.status == PIDStatus.REGISTERED
            assert pid.get_assigned_object() == new_uuid
            assert pid.get_assigned_object('rec') == new_uuid
            assert pid.get_assigned_object('oth') is None
def test_pid_unassign(app, db):
    """Test pid object unassignment."""
    runner = CliRunner()
    script_info = ScriptInfo(create_app=lambda info: app)

    def invoke(args):
        # Shorthand: every invocation targets the same command and context.
        return runner.invoke(cmd, args, obj=script_info)

    with runner.isolated_filesystem():
        rec_uuid = uuid.uuid4()

        # Create a pid with an assigned object.
        result = invoke(['create', 'recid', '101',
                         '-t', 'rec', '-i', str(rec_uuid)])
        assert result.exit_code == 0

        result = invoke(['get', 'recid', '101'])
        assert result.exit_code == 0
        assert result.output == 'rec {0} N\n'.format(str(rec_uuid))

        result = invoke(['dereference', 'rec', str(rec_uuid)])
        assert result.exit_code == 0
        assert result.output == 'recid 101 None\n'

        result = invoke(['dereference', 'rec', str(rec_uuid), '-s', 'NEW'])
        assert result.exit_code == 0
        assert result.output == 'recid 101 None\n'

        with app.app_context():
            pid = PersistentIdentifier.get('recid', '101')
            assert pid.has_object()
            assert pid.get_assigned_object() == rec_uuid
            assert pid.get_assigned_object('rec') == rec_uuid

        # Unassign the object.
        result = invoke(['unassign', 'recid', '101'])
        assert result.exit_code == 0

        with app.app_context():
            pid = PersistentIdentifier.get('recid', '101')
            assert not pid.has_object()
            assert pid.get_assigned_object() is None
            assert pid.get_assigned_object('rec') is None
|
|
#!/usr/bin/python
# Licensed to Paul Querna under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# libcloud.org licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HUDSON_ROOT = "http://hudson.zones.apache.org/hudson/job/Cassandra/"
JSON_API = HUDSON_ROOT + "lastSuccessfulBuild/api/json"
ARTIFACT_ROOT = HUDSON_ROOT + "lastSuccessfulBuild/artifact"
import cassconf
import paramiko
from mako.template import Template
from libcloud.types import Provider
from libcloud.providers import get_driver
from libcloud.deployment import SSHKeyDeployment
import os
from os.path import join as pjoin
import urllib2
import tempfile
import shutil
try:
import simplejson as json
except ImportError:
import json
from threading import Thread
ROOT_DIR = os.path.split(os.path.abspath(__file__))[0]
def log(msg):
    """Print a progress message to stdout.

    Bug fix: the parameter was named `str`, shadowing the builtin.
    """
    print(msg)
def get_hudson_data():
    """Fetch the last successful build's metadata from Hudson as a dict."""
    raw = urllib2.urlopen(JSON_API).read()
    return json.loads(raw)
# only tested on rackspace, should be trivial to port!
def get_libcloud_driver():
    """Return an authenticated libcloud driver (Rackspace provider)."""
    driver_cls = get_driver(Provider.RACKSPACE)
    return driver_cls(cassconf.USERNAME, cassconf.SECRET)
def artifact_buildnumber(hud):
    """Return the build number from the Hudson metadata dict *hud*."""
    number = hud["number"]
    return number
def artifact_url(hud):
    """Return the download URL of the first '-bin' artifact in *hud*."""
    artifacts = [x["relativePath"] for x in hud["artifacts"]]
    binaries = [path for path in artifacts if "-bin" in path]
    return ARTIFACT_ROOT + "/" + binaries[0]
def get_artifact(tempdir, url):
    """Download the build artifact at *url* into *tempdir*.

    Input:
        tempdir - directory to save into
        url     - artifact download URL
    Output:
        local path of the downloaded file

    Bug fix: the artifact is a binary tarball, so it must be written in
    binary mode ('wb', not 'w'); 'with' also guarantees the handle is
    closed even if the download raises.
    """
    tarball = url[url.rfind("/")+1:]
    localpath = pjoin(tempdir, tarball)
    log("Downloading build from %s to %s" % (url, localpath))
    with open(localpath, 'wb') as fp:
        fp.write(urllib2.urlopen(url).read())
    return localpath
def boot_master(driver, pubkey):
    """Boot the benchmark master node and install *pubkey* for SSH access."""
    loc = driver.list_locations()[0]
    # Master gets the 512MB flavor; pick the first matching size/image.
    size = [s for s in driver.list_sizes(loc) if s.ram == 512][0]
    image = [i for i in driver.list_images(loc) if i.name.find("karmic") != -1][0]
    log("booting master machine with %s on %s size node" % (image.name, size.name))
    deployment = SSHKeyDeployment(pubkey)
    node = driver.deploy_node(name="cbench-master.querna.org",
                              location=loc, image=image, size=size,
                              deploy=deployment)
    return node
def boot_servers(driver, count, pubkey):
    """Boot *count* Cassandra server nodes and install *pubkey* on each."""
    loc = driver.list_locations()[0]
    # Cluster nodes get the 256MB flavor.
    size = [s for s in driver.list_sizes(loc) if s.ram == 256][0]
    image = [i for i in driver.list_images(loc) if i.name.find("karmic") != -1][0]
    nodes = []
    for i in range(count):
        log("booting machine %d with %s on %s size node" % (i, image.name, size.name))
        deployment = SSHKeyDeployment(pubkey)
        node = driver.deploy_node(name="cbench%d.querna.org" % (i),
                                  location=loc, image=image, size=size,
                                  deploy=deployment)
        nodes.append(node)
    return nodes
def exec_wait(client, cmd):
    """Run *cmd* on the remote host and block until its output is drained.

    Returns a (stdout, stderr) tuple of the command's output.
    """
    peer = client.get_transport().getpeername()[0]
    log("[%s] Running `%s`" % (peer, cmd))
    stdin, stdout, stderr = client.exec_command(cmd)
    stdin.close()
    out = stdout.read()
    err = stderr.read()
    return out, err
def storage_conf(server, peers):
    """Render storage-conf.xml for *server* given its cluster *peers*."""
    context = {
        "replication_factor": min(3, len(peers) + 1),
        "peers": [p.private_ip[0] for p in peers],
        "interface": server.private_ip[0],
    }
    template = Template(filename=pjoin(ROOT_DIR, 'storage-conf.xml.mako'))
    return template.render(**context)
def master_script(servers):
    """Render the runtests.sh driver script for the master node."""
    peer_list = ",".join([s.private_ip[0] for s in servers])
    template = Template(filename=pjoin(ROOT_DIR, 'runtests.sh.mako'))
    return template.render(peers=peer_list)
def push_master_files(key, master, servers):
    """Provision the master node over SSH/SFTP, run the benchmark and
    download the result files (insert.txt / read.txt) locally.

    Input:
        key     - paramiko private key for root login
        master  - the master node (uses its first public IP)
        servers - cluster nodes, used to render the test script
    """
    conninfo = {'hostname': master.public_ip[0],
                'port': 22,
                'username': 'root',
                'pkey': key,
                'allow_agent': False,
                'look_for_keys': False}
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(**conninfo)
    try:
        sftp = client.open_sftp()
        exec_wait(client, "apt-get install -y python-virtualenv")
        sftp.put(pjoin(ROOT_DIR, "py_and_thrift-ubuntu-bin.tar.bz2"), "py_and_thrift-ubuntu-bin.tar.bz2")
        exec_wait(client, "tar -xvjf py_and_thrift-ubuntu-bin.tar.bz2 -C /")
        sftp.put(pjoin(ROOT_DIR, "stress.py"), "stress.py")
        conf = master_script(servers)
        fp = sftp.open("/root/runtest.sh", 'w')
        fp.write(conf)
        # Bug fix: chmod takes an octal mode.  The old call passed the
        # decimal integer 755 (== 0o1363), which does not produce the
        # intended rwxr-xr-x permissions; 0o755 does.
        fp.chmod(0o755)
        fp.close()
        log("Starting tests...")
        exec_wait(client, "/root/runtest.sh")
        sftp.get("/root/insert.txt", "insert.txt")
        sftp.get("/root/read.txt", "read.txt")
    finally:
        client.close()
class pusher_thread(Thread):
    """Thread that provisions one Cassandra server node over SSH/SFTP:
    uploads the build tarball and dependencies, writes a rendered
    storage-conf.xml, installs a JDK and starts Cassandra."""
    def __init__(self, key, s, local, servers):
        Thread.__init__(self)
        self.key = key          # paramiko private key for root login
        self.s = s              # the node this thread provisions
        self.servers = servers  # all cluster nodes (to compute peers)
        self.local = local      # local path of the downloaded tarball
    def run(self):
        # Basename of the tarball; also its remote upload name.
        tarball = self.local[self.local.rfind("/")+1:]
        conninfo = {'hostname': self.s.public_ip[0],
                    'port': 22,
                    'username': 'root',
                    'pkey': self.key,
                    'allow_agent': False,
                    'look_for_keys': False}
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(**conninfo)
        try:
            sftp = client.open_sftp()
            sftp.put(self.local, tarball)
            exec_wait(client, "tar -xvzf %s" % (tarball))
            # Strip the trailing "-bin.tar.gz" suffix to get the extracted
            # directory name -- assumes the artifact name ends in
            # "-bin.tar.gz"; TODO confirm against the Hudson artifact.
            dname = tarball[:tarball.rfind("-")]
            sftp.symlink(dname, "cassandra")
            sftp.put(pjoin(ROOT_DIR, "ivy-shit.tar.bz2"), "ivy-shit.tar.bz2")
            exec_wait(client, "tar -xvjf ivy-shit.tar.bz2")
            # Render this node's config with every OTHER node as a peer.
            conf = storage_conf(self.s, [x for x in self.servers if x != self.s])
            fp = sftp.open("cassandra/conf/storage-conf.xml", 'w')
            fp.write(conf)
            fp.close()
            # we do need... java for this :)
            exec_wait(client, "apt-get install -y openjdk-6-jdk")
            exec_wait(client, "cd cassandra; bin/cassandra")
        finally:
            client.close()
def push_cassandra_files(key, local, servers):
    """Provision every server node in parallel, one pusher_thread each,
    and wait for all of them to finish."""
    threads = [pusher_thread(key, node, local, servers) for node in servers]
    for worker in threads:
        worker.start()
    for worker in threads:
        worker.join()
def main():
    """Drive one full benchmark run: download the latest Cassandra build,
    boot a cluster plus a master node, run the stress tests, then tear
    everything down.

    Fixes: removed an unused second fetch of JSON_API (a redundant network
    round-trip) and the unused `buildnum` local.
    """
    tempdir = tempfile.mkdtemp(prefix="cassandra-bench")
    try:
        hud = get_hudson_data()
        url = artifact_url(hud)
        local = get_artifact(tempdir, url)
        # Generate a throwaway SSH keypair for this run.
        key = paramiko.RSAKey.generate(2048)
        key.write_private_key_file(pjoin(tempdir, "id_rsa"))
        pubkey = "ssh-rsa %s cassandrabench@paul.querna.org" % (key.get_base64())
        driver = get_libcloud_driver()
        servers = boot_servers(driver, cassconf.CLUSTER_SIZE, pubkey)
        push_cassandra_files(key, local, servers)
        master = boot_master(driver, pubkey)
        push_master_files(key, master, servers)
        print(servers)
        print(master)
    finally:
        print("Cleaning up "+ tempdir)
        shutil.rmtree(tempdir)
        # Destroy every cbench* node, even on success -- the cluster is
        # throwaway; a fresh driver is used since boot may have failed early.
        log("Cleaning up any booted servers....")
        driver = get_libcloud_driver()
        [n.destroy() for n in driver.list_nodes() if n.name.find('cbench') != -1]
if __name__ == "__main__":
main()
|
|
from . import ScattergramData
from .. import db
import os
import random
from datetime import datetime
import urllib.request, json
import requests
from requests.exceptions import HTTPError
from requests.auth import HTTPBasicAuth
import plotly.tools as tools
import plotly.plotly as py
import plotly.graph_objs as go
PLOTLY_USERNAME = os.environ.get('PLOTLY_USERNAME')
PLOTLY_API_KEY = os.environ.get('PLOTLY_API_KEY')
py.sign_in(PLOTLY_USERNAME, PLOTLY_API_KEY)
auth = HTTPBasicAuth(PLOTLY_USERNAME, PLOTLY_API_KEY)
headers = {'Plotly-Client-Platform': 'python'}
class College(db.Model):
    """A college/university record.

    Columns mix locally curated data (deadlines, description, image) with
    statistics pulled from the U.S. College Scorecard API, plus the URLs of
    three Plotly scattergrams (test score vs. GPA) built from the
    ScattergramData rows submitted for this college.
    """
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, index=True)
    description = db.Column(db.String, index=True)
    cost_of_attendance = db.Column(db.Integer, index=True)
    # NOTE(review): `image` was previously declared twice; the duplicate
    # declaration has been removed (the second silently overrode the first).
    image = db.Column(db.String, index=True)
    regular_deadline = db.Column(db.Date, index=True)
    admission_rate = db.Column(db.Float, index=True)
    early_deadline = db.Column(db.Date, index=True)
    fafsa_deadline = db.Column(db.Date, index=True)
    scholarship_deadline = db.Column(db.Date, index=True)
    acceptance_deadline = db.Column(db.Date, index=True)
    # URLs of the published Plotly figures (set by update_plots).
    plot_SAT2400 = db.Column(db.String)
    plot_SAT1600 = db.Column(db.String)
    plot_ACT = db.Column(db.String)
    school_url = db.Column(db.String, index=True)
    school_size = db.Column(db.Integer, index=True)
    school_city = db.Column(db.String, index=True)
    tuition_in_state = db.Column(db.Float, index=True)
    tuition_out_of_state = db.Column(db.Float, index=True)
    cost_of_attendance_in_state = db.Column(db.Float, index=True)
    cost_of_attendance_out_of_state = db.Column(db.Float, index=True)
    room_and_board = db.Column(db.Float, index=True)
    sat_score_average_overall = db.Column(db.Float, index=True)
    act_score_average_overall = db.Column(db.Float, index=True)
    first_generation_percentage = db.Column(db.Float, index=True)
    year_data_collected = db.Column(db.String, index=True)
    race_white = db.Column(db.Float, index=True)
    race_black = db.Column(db.Float, index=True)
    race_hispanic = db.Column(db.Float, index=True)
    race_asian = db.Column(db.Float, index=True)
    race_american_indian = db.Column(db.Float, index=True)
    race_native_hawaiian = db.Column(db.Float, index=True)
    race_international = db.Column(db.Float, index=True)
    # TODO: Add college dates

    # Application outcomes, in the order their traces appear in each plot.
    _STATUSES = (
        'Accepted',
        'Denied',
        'Waitlisted/Deferred (Accepted)',
        'Waitlisted/Deferred (Denied)',
        'Waitlisted/Deferred (Withdrew App)',
    )

    def _delete_plot(self, plot_url):
        """Trash and then permanently delete a previously published Plotly
        figure, given the URL returned by py.plot().

        Bug fixes vs. the previous version: the file id is now *sliced* off
        the end of the URL (the old code indexed a single character), and the
        undefined name `username` has been replaced by PLOTLY_USERNAME.
        """
        # Everything after the last '/' of the plot URL is the file id.
        plot_num = plot_url[1 + plot_url.rfind('/'):]
        base_url = 'https://api.plot.ly/v2/files/' + PLOTLY_USERNAME + ':' + plot_num
        requests.post(base_url + '/trash', auth=auth, headers=headers)
        requests.delete(base_url + '/permanent_delete', auth=auth, headers=headers)

    def _publish_scattergram(self, data, score_attr, score_label, filename):
        """Build one score-vs-GPA scattergram and publish it to Plotly.

        data -- list of ScattergramData rows for this college
        score_attr -- attribute on each row holding the score ('SAT2400', ...)
        score_label -- human-readable score name for the plot title and x axis
        filename -- Plotly filename to publish the figure under
        Returns the URL of the published figure.
        """
        # Bucket (score, GPA) pairs by application outcome. One trace is
        # emitted per status even when its bucket is empty, matching the
        # previous behavior (legend entries always present).
        buckets = dict((status, ([], [])) for status in self._STATUSES)
        for row in data:
            score = getattr(row, score_attr)
            if score and row.status in buckets:
                scores, gpas = buckets[row.status]
                scores.append(int(score))
                gpas.append(row.GPA)
        traces = [go.Scatter(x=buckets[status][0], y=buckets[status][1],
                             mode='markers', name=status)
                  for status in self._STATUSES]
        layout = go.Layout(
            title='{}: {} vs. GPA'.format(self.name, score_label),
            xaxis=dict(title=score_label),
            yaxis=dict(title='GPA'),
        )
        fig = go.Figure(data=traces, layout=layout)
        return py.plot(fig, filename=filename, auto_open=False)

    def update_plots(self):
        """Regenerate the three admission scattergrams for this college.

        Previously published figures are deleted first so the Plotly account
        does not accumulate stale files; the new figure URLs are stored on
        the model (the caller is responsible for committing the session).
        """
        for old_plot in (self.plot_SAT2400, self.plot_SAT1600, self.plot_ACT):
            if old_plot:
                self._delete_plot(old_plot)
        data = ScattergramData.query.filter_by(college=self.name).all()
        college_filename = self.name.replace(' ', '-').lower()
        # GPA vs. SAT [2400], GPA vs. SAT [1600], GPA vs. ACT
        self.plot_SAT2400 = self._publish_scattergram(
            data, 'SAT2400', 'SAT [2400]', college_filename + '-sat2400')
        self.plot_SAT1600 = self._publish_scattergram(
            data, 'SAT1600', 'SAT [1600]', college_filename + '-sat1600')
        self.plot_ACT = self._publish_scattergram(
            data, 'ACT', 'ACT', college_filename + '-act')

    @staticmethod
    def get_college_by_name(name):
        """Return the College row whose name matches exactly, or None."""
        return College.query.filter_by(name=name).first()

    @staticmethod
    def search_college_scorecard(college):
        """Query the College Scorecard Data API for colleges matching
        ``college.name``.

        Starts from the current year and walks backwards until the API has
        data for that year; the year actually used is recorded on
        ``college.year_data_collected``.
        @return the decoded JSON response (a dict with a 'results' list)
        """
        # Encode spaces as %20 for the query string and strip commas.
        name_query = '%20'.join(college.name.split()).replace(',', '')
        # Get the current year and keep decrementing until the API returns
        # valid (HTTP 200) data for that year.
        year_num = datetime.now().year
        while True:
            try:
                year = str(year_num)
                url = ''.join([
                    'https://api.data.gov/ed/collegescorecard/v1/schools.json?school.name=',
                    name_query, '&_fields=school.name,school.city,',
                    year, '.admissions.admission_rate.overall,',
                    year, '.student.size,school.school_url,',
                    year, '.cost.attendance.academic_year,',
                    year, '.cost.tuition.in_state,',
                    year, '.cost.tuition.out_of_state,',
                    year, '.admissions.act_scores.midpoint.cumulative,',
                    year, '.student.share_firstgeneration,',
                    year, '.admissions.sat_scores.average.overall,',
                    year, '.student.demographics.race_ethnicity.white,',
                    year, '.student.demographics.race_ethnicity.black,',
                    year, '.student.demographics.race_ethnicity.hispanic,',
                    year, '.student.demographics.race_ethnicity.asian,',
                    year, '.student.demographics.race_ethnicity.aian,',
                    year, '.student.demographics.race_ethnicity.nhpi,',
                    year, '.student.demographics.race_ethnicity.non_resident_alien',
                    '&api_key=jjHzFLWEyba3YYtWiv7jaQN8kGSkMuf55A9sRsxl'])
                r = requests.get(url)
                r.raise_for_status()
                data = r.json()
            except HTTPError:
                year_num = year_num - 1
                # Scorecard data does not go back forever; fail loudly
                # instead of looping indefinitely when nothing is found.
                if year_num < 1996:
                    raise
            else:
                college.year_data_collected = year
                break
        return data

    @staticmethod
    def _pct(fraction):
        """Convert a 0-1 fraction from the API to a percentage (2 decimals)."""
        return round(fraction * 100, 2)

    @staticmethod
    def retrieve_college_info(college):
        """Fill in ``college``'s statistics from the College Scorecard API.

        Must be called after ``college.name`` has been set.  Among the search
        results, prefers the one whose name contains the query earliest;
        fields missing from the API response are left untouched.
        """
        if college.name == '':
            return
        data = College.search_college_scorecard(college)
        # If there are some colleges that match with the query
        if len(data['results']) > 0:
            # Default to the first search result returned.
            result = data['results'][0]
            first_found_idx = float("inf")
            # Prioritize colleges whose name contains the query name, and of
            # those, the one where the query appears earliest in the name.
            for r in data['results']:
                idx = r['school.name'].find(college.name)
                if idx != -1 and idx < first_found_idx:
                    first_found_idx = idx
                    result = r
            y = college.year_data_collected
            if result[y + '.admissions.admission_rate.overall'] is not None:
                college.admission_rate = College._pct(result[y + '.admissions.admission_rate.overall'])
            if result['school.school_url'] is not None:
                college.school_url = result['school.school_url']
            if result[y + '.student.size'] is not None:
                college.school_size = result[y + '.student.size']
            if result['school.city'] is not None:
                college.school_city = result['school.city']
            if result[y + '.cost.tuition.in_state'] is not None:
                college.tuition_in_state = result[y + '.cost.tuition.in_state']
            if result[y + '.cost.tuition.out_of_state'] is not None:
                college.tuition_out_of_state = result[y + '.cost.tuition.out_of_state']
            if result[y + '.cost.attendance.academic_year'] is not None:
                college.cost_of_attendance_in_state = result[y + '.cost.attendance.academic_year']
            if (result[y + '.cost.attendance.academic_year'] is not None
                    and result[y + '.cost.tuition.in_state'] is not None):
                # Room & board is approximated as in-state COA minus tuition.
                college.room_and_board = (result[y + '.cost.attendance.academic_year']
                                          - result[y + '.cost.tuition.in_state'])
            if result[y + '.cost.tuition.out_of_state'] is not None:
                # NOTE(review): relies on room_and_board being set already
                # (defaults to 0 for rows created by insert_colleges) —
                # confirm for colleges created through other paths.
                college.cost_of_attendance_out_of_state = college.tuition_out_of_state + college.room_and_board
            if result[y + '.admissions.sat_scores.average.overall'] is not None:
                college.sat_score_average_overall = result[y + '.admissions.sat_scores.average.overall']
            if result[y + '.admissions.act_scores.midpoint.cumulative'] is not None:
                college.act_score_average_overall = result[y + '.admissions.act_scores.midpoint.cumulative']
            if result[y + '.student.share_firstgeneration'] is not None:
                college.first_generation_percentage = College._pct(result[y + '.student.share_firstgeneration'])
            # Demographic shares arrive as 0-1 fractions; store percentages.
            race_fields = (
                ('race_white', '.student.demographics.race_ethnicity.white'),
                ('race_black', '.student.demographics.race_ethnicity.black'),
                ('race_hispanic', '.student.demographics.race_ethnicity.hispanic'),
                ('race_asian', '.student.demographics.race_ethnicity.asian'),
                ('race_american_indian', '.student.demographics.race_ethnicity.aian'),
                ('race_native_hawaiian', '.student.demographics.race_ethnicity.nhpi'),
                ('race_international', '.student.demographics.race_ethnicity.non_resident_alien'),
            )
            for attr, field in race_fields:
                if result[y + field] is not None:
                    setattr(college, attr, College._pct(result[y + field]))

    @staticmethod
    def insert_colleges():
        """Seed the database with a fixed set of well-known colleges.

        Existing rows (matched by exact name) are left alone; new rows get
        placeholder deadlines/descriptions/images and are then enriched via
        retrieve_college_info().
        """
        # NOTE(review): 'John Hopkins University' is likely a typo for
        # 'Johns Hopkins University' — confirm before changing, since
        # existing rows are matched by this exact name.
        college_names = {
            'University of Pennsylvania', 'Columbia University',
            'Stanford University', 'Princeton University',
            'Harvard University', 'Cornell University', 'Yale University',
            'Brown University', 'Dartmouth College', 'New York University',
            'University of California, Berkeley',
            'University of California, Los Angeles', 'University of Michigan-Ann Arbor',
            'Carnegie Mellon University', 'John Hopkins University',
            'University of Chicago', 'Amherst College', 'Williams College',
            'Massachusetts Institute of Technology',
            'Georgia Institute of Technology',
            'California Institute of Technology', 'Duke University'
        }
        early_deadlines = [
            datetime(2017, 11, 4),
            datetime(2017, 11, 3),
            datetime(2017, 10, 26),
            datetime(2017, 11, 1),
            datetime(2017, 11, 11),
            datetime(2017, 11, 13),
            datetime(2017, 10, 29)
        ]
        regular_deadlines = [
            datetime(2017, 12, 31),
            datetime(2017, 1, 1),
            datetime(2017, 1, 2),
            datetime(2017, 1, 3),
            datetime(2017, 1, 5),
            datetime(2017, 2, 1),
            datetime(2017, 1, 14)
        ]
        fafsa_deadline = [
            datetime(2017, 12, 31),
            datetime(2017, 1, 1),
            datetime(2017, 1, 2),
            datetime(2017, 1, 3),
            datetime(2017, 1, 5),
            datetime(2017, 2, 1),
            datetime(2017, 1, 14)
        ]
        acceptance_deadline = [
            datetime(2017, 12, 31),
            datetime(2017, 1, 1),
            datetime(2017, 1, 2),
            datetime(2017, 1, 3),
            datetime(2017, 1, 5),
            datetime(2017, 2, 1),
            datetime(2017, 1, 14)
        ]
        scholarship_deadlines = [
            datetime(2017, 12, 31),
            datetime(2017, 1, 1),
            datetime(2017, 1, 2),
            datetime(2017, 1, 3),
            datetime(2017, 1, 5),
            datetime(2017, 2, 1),
            datetime(2017, 1, 14)
        ]
        descriptions = [
            'Private research university', 'Ivy League university',
            'Liberal arts college', 'Public research university',
            'Private doctorate university'
        ]
        images = [
            'http://www.collegerank.net/wp-content/uploads/2015/08/morehouse-college-quad.jpg',
            'https://static1.squarespace.com/static/52f11228e4b0a96c7b51a92d/t/55e705bee4b03fc234f02b5e/1441203647587/'
        ]
        for c in college_names:
            college = College.get_college_by_name(c)
            if college is None:
                college = College(
                    name=c,
                    admission_rate=0,
                    description=random.choice(descriptions),
                    regular_deadline=random.choice(regular_deadlines),
                    early_deadline=random.choice(early_deadlines),
                    fafsa_deadline=random.choice(fafsa_deadline),
                    acceptance_deadline=random.choice(acceptance_deadline),
                    school_url="",
                    school_size=0,
                    school_city="",
                    tuition_in_state=0,
                    tuition_out_of_state=0,
                    cost_of_attendance_in_state=0,
                    cost_of_attendance_out_of_state=0,
                    room_and_board=0,
                    sat_score_average_overall=0,
                    act_score_average_overall=0,
                    first_generation_percentage=0,
                    year_data_collected="",
                    race_white=0,
                    race_black=0,
                    race_hispanic=0,
                    race_asian=0,
                    race_american_indian=0,
                    race_native_hawaiian=0,
                    race_international=0,
                    scholarship_deadline=random.choice(scholarship_deadlines),
                    image=random.choice(images))
                College.retrieve_college_info(college)
                db.session.add(college)
        db.session.commit()
        # TODO: apply the same enrichment in the add-college flow
        # (counselor/views.py).

    def __repr__(self):
        return '<College: {}>'.format(self.name)
|
|
"""
This script first erases all the files in a target directory, and then
copies the necessary files to run Repy into it. Afterwards, the .mix
files in the target directory are run through the preprocessor.
The target directory that is passed to the script must exist. It is
emptied before files are copied over.
It is assumed that you have checked out all the required repos of
SeattleTestbed into the parent directory of this script.
NOTE WELL: The repositories are used as-is. No attempt is made to switch
to a specific branch, pull from remotes, etc.
(In a future version of this script, the currently active branch
for each repo will be displayed as a visual reminder of this fact.)
<Usage>
preparetest.py [-t] [-v] [-c] [-r] <target_directory>
-t or --testfiles copies in all the files required to run the unit tests
-v or --verbose displays significantly more output on failure to process
a mix file
-c or --checkapi copies the checkapi source files
-r or --randomports replaces the default ports of 12345, 12346, and 12347
with three random ports between 52000 and 53000.
<Example>
Put the Repy runtime and unit test files into a temporary dir,
and run the unit tests for module "repyv2api" there.
user@vm:seattle$ cd dist
user@vm:dist$ mkdir /tmp/test
user@vm:dist$ python preparetest.py -t /tmp/test
user@vm:dist$ cd /tmp/test
user@vm:test$ python utf.py -m repyv2api
"""
import os
import sys
import glob
import random
import shutil
import optparse
import subprocess
# Make testportfiller importable from its home at ../repy_v1/tests
sys.path.insert(0, os.path.join(os.path.dirname(os.getcwd()), "repy_v1", "tests"))
import testportfiller
# Remove the temporary path entry again so later imports are unaffected
sys.path = sys.path[1:]
def copy_to_target(file_expr, target):
    """Copy files matching the glob ``file_expr`` into directory ``target``.

    The glob is resolved relative to the current working directory (unless
    it is absolute) and the target directory must already exist.  Entries
    matched by the glob that are not plain files (directories) are skipped.
    A warning is printed when nothing matches at all.
    """
    files_to_copy = glob.glob(file_expr)
    if not files_to_copy:
        print("WARNING: File expression '" + file_expr + "' does not match any files. Maybe the directory is empty, or the file / directory doesn't exist?")
    for file_path in files_to_copy:
        if os.path.isfile(file_path):
            # os.path.join instead of manual "/" concatenation, so a target
            # with or without a trailing separator works on every platform.
            shutil.copyfile(file_path, os.path.join(target, os.path.basename(file_path)))
def copy_tree_to_target(source, target, ignore=None):
    """Recursively copy the directory ``source`` into ``target``.

    If ``ignore`` is a non-empty string, any subdirectory whose relative
    path contains it is skipped entirely, along with the files it holds.
    Missing subdirectories under the target are created on demand.
    """
    abs_source = os.path.abspath(source)
    abs_target = os.path.abspath(target)
    for current_root, _subdirs, file_names in os.walk(source):
        # Compute the directory's path relative to the source root; the
        # root itself maps to the empty string, matching how target paths
        # are assembled below.
        rel_dir = os.path.relpath(os.path.abspath(current_root), abs_source)
        if rel_dir == os.curdir:
            rel_dir = ""
        # Skip ignored subtrees.
        if ignore and ignore in rel_dir:
            continue
        # Create the mirrored directory before copying files into it,
        # otherwise shutil.copyfile would fail on the missing parent.
        target_subdir = os.path.join(abs_target, rel_dir)
        if not os.path.isdir(target_subdir):
            os.makedirs(target_subdir)
        for file_name in file_names:
            rel_file = os.path.join(rel_dir, file_name)
            shutil.copyfile(os.path.join(abs_source, rel_file),
                            os.path.join(abs_target, rel_file))
def process_mix(script_path, verbose):
    """Run every .mix file in the current directory through the preprocessor.

    script_path names the preprocessor script, which must live in the
    working directory.  Each foo.mix produces foo.py.  Files that fail are
    reported; with verbose=True the captured errors are printed in full.

    (Fix: the Python-2-only ``print`` statements were rewritten as
    parenthesized single-argument calls, which behave identically under
    Python 2 and also run under Python 3.)
    """
    mix_files = glob.glob("*.mix")
    error_list = []
    for file_path in mix_files:
        # Derive the output name: foo.mix -> foo.py
        processed_file_path = os.path.basename(file_path).replace(".mix", ".py")
        (theout, theerr) = exec_command(sys.executable + " " + script_path + " " +
                                        file_path + " " + processed_file_path)
        # Anything on stderr means the preprocessor failed on this file.
        if theerr:
            print("Unable to process the file: " + file_path)
            error_list.append((file_path, theerr))
    # If the verbose option is on then print every captured error.
    if verbose and len(error_list) > 0:
        print("\n" + '#'*50 + "\nPrinting all the exceptions (verbose option)\n" + '#'*50)
        for file_name, error in error_list:
            print("\n" + file_name + ":")
            print(error)
            print('-'*80)
def exec_command(command):
    """Run ``command`` through the shell; return ``(stdout, stderr)`` strings.

    Platform-specific 'Terminated' noise emitted when the child receives a
    signal is stripped from the appropriate stream (FreeBSD prints it on
    stdout, other OSes on stderr).

    Fix: the previous implementation read stdout to EOF before touching
    stderr, which deadlocks when the child fills the stderr pipe buffer
    first; communicate() drains both pipes concurrently.
    """
    # Windows does not like close_fds and we shouldn't need it, so it is
    # left at the default.  universal_newlines=True keeps the streams as
    # text (str) on both Python 2 and 3.
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, universal_newlines=True)
    (theout, theerr) = process.communicate()
    # FreeBSD prints 'Terminated' on stdout when the child gets a signal;
    # it shows up as the second-to-last line (the output ends in '\n').
    out_lines = theout.split('\n')
    if len(out_lines) > 1 and out_lines[-2].strip() == 'Terminated':
        # Remove the notice line; restore the trailing '\n' we threw away
        # if any real output remains.
        theout = '\n'.join(out_lines[:-2])
        if theout != '':
            theout = theout + '\n'
    # Other OSes print the notice on stderr instead.
    if theerr.strip() == 'Terminated':
        theerr = ''
    return (theout, theerr)
def replace_string(old_string, new_string, file_name_pattern="*"):
    """
    <Purpose>
      Go through all the files in the current folder and replace
      every match of the old string in the file with the new string.

    <Arguments>
      old_string - The string we want to replace.
      new_string - The new string we want to replace the old string with.
      file_name_pattern - Glob pattern restricting which files are touched.
        By default every file in the current directory is rewritten.

    <Side Effects>
      Many files may get modified in place.

    <Return>
      None

    Fixes: uses open() instead of the Python-2-only file() builtin (with
    context managers so handles are closed even on error), and skips
    directories that happen to match the glob instead of crashing on them.
    """
    for testfile in glob.glob(file_name_pattern):
        # Globs like "*" also match directories; only rewrite plain files.
        if not os.path.isfile(testfile):
            continue
        # Read in the initial file.
        with open(testfile, 'r') as in_file:
            filestring = in_file.read()
        # Replace any occurrence of the old string with the new string.
        filestring = filestring.replace(old_string, new_string)
        # Write the file back.
        with open(testfile, 'w') as out_file:
            out_file.write(filestring)
def help_exit(errMsg, parser):
    """Print ``errMsg`` followed by the parser's help text, then exit(1).

    (Fix: the Python-2-only ``print`` statement was rewritten as a
    parenthesized single-argument call, identical under Python 2 and
    also valid under Python 3.)
    """
    print(errMsg)
    parser.print_help()
    sys.exit(1)
def main():
    """Prepare the target directory for running Repy and its unit tests.

    Empties the target, copies in the runtime (and, with -t, the test
    files) from the sibling SeattleTestbed repositories, fills in the
    test ports, and preprocesses the .mix files under repyV1.
    """
    # Parse the options provided.
    helpstring = "python preparetest.py [-t] [-v] [-c] [-r] <target>"
    parser = optparse.OptionParser(usage=helpstring)
    parser.add_option("-t", "--testfiles", action="store_true",
                      dest="include_tests", default=False,
                      help="Include files required to run the unit tests ")
    parser.add_option("-v", "--verbose", action="store_true",
                      dest="verbose", default=False,
                      help="Show more output on failure to process a .mix file")
    parser.add_option("-c", "--checkapi", action="store_true",
                      dest="copy_checkapi", default=False,
                      help="Include checkAPI files")
    parser.add_option("-r", "--randomports", action="store_true",
                      dest="randomports", default=False,
                      help="Replace the default ports with random ports between 52000 and 53000. ")
    (options, args) = parser.parse_args()
    # Extract the target directory.
    if len(args) == 0:
        help_exit("Please pass the target directory as a parameter.", parser)
    else:
        target_dir = args[0]
    # Make sure they gave us a valid directory
    if not os.path.isdir(target_dir):
        help_exit("Supplied target is not a directory", parser)
    # Set variables according to the provided options.
    repytest = options.include_tests
    RANDOMPORTS = options.randomports
    verbose = options.verbose
    # NOTE(review): copy_checkapi is read but never used below — confirm
    # whether the checkAPI copy step was lost or is still pending.
    copy_checkapi = options.copy_checkapi
    # This script's parent directory is the root dir of all repositories
    repos_root_dir = os.path.dirname(os.getcwd())
    # Set working directory to the target
    os.chdir(target_dir)
    files_to_remove = glob.glob("*")
    # Empty the destination
    for entry in files_to_remove:
        if os.path.isdir(entry):
            shutil.rmtree(entry)
        else:
            os.remove(entry)
    # Create directories for each Repy version under the target
    repy_dir = {"v1" : os.path.join(target_dir, "repyV1"),
                "v2" : os.path.join(target_dir, "repyV2") }
    for dir_name in repy_dir.values():
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
    # Return to the repo root
    os.chdir(repos_root_dir)
    # Copy the necessary files to the respective target folders:
    # Affix framework and components
    copy_to_target("affix/*", target_dir)
    copy_to_target("affix/components/*", target_dir)
    copy_to_target("affix/services/tcp_relay/*", target_dir)
    # Nodemanager and RepyV2 runtime
    copy_to_target("repy_v2/*", target_dir)
    copy_to_target("nodemanager/*", target_dir)
    copy_to_target("portability/*", target_dir)
    copy_to_target("seattlelib_v2/*", target_dir)
    # RepyV2 runtime for vessels
    copy_to_target("portability/*", repy_dir["v2"])
    copy_to_target("repy_v2/*", repy_dir["v2"])
    copy_to_target("seattlelib_v2/dylink.r2py", repy_dir["v2"])
    copy_to_target("seattlelib_v2/textops.py", repy_dir["v2"])
    copy_to_target("nodemanager/servicelogger.py", repy_dir["v2"])
    # RepyV1 runtime for vessels
    copy_to_target("repy_v1/*", repy_dir["v1"])
    # Seash
    copy_to_target("seash/*", target_dir)
    copy_tree_to_target("seash/pyreadline/", os.path.join(target_dir, 'pyreadline/'), ignore=".git")
    copy_tree_to_target("seash/modules/", os.path.join(target_dir, 'modules/'), ignore=".git")
    # Clearinghouse XML-RPC interface
    copy_to_target("common/seattleclearinghouse_xmlrpc.py", target_dir)
    # Software updater
    copy_to_target("softwareupdater/*", target_dir)
    copy_to_target("dist/update_crontab_entry.py", target_dir)
    # The license must be included in anything we distribute.
    copy_to_target("common/LICENSE", target_dir)
    if repytest:
        # Only copy the tests if they were requested.
        copy_to_target("repy_v2/tests/restrictions.*", target_dir)
        copy_to_target("utf/*.py", target_dir)
        copy_to_target("utf/tests/*.py", target_dir)
        copy_to_target("repy_v2/testsV2/*", target_dir) # XXX Scheduled for merge with repy_v2/tests
        copy_to_target("nodemanager/tests/*", target_dir)
        copy_to_target("portability/tests/*", target_dir)
        copy_to_target("seash/tests/*", target_dir)
        copy_tree_to_target("seash/tests/modules/", os.path.join(target_dir, 'modules/'), ignore=".git")
        copy_to_target("seattlelib_v2/tests/*", target_dir)
        # The web server is used in the software updater tests
        #copy_to_target("assignments/webserver/*", target_dir)
        #copy_to_target("softwareupdater/test/*", target_dir)
    # Set working directory to the target
    os.chdir(target_dir)
    # Set up dynamic port information
    if RANDOMPORTS:
        print "\n[ Randomports option was chosen ]\n"+'-'*50
        ports_as_ints = random.sample(range(52000, 53000), 5)
        ports_as_strings = []
        for port in ports_as_ints:
            ports_as_strings.append(str(port))
        print "Randomly chosen ports: ", ports_as_strings
        testportfiller.replace_ports(ports_as_strings, ports_as_strings)
        # Replace the string <nodemanager_port> with a random port
        random_nodemanager_port = random.randint(53000, 54000)
        print "Chosen random nodemanager port: " + str(random_nodemanager_port)
        print '-'*50 + "\n"
        replace_string("<nodemanager_port>", str(random_nodemanager_port), "*nm*")
        replace_string("<nodemanager_port>", str(random_nodemanager_port), "*securitylayers*")
    else:
        # Otherwise use the default ports...
        testportfiller.replace_ports(['12345','12346','12347', '12348', '12349'], ['12345','12346','12347', '12348', '12349'])
        # Use default port 1224 for the nodemanager port if --random flag is not provided.
        replace_string("<nodemanager_port>", '1224', "*nm*")
        replace_string("<nodemanager_port>", '1224', "*securitylayers*")
    # Only the repyV1 subdirectory contains .mix files to preprocess.
    os.chdir("repyV1")
    process_mix("repypp.py", verbose)
    # Change back to root project directory
    os.chdir(repos_root_dir)
# Run the preparation steps only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'CryptoSamActionEnum' : _MetaInfoEnum('CryptoSamActionEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg',
{
'proceed':'PROCEED',
'terminate':'TERMINATE',
}, 'Cisco-IOS-XR-crypto-sam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-crypto-sam-cfg']),
'Crypto.Sam.PromptInterval' : {
'meta_info' : _MetaInfoClass('Crypto.Sam.PromptInterval',
False,
[
_MetaInfoClassMember('action', REFERENCE_ENUM_CLASS, 'CryptoSamActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg', 'CryptoSamActionEnum',
[], [],
''' Respond to SAM prompt either Proceed/Terminate
''',
'action',
'Cisco-IOS-XR-crypto-sam-cfg', False),
_MetaInfoClassMember('prompt-time', ATTRIBUTE, 'int' , None, None,
[(0, 300)], [],
''' Prompt time from 0 - 300 seconds
''',
'prompt_time',
'Cisco-IOS-XR-crypto-sam-cfg', False),
],
'Cisco-IOS-XR-crypto-sam-cfg',
'prompt-interval',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-sam-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg'
),
},
'Crypto.Sam' : {
'meta_info' : _MetaInfoClass('Crypto.Sam',
False,
[
_MetaInfoClassMember('prompt-interval', REFERENCE_CLASS, 'PromptInterval' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg', 'Crypto.Sam.PromptInterval',
[], [],
''' Set prompt interval at reboot time
''',
'prompt_interval',
'Cisco-IOS-XR-crypto-sam-cfg', False),
],
'Cisco-IOS-XR-crypto-sam-cfg',
'sam',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-sam-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg'
),
},
'Crypto.Ssh.Client' : {
'meta_info' : _MetaInfoClass('Crypto.Ssh.Client',
False,
[
_MetaInfoClassMember('client-vrf', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Source interface VRF for ssh client sessions
''',
'client_vrf',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('dscp', ATTRIBUTE, 'int' , None, None,
[(0, 63)], [],
''' Cisco sshd DSCP value
''',
'dscp',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('host-public-key', ATTRIBUTE, 'str' , None, None,
[], [],
''' Filename - where to store known host file
''',
'host_public_key',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('source-interface', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Source interface for ssh client sessions
''',
'source_interface',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
],
'Cisco-IOS-XR-crypto-ssh-cfg',
'client',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-ssh-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg'
),
},
'Crypto.Ssh.Server.VrfTable.Vrf' : {
'meta_info' : _MetaInfoClass('Crypto.Ssh.Server.VrfTable.Vrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Enter VRF name
''',
'vrf_name',
'Cisco-IOS-XR-crypto-ssh-cfg', True),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable to use VRF
''',
'enable',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('ipv4-access-list', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' SSH v4 access-list name
''',
'ipv4_access_list',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('ipv6-access-list', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' SSH v6 access-list name
''',
'ipv6_access_list',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
],
'Cisco-IOS-XR-crypto-ssh-cfg',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-ssh-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg'
),
},
'Crypto.Ssh.Server.VrfTable' : {
'meta_info' : _MetaInfoClass('Crypto.Ssh.Server.VrfTable',
False,
[
_MetaInfoClassMember('vrf', REFERENCE_LIST, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg', 'Crypto.Ssh.Server.VrfTable.Vrf',
[], [],
''' Enter VRF name
''',
'vrf',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
],
'Cisco-IOS-XR-crypto-ssh-cfg',
'vrf-table',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-ssh-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg'
),
},
'Crypto.Ssh.Server.NetconfVrfTable.Vrf' : {
'meta_info' : _MetaInfoClass('Crypto.Ssh.Server.NetconfVrfTable.Vrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Enter VRF name
''',
'vrf_name',
'Cisco-IOS-XR-crypto-ssh-cfg', True),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable to use VRF
''',
'enable',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('ipv4-access-list', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' SSH v4 access-list name
''',
'ipv4_access_list',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('ipv6-access-list', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' SSH v6 access-list name
''',
'ipv6_access_list',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
],
'Cisco-IOS-XR-crypto-ssh-cfg',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-ssh-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg'
),
},
'Crypto.Ssh.Server.NetconfVrfTable' : {
'meta_info' : _MetaInfoClass('Crypto.Ssh.Server.NetconfVrfTable',
False,
[
_MetaInfoClassMember('vrf', REFERENCE_LIST, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg', 'Crypto.Ssh.Server.NetconfVrfTable.Vrf',
[], [],
''' Enter VRF name
''',
'vrf',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
],
'Cisco-IOS-XR-crypto-ssh-cfg',
'netconf-vrf-table',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-ssh-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg'
),
},
'Crypto.Ssh.Server' : {
'meta_info' : _MetaInfoClass('Crypto.Ssh.Server',
False,
[
_MetaInfoClassMember('dscp', ATTRIBUTE, 'int' , None, None,
[(0, 63)], [],
''' Cisco sshd DSCP value
''',
'dscp',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('logging', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable ssh server logging
''',
'logging',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('netconf', ATTRIBUTE, 'int' , None, None,
[(1, 65535)], [],
''' port number on which ssh service to be started
for netconf
''',
'netconf',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('netconf-vrf-table', REFERENCE_CLASS, 'NetconfVrfTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg', 'Crypto.Ssh.Server.NetconfVrfTable',
[], [],
''' Cisco sshd Netconf VRF name
''',
'netconf_vrf_table',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('rate-limit', ATTRIBUTE, 'int' , None, None,
[(1, 600)], [],
''' Cisco sshd rate-limit of service requests
''',
'rate_limit',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('session-limit', ATTRIBUTE, 'int' , None, None,
[(1, 1024)], [],
''' Cisco sshd session-limit of service requests
''',
'session_limit',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('timeout', ATTRIBUTE, 'int' , None, None,
[(5, 120)], [],
''' Timeout value between 5-120 seconds defalut 30
''',
'timeout',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('v2', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Cisco sshd force protocol version 2 only
''',
'v2',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('vrf-table', REFERENCE_CLASS, 'VrfTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg', 'Crypto.Ssh.Server.VrfTable',
[], [],
''' Cisco sshd VRF name
''',
'vrf_table',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
],
'Cisco-IOS-XR-crypto-ssh-cfg',
'server',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-ssh-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg'
),
},
'Crypto.Ssh' : {
'meta_info' : _MetaInfoClass('Crypto.Ssh',
False,
[
_MetaInfoClassMember('client', REFERENCE_CLASS, 'Client' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg', 'Crypto.Ssh.Client',
[], [],
''' Provide SSH client service
''',
'client',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
_MetaInfoClassMember('server', REFERENCE_CLASS, 'Server' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg', 'Crypto.Ssh.Server',
[], [],
''' Provide SSH server service
''',
'server',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
],
'Cisco-IOS-XR-crypto-ssh-cfg',
'ssh',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-ssh-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg'
),
},
'Crypto' : {
'meta_info' : _MetaInfoClass('Crypto',
False,
[
_MetaInfoClassMember('sam', REFERENCE_CLASS, 'Sam' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg', 'Crypto.Sam',
[], [],
''' Software Authentication Manager (SAM) Config
''',
'sam',
'Cisco-IOS-XR-crypto-sam-cfg', False),
_MetaInfoClassMember('ssh', REFERENCE_CLASS, 'Ssh' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg', 'Crypto.Ssh',
[], [],
''' Secure Shell configuration
''',
'ssh',
'Cisco-IOS-XR-crypto-ssh-cfg', False),
],
'Cisco-IOS-XR-crypto-sam-cfg',
'crypto',
_yang_ns._namespaces['Cisco-IOS-XR-crypto-sam-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_cfg'
),
},
}
# Wire up parent pointers so each nested meta-info descriptor knows its
# containing class.  (child key, parent key) pairs mirror the YANG tree.
_parent_links = (
    ('Crypto.Sam.PromptInterval', 'Crypto.Sam'),
    ('Crypto.Ssh.Server.VrfTable.Vrf', 'Crypto.Ssh.Server.VrfTable'),
    ('Crypto.Ssh.Server.NetconfVrfTable.Vrf', 'Crypto.Ssh.Server.NetconfVrfTable'),
    ('Crypto.Ssh.Server.VrfTable', 'Crypto.Ssh.Server'),
    ('Crypto.Ssh.Server.NetconfVrfTable', 'Crypto.Ssh.Server'),
    ('Crypto.Ssh.Client', 'Crypto.Ssh'),
    ('Crypto.Ssh.Server', 'Crypto.Ssh'),
    ('Crypto.Sam', 'Crypto'),
    ('Crypto.Ssh', 'Crypto'),
)
for _child_key, _parent_key in _parent_links:
    _meta_table[_child_key]['meta_info'].parent = _meta_table[_parent_key]['meta_info']
|
|
import codecs
import json
import mimetypes
import os
import time
from collections import OrderedDict
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.defaultfilters import filesizeformat
from django.utils.encoding import smart_unicode
import commonware.log
import jinja2
from cache_nuggets.lib import memoize, Message
from jingo import register
from django.utils.translation import ugettext as _
from appvalidator.testcases.packagelayout import (
blacklisted_extensions as blocked_extensions,
blacklisted_magic_numbers as blocked_magic_numbers)
import mkt
from mkt.files.utils import extract_zip, get_md5
from mkt.site.storage_utils import (copy_stored_file, local_storage,
private_storage, public_storage,
storage_is_remote, walk_storage)
from mkt.site.utils import env
# Allow files with a shebang through: drop the "#!" magic number and the
# "sh" extension from the lists inherited from appvalidator.
blocked_magic_numbers = [
    magic for magic in blocked_magic_numbers if magic != (0x23, 0x21)]
blocked_extensions = [
    ext for ext in blocked_extensions if ext != 'sh']
task_log = commonware.log.getLogger('z.task')
@register.function
def file_viewer_class(value, key):
    """Return the CSS classes for a file-tree node.

    `value` is one entry from FileViewer.get_files(); `key` is the short
    path of the currently selected file.
    """
    classes = ['directory closed' if value['directory'] else 'file']
    if value['short'] == key:
        classes.append('selected')
    if value.get('diff'):
        classes.append('diff')
    return ' '.join(classes)
@register.function
def file_tree(files, selected):
    """Render the nested <ul> markup for the file-tree viewer.

    Depth changes between consecutive entries open/close <ul> levels;
    `selected` is passed through to the node template.
    """
    node_template = env.get_template('fileviewer/node.html')
    markup = ['<ul class="root">']
    previous_depth = 0
    for short, info in files.items():
        if info['depth'] > previous_depth:
            markup.append('<ul class="js-hidden">')
        elif info['depth'] < previous_depth:
            markup.extend('</ul>' for _ in range(info['depth'], previous_depth))
        markup.append(node_template.render({'value': info, 'selected': selected}))
        previous_depth = info['depth']
    # Close every still-open level, including the root <ul>.
    markup.extend('</ul>' for _ in range(previous_depth, -1, -1))
    return jinja2.Markup('\n'.join(markup))
class FileViewer(object):
    """
    Provide access to a storage-managed file by copying it locally and
    extracting info from it. `src` is a storage-managed path and `dest` is a
    local temp path.
    """
    def __init__(self, file_obj):
        # The File model instance being viewed.
        self.file = file_obj
        self.addon = self.file.version.addon
        # Disabled files live under the guarded path; everything else uses
        # the normal file path.
        self.src = (file_obj.guarded_file_path
                    if file_obj.status == mkt.STATUS_DISABLED
                    else file_obj.file_path)
        # Destination directory the zip contents are extracted into.
        self.dest = os.path.join(settings.TMP_PATH, 'file_viewer',
                                 str(file_obj.pk))
        # Lazily built file tree and the currently selected entry (a dict
        # from get_files(), or None).
        self._files, self.selected = None, None
    def __str__(self):
        return str(self.file.id)
    def _extraction_cache_key(self):
        # Cache key used to flag an extraction in progress; checked by
        # is_extracted() below.
        return ('%s:file-viewer:extraction-in-progress:%s' %
                (settings.CACHE_PREFIX, self.file.id))
    def extract(self):
        """
        Will make all the directories and expand the files.
        Raises error on nasty files.
        """
        # Listed files are read from public storage; everything else from
        # private storage.
        if self.file.status in mkt.LISTED_STATUSES:
            storage = public_storage
        else:
            storage = private_storage
        try:
            # extract_zip expands into a local temp dir; we then mirror it
            # into private storage under self.dest.
            tempdir = extract_zip(storage.open(self.src))
            # Move extracted files into persistent storage.
            for root, subdirs, files in os.walk(tempdir):
                storage_root = root.replace(tempdir, self.dest, 1)
                for fname in files:
                    file_src = os.path.join(root, fname)
                    file_dest = os.path.join(storage_root, fname)
                    copy_stored_file(file_src, file_dest,
                                     src_storage=local_storage,
                                     dst_storage=private_storage)
        except Exception, err:
            # Log and re-raise: extraction failures must surface to callers.
            task_log.error('Error (%s) extracting %s' % (err, self.src))
            raise
    def cleanup(self):
        """Delete every extracted file under self.dest from private storage."""
        try:
            for root, dirs, files in walk_storage(
                    self.dest, storage=private_storage):
                for fname in files:
                    private_storage.delete(os.path.join(root, fname))
        except OSError as e:
            if e.errno == 2:
                # Directory doesn't exist, nothing to clean up.
                return
            raise
    def is_extracted(self):
        """If the file has been extracted or not."""
        # Extraction is complete once the manifest exists and no
        # in-progress flag remains in the cache.
        return (private_storage.exists(
            os.path.join(self.dest, 'manifest.webapp')) and
            not Message(self._extraction_cache_key()).get())
    def _is_binary(self, mimetype, path):
        """Uses the filename to see if the file can be shown in HTML or not."""
        # Re-use the blocked data from amo-validator to spot binaries.
        ext = os.path.splitext(path)[1][1:]
        if ext in blocked_extensions:
            return True
        # S3 will return false for storage.exists() for directory paths, so
        # os.path call is safe here.
        if private_storage.exists(path) and not os.path.isdir(path):
            # NOTE: `bytes` shadows the builtin here; holds the first four
            # bytes of the file as a tuple of ints for magic-number checks.
            with private_storage.open(path, 'r') as rfile:
                bytes = tuple(map(ord, rfile.read(4)))
            if any(bytes[:len(x)] == x for x in blocked_magic_numbers):
                return True
        if mimetype:
            major, minor = mimetype.split('/')
            if major == 'image':
                return 'image'  # Mark that the file is binary, but an image.
        return False
    def read_file(self, allow_empty=False):
        """
        Reads the file. Imposes a file limit and tries to cope with
        UTF-8 and UTF-16 files appropriately. Return file contents and
        a list of error messages.
        """
        try:
            file_data = self._read_file(allow_empty)
            # If this is a webapp manifest, we should try to pretty print it.
            if (self.selected and
                    self.selected.get('filename') == 'manifest.webapp'):
                file_data = self._process_manifest(file_data)
            return file_data
        except (IOError, OSError):
            # File vanished between listing and reading (e.g. cleanup cron).
            self.selected['msg'] = _('That file no longer exists.')
            return ''
    def _read_file(self, allow_empty=False):
        # Returns '' (and possibly sets selected['msg']) instead of raising
        # for expected conditions: no selection, over size limit, bad codec.
        if not self.selected and allow_empty:
            return ''
        assert self.selected, 'Please select a file'
        if self.selected['size'] > settings.FILE_VIEWER_SIZE_LIMIT:
            # L10n: {0} is the file size limit of the file viewer.
            msg = _(u'File size is over the limit of {0}.').format(
                filesizeformat(settings.FILE_VIEWER_SIZE_LIMIT))
            self.selected['msg'] = msg
            return ''
        with private_storage.open(self.selected['full'], 'r') as opened:
            cont = opened.read()
            # Pick UTF-16 when a BOM is present, otherwise assume UTF-8.
            codec = 'utf-16' if cont.startswith(codecs.BOM_UTF16) else 'utf-8'
            try:
                return cont.decode(codec)
            except UnicodeDecodeError:
                cont = cont.decode(codec, 'ignore')
                # L10n: {0} is the filename.
                self.selected['msg'] = (
                    _('Problems decoding {0}.').format(codec))
                return cont
    def _process_manifest(self, data):
        """
        This will format the manifest nicely for maximum diff-ability.
        """
        try:
            json_data = json.loads(data)
        except Exception:
            # If there are any JSON decode problems, just return the raw file.
            return data
        def format_dict(data):
            def do_format(value):
                # Recurse into nested dicts so they get the same ordering.
                if isinstance(value, dict):
                    return format_dict(value)
                else:
                    return value
            # We want everything sorted, but we always want these few nodes
            # right at the top.
            prefix_nodes = ['name', 'description', 'version']
            prefix_nodes = [(k, data.pop(k)) for k in prefix_nodes if
                            k in data]
            processed_nodes = [(k, do_format(v)) for k, v in data.items()]
            return OrderedDict(prefix_nodes + sorted(processed_nodes))
        return json.dumps(format_dict(json_data), indent=2)
    def select(self, file_):
        # Select by short path; self.selected becomes the file's info dict
        # or None when the path is unknown.
        self.selected = self.get_files().get(file_)
    def is_binary(self):
        # Returns True, False or 'image'; sets a user-facing message for
        # non-image binaries as a side effect.
        if self.selected:
            binary = self.selected['binary']
            if binary and binary != 'image':
                self.selected['msg'] = _('This file is not viewable online. '
                                         'Please download the file to view '
                                         'the contents.')
            return binary
    def is_directory(self):
        # Sets a user-facing message when the selection is a directory.
        if self.selected:
            if self.selected['directory']:
                self.selected['msg'] = _('This file is a directory.')
            return self.selected['directory']
    def get_default(self, key=None):
        """Gets the default file and copes with search engines."""
        if key:
            return key
        return 'manifest.webapp'
    def get_files(self):
        """
        Returns an OrderedDict, ordered by the filename of all the files in the
        addon-file. Full of all the useful information you'll need to serve
        this file, build templates etc.
        """
        if self._files:
            return self._files
        if not self.is_extracted():
            return {}
        # In case a cron job comes along and deletes the files
        # mid tree building.
        try:
            self._files = self._get_files()
            return self._files
        except (OSError, IOError):
            return {}
    def truncate(self, filename, pre_length=15, post_length=10,
                 ellipsis=u'..'):
        """
        Truncates a filename so that
            somelongfilename.htm
        becomes:
            some...htm
        as it truncates around the extension.
        """
        root, ext = os.path.splitext(filename)
        if len(root) > pre_length:
            root = root[:pre_length] + ellipsis
        if len(ext) > post_length:
            ext = ext[:post_length] + ellipsis
        return root + ext
    def get_syntax(self, filename):
        """
        Converts a filename into a syntax for the syntax highlighter, with
        some modifications for specific common mozilla files.
        The list of syntaxes is from:
        http://alexgorbatchev.com/SyntaxHighlighter/manual/brushes/
        """
        if filename:
            short = os.path.splitext(filename)[1][1:]
            # Remap Mozilla-specific extensions onto supported brushes.
            syntax_map = {'xul': 'xml', 'rdf': 'xml', 'jsm': 'js',
                          'json': 'js', 'webapp': 'js'}
            short = syntax_map.get(short, short)
            if short in ['actionscript3', 'as3', 'bash', 'shell', 'cpp', 'c',
                         'c#', 'c-sharp', 'csharp', 'css', 'diff', 'html',
                         'java', 'javascript', 'js', 'jscript', 'patch',
                         'pas', 'php', 'plain', 'py', 'python', 'sass',
                         'scss', 'text', 'sql', 'vb', 'vbnet', 'xml', 'xhtml',
                         'xslt']:
                return short
        return 'plain'
    @memoize(prefix='file-viewer', time=60 * 60)
    def _get_files(self):
        # Cached for an hour per instance key; builds the full tree dict.
        all_files, res = [], OrderedDict()
        # Not using os.path.walk so we get just the right order.
        def iterate(path):
            path_dirs, path_files = private_storage.listdir(path)
            for dirname in sorted(path_dirs):
                full = os.path.join(path, dirname)
                all_files.append(full)
                iterate(full)
            for filename in sorted(path_files):
                full = os.path.join(path, filename)
                all_files.append(full)
        iterate(self.dest)
        for path in all_files:
            filename = smart_unicode(os.path.basename(path), errors='replace')
            # Path relative to the extraction root, used as the dict key.
            short = smart_unicode(path[len(self.dest) + 1:], errors='replace')
            mime, encoding = mimetypes.guess_type(filename)
            if not mime and filename == 'manifest.webapp':
                mime = 'application/x-web-app-manifest+json'
            if storage_is_remote():
                # S3 doesn't have directories, so we check for names with this
                # prefix and call it a directory if there are some.
                subdirs, subfiles = private_storage.listdir(path)
                directory = bool(subdirs or subfiles)
            else:
                directory = os.path.isdir(path)
            res[short] = {
                'binary': self._is_binary(mime, path),
                'depth': short.count(os.sep),
                'directory': directory,
                'filename': filename,
                'full': path,
                'md5': get_md5(path) if not directory else '',
                'mimetype': mime or 'application/octet-stream',
                'syntax': self.get_syntax(filename),
                'modified': (
                    time.mktime(
                        private_storage.modified_time(path).timetuple())
                    if not directory else 0),
                'short': short,
                'size': private_storage.size(path) if not directory else 0,
                'truncated': self.truncate(filename),
                'url': reverse('mkt.files.list',
                               args=[self.file.id, 'file', short]),
                'url_serve': reverse('mkt.files.redirect',
                                     args=[self.file.id, short]),
                'version': self.file.version.version,
            }
        return res
class DiffHelper(object):
    """Pair two FileViewers (left = newer side, right = older side) and
    expose diff-oriented helpers over both trees."""

    def __init__(self, left, right):
        self.left = FileViewer(left)
        self.right = FileViewer(right)
        self.addon = self.left.addon
        self.key = None

    def __str__(self):
        return '%s:%s' % (self.left, self.right)

    def extract(self):
        """Extract both sides."""
        self.left.extract()
        self.right.extract()

    def cleanup(self):
        """Remove extracted files for both sides."""
        self.left.cleanup()
        self.right.cleanup()

    def is_extracted(self):
        return self.left.is_extracted() and self.right.is_extracted()

    def get_url(self, short):
        return reverse('mkt.files.compare',
                       args=[self.left.file.id, self.right.file.id,
                             'file', short])

    def get_files(self):
        """
        Get the files from the primary and:
        - remap any diffable ones to the compare url as opposed to the other
        - highlight any diffs
        """
        left_files = self.left.get_files()
        right_files = self.right.get_files()
        changed = []
        for short_path, info in left_files.items():
            info['url'] = self.get_url(info['short'])
            is_diff = info['md5'] != right_files.get(short_path, {}).get('md5')
            info['diff'] = is_diff
            if is_diff:
                changed.append(info)
        # Now mark every directory above each different file as different.
        for info in changed:
            for level in range(info['depth']):
                ancestor = '/'.join(info['short'].split('/')[:level + 1])
                if ancestor in left_files:
                    left_files[ancestor]['diff'] = True
        return left_files

    def get_deleted_files(self):
        """
        Get files that exist in right, but not in left. These
        are files that have been deleted between the two versions.
        Every element will be marked as a diff.
        """
        deleted = OrderedDict()
        left_files = self.left.get_files()
        right_files = self.right.get_files()
        for short_path, info in right_files.items():
            if short_path in left_files:
                continue
            entry = right_files[short_path]
            entry.update({'url': self.get_url(info['short']), 'diff': True})
            deleted[short_path] = entry
        return deleted

    def read_file(self):
        """Reads both selected files."""
        return [self.left.read_file(allow_empty=True),
                self.right.read_file(allow_empty=True)]

    def select(self, key):
        """Select `key` on both sides; truthy only when both sides have it."""
        self.key = key
        self.left.select(key)
        self.right.select(key)
        return self.left.selected and self.right.selected

    def is_binary(self):
        """Tells you if both selected files are binary."""
        return self.left.is_binary() or self.right.is_binary()

    def is_diffable(self):
        """Tells you if the selected files are diffable."""
        if not self.left.selected and not self.right.selected:
            return False
        for viewer in (self.left, self.right):
            # Short-circuits like the original: a binary file skips the
            # directory check (and its message side effect).
            if viewer.is_binary() or viewer.is_directory():
                return False
        return True
def rmtree(prefix):
    """Recursively delete `prefix` (files first, then subdirs, then the
    directory entry itself) from private storage."""
    subdirs, filenames = private_storage.listdir(prefix)
    for filename in filenames:
        private_storage.delete(os.path.join(prefix, filename))
    for subdir in subdirs:
        rmtree(os.path.join(prefix, subdir))
    private_storage.delete(prefix)
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Heat documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 13 11:23:35 2012.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import os
import re
import sys
import tempfile
from oslo_config import cfg
# Path layout: this file lives in doc/source/, so ROOT is the repo root.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
CONTRIB_DIR = os.path.join(ROOT, 'contrib')
PLUGIN_DIRS = glob.glob(os.path.join(CONTRIB_DIR, '*'))
ENV_DIR = os.path.join(ROOT, "etc", "heat", "environment.d")
# Copy the environment files into a temp dir, rewriting file:/// URLs to
# point at this checkout so docs can be built without installing Heat.
TEMP_ENV_DIR = tempfile.mkdtemp()
for f in glob.glob(os.path.join(ENV_DIR, "*.yaml")):
    with open(f, "r") as fin:
        name = os.path.split(f)[-1]
        with open(os.path.join(TEMP_ENV_DIR, name), "w") as fout:
            fout.write(fin.read().replace("file:///", "file://%s/" % ROOT))
sys.path.insert(0, ROOT)
sys.path.insert(0, BASE_DIR)
# Point oslo.config at the in-tree plugin and environment directories so
# autodoc imports see the same resources a deployed Heat would.
cfg.CONF.import_opt('plugin_dirs', 'heat.common.config')
cfg.CONF.set_override(name='plugin_dirs', override=PLUGIN_DIRS)
cfg.CONF.import_opt('environment_dir', 'heat.common.config')
cfg.CONF.set_override(name='environment_dir', override=TEMP_ENV_DIR)
# This is required for ReadTheDocs.org, but isn't a bad idea anyway.
os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'
def write_autodoc_index():
    """Generate autodoc .rst stubs plus an index for the Heat source tree.

    Side effects: writes <BASE_DIR>/sourcecode/autoindex.rst and one
    "<module>.rst" per discovered module, and deletes previously generated
    .rst files whose source module no longer exists.  Uses module globals
    BASE_DIR and ROOT.  Returns None.
    """

    def find_autodoc_modules(module_name, sourcedir):
        """Return a list of dotted module names in the SOURCE directory."""
        modlist = []
        # NOTE: os.walk(".") below relies on this chdir; the working
        # directory is deliberately left changed, as before.
        os.chdir(os.path.join(sourcedir, module_name))
        print("SEARCHING %s" % sourcedir)
        for root, dirs, files in os.walk("."):
            for filename in files:
                if not filename.endswith(".py"):
                    continue
                # Remove the pieces of the root and replace the leading
                # "." with the package name.
                elements = root.split(os.path.sep)
                elements[0] = module_name
                base, extension = os.path.splitext(filename)
                if base != "__init__":
                    elements.append(base)
                modlist.append(".".join(elements))
        return modlist

    RSTDIR = os.path.abspath(os.path.join(BASE_DIR, "sourcecode"))
    SOURCES = {'heat': {'module': 'heat', 'path': ROOT}}
    # Raw strings so the regex backslashes are not (mis)read as string
    # escape sequences.
    EXCLUDED_MODULES = ('heat.testing',
                        'heat.cmd',
                        'heat.common',
                        'heat.cloudinit',
                        'heat.cfn_client',
                        'heat.doc',
                        'heat.db',
                        'heat.engine.resources',
                        'heat.locale',
                        'heat.openstack',
                        r'.*\.tests',
                        r'.*\.resources')
    CURRENT_SOURCES = {}

    if not os.path.exists(RSTDIR):
        os.mkdir(RSTDIR)
    CURRENT_SOURCES[RSTDIR] = ['autoindex.rst', '.gitignore']

    # Context manager guarantees the index is flushed/closed on any error.
    with open(os.path.join(RSTDIR, "autoindex.rst"), "w") as INDEXOUT:
        INDEXOUT.write("=================\n")
        INDEXOUT.write("Source Code Index\n")
        INDEXOUT.write("=================\n")
        for title, info in SOURCES.items():
            path = info['path']
            modulename = info['module']
            sys.stdout.write("Generating source documentation for %s\n" %
                             title)
            INDEXOUT.write("\n%s\n" % title.capitalize())
            INDEXOUT.write("%s\n" % ("=" * len(title),))
            INDEXOUT.write(".. toctree::\n")
            INDEXOUT.write(" :maxdepth: 1\n")
            INDEXOUT.write("\n")
            MOD_DIR = os.path.join(RSTDIR, title)
            CURRENT_SOURCES[MOD_DIR] = []
            if not os.path.exists(MOD_DIR):
                os.makedirs(MOD_DIR)
            for module in find_autodoc_modules(modulename, path):
                if any(re.match(exclude, module)
                       for exclude in EXCLUDED_MODULES):
                    print("Excluded module %s." % module)
                    continue
                mod_path = os.path.join(path, *module.split("."))
                generated_file = os.path.join(MOD_DIR, "%s.rst" % module)
                INDEXOUT.write(" %s/%s\n" % (title, module))
                # Point at the package's __init__.py when the module is a
                # directory.
                if os.path.isdir(mod_path):
                    source_file = os.path.join(mod_path, "__init__") + ".py"
                else:
                    source_file = mod_path + ".py"
                CURRENT_SOURCES[MOD_DIR].append("%s.rst" % module)
                # Only generate a new file if the source has changed or we
                # don't have a doc file to begin with.
                if (not os.access(generated_file, os.F_OK) or
                        os.stat(generated_file).st_mtime <
                        os.stat(source_file).st_mtime):
                    print("Module %s updated, generating new documentation."
                          % module)
                    header = "The :mod:`%s` Module" % module
                    # Context manager replaces the manual FILEOUT.close().
                    with open(generated_file, "w") as FILEOUT:
                        FILEOUT.write("%s\n" % ("=" * len(header),))
                        FILEOUT.write("%s\n" % header)
                        FILEOUT.write("%s\n" % ("=" * len(header),))
                        FILEOUT.write(".. automodule:: %s\n" % module)
                        FILEOUT.write(" :members:\n")
                        FILEOUT.write(" :undoc-members:\n")
                        FILEOUT.write(" :show-inheritance:\n")
                        FILEOUT.write(" :noindex:\n")

    # Delete auto-generated .rst files for sources which no longer exist.
    for directory, subdirs, files in list(os.walk(RSTDIR)):
        for old_file in files:
            if old_file not in CURRENT_SOURCES.get(directory, []):
                print("Removing outdated file for %s" % old_file)
                os.remove(os.path.join(directory, old_file))
# Build the autodoc index as a side effect of loading this config.
write_autodoc_index()

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE: 'sphinx.ext.viewcode' was previously listed twice; the duplicate
# entry has been removed (Sphinx loads each extension once regardless).
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.ifconfig',
              'sphinx.ext.viewcode',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.pngmath',
              'sphinx.ext.doctest',
              'oslosphinx',
              'ext.resources']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Heat'
copyright = u'2012,2013 Heat Developers'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**/#*', '**~', '**/#*#']
# The reST default role (used for this markup: `text`)
# to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Unprefixed roles/directives are resolved against the Python domain.
primary_domain = 'py'
# Do not turn every unresolved cross-reference into a warning.
nitpicky = False
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ['.']
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "nosidebar": "false"
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Stamp pages with the date/commit of the latest git commit instead of a
# strftime format; the popen output is used verbatim and ends up empty when
# git is unavailable or this is not a git checkout.
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Heatdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
    ('index', 'Heat.tex', u'Heat Documentation',
     u'Heat Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# All Heat daemons/utilities are documented in section 1.
man_pages = [
    ('man/heat-api', 'heat-api',
     u'REST API service to the heat project.',
     [u'Heat Developers'], 1),
    ('man/heat-api-cfn', 'heat-api-cfn',
     u'CloudFormation compatible API service to the heat project.',
     [u'Heat Developers'], 1),
    ('man/heat-api-cloudwatch', 'heat-api-cloudwatch',
     u'CloudWatch alike API service to the heat project',
     [u'Heat Developers'], 1),
    ('man/heat-db-setup', 'heat-db-setup',
     u'Command line utility to setup the Heat database',
     [u'Heat Developers'], 1),
    ('man/heat-engine', 'heat-engine',
     u'Service which performs the actions from the API calls made by the user',
     [u'Heat Developers'], 1),
    ('man/heat-keystone-setup', 'heat-keystone-setup',
     u'Script which sets up keystone for usage by Heat',
     [u'Heat Developers'], 1),
    ('man/heat-keystone-setup-domain', 'heat-keystone-setup-domain',
     u'Script which sets up a keystone domain for heat users and projects',
     [u'Heat Developers'], 1),
    ('man/heat-manage', 'heat-manage',
     u'Script which helps manage specific database operations',
     [u'Heat Developers'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Heat', u'Heat Documentation',
     u'Heat Developers', 'Heat', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
|
#########################################################################
""" wingdbstub.py -- Debug stub for debuggifying Python programs
Copyright (c) 1999-2001, Archaeopteryx Software, Inc. All rights reserved.
Written by Stephan R.A. Deibel and John P. Ehresman
Usage:
-----
This is the file that Wing DB users copy into their python project
directory if they want to be able to debug programs that are launched
outside of the IDE (e.g., CGI scripts, in response to a browser page
load).
To use this, edit the configuration values below to match your
Wing IDE installation and requirements of your project.
Then, add the following line to your code:
import wingdbstub
Debugging will start immediately after this import statement.
Next make sure that your IDE is running and that it's configured
to do passive listening and accept passive connections from the
host the debug program will be running on.
Now, invoking your python file should run the code within the debugger.
Note, however, that Wing will not stop in the code unless a breakpoint
is set.
If the debug process is started before the IDE, or is not listening
at the time this module is imported then the program will run with
debugging until an attach request is seen. Attaching only works
if the .wingdebugpw file is present; see the manual for details.
On win32, you either need to edit WINGHOME in this script or
pass in an environment variable called WINGHOME that points to
the Wing IDE installation directory.
"""
#########################################################################
import sys
import os
import imp
#------------------------------------------------------------------------
# Default configuration values: Note that the named environment
# variables, if set, will override these settings.
# (All kXxx constants below are read once by the startup block further
# down in this module.)
# Set this to 1 to disable all debugging; 0 to enable debugging
# (WINGDB_DISABLED environment variable)
kWingDebugDisabled = 0
# Host:port of the IDE within which to debug: As configured in the IDE
# with the Server Port preference
# (WINGDB_HOSTPORT environment variable)
kWingHostPort = 'localhost:50005'
# Port on which to listen for connection requests, so that the
# IDE can (re)attach to the debug process after it has started
# This is only used when the debug process is not attached to
# an IDE or the IDE has dropped its connection. The configured
# port can optionally be added to the IDE's Common Attach Hosts
# preference. Note that a random port is used instead if this
# port is already in use!
# (WINGDB_ATTACHPORT environment variable)
kAttachPort = '50015'
# Set this to a filename to log verbose information about the debugger
# internals to a file. If the file does not exist, it will be created
# as long as its enclosing directory exists and is writeable. Use
# "<stderr>" or "<stdout>". Note that "<stderr>" may cause problems
# on win32 if the debug process is not running in a console.
# (WINGDB_LOGFILE environment variable)
kLogFile = None
# Set to get a tremendous amount of logging from the debugger internals
# (WINGDB_LOGVERYVERBOSE)
kLogVeryVerbose = 0
# Set this to 1 when debugging embedded scripts in an environment that
# creates and reuses a Python instance across multiple script invocations:
# It turns off automatic detection of program quit so that the debug session
# can span multiple script invocations. When this is turned on, you may
# need to call ProgramQuit() on the debugger object to shut down the
# debugger cleanly when your application exits or discards the Python
# instance. If multiple Python instances are created in the same run,
# only the first one will be able to debug unless it terminates debug
# and the environment variable WINGDB_ACTIVE is unset before importing
# this module in the second or later Python instance. See the Wing
# IDE manual for details.
kEmbedded = 0
# Path to search for the debug password file and the name of the file
# to use. The password file contains the encryption type and connect
# password for all connections to the IDE and must match the wingdebugpw
# file in the profile dir used by the IDE. Any entry of '$<winguserprofile>'
# is replaced by the wing user profile directory for the user that the
# current process is running as
# (WINGDB_PWFILEPATH environment variable)
kPWFilePath = [os.path.dirname(__file__), '$<winguserprofile>']
kPWFileName = 'wingdebugpw'
# Whether to exit if the debugger fails to run or to connect with an IDE
# for whatever reason
kExitOnFailure = 0
#------------------------------------------------------------------------
# Find Wing debugger installation location
# Edit this to point to your Wing installation or set to None to use env setting
WINGHOME = None
def has_key(o, key):
    """Return True when *key* is present in mapping *o*.

    The ``in`` operator is supported by dicts (and os.environ) on both
    Python 2 and Python 3, so the previous ``sys.hexversion``-gated pair
    of implementations (``key in o`` vs ``o.has_key(key)``) collapses
    into this single portable definition with identical behavior.
    """
    return key in o
# Check environment: Must have WINGHOME defined if still == None
# (falls back to the WINGHOME environment variable; aborts the process
# otherwise, since nothing below can work without an installation path).
if WINGHOME == None:
    if has_key(os.environ, 'WINGHOME'):
        WINGHOME=os.environ['WINGHOME']
    else:
        sys.stdout.write("*******************************************************************\n")
        sys.stdout.write("Error: Could not find Wing installation! You must set WINGHOME or edit\n")
        sys.stdout.write("wingdbstub.py where indicated to point it to the location where\n")
        sys.stdout.write("Wing IDE is installed.\n")
        sys.exit(1)
# The user settings dir where per-user settings & patches are located. Will be
# set from environment if left as None
kUserSettingsDir = None
if kUserSettingsDir is None:
    kUserSettingsDir = os.environ.get('WINGDB_USERSETTINGS')
def _ImportWingdb(winghome, user_settings=None):
    """Find & import the wingdb module from a Wing installation.

    Tries the patch-aware directory search in bin/_patchsupport.py first,
    then falls back to <winghome>/bin and <winghome>/src.  Returns the
    imported module, or None when no candidate directory contains a
    wingdb module.

    Fixes relative to the original: the fallback paths now use the
    *winghome* parameter instead of the module-level WINGHOME global
    (they happened to be equal for the one caller, but the global defeated
    the purpose of the parameter), and an unreachable ``break`` after the
    inner return/finally was removed.
    """
    try:
        exec_dict = {}
        execfile(os.path.join(winghome, 'bin', '_patchsupport.py'), exec_dict)
        find_matching = exec_dict['FindMatching']
        dir_list = find_matching('bin', winghome, user_settings)
    except Exception:
        # Best effort -- fall through to the standard locations below.
        dir_list = []
    dir_list.extend([os.path.join(winghome, 'bin'), os.path.join(winghome, 'src')])
    for path in dir_list:
        try:
            f, p, d = imp.find_module('wingdb', [path])
            try:
                return imp.load_module('wingdb', f, p, d)
            finally:
                if f is not None:
                    f.close()
        except ImportError:
            pass
    return None
#------------------------------------------------------------------------
# Start debugging if not disabled and this module has never been imported
# before
if not has_key(os.environ, 'WINGDB_ACTIVE'):
    debugger = None
if not kWingDebugDisabled and not has_key(os.environ, 'WINGDB_DISABLED') and \
   not has_key(os.environ, 'WINGDB_ACTIVE'):
    exit_on_fail = 0
    try:
        # Obtain exit if fails value
        exit_on_fail = os.environ.get('WINGDB_EXITONFAILURE', kExitOnFailure)
        # Obtain configuration for log file to use, if any
        logfile = os.environ.get('WINGDB_LOGFILE', kLogFile)
        if logfile == '-' or logfile == None or len(logfile.strip()) == 0:
            logfile = None
        very_verbose_log = os.environ.get('WINGDB_LOGVERYVERBOSE', kLogVeryVerbose)
        if type(very_verbose_log) == type('') and very_verbose_log.strip() == '':
            very_verbose_log = 0
        # Determine remote host/port where the IDE is running
        hostport = os.environ.get('WINGDB_HOSTPORT', kWingHostPort)
        colonpos = hostport.find(':')
        host = hostport[:colonpos]
        port = int(hostport[colonpos+1:])
        # Determine port to listen on locally for attach requests
        attachport = int(os.environ.get('WINGDB_ATTACHPORT', kAttachPort))
        # Check if running embedded script
        embedded = int(os.environ.get('WINGDB_EMBEDDED', kEmbedded))
        # Obtain debug password file search path
        if has_key(os.environ, 'WINGDB_PWFILEPATH'):
            pwfile_path = os.environ['WINGDB_PWFILEPATH'].split(os.pathsep)
        else:
            pwfile_path = kPWFilePath
        # Obtain debug password file name
        if has_key(os.environ, 'WINGDB_PWFILENAME'):
            pwfile_name = os.environ['WINGDB_PWFILENAME']
        else:
            pwfile_name = kPWFileName
        # Load wingdb.py
        wingdb = _ImportWingdb(WINGHOME, kUserSettingsDir)
        if wingdb == None:
            sys.stdout.write("*******************************************************************\n")
            sys.stdout.write("Error: Cannot find wingdb.py in $(WINGHOME)/bin or $(WINGHOME)/src\n")
            sys.stdout.write("Error: Please check the WINGHOME definition in wingdbstub.py\n")
            sys.exit(2)
        # Find the netserver module and create an error stream
        netserver = wingdb.FindNetServerModule(WINGHOME, kUserSettingsDir)
        err = wingdb.CreateErrStream(netserver, logfile, very_verbose_log)
        # Start debugging
        debugger = netserver.CNetworkServer(host, port, attachport, err,
                                            pwfile_path=pwfile_path,
                                            pwfile_name=pwfile_name,
                                            autoquit=not embedded)
        debugger.StartDebug(stophere=0)
        # Mark debugging active so re-importing this module is a no-op.
        os.environ['WINGDB_ACTIVE'] = "1"
        if debugger.ChannelClosed():
            raise ValueError('Not connected')
    except:
        # Deliberate best-effort startup: any failure here is swallowed
        # unless WINGDB_EXITONFAILURE / kExitOnFailure requests otherwise.
        if exit_on_fail:
            raise
        else:
            pass
def Ensure(require_connection=1, require_debugger=1):
    """ Ensure the debugger is started and attempt to connect to the IDE if
    not already connected. Will raise a ValueError if:
    * the require_connection arg is true and the debugger is unable to connect
    * the require_debugger arg is true and the debugger cannot be loaded
    """
    dbg = debugger
    if dbg is None:
        if not require_debugger:
            return
        raise ValueError("No debugger")
    if dbg.DebugActive():
        # Already debugging -- just reconnect if the IDE channel dropped.
        if dbg.ChannelClosed():
            dbg.ConnectToClient()
    else:
        dbg.StartDebug()
    if require_connection and dbg.ChannelClosed():
        raise ValueError('Not connected')
|
|
#!/usr/bin/env python
# Written by Irving Y. Ruan <irvingruan@gmail.com>
# All rights reserved.
"""
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# See README for instructions on how to use Matisse
import sys
import os
import fnmatch
import errno
import mmap
import contextlib
import shutil
import subprocess
import mhtml
# Byte count of the .itc container preamble that precedes the embedded JPEG
# (stripped off by create_jpeg_from_itc below).
JPEG_SIGNATURE_OFFSET = 492
# Running counter used to number the extracted AlbumArtwork<N>.jpeg files.
artwork_item_count = 0
artwork_name_prefix = "AlbumArtwork"
local_url_prefix = "file://"
# Output directory for the generated gallery; expanded to an absolute path.
artworkDumpPath = '~/Desktop/MatisseAlbumArtwork/'
artworkDumpPath = os.path.expanduser(artworkDumpPath)
def print_usage():
    """Print the command-line usage string to stdout.

    Uses the parenthesized print form, which behaves identically on
    Python 2 (prints the single expression) and Python 3, instead of the
    Python-2-only print statement.
    """
    print("Usage: ./Matisse.py\n")
def help():
    """Print a pointer to the README on stdout.

    NOTE: this intentionally keeps the original name even though it
    shadows the builtin help() within this module.  The parenthesized
    print form behaves identically on Python 2 and Python 3.
    """
    print("For help, see README.\n")
def locate_album_artwork_path():
    """Find the local iTunes "Album Artwork/Download" directory.

    Walks ~/Music and returns the first subfolder named "Download" whose
    parent directory is named "Album Artwork".  Exits the process with
    status -1 when no such folder exists.

    Fix: corrected the typo "director" -> "directory" in the error
    message.
    """
    # Only recursively search down Music directory
    album_artwork_path = os.path.expanduser("~/Music")
    for root, subFolders, files in os.walk(album_artwork_path):
        for sf in subFolders:
            # We only want the Download folder, not Cache
            if sf.lower() == "download":
                path_components = os.path.join(root, sf).split("/")
                if path_components[-2].lower() == "album artwork":
                    return os.path.join(root, sf)
    sys.stderr.write("Unable to find iTunes Album Artwork directory. Make sure your album artwork is located in your iTunes music directory as \"Album Artwork.\"\n")
    sys.exit(-1)
def retrieve_itc_files(album_artwork_path):
    """Return the list of iTunes artwork (*.itc) files under the given path."""
    matches = []
    # Walk the whole tree, keeping only filenames that match the
    # iTunes album artwork pattern.
    for dirpath, _subdirs, filenames in os.walk(album_artwork_path):
        matches.extend(os.path.join(dirpath, name)
                       for name in fnmatch.filter(filenames, '*.itc'))
    return matches
def create_artwork_dump():
    """Create the artwork dump directory tree for the HTML/artwork output.

    Silently succeeds when the directory already exists (EEXIST); any
    other OSError is re-raised as a generic Exception.

    Fixes: ``except OSError, e`` (a SyntaxError on Python 3) is now the
    ``as`` form, which is valid on Python 2.6+ and Python 3 with the same
    behavior; the unreachable sys.exit(-1) after the raise was removed.
    """
    try:
        os.makedirs(artworkDumpPath + '/artwork')
    except OSError as e:
        if e.errno != errno.EEXIST:
            # NOTE(review): this message is misleading -- the branch fires
            # for errors *other* than "already exists"; kept verbatim for
            # compatibility with anything matching on it.
            raise Exception("Artwork dump folder already exists!")
def create_jpeg_from_itc(artwork_file):
    """Strip the .itc container header in place and rename the file to JPEG.

    The file is shifted down by JPEG_SIGNATURE_OFFSET bytes (the .itc
    metadata preamble) and truncated so the embedded JPEG starts at
    offset 0, then renamed to <artwork_name_prefix><N>.jpeg in the same
    directory.  Exits the process on any failure.
    """
    global artwork_item_count
    global artwork_name_prefix
    try:
        # Each converted file gets the next sequential number in its name.
        artwork_item_count += 1
        itc_file_handle = open(artwork_file, "r+")
        byte_data = mmap.mmap(itc_file_handle.fileno(),0)
        file_size = len(byte_data)
        new_size = file_size - JPEG_SIGNATURE_OFFSET
        # Extract out ITC metadata info that we don't need for now:
        # shift the JPEG payload to the front of the mapped file.
        byte_data.move(0, JPEG_SIGNATURE_OFFSET, file_size - JPEG_SIGNATURE_OFFSET)
        byte_data.flush()
        byte_data.close()
        # Drop the now-duplicated tail so only the JPEG bytes remain.
        itc_file_handle.truncate(new_size)
        byte_data = mmap.mmap(itc_file_handle.fileno(),0)
        # Rebuild the path as <dir>/AlbumArtwork<N>.jpeg and rename in place.
        jpeg_file = artwork_file.replace('.itc', '.jpeg')
        artwork_path_components = jpeg_file.split("/")
        artwork_path_components[-1] = artwork_name_prefix + str(artwork_item_count) + ".jpeg"
        jpeg_file = "/".join(artwork_path_components)
        os.rename(artwork_file, jpeg_file)
    except:
        # NOTE(review): bare except hides the real error; only a generic
        # message is reported before exiting.
        sys.stderr.write("Error: could not convert %s to JPEG." % str(artwork_file))
        sys.exit(-1)
def generate_html():
    """Generate index.html with one <div class="item"> per extracted JPEG.

    Counts the JPEGs in the artwork dump directory and writes the page
    skeleton from the mhtml module around the generated <img> entries.
    Exits the process on any failure.

    Fixes: the unused local ``item`` was removed; the output file is now
    managed by a with-statement so it is closed on error; the bare except
    was narrowed to Exception (Ctrl-C now propagates).
    """
    try:
        artwork_jpegs = os.listdir(artworkDumpPath + "/artwork")
        with open('index.html', 'w') as html_output:
            html_output.write(mhtml.header)
            html_output.write(mhtml.body_start)
            # Files were renamed AlbumArtwork1.jpeg .. AlbumArtworkN.jpeg,
            # so only the count matters here, not the listing order.
            for i in range(len(artwork_jpegs)):
                html_output.write("\t\t\t<div class=\"item\">\n")
                html_output.write("\t\t\t\t<img class=\"content\" src=\"artwork/AlbumArtwork" + str(i+1) + ".jpeg\"/>\n")
                html_output.write("\t\t\t</div>\n")
            html_output.write(mhtml.body_end)
    except Exception:
        sys.stderr.write("Error: Unable to generate index.html.")
        sys.exit(-1)
def deploy_matisse():
    """Copy over required HTML/JS/CSS files to the publish-ready directory
    and open the result in Safari.  Exits the process on any failure."""
    try:
        # Copy over the .html, .js, and .css files
        shutil.move(os.getcwd() + "/index.html", artworkDumpPath)
        # NOTE(review): matisse_publish_files is computed but never used.
        matisse_publish_files = os.listdir(os.getcwd() + '/publish')
        # NOTE(review): shell=True with string-concatenated paths works for
        # this local tool but breaks on paths containing spaces or shell
        # metacharacters; a list argv with shell=False would be safer.
        rv = subprocess.Popen('cp -rf ' + os.getcwd() + '/publish/. ' + artworkDumpPath, shell=True)
        rv.wait()
        # Fire up Safari to see the result
        rv = subprocess.Popen('open /Applications/Safari.app ' + artworkDumpPath + '/index.html', shell=True)
        rv.wait()
    except:
        sys.stderr.write("Error: Could not publish Matisse Cover Flow.\n")
        sys.exit(-1)
def convert_proc():
    """Starts the process for obtaining .itc files and converting them.

    Copies every discovered .itc file into the dump folder first so the
    originals in the iTunes library are never modified, then converts the
    copies in place to numbered JPEGs.
    """
    # NOTE(review): local_url_prefix is declared global but never used here.
    global local_url_prefix
    album_artwork_path = locate_album_artwork_path()
    itc_list = retrieve_itc_files(album_artwork_path)
    create_artwork_dump()
    # Copy over the .itc files so we don't modify iTunes version
    # We simply want the album artwork
    for itc_file in itc_list:
        shutil.copy(itc_file, artworkDumpPath+'/artwork')
    # artworkDumpPath already ends with '/', so no separator is added here.
    artwork_path = artworkDumpPath + 'artwork'
    new_itc_files = os.listdir(artwork_path)
    for itc_file in new_itc_files:
        create_jpeg_from_itc(os.path.join(artwork_path, itc_file))
def main():
    """Entry point: convert the .itc library and publish the gallery.

    Takes no command-line arguments.  If the dump folder already exists
    and is non-empty, asks on stderr whether to recreate it; otherwise
    runs the full convert/generate/deploy pipeline.
    """
    if len(sys.argv) > 1:
        print_usage()
        sys.exit(-1)
    global artwork_item_count
    artwork_item_count = 0
    # If we already have the JPEGs, no need to convert it again
    if not(os.path.exists(artworkDumpPath) and os.listdir(artworkDumpPath)):
        convert_proc()
        generate_html()
        deploy_matisse()
    else:
        sys.stderr.write("Album artwork dump folder already exists. Recreate anyway? (y/n):")
        # The int 0 doubles as a sentinel for "interrupted / no input";
        # any other single character simply falls through to exit(0).
        key = 0
        try:
            key = sys.stdin.read(1)
        except KeyboardInterrupt:
            key = 0
        if key == 'y':
            convert_proc()
            generate_html()
            deploy_matisse()
        elif key == 'n':
            sys.stderr.write("\nView your artwork at " + artworkDumpPath)
        elif key == 0:
            sys.stderr.write("\nError: keyboard interrupted.")
            sys.exit(-1)
    sys.exit(0)
if __name__ == "__main__":
    main()
|
|
#by amounra 0513 : http://www.aumhaa.com
import Live
from _Tools.re import *
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from Live8DeviceComponent import Live8DeviceComponent as DeviceComponent
from _Generic.Devices import *
class MonoDeviceComponent(DeviceComponent):
    __doc__ = ' Class representing a device linked to a Monomodular client, to be redirected by it from Max '

    def __init__(self, parent, bank_dict={}, mod_types={}, *a, **k):
        # NOTE(review): the {} defaults are shared across calls; they are
        # only replaced (never mutated) in this class, so this is safe as
        # written -- confirm callers do not mutate them.
        super(MonoDeviceComponent, self).__init__(*a, **k)
        self._MOD_BANK_DICT = bank_dict   # bank-name tables for mod devices
        self._MOD_TYPES = mod_types       # bank tables keyed by mod type
        self._type = None                 # currently selected mod type key
        self._device_parent = None        # rack/device the client follows
        self._parent = parent             # owning Monomodular component
        self._chain = 0
        self._device_chain = 0            # chain index inside a rack parent
        self._number_params = 12
        self._params = []                 # parameter holders (see _assign_params)
        self._custom_parameter = []       # client-supplied extra parameters
        self._nodevice = NoDevice()       # placeholder when nothing is mapped
def disconnect(self):
    """Detach all Live listeners and clear state before teardown.

    Removes the devices-listeners registered on the canonical parents of
    both the tracked parent device and the current device, resets
    type/parent/chain state, then delegates to the base class.

    Fix: the original duplicated the ``!= None`` guard on both the parent
    device and the device (the same check nested twice); the duplicates
    were collapsed without changing behavior.
    """
    parent_device = self._device_parent
    if parent_device != None and parent_device.canonical_parent != None:
        if parent_device.canonical_parent.devices_has_listener(self._parent_device_changed):
            parent_device.canonical_parent.remove_devices_listener(self._parent_device_changed)
    if self._device != None and self._device.canonical_parent != None:
        if self._device.canonical_parent.devices_has_listener(self._device_changed):
            self._device.canonical_parent.remove_devices_listener(self._device_changed)
    self._type = None
    self._device_parent = None
    self._device_chain = None
    super(MonoDeviceComponent, self).disconnect()
def disconnect_client(self):
    """Release the client's device hookup without tearing the component down.

    Clears the mapped device, custom parameters and parent tracking, then
    restores the stock bank tables via _set_type(None).
    """
    self.set_device(None)
    self._custom_parameter = []
    self._device_parent = None
    self._device_chain = 0
    self._set_type(None)
def set_device_defs(self, bank_dict={}, mod_types={}):
    """Install new mod bank-name/type tables and refresh the component."""
    self._MOD_BANK_DICT = bank_dict
    self._MOD_TYPES = mod_types
    self.update()
def _set_type(self, mod_device_type):
    """Switch between the stock Live bank tables and a mod type's tables.

    None restores the default _Generic.Devices dictionaries and disables
    the component; a key of _MOD_TYPES installs that type's bank tables
    and re-applies the current device parent.  Unknown types are silently
    ignored (state is left untouched), as in the original.

    Idiom cleanup only: ``== None`` -> ``is None`` and membership tested
    directly on the dict instead of ``.keys()``; behavior is unchanged.
    """
    if mod_device_type is None:
        self._device_banks = DEVICE_DICT
        self._device_best_banks = DEVICE_BOB_DICT
        self._device_bank_names = BANK_NAME_DICT
        self._set_device_parent(None)
        self.set_enabled(False)
    elif mod_device_type in self._MOD_TYPES:
        self.set_enabled(True)
        self._type = mod_device_type
        self._device_banks = self._MOD_TYPES[self._type]
        self._device_best_banks = self._MOD_TYPES[self._type]
        self._device_bank_names = self._MOD_BANK_DICT
        self._set_device_parent(self._device_parent)
def _set_device_parent(self, mod_device_parent, single = None):
    """Track a new parent device (rack or plain device) for the client.

    Drops the devices-listener on the previous parent's canonical parent,
    then: a rack (can_have_chains, and *single* not given) is followed via
    the stored chain index; a plain device (or explicit *single*) is used
    directly; with no Live device, falls back to the NoDevice placeholder
    when the active bank table defines a 'NoDevice' entry, else clears the
    device entirely.
    """
    #self._parent._host.log_message('_set_device_parent ' + str(mod_device_parent) + ' ' + str(single))
    if self._device_parent != None:
        if self._device_parent.canonical_parent != None:
            if self._device_parent.canonical_parent.devices_has_listener(self._parent_device_changed):
                self._device_parent.canonical_parent.remove_devices_listener(self._parent_device_changed)
    if isinstance(mod_device_parent, Live.Device.Device):
        #self._parent._host.log_message('_set_device_parent is device')
        if mod_device_parent.can_have_chains and single is None:
            # Rack: watch its parent for device-list changes and select the
            # device on the remembered chain.
            self._device_parent = mod_device_parent
            if self._device_parent.canonical_parent != None:
                if not self._device_parent.canonical_parent.devices_has_listener(self._parent_device_changed):
                    self._device_parent.canonical_parent.add_devices_listener(self._parent_device_changed)
            self._select_parent_chain(self._device_chain)
        else:
            # Plain device (or explicit single selection): map it directly.
            self._device_parent = mod_device_parent
            self.set_device(self._device_parent, True)
    elif 'NoDevice' in self._device_banks.keys():
        #self._parent._host.log_message('_set_device_parent is NoDevice')
        self._device_parent = self._nodevice
        self._device_chain = 0
        self.set_device(self._device_parent, True)
    else:
        #self._parent._host.log_message('_set_device_parent is \"None\"')
        self._device_parent = None
        self._device_chain = 0
        self.set_device(self._device_parent, True)
def _select_parent_chain(self, chain, force = False):
    """Select the first device on the given chain of the parent rack.

    Falls back to the NoDevice placeholder (when the bank table defines
    one) or to no device at all; afterwards, while enabled, resets every
    active handler's parameter encoders so stale values are not shown.
    """
    #self._parent._host.log_message('_select_parent_chain ' + str(chain)) # + ' ' + str(self.is_enabled()))
    self._device_chain = chain #self._chain = chain
    if self._device_parent != None:
        if isinstance(self._device_parent, Live.Device.Device):
            if self._device_parent.can_have_chains:
                if len(self._device_parent.chains) > chain:
                    if len(self._device_parent.chains[chain].devices) > 0:
                        self.set_device(self._device_parent.chains[chain].devices[0], force)
                    elif 'NoDevice' in self._device_banks.keys():
                        self.set_device(self._nodevice, True)
                    else:
                        self.set_device(None)
    if self.is_enabled():
        # Push a reset to every connected controller's parameter encoders.
        for host in self._parent._active_handlers:
            for control in host._parameter_controls:
                control.reset()
def _parent_device_changed(self):
    """Live callback: the rack's device list changed.

    Drops the tracked parent and tells the Max client (via 'lcd parent
    check') to re-resolve it.
    """
    #self._parent._host.log_message('parent_device_changed')
    self._set_device_parent(None)
    self._parent._send('lcd', 'parent', 'check')
def _device_changed(self):
    """Live callback: the device's sibling list changed.

    Clears the mapped device and tells the Max client (via 'lcd device
    check') to re-resolve it.
    """
    #self._parent._host.log_message('device_changed')
    self.set_device(None)
    self._parent._send('lcd', 'device', 'check')
def _number_of_parameter_banks(self):
    # Bank count for the current device; number_of_parameter_banks is
    # presumably provided by the base class or the _Generic.Devices
    # star-import -- TODO confirm which one resolves here.
    return self.number_of_parameter_banks(self._device) #added
def get_parameter_by_name(self, device, name):
    """Resolve *name* to a DeviceParameter on *device*.

    Lookup order:
    1. the device's own parameters, matched by original_name;
    2. the special names Mod_Chain_Pan / Mod_Chain_Vol /
       Mod_Chain_Send_<n>, resolved against the enclosing chain's
       mixer_device;
    3. ModDevice_<name>, resolved against the parent component's device;
    4. CustomParameter_<n>, resolved against self._custom_parameter.
    Returns None when nothing matches.

    Fix: the Mod_Chain_Vol branch guarded on ``.panning != None`` (a
    copy-paste from the pan branch) while returning ``.volume``; it now
    guards on the attribute it actually uses.
    """
    result = None
    for i in device.parameters:
        if (i.original_name == name):
            result = i
            break
    if result == None:
        if name == 'Mod_Chain_Pan':
            if device.canonical_parent.mixer_device != None:
                if device.canonical_parent.mixer_device.panning != None:
                    result = device.canonical_parent.mixer_device.panning
        elif name == 'Mod_Chain_Vol':
            if device.canonical_parent.mixer_device != None:
                if device.canonical_parent.mixer_device.volume != None:
                    result = device.canonical_parent.mixer_device.volume
        elif(match('Mod_Chain_Send_', name)):
            # The numeric suffix selects one of the chain's send knobs.
            name = int(name.replace('Mod_Chain_Send_', ''))
            if device.canonical_parent != None:
                if device.canonical_parent.mixer_device != None:
                    if device.canonical_parent.mixer_device.sends != None:
                        if len(device.canonical_parent.mixer_device.sends) > name:
                            result = device.canonical_parent.mixer_device.sends[name]
        elif(match('ModDevice_', name) and self._parent.device != None):
            # Look the stripped name up on the parent component's device.
            name = name.replace('ModDevice_', '')
            for i in self._parent.device.parameters:
                if (i.name == name):
                    result = i
                    break
        elif match('CustomParameter_', name):
            index = int(name.replace('CustomParameter_', ''))
            if len(self._custom_parameter) > index:
                if isinstance(self._custom_parameter[index], Live.DeviceParameter.DeviceParameter):
                    result = self._custom_parameter[index]
    return result
def _turn_on_filter(self, param):
    # Pulse the parameter (1 then 0) -- presumably a button-style
    # parameter that toggles the filter on -- then refresh the display.
    # TODO confirm the target parameter's semantics.
    param.value = 1
    param.value = 0
    self.update()
def _recheck_FF(self, device):
    """Refresh the display once *device* exposes a "Filter Freq" parameter.

    Fix: get_parameter_by_name is a method of this class; the original
    called it as a bare name, which raised NameError at runtime.
    """
    if self.get_parameter_by_name(device, "Filter Freq") != None:
        self.update()
def _assign_parameters(self, host, *a):
    """Connect *host*'s physical parameter controls to the current bank.

    Resolves the bank table for the device's class name (honoring any
    per-mod-type override in host._alt_device_banks), then connects each
    control to its named parameter or releases it.  Devices without a
    bank table fall back to a flat, bank-paged view of
    _device_parameters_to_map().

    Fix: the bank-name lookup previously item-assigned into the freshly
    created string _bank_name (a TypeError at runtime); it now replaces
    _bank_name with the indexed entry of the names table, the form used
    by _Framework.DeviceComponent.
    """
    #self._parent._host.log_message('assign_parameters '+str(host))
    assert self.is_enabled()
    assert (self._device != None)
    assert (host._parameter_controls != None)
    if(host.is_enabled()):
        for control in host._parameter_controls:
            control.clear_send_cache()
    self._bank_name = ('Bank ' + str(self._bank_index + 1))
    if (self._device.class_name in self._device_banks.keys()):
        class_name = self._device.class_name
    else:
        class_name = 'Other'
    if (class_name in self._device_banks.keys()):
        assert (class_name in self._device_best_banks.keys())
        banks = self._device_banks[class_name]
        # A handler may carry per-mod-type overrides of the bank tables.
        if '_alt_device_banks' in dir(host):
            if self._type in host._alt_device_banks.keys():
                if (class_name in host._alt_device_banks[self._type].keys()):
                    banks = host._alt_device_banks[self._type][class_name]
        bank = None
        if (len(banks) > self._bank_index):
            bank = banks[self._bank_index]
            if self._is_banking_enabled():
                if class_name in self._device_bank_names.keys():
                    # Fixed: was self._bank_name[self._bank_index] = ...,
                    # which raised TypeError (str item assignment).
                    self._bank_name = self._device_bank_names[class_name][self._bank_index]
        if not host._parameter_controls is None:
            for index in range(len(host._parameter_controls)):
                parameter = None
                if (bank != None) and (index in range(len(bank))):
                    parameter = self.get_parameter_by_name(self._device, bank[index])
                if (parameter != None):
                    host._parameter_controls[index].connect_to(parameter)
                else:
                    host._parameter_controls[index].release_parameter()
    else:
        # No bank table for this class: page through the mappable
        # parameters, one page of num_controls per bank index.
        parameters = self._device_parameters_to_map()
        if not host._parameter_controls is None:
            num_controls = len(host._parameter_controls)
            index = (self._bank_index * num_controls)
            for control in host._parameter_controls:
                if (index < len(parameters)):
                    control.connect_to(parameters[index])
                else:
                    control.release_parameter()
                index += 1
def _assign_params(self, *a):
    """Connect the client-side parameter holders (self._params) to the device.

    Mirrors _assign_parameters but targets the mod client's parameter
    holders instead of physical controls; afterwards every holder pushes
    its current value back to the client via _value_change().

    Fixes relative to the original:
    * ``not len(self._params) is 0`` (identity test against an int
      literal) is now a plain inequality;
    * the bank-name lookup no longer item-assigns into the _bank_name
      string (TypeError at runtime);
    * the unbanked fallback indexed self._params with the page offset
      (IndexError for any bank_index > 0); each holder is now paired with
      the offset *parameter*, matching _assign_parameters.
    """
    #self._parent._host.log_message('assign params!')
    if self._device != None and len(self._params) != 0:
        self._bank_name = ('ModBank ' + str(self._bank_index + 1))
        if (self._device.class_name in self._device_banks.keys()):
            class_name = self._device.class_name
        else:
            class_name = 'Other'
        if (class_name in self._device_banks.keys()):
            assert (class_name in self._device_best_banks.keys())
            banks = self._device_banks[class_name]
            bank = None
            if (len(banks) > self._bank_index):
                bank = banks[self._bank_index]
                if self._is_banking_enabled():
                    if class_name in self._device_bank_names.keys():
                        # Fixed: was self._bank_name[self._bank_index] = ...,
                        # which raised TypeError (str item assignment).
                        self._bank_name = self._device_bank_names[class_name][self._bank_index]
            for index in range(len(self._params)):
                parameter = None
                if (bank != None) and (index in range(len(bank))):
                    parameter = self.get_parameter_by_name(self._device, bank[index])
                if (parameter != None):
                    self._params[index]._parameter = self._connect_param(self._params[index], parameter)
                else:
                    self._params[index]._parameter = self._connect_param(self._params[index], None)
        else:
            # No bank table: page through the raw parameters (the first
            # parameter is skipped), one page per bank index.
            parameters = self._device.parameters[1:]
            index = (self._bank_index * len(self._params))
            for param in self._params:
                if (index < len(parameters)):
                    param._parameter = self._connect_param(param, parameters[index])
                else:
                    param._parameter = self._connect_param(param, None)
                index += 1
    else:
        # Nothing to map: disconnect every holder.
        for param in self._params:
            param._parameter = self._connect_param(param, None)
    for param in self._params:
        param._value_change()
def _connect_param(self, holder, parameter):
    """Point *holder* at *parameter*, moving its value listener across.

    Special case: a 'Track Volume' parameter on a track without audio
    output whose first device is a 'MidiVelocity' plugin is redirected to
    that device's parameter 6 (its 'outhi' parameter), so the fader still
    controls something audible.  Returns the parameter actually assigned,
    or None when disconnecting.
    """
    #self._parent._host.log_message('connecting ') # + str(holder._parameter) + ' ' + str(parameter))
    self._mapped_to_midi_velocity = False
    if (holder._parameter!= None):
        # Detach the holder's listener from whatever it was bound to.
        if holder._parameter.value_has_listener(holder._value_change):
            holder._parameter.remove_value_listener(holder._value_change)
        #self._parent._host.log_message('removing ' + str(holder._parameter.name))
    if parameter != None:
        assignment = parameter
        if(str(parameter.name) == str('Track Volume')): #checks to see if parameter is track volume
            if(parameter.canonical_parent.canonical_parent.has_audio_output is False): #checks to see if track has audio output
                if(len(parameter.canonical_parent.canonical_parent.devices) > 0):
                    if(str(parameter.canonical_parent.canonical_parent.devices[0].class_name)==str('MidiVelocity')): #if not, looks for velocity as first plugin
                        assignment = parameter.canonical_parent.canonical_parent.devices[0].parameters[6] #if found, assigns fader to its 'outhi' parameter
                        self._mapped_to_midi_velocity = True
        assignment.add_value_listener(holder._value_change)
        #self._parent._host.log_message('adding ' + str(assignment.name))
        return assignment
    else:
        return None
def _on_device_name_changed(self):
    """Push the current device's display name (or a blank) to the client LCD."""
    if self._device != None:
        label = str(self.generate_strip_string(str(self._device.name)))
    else:
        label = ' '
    self._parent._send('lcd', 'device_name', 'lcd_name', label)
def _params_value_change(self, sender, control_name, feedback = True):
    """Forward a parameter change to the client's LCD and encoder ring.

    Sends the parameter's name, its display value (or '-bound-' when the
    parameter is not enabled -- presumably macro-bound) and, when
    *feedback* is on, the value rescaled into the MIDI range 0..127.
    """
    #self._parent._host.log_message('params change ' + str(sender) + str(control_name))
    pn = ' '
    pv = ' '
    val = 0
    if(sender != None):
        pn = str(self.generate_strip_string(str(sender.name)))
        if sender.is_enabled:
            try:
                value = str(sender)
            except:
                # Fall back to a blank if the parameter can't be rendered.
                value = ' '
            pv = str(self.generate_strip_string(value))
        else:
            pv = '-bound-'
        # Normalize the raw value into 0..127 for the encoder feedback.
        val = ((sender.value - sender.min) / (sender.max - sender.min)) * 127
    self._parent._send('lcd', control_name, 'lcd_name', pn)
    self._parent._send('lcd', control_name, 'lcd_value', pv)
    if feedback == True:
        self._parent._send('lcd', control_name, 'encoder_value', val)
def generate_strip_string(self, display_string):
    """Condense or pad *display_string* to exactly 12 characters for the LCD.

    Long strings lose a trailing 'dB' unit and then spaces/vowels (right to
    left, never position 0); short strings are centered. Non-ASCII characters
    become blanks, and every blank is shown as '_'.
    """
    width = 12
    limit = width - 1
    if not display_string:
        return ' ' * width
    # Drop a trailing 'dB' unit from long decimal values to save room.
    if len(display_string.strip()) > limit and display_string.endswith('dB') and display_string.find('.') != -1:
        display_string = display_string[:-2]
    if len(display_string) > limit:
        # Squeeze out spaces, then vowels, scanning from the right (pos >= 1).
        for squeeze in [' ', 'i', 'o', 'u', 'e', 'a']:
            while len(display_string) > limit and display_string.rfind(squeeze, 1) != -1:
                cut = display_string.rfind(squeeze, 1)
                display_string = display_string[:cut] + display_string[cut + 1:]
    else:
        display_string = display_string.center(limit)
    ret = u''
    for i in range(limit):
        ch = display_string[i]
        if ord(ch) > 127 or ord(ch) < 0:
            ret += ' '
        else:
            ret += ch
    ret += ' '
    ret = ret.replace(' ', '_')
    assert len(ret) == width
    return ret
def set_device(self, device, force = False):
    """Point this component at *device*, transferring all Live listeners.

    device -- a Live.Device.Device, a NoDevice placeholder, or None.
    force  -- reassign even when *device* is the currently assigned device.
    """
    #self._parent._host.log_message('set device: ' + str(device) + ' ' + str(force))
    assert ((device == None) or isinstance(device, Live.Device.Device) or isinstance(device, NoDevice))
    # Stop watching the old device's chain for device-list changes.
    if self._device != None:
        if self._device.canonical_parent != None:
            if self._device.canonical_parent.devices_has_listener(self._device_changed):
                self._device.canonical_parent.remove_devices_listener(self._device_changed)
    if ((not self._locked_to_device) and (device != self._device)) or force==True:
        if (self._device != None):
            # Tear down name/parameter/on-off listeners on the outgoing device.
            if self._device.name_has_listener(self._on_device_name_changed):
                self._device.remove_name_listener(self._on_device_name_changed)
            if self._device.parameters_has_listener(self._on_parameters_changed):
                self._device.remove_parameters_listener(self._on_parameters_changed)
            parameter = self._on_off_parameter()
            if (parameter != None):
                if parameter.value_has_listener(self._on_on_off_changed):
                    parameter.remove_value_listener(self._on_on_off_changed)
        # Free all physical controls on every active handler before rewiring.
        for host in self._parent._active_handlers:
            if (host._parameter_controls != None):
                for control in host._parameter_controls:
                    control.release_parameter()
        self._device = device
        if (self._device != None):
            if self._device.canonical_parent != None:
                if not self._device.canonical_parent.devices_has_listener(self._device_changed):
                    self._device.canonical_parent.add_devices_listener(self._device_changed)
            self._bank_index = 0
            self._device.add_name_listener(self._on_device_name_changed)
            self._device.add_parameters_listener(self._on_parameters_changed)
            parameter = self._on_off_parameter()
            if (parameter != None):
                parameter.add_value_listener(self._on_on_off_changed)
        # Restore the bank the user last used with this device, if recorded.
        for key in self._device_bank_registry.keys():
            if (key == self._device):
                self._bank_index = self._device_bank_registry.get(key, 0)
                del self._device_bank_registry[key]
                break
        self._bank_name = '<No Bank>' #added
        self._on_device_name_changed()
        self.update()
def _post(self, msg):
#self._parent._host.log_message(str(msg))
pass
def update(self):
    """Reassign device parameters to all active hosts and mod param holders.

    When enabled: record the current bank, (re)assign every enabled host's
    controls, or release everything when no device is present. The mod-side
    ParamHolder list is then refreshed, and each host's MIDI map rebuild and
    device component update is requested.
    """
    #self._parent._host.log_message('update!')
    if self.is_enabled():
        if self._device != None:
            self._device_bank_registry[self._device] = self._bank_index
            for host in self._parent._active_host:
                if host.is_enabled() and not host._parameter_controls is None and len(host._parameter_controls) > 0:
                    old_bank_name = self._bank_name
                    self._assign_parameters(host)
                    # Only flash the bank name when it actually changed.
                    if self._bank_name != old_bank_name:
                        self._show_msg_callback(str(self._device.name) + ' Bank: ' + str(self._bank_name))
        else:
            # No device: free every control so hosts fall back to defaults.
            for host in self._parent._active_host:
                if host._parameter_controls != None:
                    for control in host._parameter_controls:
                        control.release_parameter()
    self._update_params()
    self._assign_params()
    if self.is_enabled():
        for host in self._parent._active_host:
            if host.is_enabled():
                if not host._parameter_controls is None and len(host._parameter_controls) > 0:
                    host._script.request_rebuild_midi_map()
                if hasattr(host, '_device_component'):
                    if host._device_component != None:
                        try:
                            #host._device_component.update()
                            # Deferred so Live finishes the current cycle first.
                            self._parent._host.schedule_message(1, host._device_component.update)
                        except:
                            pass
#major hack here....this will need to be changed to a constant based on the length of the MOD_TYPES bank used
def _update_params(self):
    """Resize the ParamHolder list to the widest active host's control count."""
    count = self._number_params
    # Grow to the largest control bank any host exposes.
    for host in self._parent._host._hosts:
        if not host._parameter_controls is None and len(host._parameter_controls) > count:
            count = len(host._parameter_controls)
    if count != len(self._params):
        self._number_params = count
        if self._number_params > 0:
            self._params = [ParamHolder(self, None, index) for index in range(self._number_params)]
        else:
            # Dropping to zero slots: detach listeners before discarding holders.
            for param in self._params:
                self._connect_param(param, None)
            self._params = []
"""def set_parameter_controls(self, controls):
    self._params = [ParamHolder(self, controls[index]) for index in range(len(controls))]
    #DeviceComponent.set_parameter_controls(self, controls)"""
def _device_parameters_to_map(self):
raise self.is_enabled() or AssertionError
raise self._device != None or AssertionError
raise host._parameter_controls != None or AssertionError
return self._device.parameters[1:]
def set_number_params(self, number, args2=None, args3=None, args4=None):
    """Set how many mod parameter slots to expose, then refresh assignments."""
    self._number_params = number
    self.update()
def set_number_custom(self, number, args2=None, args3=None, args4=None):
    """Resize the custom-parameter slot list to *number* empty entries."""
    self._custom_parameter = [None] * number
def set_custom_parameter(self, number, parameter, args3=None, args4=None):
    """Store *parameter* in custom slot *number*; only DeviceParameter or None is accepted."""
    if number < len(self._custom_parameter):
        if parameter is None or isinstance(parameter, Live.DeviceParameter.DeviceParameter):
            self._custom_parameter[number] = parameter
            self.update()
def set_mod_device_type(self, mod_device_type, args2=None, args3=None, args4=None):
    """Re-trigger every active handler's enabled state, then switch the mod type."""
    for handler in self._parent._active_handlers:
        handler.on_enabled_changed()
    self._set_type(mod_device_type)
def set_mod_device(self, mod_device, args2=None, args3=None, args4=None):
    """Force-assign *mod_device*, then refresh every active handler."""
    self.set_device(mod_device, True)
    for handler in self._parent._active_handlers:
        handler.update()
def set_mod_device_parent(self, mod_device_parent, single=None, args3=None, args4=None):
    """Assign the parent (rack) device, then refresh every active handler."""
    self._set_device_parent(mod_device_parent, single)
    for handler in self._parent._active_handlers:
        handler.update()
def set_mod_device_chain(self, chain, args2=None, args3=None, args4=None):
    """Select chain index *chain* on the parent device, then refresh handlers."""
    self._select_parent_chain(chain, True)
    for handler in self._parent._active_handlers:
        handler.update()
def set_parameter_value(self, num, val, args2=None, args3=None, args4=None):
    """Write controller value *val* (0..127) into param slot *num*, if present."""
    if self._device is None:
        return
    if num < len(self._params):
        self._params[num]._change_value(val)
def set_custom_parameter_value(self, num, value, args2=None, args3=None, args4=None):
    """Scale *value* (0..127) into custom slot *num*'s native parameter range."""
    if num >= len(self._custom_parameter):
        return
    parameter = self._custom_parameter[num]
    if parameter is not None:
        span = float(parameter.max - parameter.min)
        parameter.value = (float(value) / 127) * span + parameter.min
def set_device_bank(self, bank_index, args2=None, args3=None, args4=None):
    """Switch to parameter bank *bank_index* when it exists for the current device."""
    if not self.is_enabled():
        return
    if self._device is None:
        return
    if self._number_of_parameter_banks() > bank_index:
        self._bank_name = ''
        self._bank_index = bank_index
        self.update()
def number_of_parameter_banks(self, device):
    """ Determine the amount of parameter banks the given device has """
    # Returns 0 for no device, the bank count from the bank dictionary when
    # the device class is known, otherwise ceil(param_count / page_size).
    result = 0
    if (device != None):
        result = 1
        if (device.class_name in self._device_banks.keys()):
            device_bank = self._device_banks[device.class_name]
            result = len(device_bank)
        elif len(self._params) > 0:
            # BUGFIX: was `len(self._params > 0)`, which raises a TypeError.
            param_count = len(list(device.parameters))
            result = (param_count // len(self._params))
            if (not ((param_count % len(self._params)) == 0)):
                result += 1
    return result
def on_enabled_changed(self):
    """Re-run assignment whenever the component is switched on or off."""
    self.update()
class NewMonoDeviceComponent(DeviceComponent):
    __doc__ = ' Class representing a device linked to a Monomodular client, to be redirected by it from Max '


    def __init__(self, parent, bank_dict={}, mod_types={}, *a, **k):
        # NOTE: bank_dict/mod_types defaults are shared dicts; they are only
        # read here, never mutated, so the mutable-default pitfall is benign.
        super(NewMonoDeviceComponent, self).__init__(*a, **k)
        self._MOD_BANK_DICT = bank_dict
        self._MOD_TYPES = mod_types
        self._type = None
        self._device_parent = None
        self._parent = parent
        self.log_message = parent.log_message
        self._chain = 0
        self._device_chain = 0
        self._number_params = 12
        self._params = []
        self._custom_parameter = []
        self._nodevice = NoDevice()

    def disconnect(self):
        """Remove all chain/device listeners and reset mod-side state."""
        if self._device_parent != None:
            if self._device_parent.canonical_parent != None:
                if self._device_parent.canonical_parent.devices_has_listener(self._parent_device_changed):
                    self._device_parent.canonical_parent.remove_devices_listener(self._parent_device_changed)
        if self._device != None:
            if self._device.canonical_parent != None:
                if self._device.canonical_parent.devices_has_listener(self._device_changed):
                    self._device.canonical_parent.remove_devices_listener(self._device_changed)
        self._type = None
        self._device_parent = None
        self._device_chain = None
        super(NewMonoDeviceComponent, self).disconnect()

    def disconnect_client(self):
        """Release the device and reset all client-supplied state."""
        self.set_device(None)
        self._custom_parameter = []
        self._device_parent = None
        self._device_chain = 0
        self._set_type(None)

    def set_device_defs(self, bank_dict={}, mod_types={}):
        """Replace the bank/type dictionaries supplied by the mod client."""
        self._MOD_BANK_DICT = bank_dict
        self._MOD_TYPES = mod_types
        self.update()

    def _set_type(self, mod_device_type):
        """Switch banking definitions for *mod_device_type* (None disables)."""
        #self.log_message('set type: ' + str(mod_device_type))
        if mod_device_type == None:
            self._device_banks = DEVICE_DICT
            self._device_best_banks = DEVICE_BOB_DICT
            self._device_bank_names = BANK_NAME_DICT
            self._set_device_parent(None)
            self.set_enabled(False)
        elif mod_device_type in self._MOD_TYPES.keys():
            self.set_enabled(True)
            self._type = mod_device_type
            self._device_banks = self._MOD_TYPES[self._type]
            self._device_best_banks = self._MOD_TYPES[self._type]
            self._device_bank_names = self._MOD_BANK_DICT
            self._set_device_parent(self._device_parent)

    def _set_device_parent(self, mod_device_parent, single = None):
        """Track *mod_device_parent* (rack or plain device) as the mod's parent.

        Falls back to the NoDevice placeholder (when the bank dict supports it)
        or to no device at all.
        """
        #self.log_message('_set_device_parent ' + str(mod_device_parent) + ' ' + str(single))
        if self._device_parent != None:
            if self._device_parent.canonical_parent != None:
                if self._device_parent.canonical_parent.devices_has_listener(self._parent_device_changed):
                    self._device_parent.canonical_parent.remove_devices_listener(self._parent_device_changed)
        if isinstance(mod_device_parent, Live.Device.Device):
            #self.log_message('_set_device_parent is device')
            if mod_device_parent.can_have_chains and single is None:
                self._device_parent = mod_device_parent
                if self._device_parent.canonical_parent != None:
                    if not self._device_parent.canonical_parent.devices_has_listener(self._parent_device_changed):
                        self._device_parent.canonical_parent.add_devices_listener(self._parent_device_changed)
                self._select_parent_chain(self._device_chain)
            else:
                self._device_parent = mod_device_parent
                self.set_device(self._device_parent, True)
        elif 'NoDevice' in self._device_banks.keys():
            #self.log_message('_set_device_parent is NoDevice')
            self._device_parent = self._nodevice
            self._device_chain = 0
            self.set_device(self._device_parent, True)
        else:
            #self.log_message('_set_device_parent is \"None\"')
            self._device_parent = None
            self._device_chain = 0
            self.set_device(self._device_parent, True)

    def _select_parent_chain(self, chain, force = False):
        """Select the first device on chain index *chain* of the parent rack."""
        #self.log_message('_select_parent_chain ' + str(chain)) # + ' ' + str(self.is_enabled()))
        self._device_chain = chain
        if self._device_parent != None:
            if isinstance(self._device_parent, Live.Device.Device):
                if self._device_parent.can_have_chains:
                    if len(self._device_parent.chains) > chain:
                        if len(self._device_parent.chains[chain].devices) > 0:
                            self.set_device(self._device_parent.chains[chain].devices[0], force)
                        elif 'NoDevice' in self._device_banks.keys():
                            self.set_device(self._nodevice, True)
                        else:
                            self.set_device(None)
        if self.is_enabled():
            for host in self._parent._active_handlers:
                for control in host._parameter_controls:
                    control.reset()

    def _select_drum_pad(self, pad, force = False):
        """Select the first device on drum pad index *pad* of the parent rack."""
        if self._device_parent != None:
            if isinstance(self._device_parent, Live.Device.Device):
                if self._device_parent.can_have_drum_pads and self._device_parent.has_drum_pads:
                    pad = self._device_parent.drum_pads[pad]
                    self._parent.log_message('pad is: ' + str(pad))
                    if pad.chains and pad.chains[0] and pad.chains[0].devices and isinstance(pad.chains[0].devices[0], Live.Device.Device):
                        self.set_device(pad.chains[0].devices[0], force)
                    elif 'NoDevice' in self._device_banks.keys():
                        self.set_device(self._nodevice, True)
                    else:
                        self.set_device(None)
        if self.is_enabled():
            for host in self._parent._active_handlers:
                for control in host._parameter_controls:
                    control.reset()

    def _parent_device_changed(self):
        """The parent rack's device list changed: drop it and notify the client."""
        self._set_device_parent(None)
        self._parent.send('lcd', 'parent', 'check')

    def _device_changed(self):
        """The tracked device's chain changed: drop it and notify the client."""
        self.set_device(None)
        self._parent.send('lcd', 'device', 'check')

    def _number_of_parameter_banks(self):
        return self.number_of_parameter_banks(self._device) #added

    def get_parameter_by_name(self, device, name):
        """ Find the given device's parameter that belongs to the given name """
        # Besides the device's own parameters, several virtual names are
        # resolved: chain pan/volume/sends, the owning ModDevice's parameters
        # and client-registered custom parameters.
        result = None
        for i in device.parameters:
            if (i.original_name == name):
                result = i
                break
        if result == None:
            if name == 'Mod_Chain_Pan':
                if device.canonical_parent.mixer_device != None:
                    if device.canonical_parent.mixer_device.panning != None:
                        result = device.canonical_parent.mixer_device.panning
            elif name == 'Mod_Chain_Vol':
                # NOTE(review): guards on .panning but returns .volume — looks
                # like a copy/paste slip in the original; behavior preserved.
                if device.canonical_parent.mixer_device != None:
                    if device.canonical_parent.mixer_device.panning != None:
                        result = device.canonical_parent.mixer_device.volume
            elif(match('Mod_Chain_Send_', name)):
                name = int(name.replace('Mod_Chain_Send_', ''))
                if device.canonical_parent != None:
                    if device.canonical_parent.mixer_device != None:
                        if device.canonical_parent.mixer_device.sends != None:
                            if len(device.canonical_parent.mixer_device.sends) > name:
                                result = device.canonical_parent.mixer_device.sends[name]
            elif(match('ModDevice_', name) and self._parent.device != None):
                name = name.replace('ModDevice_', '')
                for i in self._parent.device.parameters:
                    if (i.name == name):
                        result = i
                        break
            elif match('CustomParameter_', name):
                index = int(name.replace('CustomParameter_', ''))
                if len(self._custom_parameter) > index:
                    if isinstance(self._custom_parameter[index], Live.DeviceParameter.DeviceParameter):
                        result = self._custom_parameter[index]
        return result

    def _turn_on_filter(self, param):
        """Pulse *param* on/off, then refresh assignments."""
        param.value = 1
        param.value = 0
        self.update()

    def _recheck_FF(self, device):
        """Refresh assignments once a 'Filter Freq' parameter appears on *device*."""
        # BUGFIX: was a bare `get_parameter_by_name(...)` (NameError) — it is a method.
        if self.get_parameter_by_name(device, "Filter Freq") != None:
            self.update()

    def _assign_parameters(self, host, *a):
        """Connect *host*'s physical controls to the current device's bank."""
        assert (self._device != None)
        if(host.is_enabled()):
            for control in host._parameter_controls:
                control.clear_send_cache()
            self._bank_name = ('Bank ' + str(self._bank_index + 1)) #added
            if (self._device.class_name in self._device_banks.keys()):
                class_name = self._device.class_name
            else:
                class_name = 'Other'
            if (class_name in self._device_banks.keys()): #modified
                assert (class_name in self._device_best_banks.keys())
                banks = self._device_banks[class_name]
                # A handler may override the banks for this mod type entirely.
                if '_alt_device_banks' in dir(host):
                    if self._type in host._alt_device_banks.keys():
                        if (class_name in host._alt_device_banks[self._type].keys()):
                            banks = host._alt_device_banks[self._type][class_name]
                bank = None
                if (len(banks) > self._bank_index):
                    bank = banks[self._bank_index]
                    if self._is_banking_enabled(): #added
                        if class_name in self._device_bank_names.keys(): #added
                            # BUGFIX: was `self._bank_name[self._bank_index] = ...`
                            # (item assignment on a string raises TypeError);
                            # restored the framework's lookup form.
                            self._bank_name = self._device_bank_names[class_name][self._bank_index]
                if not host._parameter_controls is None:
                    for index in range(len(host._parameter_controls)):
                        parameter = None
                        if (bank != None) and (index in range(len(bank))):
                            parameter = self.get_parameter_by_name(self._device, bank[index])
                        if (parameter != None):
                            host._parameter_controls[index].connect_to(parameter)
                        else:
                            host._parameter_controls[index].release_parameter()
            else:
                # Unknown device class: page linearly through its parameters.
                parameters = self._device_parameters_to_map()
                if not host._parameter_controls is None:
                    num_controls = len(host._parameter_controls)
                    index = (self._bank_index * num_controls)
                    for control in host._parameter_controls:
                        if (index < len(parameters)):
                            control.connect_to(parameters[index])
                        else:
                            control.release_parameter()
                        index += 1

    def _assign_params(self, *a):
        """Connect the mod-side ParamHolders to the current device's bank."""
        if self._device != None and len(self._params) != 0:
            self._bank_name = ('ModBank ' + str(self._bank_index + 1)) #added
            if (self._device.class_name in self._device_banks.keys()):
                class_name = self._device.class_name
            else:
                class_name = 'Other'
            if (class_name in self._device_banks.keys()): #modified
                assert (class_name in self._device_best_banks.keys())
                banks = self._device_banks[class_name]
                bank = None
                if (len(banks) > self._bank_index):
                    bank = banks[self._bank_index]
                    if self._is_banking_enabled(): #added
                        if class_name in self._device_bank_names.keys(): #added
                            # BUGFIX: was string item assignment (TypeError);
                            # restored the framework's lookup form.
                            self._bank_name = self._device_bank_names[class_name][self._bank_index]
                for index in range(len(self._params)):
                    parameter = None
                    if (bank != None) and (index in range(len(bank))):
                        parameter = self.get_parameter_by_name(self._device, bank[index])
                    if (parameter != None):
                        self._params[index]._parameter = self._connect_param(self._params[index], parameter)
                    else:
                        self._params[index]._parameter = self._connect_param(self._params[index], None)
            else:
                # Unknown device class: page linearly through its parameters.
                parameters = self._device.parameters[1:]
                num_controls = len(self._params)
                # BUGFIX: the original reused the *source* offset to index
                # self._params as well, which raised IndexError for any
                # bank_index > 0; holders are now indexed 0..n-1 while the
                # source offset pages through the parameter list.
                offset = (self._bank_index * num_controls)
                for slot, param in enumerate(self._params):
                    source = offset + slot
                    if (source < len(parameters)):
                        param._parameter = self._connect_param(param, parameters[source])
                    else:
                        param._parameter = self._connect_param(param, None)
        else:
            for param in self._params:
                param._parameter = self._connect_param(param, None)
        for param in self._params:
            param._value_change()

    def _connect_param(self, holder, parameter):
        """Rewire *holder* to listen to *parameter*; returns the parameter
        actually listened to (possibly the MidiVelocity redirect) or None."""
        #self.log_message('connecting ') # + str(holder._parameter) + ' ' + str(parameter))
        self._mapped_to_midi_velocity = False
        if (holder._parameter != None):
            if holder._parameter.value_has_listener(holder._value_change):
                holder._parameter.remove_value_listener(holder._value_change)
            #self.log_message('removing ' + str(holder._parameter.name))
        if parameter != None:
            assignment = parameter
            if(str(parameter.name) == str('Track Volume')): #checks to see if parameter is track volume
                if(parameter.canonical_parent.canonical_parent.has_audio_output is False): #checks to see if track has audio output
                    if(len(parameter.canonical_parent.canonical_parent.devices) > 0):
                        if(str(parameter.canonical_parent.canonical_parent.devices[0].class_name) == str('MidiVelocity')): #if not, looks for velocity as first plugin
                            assignment = parameter.canonical_parent.canonical_parent.devices[0].parameters[6] #if found, assigns fader to its 'outhi' parameter
                            self._mapped_to_midi_velocity = True
            assignment.add_value_listener(holder._value_change)
            #self.log_message('adding ' + str(assignment.name))
            return assignment
        else:
            return None

    def _on_device_name_changed(self):
        """Show the current device's name on the LCD (blank when no device)."""
        if (self._device != None):
            self._parent.send('lcd', 'device_name', 'lcd_name', str(self.generate_strip_string(str(self._device.name))))
        else:
            self._parent.send('lcd', 'device_name', 'lcd_name', ' ')

    def _params_value_change(self, sender, control_name, feedback = True):
        """Forward a parameter value change to the LCD/encoder *control_name*."""
        #self.log_message('params change ' + str(sender) + str(control_name))
        pn = ' '
        pv = ' '
        val = 0
        if(sender != None):
            pn = str(self.generate_strip_string(str(sender.name)))
            if sender.is_enabled:
                try:
                    value = str(sender)
                except:
                    value = ' '
                pv = str(self.generate_strip_string(value))
            else:
                pv = '-bound-'
            # Normalize the value into the controller's 0..127 range.
            val = ((sender.value - sender.min) / (sender.max - sender.min)) * 127
        self._parent.send('lcd', control_name, 'lcd_name', pn)
        self._parent.send('lcd', control_name, 'lcd_value', pv)
        if feedback == True:
            self._parent.send('lcd', control_name, 'encoder_value', val)

    def generate_strip_string(self, display_string):
        """Condense or pad *display_string* to exactly 12 chars for the LCD."""
        NUM_CHARS_PER_DISPLAY_STRIP = 12
        if (not display_string):
            return (' ' * NUM_CHARS_PER_DISPLAY_STRIP)
        # Long decimal dB values lose the trailing unit to save room.
        if ((len(display_string.strip()) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)) and (display_string.endswith('dB') and (display_string.find('.') != -1))):
            display_string = display_string[:-2]
        if (len(display_string) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)):
            # Squeeze out spaces, then vowels, right to left (never position 0).
            for um in [' ',
             'i',
             'o',
             'u',
             'e',
             'a']:
                while ((len(display_string) > (NUM_CHARS_PER_DISPLAY_STRIP - 1)) and (display_string.rfind(um, 1) != -1)):
                    um_pos = display_string.rfind(um, 1)
                    display_string = (display_string[:um_pos] + display_string[(um_pos + 1):])
        else:
            display_string = display_string.center((NUM_CHARS_PER_DISPLAY_STRIP - 1))
        ret = u''
        for i in range((NUM_CHARS_PER_DISPLAY_STRIP - 1)):
            if ((ord(display_string[i]) > 127) or (ord(display_string[i]) < 0)):
                ret += ' '
            else:
                ret += display_string[i]
        ret += ' '
        ret = ret.replace(' ', '_')
        assert (len(ret) == NUM_CHARS_PER_DISPLAY_STRIP)
        return ret

    def set_device(self, device, force = False):
        """Assign *device* (Device, NoDevice or None), moving all listeners over.

        force -- reassign even when *device* is already the current device.
        """
        #self.log_message('set device: ' + str(device) + ' ' + str(force))
        assert ((device == None) or isinstance(device, Live.Device.Device) or isinstance(device, NoDevice))
        if self._device != None:
            if self._device.canonical_parent != None:
                if self._device.canonical_parent.devices_has_listener(self._device_changed):
                    self._device.canonical_parent.remove_devices_listener(self._device_changed)
        if ((not self._locked_to_device) and (device != self._device)) or force == True:
            if (self._device != None):
                # Tear down name/parameter/on-off listeners on the old device.
                if self._device.name_has_listener(self._on_device_name_changed):
                    self._device.remove_name_listener(self._on_device_name_changed)
                if self._device.parameters_has_listener(self._on_parameters_changed):
                    self._device.remove_parameters_listener(self._on_parameters_changed)
                parameter = self._on_off_parameter()
                if (parameter != None):
                    if parameter.value_has_listener(self._on_on_off_changed):
                        parameter.remove_value_listener(self._on_on_off_changed)
            # Free all physical controls before rewiring.
            for host in self._parent._active_handlers:
                if (host._parameter_controls != None):
                    for control in host._parameter_controls:
                        control.release_parameter()
            self._device = device
            if (self._device != None):
                if self._device.canonical_parent != None:
                    if not self._device.canonical_parent.devices_has_listener(self._device_changed):
                        self._device.canonical_parent.add_devices_listener(self._device_changed)
                self._bank_index = 0
                self._device.add_name_listener(self._on_device_name_changed)
                self._device.add_parameters_listener(self._on_parameters_changed)
                parameter = self._on_off_parameter()
                if (parameter != None):
                    parameter.add_value_listener(self._on_on_off_changed)
            # Restore the bank the user last used with this device, if any.
            for key in self._device_bank_registry.keys():
                if (key == self._device):
                    self._bank_index = self._device_bank_registry.get(key, 0)
                    del self._device_bank_registry[key]
                    break
            self._bank_name = '<No Bank>' #added
            self._on_device_name_changed()
            self.update()

    def _post(self, *msg):
        """Debug logging passthrough."""
        self.log_message(str(msg))

    def update(self):
        """Reassign the device to all active handlers and mod param holders."""
        #self.log_message('update!')
        if self._device != None:
            self._device_bank_registry[self._device] = self._bank_index
            for host in self._parent._active_handlers:
                if host.is_enabled() and not host._parameter_controls is None and len(host._parameter_controls) > 0:
                    old_bank_name = self._bank_name
                    self._assign_parameters(host)
                    # Only flash the bank name when it actually changed.
                    if self._bank_name != old_bank_name:
                        self._show_msg_callback(str(self._device.name) + ' Bank: ' + str(self._bank_name))
        else:
            for host in self._parent._active_handlers:
                if host._parameter_controls != None:
                    for control in host._parameter_controls:
                        control.release_parameter()
        self._update_params()
        self._assign_params()
        for host in self._parent._active_handlers:
            if host.is_enabled():
                if not host._parameter_controls is None:
                    if len(host._parameter_controls) > 0:
                        host._script.request_rebuild_midi_map()
                else:
                    host.update_device()
        """if hasattr(host, '_device_component'):
            if host._device_component != None:
                try:
                    #host._device_component.update()
                    self._parent._host.schedule_message(1, host._device_component.update)
                except:
                    pass"""

    def _update_params(self):
        """Rebuild the ParamHolder list when the requested param count changes."""
        count = self._number_params
        if count != len(self._params):
            if self._number_params > 0:
                self._params = [ParamHolder(self, None, index) for index in range(self._number_params)]
            else:
                # Dropping to zero slots: detach listeners before discarding.
                for param in self._params:
                    self._connect_param(param, None)
                self._params = []

    def _device_parameters_to_map(self):
        """Return the mappable parameters of the device (all but 'Device On').

        BUGFIX: the decompiled original used `raise <check> or AssertionError`,
        which raises even when the checks succeed, and referenced an undefined
        name `host`; restored the intended assertions.
        """
        assert self.is_enabled()
        assert self._device != None
        return self._device.parameters[1:]

    def set_number_params(self, number, *a):
        """Set how many mod parameter slots to expose, then refresh."""
        self._number_params = number
        self.update()

    def set_number_custom(self, number, *a):
        """Resize the custom-parameter slot list to *number* empty entries."""
        self._custom_parameter = [None for index in range(number)]

    def set_custom_parameter(self, number, parameter, *a):
        """Store *parameter* in custom slot *number* (DeviceParameter or None)."""
        if number < len(self._custom_parameter):
            if isinstance(parameter, Live.DeviceParameter.DeviceParameter) or parameter is None:
                self._custom_parameter[number] = parameter
                self.update()

    def set_mod_device_type(self, mod_device_type, *a):
        """Re-trigger handler enabled states, then switch the mod type."""
        for host in self._parent._active_handlers:
            host.on_enabled_changed()
        self._set_type(mod_device_type)

    def set_mod_device(self, mod_device, *a):
        """Force-assign *mod_device*, then refresh every active handler."""
        self.set_device(mod_device, True)
        for host in self._parent._active_handlers:
            host.update()

    def set_mod_device_parent(self, mod_device_parent, single=None, *a):
        """Assign the parent (rack) device, then refresh every handler."""
        self._set_device_parent(mod_device_parent, single)
        for host in self._parent._active_handlers:
            host.update()

    def set_mod_device_chain(self, chain, *a):
        """Select chain index *chain* on the parent, then refresh handlers."""
        self._select_parent_chain(chain, True)
        for host in self._parent._active_handlers:
            host.update()

    def set_mod_drum_pad(self, pad, *a):
        """Select drum pad *pad* on the parent, then refresh handlers."""
        self._select_drum_pad(pad, True)
        for host in self._parent._active_handlers:
            host.update()

    def set_mod_parameter_value(self, num, val, *a):
        """Write controller value *val* into param slot *num*, if present."""
        if self._device != None:
            if num < len(self._params):
                self._params[num]._change_value(val)

    def set_custom_parameter_value(self, num, value, *a):
        """Scale *value* (0..127) into custom slot *num*'s native range."""
        if num < len(self._custom_parameter):
            parameter = self._custom_parameter[num]
            if parameter != None:
                newval = float(float(float(value) / 127) * float(parameter.max - parameter.min)) + parameter.min
                parameter.value = newval

    def set_mod_device_bank(self, bank_index, *a):
        """Switch to parameter bank *bank_index* when it exists."""
        if self.is_enabled():
            if (self._device != None):
                if (self._number_of_parameter_banks() > bank_index):
                    self._bank_name = ''
                    self._bank_index = bank_index
                    self.update()

    def number_of_parameter_banks(self, device):
        """ Determine the amount of parameter banks the given device has """
        result = 0
        if (device != None):
            result = 1
            if (device.class_name in self._device_banks.keys()):
                device_bank = self._device_banks[device.class_name]
                result = len(device_bank)
            elif len(self._params) > 0:
                # BUGFIX: was `len(self._params > 0)`, which raises a TypeError.
                param_count = len(list(device.parameters))
                result = (param_count // len(self._params))
                if (not ((param_count % len(self._params)) == 0)):
                    result += 1
        return result

    def on_enabled_changed(self):
        """Re-run assignment whenever the component is switched on or off."""
        self.update()
class ParamHolder(object):
    """Binds one Device.parameter slot to a named encoder/LCD control.

    Forwards Live value updates to the mod surface via the parent component
    and writes incoming 0..127 control values back into the parameter's
    native range.
    """

    def __init__(self, parent, control, index):
        self._control = control
        self._control_name = 'Encoder_' + str(index)
        self._parent = parent
        self._parameter = None
        self._feedback = True

    def _value_change(self):
        """Relay the held parameter's new value to the parent's display logic."""
        self._parent._params_value_change(self._parameter, self._control_name, self._feedback)
        self._feedback = True

    def _change_value(self, value):
        """Scale a 0..127 controller *value* into the parameter's range and apply it."""
        parameter = self._parameter
        if parameter is None:
            return
        if not parameter.is_enabled:
            return
        # Suppress the echo for the update triggered by our own write.
        self._feedback = False
        span = float(parameter.max - parameter.min)
        parameter.value = (float(value) / 127) * span + parameter.min
class NoDevice(object):
    """Stand-in device used when no real Live device is selected.

    Exposes the minimal Device surface (no parameters, no-op listener
    registration) so assignment code can run unconditionally.
    """

    def __init__(self):
        self.class_name = 'NoDevice'
        self.parameters = []
        self.canonical_parent = None
        self.can_have_chains = False
        self.name = 'NoDevice'

    def add_name_listener(self, callback=None):
        """Listener registration is a no-op for the placeholder."""
        pass

    def remove_name_listener(self, callback=None):
        """No listeners are ever held, so removal is a no-op."""
        pass

    def name_has_listener(self, callback=None):
        """The placeholder never reports an attached listener."""
        return False

    def add_parameters_listener(self, callback=None):
        """Listener registration is a no-op for the placeholder."""
        pass

    def remove_parameters_listener(self, callback=None):
        """No listeners are ever held, so removal is a no-op."""
        pass

    def parameters_has_listener(self, callback=None):
        """The placeholder never reports an attached listener."""
        return False

    def store_chosen_bank(self, callback=None):
        """Bank persistence is meaningless without a real device."""
        pass
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
    """Backfill QuestionOption.UID as 'O<pk>' for every existing option."""
    options = orm['questionnaire.questionoption'].objects.all()
    for opt in options:
        opt.UID = "O%d" % opt.id
        opt.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.answer': {
'Meta': {'object_name': 'Answer'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.Question']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Draft'", 'max_length': '15'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'})
},
'questionnaire.answergroup': {
'Meta': {'object_name': 'AnswerGroup'},
'answer': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['questionnaire.Answer']", 'null': 'True', 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'grouped_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionGroup']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'questionnaire.comment': {
'Meta': {'object_name': 'Comment'},
'answer_group': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'comments'", 'symmetrical': 'False', 'to': "orm['questionnaire.AnswerGroup']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'questionnaire.country': {
'Meta': {'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'countries'", 'null': 'True', 'to': "orm['questionnaire.Region']"})
},
'questionnaire.dateanswer': {
'Meta': {'object_name': 'DateAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.DateField', [], {})
},
'questionnaire.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionOption']"})
},
'questionnaire.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'})
},
'questionnaire.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'questionnaire.question': {
'Meta': {'object_name': 'Question'},
'UID': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '6'}),
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'is_core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'short_instruction': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'questionnaire.questiongroup': {
'Meta': {'ordering': "('order',)", 'object_name': 'QuestionGroup'},
'allow_multiples': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_group'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"}),
'question': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'question_group'", 'symmetrical': 'False', 'to': "orm['questionnaire.Question']"}),
'subsection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'to': "orm['questionnaire.SubSection']"})
},
'questionnaire.questiongrouporder': {
'Meta': {'ordering': "('order',)", 'object_name': 'QuestionGroupOrder'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orders'", 'to': "orm['questionnaire.Question']"}),
'question_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orders'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"})
},
'questionnaire.questionnaire': {
'Meta': {'object_name': 'Questionnaire'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'year': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'questionnaire.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'UID': ('django.db.models.fields.CharField', [], {'max_length': '6', 'unique': 'True', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['questionnaire.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.region': {
'Meta': {'object_name': 'Region'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'regions'", 'null': 'True', 'to': "orm['questionnaire.Organization']"})
},
'questionnaire.section': {
'Meta': {'ordering': "('order',)", 'object_name': 'Section'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['questionnaire.Questionnaire']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.subsection': {
'Meta': {'ordering': "('order',)", 'object_name': 'SubSection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_sections'", 'to': "orm['questionnaire.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.textanswer': {
'Meta': {'object_name': 'TextAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'questionnaire.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['questionnaire']
symmetrical = True
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import copy
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.patheffects as PathEffects
from matplotlib.patches import Circle, Ellipse, Rectangle
from matplotlib.colors import LogNorm, Normalize, PowerNorm
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.lines import Line2D
import matplotlib.mlab as mlab
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import numpy as np
from scipy.stats import norm
from scipy.stats import chi2
from scipy import interpolate
from gammapy.maps import WcsNDMap, HpxNDMap, MapCoord
import fermipy
import fermipy.config
import fermipy.utils as utils
import fermipy.wcs_utils as wcs_utils
import fermipy.hpx_utils as hpx_utils
import fermipy.defaults as defaults
import fermipy.catalog as catalog
from fermipy.utils import merge_dict
from fermipy.logger import Logger
from fermipy.logger import log_level
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256):
    """Function that extracts a subset of a colormap.

    Parameters
    ----------
    cmap : `~matplotlib.colors.Colormap`
        Colormap to truncate.
    minval, maxval : float or None
        Fractional range [0, 1] of *cmap* to keep; ``None`` means the
        corresponding default (0.0 / 1.0).
    n : int
        Number of samples taken from *cmap* to build the new colormap.

    Returns
    -------
    `~matplotlib.colors.LinearSegmentedColormap`
    """
    if minval is None:
        minval = 0.0
    if maxval is None:
        # BUG fix: this previously reset maxval to 0.0, which collapsed the
        # truncated range to a single color; None must mean "full upper end".
        maxval = 1.0
    name = "%s-trunc-%.2g-%.2g" % (cmap.name, minval, maxval)
    return LinearSegmentedColormap.from_list(
        name, cmap(np.linspace(minval, maxval, n)))
def get_xerr(sed):
    """Return the (2, N) array of lower/upper energy-bin half-widths of *sed*."""
    lower = sed['e_ctr'] - sed['e_min']
    upper = sed['e_max'] - sed['e_ctr']
    return np.vstack((lower, upper))
def make_counts_spectrum_plot(o, roi, energies, imfile, **kwargs):
    """Plot observed vs. model counts spectra for an ROI and save to *imfile*.

    Parameters
    ----------
    o : dict-like
        Holds the 'counts' and 'model_counts' arrays (or their weighted
        '_wt' variants when ``weighted=True``).
    roi : ROI model
        Source collection; each source provides per-bin 'model_counts' and
        a scalar 'npred'.  The six largest-npred sources get labels, the
        rest are drawn in gray without legend entries.
    energies : array-like
        Energy bin edges in log10(E/MeV).
    imfile : str
        Output image path passed to ``plt.savefig``.
    weighted : bool, keyword (default False)
        Use the '_wt' weighted counts columns.
    figsize : tuple, keyword (default (8.0, 6.0))
        Figure size in inches.
    """
    figsize = kwargs.get('figsize', (8.0, 6.0))
    weighted = kwargs.get('weighted', False)
    fig = plt.figure(figsize=figsize)
    # Top panel (spectrum) is 1.4x taller than the bottom (residual) panel.
    gs = gridspec.GridSpec(2, 1, height_ratios=[1.4, 1])
    ax0 = fig.add_subplot(gs[0, 0])
    ax1 = fig.add_subplot(gs[1, 0], sharex=ax0)
    # axes = axes_grid.Grid(fig,111,
    #                      nrows_ncols=(2,1),
    #                      axes_pad=0.05,
    #                      add_all=True)
    # ax = axes[0]
    # Bin centers and half-widths from the edge array.
    x = 0.5 * (energies[1:] + energies[:-1])
    xerr = 0.5 * (energies[1:] - energies[:-1])
    count_str = 'counts'
    model_counts_str = 'model_counts'
    npred_str = 'npred'
    if weighted:
        count_str += '_wt'
        model_counts_str += '_wt'
        npred_str += '_wt'
    y = o[count_str]
    ym = o[model_counts_str]
    # Data points with Poisson (sqrt(N)) errors.
    ax0.errorbar(x, y, yerr=np.sqrt(y), xerr=xerr, color='k',
                 linestyle='None', marker='s',
                 label='Data')
    ax0.errorbar(x, ym, color='k', linestyle='-', marker='None',
                 label='Total')
    # Brightest six sources (by npred) are labeled individually.
    for s in sorted(roi.sources,
                    key=lambda t: t[npred_str], reverse=True)[:6]:
        ax0.errorbar(x, s[model_counts_str], linestyle='-', marker='None',
                     label=s['name'])
    # Remaining sources drawn in gray without legend entries.
    for s in sorted(roi.sources,
                    key=lambda t: t[npred_str], reverse=True)[6:]:
        ax0.errorbar(x, s[model_counts_str], color='gray',
                     linestyle='-', marker='None',
                     label='__nolabel__')
    ax0.set_yscale('log')
    ax0.set_ylim(0.1, None)
    ax0.set_xlim(energies[0], energies[-1])
    ax0.legend(frameon=False, loc='best', prop={'size': 8}, ncol=2)
    # Bottom panel: fractional residuals (data - model) / model.
    ax1.errorbar(x, (y - ym) / ym, xerr=xerr, yerr=np.sqrt(y) / ym,
                 color='k', linestyle='None', marker='s',
                 label='Data')
    ax1.set_xlabel('Energy [log$_{10}$(E/MeV)]')
    ax1.set_ylabel('Fractional Residual')
    ax0.set_ylabel('Counts')
    ax1.set_ylim(-0.4, 0.4)
    ax1.axhline(0.0, color='k')
    plt.savefig(imfile)
    plt.close(fig)
def load_ds9_cmap():
    """Register (if necessary) and return the SAOimage ds9 'b' colormap.

    Color table reference:
    http://tdc-www.harvard.edu/software/saoimage/saoimage.color.html
    """
    segments = {
        'red': [[0.0, 0.0, 0.0],
                [0.25, 0.0, 0.0],
                [0.50, 1.0, 1.0],
                [0.75, 1.0, 1.0],
                [1.0, 1.0, 1.0]],
        'green': [[0.0, 0.0, 0.0],
                  [0.25, 0.0, 0.0],
                  [0.50, 0.0, 0.0],
                  [0.75, 1.0, 1.0],
                  [1.0, 1.0, 1.0]],
        'blue': [[0.0, 0.0, 0.0],
                 [0.25, 1.0, 1.0],
                 [0.50, 0.0, 0.0],
                 [0.75, 0.0, 0.0],
                 [1.0, 1.0, 1.0]],
    }
    try:
        # Already registered in a previous call?
        plt.cm.ds9_b = plt.cm.get_cmap('ds9_b')
    except ValueError:
        # Not yet known to matplotlib: build, register, then fetch it back.
        plt.register_cmap(cmap=LinearSegmentedColormap(name='ds9_b',
                                                       segmentdata=segments))
        plt.cm.ds9_b = plt.cm.get_cmap('ds9_b')
    return plt.cm.ds9_b
def load_bluered_cmap():
    """Register (if necessary) and return the 'bluered' colormap."""
    segments = {'red': ((0.0, 0.0, 0.0),
                        (0.5, 0.0, 0.0),
                        (1.0, 1.0, 1.0)),
                'green': ((0.0, 0.0, 0.0),
                          (1.0, 0.0, 0.0)),
                'blue': ((0.0, 0.0, 1.0),
                         (0.5, 0.0, 0.0),
                         (1.0, 0.0, 0.0))}
    try:
        # Already registered in a previous call?
        plt.cm.bluered = plt.cm.get_cmap('bluered')
    except ValueError:
        # Not yet known to matplotlib: build, register, then fetch it back.
        plt.register_cmap(cmap=LinearSegmentedColormap(name='bluered',
                                                       segmentdata=segments))
        plt.cm.bluered = plt.cm.get_cmap('bluered')
    return plt.cm.bluered
def annotate_name(data, xy=(0.05, 0.93), **kwargs):
    """Write ``data['name']`` onto the axes (keyword ``ax``, default current
    axes) at fractional position *xy*; no-op when the key is absent."""
    if 'name' not in data:
        return
    ax = kwargs.pop('ax', plt.gca())
    ax.annotate(data['name'],
                xy=xy,
                xycoords='axes fraction',
                fontsize=12,
                xytext=(-5, 5),
                textcoords='offset points',
                ha='left',
                va='center')
def annotate(**kwargs):
    """Annotate axes with the source name/association and/or energy range.

    Keyword arguments
    -----------------
    ax : axes, optional
        Target axes.  Resolved via ``plt.gca()`` only when actually needed,
        so passing an explicit axes (or drawing nothing) never creates a
        figure as a side effect -- previously ``plt.gca()`` was evaluated
        unconditionally as the ``pop`` default.
    src : dict, optional
        Source with 'name' and 'assoc' entries; the ASSOC1 association is
        appended in parentheses when present.
    loge_bounds : tuple, optional
        (log10 Emin, log10 Emax) in MeV, rendered as a GeV range.
    """
    ax = kwargs.pop('ax', None)
    loge_bounds = kwargs.pop('loge_bounds', None)
    src = kwargs.pop('src', None)
    text = []
    if src:
        if 'ASSOC1' in src['assoc'] and src['assoc']['ASSOC1']:
            text += ['%s (%s)' % (src['name'], src['assoc']['ASSOC1'])]
        else:
            text += [src['name']]
    if loge_bounds:
        text += ['E = %.3f - %.3f GeV' % (10 ** loge_bounds[0] / 1E3,
                                          10 ** loge_bounds[1] / 1E3)]
    if not text:
        return
    if ax is None:
        ax = plt.gca()
    ax.annotate('\n'.join(text),
                xy=(0.05, 0.93),
                xycoords='axes fraction', fontsize=12,
                xytext=(-5, 5), textcoords='offset points',
                ha='left', va='top')
def plot_markers(lon, lat, **kwargs):
    """Plot markers at sky coordinates (lon, lat) on the current axes.

    Keyword arguments: ``transform`` (coordinate frame name, default
    'icrs'), ``marker``, ``color``, ``label`` and optional matplotlib
    ``path_effects`` applied to the drawn artists.
    """
    transform = kwargs.get('transform', 'icrs')
    path_effects = kwargs.get('path_effects', None)
    ax = plt.gca()
    handles = ax.plot(lon, lat,
                      marker=kwargs.get('marker', '+'),
                      color=kwargs.get('color', 'w'),
                      label=kwargs.get('label', '__nolabel__'),
                      linestyle='None',
                      transform=ax.get_transform(transform))
    if path_effects:
        plt.setp(handles, path_effects=path_effects)
def plot_error_ellipse(fit, xy, cdelt, **kwargs):
    """Draw the positional-uncertainty ellipse of a localization fit.

    Parameters
    ----------
    fit : dict-like
        Localization result with 'pos_err', 'pos_err_semimajor',
        'pos_err_semiminor', 'pos_angle' and the containment radius column
        selected by ``colname`` (default 'r68').
    xy : tuple
        Ellipse center in pixel coordinates.
    cdelt : sequence
        Pixel scale (deg/pixel) per axis, used to convert widths to pixels.
    """
    ax = kwargs.pop('ax', plt.gca())
    colname = kwargs.pop('colname', 'r68')
    # NOTE(review): 'color' is popped from kwargs (so it never reaches
    # Ellipse) but is otherwise unused -- presumably it was intended to set
    # the ellipse edge color; confirm intent before changing.
    color = kwargs.pop('color', 'k')
    sigma = fit['pos_err']
    sigmax = fit['pos_err_semimajor']
    sigmay = fit['pos_err_semiminor']
    theta = fit['pos_angle']
    radius = fit[colname]
    # Scale the 1-sigma semi-axes up to the requested containment radius
    # and convert from degrees to pixels via cdelt.
    e0 = Ellipse(xy=(float(xy[0]), float(xy[1])),
                 width=2.0 * sigmax / cdelt[0] * radius / sigma,
                 height=2.0 * sigmay / cdelt[1] * radius / sigma,
                 angle=-theta,
                 facecolor='None', **kwargs)
    ax.add_artist(e0)
class ImagePlotter(object):
    """Render a 2D WCS map with matplotlib.

    Parameters
    ----------
    img : `~gammapy.maps.WcsNDMap`
        Map to plot.  HEALPix maps are recognized but not supported.
    mapping : unused
        Kept for backward compatibility with older callers.
    """

    def __init__(self, img, mapping=None):
        if isinstance(img, WcsNDMap):
            self._projtype = 'WCS'
            # Deep-copy so later mutations of the caller's map don't leak in.
            img = copy.deepcopy(img)
            self._geom = img.geom
        elif isinstance(img, HpxNDMap):
            # HEALPix input is identified but plotting it is not implemented.
            self._projtype = 'HPX'
            raise ValueError
        else:
            # BUG fix: this previously formatted type(proj), an undefined
            # name, so the intended error was masked by a NameError.
            raise ValueError("Can't plot map of unknown type %s" % type(img))
        self._img = img

    @property
    def projtype(self):
        """Projection type string ('WCS')."""
        return self._projtype

    @property
    def geom(self):
        """Geometry of the wrapped map."""
        return self._geom

    def plot(self, subplot=111, cmap='magma', **kwargs):
        """Draw the image on the current figure.

        Parameters
        ----------
        subplot : int
            Subplot specification passed to ``fig.add_subplot``.
        cmap : str
            Colormap name; falls back to 'ds9_b' if it cannot be loaded.
        zscale : str, keyword
            'lin' (default), 'log', 'sqrt' or 'pow' intensity scaling.
        gamma : float, keyword
            Exponent used when ``zscale == 'pow'``.
        transform : str, keyword
            If 'sqrt', plot the square root of the data.

        Returns
        -------
        (im, ax) : the AxesImage and the WCS axes.
        """
        kwargs_contour = {'levels': None, 'colors': ['k'],
                          'linewidths': 1.0}
        kwargs_imshow = {'interpolation': 'nearest',
                         'origin': 'lower', 'norm': None,
                         'vmin': None, 'vmax': None}
        zscale = kwargs.get('zscale', 'lin')
        gamma = kwargs.get('gamma', 0.5)
        transform = kwargs.get('transform', None)
        if zscale == 'pow':
            kwargs_imshow['norm'] = PowerNorm(gamma=gamma)
        elif zscale == 'sqrt':
            kwargs_imshow['norm'] = PowerNorm(gamma=0.5)
        elif zscale == 'log':
            kwargs_imshow['norm'] = LogNorm()
        else:
            # 'lin' and any unrecognized value both fall back to linear.
            kwargs_imshow['norm'] = Normalize()
        fig = plt.gcf()
        ax = fig.add_subplot(subplot, projection=self._geom.wcs)
        load_ds9_cmap()
        try:
            colormap = plt.cm.get_cmap(cmap).copy()
        except Exception:
            # Narrowed from a bare except: unknown colormap name or a
            # matplotlib too old to support Colormap.copy().
            colormap = plt.cm.get_cmap('ds9_b').copy()
        colormap.set_under(colormap(0))
        data = copy.copy(self._img.data)
        if transform == 'sqrt':
            data = np.sqrt(data)
        kwargs_imshow = merge_dict(kwargs_imshow, kwargs)
        kwargs_contour = merge_dict(kwargs_contour, kwargs)
        im = ax.imshow(data, **kwargs_imshow)
        im.set_cmap(colormap)
        if kwargs_contour['levels']:
            cs = ax.contour(data, **kwargs_contour)
            cs.levels = ['%.0f' % val for val in cs.levels]
            plt.clabel(cs, inline=1, fontsize=8)
        # Axis labels follow the map's coordinate frame unless overridden.
        frame = self._geom.frame
        if frame == 'icrs':
            ax.set_xlabel('RA')
            ax.set_ylabel('DEC')
        elif frame == 'galactic':
            ax.set_xlabel('GLON')
            ax.set_ylabel('GLAT')
        xlabel = kwargs.get('xlabel', None)
        ylabel = kwargs.get('ylabel', None)
        if xlabel is not None:
            ax.set_xlabel(xlabel)
        if ylabel is not None:
            ax.set_ylabel(ylabel)
        ax.coords.grid(color='white', linestyle=':',
                       linewidth=0.5)
        return im, ax
def make_cube_slice(map_in, loge_bounds):
    """Extract a slice from a map cube object.
    """
    # FIXME: This functionality should be moved into a slice method of
    # gammapy.maps
    energy_axis = map_in.geom.axes[0]
    # Convert the log10(E) bounds into bin-edge indices on the energy axis.
    i0 = utils.val_to_edge(energy_axis.edges, 10 ** loge_bounds[0])[0]
    i1 = utils.val_to_edge(energy_axis.edges, 10 ** loge_bounds[1])[0]
    sliced_axis = energy_axis.slice(slice(i0, i1))
    geom = map_in.geom.to_image().to_cube([sliced_axis])
    return WcsNDMap(geom, map_in.data[slice(i0, i1), ...].copy())
class ROIPlotter(fermipy.config.Configurable):
    """Plot an ROI counts/model map with catalog and source overlays.

    Parameters
    ----------
    data_map : `~gammapy.maps.WcsNDMap` or `~gammapy.maps.HpxNDMap`
        Map cube to plot.  HEALPix maps are reprojected to WCS.
    hpx2wcs : optional
        Precomputed HEALPix-to-WCS mapping used for the reprojection.
    roi : keyword, optional
        ROI model whose point sources are drawn on top of the map.
    """

    defaults = {
        'loge_bounds': (None, '', list),
        'catalogs': (None, '', list),
        'graticule_radii': (None, '', list),
        'label_ts_threshold': (0.0, '', float),
        'cmap': ('ds9_b', '', str),
    }

    def __init__(self, data_map, hpx2wcs=None, **kwargs):
        # Consume 'roi' here so it is not forwarded to Configurable.
        self._roi = kwargs.pop('roi', None)
        super(ROIPlotter, self).__init__(None, **kwargs)
        self._catalogs = []
        for c in self.config['catalogs']:
            if utils.isstr(c):
                self._catalogs += [catalog.Catalog.create(c)]
            else:
                self._catalogs += [c]
        self._loge_bounds = self.config['loge_bounds']
        if isinstance(data_map, WcsNDMap):
            self._projtype = 'WCS'
            self._data_map = copy.deepcopy(data_map)
        elif isinstance(data_map, HpxNDMap):
            self._projtype = 'HPX'
            self._data_map = data_map.to_wcs(normalize=False, hpx2wcs=hpx2wcs)
        else:
            raise Exception(
                "Can't make ROIPlotter of unknown projection type %s" % type(data_map))
        if self._loge_bounds:
            # Restrict the cube to the requested log10(E/MeV) range.
            self._data_map = make_cube_slice(self._data_map, self._loge_bounds)
        self._implot = ImagePlotter(self._data_map.sum_over_axes(keepdims=False))

    @property
    def data(self):
        """Data array of the (possibly energy-sliced) map."""
        return self._data_map.data

    @property
    def geom(self):
        """Geometry of the plotted map."""
        return self._data_map.geom

    @property
    def map(self):
        """The underlying map object."""
        return self._data_map

    @property
    def projtype(self):
        """'WCS' or 'HPX' depending on the input map type."""
        return self._projtype

    @property
    def proj(self):
        # NOTE(review): self._proj is never assigned anywhere in this class,
        # so accessing this property raises AttributeError -- confirm intent.
        return self._proj

    @classmethod
    def create_from_fits(cls, fitsfile, roi, **kwargs):
        """Build a ROIPlotter from a FITS map file and an ROI model."""
        # BUG fix: 'Map' was an undefined name here (NameError on use), and
        # 'roi' was passed positionally, binding it to the hpx2wcs argument
        # of __init__ instead of the 'roi' keyword it is popped from.
        map_in = WcsNDMap.read(fitsfile)
        return cls(map_in, roi=roi, **kwargs)

    def plot_projection(self, iaxis, **kwargs):
        """Plot the 1D counts projection of the map along spatial axis *iaxis*."""
        data_map = kwargs.pop('data', self._data_map)
        noerror = kwargs.pop('noerror', False)
        xmin = kwargs.pop('xmin', -1)
        xmax = kwargs.pop('xmax', 1)
        axes = wcs_utils.wcs_to_axes(self.geom.wcs,
                                     self._data_map.data.shape[-2:])
        x = utils.edge_to_center(axes[iaxis])
        xerr = 0.5 * utils.edge_to_width(axes[iaxis])
        y = self.get_data_projection(data_map, axes, iaxis,
                                     loge_bounds=self._loge_bounds,
                                     xmin=xmin, xmax=xmax)
        if noerror:
            plt.errorbar(x, y, **kwargs)
        else:
            # Poisson errors: sqrt(counts).
            plt.errorbar(x, y, yerr=y ** 0.5, xerr=xerr, **kwargs)

    @staticmethod
    def get_data_projection(data_map, axes, iaxis, xmin=-1, xmax=1, loge_bounds=None):
        """Sum the cube over every axis except spatial axis *iaxis*.

        The transverse spatial axis is restricted to [xmin, xmax] and the
        energy axis (axis 2 of the transposed array) to *loge_bounds*.
        """
        s0 = slice(None, None)
        s1 = slice(None, None)
        s2 = slice(None, None)
        if iaxis == 0:
            if xmin is None:
                xmin = axes[1][0]
            if xmax is None:
                xmax = axes[1][-1]
            i0 = utils.val_to_edge(axes[iaxis], xmin)[0]
            i1 = utils.val_to_edge(axes[iaxis], xmax)[0]
            s1 = slice(i0, i1)
            saxes = [1, 2]
        else:
            if xmin is None:
                xmin = axes[0][0]
            if xmax is None:
                xmax = axes[0][-1]
            i0 = utils.val_to_edge(axes[iaxis], xmin)[0]
            i1 = utils.val_to_edge(axes[iaxis], xmax)[0]
            s0 = slice(i0, i1)
            saxes = [0, 2]
        if loge_bounds is not None:
            j0 = utils.val_to_edge(
                data_map.geom.axes[0].edges, 10**loge_bounds[0])[0]
            j1 = utils.val_to_edge(
                data_map.geom.axes[0].edges, 10**loge_bounds[1])[0]
            s2 = slice(j0, j1)
        c = np.apply_over_axes(np.sum, data_map.data.T[s0, s1, s2], axes=saxes)
        c = np.squeeze(c)
        return c

    @staticmethod
    def setup_projection_axis(iaxis, loge_bounds=None):
        """Apply the standard legend and axis labels to the current axes."""
        plt.gca().legend(frameon=False, prop={'size': 10})
        plt.gca().set_ylabel('Counts')
        if iaxis == 0:
            plt.gca().set_xlabel('LON Offset [deg]')
        else:
            plt.gca().set_xlabel('LAT Offset [deg]')

    def plot_sources(self, skydir, labels,
                     plot_kwargs, text_kwargs, **kwargs):
        """Draw markers (and optionally labels) for sources at *skydir*.

        ``label_mask`` selects which entries get a text label;
        ``nolabels=True`` suppresses all labels.
        """
        ax = plt.gca()
        nolabels = kwargs.get('nolabels', False)
        label_mask = kwargs.get('label_mask',
                                np.ones(len(labels), dtype=bool))
        if nolabels:
            label_mask.fill(False)
        pixcrd = wcs_utils.skydir_to_pix(skydir, self._implot.geom.wcs)
        # Black outline keeps white markers/text readable on any background.
        path_effect = PathEffects.withStroke(linewidth=2.0,
                                             foreground="black")
        for i, (x, y, label, show_label) in enumerate(zip(pixcrd[0], pixcrd[1],
                                                          labels, label_mask)):
            if show_label:
                t = ax.annotate(label, xy=(x, y),
                                xytext=(5.0, 5.0), textcoords='offset points',
                                **text_kwargs)
                plt.setp(t, path_effects=[path_effect])
            t = ax.plot(x, y, **plot_kwargs)
            plt.setp(t, path_effects=[path_effect])

    def plot_roi(self, roi, **kwargs):
        """Overlay the ROI point sources, labeling those above the TS threshold."""
        src_color = 'w'
        label_ts_threshold = kwargs.get('label_ts_threshold', 0.0)
        plot_kwargs = dict(linestyle='None', marker='+',
                           markerfacecolor='None', mew=0.66, ms=8,
                           markeredgecolor=src_color, clip_on=True)
        text_kwargs = dict(color=src_color, size=8, clip_on=True,
                           fontweight='normal')
        ts = np.array([s['ts'] for s in roi.point_sources])
        # None -> no labels; <=0 -> label everything; otherwise TS cut.
        if label_ts_threshold is None:
            m = np.zeros(len(ts), dtype=bool)
        elif label_ts_threshold <= 0:
            m = np.ones(len(ts), dtype=bool)
        else:
            m = ts > label_ts_threshold
        skydir = roi._src_skydir
        labels = [s.name for s in roi.point_sources]
        self.plot_sources(skydir, labels, plot_kwargs, text_kwargs,
                          label_mask=m, **kwargs)

    def plot_catalog(self, catalog):
        """Overlay the sources of *catalog* that fall inside the map."""
        color = 'lime'
        plot_kwargs = dict(linestyle='None', marker='x',
                           markerfacecolor='None',
                           markeredgecolor=color, clip_on=True)
        text_kwargs = dict(color=color, size=8, clip_on=True,
                           fontweight='normal')
        skydir = catalog.skydir
        if 'NickName' in catalog.table.columns:
            labels = catalog.table['NickName']
        else:
            labels = catalog.table['Source_Name']
        # Keep only sources within the map's angular extent.
        separation = skydir.separation(self.map.skydir).deg
        m = separation < max(self.map.width)
        self.plot_sources(skydir[m], labels[m], plot_kwargs, text_kwargs,
                          nolabels=True)

    def plot(self, **kwargs):
        """Render the map with catalogs, ROI sources, colorbar and graticules."""
        zoom = kwargs.get('zoom', None)
        graticule_radii = kwargs.get('graticule_radii',
                                     self.config['graticule_radii'])
        label_ts_threshold = kwargs.get('label_ts_threshold',
                                        self.config['label_ts_threshold'])
        im_kwargs = dict(cmap=self.config['cmap'],
                         interpolation='nearest', transform=None,
                         vmin=None, vmax=None, levels=None,
                         zscale='lin', subplot=111, colors=['k'])
        cb_kwargs = dict(orientation='vertical', shrink=1.0, pad=0.1,
                         fraction=0.1, cb_label=None)
        im_kwargs = merge_dict(im_kwargs, kwargs)
        cb_kwargs = merge_dict(cb_kwargs, kwargs)
        im, ax = self._implot.plot(**im_kwargs)
        self._ax = ax
        for c in self._catalogs:
            self.plot_catalog(c)
        if self._roi is not None:
            self.plot_roi(self._roi,
                          label_ts_threshold=label_ts_threshold)
        self._extent = im.get_extent()
        ax.set_xlim(self._extent[0], self._extent[1])
        ax.set_ylim(self._extent[2], self._extent[3])
        self.zoom(zoom)
        cb_label = cb_kwargs.pop('cb_label', None)
        cb = plt.colorbar(im, **cb_kwargs)
        if cb_label:
            cb.set_label(cb_label)
        for r in graticule_radii:
            self.draw_circle(r)

    def draw_circle(self, radius, **kwargs):
        """Draw a dashed circle of *radius* degrees, centered on the map
        (or on the optional ``skydir`` keyword)."""
        skydir = kwargs.get('skydir', None)
        path_effects = kwargs.get('path_effects', None)
        if skydir is None:
            pix = self.map.geom.center_pix[:2]
        else:
            pix = skydir.to_pixel(self.map.geom.wcs)[:2]
        kw = dict(facecolor='none', edgecolor='w', linestyle='--',
                  linewidth=0.5, label='__nolabel__')
        kw = merge_dict(kw, kwargs)
        # Convert degrees to pixels with the coarsest axis scale.
        pix_radius = radius / max(np.abs(self.map.geom.wcs.wcs.cdelt))
        c = Circle(pix, pix_radius, **kw)
        if path_effects is not None:
            plt.setp(c, path_effects=path_effects)
        self._ax.add_patch(c)

    def zoom(self, zoom):
        """Zoom in on the image center by factor *zoom* (None = no-op)."""
        if zoom is None:
            return
        extent = self._extent
        xw = extent[1] - extent[0]
        x0 = 0.5 * (extent[0] + extent[1])
        # BUG fix: the y width/center were previously computed from the x
        # extent (extent[0]/extent[1]); use the y extent (extent[2]/extent[3]).
        yw = extent[3] - extent[2]
        y0 = 0.5 * (extent[2] + extent[3])
        xlim = [x0 - 0.5 * xw / zoom, x0 + 0.5 * xw / zoom]
        ylim = [y0 - 0.5 * yw / zoom, y0 + 0.5 * yw / zoom]
        self._ax.set_xlim(xlim[0], xlim[1])
        self._ax.set_ylim(ylim[0], ylim[1])
class SEDPlotter(object):
    def __init__(self, sed):
        """Store a deep copy of *sed* so later use cannot mutate the caller's object."""
        self._sed = copy.deepcopy(sed)
    @property
    def sed(self):
        """The SED object captured (deep-copied) at construction."""
        return self._sed
@staticmethod
def get_ylims(sed):
fmin = np.log10(np.nanmin(sed['e2dnde_ul95'])) - 0.5
fmax = np.log10(np.nanmax(sed['e2dnde_ul95'])) + 0.5
fdelta = fmax - fmin
if fdelta < 2.0:
fmin -= 0.5 * (2.0 - fdelta)
fmax += 0.5 * (2.0 - fdelta)
return fmin, fmax
@staticmethod
def plot_lnlscan(sed, **kwargs):
ax = kwargs.pop('ax', plt.gca())
llhcut = kwargs.pop('llhcut', -2.70)
cmap = kwargs.pop('cmap', 'BuGn')
cmap_trunc_lo = kwargs.pop('cmap_trunc_lo', None)
cmap_trunc_hi = kwargs.pop('cmap_trunc_hi', None)
ylim = kwargs.pop('ylim', None)
if ylim is None:
fmin, fmax = SEDPlotter.get_ylims(sed)
else:
fmin, fmax = np.log10(ylim)
fluxM = np.arange(fmin, fmax, 0.01)
fbins = len(fluxM)
llhMatrix = np.zeros((len(sed['e_ctr']), fbins))
# loop over energy bins
for i in range(len(sed['e_ctr'])):
m = sed['norm_scan'][i] > 0
e2dnde_scan = sed['norm_scan'][i][m] * sed['ref_e2dnde'][i]
flux = np.log10(e2dnde_scan)
logl = sed['dloglike_scan'][i][m]
logl -= np.max(logl)
try:
fn = interpolate.interp1d(flux, logl, fill_value='extrapolate')
logli = fn(fluxM)
except:
logli = np.interp(fluxM, flux, logl)
llhMatrix[i, :] = logli
cmap = copy.deepcopy(plt.cm.get_cmap(cmap))
# cmap.set_under('w')
if cmap_trunc_lo is not None or cmap_trunc_hi is not None:
cmap = truncate_colormap(cmap, cmap_trunc_lo, cmap_trunc_hi, 1024)
xedge = 10**np.insert(sed['loge_max'], 0, sed['loge_min'][0])
yedge = np.logspace(fmin, fmax, fbins)
xedge, yedge = np.meshgrid(xedge, yedge)
im = ax.pcolormesh(xedge, yedge, llhMatrix.T,
vmin=llhcut, vmax=0, cmap=cmap,
linewidth=0, shading='auto')
cb = plt.colorbar(im)
cb.set_label('Delta LogLikelihood')
plt.gca().set_ylim(10 ** fmin, 10 ** fmax)
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.gca().set_xlim(sed['e_min'][0], sed['e_max'][-1])
@staticmethod
def plot_flux_points(sed, **kwargs):
ax = kwargs.pop('ax', plt.gca())
ul_ts_threshold = kwargs.pop('ul_ts_threshold', 4)
kw = {}
kw['marker'] = kwargs.get('marker', 'o')
kw['linestyle'] = kwargs.get('linestyle', 'None')
kw['color'] = kwargs.get('color', 'k')
fmin, fmax = SEDPlotter.get_ylims(sed)
m = sed['ts'] < ul_ts_threshold
x = sed['e_ctr']
y = sed['e2dnde']
yerr = sed['e2dnde_err']
yerr_lo = sed['e2dnde_err_lo']
yerr_hi = sed['e2dnde_err_hi']
yul = sed['e2dnde_ul95']
delo = sed['e_ctr'] - sed['e_min']
dehi = sed['e_max'] - sed['e_ctr']
xerr0 = np.vstack((delo[m], dehi[m]))
xerr1 = np.vstack((delo[~m], dehi[~m]))
plt.errorbar(x[~m], y[~m], xerr=xerr1,
yerr=(yerr_lo[~m], yerr_hi[~m]), **kw)
plt.errorbar(x[m], yul[m], xerr=xerr0,
yerr=yul[m] * 0.2, uplims=True, **kw)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(sed['e_min'][0], sed['e_max'][-1])
ax.set_ylim(10 ** fmin, 10 ** fmax)
@staticmethod
def plot_resid(src, model_flux, **kwargs):
ax = kwargs.pop('ax', plt.gca())
sed = src['sed']
m = sed['ts'] < 4
x = sed['e_ctr']
y = sed['e2dnde']
yerr = sed['e2dnde_err']
yul = sed['e2dnde_ul95']
delo = sed['e_ctr'] - sed['e_min']
dehi = sed['e_max'] - sed['e_ctr']
xerr = np.vstack((delo, dehi))
ym = np.interp(sed['e_ctr'], model_flux['log_energies'],
10 ** (2 * model_flux['log_energies']) *
model_flux['dnde'])
ax.errorbar(x, (y - ym) / ym, xerr=xerr, yerr=yerr / ym, **kwargs)
@staticmethod
def plot_model(model_flux, **kwargs):
ax = kwargs.pop('ax', plt.gca())
color = kwargs.pop('color', 'k')
noband = kwargs.pop('noband', False)
e2 = 10 ** (2 * model_flux['log_energies'])
ax.plot(10 ** model_flux['log_energies'],
model_flux['dnde'] * e2, color=color)
ax.plot(10 ** model_flux['log_energies'],
model_flux['dnde_lo'] * e2, color=color,
linestyle='--')
ax.plot(10 ** model_flux['log_energies'],
model_flux['dnde_hi'] * e2, color=color,
linestyle='--')
if not noband:
ax.fill_between(10 ** model_flux['log_energies'],
model_flux['dnde_lo'] * e2,
model_flux['dnde_hi'] * e2,
alpha=0.5, color=color, zorder=-1)
@staticmethod
def plot_sed(sed, showlnl=False, **kwargs):
"""Render a plot of a spectral energy distribution.
Parameters
----------
showlnl : bool
Overlay a map of the delta-loglikelihood values vs. flux
in each energy bin.
cmap : str
Colormap that will be used for the delta-loglikelihood
map.
llhcut : float
Minimum delta-loglikelihood value.
ul_ts_threshold : float
TS threshold that determines whether the MLE or UL
is plotted in each energy bin.
"""
ax = kwargs.pop('ax', plt.gca())
cmap = kwargs.get('cmap', 'BuGn')
annotate_name(sed, ax=ax)
SEDPlotter.plot_flux_points(sed, **kwargs)
if np.any(sed['ts'] > 9.):
if 'model_flux' in sed:
SEDPlotter.plot_model(sed['model_flux'],
noband=showlnl, **kwargs)
if showlnl:
SEDPlotter.plot_lnlscan(sed, **kwargs)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('Energy [MeV]')
ax.set_ylabel('E$^{2}$dN/dE [MeV cm$^{-2}$ s$^{-1}$]')
def plot(self, showlnl=False, **kwargs):
return SEDPlotter.plot_sed(self.sed, showlnl, **kwargs)
class ExtensionPlotter(object):
    """Plot data/model projections for the FITS files produced by an
    extension analysis of a source.

    Parameters
    ----------
    src : dict
        Source dictionary; must contain 'name' and
        src['extension']['width'] (the list of tested widths).
    roi : `~fermipy.roi_model.ROIModel`
        ROI model forwarded to ROIPlotter.
    suffix : str
        Filename suffix identifying the analysis component.
    workdir : str
        Directory containing the model/counts cube FITS files.
    loge_bounds : tuple, optional
        Energy range forwarded to ROIPlotter.
    """

    def __init__(self, src, roi, suffix, workdir, loge_bounds=None):
        self._src = copy.deepcopy(src)
        name = src['name'].lower().replace(' ', '_')
        # Model cube without the extended component.
        self._file0 = os.path.join(workdir,
                                   'mcube_%s_noext%s.fits' % (name, suffix))
        # Background model cube for the extended hypothesis.
        self._file1 = os.path.join(workdir,
                                   'mcube_%s_ext_bkg%s.fits' % (name, suffix))
        # Counts cube.
        self._file2 = os.path.join(workdir, 'ccube%s.fits' % suffix)
        self._width = src['extension']['width']
        # One model cube per tested extension width.
        self._files = [os.path.join(workdir,
                                    'mcube_%s_ext%02i%s.fits' % (name, i, suffix))
                       for i in range(len(self._width))]
        self._roi = roi
        self._loge_bounds = loge_bounds

    def plot(self, iaxis):
        """Plot data, background, and a subsample of at most ~5
        extension-width model projections along axis *iaxis* (0/1)."""
        p0 = ROIPlotter.create_from_fits(self._file2, roi=self._roi,
                                         loge_bounds=self._loge_bounds)
        p1 = ROIPlotter.create_from_fits(self._file1, roi=self._roi,
                                         loge_bounds=self._loge_bounds)
        p0.plot_projection(iaxis, color='k', label='Data', marker='s',
                           linestyle='None')
        p1.plot_projection(iaxis, color='b', noerror=True, label='Background')
        n = len(self._width)
        step = max(1, int(n / 5.))
        # BUG FIX: in Python 3 zip() returns an iterator and cannot be
        # sliced; materialize it as a list before taking every step-th pair.
        fw = list(zip(self._files, self._width))[::step]
        for i, (f, w) in enumerate(fw):
            # Color runs from light (smallest width) to dark (largest).
            cf = float(i) / float(len(fw) - 1.0)
            cf = 0.2 + cf * 0.8
            p = ROIPlotter.create_from_fits(f, roi=self._roi,
                                            loge_bounds=self._loge_bounds)
            # Overlay the extended-source model on top of the background.
            p._data += p1.data
            p.plot_projection(iaxis, color=matplotlib.cm.Reds(cf),
                              noerror=True, label='%.4f$^\circ$' % w)
class AnalysisPlotter(fermipy.config.Configurable):
    """Generate diagnostic plots (ROI maps, residual/TS maps, SEDs,
    localization and extension plots) from analysis output dictionaries."""

    # Configuration defaults; note this class attribute shadows the
    # module-level `defaults` name within the class namespace.
    defaults = dict(defaults.plotting.items(),
                    fileio=defaults.fileio,
                    logging=defaults.logging)

    def __init__(self, config, **kwargs):
        fermipy.config.Configurable.__init__(self, config, **kwargs)
        # NOTE(review): mutates global matplotlib state for all figures.
        matplotlib.rcParams['font.size'] = 12
        matplotlib.interactive(self.config['interactive'])
        self._catalogs = []
        for c in self.config['catalogs']:
            self._catalogs += [catalog.Catalog.create(c)]

    def run(self, gta, mcube_map, **kwargs):
        """Make all plots."""
        prefix = kwargs.get('prefix', 'test')
        # NOTE(review): `format` shadows the builtin of the same name.
        format = kwargs.get('format', self.config['format'])
        # None means "all energies"; each additional entry restricts the range.
        loge_bounds = [None] + self.config['loge_bounds']
        for x in loge_bounds:
            self.make_roi_plots(gta, mcube_map, loge_bounds=x,
                                **kwargs)
        imfile = utils.format_filename(self.config['fileio']['workdir'],
                                       'counts_spectrum', prefix=[prefix],
                                       extension=format)
        make_counts_spectrum_plot(gta._roi_data, gta.roi,
                                  gta.log_energies,
                                  imfile, **kwargs)

    def make_residmap_plots(self, maps, roi=None, **kwargs):
        """Make plots from the output of
        `~fermipy.gtanalysis.GTAnalysis.residmap`.

        Parameters
        ----------
        maps : dict
            Output dictionary of
            `~fermipy.gtanalysis.GTAnalysis.residmap`.

        roi : `~fermipy.roi_model.ROIModel`
            ROI Model object. Generate markers at the positions of
            the sources in this ROI.

        zoom : float
            Crop the image by this factor. If None then no crop is
            applied.
        """
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        workdir = kwargs.pop('workdir', self.config['fileio']['workdir'])
        use_weights = kwargs.pop('use_weights', False)
        # FIXME, how to set this:
        no_contour = False
        # NOTE(review): 'zoom' is read with get() rather than pop(), so it
        # stays in kwargs and is also forwarded to ROIPlotter below.
        zoom = kwargs.get('zoom', None)
        kwargs.setdefault('graticule_radii', self.config['graticule_radii'])
        kwargs.setdefault('label_ts_threshold',
                          self.config['label_ts_threshold'])
        cmap = kwargs.setdefault('cmap', self.config['cmap'])
        cmap_resid = kwargs.pop('cmap_resid', self.config['cmap_resid'])
        kwargs.setdefault('catalogs', self.config['catalogs'])
        if no_contour:
            sigma_levels = None
        else:
            sigma_levels = [-5, -3, 3, 5, 7] + list(np.logspace(1, 3, 17))
        load_bluered_cmap()
        prefix = maps['name']
        mask = maps['mask']
        if use_weights:
            # Histogram only unmasked pixels, then zero masked pixels in
            # all maps before plotting.  NOTE(review): this mutates the
            # caller's map objects in place.
            sigma_hist_data = maps['sigma'].data[maps['mask'].data.astype(
                bool)]
            maps['sigma'].data *= maps['mask'].data
            maps['data'].data *= maps['mask'].data
            maps['model'].data *= maps['mask'].data
            maps['excess'].data *= maps['mask'].data
        else:
            sigma_hist_data = maps['sigma'].data
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['sigma'], roi=roi, **kwargs)
        p.plot(vmin=-5, vmax=5, levels=sigma_levels,
               cb_label='Significance [$\sigma$]', interpolation='bicubic',
               cmap=cmap_resid, zoom=zoom)
        plt.savefig(utils.format_filename(workdir,
                                          'residmap_sigma',
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        # make and draw histogram
        fig, ax = plt.subplots(figsize=figsize)
        nBins = np.linspace(-6, 6, 121)
        data = np.nan_to_num(sigma_hist_data)
        # find best fit parameters
        mu, sigma = norm.fit(data.flatten())
        # make and draw the histogram
        # Clip significances into the histogram range so overflow ends
        # up in the edge bins.
        data[data > 6.0] = 6.0
        data[data < -6.0] = -6.0
        n, bins, patches = ax.hist(data.flatten(), nBins, density=True,
                                   histtype='stepfilled',
                                   facecolor='green', alpha=0.75)
        # make and draw best fit line
        y = norm.pdf(bins, mu, sigma)
        ax.plot(bins, y, 'r--', linewidth=2)
        # Reference curve: unit Gaussian expected for pure noise.
        y = norm.pdf(bins, 0.0, 1.0)
        ax.plot(bins, y, 'k', linewidth=1)
        # labels and such
        ax.set_xlabel(r'Significance ($\sigma$)')
        ax.set_ylabel('Probability')
        paramtext = 'Gaussian fit:\n'
        paramtext += '$\\mu=%.2f$\n' % mu
        paramtext += '$\\sigma=%.2f$' % sigma
        ax.text(0.05, 0.95, paramtext, verticalalignment='top',
                horizontalalignment='left', transform=ax.transAxes)
        plt.savefig(utils.format_filename(workdir,
                                          'residmap_sigma_hist',
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        # Common color scale for the data and model counts maps.
        vmax = max(np.max(maps['data'].data), np.max(maps['model'].data))
        vmin = min(np.min(maps['data'].data), np.min(maps['model'].data))
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['data'], roi=roi, **kwargs)
        p.plot(cb_label='Counts', interpolation='bicubic',
               cmap=cmap, zscale='sqrt', vmin=vmin, vmax=vmax)
        plt.savefig(utils.format_filename(workdir,
                                          'residmap_data',
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['model'], roi=roi, **kwargs)
        p.plot(cb_label='Counts', interpolation='bicubic',
               cmap=cmap, zscale='sqrt', vmin=vmin, vmax=vmax)
        plt.savefig(utils.format_filename(workdir,
                                          'residmap_model',
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['excess'], roi=roi, **kwargs)
        p.plot(cb_label='Counts', interpolation='bicubic',
               cmap=cmap_resid)
        plt.savefig(utils.format_filename(workdir,
                                          'residmap_excess',
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)

    def make_tsmap_plots(self, maps, roi=None, **kwargs):
        """Make plots from the output of
        `~fermipy.gtanalysis.GTAnalysis.tsmap` or
        `~fermipy.gtanalysis.GTAnalysis.tscube`.  This method
        generates a 2D sky map for the best-fit test source in
        sqrt(TS) and Npred.

        Parameters
        ----------
        maps : dict
            Output dictionary of
            `~fermipy.gtanalysis.GTAnalysis.tsmap` or
            `~fermipy.gtanalysis.GTAnalysis.tscube`.

        roi : `~fermipy.roi_model.ROIModel`
            ROI Model object. Generate markers at the positions of
            the sources in this ROI.

        zoom : float
            Crop the image by this factor. If None then no crop is
            applied.
        """
        kwargs.setdefault('graticule_radii', self.config['graticule_radii'])
        kwargs.setdefault('label_ts_threshold',
                          self.config['label_ts_threshold'])
        kwargs.setdefault('cmap', self.config['cmap'])
        kwargs.setdefault('catalogs', self.config['catalogs'])
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        workdir = kwargs.pop('workdir', self.config['fileio']['workdir'])
        suffix = kwargs.pop('suffix', 'tsmap')
        zoom = kwargs.pop('zoom', None)
        # Nothing to plot if the TS map is missing.
        if 'ts' not in maps:
            return
        sigma_levels = [3, 5, 7] + list(np.logspace(1, 3, 17))
        prefix = maps['name']
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['sqrt_ts'], roi=roi, **kwargs)
        p.plot(vmin=0, vmax=5, levels=sigma_levels,
               cb_label='Sqrt(TS) [$\sigma$]', interpolation='bicubic',
               zoom=zoom)
        plt.savefig(utils.format_filename(workdir,
                                          '%s_sqrt_ts' % suffix,
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        fig = plt.figure(figsize=figsize)
        p = ROIPlotter(maps['npred'], roi=roi, **kwargs)
        p.plot(vmin=0, cb_label='NPred [Counts]', interpolation='bicubic',
               zoom=zoom)
        plt.savefig(utils.format_filename(workdir,
                                          '%s_npred' % suffix,
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)
        # make and draw histogram
        fig, ax = plt.subplots(figsize=figsize)
        bins = np.linspace(0, 25, 101)
        data = np.nan_to_num(maps['ts'].data.T)
        # Clip TS values into the histogram range.
        data[data > 25.0] = 25.0
        data[data < 0.0] = 0.0
        n, bins, patches = ax.hist(data.flatten(), bins, density=True,
                                   histtype='stepfilled',
                                   facecolor='green', alpha=0.75)
        # ax.plot(bins,(1-chi2.cdf(x,dof))/2.,**kwargs)
        # Reference curve: chi2 with 1 dof over 2, expected for pure noise.
        ax.plot(bins, 0.5 * chi2.pdf(bins, 1.0), color='k',
                label=r"$\chi^2_{1} / 2$")
        ax.set_yscale('log')
        ax.set_ylim(1E-4)
        ax.legend(loc='upper right', frameon=False)
        # labels and such
        ax.set_xlabel('TS')
        ax.set_ylabel('Probability')
        plt.savefig(utils.format_filename(workdir,
                                          '%s_ts_hist' % suffix,
                                          prefix=[prefix],
                                          extension=fmt))
        plt.close(fig)

    def make_roi_plots(self, gta, mcube_tot, **kwargs):
        """Make various diagnostic plots for the 1D and 2D
        counts/model distributions.

        Parameters
        ----------
        prefix : str
            Prefix that will be appended to all filenames.
        """
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        prefix = kwargs.get('prefix', '')
        loge_bounds = kwargs.get('loge_bounds', None)
        weighted = kwargs.get('weighted', False)
        roi_kwargs = {}
        roi_kwargs.setdefault('loge_bounds', loge_bounds)
        roi_kwargs.setdefault(
            'graticule_radii', self.config['graticule_radii'])
        roi_kwargs.setdefault('label_ts_threshold',
                              self.config['label_ts_threshold'])
        roi_kwargs.setdefault('cmap', self.config['cmap'])
        roi_kwargs.setdefault('catalogs', self._catalogs)
        if loge_bounds is None:
            loge_bounds = (gta.log_energies[0], gta.log_energies[-1])
        esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1])
        mcube_diffuse = gta.model_counts_map('diffuse')
        counts_map = gta.counts_map()
        if weighted:
            # Deep-copy before weighting so the caller's maps are untouched.
            wmap = gta.weight_map()
            counts_map = copy.deepcopy(counts_map)
            mcube_tot = copy.deepcopy(mcube_tot)
            counts_map.data *= wmap.data
            mcube_tot.data *= wmap.data
            mcube_diffuse.data *= wmap.data
        # colors = ['k', 'b', 'g', 'r']
        data_style = {'marker': 's', 'linestyle': 'None'}
        # NOTE(review): this figure is superseded by the one created below
        # and is never explicitly closed.
        fig = plt.figure(figsize=figsize)
        if gta.projtype == "WCS":
            xmin = -1
            xmax = 1
        elif gta.projtype == "HPX":
            # Reproject HEALPix maps onto a CAR WCS grid for plotting.
            hpx2wcs = counts_map.make_wcs_mapping(proj='CAR', oversample=2)
            counts_map = counts_map.to_wcs(hpx2wcs=hpx2wcs)
            mcube_tot = mcube_tot.to_wcs(hpx2wcs=hpx2wcs)
            mcube_diffuse = mcube_diffuse.to_wcs(hpx2wcs=hpx2wcs)
            xmin = None
            xmax = None
        fig = plt.figure(figsize=figsize)
        rp = ROIPlotter(mcube_tot, roi=gta.roi, **roi_kwargs)
        rp.plot(cb_label='Counts', zscale='pow', gamma=1. / 3.)
        plt.savefig(os.path.join(gta.config['fileio']['workdir'],
                                 '%s_model_map%s.%s' % (
                                     prefix, esuffix, fmt)))
        plt.close(fig)
        rp = ROIPlotter(counts_map, roi=gta.roi, **roi_kwargs)
        rp.plot(cb_label='Counts', zscale='sqrt')
        plt.savefig(os.path.join(gta.config['fileio']['workdir'],
                                 '%s_counts_map%s.%s' % (
                                     prefix, esuffix, fmt)))
        plt.close(fig)
        # 1D projections along longitude and latitude.
        for iaxis, xlabel, psuffix in zip([0, 1],
                                          ['LON Offset [deg]', 'LAT Offset [deg]'],
                                          ['xproj', 'yproj']):
            fig = plt.figure(figsize=figsize)
            rp.plot_projection(iaxis, label='Data', color='k',
                               xmin=xmin, xmax=xmax, **data_style)
            rp.plot_projection(iaxis, data=mcube_tot, label='Model', xmin=xmin, xmax=xmax,
                               noerror=True)
            rp.plot_projection(iaxis, data=mcube_diffuse, label='Diffuse', xmin=xmin, xmax=xmax,
                               noerror=True)
            plt.gca().set_ylabel('Counts')
            plt.gca().set_xlabel(xlabel)
            plt.gca().legend(frameon=False)
            annotate(loge_bounds=loge_bounds)
            plt.savefig(os.path.join(gta.config['fileio']['workdir'],
                                     '%s_counts_map_%s%s.%s' % (prefix, psuffix,
                                                                esuffix, fmt)))
            plt.close(fig)

    def make_sed_plots(self, sed, **kwargs):
        """Save SED plots (with and without the loglikelihood-scan
        overlay) for the given SED dictionary."""
        prefix = kwargs.get('prefix', '')
        name = sed['name'].lower().replace(' ', '_')
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        p = SEDPlotter(sed)
        fig = plt.figure(figsize=figsize)
        p.plot()
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'sed', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)
        fig = plt.figure(figsize=figsize)
        p.plot(showlnl=True)
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'sedlnl', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)

    def make_localization_plots(self, loc, roi=None, **kwargs):
        """Save TS-map plots for a localization analysis: one for the full
        scan map and one for the peak map, each with position markers and
        68%/99% error ellipses."""
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        prefix = kwargs.get('prefix', '')
        # NOTE(review): this keyword value is overwritten below by the
        # peak-map coordinate grid before it is ever used.
        skydir = kwargs.get('skydir', None)
        cmap = kwargs.get('cmap', self.config['cmap'])
        name = loc.get('name', '')
        name = name.lower().replace(' ', '_')
        tsmap = loc['tsmap']
        fit_init = loc['fit_init']
        # Renormalize so the map maximum is at zero (delta-loglike scale).
        tsmap_renorm = copy.deepcopy(tsmap)
        tsmap_renorm.data -= np.max(tsmap_renorm.data)
        skydir = loc['tsmap_peak'].geom.get_coord(flat=True)
        frame = loc['tsmap_peak'].geom.frame
        skydir = MapCoord.create(skydir, frame=frame).skycoord
        path_effect = PathEffects.withStroke(linewidth=2.0,
                                             foreground="black")
        p = ROIPlotter(tsmap_renorm, roi=roi)
        fig = plt.figure(figsize=figsize)
        vmin = max(-100.0, np.min(tsmap_renorm.data))
        p.plot(levels=[-200, -100, -50, -20, -9.21, -5.99, -2.3, -1.0],
               cmap=cmap, vmin=vmin, colors=['k'],
               interpolation='bicubic', cb_label='2$\\times\Delta\ln$L')
        cdelt0 = np.abs(tsmap.geom.wcs.wcs.cdelt[0])
        cdelt1 = np.abs(tsmap.geom.wcs.wcs.cdelt[1])
        cdelt = [cdelt0, cdelt1]
        peak_skydir = SkyCoord(fit_init['ra'], fit_init['dec'],
                               frame='icrs', unit='deg')
        scan_skydir = SkyCoord(loc['ra'], loc['dec'],
                               frame='icrs', unit='deg')
        peak_pix = peak_skydir.to_pixel(tsmap_renorm.geom.wcs)
        scan_pix = scan_skydir.to_pixel(tsmap_renorm.geom.wcs)
        if 'ra_preloc' in loc:
            preloc_skydir = SkyCoord(loc['ra_preloc'], loc['dec_preloc'],
                                     frame='icrs', unit='deg')
            plot_markers(preloc_skydir.ra.deg, preloc_skydir.dec.deg,
                         marker='+', color='w', path_effects=[path_effect],
                         label='Old Position')
        plot_markers(peak_skydir.ra.deg, peak_skydir.dec.deg,
                     marker='x', color='lime', path_effects=[path_effect])
        plot_markers(scan_skydir.ra.deg, scan_skydir.dec.deg,
                     marker='x', color='w', path_effects=[path_effect],
                     label='New Position')
        if skydir is not None:
            # Draw the footprint of the peak map as a dashed rectangle.
            pix = skydir.to_pixel(tsmap_renorm.geom.wcs)
            xmin = np.min(pix[0])
            ymin = np.min(pix[1])
            xwidth = np.max(pix[0]) - xmin
            ywidth = np.max(pix[1]) - ymin
            r = Rectangle((xmin, ymin), xwidth, ywidth,
                          edgecolor='w', facecolor='none', linestyle='--')
            plt.gca().add_patch(r)
        plot_error_ellipse(fit_init, peak_pix, cdelt, edgecolor='lime',
                           color='lime', colname='pos_r68')
        plot_error_ellipse(fit_init, peak_pix, cdelt, edgecolor='lime',
                           color='lime', colname='pos_r99', linestyle=':')
        plot_error_ellipse(loc, scan_pix, cdelt, edgecolor='w',
                           color='w', colname='pos_r68', label='68% Uncertainty')
        plot_error_ellipse(loc, scan_pix, cdelt, edgecolor='w',
                           color='w', colname='pos_r99', label='99% Uncertainty',
                           linestyle='--')
        handles, labels = plt.gca().get_legend_handles_labels()
        # Proxy artists so the ellipse styles appear in the legend.
        h0 = Line2D([], [], color='w', marker='None',
                    label='68% Uncertainty', linewidth=1.0)
        h1 = Line2D([], [], color='w', marker='None',
                    label='99% Uncertainty', linewidth=1.0,
                    linestyle='--')
        plt.legend(handles=handles + [h0, h1])
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'localize', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)
        # Second figure: the zoomed-in peak TS map.
        tsmap = loc['tsmap_peak']
        tsmap_renorm = copy.deepcopy(tsmap)
        tsmap_renorm.data -= np.max(tsmap_renorm.data)
        p = ROIPlotter(tsmap_renorm, roi=roi)
        fig = plt.figure(figsize=figsize)
        vmin = max(-50.0, np.min(tsmap_renorm.data))
        p.plot(levels=[-200, -100, -50, -20, -9.21, -5.99, -2.3, -1.0],
               cmap=cmap, vmin=vmin, colors=['k'],
               interpolation='bicubic', cb_label='2$\\times\Delta\ln$L')
        cdelt0 = np.abs(tsmap.geom.wcs.wcs.cdelt[0])
        cdelt1 = np.abs(tsmap.geom.wcs.wcs.cdelt[1])
        cdelt = [cdelt0, cdelt1]
        scan_pix = scan_skydir.to_pixel(tsmap_renorm.geom.wcs)
        if 'ra_preloc' in loc:
            preloc_skydir = SkyCoord(loc['ra_preloc'], loc['dec_preloc'],
                                     frame='icrs', unit='deg')
            plot_markers(preloc_skydir.ra.deg, preloc_skydir.dec.deg,
                         marker='+', color='w', path_effects=[path_effect],
                         label='Old Position')
        plot_markers(scan_skydir.ra.deg, scan_skydir.dec.deg,
                     marker='x', color='w', path_effects=[path_effect],
                     label='New Position')
        plot_error_ellipse(loc, scan_pix, cdelt, edgecolor='w',
                           color='w', colname='pos_r68', label='68% Uncertainty')
        plot_error_ellipse(loc, scan_pix, cdelt, edgecolor='w',
                           color='w', colname='pos_r99', label='99% Uncertainty',
                           linestyle='--')
        handles, labels = plt.gca().get_legend_handles_labels()
        h0 = Line2D([], [], color='w', marker='None',
                    label='68% Uncertainty', linewidth=1.0)
        h1 = Line2D([], [], color='w', marker='None',
                    label='99% Uncertainty', linewidth=1.0,
                    linestyle='--')
        plt.legend(handles=handles + [h0, h1])
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'localize_peak', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)

    def make_extension_plots(self, ext, roi=None, **kwargs):
        """Generate diagnostic plots for an extension analysis."""
        if ext.get('tsmap') is not None:
            self._plot_extension_tsmap(ext, roi=roi, **kwargs)
        if ext.get('ebin_ts_ext') is not None:
            self._plot_extension_ebin(ext, roi=roi, **kwargs)

    def _plot_extension_ebin(self, ext, roi=None, **kwargs):
        """Plot the best-fit extension (or 95% CL UL) vs. energy bin."""
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        prefix = kwargs.get('prefix', '')
        name = ext.get('name', '')
        name = name.lower().replace(' ', '_')
        # m selects bins with a significant extension measurement.
        m = ext['ebin_ts_ext'] > 4.0
        fig = plt.figure(figsize=figsize)
        ectr = ext['ebin_e_ctr']
        delo = ext['ebin_e_ctr'] - ext['ebin_e_min']
        dehi = ext['ebin_e_max'] - ext['ebin_e_ctr']
        xerr0 = np.vstack((delo[m], dehi[m]))
        xerr1 = np.vstack((delo[~m], dehi[~m]))
        ax = plt.gca()
        ax.errorbar(ectr[m], ext['ebin_ext'][m], xerr=xerr0,
                    yerr=(ext['ebin_ext_err_lo'][m],
                          ext['ebin_ext_err_hi'][m]),
                    color='k', linestyle='None', marker='o')
        ax.errorbar(ectr[~m], ext['ebin_ext_ul95'][~m], xerr=xerr1,
                    yerr=0.2 * ext['ebin_ext_ul95'][~m], uplims=True,
                    color='k', linestyle='None', marker='o')
        ax.set_xlabel('Energy [log$_{10}$(E/MeV)]')
        ax.set_ylabel('Extension [deg]')
        ax.set_xscale('log')
        ax.set_yscale('log')
        annotate_name(ext)
        # Start from a default decade range and widen to cover the data.
        ymin = min(10**-1.5, 0.8 * ext['ext_ul95'])
        ymax = max(10**-0.5, 1.2 * ext['ext_ul95'])
        if np.any(np.isfinite(ext['ebin_ext_ul95'])):
            ymin = min(ymin, 0.8 * np.nanmin(ext['ebin_ext_ul95']))
            ymax = max(ymax, 1.2 * np.nanmax(ext['ebin_ext_ul95']))
        if ext['ts_ext'] > 4.0:
            # Significant global extension: draw its value and error band.
            plt.axhline(ext['ext'], color='k')
            ext_lo = ext['ext'] - ext['ext_err_lo']
            ext_hi = ext['ext'] + ext['ext_err_hi']
            ax.fill_between([ext['ebin_e_min'][0], ext['ebin_e_max'][-1]],
                            [ext_lo, ext_lo], [ext_hi, ext_hi],
                            alpha=0.5, color='k', zorder=-1)
            ymin = min(ymin, 0.8 * (ext['ext'] - ext['ext_err_lo']))
            ymax = max(ymax, 1.2 * (ext['ext'] + ext['ext_err_hi']))
        else:
            plt.axhline(ext['ext_ul95'], color='k', linestyle='--')
        ax.set_ylim(ymin, ymax)
        ax.set_xlim(ext['ebin_e_min'][0], ext['ebin_e_max'][-1])
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'extension_ebin', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)

    def _plot_extension_tsmap(self, ext, roi=None, **kwargs):
        """Plot the extension TS map with circles for the best-fit
        extension (or its 95% CL UL)."""
        fmt = kwargs.get('format', self.config['format'])
        figsize = kwargs.get('figsize', self.config['figsize'])
        prefix = kwargs.get('prefix', '')
        cmap = kwargs.get('cmap', self.config['cmap'])
        name = ext.get('name', '')
        name = name.lower().replace(' ', '_')
        p = ROIPlotter(ext['tsmap'], roi=roi)
        fig = plt.figure(figsize=figsize)
        sigma_levels = [3, 5, 7] + list(np.logspace(1, 3, 17))
        p.plot(cmap=cmap, interpolation='bicubic', levels=sigma_levels,
               transform='sqrt')
        c = SkyCoord(ext['ra'], ext['dec'], unit='deg')
        path_effect = PathEffects.withStroke(linewidth=2.0,
                                             foreground="black")
        if ext['ts_ext'] > 9.0:
            # Significant extension: best-fit radius and +/- 1 sigma circles.
            p.draw_circle(ext['ext'], skydir=c, edgecolor='lime', linestyle='-',
                          linewidth=1.0, label='R$_{68}$', path_effects=[path_effect])
            p.draw_circle(ext['ext'] + ext['ext_err'], skydir=c, edgecolor='lime', linestyle='--',
                          linewidth=1.0, label='R$_{68}$ $\pm 1 \sigma$', path_effects=[path_effect])
            p.draw_circle(ext['ext'] - ext['ext_err'], skydir=c, edgecolor='lime', linestyle='--',
                          linewidth=1.0, path_effects=[path_effect])
        else:
            p.draw_circle(ext['ext_ul95'], skydir=c, edgecolor='lime', linestyle='--',
                          linewidth=1.0, label='R$_{68}$ 95% UL',
                          path_effects=[path_effect])
        leg = plt.gca().legend(frameon=False, loc='upper left')
        for text in leg.get_texts():
            text.set_color('lime')
        outfile = utils.format_filename(self.config['fileio']['workdir'],
                                        'extension', prefix=[prefix, name],
                                        extension=fmt)
        plt.savefig(outfile)
        plt.close(fig)

    def _plot_extension(self, gta, prefix, src, loge_bounds=None, **kwargs):
        """Utility function for generating diagnostic plots for the
        extension analysis.

        NOTE(review): this method reads self.energies, self.roi, and
        self.components, none of which are defined on AnalysisPlotter in
        this file — it looks like it was moved from another class;
        confirm before use.
        """
        # format = kwargs.get('format', self.config['plotting']['format'])
        if loge_bounds is None:
            loge_bounds = (self.energies[0], self.energies[-1])
        name = src['name'].lower().replace(' ', '_')
        esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1])
        p = ExtensionPlotter(src, self.roi, '',
                             self.config['fileio']['workdir'],
                             loge_bounds=loge_bounds)
        fig = plt.figure()
        p.plot(0)
        plt.gca().set_xlim(-2, 2)
        ROIPlotter.setup_projection_axis(0)
        annotate(src=src, loge_bounds=loge_bounds)
        plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                 '%s_%s_extension_xproj%s.png' % (
                                     prefix, name, esuffix)))
        plt.close(fig)
        fig = plt.figure()
        p.plot(1)
        plt.gca().set_xlim(-2, 2)
        ROIPlotter.setup_projection_axis(1)
        annotate(src=src, loge_bounds=loge_bounds)
        plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                 '%s_%s_extension_yproj%s.png' % (
                                     prefix, name, esuffix)))
        plt.close(fig)
        # Repeat per analysis component.
        for i, c in enumerate(self.components):
            suffix = '_%02i' % i
            p = ExtensionPlotter(src, self.roi, suffix,
                                 self.config['fileio']['workdir'],
                                 loge_bounds=loge_bounds)
            fig = plt.figure()
            p.plot(0)
            ROIPlotter.setup_projection_axis(0, loge_bounds=loge_bounds)
            annotate(src=src, loge_bounds=loge_bounds)
            plt.gca().set_xlim(-2, 2)
            plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                     '%s_%s_extension_xproj%s%s.png' % (
                                         prefix, name, esuffix, suffix)))
            plt.close(fig)
            fig = plt.figure()
            p.plot(1)
            plt.gca().set_xlim(-2, 2)
            ROIPlotter.setup_projection_axis(1, loge_bounds=loge_bounds)
            annotate(src=src, loge_bounds=loge_bounds)
            plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                     '%s_%s_extension_yproj%s%s.png' % (
                                         prefix, name, esuffix, suffix)))
            plt.close(fig)
|
|
from __future__ import division # This makes Python interpret / as float division.
from classes import *
from heuristic import *
from addition_module import *
from multiplication_module import *
from function_module import *
from random import randint
from math import floor, ceil
import timeit
# Wall-clock reference point; elapsed seconds can be computed later via
# timeit.default_timer() - start.
start = timeit.default_timer()
# Runs the heuristic procedure on an initialized Heuristic_data object.
# If split_cases is true, will look at all unsigned variables and split on them being > or < 0.
def run_heuristic_on_heuristic_data(H, split_cases):
    """Run the learning modules on Heuristic_data H until a fixpoint.

    Returns True if a contradiction is found, False otherwise.
    If split_cases is true, case-splits on the sign of each variable whose
    sign is unknown: if assuming v >= 0 (resp. v <= 0) yields a
    contradiction, the opposite strict comparison is learned and the
    heuristic restarts.  Nested case splits are not chained (the recursive
    calls pass split_cases=False).
    """
    while H.changed:
        try:
            # Each module may set H.changed when it learns something new.
            H.changed = False
            learn_add_comparisons(H)
            learn_mul_comparisons(H)
            learn_func_comparisons(H)
        except Contradiction:
            print "Contradiction found!"
            return True
        except KeyboardInterrupt:
            print "Stopped."
            quit()
    if split_cases:
        if H.verbose:
            print 'We\'ve run out of new information. Let\'s try splitting cases.'
        # Variables whose sign is not even weakly known.
        unsigned_vars = [t for t in range(H.num_terms) if H.weak_sign(t) == 0]
        if unsigned_vars:
            if H.verbose:
                print 'We don\'t know sign information for:'
                for t in unsigned_vars:
                    print ' ', IVar(t), '=', H.terms[t]
        else:
            if H.verbose:
                print 'Signs of all variables are known; nothing to split on.'
        for v in unsigned_vars:
            if H.verbose:
                print 'Assuming', IVar(v), '>= 0. That is,', H.terms[v], '>= 0.'
            # Work on a duplicate so the assumption can be discarded.
            Hposv = H.duplicate()
            contr = False
            try:
                Hposv.learn_zero_comparison(v, GE, HYP)
            except Contradiction:
                contr = True
            if contr or run_heuristic_on_heuristic_data(Hposv, False):  # v>=0 is a contradiction. learn v<0
                if H.verbose:
                    print 'From the case split, we learned', H.terms[v], '< 0'
                H.learn_zero_comparison(v, LT, HYP)
                return run_heuristic_on_heuristic_data(H, True)
            # otherwise, no contradiction from v>=0. try v<=0
            if H.verbose:
                print 'Assuming', H.terms[v], '<= 0'
            Hnegv = H.duplicate()
            contr = False
            try:
                Hnegv.learn_zero_comparison(v, LE, HYP)
            except Contradiction:
                contr = True
            if contr or run_heuristic_on_heuristic_data(Hnegv, False):  # v<=0 is a contradiction. learn v>0
                if H.verbose:
                    print 'From the case split, we learned', H.terms[v], '> 0'
                H.learn_zero_comparison(v, GT, HYP)
                return run_heuristic_on_heuristic_data(H, True)
            # otherwise, v could be pos or neg. try splitting on the next case.
    return False
# Takes a list of (uncanonized) Zero_comparisons and runs the heuristic.
# If split_cases is true, it will try assuming variables of unknown sign are pos or neg
# if it doesn't find a contradiction before.
# Will not chain case splits.
def run_heuristic_on_hypotheses(hyps, func_data=[], split_cases=True):
    """Canonize the given (uncanonized) Zero_comparisons and run the
    heuristic on them.

    Returns True if a contradiction is found, False otherwise.
    NOTE(review): func_data=[] is a mutable default argument; this is safe
    only as long as Heuristic_data does not mutate it in place — consider
    func_data=None with an in-body default.
    """
    hypotheses = [canonize_zero_comparison(h) for h in hyps]
    # print "Canonized hypotheses:"
    # for h in hypotheses:
    #     print h
    # print
    try:
        H = Heuristic_data(hypotheses, func_data, verbose=False)
    except Contradiction:
        # The hypotheses were already inconsistent at construction time.
        print "Contradiction found!"
        return True
    H.changed = True
    if run_heuristic_on_heuristic_data(H, split_cases):
        return True
    print "Nothing more learned. No contradiction has been found."
    return False
###############################################################################
#
# HARD-CODED TESTS
#
###############################################################################
def test_heuristic():
    """Hard-coded example: from 0 < x < y, 0 < u < v, 0 < w+z < r-1 it
    follows that u + (1+x)^2 (2w+2z+3) < 2v + (1+y)^2 (2r+1); this is
    proved by refuting the hypotheses plus the negated conclusion."""
    print
    print("From these hypotheses:")
    print
    print("  0 < x < y")
    print("  0 < u < v")
    print("  0 < w + z < r - 1")
    print
    print("It follows that:")
    print
    print("  u + (1 + x)^2 (2 w + 2 z + 3) < 2 v + (1 + y)^2 (2 r + 1)")
    print
    print("This test proves this by showing that the hypotheses together")
    print("with the negation of the conclusion are inconsistent")
    print
    r = Var("r")
    u = Var("u")
    v = Var("v")
    w = Var("w")
    x = Var("x")
    y = Var("y")
    z = Var("z")
    # hypotheses
    gt_zero_hyps = [
        x,
        Add_term([(1, y), (-1, x)]),
        u,
        Add_term([(1, v), (-1, u)]),
        Add_term([(1, w), (1, z)]),
        Add_term([(1, r), (-1, one), (-1, w), (-1, z)])]
    # Negated conclusion, expressed as lhs - rhs >= 0.
    ge_zero_hyps = [
        Add_term([(1, u),
                  (1, Mul_term([(Add_term([(1, one), (1, x)]), 2),
                                (Add_term([(2, w), (2, z), (3, one)]), 1)])),
                  (-2, v),
                  (-1, Mul_term([(Add_term([(1, one), (1, y)]), 2),
                                 (Add_term([(2, r), (1, one)]), 1)]))])]
    hypotheses = ([Zero_comparison(t, GT) for t in gt_zero_hyps] +
                  [Zero_comparison(t, GE) for t in ge_zero_hyps])
    run_heuristic_on_hypotheses(hypotheses)
def test_heuristic_2():
    """(x^2-8)/(x^2+3x-1) < 0 and 0 < x < 1 are jointly satisfiable
    (e.g. at x = .5), so no contradiction should be reported."""
    print
    print("From these hypotheses:")
    print
    print("  (x^2-8)/(x^2+3x-1) < 0")
    print("  0 < x < 1")
    print
    print("There should be no contradiction. Both are true at x=.5")
    print
    print
    x = Var("x")
    # hypotheses
    # x^2-8
    a = Add_term([Add_pair(1, Mul_term([Mul_pair(x, 2)])), Add_pair(-8, one)])
    # x^2+3x-1
    b = Add_term([Add_pair(1, Mul_term([Mul_pair(x, 2)])), Add_pair(3, x), Add_pair(-1, one)])
    # The quotient is encoded as a * b^-1.
    lt_zero_hyps = [
        Mul_term([Mul_pair(a, 1), Mul_pair(b, -1)])
    ]
    gt_zero_hyps = [
        x,
        Add_term([Add_pair(1, one), Add_pair(-1, x)])
    ]
    hypotheses = ([Zero_comparison(t, LT) for t in lt_zero_hyps]
                  + [Zero_comparison(t, GT) for t in gt_zero_hyps])
    run_heuristic_on_hypotheses(hypotheses, split_cases=False)
def test_heuristic_3():
    """-x^2 > -10 and x > 5 are inconsistent; a contradiction is
    expected."""
    print
    print("From these hypotheses:")
    print
    print("  -x^2 > -10")
    print("  x > 5")
    print
    print("There should be a contradiction.")
    print
    print
    x = Var("x")
    # hypotheses
    # -x^2>-10
    a = Add_term([Add_pair(-1, Mul_term([Mul_pair(x, 2)])), Add_pair(10, one)])
    # x>5
    b = Add_term([Add_pair(1, x), Add_pair(-5, one)])
    lt_zero_hyps = [
    ]
    gt_zero_hyps = [
        a, b
    ]
    hypotheses = ([Zero_comparison(t, LT) for t in lt_zero_hyps]
                  + [Zero_comparison(t, GT) for t in gt_zero_hyps])
    run_heuristic_on_hypotheses(hypotheses)
def test_heuristic_4():
    """x < 0, x - y < 0, x + y >= 5 are jointly satisfiable
    (e.g. x=-1, y=10), so no contradiction should be reported."""
    print
    print("From these hypotheses:")
    print
    print("  x < 0")
    print("  x - y < 0")
    print("  x + y >= 5")
    print
    print("There should be no contradiction. All are true at x=-1, y=10.")
    print
    print
    x = Var("x")
    y = Var("y")
    lt_zero_hyps = [
        x, Add_term([Add_pair(1, x), Add_pair(-1, y)])
    ]
    hypotheses = ([Zero_comparison(t, LT) for t in lt_zero_hyps]
                  + [Zero_comparison(Add_term([Add_pair(1, x), Add_pair(1, y), Add_pair(-5, one)]), GE)])
    run_heuristic_on_hypotheses(hypotheses)
def test_heuristic_on_functions():
    """Function-axiom test: assume x < y and exp(x) > exp(y), and run
    with only the positivity axiom exp(a) > 0 supplied."""
    # we know x < y => exp(x) < exp(y)
    # Assume x < y, exp(x) > exp(y).
    x = Var("x")
    y = Var("y")
    hypotheses = [Zero_comparison(Add_term([Add_pair(1, x), Add_pair(-1, y)]), LT), Zero_comparison(Add_term([Add_pair(1, Func_term('exp', [y])), Add_pair(-1, Func_term('exp', [x]))]), LT)]
    co = Function_conclusion((lambda H, a, b:Func_term("exp", [a])), (lambda H, a, b:Func_term("exp", [b])), GT)
    fr = Function_restriction('exp', lambda H, a, b:a.gt_rel(b, H), co)
    co2 = Function_conclusion(lambda H, a:Func_term("exp", [a]), lambda H, a:0, GT)
    fr2 = Function_restriction('exp2', lambda H, a:True, co2)
    # fr2 = Function_restriction('exp',free_vars2,[h2],co2)
    # NOTE(review): `fr` (monotonicity of exp) is constructed but not
    # passed to the solver; only the positivity axiom fr2 is used here.
    run_heuristic_on_hypotheses(hypotheses, [fr2])
def test_heuristic_on_functions2():
    """Run the heuristic on x < y with exp(y) - exp(x) both <= 0 and >= 0.

    The restriction `fr` (guarded by `neq_rel`, presumably a != b —
    TODO confirm against Function_restriction's contract) concludes a
    strict exp comparison, which should refute the hypotheses.  The
    unused `co2`/`fr2` locals that were constructed here but never
    passed to the heuristic have been removed.
    """
    # we know x < y => exp(x) < exp(y)
    # Assume x < y, exp(x) > exp(y).
    x = Var("x")
    y = Var("y")
    hypotheses = [Zero_comparison(Add_term([Add_pair(1, x), Add_pair(-1, y)]), LT),
                  Zero_comparison(Add_term([Add_pair(1, Func_term('exp', [y])),
                                            Add_pair(-1, Func_term('exp', [x]))]), LE),
                  Zero_comparison(Add_term([Add_pair(1, Func_term('exp', [y])),
                                            Add_pair(-1, Func_term('exp', [x]))]), GE)]
    co = Function_conclusion((lambda H, a, b: Func_term("exp", [a])),
                             (lambda H, a, b: Func_term("exp", [b])), GT)
    fr = Function_restriction('exp', lambda H, a, b: a.neq_rel(b, H), co)
    run_heuristic_on_hypotheses(hypotheses, [fr])
###################################################
#
# INPUT CODE
#
####################################################
# Character classes used by the tokenizer (splitoff/lex below).
digit = '1234567890'
alpha = 'abcdefghijklmnopqrstuvwxyz_'  # characters valid in identifiers
alphanum = alpha + digit               # identifier/number characters
comp = '><='                           # comparison-operator characters
punct = '()'
operators = '+-/*^'
# Helper function for lex.
# Takes a "property" string and an input string.
# Returns: the initial substring of input whose chars are in prop, the remainder of input
def splitoff(prop, inp):
    """Split *inp* at the first character not contained in *prop*."""
    cut = 0
    for ch in inp:
        if ch not in prop:
            break
        cut += 1
    return inp[:cut], inp[cut:]
# takes string with no whitespace
# returns list of strings where each string is a token of type digit,alpha,comp,punct,operators
# TODO: handle whitespace, multi-character names
def lex(inp):
    """Tokenize *inp* into maximal runs of same-class characters.

    Character classes are the module constants alphanum, operators,
    comp and punct.  Raises Exception on a character outside every
    class.  A trailing single character is returned unvalidated,
    matching the original len==1 base case.

    Rewritten iteratively (the original recursed once per token, which
    hits the recursion limit on long inputs) and with the `type` local
    renamed so it no longer shadows the builtin.
    """
    tokens = []
    rest = inp
    while rest:
        if len(rest) == 1:
            # Preserve the original base case: single char, no validation.
            tokens.append(rest)
            break
        charset = ""
        if rest[0] in alphanum:
            charset = alphanum
        elif rest[0] in operators:
            charset = operators
        elif rest[0] in comp:
            charset = comp
        elif rest[0] in punct:
            charset = punct
        if charset == "":
            raise Exception("Bad input!")
        token, rest = splitoff(charset, rest)
        tokens.append(token)
    return tokens
# kinds of inequalities
# GT, GE, LE, LT = range(4)
# comp_str = { GT : '>', GE : '>=', LT : '<', LE : '<=' }
# Inverse of comp_str: maps a comparison token back to its direction constant.
comp_str_rev = {'>': GT, '>=': GE, '<': LT, '<=': LE}
# dir is an int: GT, GE, LE, LT
# Temporary data structure for parsing.
# left and right are strings representing terms.
# ineq is the comparison between those terms: left ineq right
class StringCompData:
    """One parsed comparison `left ineq right`, sides still as strings."""

    def __init__(self, string1, string2, dir):
        self.left, self.right, self.ineq = string1, string2, dir

    def __repr__(self):
        return str(self)

    def __str__(self):
        return self.left + comp_str[self.ineq] + self.right
# inputs string
# returns list of one or two StringCompDatas
def splitup(s):
    """Split an (in)equality string into StringCompData records.

    *s* must contain one or two comparison signs, all pointing the same
    way; a chain like "0<x<3" is split into its two single comparisons
    (recursively).  Raises Exception on mixed directions or on zero /
    more-than-two comparison signs.

    Updated to use `str` methods instead of the deprecated Python-2
    `string`-module free functions find/count/split the original used
    (behaviour is identical: string.find(t, x) == t.find(x), etc.).
    """
    # s should have one or two inequality signs of the same direction.
    t = "".join(s.split())
    if t.find(">") > -1 and t.find("<") > -1:
        raise Exception
    ineqs = t.count(">") + t.count("<")
    if ineqs < 1 or ineqs > 2:
        raise Exception
    if ineqs == 2:  # This is a chain of inequalities. Split and parse each one separately.
        dir = ">" if (t.find(">") > -1) else "<"
        dirs = []
        i1 = t.find(dir)
        dirs.append(dir + "=" if t[i1 + 1] == "=" else dir)
        i2 = i1 + 1 + t[i1 + 1:].find(dir)
        dirs.append(dir + "=" if t[i2 + 1] == "=" else dir)
        substr1 = t[:i1]
        substr2 = t[i1 + len(dirs[0]):i2]
        substr3 = t[i2 + len(dirs[1]):]
        str1 = substr1 + dirs[0] + substr2
        str2 = substr2 + dirs[1] + substr3
        return splitup(str1) + splitup(str2)
    else:  # Only one inequality.
        iseq = "=" if (t.find("=") > -1) else ""
        dir = ">" if (t.find(">") > -1) else "<"
        substrs = t.split(dir + iseq)
        return [StringCompData(substrs[0], substrs[1], comp_str_rev[dir + iseq])]
# takes a string like "x^5+2*y".
# Returns "Var('x')**5+2*Var('y')"
def fixvars(input):
    """Rewrite a lexed expression into an eval-able Python string.

    Tokens containing an identifier character become Var('<token>') and
    '^' becomes Python's '**'; everything else passes through unchanged.
    Uses any() instead of the manual flag loop and ''.join() instead of
    the original quadratic string `+=` accumulation.
    """
    lexed = lex(input)
    for i, token in enumerate(lexed):
        if any(c in alpha for c in token):
            lexed[i] = "Var('" + token + "')"
        elif token == '^':
            lexed[i] = '**'
    return "".join(lexed)
# takes string as input. Returns array of ZeroComparisons
# TODO: should be 2 functions
def make_zero_comp(input):
    # Parse *input* into Zero_comparison objects by normalizing each
    # `left ineq right` into `left - right ineq 0`.
    # SECURITY NOTE: scd.left / scd.right are eval()'d below.  This is only
    # acceptable because the input comes from the local interactive user;
    # never feed untrusted text through this function.
    string_comp_data = splitup(fixvars(input))
    zero_comps = []
    for scd in string_comp_data:
        if not ("Var" in scd.left + scd.right):
            # Both sides are constants: decide the comparison immediately.
            if not eval(scd.left + comp_str[scd.ineq] + scd.right):
                print "Contradiction found while parsing!", scd.left, comp_str[scd.ineq], scd.right
                exit()
            continue
        terml = eval(scd.left)
        termr = eval(scd.right)
        # Normalize to a single term compared against zero.
        term = terml + (-1) * termr
        zero_comps.append(Zero_comparison(term, scd.ineq))
    return zero_comps
def run_heuristic_on_input():
    # Interactive driver: read inequalities from stdin until "done" or a
    # blank line, then run the heuristic on the collected hypotheses.
    print "Enter inequalities to run."
    print "Type \"done\" or a blank line when finished."
    args = []
    v = "".join(raw_input("inequality: ").split())  # clear whitespace
    while (v != "" and v != "done"):
        try:
            args.extend(make_zero_comp(v))  # args.append(parse(v))
        except KeyError as inst:
            # Raised e.g. by comp_str_rev lookup on a malformed comparison.
            print "Invalid input: ", inst
        v = "".join(raw_input("inequality: ").split())
    print args
    run_heuristic_on_hypotheses(args)
# Uncomment one and only one line of inequalities.
def run_heuristic_on_list():
    # Batch driver: run the heuristic on one hard-coded list of inequality
    # strings.  Exactly one line of `ineqs` should be left uncommented.
    ineqs = [
        # This example is similar to one from S. McLaughlin and J. Harrison (2005),
        # which their algorithm solves in 23.5 seconds
        # "1<x", "1<y", "1<z", "1>=x*(1+z*y)"
        # This is not provable by Isabelle, from a post to the Isabelle mailing list.
        # "a>0", "a<1", "b>0", "b<1", "a+b<a*b"
        # This example takes a while and fails. No large constants.
        # "x+y>=2", "z+w>=2", "u*x^2<u*x", "u*y^2<u*y", "u*w^2>u*w", "u*z^2>u*z"
        # This example takes a few seconds, large multiplicative constants, fails
        # "n<=(1/2)*k*x", "0<c", "0<p<1", "(1+p/(3*(c+3)))*n>=k*x"
        # warning: the next example blows up!
        # "x<1<y", "x*y>1", "u+x>=y+1", "x^2*y>=2-u*x*y"
        # This example does not have a model.
        # If the last inequality is changed to <, it does.
        "0<x<3*y", "u<v<0", "1<v^2<x", "u*(3*y)^2+1 < x^2*v+x"
        # This is a simple example with extraneous info,
        # where the contradiction is found very quickly.
        # "x*(y+z)<=0", "y+z>0", "x>=0", "x*w>0"
        # This example performs splitting, fails
        # "x+y+z<=0", "x+y>=-z"
        # This set of constraints has a model: x = 0, found by the procedure
        # "x>=0", "x^3<=0"
        # warning: the next example blows up!
        # "x^(1/2)+y^(1/2) < 30", "x^(7/2)-1<y", "y^(1/5)>4"
        # The contradiction here is found relatively quickly.
        # "x+1/y<2", "y<0", "y/x>1", "-2<=x<=2", "-2<=y<=2", "x^2*y^(-1)>1-x"
        # This example case splits and fails.
        # "((x+y)^2)^(1/2)>(x^2)^(1/2)+(y^2)^(1/2)"
        # warning: the next example blows up!
        # "(a+b)*(1/2)<(a*b)^(1/2)"
    ]
    args = []
    try:
        for ineq in ineqs:
            # Strip whitespace before parsing, as the interactive driver does.
            args.extend(make_zero_comp("".join(ineq.split())))
    except KeyError as inst:
        print "Invalid input: ", inst
        return
    run_heuristic_on_hypotheses(args)
# Entry points: uncomment exactly one of the drivers/demos below.
#run_heuristic_on_input()
#run_heuristic_on_list()
#test_heuristic()
#test_heuristic_2()
#test_heuristic_3()
#test_heuristic_4()
#test_heuristic_on_functions()
#test_heuristic_on_functions2()
# Wall-clock timer; pairs with a `start` presumably taken near the top of
# the file (outside this view) -- TODO confirm.
stop = timeit.default_timer()
#print round(stop - start, 3)
|
|
#!/usr/bin/python
# Copyright (c) 2013, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution. Neither the name of the University of California, Berkeley
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission. THIS
# SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file generates a set of graphs for a simulator "experiment".
# An experiment is equivalent to the file generated from the run of a
# single Experiment object in the simulator (i.e. a parameter sweep for a
# set of workload_descs), with the added constraint that only one of
# C, L, or lambda can be varied per a single series (the simulator
# currently allows ranges to be provided for more than one of these).
import sys, os, re
from utils import *
import numpy as np
import matplotlib.pyplot as plt
import math
import operator
import logging
from collections import defaultdict
import cluster_simulation_protos_pb2
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
def usage():
    """Print the command-line synopsis on stdout and exit with status 1."""
    sys.stdout.write("usage: scheduler-business.py <output folder> <REMOVED: input_protobuff> "
                     "<paper_mode: 0|1> <vary_dim: c|l|lambda> <env: any of A,B,C> [png]\n")
    sys.exit(1)
# if len(sys.argv) < 6:
#   logging.error("Not enough arguments provided.")
#   usage()
# Hard-coded run configuration; the commented-out block below used to read
# these values from the command line instead.
paper_mode = True
output_formats = ['pdf']
# try:
#   output_prefix = str(sys.argv[1])
#   input_protobuff = sys.argv[2]
#   if int(sys.argv[3]) == 1:
#     paper_mode = True
#   vary_dim = sys.argv[4]
#   if vary_dim not in ['c', 'l', 'lambda']:
#     logging.error("vary_dim must be c, l, or lambda!")
#     sys.exit(1)
#   envs_to_plot = sys.argv[5]
#   if re.search("[^ABC]",envs_to_plot):
#     logging.error("envs_to_plot must be any combination of a, b, and c, without spaces!")
#     sys.exit(1)
#   if len(sys.argv) == 7:
#     if sys.argv[6] == "png":
#       output_formats.append('png')
#     else:
#       logging.error("The only valid optional 5th argument is 'png'")
#       sys.exit(1)
#
# except:
#   usage()
#
# set_leg_fontsize(11)
# logging.info("Output prefix: %s" % output_prefix)
# logging.info("Input file: %s" % input_protobuff)
# google-omega-resfit-allornoth-single_path-vary_l-604800.protobuf
# google-omega-resfit-inc-single_path-vary_l-604800.protobuf
# google-omega-seqnum-allornoth-single_path-vary_l-604800.protobuf
# google-omega-seqnum-inc-single_path-vary_l-604800.protobuf
envs_to_plot = "C"
file_dir = '/Users/andyk/omega-7day-simulator-results/'
output_prefix = file_dir + "/graphs"
# (series title, protobuf file name) pairs to load.
# NOTE(review): "Course/Inc" looks like a typo for "Coarse/Inc", but the same
# spelling is used as a key in title_colors_web below -- renaming it in only
# one place would break that lookup, so it is left as is.
file_names = [("Fine/Gang", "google-omega-resfit-allornoth-single_path-vary_c-604800.protobuf"),
              ("Fine/Inc", "google-omega-resfit-inc-single_path-vary_c-604800.protobuf"),
              ("Coarse/Gang", "google-omega-seqnum-allornoth-single_path-vary_c-604800.protobuf"),
              ("Course/Inc", "google-omega-seqnum-inc-single_path-vary_c-604800.protobuf")]
experiment_result_sets = []
for title_name_tuple in file_names:
    title = title_name_tuple[0]
    file_name = title_name_tuple[1]
    full_name = file_dir + file_name
    # Read in the ExperimentResultSet.
    #experiment_result_sets.append((title, cluster_simulation_protos_pb2.ExperimentResultSet()))
    res_set = cluster_simulation_protos_pb2.ExperimentResultSet()
    experiment_result_sets.append([title, res_set])
    #titles[experiment_result_sets[-1]] = title
    f = open(full_name, "rb")
    res_set.ParseFromString(f.read())
    f.close()
# ---------------------------------------
# Set up some general graphing variables.
if paper_mode:
    set_paper_rcs()
    fig = plt.figure(figsize=(2,1.33))
else:
    fig = plt.figure()
# Style lookup tables.  Keys are cell names ('A'/'B'/'C'/"synth"),
# scheduler/workload names, or series titles, depending on the table.
prefilled_colors_web = { 'A': 'b', 'B': 'r', 'C': 'c', "synth": 'y' }
colors_web = { 'A': 'b', 'B': 'r', 'C': 'm', "synth": 'y' }
colors_paper = { 'A': 'b', 'B': 'r', 'C': 'c', "synth": 'b' }
per_wl_colors = { 'OmegaBatch': 'b',
                  'OmegaService': 'r' }
# NOTE(review): "Course/Inc" (sic) intentionally matches the spelling used in
# file_names earlier in this file.
title_colors_web = { "Fine/Gang": 'b', "Fine/Inc": 'r', "Coarse/Gang": 'm', "Course/Inc": 'c' }
prefilled_linestyles_web = { 'Monolithic': 'D-',
                             'MonolithicApprox': 's-',
                             'MesosBatch': 'D-',
                             'MesosService': 'D:',
                             'MesosBatchApprox': 's-',
                             'MesosServiceApprox': 's:',
                             'OmegaBatch': 'D-',
                             'OmegaService': 'D:',
                             'OmegaBatchApprox': 's-',
                             'OmegaServiceApprox': 's:',
                             'Batch': 'D-',
                             'Service': 'D:' }
linestyles_web = { 'Monolithic': 'x-',
                   'MonolithicApprox': 'o-',
                   'MesosBatch': 'x-',
                   'MesosService': 'x:',
                   'MesosBatchApprox': 'o-',
                   'MesosServiceApprox': 'o:',
                   'OmegaBatch': 'x-',
                   'OmegaService': 'x:',
                   'OmegaBatchApprox': 'o-',
                   'OmegaServiceApprox': 'o:',
                   'Batch': 'x-',
                   'Service': 'x:' }
linestyles_paper = { 'Monolithic': '-',
                     'MonolithicApprox': '--',
                     'MesosBatch': '-',
                     'MesosService': ':',
                     'MesosBatchApprox': '--',
                     'MesosServiceApprox': '-.',
                     'OmegaBatch': '-',
                     'OmegaService': ':',
                     'OmegaBatchApprox': '--',
                     'OmegaServiceApprox': '-.',
                     'Batch': '-',
                     'Service': ':' }
# Dash patterns (matplotlib `dashes=` tuples) used in paper-mode plots.
dashes_paper = { 'Monolithic': (None,None),
                 'MonolithicApprox': (3,3),
                 'MesosBatch': (None,None),
                 'MesosService': (1,1),
                 'MesosBatchApprox': (3,3),
                 'MesosServiceApprox': (4,2),
                 'OmegaBatch': (None,None),
                 'OmegaService': (1,1),
                 'OmegaBatchApprox': (3,3),
                 'OmegaServiceApprox': (4,2),
                 'Batch': (None,None),
                 'Service': (1,1),
                 'Fine/Gang': (1,1),
                 'Fine/Inc': (3,3),
                 'Coarse/Gang': (4,2)
               }
# Some dictionaries whose values will be dictionaries
# to make 2d dictionaries, which will be indexed by both exp_env
# and either workload or scheduler name.
# --
# (cellName, assignmentPolicy, workload_name) -> array of data points
# for the parameter sweep done in the experiment.
workload_queue_time_till_first = {}
workload_queue_time_till_fully = {}
workload_queue_time_till_first_90_ptile = {}
workload_queue_time_till_fully_90_ptile = {}
workload_num_jobs_unscheduled = {}
# (cellName, assignmentPolicy, scheduler_name) -> array of data points
# for the parameter sweep done in the experiment.
sched_total_busy_fraction = {}
sched_daily_busy_fraction = {}
sched_daily_busy_fraction_err = {}
# TODO(andyk): Graph retry_busy_fraction on same graph as total_busy_fraction
# to parallel Malte's graphs.
# sched_retry_busy_fraction = {}
sched_conflict_fraction = {}
sched_daily_conflict_fraction = {}
sched_daily_conflict_fraction_err = {}
sched_task_conflict_fraction = {}
sched_num_retried_transactions = {}
sched_num_jobs_remaining = {}
sched_failed_find_victim_attempts = {}
# Convenience wrapper to override __str__()
class ExperimentEnv:
    """Thin wrapper around an ExperimentEnv protobuf message that adds a
    readable __str__() and sweep-dimension detection."""

    def __init__(self, init_exp_env):
        self.exp_env = init_exp_env
        self.cell_name = init_exp_env.cell_name
        self.workload_split_type = init_exp_env.workload_split_type
        self.is_prefilled = init_exp_env.is_prefilled
        self.run_time = init_exp_env.run_time

    def __str__(self):
        return "%s, %s" % (self.exp_env.cell_name, self.exp_env.workload_split_type)

    def vary_dim(self):
        """Figure out if this experiment varies c, l, or lambda by comparing
        the first two experiment results."""
        results = self.exp_env.experiment_result
        assert(len(results) > 1)
        first, second = results[0], results[1]
        if first.constant_think_time != second.constant_think_time:
            return "c"
        if first.per_task_think_time != second.per_task_think_time:
            return "l"
        return "lambda"
class Value:
    """A single (x, y) data point for the 2-D plots below."""

    def __init__(self, init_x, init_y):
        self.x, self.y = init_x, init_y

    def __str__(self):
        return "%f, %f" % (self.x, self.y)
def bt_approx(cell_name, sched_name, point, vary_dim_, tt_c, tt_l, runtime):
    """Approximate no-conflict scheduler busy-time fraction at one sweep point.

    busy_time = num_jobs * per_job_think_time / runtime, where
    per_job_think_time = C + n*L, lambda (lbd) is the job arrival rate and
    n the average tasks per job, both hard-coded per cell below.

    point is the value of the swept dimension; vary_dim_ selects whether
    it plays the role of C ('c') or L ('l').  Returns None for any other
    vary_dim_ value (e.g. 'lambda'), which callers must tolerate.

    NOTE: the Python-2-only `print >> sys.stderr` was replaced by
    sys.stderr.write with identical output bytes, so this function is
    now version-portable.
    """
    logging.debug("sched_name is %s " % sched_name)
    assert(sched_name == "Batch" or sched_name == "Service")
    lbd = {}
    n = {}
    # XXX: configure the below parameters and comment out the following
    # write in order to
    # 1) disable the warning, and
    # 2) get a correct no-conflict approximation.
    sys.stderr.write("*********************************************\n"
                     "WARNING: YOU HAVE NOT CONFIGURED THE PARAMETERS IN THE bt_approx\n"
                     "*********************************************\n"
                     "\n")
    ################################
    # XXX EDIT BELOW HERE
    # hard-coded SAMPLE params for cluster A
    lbd['A'] = { "Batch": 0.1, "Service": 0.01 }  # lambdas for 0: serv & 1: Batch
    n['A'] = { "Batch": 10.0, "Service": 5.0 }    # avg num tasks per job
    # hard-coded SAMPLE params for cluster B
    lbd['B'] = { "Batch": 0.1, "Service": 0.01 }
    n['B'] = { "Batch": 10.0, "Service": 5.0 }
    # hard-coded SAMPLE params for cluster C
    lbd['C'] = { "Batch": 0.1, "Service": 0.01 }
    n['C'] = { "Batch": 10.0, "Service": 5.0 }
    ################################
    # approximation formula
    if vary_dim_ == 'c':
        # busy_time = num_jobs * (per_job_think_time = C + nL) / runtime
        return runtime * lbd[cell_name][sched_name] * \
            ((point + n[cell_name][sched_name] * float(tt_l))) / runtime
    elif vary_dim_ == 'l':
        return runtime * lbd[cell_name][sched_name] * \
            ((float(tt_c) + n[cell_name][sched_name] * point)) / runtime
def get_mad(median, data):
    """Median absolute deviation of *data* around the supplied *median*."""
    deviations = [abs(sample - median) for sample in data]
    return np.median(deviations)
def sort_labels(handles, labels):
    """Sort legend (handle, label) pairs alphabetically by label and
    return them as two parallel tuples."""
    paired = sorted(zip(handles, labels), key=lambda pair: pair[1])
    sorted_handles, sorted_labels = zip(*paired)
    return (sorted_handles, sorted_labels)
# Main aggregation pass: for every loaded result set, collect per-scheduler
# daily busy-time and conflict-fraction medians (and their MADs) into the
# sched_daily_* 2-d dicts declared above, keyed by (title, scheduler name).
for experiment_result_set_arry in experiment_result_sets:
    title = experiment_result_set_arry[0]
    logging.debug("\n\n==========================\nHandling title %s." % title)
    experiment_result_set = experiment_result_set_arry[1]
    # Loop through each experiment environment.
    logging.debug("Processing %d experiment envs."
                  % len(experiment_result_set.experiment_env))
    for env in experiment_result_set.experiment_env:
        if not re.search(cell_to_anon(env.cell_name), envs_to_plot):
            logging.debug(" skipping env/cell " + env.cell_name)
            continue
        logging.debug("\n\n\n env: " + env.cell_name)
        exp_env = ExperimentEnv(env)  # Wrap the protobuff object to get __str__()
        logging.debug(" Handling experiment env %s." % exp_env)
        # Within this environment, loop through each experiment result
        logging.debug(" Processing %d experiment results." % len(env.experiment_result))
        for exp_result in env.experiment_result:
            logging.debug(" Handling experiment with per_task_think_time %f, constant_think_time %f"
                          % (exp_result.per_task_think_time, exp_result.constant_think_time))
            # Record the correct x val depending on which dimension is being
            # swept over in this experiment.
            vary_dim = exp_env.vary_dim()  # This line is unecessary since this value
                                           # is a flag passed as an arg to the script.
            if vary_dim == "c":
                x_val = exp_result.constant_think_time
            elif vary_dim == "l":
                x_val = exp_result.per_task_think_time
            else:
                x_val = exp_result.avg_job_interarrival_time
            # logging.debug("Set x_val to %f." % x_val)
            # Build results dictionaries of per-scheduler stats.
            for sched_stat in exp_result.scheduler_stats:
                # Per day busy time and conflict fractions.
                daily_busy_fractions = []
                daily_conflict_fractions = []
                daily_conflicts = []  # counts the mean of daily abs # of conflicts.
                daily_successes = []
                logging.debug(" handling scheduler %s" % sched_stat.scheduler_name)
                for day_stats in sched_stat.per_day_stats:
                    # Calculate the total busy time for each of the days and then
                    # take median of all of them.
                    run_time_for_day = exp_env.run_time - 86400 * day_stats.day_num
                    # logging.debug("setting run_time_for_day = exp_env.run_time - 86400 * "
                    #               "day_stats.day_num = %f - 86400 * %d = %f"
                    #               % (exp_env.run_time, day_stats.day_num, run_time_for_day))
                    # Only days that overlap the simulated run contribute.
                    if run_time_for_day > 0.0:
                        daily_busy_fractions.append(((day_stats.useful_busy_time +
                                                      day_stats.wasted_busy_time) /
                                                     min(86400.0, run_time_for_day)))
                        if day_stats.num_successful_transactions > 0:
                            conflict_fraction = (float(day_stats.num_failed_transactions) /
                                                 float(day_stats.num_successful_transactions))
                            daily_conflict_fractions.append(conflict_fraction)
                            daily_conflicts.append(float(day_stats.num_failed_transactions))
                            daily_successes.append(float(day_stats.num_successful_transactions))
                            # logging.debug("appending daily_conflict_fraction %f / %f = %f."
                            #               % (float(day_stats.num_failed_transactions),
                            #                  float(day_stats.num_successful_transactions),
                            #                  conflict_fraction))
                        else:
                            daily_conflict_fractions.append(0)
                # Daily busy time median.
                daily_busy_time_med = np.median(daily_busy_fractions)
                logging.debug(" Daily_busy_fractions, med: %f, vals: %s"
                              % (daily_busy_time_med,
                                 " ".join([str(i) for i in daily_busy_fractions])))
                value = Value(x_val, daily_busy_time_med)
                append_or_create_2d(sched_daily_busy_fraction,
                                    title,
                                    sched_stat.scheduler_name,
                                    value)
                #logging.debug("sched_daily_busy_fraction[%s %s].append(%s)."
                #              % (exp_env, sched_stat.scheduler_name, value))
                # Error Bar (MAD) for daily busy time.
                value = Value(x_val, get_mad(daily_busy_time_med,
                                             daily_busy_fractions))
                append_or_create_2d(sched_daily_busy_fraction_err,
                                    title,
                                    sched_stat.scheduler_name,
                                    value)
                #logging.debug("sched_daily_busy_fraction_err[%s %s].append(%s)."
                #              % (exp_env, sched_stat.scheduler_name, value))
                # Daily conflict fraction median.
                daily_conflict_fraction_med = np.median(daily_conflict_fractions)
                logging.debug(" Daily_abs_num_conflicts, med: %f, vals: %s"
                              % (np.median(daily_conflicts),
                                 " ".join([str(i) for i in daily_conflicts])))
                logging.debug(" Daily_num_successful_conflicts, med: %f, vals: %s"
                              % (np.median(daily_successes),
                                 " ".join([str(i) for i in daily_successes])))
                logging.debug(" Daily_conflict_fractions, med : %f, vals: %s\n --"
                              % (daily_conflict_fraction_med,
                                 " ".join([str(i) for i in daily_conflict_fractions])))
                value = Value(x_val, daily_conflict_fraction_med)
                append_or_create_2d(sched_daily_conflict_fraction,
                                    title,
                                    sched_stat.scheduler_name,
                                    value)
                # logging.debug("sched_daily_conflict_fraction[%s %s].append(%s)."
                #               % (exp_env, sched_stat.scheduler_name, value))
                # Error Bar (MAD) for daily conflict fraction.
                value = Value(x_val, get_mad(daily_conflict_fraction_med,
                                             daily_conflict_fractions))
                append_or_create_2d(sched_daily_conflict_fraction_err,
                                    title,
                                    sched_stat.scheduler_name,
                                    value)
def plot_2d_data_set_dict(data_set_2d_dict,
                          plot_title,
                          filename_suffix,
                          y_label,
                          y_axis_type,
                          error_bars_data_set_2d_dict = None):
    # Draw one line per (title, scheduler-name) entry of a 2-d dict of
    # title -> name -> [Value(x, y), ...], then hand off axis/legend setup
    # and file output to setup_graph_details().
    # error_bars_data_set_2d_dict is accepted but currently unused here.
    assert(y_axis_type == "0-to-1" or
           y_axis_type == "ms-to-day" or
           y_axis_type == "abs")
    plt.clf()
    ax = fig.add_subplot(111)
    for title, name_to_val_map in data_set_2d_dict.iteritems():
        for wl_or_sched_name, values in name_to_val_map.iteritems():
            line_label = title
            # Hacky: chop MonolithicBatch, MesosBatch, MonolithicService, etc.
            # down to "Batch" and "Service" if in paper mode.
            updated_wl_or_sched_name = wl_or_sched_name
            if paper_mode and re.search("Batch", wl_or_sched_name):
                updated_wl_or_sched_name = "Batch"
            if paper_mode and re.search("Service", wl_or_sched_name):
                updated_wl_or_sched_name = "Service"
            # Don't show lines for service frameworks
            if updated_wl_or_sched_name == "Batch":
                # NOTE(review): the next line is a bare string, i.e. a no-op
                # statement (probably intended as logging.debug), and the
                # comment above says "service" while the condition skips
                # "Batch" -- confirm which set of lines should be hidden.
                "Skipping a line for a service scheduler"
                continue
            x_vals = [value.x for value in values]
            # Rewrite zero's for the y_axis_types that will be log.
            y_vals = [0.00001 if (value.y == 0 and y_axis_type == "ms-to-day")
                      else value.y for value in values]
            logging.debug("Plotting line for %s %s %s." %
                          (title, updated_wl_or_sched_name, plot_title))
            #logging.debug("x vals: " + " ".join([str(i) for i in x_vals]))
            #logging.debug("y vals: " + " ".join([str(i) for i in y_vals]))
            logging.debug("wl_or_sched_name: " + wl_or_sched_name)
            logging.debug("title: " + title)
            ax.plot(x_vals, y_vals,
                    dashes=dashes_paper[wl_or_sched_name],
                    color=title_colors_web[title],
                    label=line_label, markersize=4,
                    mec=title_colors_web[title])
    setup_graph_details(ax, plot_title, filename_suffix, y_label, y_axis_type)
def setup_graph_details(ax, plot_title, filename_suffix, y_label, y_axis_type):
    # Apply title/legend/axis styling and write the current figure to disk
    # under output_prefix in each requested output format.
    # y_axis_type selects the y-axis style: "0-to-1" (linear fraction),
    # "ms-to-day" (log time scale), or "abs" (absolute counts).
    assert(y_axis_type == "0-to-1" or
           y_axis_type == "ms-to-day" or
           y_axis_type == "abs")
    # Paper title.
    if not paper_mode:
        plt.title(plot_title)
    if paper_mode:
        try:
            # Set up the legend, for removing the border if in paper mode.
            handles, labels = ax.get_legend_handles_labels()
            handles2, labels2 = sort_labels(handles, labels)
            leg = plt.legend(handles2, labels2, loc=2, labelspacing=0)
            fr = leg.get_frame()
            fr.set_linewidth(0)
        except:
            print "Failed to remove frame around legend, legend probably is empty."
    # Axis labels.
    # NOTE(review): axis labels are only drawn in non-paper mode -- nesting
    # reconstructed from a whitespace-mangled source; confirm.
    if not paper_mode:
        ax.set_ylabel(y_label)
        if vary_dim == "c":
            ax.set_xlabel(u'Scheduler 1 constant processing time [sec]')
        elif vary_dim == "l":
            ax.set_xlabel(u'Scheduler 1 per-task processing time [sec]')
        elif vary_dim == "lambda":
            ax.set_xlabel(u'Job arrival rate to scheduler 1, $\lambda_1$')
    # x-axis scale, limit, tics and tic labels.
    ax.set_xscale('log')
    ax.set_autoscalex_on(False)
    if vary_dim == 'c':
        plt.xlim(xmin=0.01)
        plt.xticks((0.01, 0.1, 1, 10, 100), ('10ms', '0.1s', '1s', '10s', '100s'))
    elif vary_dim == 'l':
        plt.xlim(xmin=0.001, xmax=1)
        plt.xticks((0.001, 0.01, 0.1, 1), ('1ms', '10ms', '0.1s', '1s'))
    elif vary_dim == 'lambda':
        plt.xlim([0.1, 100])
        plt.xticks((0.1, 1, 10, 100), ('0.1s', '1s', '10s', '100s'))
    # y-axis limit, tics and tic labels.
    if y_axis_type == "0-to-1":
        logging.debug("Setting up y-axis for '0-to-1' style graph.")
        plt.ylim([0, 1])
        plt.yticks((0, 0.2, 0.4, 0.6, 0.8, 1.0),
                   ('0.0', '0.2', '0.4', '0.6', '0.8', '1.0'))
    elif y_axis_type == "ms-to-day":
        logging.debug("Setting up y-axis for 'ms-to-day' style graph.")
        #ax.set_yscale('symlog', linthreshy=0.001)
        ax.set_yscale('log')
        plt.ylim(ymin=0.01, ymax=24*3600)
        plt.yticks((0.01, 1, 60, 3600, 24*3600), ('10ms', '1s', '1m', '1h', '1d'))
    elif y_axis_type == "abs":
        plt.ylim(ymin=0)
        logging.debug("Setting up y-axis for 'abs' style graph.")
        #plt.yticks((0.01, 1, 60, 3600, 24*3600), ('10ms', '1s', '1m', '1h', '1d'))
    else:
        logging.error('y_axis_label parameter must be either "0-to-1"'
                      ', "ms-to-day", or "abs".')
        sys.exit(1)
    final_filename = (output_prefix +
                      ('/sisi-vary-%s-vs-' % vary_dim) +
                      filename_suffix)
    logging.debug("Writing plot to %s", final_filename)
    writeout(final_filename, output_formats)
#SCHEDULER DAILY BUSY AND CONFLICT FRACTION MEDIANS
# Emit the two graphs built from the aggregation pass above.
plot_2d_data_set_dict(sched_daily_busy_fraction,
                      "Scheduler processing time vs. median(daily busy time fraction)",
                      "daily-busy-fraction-med",
                      u'Median(daily busy time fraction)',
                      "0-to-1")
plot_2d_data_set_dict(sched_daily_conflict_fraction,
                      "Scheduler processing time vs. median(daily conflict fraction)",
                      "daily-conflict-fraction-med",
                      u'Median(daily conflict fraction)',
                      "0-to-1")
|
|
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import strutils
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import cells
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
from nova.i18n import _
from nova import rpc
CONF = cfg.CONF
# Make CONF.cells.name / CONF.cells.capabilities (defined in nova.cells.opts)
# available to the controller below.
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
ALIAS = "os-cells"
# Policy authorizer for this API extension.
authorize = extensions.os_compute_authorizer(ALIAS)
def _filter_keys(item, keys):
"""Filters all model attributes except for keys
item is a dict
"""
return {k: v for k, v in six.iteritems(item) if k in keys}
def _fixup_cell_info(cell_info, keys):
    """If the transport_url is present in the cell, derive username,
    rpc_host, and rpc_port from it.

    Mutates cell_info in place; the 'transport_url' entry is consumed.
    """
    if 'transport_url' not in cell_info:
        return
    # Disassemble the transport URL.
    url_string = cell_info.pop('transport_url')
    try:
        parsed_url = rpc.get_transport_url(url_string)
    except messaging.InvalidTransportURL:
        # Unparseable URL: just fill every missing key with None.
        for key in keys:
            cell_info.setdefault(key, None)
        return
    if not parsed_url.hosts:
        return
    first_host = parsed_url.hosts[0]
    # Map cell-info keys to TransportHost attribute names where they differ.
    field_names = {'rpc_host': 'hostname', 'rpc_port': 'port'}
    for key in keys:
        if key not in cell_info:
            cell_info[key] = getattr(first_host, field_names.get(key, key))
def _scrub_cell(cell, detail=False):
    """Reduce a cell dict to its API-visible fields, deriving transport
    details and tagging the cell as 'parent' or 'child'."""
    keys = ['name', 'username', 'rpc_host', 'rpc_port']
    if detail:
        keys.append('capabilities')
    cell_info = _filter_keys(cell, keys + ['transport_url'])
    _fixup_cell_info(cell_info, keys)
    if cell['is_parent']:
        cell_info['type'] = 'parent'
    else:
        cell_info['type'] = 'child'
    return cell_info
class CellsController(wsgi.Controller):
"""Controller for Cell resources."""
    def __init__(self):
        # RPC client used to talk to the cells manager service.
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _get_cells(self, ctxt, req, detail=False):
"""Return all cells."""
# Ask the CellsManager for the most recent data
items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
items = common.limited(items, req)
items = [_scrub_cell(item, detail=detail) for item in items]
return dict(cells=items)
@extensions.expected_errors(501)
@common.check_cells_enabled
def index(self, req):
"""Return all cells in brief."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req)
@extensions.expected_errors(501)
@common.check_cells_enabled
def detail(self, req):
"""Return all cells in detail."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req, detail=True)
@extensions.expected_errors(501)
@common.check_cells_enabled
def info(self, req):
"""Return name and capabilities for this cell."""
context = req.environ['nova.context']
authorize(context)
cell_capabs = {}
my_caps = CONF.cells.capabilities
for cap in my_caps:
key, value = cap.split('=')
cell_capabs[key] = value
cell = {'name': CONF.cells.name,
'type': 'self',
'rpc_host': None,
'rpc_port': 0,
'username': None,
'capabilities': cell_capabs}
return dict(cell=cell)
    @extensions.expected_errors((404, 501))
    @common.check_cells_enabled
    def capacities(self, req, id=None):
        """Return capacities for a given cell or all cells.

        'id' is a cell name, or None for all cells (per the RPC API's
        cell_name parameter).  Raises HTTPNotFound (404) if the named
        cell does not exist.
        """
        # TODO(kaushikc): return capacities as a part of cell info and
        # cells detail calls in v3, along with capabilities
        context = req.environ['nova.context']
        authorize(context)
        try:
            capacities = self.cells_rpcapi.get_capacities(context,
                                                          cell_name=id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return dict(cell={"capacities": capacities})
    @extensions.expected_errors((404, 501))
    @common.check_cells_enabled
    def show(self, req, id):
        """Return data about the given cell name. 'id' is a cell name.

        Raises HTTPNotFound (404) if the cell does not exist.
        """
        context = req.environ['nova.context']
        authorize(context)
        try:
            cell = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        # Scrub internal fields (transport_url etc.) before returning.
        return dict(cell=_scrub_cell(cell))
# NOTE(gmann): Returns 200 for backwards compatibility but should be 204
# as this operation complete the deletion of aggregate resource and return
# no response body.
@extensions.expected_errors((403, 404, 501))
@common.check_cells_enabled
def delete(self, req, id):
"""Delete a child or parent cell entry. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context, action="delete")
try:
num_deleted = self.cells_rpcapi.cell_delete(context, id)
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
if num_deleted == 0:
raise exc.HTTPNotFound(
explanation=_("Cell %s doesn't exist.") % id)
    def _normalize_cell(self, cell, existing=None):
        """Normalize input cell data. Normalizations include:

        * Converting cell['type'] to is_parent boolean.
        * Merging existing transport URL with transport information.

        :param cell: mutable dict of input cell fields; mutated in place
            (transport-related keys are popped and folded into
            cell['transport_url']).
        :param existing: optional dict for the stored cell record whose
            values seed anything the input does not override.
        """
        # Start with the cell type conversion
        if 'type' in cell:
            cell['is_parent'] = cell['type'] == 'parent'
            del cell['type']
        # Avoid cell type being overwritten to 'child'
        elif existing:
            cell['is_parent'] = existing['is_parent']
        else:
            cell['is_parent'] = False
        # Now we disassemble the existing transport URL...
        transport_url = existing.get('transport_url') if existing else None
        transport_url = rpc.get_transport_url(transport_url)
        if 'rpc_virtual_host' in cell:
            transport_url.virtual_host = cell.pop('rpc_virtual_host')
        # Guarantee at least one host entry so the setattr loop below
        # always has a target.
        if not transport_url.hosts:
            transport_url.hosts.append(messaging.TransportHost())
        transport_host = transport_url.hosts[0]
        if 'rpc_port' in cell:
            # Ports may arrive as strings; the transport URL expects an int.
            cell['rpc_port'] = int(cell['rpc_port'])
        # Copy over the input fields
        transport_field_map = {
            'username': 'username',
            'password': 'password',
            'hostname': 'rpc_host',
            'port': 'rpc_port',
        }
        for key, input_field in transport_field_map.items():
            # Only override the value if we're given an override
            if input_field in cell:
                setattr(transport_host, key, cell.pop(input_field))
        # Now set the transport URL
        cell['transport_url'] = str(transport_url)
# NOTE(gmann): Returns 200 for backwards compatibility but should be 201
# as this operation complete the creation of aggregates resource when
# returning a response.
@extensions.expected_errors((400, 403, 501))
@common.check_cells_enabled
@validation.schema(cells.create)
def create(self, req, body):
"""Create a child cell entry."""
context = req.environ['nova.context']
authorize(context, action="create")
cell = body['cell']
self._normalize_cell(cell)
try:
cell = self.cells_rpcapi.cell_create(context, cell)
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
@extensions.expected_errors((400, 403, 404, 501))
@common.check_cells_enabled
@validation.schema(cells.update)
def update(self, req, id, body):
"""Update a child cell entry. 'id' is the cell name to update."""
context = req.environ['nova.context']
authorize(context, action="update")
cell = body['cell']
cell.pop('id', None)
try:
# NOTE(Vek): There is a race condition here if multiple
# callers are trying to update the cell
# information simultaneously. Since this
# operation is administrative in nature, and
# will be going away in the future, I don't see
# it as much of a problem...
existing = self.cells_rpcapi.cell_get(context, id)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
self._normalize_cell(cell, existing)
try:
cell = self.cells_rpcapi.cell_update(context, id, cell)
except exception.CellNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.CellsUpdateUnsupported as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(cell=_scrub_cell(cell))
# NOTE(gmann): Returns 200 for backwards compatibility but should be 204
# as this operation complete the sync instance info and return
# no response body.
@extensions.expected_errors((400, 501))
@common.check_cells_enabled
@validation.schema(cells.sync_instances)
def sync_instances(self, req, body):
"""Tell all cells to sync instance info."""
context = req.environ['nova.context']
authorize(context, action="sync_instances")
project_id = body.pop('project_id', None)
deleted = body.pop('deleted', False)
updated_since = body.pop('updated_since', None)
if isinstance(deleted, six.string_types):
deleted = strutils.bool_from_string(deleted, strict=True)
self.cells_rpcapi.sync_instances(context, project_id=project_id,
updated_since=updated_since, deleted=deleted)
class Cells(extensions.V21APIExtensionBase):
    """Enables cells-related functionality such as adding neighbor cells,
    listing neighbor cells, and getting the capabilities of the local cell.
    """
    # Extension metadata consumed by the v2.1 extension loader.
    name = "Cells"
    alias = ALIAS
    version = 1
    def get_resources(self):
        """Return the ResourceExtension that routes cells requests."""
        # Extra actions exposed on the cells collection URL.
        coll_actions = {
            'detail': 'GET',
            'info': 'GET',
            'sync_instances': 'POST',
            'capacities': 'GET',
        }
        # Extra actions exposed on an individual cell URL.
        memb_actions = {
            'capacities': 'GET',
        }
        res = extensions.ResourceExtension(ALIAS, CellsController(),
                                           collection_actions=coll_actions,
                                           member_actions=memb_actions)
        return [res]
    def get_controller_extensions(self):
        """This extension contributes no controller extensions."""
        return []
|
|
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
__all__ = ['Emitter', 'EmitterError']
from .error import YAMLError
from .events import *
class EmitterError(YAMLError):
    """Raised when events arrive in an order the emitter cannot serialize."""
    pass
class ScalarAnalysis:
    """Summary of a scalar's text used to choose its output style.

    The boolean flags record which YAML scalar styles (flow/block plain,
    single-quoted, double-quoted, block) can represent the text.
    """
    def __init__(self, scalar, empty, multiline,
            allow_flow_plain, allow_block_plain,
            allow_single_quoted, allow_double_quoted,
            allow_block):
        # The analyzed text itself.
        self.scalar = scalar
        # True when the text is empty.
        self.empty = empty
        # True when the text contains line breaks.
        self.multiline = multiline
        self.allow_flow_plain = allow_flow_plain
        self.allow_block_plain = allow_block_plain
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted
        self.allow_block = allow_block
class Emitter:
DEFAULT_TAG_PREFIXES = {
'!' : '!',
'tag:yaml.org,2002:' : '!!',
}
    def __init__(self, stream, canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None):
        """Create an emitter writing YAML text to *stream*.

        :param stream: object with a ``write`` method (and optionally
            ``flush``) receiving the serialized output.
        :param canonical: emit canonical form when truthy.
        :param indent: indentation width; honored only when 1 < indent < 10.
        :param width: preferred line width; honored only when larger than
            twice the effective indent.
        :param allow_unicode: pass non-ASCII characters through unescaped.
        :param line_break: one of ``'\\r'``, ``'\\n'`` or ``'\\r\\n'``.
        """
        # The stream should have the methods `write` and possibly `flush`.
        self.stream = stream
        # Encoding can be overridden by STREAM-START.
        self.encoding = None
        # Emitter is a state machine with a stack of states to handle nested
        # structures.
        self.states = []
        self.state = self.expect_stream_start
        # Current event and the event queue.
        self.events = []
        self.event = None
        # The current indentation level and the stack of previous indents.
        self.indents = []
        self.indent = None
        # Flow level.
        self.flow_level = 0
        # Contexts.
        self.root_context = False
        self.sequence_context = False
        self.mapping_context = False
        self.simple_key_context = False
        # Characteristics of the last emitted character:
        # - current position.
        # - is it a whitespace?
        # - is it an indention character
        #   (indentation space, '-', '?', or ':')?
        self.line = 0
        self.column = 0
        self.whitespace = True
        self.indention = True
        # Whether the document requires an explicit document indicator
        self.open_ended = False
        # Formatting details.
        self.canonical = canonical
        self.allow_unicode = allow_unicode
        self.best_indent = 2
        if indent and 1 < indent < 10:
            self.best_indent = indent
        self.best_width = 80
        if width and width > self.best_indent*2:
            self.best_width = width
        self.best_line_break = '\n'
        if line_break in ['\r', '\n', '\r\n']:
            self.best_line_break = line_break
        # Tag prefixes.
        self.tag_prefixes = None
        # Prepared anchor and tag.
        self.prepared_anchor = None
        self.prepared_tag = None
        # Scalar analysis and style.
        self.analysis = None
        self.style = None
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
    def emit(self, event):
        """Queue *event* and process as many queued events as possible."""
        self.events.append(event)
        while not self.need_more_events():
            self.event = self.events.pop(0)
            # Each state callable consumes self.event and sets the next state.
            self.state()
            self.event = None
    # In some cases, we wait for a few next events before emitting.
    def need_more_events(self):
        """Return True when emission must wait for more queued events.

        Document/sequence/mapping starts require lookahead so that empty
        documents and empty collections can be detected up front.
        """
        if not self.events:
            return True
        event = self.events[0]
        if isinstance(event, DocumentStartEvent):
            return self.need_events(1)
        elif isinstance(event, SequenceStartEvent):
            return self.need_events(2)
        elif isinstance(event, MappingStartEvent):
            return self.need_events(3)
        else:
            return False
    def need_events(self, count):
        """Return True unless *count* lookahead events are already queued.

        Nesting is tracked so that lookahead stops early once the current
        document/collection closes (level goes negative).
        """
        level = 0
        for event in self.events[1:]:
            if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
                level += 1
            elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
                level -= 1
            elif isinstance(event, StreamEndEvent):
                level = -1
            if level < 0:
                return False
        return (len(self.events) < count+1)
def increase_indent(self, flow=False, indentless=False):
self.indents.append(self.indent)
if self.indent is None:
if flow:
self.indent = self.best_indent
else:
self.indent = 0
elif not indentless:
self.indent += self.best_indent
# States.
# Stream handlers.
def expect_stream_start(self):
if isinstance(self.event, StreamStartEvent):
if self.event.encoding and not hasattr(self.stream, 'encoding'):
self.encoding = self.event.encoding
self.write_stream_start()
self.state = self.expect_first_document_start
else:
raise EmitterError("expected StreamStartEvent, but got %s"
% self.event)
def expect_nothing(self):
raise EmitterError("expected nothing, but got %s" % self.event)
# Document handlers.
def expect_first_document_start(self):
return self.expect_document_start(first=True)
def expect_document_start(self, first=False):
if isinstance(self.event, DocumentStartEvent):
if (self.event.version or self.event.tags) and self.open_ended:
self.write_indicator('...', True)
self.write_indent()
if self.event.version:
version_text = self.prepare_version(self.event.version)
self.write_version_directive(version_text)
self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
if self.event.tags:
handles = sorted(self.event.tags.keys())
for handle in handles:
prefix = self.event.tags[handle]
self.tag_prefixes[prefix] = handle
handle_text = self.prepare_tag_handle(handle)
prefix_text = self.prepare_tag_prefix(prefix)
self.write_tag_directive(handle_text, prefix_text)
implicit = (first and not self.event.explicit and not self.canonical
and not self.event.version and not self.event.tags
and not self.check_empty_document())
if not implicit:
self.write_indent()
self.write_indicator('---', True)
if self.canonical:
self.write_indent()
self.state = self.expect_document_root
elif isinstance(self.event, StreamEndEvent):
if self.open_ended:
self.write_indicator('...', True)
self.write_indent()
self.write_stream_end()
self.state = self.expect_nothing
else:
raise EmitterError("expected DocumentStartEvent, but got %s"
% self.event)
def expect_document_end(self):
if isinstance(self.event, DocumentEndEvent):
self.write_indent()
if self.event.explicit:
self.write_indicator('...', True)
self.write_indent()
self.flush_stream()
self.state = self.expect_document_start
else:
raise EmitterError("expected DocumentEndEvent, but got %s"
% self.event)
def expect_document_root(self):
self.states.append(self.expect_document_end)
self.expect_node(root=True)
# Node handlers.
def expect_node(self, root=False, sequence=False, mapping=False,
simple_key=False):
self.root_context = root
self.sequence_context = sequence
self.mapping_context = mapping
self.simple_key_context = simple_key
if isinstance(self.event, AliasEvent):
self.expect_alias()
elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
self.process_anchor('&')
self.process_tag()
if isinstance(self.event, ScalarEvent):
self.expect_scalar()
elif isinstance(self.event, SequenceStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_sequence():
self.expect_flow_sequence()
else:
self.expect_block_sequence()
elif isinstance(self.event, MappingStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_mapping():
self.expect_flow_mapping()
else:
self.expect_block_mapping()
else:
raise EmitterError("expected NodeEvent, but got %s" % self.event)
def expect_alias(self):
if self.event.anchor is None:
raise EmitterError("anchor is not specified for alias")
self.process_anchor('*')
self.state = self.states.pop()
def expect_scalar(self):
self.increase_indent(flow=True)
self.process_scalar()
self.indent = self.indents.pop()
self.state = self.states.pop()
# Flow sequence handlers.
def expect_flow_sequence(self):
self.write_indicator('[', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_sequence_item
def expect_first_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator(']', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
def expect_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(',', False)
self.write_indent()
self.write_indicator(']', False)
self.state = self.states.pop()
else:
self.write_indicator(',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
# Flow mapping handlers.
def expect_flow_mapping(self):
self.write_indicator('{', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_mapping_key
def expect_first_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator('}', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(',', False)
self.write_indent()
self.write_indicator('}', False)
self.state = self.states.pop()
else:
self.write_indicator(',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_simple_value(self):
self.write_indicator(':', False)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
def expect_flow_mapping_value(self):
if self.canonical or self.column > self.best_width:
self.write_indent()
self.write_indicator(':', True)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
# Block sequence handlers.
def expect_block_sequence(self):
indentless = (self.mapping_context and not self.indention)
self.increase_indent(flow=False, indentless=indentless)
self.state = self.expect_first_block_sequence_item
def expect_first_block_sequence_item(self):
return self.expect_block_sequence_item(first=True)
def expect_block_sequence_item(self, first=False):
if not first and isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
self.write_indicator('-', True, indention=True)
self.states.append(self.expect_block_sequence_item)
self.expect_node(sequence=True)
# Block mapping handlers.
def expect_block_mapping(self):
self.increase_indent(flow=False)
self.state = self.expect_first_block_mapping_key
def expect_first_block_mapping_key(self):
return self.expect_block_mapping_key(first=True)
def expect_block_mapping_key(self, first=False):
if not first and isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
if self.check_simple_key():
self.states.append(self.expect_block_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True, indention=True)
self.states.append(self.expect_block_mapping_value)
self.expect_node(mapping=True)
def expect_block_mapping_simple_value(self):
self.write_indicator(':', False)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
def expect_block_mapping_value(self):
self.write_indent()
self.write_indicator(':', True, indention=True)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
return (isinstance(self.event, SequenceStartEvent) and self.events
and isinstance(self.events[0], SequenceEndEvent))
def check_empty_mapping(self):
return (isinstance(self.event, MappingStartEvent) and self.events
and isinstance(self.events[0], MappingEndEvent))
def check_empty_document(self):
if not isinstance(self.event, DocumentStartEvent) or not self.events:
return False
event = self.events[0]
return (isinstance(event, ScalarEvent) and event.anchor is None
and event.tag is None and event.implicit and event.value == '')
def check_simple_key(self):
length = 0
if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
length += len(self.prepared_anchor)
if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
and self.event.tag is not None:
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(self.event.tag)
length += len(self.prepared_tag)
if isinstance(self.event, ScalarEvent):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
length += len(self.analysis.scalar)
return (length < 128 and (isinstance(self.event, AliasEvent)
or (isinstance(self.event, ScalarEvent)
and not self.analysis.empty and not self.analysis.multiline)
or self.check_empty_sequence() or self.check_empty_mapping()))
# Anchor, Tag, and Scalar processors.
def process_anchor(self, indicator):
if self.event.anchor is None:
self.prepared_anchor = None
return
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
if self.prepared_anchor:
self.write_indicator(indicator+self.prepared_anchor, True)
self.prepared_anchor = None
def process_tag(self):
tag = self.event.tag
if isinstance(self.event, ScalarEvent):
if self.style is None:
self.style = self.choose_scalar_style()
if ((not self.canonical or tag is None) and
((self.style == '' and self.event.implicit[0])
or (self.style != '' and self.event.implicit[1]))):
self.prepared_tag = None
return
if self.event.implicit[0] and tag is None:
tag = '!'
self.prepared_tag = None
else:
if (not self.canonical or tag is None) and self.event.implicit:
self.prepared_tag = None
return
if tag is None:
raise EmitterError("tag is not specified")
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(tag)
if self.prepared_tag:
self.write_indicator(self.prepared_tag, True)
self.prepared_tag = None
    def choose_scalar_style(self):
        """Pick the output style for the current scalar event.

        Preference order: forced double quotes (explicit '"' or canonical
        mode), plain, the requested block style ('|' or '>'), single
        quotes, then double quotes as the universal fallback.
        """
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        # Canonical output and an explicit '"' request always double-quote.
        if self.event.style == '"' or self.canonical:
            return '"'
        # Plain style is allowed only for implicitly-tagged scalars that
        # the analysis deems safe in the current (flow/block) context.
        if not self.event.style and self.event.implicit[0]:
            if (not (self.simple_key_context and
                    (self.analysis.empty or self.analysis.multiline))
                and (self.flow_level and self.analysis.allow_flow_plain
                    or (not self.flow_level and self.analysis.allow_block_plain))):
                return ''
        # Block styles are illegal in flow context and for simple keys.
        if self.event.style and self.event.style in '|>':
            if (not self.flow_level and not self.simple_key_context
                    and self.analysis.allow_block):
                return self.event.style
        if not self.event.style or self.event.style == '\'':
            if (self.analysis.allow_single_quoted and
                    not (self.simple_key_context and self.analysis.multiline)):
                return '\''
        return '"'
    def process_scalar(self):
        """Write the current scalar event using the chosen style."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.style is None:
            self.style = self.choose_scalar_style()
        # Simple keys must stay on one line, so line splitting is disabled.
        split = (not self.simple_key_context)
        #if self.analysis.multiline and split    \
        #        and (not self.style or self.style in '\'\"'):
        #    self.write_indent()
        if self.style == '"':
            self.write_double_quoted(self.analysis.scalar, split)
        elif self.style == '\'':
            self.write_single_quoted(self.analysis.scalar, split)
        elif self.style == '>':
            self.write_folded(self.analysis.scalar)
        elif self.style == '|':
            self.write_literal(self.analysis.scalar)
        else:
            self.write_plain(self.analysis.scalar, split)
        # Reset per-scalar caches for the next event.
        self.analysis = None
        self.style = None
# Analyzers.
def prepare_version(self, version):
major, minor = version
if major != 1:
raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
return '%d.%d' % (major, minor)
def prepare_tag_handle(self, handle):
if not handle:
raise EmitterError("tag handle must not be empty")
if handle[0] != '!' or handle[-1] != '!':
raise EmitterError("tag handle must start and end with '!': %r" % handle)
for ch in handle[1:-1]:
if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_'):
raise EmitterError("invalid character %r in the tag handle: %r"
% (ch, handle))
return handle
def prepare_tag_prefix(self, prefix):
if not prefix:
raise EmitterError("tag prefix must not be empty")
chunks = []
start = end = 0
if prefix[0] == '!':
end = 1
while end < len(prefix):
ch = prefix[end]
if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?!:@&=+$,_.~*\'()[]':
end += 1
else:
if start < end:
chunks.append(prefix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append('%%%02X' % ord(ch))
if start < end:
chunks.append(prefix[start:end])
return ''.join(chunks)
def prepare_tag(self, tag):
if not tag:
raise EmitterError("tag must not be empty")
if tag == '!':
return tag
handle = None
suffix = tag
prefixes = sorted(self.tag_prefixes.keys())
for prefix in prefixes:
if tag.startswith(prefix) \
and (prefix == '!' or len(prefix) < len(tag)):
handle = self.tag_prefixes[prefix]
suffix = tag[len(prefix):]
chunks = []
start = end = 0
while end < len(suffix):
ch = suffix[end]
if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?:@&=+$,_.~*\'()[]' \
or (ch == '!' and handle != '!'):
end += 1
else:
if start < end:
chunks.append(suffix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append('%%%02X' % ord(ch))
if start < end:
chunks.append(suffix[start:end])
suffix_text = ''.join(chunks)
if handle:
return '%s%s' % (handle, suffix_text)
else:
return '!<%s>' % suffix_text
def prepare_anchor(self, anchor):
if not anchor:
raise EmitterError("anchor must not be empty")
for ch in anchor:
if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_'):
raise EmitterError("invalid character %r in the anchor: %r"
% (ch, anchor))
return anchor
def analyze_scalar(self, scalar):
# Empty scalar is a special case.
if not scalar:
return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
allow_flow_plain=False, allow_block_plain=True,
allow_single_quoted=True, allow_double_quoted=True,
allow_block=False)
# Indicators and special characters.
block_indicators = False
flow_indicators = False
line_breaks = False
special_characters = False
# Important whitespace combinations.
leading_space = False
leading_break = False
trailing_space = False
trailing_break = False
break_space = False
space_break = False
# Check document indicators.
if scalar.startswith('---') or scalar.startswith('...'):
block_indicators = True
flow_indicators = True
# First character or preceded by a whitespace.
preceded_by_whitespace = True
# Last character or followed by a whitespace.
followed_by_whitespace = (len(scalar) == 1 or
scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
# The previous character is a space.
previous_space = False
# The previous character is a break.
previous_break = False
index = 0
while index < len(scalar):
ch = scalar[index]
# Check for indicators.
if index == 0:
# Leading indicators are special characters.
if ch in '#,[]{}&*!|>\'\"%@`':
flow_indicators = True
block_indicators = True
if ch in '?:':
flow_indicators = True
if followed_by_whitespace:
block_indicators = True
if ch == '-' and followed_by_whitespace:
flow_indicators = True
block_indicators = True
else:
# Some indicators cannot appear within a scalar as well.
if ch in ',?[]{}':
flow_indicators = True
if ch == ':':
flow_indicators = True
if followed_by_whitespace:
block_indicators = True
if ch == '#' and preceded_by_whitespace:
flow_indicators = True
block_indicators = True
# Check for line breaks, special, and unicode characters.
if ch in '\n\x85\u2028\u2029':
line_breaks = True
if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
or '\uE000' <= ch <= '\uFFFD'
or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF':
unicode_characters = True
if not self.allow_unicode:
special_characters = True
else:
special_characters = True
# Detect important whitespace combinations.
if ch == ' ':
if index == 0:
leading_space = True
if index == len(scalar)-1:
trailing_space = True
if previous_break:
break_space = True
previous_space = True
previous_break = False
elif ch in '\n\x85\u2028\u2029':
if index == 0:
leading_break = True
if index == len(scalar)-1:
trailing_break = True
if previous_space:
space_break = True
previous_space = False
previous_break = True
else:
previous_space = False
previous_break = False
# Prepare for the next character.
index += 1
preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
followed_by_whitespace = (index+1 >= len(scalar) or
scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
# Let's decide what styles are allowed.
allow_flow_plain = True
allow_block_plain = True
allow_single_quoted = True
allow_double_quoted = True
allow_block = True
# Leading and trailing whitespaces are bad for plain scalars.
if (leading_space or leading_break
or trailing_space or trailing_break):
allow_flow_plain = allow_block_plain = False
# We do not permit trailing spaces for block scalars.
if trailing_space:
allow_block = False
# Spaces at the beginning of a new line are only acceptable for block
# scalars.
if break_space:
allow_flow_plain = allow_block_plain = allow_single_quoted = False
# Spaces followed by breaks, as well as special character are only
# allowed for double quoted scalars.
if space_break or special_characters:
allow_flow_plain = allow_block_plain = \
allow_single_quoted = allow_block = False
# Although the plain scalar writer supports breaks, we never emit
# multiline plain scalars.
if line_breaks:
allow_flow_plain = allow_block_plain = False
# Flow indicators are forbidden for flow plain scalars.
if flow_indicators:
allow_flow_plain = False
# Block indicators are forbidden for block plain scalars.
if block_indicators:
allow_block_plain = False
return ScalarAnalysis(scalar=scalar,
empty=False, multiline=line_breaks,
allow_flow_plain=allow_flow_plain,
allow_block_plain=allow_block_plain,
allow_single_quoted=allow_single_quoted,
allow_double_quoted=allow_double_quoted,
allow_block=allow_block)
# Writers.
def flush_stream(self):
if hasattr(self.stream, 'flush'):
self.stream.flush()
def write_stream_start(self):
# Write BOM if needed.
if self.encoding and self.encoding.startswith('utf-16'):
self.stream.write('\uFEFF'.encode(self.encoding))
def write_stream_end(self):
self.flush_stream()
def write_indicator(self, indicator, need_whitespace,
whitespace=False, indention=False):
if self.whitespace or not need_whitespace:
data = indicator
else:
data = ' '+indicator
self.whitespace = whitespace
self.indention = self.indention and indention
self.column += len(data)
self.open_ended = False
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_indent(self):
indent = self.indent or 0
if not self.indention or self.column > indent \
or (self.column == indent and not self.whitespace):
self.write_line_break()
if self.column < indent:
self.whitespace = True
data = ' '*(indent-self.column)
self.column = indent
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_line_break(self, data=None):
if data is None:
data = self.best_line_break
self.whitespace = True
self.indention = True
self.line += 1
self.column = 0
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_version_directive(self, version_text):
data = '%%YAML %s' % version_text
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
data = '%%TAG %s %s' % (handle_text, prefix_text)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
# Scalar streams.
def write_single_quoted(self, text, split=True):
self.write_indicator('\'', True)
spaces = False
breaks = False
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if spaces:
if ch is None or ch != ' ':
if start+1 == end and self.column > self.best_width and split \
and start != 0 and end != len(text):
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
elif breaks:
if ch is None or ch not in '\n\x85\u2028\u2029':
if text[start] == '\n':
self.write_line_break()
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
self.write_indent()
start = end
else:
if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
if start < end:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch == '\'':
data = '\'\''
self.column += 2
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end + 1
if ch is not None:
spaces = (ch == ' ')
breaks = (ch in '\n\x85\u2028\u2029')
end += 1
self.write_indicator('\'', False)
ESCAPE_REPLACEMENTS = {
'\0': '0',
'\x07': 'a',
'\x08': 'b',
'\x09': 't',
'\x0A': 'n',
'\x0B': 'v',
'\x0C': 'f',
'\x0D': 'r',
'\x1B': 'e',
'\"': '\"',
'\\': '\\',
'\x85': 'N',
'\xA0': '_',
'\u2028': 'L',
'\u2029': 'P',
}
def write_double_quoted(self, text, split=True):
self.write_indicator('"', True)
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
or not ('\x20' <= ch <= '\x7E'
or (self.allow_unicode
and ('\xA0' <= ch <= '\uD7FF'
or '\uE000' <= ch <= '\uFFFD'))):
if start < end:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch is not None:
if ch in self.ESCAPE_REPLACEMENTS:
data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
elif ch <= '\xFF':
data = '\\x%02X' % ord(ch)
elif ch <= '\uFFFF':
data = '\\u%04X' % ord(ch)
else:
data = '\\U%08X' % ord(ch)
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end+1
if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
and self.column+(end-start) > self.best_width and split:
data = text[start:end]+'\\'
if start < end:
start = end
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_indent()
self.whitespace = False
self.indention = False
if text[start] == ' ':
data = '\\'
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
end += 1
self.write_indicator('"', False)
def determine_block_hints(self, text):
hints = ''
if text:
if text[0] in ' \n\x85\u2028\u2029':
hints += str(self.best_indent)
if text[-1] not in '\n\x85\u2028\u2029':
hints += '-'
elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
hints += '+'
return hints
def write_folded(self, text):
hints = self.determine_block_hints(text)
self.write_indicator('>'+hints, True)
if hints[-1:] == '+':
self.open_ended = True
self.write_line_break()
leading_space = True
spaces = False
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in '\n\x85\u2028\u2029':
if not leading_space and ch is not None and ch != ' ' \
and text[start] == '\n':
self.write_line_break()
leading_space = (ch == ' ')
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
elif spaces:
if ch != ' ':
if start+1 == end and self.column > self.best_width:
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
else:
if ch is None or ch in ' \n\x85\u2028\u2029':
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = (ch in '\n\x85\u2028\u2029')
spaces = (ch == ' ')
end += 1
    def write_literal(self, text):
        """Emit *text* as a YAML literal block scalar ('|').

        Line breaks are written exactly as they appear; all other runs of
        characters are copied verbatim after the block indent.
        """
        hints = self.determine_block_hints(text)
        self.write_indicator('|'+hints, True)
        if hints[-1:] == '+':
            # Keep-chomping: the document may end without a final break.
            self.open_ended = True
        self.write_line_break()
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # Flush an accumulated run of line breaks, then re-indent.
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            else:
                # Flush an accumulated run of ordinary characters verbatim.
                if ch is None or ch in '\n\x85\u2028\u2029':
                    data = text[start:end]
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in '\n\x85\u2028\u2029')
            end += 1
    def write_plain(self, text, split=True):
        """Emit *text* as a plain (unquoted) scalar.

        A single separating space is written first unless the emitter is
        already at whitespace.  Runs of spaces may be replaced by an indent
        break when the line exceeds the wrap width and *split* is true.
        """
        if self.root_context:
            self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            # Separate from the previous token with one space.
            data = ' '
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # End of a space run: wrap the line instead of writing a
                # single space once past the wrap column.
                if ch != ' ':
                    if start+1 == end and self.column > self.best_width and split:
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # End of a line-break run: emit the breaks, then re-indent.
                if ch not in '\n\x85\u2028\u2029':
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                # End of an ordinary character run: flush it verbatim.
                if ch is None or ch in ' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
            if ch is not None:
                spaces = (ch == ' ')
                breaks = (ch in '\n\x85\u2028\u2029')
            end += 1
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import paddle.fluid.layers.ops as ops
from paddle.fluid.initializer import init_on_cpu
from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter
import paddle.fluid.core as core
from parallel_executor_test_base import TestParallelExecutorBase
import unittest
import math
import os
import numpy as np
# FIXME(zcd): If the neural net has dropout_op, the outputs of ParallelExecutor
# and Executor differ: ParallelExecutor copies the dropout_op once per device,
# so the random numbers it generates diverge from Executor's. To compare the
# losses of ParallelExecutor and Executor, the dropout_op must be removed.
# Toggled by the tests below (see TestResnet.setUpClass).
remove_dropout = False
# FIXME(zcd): If the neural net has batch_norm, the output of ParallelExecutor
# and Executor is different.
# Toggled by the tests below (see TestResnet.setUpClass).
remove_bn = False
def squeeze_excitation(input, num_channels, reduction_ratio):
    """Squeeze-and-Excitation block.

    Globally averages each channel (via reshape + reduce_mean, replacing the
    commented-out global pool2d), squeezes to num_channels // reduction_ratio
    units, re-expands with a sigmoid gate and rescales the input channels.

    Returns the channel-rescaled tensor.
    """
    # pool = fluid.layers.pool2d(
    #     input=input, pool_size=0, pool_type='avg', global_pooling=True)
    conv = input
    shape = conv.shape
    reshape = fluid.layers.reshape(
        x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
    pool = fluid.layers.reduce_mean(input=reshape, dim=2)
    # Bug fix: fc's `size` must be an integer; plain `/` yields a float
    # under Python 3, so use floor division.
    squeeze = fluid.layers.fc(input=pool,
                              size=num_channels // reduction_ratio,
                              act='relu')
    excitation = fluid.layers.fc(input=squeeze,
                                 size=num_channels,
                                 act='sigmoid')
    scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
    return scale
def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1,
                  act=None):
    """Conv2d (bias-free, 'same' padding) optionally followed by batch norm.

    When the module flag `remove_bn` is set, batch_norm is skipped so that
    ParallelExecutor and Executor outputs stay comparable; the activation is
    then applied by batch_norm only (i.e. no activation when bn is removed).
    """
    conv = fluid.layers.conv2d(
        input=input,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        # Bug fix: padding must be an int; `/` produces a float under
        # Python 3, so use floor division for the 'same' padding.
        padding=(filter_size - 1) // 2,
        groups=groups,
        act=None,
        bias_attr=False)
    return conv if remove_bn else fluid.layers.batch_norm(
        input=conv, act=act, momentum=0.1)
def shortcut(input, ch_out, stride):
    """Residual shortcut: identity when channel counts already match,
    otherwise a projection conv (1x1 for stride 1, 3x3 otherwise)."""
    if input.shape[1] == ch_out:
        return input
    ksize = 1 if stride == 1 else 3
    return conv_bn_layer(input, ch_out, ksize, stride)
def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
    """ResNeXt bottleneck block with squeeze-excitation.

    The first 1x1 convolution's channel count is halved relative to the
    paper to reduce computation cost.
    """
    reduce_conv = conv_bn_layer(
        input=input, num_filters=num_filters, filter_size=1, act='relu')
    group_conv = conv_bn_layer(
        input=reduce_conv,
        num_filters=num_filters * 2,
        filter_size=3,
        stride=stride,
        groups=cardinality,
        act='relu')
    expand_conv = conv_bn_layer(
        input=group_conv, num_filters=num_filters * 2, filter_size=1, act=None)
    scaled = squeeze_excitation(
        input=expand_conv,
        num_channels=num_filters * 2,
        reduction_ratio=reduction_ratio)
    residual = shortcut(input, num_filters * 2, stride)
    return fluid.layers.elementwise_add(x=residual, y=scaled, act='relu')
# Global input dimensions shared by the model builder and the test fixtures.
batch_size = 12
img_shape = [3, 224, 224]
def SE_ResNeXt50Small(use_feed):
    """Build a down-scaled SE-ResNeXt-50 classifier and return its mean
    cross-entropy loss over the 'image'/'label' data layers."""
    img = fluid.layers.data(name='image', shape=img_shape, dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    # Stem: three 3x3 convs followed by max pooling.
    x = conv_bn_layer(
        input=img, num_filters=16, filter_size=3, stride=2, act='relu')
    x = conv_bn_layer(
        input=x, num_filters=16, filter_size=3, stride=1, act='relu')
    x = conv_bn_layer(
        input=x, num_filters=16, filter_size=3, stride=1, act='relu')
    x = fluid.layers.pool2d(
        input=x, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
    cardinality = 32
    reduction_ratio = 16
    depth = [3, 4, 6, 3]
    num_filters = [128, 256, 512, 1024]
    # Four stages of bottleneck blocks; downsample at each stage after the
    # first.
    for stage, blocks in enumerate(depth):
        for i in range(blocks):
            x = bottleneck_block(
                input=x,
                num_filters=num_filters[stage],
                stride=2 if i == 0 and stage != 0 else 1,
                cardinality=cardinality,
                reduction_ratio=reduction_ratio)
    # Global average pooling via reshape + reduce_mean.
    shape = x.shape
    flat = fluid.layers.reshape(
        x=x, shape=[-1, shape[1], shape[2] * shape[3]])
    pool = fluid.layers.reduce_mean(input=flat, dim=2)
    dropout = pool if remove_dropout else fluid.layers.dropout(
        x=pool, dropout_prob=0.2, seed=1)
    # Classifier layer:
    prediction = fluid.layers.fc(input=dropout, size=1000, act='softmax')
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
    return fluid.layers.mean(loss)
def cosine_decay(learning_rate, step_each_epoch, epochs=120):
    """Cosine learning-rate decay.

    lr(epoch) = learning_rate * (cos(epoch * pi / epochs) + 1) / 2
    where epoch = floor(global_step / step_each_epoch).
    """
    step = _decay_step_counter()
    with init_on_cpu():
        cur_epoch = ops.floor(step / step_each_epoch)
        lr = learning_rate * (ops.cos(cur_epoch * (math.pi / epochs)) + 1) / 2
    return lr
def optimizer(learning_rate=0.01):
    """Momentum optimizer with a cosine-decayed learning rate and L2 decay."""
    decayed_lr = cosine_decay(
        learning_rate=learning_rate, step_each_epoch=2, epochs=1)
    return fluid.optimizer.Momentum(
        learning_rate=decayed_lr,
        momentum=0.9,
        regularization=fluid.regularizer.L2Decay(1e-4))
class TestResnet(TestParallelExecutorBase):
    """Convergence tests for SE-ResNeXt under ParallelExecutor.

    Compares ParallelExecutor against plain Executor, and the reduce
    strategy against all-reduce, on the losses of SE_ResNeXt50Small.
    """

    @classmethod
    def setUpClass(cls):
        # Run the CPU path on 4 virtual devices and reset the module-level
        # network-simplification flags for a clean slate.
        os.environ['CPU_NUM'] = str(4)
        global remove_dropout
        global remove_bn
        remove_dropout = False
        remove_bn = False

    def _init_data(self, batch_size=2, random=True):
        """Create a deterministic (image, label) batch (seeded RNG)."""
        np.random.seed(5)
        if random:
            img = np.random.random(
                size=[batch_size] + img_shape).astype(np.float32)
        else:
            img = np.ones(shape=[batch_size] + img_shape, dtype='float32')
        label = [np.random.randint(0, 999) for _ in range(batch_size)]
        label = np.array(label).astype(np.int64).reshape(-1, 1)
        return img, label

    def _compare_reduce_and_allreduce(self,
                                      model,
                                      use_cuda,
                                      iter=20,
                                      delta2=1e-6):
        """Check reduce and all-reduce gradient strategies converge alike.

        `delta2` is the tolerance for the final-iteration loss; the first
        iteration must match to 1e-6.
        """
        if use_cuda and not core.is_compiled_with_cuda():
            return
        # batch_norm makes the two executors diverge; drop it (see FIXME
        # at the module flags above).
        global remove_bn
        remove_bn = True
        img, label = self._init_data(batch_size=batch_size)
        all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence(
            model,
            feed_dict={"image": img,
                       "label": label},
            iter=iter,
            batch_size=batch_size,
            use_cuda=use_cuda,
            use_reduce=False,
            optimizer=optimizer)
        reduce_first_loss, reduce_last_loss = self.check_network_convergence(
            model,
            feed_dict={"image": img,
                       "label": label},
            iter=iter,
            batch_size=batch_size,
            use_cuda=use_cuda,
            use_reduce=True,
            optimizer=optimizer)
        # assertAlmostEquals is a deprecated alias; use assertAlmostEqual.
        for loss in zip(all_reduce_first_loss, reduce_first_loss):
            self.assertAlmostEqual(loss[0], loss[1], delta=1e-6)
        for loss in zip(all_reduce_last_loss, reduce_last_loss):
            self.assertAlmostEqual(loss[0], loss[1], delta=delta2)

    def _check_resnet_convergence(self,
                                  model,
                                  use_cuda=True,
                                  use_reduce=False,
                                  iter=20,
                                  delta2=1e-6):
        """Check ParallelExecutor matches plain Executor on the same feed."""
        if use_cuda and not core.is_compiled_with_cuda():
            return
        # dropout and batch_norm make the two executors diverge; drop both
        # (see the FIXME comments at the module flags above).
        global remove_dropout
        global remove_bn
        remove_dropout = True
        remove_bn = True
        img, label = self._init_data(batch_size=batch_size)
        single_first_loss, single_last_loss = self.check_network_convergence(
            model,
            feed_dict={"image": img,
                       "label": label},
            iter=iter,
            batch_size=batch_size,
            use_cuda=use_cuda,
            use_reduce=use_reduce,
            optimizer=optimizer,
            use_parallel_executor=False)
        parallel_first_loss, parallel_last_loss = self.check_network_convergence(
            model,
            feed_dict={"image": img,
                       "label": label},
            iter=iter,
            batch_size=batch_size,
            use_cuda=use_cuda,
            use_reduce=use_reduce,
            optimizer=optimizer)
        self.assertAlmostEqual(
            np.mean(parallel_first_loss), single_first_loss[0], delta=1e-6)
        self.assertAlmostEqual(
            np.mean(parallel_last_loss), single_last_loss[0], delta=delta2)

    def test_seresnext_with_learning_rate_decay(self):
        self._check_resnet_convergence(model=SE_ResNeXt50Small, use_cuda=True)
        self._check_resnet_convergence(
            model=SE_ResNeXt50Small, use_cuda=False, iter=2, delta2=1e-3)

    def test_seresnext_with_new_strategy(self):
        self._compare_reduce_and_allreduce(
            model=SE_ResNeXt50Small, use_cuda=True, delta2=1e-2)
        self._compare_reduce_and_allreduce(
            model=SE_ResNeXt50Small, use_cuda=False, iter=5)
# Run the SE-ResNeXt ParallelExecutor tests when invoked directly.
if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/env python
# OSM to VISSIM converter
from vissim_objs import Vissim
import networkx as nx
from osm_to_graph import read_osm
from osm_to_graph import BusStopNode
from collections import OrderedDict
import geo_math as geo
import math
import vissim_v8 as vissim
import matplotlib.pyplot as plt
import numpy as np
import sys
import pickle
#my added
# Input OSM extract and the highway categories considered drivable roads.
osmFile = "map.osm"
# Bug fix: the last alternative was misspelled 'teriary_ilnk'; it is meant
# to be the OSM 'tertiary_link' highway value.
highway_cat = 'motorway|trunk|primary|secondary|tertiary|road|residential|service|motorway_link|trunk_link|primary_link|secondary_link|tertiary_link'
def stringify(iterable):
    """ Convert tuple containing numbers to string
    Input: iterable with numbers as values
    Output: tuple with strings as values
    """
    return tuple(map(str, iterable))
class OSM(Vissim):
def __init__(self, osmFile ):
self.includeBusStops = '--include-bus-stops' in sys.argv #RV
self.G, self.osm = read_osm(osmFile)
#RV
c = 0
for n in self.osm.nodes:
if (type(self.osm.nodes[n]) is BusStopNode):
c += 1
pass
pass
print "Number of bs nodes is %d" %(c)
self.v = Vissim()
self.roadTypes = ['motorway', 'motorway_link', 'primary', 'secondary',
'tertiary', 'traffic_signals', 'bus_stop']
self.refLat, self.refLng = self.getRefLatLng()
self.refX, self.refY = self.latLngToMeters(self.refLat, self.refLng)
self.intersections = self.createIntersectionDict()
self.ways = self.createWaysDict()
if ( "--loadXYDict" not in sys.argv):
self.xy = self.createXYDict()
fh = open('xydict.pickle','wb')
pickle.dump(self.xy,open('xydict.pickle','wb'))
fh.close()
else:
self.xy = pickle.load(open('xydict.pickle','r'))
#RV
self.importLinks()
if ( self.includeBusStops == True):
self.processBusStops()
self.importConnectors()
self.v.createReference(self.refX, self.refY)
self.v.export("testxml.inpx")
'''
plt.ylim(-1.0, 1.0);
x = np.linspace(0, 10, 1000)
plt.plot(x, 2*np.sin(x*5));
plt.show()
'''
# Create reference point
    def getRefLatLng(self):
        """ Get reference lat/lng for xy conversion.
        Input: Graph
        Output: lat, long tuple
        """
        # Takes an arbitrary node's attributes as the reference point.
        # NOTE(review): `itervalues()` is Python-2-only (networkx 1.x
        # `G.node` dict) — would need `iter(...values())` under Python 3.
        latlng = self.G.node.itervalues().next()
        return latlng['lat'], latlng['lon']
# Boolean helper functions
def isOneway(self, attr):
""" Determine if link is oneway based on OSM attributes.
Input: OSM attribute
Output: boolean
"""
yes = ['yes', 'true', '1', '-1']
no = ['no', 'false', '0']
if attr.get('oneway') in yes:
return True
elif attr.get('oneway') in no:
return False
elif attr.get('highway') == 'motorway':
return True
elif attr.get('junction') == 'roundabout':
return True
else:
return False
def isNewWay(self, fromN, toN, prevAttr):
""" Determine if the current edge is a new way
Input: edge nodes, current attribute
Output: boolean
"""
if prevAttr is None:
return False
newID = self.G.edge[fromN][toN]['id']
oldID = prevAttr.get('id')
if newID == oldID:
return False
else:
return True
def isIntersection(self, n):
""" If node has more than two connecting nodes, it's an intersection.
"""
if len(set(self.G.successors(n) + self.G.predecessors(n))) > 2:
return True
else:
return False
def isExterior(self, n):
""" If a node has only one successor node and no predecessors, then
it's at the exterior of the model and pointing in the correct
direction.
"""
if len(self.G.successors(n)) == 1 and len(self.G.predecessors(n)) == 0:
# These are one-way start points, or bi-directional end points
return True
else:
return False
# Get start nodes and lane info for creating intersection and ways dicts
def getExteriorNodes(self):
""" Get a list of nodes that are on the exterior.
"""
nodes = []
for n in self.G.nodes():
if self.isExterior(n):
nodes.append(n)
return nodes
def getStartNodes(self):
""" Get a list of nodes that do not overlap when traversed.
"""
edgeNodes = self.getExteriorNodes()
for node in edgeNodes:
for prev, n in nx.edge_dfs(self.G, source=node):
if n in edgeNodes:
edgeNodes.remove(n)
return edgeNodes
# Intersection dict
def compassBearing(self, pointA, pointB):
""" Calculates the bearing between two points. Source:
https://gist.github.com/jeromer/2005586
Input:
Output: The bearing in degrees
"""
if (type(pointA) != tuple) or (type(pointB) != tuple):
raise TypeError("Only tuples are supported as arguments")
lat1 = math.radians(pointA[0])
lat2 = math.radians(pointB[0])
diffLong = math.radians(pointB[1] - pointA[1])
x = math.sin(diffLong) * math.cos(lat2)
y = (math.cos(lat1) * math.sin(lat2) - (math.sin(lat1) *
math.cos(lat2) * math.cos(diffLong)))
initial = math.atan2(x, y)
# Now we have the initial bearing but math.atan2 return values
# from -180 to + 180 which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial = math.degrees(initial)
compass = round((initial + 360) % 360, 1)
return compass
def getIntersection(self, node):
""" Get direction, bearing and lane information for all intersection
edges. Bearing is calculated from the common intersection node.
Input: intersection node
Output: dictionary of intersection attributes
"""
intersection = {}
nodePoint = (self.G.node[node]['lat'], self.G.node[node]['lon'])
for n in self.G.successors(node):
attr = self.G.edge[node][n]
if attr['highway'] not in self.roadTypes:
continue
nPoint = (self.G.node[n]['lat'], self.G.node[n]['lon'])
intersection[n] = {'beginning': True, 'lanes':
attr.get('lanes', 1), 'bearing':
self.compassBearing(nodePoint, nPoint),
'oneway': self.isOneway(attr), 'forward':
attr.get('lanes:forward', 1), 'backward':
attr.get('lanes:backward', 1)}
for n in self.G.predecessors(node):
attr = self.G.edge[n][node]
if attr['highway'] not in self.roadTypes:
continue
nPoint = (self.G.node[n]['lat'], self.G.node[n]['lon'])
intersection[n] = {'beginning': False, 'lanes':
attr.get('lanes', 1), 'bearing':
self.compassBearing(nodePoint, nPoint),
'oneway': self.isOneway(attr), 'forward':
attr.get('lanes:forward', 1), 'backward':
attr.get('lanes:backward', 1)}
return intersection
    def createIntersectionDict(self):
        """ Use depth-first search to map intersection nodes and lane widths.
        Input: graph, startNode
        Output: dictionary mapping of intersection nodes
        """
        intersections = {}
        # Visit every edge; record each target node that qualifies as an
        # intersection (may be re-processed when reached via several edges).
        for fromN, toN in nx.edge_dfs(self.G):
            if self.isIntersection(toN):
                print 'Processing intersection %d' %(int(toN))
                intersections[toN] = self.getIntersection(toN)
        return intersections
# Ways dict
    def getTurnLanes(self, attr, direction='forward'):
        """ Parse turn lane info.
        Input: link attribute (a dict holding the OSM tags under 'attr')
        Output: turn lane instructions, one list of movements per lane
        """
        attr = attr['attr']
        if self.isOneway(attr):
            if 'turn:lanes' in attr:
                turns = attr['turn:lanes'].split('|')
            elif 'lanes' in attr:
                lanes = int(attr['lanes'])
                turns = ['through'] * lanes
            else:
                turns = ['through']
        else:
            if 'turn:lanes:' + direction in attr:
                turns = attr['turn:lanes:' + direction].split('|')
            elif 'lanes:' + direction in attr:
                lanes = int(attr['lanes:' + direction])
                turns = ['through'] * lanes
            elif 'lanes' in attr:
                # Two-way with only a total count: assume half per direction.
                # NOTE(review): relies on Python 2 integer `/`; under
                # Python 3 this yields a float and the `*` below fails.
                lanes = int(attr['lanes']) / 2
                turns = ['through'] * lanes
            else:
                turns = ['through']
        # Empty or 'none' entries mean an unmarked lane: treat as through.
        turns = ['through' if i == '' or i.lower() == 'none' else i for i in
                 turns]
        # 'left;through' style entries become multi-movement lanes.
        return [i.split(';') for i in turns]
def getLanes(self, attr):
""" Determine number of lanes based on OSM attributes.
Input: OSM attribute
Output: tuple with number of lanes (forward, backward),
False if no lane numbers specified
"""
if self.isOneway(attr['attr']):
return len(self.getTurnLanes(attr)), 0
else:
forward = len(self.getTurnLanes(attr))
backward = len(self.getTurnLanes(attr, direction='backward'))
return forward, backward
    def calcLaneAlignment(self, waysDict):
        """ For a list of ways between two intersections, calculate the
        correct lane transitions as offsets from the centerline.
        Input: dict of ways (with 'forward' and 'backward' ordered dicts)
        Output: modified dict with offsets, keys suffixed '-F'/'-B'
        """
        # 'a' is the way touching the first intersection, 'b' the last.
        aFwdAttr = waysDict['forward'].values()[0]
        aBkdAttr = waysDict['backward'].values()[0]
        aFwdLanes, aBkdLanes = self.getLanes(aFwdAttr)
        bFwdAttr = waysDict['forward'].values()[-1]
        bBkdAttr = waysDict['backward'].values()[-1]
        bFwdLanes, bBkdLanes = self.getLanes(bFwdAttr)
        aLanes, bLanes = aFwdLanes + aBkdLanes, bFwdLanes + bBkdLanes
        aDiff, bDiff = aFwdLanes - aBkdLanes, bFwdLanes - bBkdLanes
        # Calculate the offsets for the (A/B) ways: those that connect to an
        # intersection
        if aLanes == bLanes:
            aFwdAttr['offset'] = aFwdLanes / 2.0 - (aDiff / 2.0)
            aBkdAttr['offset'] = aBkdLanes / 2.0 + (aDiff / 2.0)
            bFwdAttr['offset'] = bFwdLanes / 2.0 - (bDiff / 2.0)
            bBkdAttr['offset'] = bBkdLanes / 2.0 + (bDiff / 2.0)
        elif aLanes > bLanes:
            # Lane drop toward B: shift B's backward side by half the
            # number of dedicated left-turn lanes at A.
            aFwdAttr['offset'] = aFwdLanes / 2.0 - (aDiff / 2.0)
            aBkdAttr['offset'] = aBkdLanes / 2.0 + (aDiff / 2.0)
            bFwdAttr['offset'] = aFwdLanes / 2.0 - (aDiff / 2.0)
            aBkdTurns = self.getTurnLanes(aBkdAttr, direction='backward')
            lefts = sum([1 if i == ['left'] else 0 for i in aBkdTurns])
            bBkdAttr['offset'] = bBkdLanes / 2.0 + (bDiff / 2.0) + lefts / 2.0
        elif aLanes < bLanes:
            # Lane gain toward B: mirror image of the case above.
            bFwdAttr['offset'] = bFwdLanes / 2.0 - (bDiff / 2.0)
            bBkdAttr['offset'] = bBkdLanes / 2.0 + (bDiff / 2.0)
            aBkdAttr['offset'] = bBkdLanes / 2.0 + (bDiff / 2.0)
            bFwdTurns = self.getTurnLanes(bFwdAttr)
            lefts = sum([1 if i == ['left'] else 0 for i in bFwdTurns])
            aFwdAttr['offset'] = aFwdLanes / 2.0 + (aDiff / 2.0) + lefts / 2.0
        # Calculate the offsets for the intermediate ways: those that don't
        # connect to an intersection.
        if len(waysDict['forward']) > 2:
            fwdOffset = bBkdAttr['offset']
            bkdOffset = aFwdAttr['offset']
            fwdAttrs = waysDict['forward'].values()[1:-1]
            bkdAttrs = waysDict['backward'].values()[1:-1]
            for attr in fwdAttrs:
                attr['offset'] = fwdOffset
            for attr in bkdAttrs:
                attr['offset'] = bkdOffset
        # Merge both directions into one dict, tagging keys by direction.
        fwd = {k + '-F': v for k, v in waysDict['forward'].items()}
        bkd = {k + '-B': v for k, v in waysDict['backward'].items()}
        return dict(fwd, **bkd)
    def getWay(self, ways):
        """ Get way attributes and points.
        Input: list of node lists (one per way) between two intersections
        Output: way dictionary keyed by way id ('-F'/'-B' suffixed for
        two-way links)
        """
        waysDict = OrderedDict()
        if ways == [[]]:
            return waysDict
        for way in ways:
            # De-duplicate nodes while keeping their order.
            way = list(OrderedDict.fromkeys([n for n in way]))
            fromN = way[0]
            toN = way[1]
            attr = self.G.edge[fromN][toN]
            fwdLanes, bkdLanes = self.getLanes({'attr': attr})
            if self.isOneway(attr):
                if not waysDict.get('oneway'):
                    waysDict['oneway'] = OrderedDict()
                wayID = attr['id']
                waysDict['oneway'][wayID] = {'forward': True, 'nodes': way,
                                             'attr': attr, 'offset': 0,
                                             'laneNumber': fwdLanes}
            else:
                # Two-way links are stored twice: forward as-is, backward
                # with the node order reversed.
                wayID = attr['id']
                if (not waysDict.get('forward') and
                        not waysDict.get('backward')):
                    waysDict['forward'] = OrderedDict()
                    waysDict['backward'] = OrderedDict()
                waysDict['forward'][wayID] = {'forward': True, 'nodes': way,
                                              'attr': attr, 'laneNumber':
                                              fwdLanes}
                waysDict['backward'][wayID] = {'forward': False, 'nodes':
                                               list(reversed(way)), 'attr':
                                               attr, 'laneNumber': bkdLanes}
        if waysDict.get('forward') and waysDict.get('oneway'):
            fwdBkd = self.calcLaneAlignment(waysDict)
            return dict(fwdBkd, **waysDict['oneway'])
        elif waysDict.get('forward'):
            return self.calcLaneAlignment(waysDict)
        elif waysDict.get('oneway'):
            return waysDict['oneway']
        # NOTE(review): falls through to an implicit None when `ways` is
        # empty (no branch taken) — confirm callers tolerate that.
    def createWaysDict(self):
        """ Begin with startNode and traverse the graph, collecting the nodes
        of each way. When a new way is encountered, start a new list of
        nodes. When a new intersection is encountered, pass the list of
        ways to the getWay function for processing.
        Input: graph, startNode
        Output: dictionary used for creating VISSIM links
        """
        waysDict = {}
        ways = []
        nodes = []
        prevAttr = None
        currAttr = None
        for fromN, toN in nx.edge_dfs(self.G):
            currAttr = self.G.edge[fromN][toN]
            print 'createWaysDict : fromN %s toN %s ' %(fromN,toN)
            #print currAttr['highway']
            if currAttr['highway'] not in self.roadTypes:
                continue
            if self.isIntersection(fromN):
                # Close out the ways collected up to this intersection.
                ways.append(nodes)
                # print ways
                waysDict.update(self.getWay(ways))
                ways = []
                nodes = []
            elif self.isNewWay(fromN, toN, prevAttr):
                ways.append(nodes)
                nodes = []
            nodes.append(fromN)
            nodes.append(toN)
            prevAttr = currAttr
            if self.isExterior(toN):
                # NOTE(review): `nodes` is appended twice here and the first
                # getWay() result is discarded — looks redundant; confirm
                # whether one append/update was intended.
                ways.append(nodes)
                self.getWay(ways)
        ways.append(nodes)
        waysDict.update(self.getWay(ways))
        return waysDict
# XY dict - translate nodes from ways dict to X,Y points including lane
# offsets
def getWayByNode(self, fromN, toN):
if (fromN, toN) in self.G.edges():
# Forward way
wayID = self.G.edge[fromN][toN]['id']
if wayID in self.ways.keys():
return wayID
elif wayID + '-F' in self.ways.keys():
return wayID + '-F'
else:
raise KeyError
elif (toN, fromN) in self.G.edges():
# Backward way
wayID = self.G.edge[toN][fromN]['id'] + '-B'
if wayID in self.ways.keys():
return wayID
else:
raise KeyError
else:
raise KeyError
'''
RV Encodes the link id for vissim based on wayID used in this script
Input : wayID e.g typical syntax : '175878996-2'
Output : link ID in vissim syntax e.g. u'17587899602'
'''
def wayIDToVissimLinkNumber(self,wayID):
tmp = wayID
tmp2 = tmp.replace('-','0',1)
tmp3 = tmp2.split('-')
return(tmp3[0])
    '''
    RV Encodes wayID used in this script from the vissim link id
    Input : link ID in vissim syntax e.g. u'17587899602'
    Output : wayID e.g typical syntax : e.g typical syntax : '175878996-2'
    '''
    def vissimLinkNumberToWayID(self,vlinkID):
        # TODO: inverse of wayIDToVissimLinkNumber — not yet implemented;
        # currently a stub that returns None.
        #todo
        pass
    '''
    RV Returns all the way ids with the specified name
    '''
    def getWayByName(self, name):
        wByN = []
        for w in self.ways:
            #print self.ways[w]['attr']['name']
            try:
                if( self.ways[w]['attr']['name'] == name):
                    wByN.append( w )
            except:
                # NOTE(review): ways without a 'name' tag raise here and are
                # ALSO appended — so unnamed ways match every query; confirm
                # that is intended.
                #print 'Warning : way %s has no name' %(w)
                wByN.append( w )
                continue;
            pass
        pass
        return wByN
    pass
    '''
    RV Gets the way nearest to the given lat,lon
    '''
    def getNearestLinks(self, btNum, wayList, latlon):
        # Project the bus-stop position into the local scaled-meter frame.
        p1 = latlon
        BusStopPoint3D = self.latLngToScaledMeters(p1[0], p1[1])
        p2 = latlon
        offset = 0.0
        nw = wayList[0]
        # Maps the bus-stop number to a list of candidate link attributes.
        validLinksDict = {}
        curr = []
        validLinksDict[btNum] = curr
        for w in wayList:
            print 'processing way %s with %d nodes ' %(w, len(self.ways[w]['nodes']))
            linkAttr = {}
            linkAttr['wayID'] = w
            OriginOfWayPoint3D = self.ways[w]['point3D'][0]
            EndOfWayPoint3D = self.ways[w]['point3D'][-1]
            linkAttr['linkLength'] = geo.getDistance(OriginOfWayPoint3D,EndOfWayPoint3D)
            # len should be at least 2
            tmpSegList = []
            # Check every segment of the way for a perpendicular foot point.
            for i in range(len(self.ways[w]['nodes'])-1 ):
                node1Point3D = self.ways[w]['point3D'][i]
                node2Point3D = self.ways[w]['point3D'][i+1]
                nwl = geo.getDistance(node1Point3D,node2Point3D)
                #print 'processing segment of way %s with len %f ' %(w, nwl)
                BSPointOnWay = geo.getNearestPointOnSegment(BusStopPoint3D, (node1Point3D, node2Point3D))
                if (BSPointOnWay == None ):
                    #print '+++ Nearest point not on segment ; Skipping way %s' %(w)
                    continue;
                    pass
                # else valid perp to Bus stop found on segment
                # NOTE(review): linkAttr is shared across segments, so each
                # hit overwrites the same dict; tmpSegList ends up holding
                # repeated references to it — confirm intended.
                newpd = geo.getDistance(BusStopPoint3D,BSPointOnWay)
                linkAttr['PerpDistance'] = newpd
                linkAttr['BusStopPoint'] = BSPointOnWay
                linkAttr['DistanceToOrigin'] = geo.getDistance(BusStopPoint3D,OriginOfWayPoint3D)
                linkAttr['DistanceToEnd'] = geo.getDistance(BusStopPoint3D,EndOfWayPoint3D)
                linkAttr['Offset'] = geo.getDistance(BSPointOnWay, OriginOfWayPoint3D)
                tmpSegList.append(linkAttr)
                pass
            if (len(tmpSegList) == 0):
                continue;
            # Keep only the segment whose perpendicular distance is smallest.
            vls = sorted(tmpSegList,key=lambda k:k['PerpDistance'])
            curr = validLinksDict[btNum]
            try:
                curr.append ( vls[0])
                validLinksDict[btNum] = curr
            except:
                print '*** Failed to add item to valid links Dictionary'
            pass
        return validLinksDict
    '''
    RV Gets the nearest link
    todo : add checks - link segment which can fit the bus stop in it
    '''
    def selectNearest(self, validLinks ):
        #print 'Processing %d links ' %(len(validLinks))
        if (len(validLinks) <=0 ):
            print 'Empty list of links'
            return None
            pass
        # Pick the candidate with the smallest perpendicular distance.
        vls = sorted(validLinks,key=lambda k:k['PerpDistance'])
        # add check here - perpDistance should not be more than 10m >
        linkAttr = vls[0]
        newd1 = linkAttr['DistanceToOrigin']
        # Warn (but still return) when the link cannot fit a 20 m bus stop
        # beyond the stop's origin-side distance.
        if ( (newd1 + 20.0) > linkAttr['linkLength']): # since cannot accomodate start of busstop ?
            print '+++Warning : may need to skip link %s ; since too short?' %(linkAttr['wayID'])
        return linkAttr
        pass
def calcTurn(self, startBearing, endBearing):
thruMin, thruMax = 135, -135
leftMin, leftMax = -135, -45
rightMin, rightMax = 45, 135
left, right, through = None, None, None
diff = ((((startBearing-endBearing) % 360)+540) % 360) - 180
if diff >= thruMin or diff <= thruMax:
return 'through'
elif diff > leftMin and diff < leftMax:
return 'left'
elif diff > rightMin and diff < rightMax:
return 'right'
else:
return None
def calcTurns(self, intN, fromN):
""" For a given approach to an intersection, find the wayIDs
which represent left, through and right turns. In the case where
a way ends mid-block, then specify the next way to connect to.
Input: intersection node, from node
Output: Dict of wayIDs and turn movements
"""
turns = {'left': [], 'right': [], 'through': []}
try:
way1 = self.getWayByNode(fromN,intN)
way1Attr = self.ways[way1]
way1TurnLanes = self.getTurnLanes(way1Attr)
except:
way1Turns = {}
if intN in self.intersections:
intersection = self.intersections[intN]
startBearing = intersection[fromN]['bearing']
for n, attr in intersection.items():
endBearing = attr['bearing']
try:
wayID = self.getWayByNode(intN, n)
wayIDAttr = self.ways[wayID]
wayIDTurnLanes = self.getTurnLanes(wayIDAttr)
turn = self.calcTurn(startBearing, endBearing)
turns[turn].append(wayID)
#RV
# handle case where wayID is a little piece of the same
# way as way1 but should have been ideally fused into one
# way
# if wayID is having a right turn, add that id also.
# if wayID is having a left turn, add that id also.
if (turn == 'through'):
if self.hasTurn(wayIDTurnLanes, 'right') and self.hasTurn(way1TurnLanes,'right'):
turns['right'].append(wayID)
if self.hasTurn(wayIDTurnLanes, 'left') and self.hasTurn(way1TurnLanes,'left'):
turns['left'].append(wayID)
except:
continue
else:
if (fromN in self.G.successors(intN) and
len(self.G.predecessors(intN)) == 1):
n = self.G.predecessors(intN)[0]
try:
wayID = self.getWayByNode(intN, n)
turns['through'].append(wayID)
except:
pass
elif (fromN in self.G.predecessors(intN) and
len(self.G.successors(intN)) == 1):
n = self.G.successors(intN)[0]
try:
wayID = self.getWayByNode(intN, n)
turns['through'].append(wayID)
except:
pass
return turns
    def calcCrossSection(self, intN, fromN, bearing, clockwise=True):
        """ Calculate the parallel offsets for two-way and one-way links
        adjacent to the subject approach link in order to offsets to
        offset the subject link's endpoints.
        Input: intersection attribute, bearing, direction of cross street.
        Output: Adjusted endpoint offset distance (lanes, rounded to 0.1)
        """
        attr = self.intersections[intN][fromN]
        if attr['oneway']:
            # One-way: half the total lane count, projected by the bearing.
            return round(abs(int(attr['lanes']) *
                         math.sin(math.radians(bearing))) / 2.0, 1)
        else:
            # Two-way: pick the lane group (and way direction) facing the
            # subject approach, depending on which side the cross street is.
            if clockwise:
                if attr['beginning']:
                    # way is pointing away from the intersection
                    lane = 'backward'
                    wayID = self.getWayByNode(fromN, intN)
                else:
                    # way is pointing toward the intersection
                    lane = 'forward'
                    wayID = self.getWayByNode(intN, fromN)
            else:
                if attr['beginning']:
                    lane = 'forward'
                    wayID = self.getWayByNode(intN, fromN)
                else:
                    lane = 'backward'
                    wayID = self.getWayByNode(fromN, intN)
            # Centerline offset plus half the directional lane count,
            # projected onto the approach by the bearing.
            offset = float(self.ways[wayID]['offset'])
            offset += int(attr[lane]) / 2.0
            return round(abs(offset * math.sin(math.radians(bearing))), 1)
    def getCrossStreets(self, intN, fromN):
        """ Get cross street lane width information.
        Input: node pairs approaching intersection
        Output: cross section information (largest adjacent lane offset)
        """
        intersection = self.intersections[intN]
        startBearing = intersection[fromN]['bearing']
        minBearing, maxBearing = None, None
        left, right = None, None
        # Find the left- and right-turn cross streets and their signed
        # angular differences from the approach bearing.
        for n, attr in intersection.items():
            endBearing = attr['bearing']
            diff = ((((startBearing-endBearing) % 360)+540) % 360) - 180
            if self.calcTurn(startBearing, endBearing) == 'left':
                left = n
                maxBearing = diff
            elif self.calcTurn(startBearing, endBearing) == 'right':
                right = n
                minBearing = diff
        if left:
            leftLane = self.calcCrossSection(intN, left, maxBearing,
                                             clockwise=False)
        else:
            leftLane = 0
        if right:
            rightLane = self.calcCrossSection(intN, right, minBearing)
        else:
            rightLane = 0
        # The wider of the two cross streets governs the endpoint setback.
        return max([leftLane, rightLane])
    def latLngToMeters(self, lat, lng):
        """ Convert lat/lng to meters from 0,0 point on WGS84 map.
        (Spherical Mercator projection.)
        Input: WGS84 lat/lng
        Output: x,y in meters
        """
        assert abs(lng) <= 180, '%s exceeds longitudinal domain' % (lng)
        extent = 20015085  # height/width in meters of the VISSIM map
        x = lng * extent / 180.0
        # Mercator y: ln(tan(45deg + lat/2)), converted to degrees then
        # scaled by the same meters-per-degree factor as x.
        y = (math.log(math.tan((90 + lat) * math.pi / 360.0)) /
             (math.pi / 180.0))
        y = y * extent / 180.0
        return x, y
def latLngToScaledMeters(self, lat, lng):
x, y = self.latLngToMeters(lat, lng)
scale = 1 / math.cos(math.radians(self.refLat))
scaleX = (x - self.refX) / scale
scaleY = (y - self.refY) / scale
return (scaleX, scaleY, 0)
def getLatLng(self, n):
""" Return lat/lng tuple for a given node.
"""
return self.G.node[n]['lat'], self.G.node[n]['lon']
def nodeToScaledMeters(self, n):
""" Apply Mercator scaling factor based on latitude to xy points.
Input: node
Output: correctly scaled xy
"""
lat, lng = self.getLatLng(n)
x, y = self.latLngToMeters(lat, lng)
scale = 1 / math.cos(math.radians(self.refLat))
scaleX = (x - self.refX) / scale
scaleY = (y - self.refY) / scale
return (scaleX, scaleY, 0)
    def nodesToXY(self, attr):
        """ Process links dictionary to calculate proper XY coordinates.
        Input: links dictionary
        Output: updated links dictionary with xy dictionary
        """
        width = self.v.defaultWidth
        nodes = attr['nodes']
        #print "#######", nodes
        # Project every node into the scaled-meter frame.
        point3D = attr['point3D'] = [self.nodeToScaledMeters(n) for n in nodes]
        #print point3D
        # Parallel
        # Two-way links are shifted sideways from the centerline by their
        # lane offset.
        if not self.isOneway(attr):
            dist = attr['offset'] * width
            #print "###", point3D
            #print nodes
            #tt = geo.offsetEndpoint(point3D, 1.0)
            #print tt
            #print "@@@", point3D
            '''tt = [[468.514244231,48.6922583583,0],
                  [468.069585906,89.8343879775,0],
                  [476.929403034,136.402838403,0]]
            '''
            point3D = geo.offsetParallel(point3D, dist)
            #point3D = geo.offsetParallel(point3D, dist)
        # Endpoints / Turns
        # Pull link endpoints back from intersections by the width of the
        # widest adjacent cross street.
        if nodes[0] in self.intersections:
            dist = self.getCrossStreets(nodes[0], nodes[1]) * width
            point3D[0] = geo.offsetEndpoint(point3D, dist)
        if nodes[-1] in self.intersections:
            dist = self.getCrossStreets(nodes[-1], nodes[-2]) * width
            point3D[-1] = geo.offsetEndpoint(point3D, dist, beginning=False)
            # Turn dictionary only for ways pointing toward the intersection
            attr['turns'] = self.calcTurns(nodes[-1], nodes[-2])
        # Serialize the coordinates to strings for the VISSIM exporter.
        attr['point3D'] = [stringify(i) for i in point3D]
        return attr
def createXYDict(self):
    """ Create a dictionary for each way calculating node locations in
    XY space, compass bearing and intersection offsets.

    Returns: dict mapping trimmed way id -> attr dict updated by
    nodesToXY().
    """
    xy = {}
    for k, attr in self.ways.items():
        #print self.ways
        #RV extract the relevant parts of the way id without the -B/-F encoding
        tmp = k
        if (k.endswith('-B') or k.endswith('-F')):
            # Way ids look like "<id>-<seq>-B"/"-F"; keep only the
            # first two dash-separated fields as the dictionary key.
            tmp3 = tmp.split('-')
            tmp = tmp3[0] + '-' + tmp3[1]
        print 'XY>> Way being processed %s ' %(tmp)
        xy[tmp] = self.nodesToXY(attr)
    return xy
# Create VISSIM objects from OSM
def importLinks(self):
""" Create links based on xy dictionary, using attributes and
centerlines as a guide.
Input: xy dictionary
Output: modifies vissim object in place
"""
for wayID, attr in self.xy.items():
point3D = attr['point3D']
lanes = int(attr['laneNumber']) * [self.v.defaultWidth]
tmp = self.wayIDToVissimLinkNumber(wayID)
try:
self.v.Links.createLink(**{'point3D': point3D, 'lane': lanes,
'no': tmp})
except:
print 'Could not create link for %s' %(tmp)
continue
pass
pass
def hasTurn(self, turnLanes, turn):
    """ Check if a turning movement exists at an approach.
    Input: turnLanes, turn
    Output: bool
    """
    # The movement exists if any lane's turn spec mentions it.
    return any(turn in lane for lane in turnLanes)
def processTurns(self, fromLink, turnTo, turnLanes, turn):
    """ Create VISSIM connectors for one turning movement.
    Input: fromLink - origin link number
           turnTo - dict mapping movement name -> destination way ids
           turnLanes - per-lane turn specs for the approach
           turn - movement being processed ('left'/'through'/'right')
    Output: adds connectors to the vissim object in place
    """
    # Number of approach lanes permitting this movement.
    turns = sum([1 if turn in lane else 0 for lane in turnLanes])
    # 1-based index of the first matching lane, counted from the far
    # side (turnLanes reversed).  NOTE(review): non-matching lanes
    # contribute '' to min(), which relies on Python 2 ordering ints
    # before strings and yields '' when nothing matches -- callers are
    # expected to pre-check with hasTurn(); confirm.
    fromLane = min([i+1 if turn in v else '' for i, v in
                    enumerate(reversed(turnLanes))])
    for wayID in turnTo[turn]:
        #RV
        tmp = self.wayIDToVissimLinkNumber(wayID)
        print 'Processing wayid old %s new %s' %(wayID, tmp)
        try:
            attr = self.v.Links._getAttributes('no', tmp)
            toLink = self.v.Links._getAttributes('no', tmp)['no']
            lanes = len(self.v.Links.getLanes(toLink))
        except:
            # Destination link may not have been imported; skip it.
            print ' Attribute/lanes not found for way %s' %(tmp)
            continue
            pass
        # Cannot fan out into more lanes than the destination has.
        if lanes < turns:
            turns = lanes
        toLane = lanes - turns + 1
        try:
            self.v.Links.createConnector(fromLink, fromLane, toLink, toLane, turns)
        except:
            print ' creat connector failed '
            continue
        pass
def importConnectors(self):
    """ Create connectors based on xy dictionary.
    Input: xy dictionary
    Output: modifies vissim object in place
    """
    for wayID, attr in self.xy.items():
        # Only ways that end at an intersection carry a 'turns' dict
        # (set in nodesToXY).
        if 'turns' in attr:
            # A trailing 'B' marks the backward half of a two-way way.
            if wayID[-1] == 'B':
                direction = 'backward'
            else:
                direction = 'forward'
            #RV
            tmp = self.wayIDToVissimLinkNumber(wayID)
            print 'Processing wayid %s' %(tmp)
            try:
                fromLink = self.v.Links._getAttributes('no', tmp)['no']
            except:
                # Link was never imported; nothing to connect from.
                print 'Discarding wayID %s' %(tmp)
                continue
                pass
            turnTo = attr['turns']
            turnLanes = self.getTurnLanes(attr, direction=direction)
            # Emit connectors for every movement that has both a
            # destination way and a lane permitting it.
            if len(turnTo['left']) > 0 and self.hasTurn(turnLanes, 'left'):
                self.processTurns(fromLink, turnTo, turnLanes, 'left')
            if (len(turnTo['through']) > 0 and
                    self.hasTurn(turnLanes, 'through')):
                self.processTurns(fromLink, turnTo, turnLanes, 'through')
            if (len(turnTo['right']) > 0 and
                    self.hasTurn(turnLanes, 'right')):
                self.processTurns(fromLink, turnTo, turnLanes, 'right')
def processBusStops(self):
    """ Create VISSIM PT stops from OSM bus-stop nodes.

    Only bus stops with an integer 'asset_ref' tag (the bt number) are
    processed; each is attached to the nearest link of a way whose name
    matches the stop's 'location' tag.
    """
    ptStops = []
    for n in self.osm.nodes:
        if (type(self.osm.nodes[n]) is BusStopNode):
            try:
                locName = self.osm.nodes[n].tags['location']
            except:
                locName = ''
            try:
                btNum = int(self.osm.nodes[n].tags['asset_ref'])
            except:
                continue ; # skip bus stop without a bt number
            print 'NonWayNode found of type %s <%d> is at location %s' %(self.osm.nodes[n].typeTag, btNum, locName)
            wayList = []
            lat = self.osm.nodes[n].lat
            lon = self.osm.nodes[n].lon
            wayList = self.getWayByName(locName)
            if (len(wayList) != 0) :
                # Rank candidate links of the matching ways by distance.
                nearestLinks = self.getNearestLinks(btNum, wayList, (self.osm.nodes[n].lat, self.osm.nodes[n].lon))
                if (len(nearestLinks[btNum]) ==0 ):
                    continue
                nearestWay = self.selectNearest(nearestLinks[btNum])
                if (nearestWay == None):
                    print 'No links for bus stop %d ' %(btNum)
                    continue;
                pass
                print 'Nearest Way is %s <len %f> at distance %f m from BusStop' %(nearestWay['wayID'], nearestWay['linkLength'],nearestWay['PerpDistance'])
                #print 'Offset of bus-stop to start of link is %f meters' %(nearestWay['DistanceToOrigin'])
                pts = self.InitPTStop(btNum,nearestWay)
                ptStops.append(pts)
            else:
                print 'No ways with name %s ' %(locName)
                pass
            pass
        pass
    print 'Processed Bus stops -- count = %d' %(len(ptStops))
    # Materialize the collected records in the vissim object.
    for i in range(len(ptStops)):
        print ptStops[i]
        self.v.PTStop.createptStop(**ptStops[i])
        pass
    #RV --2018--Jan--
    pass
def InitPTStop(self,btNum, wayInfo):
    """ Build the keyword-argument record for one VISSIM PT stop.
    Input: btNum - bus stop number, used as the stop's 'no'
           wayInfo - dict with 'wayID', 'linkLength', 'DistanceToOrigin'
    Output: dict suitable for PTStop.createptStop(**record)
    """
    ptStopRecord = {}
    ptStopRecord['no'] = btNum
    if (wayInfo['linkLength'] < 20):
        # Default platform would not fit; shrink it to the whole link.
        print ' Bus stop on v. short way'
        ptStopRecord['length'] = wayInfo['linkLength']
        pass
    offset = wayInfo['DistanceToOrigin']
    if ((offset + 20) >= wayInfo['linkLength']):
        # Pull the platform back (assumes a 20 m stop length -- TODO
        # confirm) so it stays on the link, clamping at the origin.
        print ' Adjusting offset for Bus stop '
        offset = offset - 20.0;
        if (offset < 0.0): #RV No negative offsets
            offset = 0.0
        pass
    ptStopRecord['pos'] = offset
    #RV : fix the lane number
    # Place the stop in the highest-numbered lane of the link.
    lanes = int(self.ways[wayInfo['wayID']]['laneNumber'])
    tmp = self.wayIDToVissimLinkNumber(wayInfo['wayID'])
    ptStopRecord['lane'] = tmp + ' ' + str(lanes)
    return ptStopRecord
#my added
if __name__ == '__main__':
    # NOTE(review): neither OSM nor osmFile is defined in this chunk --
    # presumably they come from earlier in the file; confirm before use.
    o=OSM(osmFile)
    #o = OSM(osmFile)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.contrib import nvcc
import numpy as np
import time
import tvm.testing
@tvm.testing.requires_gpu
def test_exp():
    """Elementwise exp on several GPU backends, verified against numpy."""
    # graph
    n = tvm.runtime.convert(1024)
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda *i: te.exp(A(*i)), name="B")
    s = te.create_schedule(B.op)
    # create iter var and assign them tags.
    num_thread = 8
    bx, tx = s[B].split(B.op.axis[0], factor=num_thread)
    s[B].bind(bx, te.thread_axis("blockIdx.x"))
    s[B].bind(tx, te.thread_axis("threadIdx.x"))
    # one line to build the function.
    def check_device(device, host="stackvm"):
        if not tvm.testing.device_enabled(host):
            return
        if not tvm.testing.device_enabled(device):
            print("skip because %s is not enabled.." % device)
            return
        fexp = tvm.build(s, [A, B], device, host, name="myexp")
        # Single device handle (the original created it twice; the
        # first one was never used).
        dev = tvm.device(device, 0)
        # launch the kernel.
        n = 1024
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
        fexp(a, b)
        tvm.testing.assert_allclose(b.numpy(), np.exp(a.numpy()), rtol=1e-5)
    check_device("opencl -device=intel_graphics")
    check_device("cuda", "llvm")
    check_device("vulkan")
@tvm.testing.requires_gpu
def test_fmod():
    """Elementwise fmod on GPU backends, checked against np.mod
    (inputs are kept positive, where the two agree)."""
    # graph
    def run(dtype):
        n = te.size_var("n")
        A = te.placeholder((n,), name="A", dtype=dtype)
        B = te.placeholder((n,), name="B", dtype=dtype)
        C = te.compute(A.shape, lambda *i: te.fmod(A(*i), B(*i)), name="C")
        s = te.create_schedule(C.op)
        # create iter var and assign them tags.
        num_thread = 8
        bx, tx = s[C].split(C.op.axis[0], factor=num_thread)
        def check_device(device):
            dev = tvm.device(device, 0)
            if not tvm.testing.device_enabled(device):
                print("skip because %s is not enabled.." % device)
                return
            target = tvm.target.Target(device)
            # Thread-axis bindings only apply to non-CPU targets.
            if "cpu" not in target.keys:
                s[C].bind(bx, te.thread_axis("blockIdx.x"))
                s[C].bind(tx, te.thread_axis("threadIdx.x"))
            fmod = tvm.build(s, [A, B, C], device, name="myfmod")
            # launch the kernel.
            n = 1024
            a_np = (np.random.uniform(size=n) * 256).astype(A.dtype)
            b_np = (np.random.uniform(size=n) * 256).astype(B.dtype)
            # "fix" the values in a and b to avoid the result being too small
            b_np += (b_np < 2.0) * 2
            a_np[np.abs(np.fmod(a_np, b_np)) < 1] += 1
            a = tvm.nd.array(a_np, dev)
            b = tvm.nd.array(b_np, dev)
            c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
            ftimer = fmod.time_evaluator(fmod.entry_name, dev, number=1)
            tcost = ftimer(a, b, c).mean
            # fmod(a, b, c)
            np.testing.assert_allclose(c.numpy(), np.mod(a.numpy(), b.numpy()), rtol=1e-5)
        check_device("cuda")
        check_device("opencl -device=intel_graphics")
        check_device("metal")
    run("float32")
@tvm.testing.requires_gpu
def test_multiple_cache_write():
    """Schedule with a multi-output cache_write stage; checks
    C = (A0 + A1) + (A0 * A1) against numpy."""
    # graph
    n = tvm.runtime.convert(1024)
    A0 = te.placeholder((n,), name="A0", dtype="float32")
    A1 = te.placeholder((n,), name="A1", dtype="float32")
    # One op with two outputs: the sum and the product.
    B0, B1 = te.compute((n,), lambda *i: (A0(*i) + A1(*i), A0(*i) * A1(*i)), name="B")
    C = te.compute((n,), lambda *i: B0(*i) + B1(*i), name="C")
    s = te.create_schedule(C.op)
    # create iter var and assign them tags.
    num_thread = 8
    B0_cache, B1_cache = s.cache_write([B0, B1], "local")
    bx, tx = s[C].split(C.op.axis[0], factor=num_thread)
    s[B0].compute_at(s[C], bx)
    s[B0_cache].compute_at(s[C], bx)
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    # one line to build the function.
    def check_device(device, host="stackvm"):
        if not tvm.testing.device_enabled(host):
            return
        # NOTE(review): dev is created twice below; the first handle is
        # unused.
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            return
        func = tvm.build(s, [A0, A1, C], device, host, name="multiple_cache_write")
        dev = tvm.device(device, 0)
        # launch the kernel.
        n = 1024
        a0 = tvm.nd.array(np.random.uniform(size=n).astype(A0.dtype), dev)
        a1 = tvm.nd.array(np.random.uniform(size=n).astype(A1.dtype), dev)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
        func(a0, a1, c)
        tvm.testing.assert_allclose(
            c.numpy(), a0.numpy() + a1.numpy() + (a0.numpy() * a1.numpy()), rtol=1e-5
        )
    check_device("cuda", "llvm")
    check_device("vulkan")
    check_device("opencl")
def test_log_pow_llvm():
    """Compute log(x)**2 with the LLVM backend and check against numpy,
    also exercising the time_evaluator repeat count."""
    # Describe the computation graph.
    num = te.size_var("n")
    x = te.placeholder((num,), name="A")
    y = te.compute(x.shape, lambda *i: te.power(te.log(x(*i)), 2.0), name="B")
    sched = te.create_schedule(y.op)
    # Split the axis to exercise the scheduling path.
    outer, inner = sched[y].split(y.op.axis[0], factor=32)
    # Build and run only when the LLVM backend is available.
    if not tvm.testing.device_enabled("llvm"):
        return
    fn = tvm.build(sched, [x, y], "llvm", name="mylog")
    cpu_dev = tvm.cpu(0)
    # Launch the kernel.
    length = 1028
    a_nd = tvm.nd.array(np.random.uniform(size=length).astype(x.dtype), cpu_dev)
    b_nd = tvm.nd.array(np.zeros(length, dtype=y.dtype), cpu_dev)
    repeat = 10
    evaluator = fn.time_evaluator(fn.entry_name, cpu_dev, number=1, repeat=repeat)
    profile = evaluator(a_nd, b_nd)
    assert len(profile.results) == repeat
    tvm.testing.assert_allclose(b_nd.numpy(), np.power(np.log(a_nd.numpy()), 2.0), rtol=1e-5)
@tvm.testing.uses_gpu
def test_popcount():
    """Elementwise popcount for uint32/uint64, verified against
    Python's bin().count('1')."""
    def run(dtype):
        # graph
        n = tvm.runtime.convert(1024)
        A = te.placeholder((n,), name="A", dtype=dtype)
        B = te.compute(A.shape, lambda *i: tvm.tir.popcount(A(*i)), name="B")
        s = te.create_schedule(B.op)
        # simple schedule
        num_thread = 8
        bx, tx = s[B].split(B.op.axis[0], factor=num_thread)
        def check_device(device):
            dev = tvm.device(device, 0)
            if not tvm.testing.device_enabled(device):
                print("skip because %s is not enabled.." % device)
                return
            target = tvm.target.Target(device)
            # Thread-axis bindings only apply to non-CPU targets.
            if "cpu" not in target.keys:
                s[B].bind(bx, te.thread_axis("blockIdx.x"))
                s[B].bind(tx, te.thread_axis("threadIdx.x"))
            func = tvm.build(s, [A, B], device)
            # launch the kernel.
            n = 1024
            a = tvm.nd.array(np.random.randint(low=0, high=1000, size=n, dtype=A.dtype), dev)
            b = tvm.nd.array(np.zeros(shape=n, dtype=B.dtype), dev)
            func(a, b)
            tvm.testing.assert_allclose(
                b.numpy(), list(map(lambda x: bin(x).count("1"), a.numpy())), rtol=1e-5
            )
        check_device("llvm")
        check_device("cuda")
        check_device("opencl")
        # metal and vulkan are only exercised for the 32-bit case here.
        if dtype == "uint32":
            check_device("metal")
            check_device("vulkan")
    run("uint32")
    run("uint64")
@tvm.testing.requires_gpu
def test_add():
    """Vectorized elementwise add across several dtypes and backends.

    NOTE(review): bias and scale vars are declared but never used in
    the computation -- presumably leftovers; confirm.
    """
    def run(dtype):
        # graph
        n = te.size_var("n")
        A = te.placeholder((n,), name="A", dtype=dtype)
        B = te.placeholder((n,), name="B", dtype=dtype)
        bias = te.var("bias", dtype=dtype)
        scale = te.var("scale", dtype=dtype)
        C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
        # schedule
        s = te.create_schedule(C.op)
        # create iter var and assign them tags.
        num_thread = 16
        # Three-level split: block x thread x 4-wide vector lanes.
        bx, x = s[C].split(C.op.axis[0], factor=num_thread * 4)
        tx, x = s[C].split(x, nparts=num_thread)
        _, x = s[C].split(x, factor=4)
        s[C].bind(bx, te.thread_axis("blockIdx.x"))
        s[C].bind(tx, te.thread_axis("threadIdx.x"))
        s[C].vectorize(x)
        # one line to build the function.
        def check_device(device):
            dev = tvm.device(device, 0)
            if not tvm.testing.device_enabled(device):
                print("skip because %s is not enabled.." % device)
                return
            fadd = tvm.build(s, [A, B, C], device, name="myadd")
            # launch the kernel.
            n = 1024
            a = tvm.nd.array((np.random.uniform(size=n) * 256).astype(A.dtype), dev)
            b = tvm.nd.array((np.random.uniform(size=n) * 256).astype(B.dtype), dev)
            c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
            ftimer = fadd.time_evaluator(fadd.entry_name, dev, number=1)
            tcost = ftimer(a, b, c).mean
            tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy(), rtol=1e-6)
        check_device("opencl")
        check_device("cuda")
        # metal and vulkan are only exercised for float32 here.
        if dtype == "float32":
            check_device("metal")
            check_device("vulkan")
    run("float32")
    run("int32")
    run("int64")
    run("uint64")
@tvm.testing.requires_gpu
def try_warp_memory():
    """skip this in default test because it require higher arch"""
    m = 128
    A = te.placeholder((m,), name="A")
    B = te.compute((m,), lambda i: A[i] + 3, name="B")
    warp_size = 32
    s = te.create_schedule(B.op)
    # Stage A through "warp" memory to exercise warp-level storage.
    AA = s.cache_read(A, "warp", [B])
    xo, xi = s[B].split(B.op.axis[0], warp_size * 2)
    xi0, xi1 = s[B].split(xi, factor=warp_size)
    tx = te.thread_axis("threadIdx.x")
    s[B].bind(xi1, tx)
    s[B].bind(xo, te.thread_axis("blockIdx.x"))
    s[AA].compute_at(s[B], xo)
    xo, xi = s[AA].split(s[AA].op.axis[0], warp_size)
    s[AA].bind(xi, tx)
    # Override CUDA compilation to go through nvcc and emit PTX.
    @tvm.register_func("tvm_callback_cuda_compile", override=True)
    def tvm_callback_cuda_compile(code):
        ptx = nvcc.compile_cuda(code, target_format="ptx")
        return ptx
    # one line to build the function.
    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("skip because %s is not enabled.." % device)
            return
        f = tvm.build(s, [A, B], device)
        a = tvm.nd.array((np.random.uniform(size=m) * 256).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(m, dtype=B.dtype), dev)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), a.numpy() + 3, rtol=1e-6)
    check_device("cuda")
if __name__ == "__main__":
    # Run every test directly when invoked as a script.
    test_exp()
    try_warp_memory()
    test_multiple_cache_write()
    test_add()
    test_log_pow_llvm()
    test_popcount()
    test_fmod()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for slice op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class SliceTest(test.TestCase):
  """Functional tests for tf.slice and tensor __getitem__ slicing.

  Covers forward slicing across dtypes and ranks, int32/int64 index
  handling, shape inference, and gradients (both graph-mode
  gradients_impl and eager GradientTape).
  """

  def testEmpty(self):
    inp = np.random.rand(4, 4).astype("f")
    for k in xrange(4):
      with self.cached_session(use_gpu=True):
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
        slice_t = a[2, k:k]
        slice_val = self.evaluate(slice_t)
        self.assertAllEqual(slice_val, inp[2, k:k])

  def testInt32(self):
    inp = np.random.rand(4, 4).astype("i")
    for k in xrange(4):
      with self.cached_session(use_gpu=True):
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
        slice_t = a[2, k:k]
        slice_val = self.evaluate(slice_t)
        self.assertAllEqual(slice_val, inp[2, k:k])

  def testSlicingWithInt64Index(self):
    with self.cached_session(force_gpu=test.is_gpu_available()):
      a = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
      # Slice using int64 Tensor.
      i = constant_op.constant(1, dtype=dtypes.int64)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i+1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)
      # Slice using int64 integer.
      i = np.asarray(1).astype(np.int64)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i+1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)
      a_int32 = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
      slice_t = array_ops.slice(a_int32,
                                np.asarray([1]).astype(np.int64),
                                np.asarray([2]).astype(np.int64))
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)
      a_float32 = constant_op.constant([0, 1, 2], dtype=dtypes.float32)
      slice_t = array_ops.slice(a_float32,
                                np.asarray([1]).astype(np.int64),
                                np.asarray([2]).astype(np.int64))
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)

  def testSlicingInt64Tensor(self):
    with self.cached_session(force_gpu=test.is_gpu_available()):
      a = constant_op.constant([0, 1, 2], dtype=dtypes.int64)
      # Slice using int32 Tensor.
      i = constant_op.constant(1, dtype=dtypes.int32)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i + 1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)
      # Slice using int32 integer.
      i = np.asarray(1).astype(np.int32)
      slice_t = a[i]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual(1, slice_val)
      slice_t = a[i:i + 1]
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1], slice_val)
      slice_t = array_ops.slice(a, [1], [2])
      slice_val = self.evaluate(slice_t)
      self.assertAllEqual([1, 2], slice_val)

  def testSelectAll(self):
    for _ in range(10):
      with self.cached_session(use_gpu=True):
        inp = np.random.rand(4, 4, 4, 4).astype("f")
        a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)
        slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
        slice_implicit_t = a[:, :, :, :]
        self.assertAllEqual(inp, self.evaluate(slice_explicit_t))
        self.assertAllEqual(inp, self.evaluate(slice_implicit_t))
        self.assertEqual(inp.shape, slice_explicit_t.get_shape())
        self.assertEqual(inp.shape, slice_implicit_t.get_shape())

  def testSingleDimension(self):
    for _ in range(10):
      with self.cached_session(use_gpu=True):
        inp = np.random.rand(10).astype("f")
        a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)
        hi = np.random.randint(0, 9)
        scalar_t = a[hi]
        scalar_val = self.evaluate(scalar_t)
        self.assertAllEqual(scalar_val, inp[hi])
        if hi > 0:
          lo = np.random.randint(0, hi)
        else:
          lo = 0
        slice_t = a[lo:hi]
        slice_val = self.evaluate(slice_t)
        self.assertAllEqual(slice_val, inp[lo:hi])

  def test3Dimension(self):
    """Slices the output of a 3-D convolution and checks known values."""
    with self.cached_session():
      input_shape = [8, 16, 16, 16, 8]
      total_input_size = 1
      for s in input_shape:
        total_input_size *= s
      inputs = [
          i * 1.0 / total_input_size for i in range(1, total_input_size + 1)
      ]
      a = constant_op.constant(inputs, shape=input_shape, dtype=dtypes.float32)
      filter_shape = [1, 1, 1, 8, 8]
      total_filter_size = 1
      for s in filter_shape:
        total_filter_size *= s
      filters = [
          i * 1.0 / total_filter_size for i in range(1, total_filter_size + 1)
      ]
      f = constant_op.constant(
          filters, shape=filter_shape, dtype=dtypes.float32)
      conv_t = nn_ops.conv3d(
          a, filter=f, strides=[1, 1, 1, 1, 1], padding="VALID")
      slice_t = array_ops.slice(conv_t, [0, 1, 1, 1, 0], [1, 1, 1, 1, 8])
      result = self.evaluate(slice_t)
      expected = [
          0.03028321, 0.03132677, 0.03237033, 0.03341389, 0.03445745, 0.035501,
          0.03654456, 0.03758812
      ]
      self.assertAllClose(expected, result.flatten(), rtol=1e-6)

  def testScalarInput(self):
    input_val = 0
    # Test with constant input; shape inference fails.
    with self.assertRaisesWithPredicateMatch(
        (ValueError, errors_impl.InvalidArgumentError), "out of range"):
      constant_op.constant(input_val)[:].get_shape()

    # Test evaluating with non-constant input; kernel execution fails.
    @def_function.function
    def func(input_t):
      slice_t = input_t[:]
      return slice_t

    with self.assertRaisesWithPredicateMatch(TypeError, "not subscriptable"):
      self.evaluate(func(input_val))

  def testInvalidIndex(self):
    input_val = [1, 2]
    # Test with constant input; shape inference fails.
    with self.assertRaisesWithPredicateMatch(
        (ValueError, errors_impl.InvalidArgumentError), "out of range"):
      constant_op.constant(input_val)[1:, 1:].get_shape()

    # Test evaluating with non-constant input; kernel execution fails.
    @def_function.function
    def func(input_t):
      slice_t = input_t[1:, 1:]
      return slice_t

    with self.assertRaisesWithPredicateMatch(
        TypeError, "must be integers or slices, not tuple"):
      self.evaluate(func(input_val))

  def _testSliceMatrixDim0(self, x, begin, size):
    """Slices rows [begin, begin+size) of matrix x and compares to numpy."""
    tf_ans = self.evaluate(array_ops.slice(x, [begin, 0], [size, x.shape[1]]))
    np_ans = x[begin:begin + size, :]
    self.assertAllEqual(tf_ans, np_ans)

  def testSliceMatrixDim0(self):
    x = np.random.rand(8, 4).astype("f")
    self._testSliceMatrixDim0(x, 1, 2)
    self._testSliceMatrixDim0(x, 3, 3)
    y = np.random.rand(8, 7).astype("f")  # 7 * sizeof(float) is not aligned
    self._testSliceMatrixDim0(y, 1, 2)
    self._testSliceMatrixDim0(y, 3, 3)

  def testSingleElementAll(self):
    for _ in range(10):
      with self.cached_session(use_gpu=True):
        inp = np.random.rand(4, 4).astype("f")
        a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
        x, y = np.random.randint(0, 3, size=2).tolist()
        slice_t = a[x, 0:y]
        slice_val = self.evaluate(slice_t)
        self.assertAllEqual(slice_val, inp[x, 0:y])

  def testSimple(self):
    with test_util.use_gpu():
      # np.bool_ replaces the np.bool alias, which was deprecated in
      # NumPy 1.20 and removed in 1.24; behavior is identical.
      for dtype in [
          np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.bool_,
          np.float16, np.float32, np.float64, np.complex64, np.complex128,]:
        inp = np.random.rand(4, 4).astype(dtype)
        a = constant_op.constant(
            [float(x) for x in inp.ravel(order="C")],
            shape=[4, 4],
            dtype=dtypes.float32)
        slice_t = array_ops.slice(a, [0, 0], [2, 2])
        slice2_t = a[:2, :2]
        slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
        self.assertAllEqual(slice_val, np.array(inp[:2, :2], dtype=np.float32))
        self.assertAllEqual(slice2_val, np.array(inp[:2, :2], dtype=np.float32))
        self.assertEqual(slice_val.shape, slice_t.get_shape())
        self.assertEqual(slice2_val.shape, slice2_t.get_shape())

  def testComplex(self):
    inp = np.random.rand(4, 10, 10, 4).astype("f")
    a = constant_op.constant(inp, dtype=dtypes.float32)
    x = np.random.randint(0, 9)
    z = np.random.randint(0, 9)
    if z > 0:
      y = np.random.randint(0, z)
    else:
      y = 0
    slice_t = a[:, x, y:z, :]
    self.assertAllEqual(slice_t, inp[:, x, y:z, :])

  def testRandom(self):
    # Random dims of rank 6
    input_shape = np.random.randint(0, 20, size=6)
    inp = np.random.rand(*input_shape).astype("f")
    a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
                             shape=input_shape,
                             dtype=dtypes.float32)
    indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
    sizes = [
        np.random.randint(0, input_shape[i] - indices[i] + 1) for i in range(6)
    ]
    slice_t = array_ops.slice(a, indices, sizes)
    slice2_t = a[indices[0]:indices[0] + sizes[0],
                 indices[1]:indices[1] + sizes[1],
                 indices[2]:indices[2] + sizes[2],
                 indices[3]:indices[3] + sizes[3],
                 indices[4]:indices[4] + sizes[4],
                 indices[5]:indices[5] + sizes[5]]
    slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
    expected_val = inp[indices[0]:indices[0] + sizes[0],
                       indices[1]:indices[1] + sizes[1],
                       indices[2]:indices[2] + sizes[2],
                       indices[3]:indices[3] + sizes[3],
                       indices[4]:indices[4] + sizes[4],
                       indices[5]:indices[5] + sizes[5]]
    self.assertAllEqual(slice_val, expected_val)
    self.assertAllEqual(slice2_val, expected_val)
    self.assertEqual(expected_val.shape, slice_t.get_shape())
    self.assertEqual(expected_val.shape, slice2_t.get_shape())

  def testPartialShapeInference(self):
    z = array_ops.zeros((1, 2, 3))
    self.assertAllEqual(z.get_shape().as_list(), [1, 2, 3])

    m1 = array_ops.slice(z, [0, 0, 0], [-1, -1, -1])
    self.assertAllEqual(m1.get_shape().as_list(), [1, 2, 3])

    m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])
    self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])

  def _testGradientSlice(self, input_shape, slice_begin, slice_size):
    """Checks the gradient of tf.slice in graph mode against numpy."""
    with self.cached_session(use_gpu=True):
      num_inputs = np.prod(input_shape)
      num_grads = np.prod(slice_size)
      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
      a = constant_op.constant(
          [float(x) for x in inp.ravel(order="C")],
          shape=input_shape,
          dtype=dtypes.float32)
      slice_t = array_ops.slice(a, slice_begin, slice_size)
      grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
      grad_tensor = constant_op.constant(grads)
      grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]
      result = self.evaluate(grad)

    # Create a zero tensor of the input shape and place
    # the grads into the right location to compare against TensorFlow.
    np_ans = np.zeros(input_shape)
    slices = []
    for i in xrange(len(input_shape)):
      slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
    np_ans[slices] = grads

    self.assertAllClose(np_ans, result)

  def _testGradientSliceTape(self, input_shape, slice_begin, slice_size):
    """Checks the gradient of tf.slice under GradientTape against numpy."""
    with backprop.GradientTape() as tape:
      num_inputs = np.prod(input_shape)
      num_grads = np.prod(slice_size)
      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
      a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
                               shape=input_shape,
                               dtype=dtypes.float32)
      tape.watch(a)
      slice_t = array_ops.slice(a, slice_begin, slice_size)
      grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
      grad_tensor = constant_op.constant(grads)
    grad = tape.gradient(slice_t, [a], grad_tensor)[0]
    result = self.evaluate(grad)

    # Create a zero tensor of the input shape and place
    # the grads into the right location to compare against TensorFlow.
    np_ans = np.zeros(input_shape)
    slices = []
    for i in xrange(len(input_shape)):
      slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
    np_ans[slices] = grads

    self.assertAllClose(np_ans, result)

  def _testGradientVariableSize(self):
    """Gradient through a size=-1 slice in graph mode."""
    with self.cached_session(use_gpu=True):
      inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
      out = array_ops.slice(inp, [1], [-1])
      grad_actual = self.evaluate(gradients_impl.gradients(out, inp)[0])
    self.assertAllClose([0., 1., 1.], grad_actual)

  def _testGradientVariableSizeTape(self):
    """Gradient through a size=-1 slice under GradientTape."""
    with backprop.GradientTape() as tape:
      inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
      tape.watch(inp)
      out = array_ops.slice(inp, [1], [-1])
    grad_actual = self.evaluate(tape.gradient(out, inp))
    self.assertAllClose([0., 1., 1.], grad_actual)

  def _testGradientVariableSize2D(self):
    # Regression test for bug in slice. A low-level bug in Eigen was causing
    # incorrect results for negative indices in multi-dimensional tensors.
    # See b/114318298.
    with self.cached_session(use_gpu=True):
      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
      g1 = gradients_impl.gradients(loss1, x)[0]
      g2 = gradients_impl.gradients(loss2, x)[0]
      g1_val, g2_val = self.evaluate([g1, g2])
    self.assertAllEqual(g1_val, g2_val)

  def _testGradientVariableSize2DTape(self):
    # Regression test for bug in slice. A low-level bug in Eigen was causing
    # incorrect results for negative indices in multi-dimensional tensors.
    # See b/114318298.
    with backprop.GradientTape(persistent=True) as tape:
      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
      tape.watch(x)
      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
    g1 = tape.gradient(loss1, x)
    g2 = tape.gradient(loss2, x)
    g1_val, g2_val = self.evaluate([g1, g2])
    self.assertAllEqual(g1_val, g2_val)

  def testGradientsAll(self):
    with ops.Graph().as_default():
      # Slice the middle square out of a 4x4 input
      self._testGradientSlice([4, 4], [1, 1], [2, 2])

      # Slice the upper left square out of a 4x4 input
      self._testGradientSlice([4, 4], [0, 0], [2, 2])

      # Slice a non-square input starting from (2,1)
      self._testGradientSlice([4, 4], [2, 1], [1, 2])

      # Slice a 3D tensor
      self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])

      # Use -1 as a slice dimension.
      self._testGradientVariableSize()

      # Use -1 as a slice dimension on a 2D tensor.
      self._testGradientVariableSize2D()

  def testGradientsAllTape(self):
    # Slice the middle square out of a 4x4 input
    self._testGradientSliceTape([4, 4], [1, 1], [2, 2])

    # Slice the upper left square out of a 4x4 input
    self._testGradientSliceTape([4, 4], [0, 0], [2, 2])

    # Slice a non-square input starting from (2,1)
    self._testGradientSliceTape([4, 4], [2, 1], [1, 2])

    # Slice a 3D tensor
    self._testGradientSliceTape([3, 3, 3], [0, 1, 0], [2, 1, 1])

    # Use -1 as a slice dimension.
    self._testGradientVariableSizeTape()

    # Use -1 as a slice dimension on a 2D tensor.
    self._testGradientVariableSize2DTape()

  def testNotIterable(self):
    # Tensor iteration is disabled explicitly for only graph mode.
    with ops.Graph().as_default():
      # NOTE(mrry): If we register __getitem__ as an overloaded
      # operator, Python will valiantly attempt to iterate over the
      # Tensor from 0 to infinity.  This test ensures that this
      # unintended behavior is prevented.
      c = constant_op.constant(5.0)
      with self.assertRaisesRegex(errors_impl.OperatorNotAllowedInGraphError,
                                  "iterating over `tf.Tensor`"):
        for _ in c:
          pass

  def testComputedShape(self):
    # NOTE(mrry): We cannot currently handle partially-known values,
    # because `tf.slice()` uses -1 to specify a wildcard size, and
    # this can't be handled using the
    # `tensor_util.constant_value_as_shape()` trick.
    a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
    begin = constant_op.constant(0)
    size = constant_op.constant(1)
    b = array_ops.slice(a, [begin, 0], [size, 2])
    self.assertEqual([1, 2], b.get_shape())

    # placeholders only make sense in a graph.
    with ops.Graph().as_default():
      a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      begin = array_ops.placeholder(dtypes.int32, shape=())
      c = array_ops.slice(a, [begin, 0], [-1, 2])
      self.assertEqual([None, 2], c.get_shape().as_list())

  def testSliceOfSlice(self):
    with self.session(use_gpu=True):
      a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
      b = a[1:, :]
      c = b[:-1, :]
      d = c[1, :]
      res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]
      self.assertAllEqual([0, 0, 0], self.evaluate(res))
if __name__ == "__main__":
  # Let the TensorFlow test runner discover and run the cases above.
  test.main()
|
|
# -*- coding: utf-8 -*-
import logging
import json
import urllib
import os
import sys
from threading import Thread
from twisted.internet import reactor
import requests
from bs4 import BeautifulSoup as bs4
from soupy import Soupy, Q
requests.packages.urllib3.disable_warnings()
log = logging.getLogger('giantbomb')
# Background worker handle (unused placeholder in this chunk).
t = None
# Latest-seen video per category, loaded from / persisted in
# module_giantbomb_conf.json.
videos = {}
# Handle of the pending twisted reactor callLater for the scrape loop.
getvids_callLater = None
bot = None
config = None
useragent = None
# Display names and GiantBomb API category codes for the video feeds.
VIDEO_NAMES = {'ql': 'Quick Look', 'sub': 'Premium Video', 'feature': 'Feature', 'bombastica': 'Encyclopedia Bombastica',
               'event': 'Event Video', 'unfinished': 'Unfinished', 'er': 'Endurance Run'}
VIDEO_CODES = {'ql': '3', 'sub': '10', 'feature': '8', 'bombastica': '12', 'event': '6', 'er': '5', 'unfinished': '13'}
VIDEO_URL = "http://www.giantbomb.com/api/videos/?api_key=%s&format=json&limit=1&filter=video_categories:%s"
PODCAST_NAMES = {'premcast': 'Premium Podcast', 'presents': 'GB Presents'}
PODCAST_URLS = {'premcast': 'http://www.giantbomb.com/podcasts/premium/', 'presents': 'http://www.giantbomb.com/podcasts/giant-bomb-presents/'}
CHANNEL = "#giantbomb"
def event_signedon(bot):
    """Load the cached item ids and (re)start the scraping loop on connect."""
    global getvids_callLater, videos
    # Reload last-seen ids so we do not re-announce items after a reconnect.
    with open(os.path.join(sys.path[0], 'modules', 'module_giantbomb_conf.json')) as datafile:
        videos = json.load(datafile)
    log.info("Loaded cached video names")
    if getvids_callLater is not None:  # idiom fix: was `!= None`
        log.info("Stopping previous scraping thread")
        try:
            getvids_callLater.cancel()
        except Exception:
            # BUG FIX: cancel() raises if the delayed call already fired or
            # was cancelled; don't let that abort the signon handler.
            log.error("Could not cancel previous scraping timer")
    rotator_getvids(bot, 500)  # poll every 500 seconds
def handle_privmsg(bot, user, channel, cmd):
    """Handle the private ".startgb" command: reload the cache, restart polling."""
    global videos
    msg = cmd[1:]
    # Only react to a private message (user == channel) saying "startgb".
    if user != channel or msg != "startgb":
        return
    bot.say(channel, "GB scraper started!")
    with open(os.path.join(sys.path[0], 'modules', 'module_giantbomb_conf.json')) as datafile:
        videos = json.load(datafile)
    log.info("Loaded cached video names")
    if getvids_callLater is not None:  # idiom fix: was `!= None`
        log.info("Stopping previous scraping thread")
        try:
            getvids_callLater.cancel()
        except Exception:
            # BUG FIX: cancel() raises if the delayed call already fired or
            # was cancelled; keep the restart best-effort.
            log.error("Could not cancel previous scraping timer")
    rotator_getvids(bot, 500)
def init(botref):
    """Capture the bot reference and read this module's configuration."""
    global config, bot, apikey, bearer, useragent
    bot = botref
    config = bot.config.get("module_giantbomb", {})
    apikey = config.get("apikey")
    # The Twitter bearer token lives in the urltitle module's config section.
    urltitle_conf = bot.config.get("module_urltitle", {})
    bearer = urltitle_conf.get('twitter_bearer')
    useragent = bot.config.get("nick")
def finalize():
    """Stop the polling timer when the module is unloaded."""
    if getvids_callLater is not None:
        try:
            log.info("Stopping previous scraping thread")
            getvids_callLater.cancel()
        # SYNTAX FIX: `except Exception, e` is Python-2-only; `as` works in
        # Python 2.6+ and 3.
        except Exception as e:
            # The delayed call may already have fired or been cancelled.
            log.error("Exception occurred stopping scraping thread")
            log.error(e)
def command_gb(bot, user, channel, args):
    """.gb upcoming - Returns any posted upcoming items at GiantBomb.com (it's a website about video games)"""
    global videos
    if not args:
        return
    cmds = args.split()
    subcommand = cmds[0]
    if subcommand == "upcoming":
        page = bs4(urllib.urlopen("http://www.giantbomb.com/"))
        upcoming = page.find("dl", {"class": "promo-upcoming"})
        if not upcoming:
            bot.say(channel, "No items on the upcoming list! Alert @GiantBombStats!")
            # BUG FIX: previously fell through and crashed with
            # AttributeError on `upcoming.find_all` when upcoming is None.
            return
        slots = upcoming.find_all("dd")
        bot.say(channel, "%d Upcoming Items (times in EST):" % len(slots))
        for slot in slots:
            text = slot.find("h4").text
            time = slot.find("p").text
            bot.say(channel, "%s - %s" % (text, time))
def getvids(botref):
    """This function is launched from rotator to collect and announce new items from feeds to channel"""
    global CHANNEL, videos, bot, apikey, bearer, useragent
    req_headers = {'User-Agent': useragent}
    bot = botref
    change = False  # set whenever the on-disk cache needs re-saving
    # 1) Videos: one API poll per category code.
    for type, code in VIDEO_CODES.iteritems():
        if check_latest(type, code):
            change = True
    # 2) News articles: scrape the RSS feed; only the newest item is compared.
    page = bs4(urllib.urlopen("http://www.giantbomb.com/feeds/news/"), "xml")
    latestitem = page.rss.channel.item
    latestname = latestitem.title.text
    if not latestname == videos['article']:
        link = latestitem.link.text
        bot.say(CHANNEL, "[New Article] %s - %s" % (latestname, link))
        log.info("New Article: %s" % latestname)
        videos['article'] = latestname
        change = True
    # 3) Podcasts: scan the newest promos for the first podcast entry,
    # stopping early once the last-announced id is reached.
    data = requests.get("http://www.giantbomb.com/api/promos/?api_key=%s&format=json&limit=5&sort=date_added:desc" %
                        apikey, headers = req_headers)
    response = data.json()
    promos = response['results']
    for promo in promos:
        podcastid = promo['id']
        if podcastid == videos['podcast']:
            break
        elif promo['resource_type'] == 'podcast':
            latestname = promo['name']
            latestdesc = promo['deck']
            url = promo['link']
            bot.say(CHANNEL, "[New Podcast] %s - %s %s" % (latestname, latestdesc, url))
            log.info("New Podcast: %s" % latestname)
            videos['podcast'] = podcastid
            change = True
            break
    # 4) Reviews: newest review, keyed by the reviewed release's id.
    data = requests.get("http://www.giantbomb.com/api/reviews/?api_key=%s&format=json&limit=1&sort=publish_date:desc" % apikey, headers = req_headers)
    response = data.json()
    review = response['results'][0]
    releaseid = review['release']['id']
    if not releaseid == videos['review']:
        gamename = review['release']['name']
        deck = review['deck']
        author = review['reviewer']
        link = review['site_detail_url']
        score = review['score']
        # A '0' score marks an unscored review.
        score = 'Unscored' if score == '0' else '%s-Star' % score
        bot.say(CHANNEL, "[New %s Review by %s] %s - %s %s" % (score, author, gamename,
                                                               deck, link))
        log.info("New Review: %s" % gamename)
        videos['review'] = releaseid
        change = True
    # 5) Livestreams: newest tweet from @giantbomblive.
    livetwitter = "https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=giantbomblive&count=1"
    data = bot.get_url(livetwitter,headers={'Authorization':'Bearer ' + bearer})
    parsed = data.json()
    latesttweet = parsed[0]['id']
    if not latesttweet == videos['tweet']:
        text = parsed[0]['text']
        # NOTE(review): assumes a fixed 10-character prefix on the tweet
        # text that is stripped before announcing -- confirm the format.
        bot.say(CHANNEL, "LIVE STREAM %s" % text[10:])
        log.info("New Livestream Tweet")
        videos['tweet'] = latesttweet
        change = True
    # 6) Mixlr: announce when Jeff's stream flips from offline to live.
    mixlr = requests.get("https://api.mixlr.com/users/jeff-gerstmann?source=embed&include_comments=false")
    mdata = mixlr.json()
    code = mdata['url']
    live = mdata['is_live']
    if live and not videos['mixlrlive']:
        latestmixlr = mdata['broadcasts'][0]['title']
        bot.say(CHANNEL, "Jeff is LIVE on Mixlr: %s - %s" % (latestmixlr, code))
        log.info("New Mixlr Broadcast")
        videos['mixlr'] = latestmixlr
        videos['mixlrlive'] = True
        change = True
    elif videos['mixlrlive'] and not live:
        # Stream ended: clear the flag so the next broadcast is announced.
        videos['mixlrlive'] = False
        change=True
    # Persist the cache only when something new was seen.
    if change:
        with open(os.path.join(sys.path[0], 'modules', 'module_giantbomb_conf.json'),'w') as datafile:
            json.dump(videos, datafile)
def check_latest(type, code):
    """Poll the GiantBomb video API for category `code` and announce new videos.

    Returns True when a new video was announced (i.e. the cache needs saving),
    False otherwise (including on any error)."""
    global videos, bot, apikey, useragent
    req_headers = {'User-Agent': useragent}
    try:
        data = requests.get(VIDEO_URL % (apikey, code), headers = req_headers)
        # Renamed from `json`: the old local shadowed the json module import.
        payload = data.json()
        video = payload['results'][0]
        vidid = video['id']
        if vidid not in videos[type]:
            name = video['name']
            deck = video['deck']
            link = video['site_detail_url']
            bot.say(CHANNEL, "[New %s] %s - %s %s" % (VIDEO_NAMES[type], name, deck,
                                                      link))
            log.info("New %s: %s" % (VIDEO_NAMES[type], name))
            videos[type].append(vidid)
            # Only need to keep the latest 5 ids
            if len(videos[type]) > 5:
                videos[type].pop(0)
            return True
        return False
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; network/parse errors stay best-effort.
        log.error("Failed checking for latest %s at %s" % (type, code))
        return False
def check_podcast(type, url):
    """Scrape a podcast landing page and announce a newly published episode.

    Returns True when a new episode was announced (cache needs saving),
    False otherwise (including on any scrape/parse error)."""
    global CHANNEL, videos, bot, apikey
    page = Soupy(urllib.urlopen(url))
    try:
        namenode = page.find("h2")
        latestname = namenode.text.val()
        if not latestname == videos[type]:
            latestdesc = page.find(class_="deck").text.val().strip()
            bot.say(CHANNEL, "[New %s] %s - %s %s" % (PODCAST_NAMES[type], latestname, latestdesc, url))
            log.info("New %s: %s" % (PODCAST_NAMES[type], latestname))
            videos[type] = latestname
            return True
        return False
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; scraping stays best-effort.
        log.error("Failed checking for latest %s at %s" % (type, url))
        return False
def rotator_getvids(bot, delay):
    """Timer for methods/functions"""
    try:
        global t, getvids_callLater
        # Run the (blocking, network-heavy) scrape off the reactor thread.
        t = Thread(target=getvids, args=(bot,))
        t.daemon = True  # don't keep the process alive for the scraper
        t.start()
        # Reschedule ourselves; getvids() runs in the worker meanwhile.
        getvids_callLater = reactor.callLater(delay, rotator_getvids, bot, delay)
    # SYNTAX FIX: `except Exception, e` is Python-2-only; `as` works in 2.6+/3.
    except Exception as e:
        log.error('Error in rotator_getvids')
        log.error(e)
|
|
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.db.models import Count
from django.db.models.aggregates import Sum
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils import timezone
from django_extensions.db.fields import AutoSlugField
from allauth.account.signals import user_signed_up
from textwrap import dedent
from jury.models import Jury
class DeckBaseManager(models.QuerySet):
    """Base queryset for deck models: author caching and vote-aware ordering."""

    def cached_authors(self):
        """Select the related author in the same query (avoids N+1 lookups)."""
        return super(DeckBaseManager, self).select_related('author')

    def published_ones(self):
        """Only published items, with authors pre-fetched."""
        return self.cached_authors().filter(is_published=True)

    def order_by_never_voted(self, user_id):
        """Order proposals so those `user_id` has never voted on come first.

        Only valid on the Proposal model; raises AttributeError otherwise.
        """
        if self.model != Proposal:
            raise AttributeError(
                "%s object has no attribute %s" % (
                    self.model, 'order_by_never_voted'))
        # EXISTS-style subquery: yields 1 when the user voted on the proposal,
        # NULL otherwise; used as an extra select column for ordering.
        order_by_criteria = dedent("""
            SELECT 1
            FROM deck_vote
            WHERE deck_vote.user_id = %s AND
            deck_vote.proposal_id = deck_proposal.activity_ptr_id
            LIMIT 1
        """)
        new_ordering = ['-never_voted']
        # SQLite sorts NULLs the opposite way, so flip the direction there.
        if settings.DATABASES['default'].get('ENGINE') == 'django.db.backends.sqlite3':
            new_ordering = ['never_voted']
        new_ordering.extend(Proposal._meta.ordering)
        # SECURITY FIX: pass user_id via select_params so the driver quotes
        # it, instead of interpolating it into the SQL string with `%`.
        return self.extra(
            select=dict(never_voted=order_by_criteria),
            select_params=(user_id,),
            order_by=new_ordering
        )
class DeckBaseModel(models.Model):
    # Abstract base with the fields shared by Event and the Activity tree.
    title = models.CharField(_('Title'), max_length=200)
    # Slug is regenerated from the title on every save (overwrite=True).
    slug = AutoSlugField(populate_from='title', overwrite=True,
                         max_length=200, unique=True, db_index=True)
    description = models.TextField(
        _('Description'), max_length=10000, blank=True)
    created_at = models.DateTimeField(_('Created At'), auto_now_add=True)
    is_published = models.BooleanField(_('Publish'), default=True)
    # relations
    # related_name expands per concrete subclass, e.g. user.events/proposals.
    author = models.ForeignKey(to=settings.AUTH_USER_MODEL,
                               related_name='%(class)ss')
    # managers
    objects = DeckBaseManager.as_manager()

    class Meta:
        abstract = True

    def __unicode__(self):
        return unicode(self.title)
class Vote(models.Model):
    """One user's reaction to a Proposal, stored as a small integer rate."""
    # Rates run from -1 (angry) up to 3 (laughing).
    ANGRY, SLEEPY, SAD, HAPPY, LAUGHING = range(-1, 4)
    VOTE_TITLES = dict(
        angry=_('Angry'), sad=_('Sad'),
        sleepy=_('Sleepy'), happy=_('Happy'),
        laughing=_('Laughing')
    )
    VOTE_RATES = ((ANGRY, 'angry'),
                  (SAD, 'sad'),
                  (SLEEPY, 'sleepy'),
                  (HAPPY, 'happy'),
                  (LAUGHING, 'laughing'))
    rate = models.SmallIntegerField(_('Rate Index'), null=True, blank=True,
                                    choices=VOTE_RATES)
    # relations
    proposal = models.ForeignKey(to='deck.Proposal', related_name='votes')
    user = models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='votes')

    class Meta:
        verbose_name = _('Vote')
        verbose_name_plural = _('Votes')
        # One vote per user per proposal.
        unique_together = (('proposal', 'user'),)

    def __unicode__(self):
        return u"{0.user}: {0.rate} in {0.proposal}".format(self)

    def save(self, *args, **kwargs):
        """Enforce voting rules before persisting.

        Superusers and jury members may always vote. Everyone else may not
        rate their own proposal, may only vote when the event allows public
        voting, and may vote at most once per proposal.

        Raises ValidationError when a rule is violated.
        """
        validation_message = None
        user_is_in_jury = self.proposal.event.jury.users.filter(
            pk=self.user.pk).exists()
        if (self.user.is_superuser or user_is_in_jury):
            pass
        elif self.user == self.proposal.author:
            validation_message = _(u'You cannot Rate your own proposals.')
        elif not self.proposal.event.allow_public_voting:
            validation_message = _(u"Proposal doesn't accept Public Voting.")
        elif self.proposal.user_already_voted(self.user):
            validation_message = _(u'Proposal already Rated by you.')
        if validation_message:
            # I18N FIX: the message is already translated above; wrapping it
            # in _() again ran the translated text through the catalog twice.
            raise ValidationError(validation_message)
        return super(Vote, self).save(*args, **kwargs)
class Activity(DeckBaseModel):
    """A schedulable slot in an event track (talk, break, lunch, ...)."""
    PROPOSAL = 'proposal'
    WORKSHOP = 'workshop'
    # NOTE: the stored value keeps the historical misspelling 'openning'
    # for database compatibility; only the display label is corrected below.
    OPENNING = 'openning'
    COFFEEBREAK = 'coffee-break'
    LUNCH = 'lunch'
    LIGHTNINGTALKS = 'lightning-talks'
    ENDING = 'ending'
    ACTIVITY_TYPES = (
        (PROPOSAL, _('Proposal')),
        (WORKSHOP, _('Workshop')),
        (OPENNING, _('Opening')),  # TYPO FIX: label was 'Openning'
        (COFFEEBREAK, _('Coffee Break')),
        (LUNCH, _('Lunch')),
        (LIGHTNINGTALKS, _('Lightning Talks')),
        (ENDING, _('Ending')),
    )
    start_timetable = models.TimeField(
        _('Start Timetable'), null=True, blank=False)
    end_timetable = models.TimeField(
        _('End Timetable'), null=True, blank=False)
    track_order = models.SmallIntegerField(_('Order'), null=True, blank=True)
    activity_type = models.CharField(
        _('Type'), choices=ACTIVITY_TYPES, default=PROPOSAL, max_length=50)
    # relations
    track = models.ForeignKey(to='deck.Track', related_name='activities',
                              null=True, blank=True)

    class Meta:
        ordering = ('track_order', 'start_timetable', 'pk')
        verbose_name = _('Activity')
        verbose_name_plural = _('Activities')

    @property
    def timetable(self):
        """Formatted "HH:MM - HH:MM" range, or '--:--' when not scheduled.

        BUG FIX: the placeholder was only returned when *both* ends were
        unset, so a half-scheduled activity crashed on None.strftime().
        """
        if self.start_timetable is None or self.end_timetable is None:
            return '--:--'
        return '{0} - {1}'.format(
            self.start_timetable.strftime('%H:%M'),
            self.end_timetable.strftime('%H:%M')
        )
class Proposal(Activity):
    """A talk proposal submitted to an Event; votable and approvable."""
    is_approved = models.BooleanField(_('Is approved'), default=False)
    more_information = models.TextField(
        _('More information'), max_length=10000, null=True, blank=True)
    # relations
    event = models.ForeignKey(to='deck.Event', related_name='proposals')

    class Meta:
        ordering = ['title']
        verbose_name = _('Proposal')
        verbose_name_plural = _('Proposals')

    def save(self, *args, **kwargs):
        """Reject *new* proposals once the event's due date has passed."""
        if not self.pk and self.event.due_date_is_passed:
            raise ValidationError(
                _("This Event doesn't accept Proposals anymore."))
        return super(Proposal, self).save(*args, **kwargs)

    @property
    def get_rate(self):
        """Sum of all vote rates (0 when unvoted).

        Prefers the `votes__rate__sum` attribute set by annotated querysets,
        falling back to an aggregate query.
        BUG FIX: the old try/except/finally returned from `finally`, which
        silently swallowed any exception raised by the aggregate query.
        """
        try:
            rate = self.votes__rate__sum
        except AttributeError:
            rate = self.votes.aggregate(Sum('rate'))['rate__sum']
        return rate or 0

    def rate(self, user, rate):
        """Record `user`'s vote given a rate name (e.g. 'happy').

        Raises IndexError for an unknown rate name.
        """
        rate_int = [r[0] for r in Vote.VOTE_RATES if rate in r][0]
        with transaction.atomic():
            self.votes.create(user=user, rate=rate_int)

    def user_already_voted(self, user):
        """True when `user` (non-anonymous) already voted on this proposal."""
        if isinstance(user, AnonymousUser):
            return False
        return self.votes.filter(user=user).exists()

    def user_can_vote(self, user):
        """Voting rules: no double votes; authors cannot vote on their own
        proposal (unless they also authored the event); otherwise public
        voting, superuser, or jury membership grants the right."""
        can_vote = False
        if self.user_already_voted(user) or \
                (self.author == user and not self.event.author == user):
            pass
        elif self.event.allow_public_voting:
            can_vote = True
        elif user.is_superuser:
            can_vote = True
        elif self.event.jury.users.filter(pk=user.pk).exists():
            can_vote = True
        return can_vote

    def user_can_approve(self, user):
        """Only superusers and jury members may (dis)approve."""
        can_approve = False
        if user.is_superuser:
            can_approve = True
        elif self.event.jury.users.filter(pk=user.pk).exists():
            can_approve = True
        return can_approve

    def get_absolute_url(self):
        # Proposals are anchored fragments on their event's page.
        return reverse('view_event', kwargs={'slug': self.event.slug}) + \
            '#' + self.slug

    def approve(self):
        """Mark as approved; raises ValidationError when already approved."""
        if self.is_approved:
            raise ValidationError(_("This Proposal was already approved."))
        self.is_approved = True
        self.save()

    def disapprove(self):
        """Mark as not approved; raises ValidationError when already so."""
        if not self.is_approved:
            raise ValidationError(_("This Proposal was already disapproved."))
        self.is_approved = False
        self.save()
class Track(models.Model):
    """An ordered lane of activities inside an Event's schedule."""
    title = models.CharField(_('Title'), max_length=200)
    slug = AutoSlugField(populate_from='title', overwrite=True,
                         max_length=200, unique=True, db_index=True)
    # relations
    event = models.ForeignKey(to='deck.Event', related_name='tracks')

    class Meta:
        verbose_name = _('Track')
        verbose_name_plural = _('Tracks')

    def __unicode__(self):
        return 'Track for: "%s"' % self.event.title

    @property
    def proposals(self):
        """Proposals scheduled into this track (via its activities)."""
        activity_pks = self.activities.values_list('pk', flat=True)
        return Proposal.objects.filter(pk__in=activity_pks)
class Event(DeckBaseModel):
    """A conference edition that collects proposals, votes, and a schedule."""
    allow_public_voting = models.BooleanField(_('Allow Public Voting'),
                                              default=True)
    due_date = models.DateTimeField(null=True, blank=True)
    slots = models.SmallIntegerField(_('Slots'), default=10)
    # relations
    jury = models.OneToOneField(to='jury.Jury', related_name='event',
                                null=True, blank=True)

    class Meta:
        ordering = ['-due_date', '-created_at']
        verbose_name = _('Event')
        verbose_name_plural = _('Events')

    @property
    def due_date_is_passed(self):
        """True once the submission deadline (if any) lies in the past."""
        if not self.due_date:
            return False
        return timezone.now() > self.due_date

    def get_absolute_url(self):
        return reverse('view_event', kwargs={'slug': self.slug})

    def user_can_see_proposals(self, user):
        """Superusers and the event author always see proposals; otherwise
        public voting must be enabled or the user must be on the jury."""
        if user.is_superuser or self.author == user:
            return True
        if self.allow_public_voting:
            return True
        return (not user.is_anonymous() and
                self.jury.users.filter(pk=user.pk).exists())

    def get_proposers_count(self):
        """Number of distinct users who submitted at least one proposal."""
        authors = self.proposals.values_list('author', flat=True)
        return authors.distinct().count()

    def get_votes_count(self):
        """Total number of vote rows across all proposals."""
        return self.proposals.values_list('votes', flat=True).count()

    def get_votes_to_export(self):
        """Per-proposal vote totals and counts, shaped for export."""
        columns = ('id', 'title', 'author__username', 'author__email')
        return (self.proposals
                .values(*columns)
                .annotate(Sum('votes__rate'))
                .annotate(Count('votes')))

    def get_schedule(self):
        """All activities of this event's tracks: trackless entries last,
        then by track order and descending vote totals."""
        return (Activity.objects.filter(track__event=self)
                .cached_authors()
                .annotate(Sum('proposal__votes__rate'))
                .extra(select=dict(track_isnull='track_id IS NULL'))
                .order_by('track_isnull', 'track_order',
                          '-proposal__votes__rate__sum'))

    def get_not_approved_schedule(self):
        """Unapproved or unscheduled proposals, best-rated first."""
        pending = (self.proposals
                   .cached_authors()
                   .filter(models.Q(is_approved=False) |
                           models.Q(track__isnull=True))
                   .annotate(Sum('votes__rate'))
                   .order_by('-is_approved', '-votes__rate__sum'))
        return pending
@receiver(user_signed_up)
def send_welcome_mail(request, user, **kwargs):
    """Email a welcome message to every newly signed-up user."""
    body = render_to_string('mailing/welcome.txt')
    subject = _(u'Welcome')
    send_mail(subject, body, settings.NO_REPLY_EMAIL, [user.email])
@receiver(post_save, sender=Event)
def create_initial_jury(sender, instance, signal, created, **kwargs):
    # Give every freshly created Event a Jury seeded with its author.
    if not created:
        return
    jury = Jury()
    jury.save()
    jury.users.add(instance.author)
    instance.jury = jury
    # Saving inside a post_save handler re-fires this signal, but the
    # `created` guard above prevents infinite recursion.
    instance.save()
@receiver(post_save, sender=Event)
def create_initial_track(sender, instance, signal, created, **kwargs):
    """Give every newly created Event a default (untitled) Track."""
    if created:
        Track.objects.create(event=instance)
@receiver(post_delete, sender=Proposal)
def send_proposal_deleted_mail(sender, instance, **kwargs):
    """Notify the event's jury whenever one of its proposals is deleted."""
    context = {'event_title': instance.event.title,
               'proposal_title': instance.title}
    message = render_to_string('mailing/jury_deleted_proposal.txt', context)
    # I18N FIX: translate the template first, then interpolate -- translating
    # the already-formatted string can never match a catalog entry.
    subject = _(u'Proposal from %s just got deleted') % instance.event.title
    recipients = instance.event.jury.users.values_list('email', flat=True)
    send_mail(subject, message, settings.NO_REPLY_EMAIL, recipients)
|
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
# -*- coding: utf-8 -*-
from __future__ import absolute_import, with_statement
# Public API of this module: the piston handler classes exported below.
__all__ = ['CurrencyHandler', 'TaxHandler', 'CategoryHandler', 'AssetHandler',
           'AccountHandler', 'EquityHandler', 'LiabilityHandler',
           'TransactionHandler']
from django.core.exceptions import ObjectDoesNotExist
from treeio.core.api.utils import rc
from treeio.sales.models import SaleOrder
from treeio.finance.helpers import convert
from treeio.sales.forms import dict_currencies
from treeio.core.api.handlers import ObjectHandler
from treeio.finance.models import Currency, Tax, Category, Asset, Account, Equity, Liability, Transaction
from treeio.finance.forms import TransactionForm, LiabilityForm, AccountForm, EquityForm, AssetForm, \
CategoryForm, CurrencyForm, TaxForm
class FinanceCommonHandler(ObjectHandler):
    # Shared permission hooks for the finance API handlers.
    def check_create_permission(self, request, mode):
        # NOTE(review): the admin check is commented out, so *any* caller
        # may create objects -- confirm that is intentional.
        return True  # request.user.profile.is_admin('treeio.finance')
    def check_instance_permission(self, request, inst, mode):
        # Object-level access: permission on the instance, or finance admin.
        return request.user.profile.has_permission(inst, mode=mode) \
            or request.user.profile.is_admin('treeio.finance')
class CurrencyHandler(ObjectHandler):
    """API handler for Currency objects."""
    model = Currency
    form = CurrencyForm

    @classmethod
    def resource_uri(cls, obj=None):
        # "id" is the placeholder used when building the generic URI pattern.
        return ('api_finance_currencies',
                [obj.id if obj is not None else "id"])

    def create(self, request, *args, **kwargs):
        """Create a Currency from the request payload."""
        if request.data is None:
            return rc.BAD_REQUEST
        if not self.check_create_permission(request, "x"):
            return rc.FORBIDDEN
        form = CurrencyForm(
            request.user.profile, request.data, instance=Currency())
        if not form.is_valid():
            self.status = 400
            return form.errors
        currency = form.save(commit=False)
        # NOTE(review): derives the display name from the dict_currencies
        # entry by skipping everything up to the first space plus one more
        # character -- assumes a fixed "CODE (sym) Name" format; confirm.
        cname = dict_currencies[currency.code]
        currency.name = cname[cname.index(' ') + 2:]
        # currency.factor = 1.0 #Get currency conversion here
        currency.save()
        currency.set_user_from_request(request)
        return currency
class TaxHandler(FinanceCommonHandler):
    """API handler for Tax objects."""
    model = Tax
    form = TaxForm

    @classmethod
    def resource_uri(cls, obj=None):
        # "id" is the placeholder used when building the generic URI pattern.
        return ('api_finance_taxes', [obj.id if obj is not None else "id"])
class CategoryHandler(FinanceCommonHandler):
    """API handler for Category objects."""
    model = Category
    form = CategoryForm

    @classmethod
    def resource_uri(cls, obj=None):
        # "id" is the placeholder used when building the generic URI pattern.
        return ('api_finance_categories',
                [obj.id if obj is not None else "id"])
class AssetHandler(FinanceCommonHandler):
    """API handler for Asset objects."""
    model = Asset
    form = AssetForm

    @classmethod
    def resource_uri(cls, obj=None):
        # "id" is the placeholder used when building the generic URI pattern.
        return ('api_finance_assets', [obj.id if obj is not None else "id"])
class AccountHandler(FinanceCommonHandler):
    """API handler for Account objects."""
    model = Account
    form = AccountForm

    @classmethod
    def resource_uri(cls, obj=None):
        # "id" is the placeholder used when building the generic URI pattern.
        return ('api_finance_accounts', [obj.id if obj is not None else "id"])

    def create(self, request, *args, **kwargs):
        """Create an Account from the request payload."""
        if request.data is None:
            return rc.BAD_REQUEST
        if not self.check_create_permission(request, "x"):
            return rc.FORBIDDEN
        form = AccountForm(
            request.user.profile, request.data, instance=Account())
        if not form.is_valid():
            self.status = 400
            return form.errors
        account = form.save(commit=False)
        convert(account, 'balance')
        # NOTE(review): unlike CurrencyHandler.create there is no explicit
        # save() here -- presumably convert()/set_user_from_request persist
        # the instance; confirm.
        account.set_user_from_request(request)
        return account
class EquityHandler(FinanceCommonHandler):
    """API handler for Equity objects."""
    model = Equity
    form = EquityForm

    @classmethod
    def resource_uri(cls, obj=None):
        # "id" is the placeholder used when building the generic URI pattern.
        return ('api_finance_equities', [obj.id if obj is not None else "id"])
class LiabilityHandler(FinanceCommonHandler):
    """API handler for Liability objects."""
    model = Liability
    form = LiabilityForm

    @classmethod
    def resource_uri(cls, obj=None):
        # "id" is the placeholder used when building the generic URI pattern.
        return ('api_finance_liabilities',
                [obj.id if obj is not None else "id"])

    def create(self, request, *args, **kwargs):
        """Create a Liability from the request payload."""
        if request.data is None:
            return rc.BAD_REQUEST
        # if not self.check_create_permission(request, "x"):
        #     return rc.FORBIDDEN
        instance = self.model()
        form = self.form(
            request.user.profile, request.data, instance=instance)
        if not form.is_valid():
            self.status = 400
            return form.errors
        liability = form.save(commit=False)
        # A liability is owed by whoever owns the target account.
        liability.source = liability.account.owner
        convert(liability, 'value')
        liability.set_user_from_request(request)
        return liability
class TransactionHandler(FinanceCommonHandler):
    """API handler for Transaction objects."""
    model = Transaction
    form = TransactionForm

    @classmethod
    def resource_uri(cls, obj=None):
        # "id" is the placeholder used when building the generic URI pattern.
        object_id = "id"
        if obj is not None:
            object_id = obj.id
        return ('api_finance_transactions', [object_id])

    def create(self, request, *args, **kwargs):
        """Create a Transaction; optionally attach it to a SaleOrder payment."""
        if request.data is None:
            return rc.BAD_REQUEST
        # if not self.check_create_permission(request, "x"):
        #     return rc.FORBIDDEN
        transaction = self.model()
        # CONSISTENCY FIX: use the deserialized request.data (as update() and
        # the sibling handlers do) instead of the raw request.POST dict.
        form = self.form(
            request.user.profile, None, None, request.data, instance=transaction)
        if form.is_valid():
            transaction = form.save(commit=False)
            convert(transaction, 'value')
            transaction.set_user_from_request(request)
            if "order" in request.data:
                try:
                    order = SaleOrder.objects.get(pk=request.data['order'])
                    order.payment.add(transaction)
                    order.save()
                except Exception:
                    # Best effort: a bad/missing order id must not fail the
                    # transaction itself (narrowed from a bare `except:`).
                    pass
            return transaction
        else:
            self.status = 400
            return form.errors

    def update(self, request, *args, **kwargs):
        """Update an existing Transaction identified by its primary key."""
        pkfield = kwargs.get(self.model._meta.pk.name) or request.data.get(
            self.model._meta.pk.name)
        if not pkfield:
            return rc.BAD_REQUEST
        try:
            obj = self.model.objects.get(pk=pkfield)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        form = self.form(
            request.user.profile, None, None, request.data, instance=obj)
        if form.is_valid():
            transaction = form.save(commit=False)
            convert(transaction, 'value')
            return transaction
        else:
            self.status = 400
            return form.errors
|
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# NOTE: If you make changes to this file, be sure to restart Mr. Do!
import os
import shutil
import sys
import time
import conf
import g
from grax.access_level import Access_Level
#from grax.grac_manager import Grac_Manager
from grax.item_manager import Item_Manager
from item.feat import branch
from item.util import revision
from item.util.item_query_builder import Item_Query_Builder
from util_ import db_glue
from util_.path_helper import Path_Helper
from merge.make_ccp_yml import Make_Ccp_Yml
from merge.ccp_export import Ccp_Export
from merge.export_cyclop import Export_Cyclop
from merge.merge_job_base import Merge_Job_Base
# Public export list and the module logger.
__all__ = ('Merge_Job_Export',)
log = g.log.getLogger('merg_job_exp')
# ***
class Merge_Job_Export(Merge_Job_Base):
    """Mr. Do! work-item job that exports Cyclopath map data to a zipped
    Shapefile, driven as a sequence of stage callbacks."""

    __slots__ = (
        )

    # *** Constructor

    def __init__(self, wtem, mr_do):
        # wtem is the work item describing the export; mr_do is the runner.
        Merge_Job_Base.__init__(self, wtem, mr_do)

    # ***

    #
    @staticmethod
    def process_request(wtem, mr_do):
        # Dispatcher entry point: build a job instance and run it.
        mi = Merge_Job_Export(wtem, mr_do)
        mi.process_request_()

    # ***

    #
    def job_cleanup(self):
        Merge_Job_Base.job_cleanup(self)

    #
    def process_request_(self):
        Merge_Job_Base.process_request_(self)

    # *** The stage function lookup.

    #
    def make_stage_lookup(self):
        """Assemble self.stage_lookup, the ordered list of stage callbacks."""
        # Make a fake handler just to see how many substages it has.
        handler = Ccp_Export(self, None)
        # NOTE(review): this asserts on self.handler, not the local `handler`
        # built above -- verify self.handler is set earlier by the base class.
        g.assurt(self.handler is not None)
        # The handler is triggered by do_do_export. It eventually calls
        # feature_classes_export.
        n_sub_stages = len(handler.substage_lookup)
        # The substage handler is called multiple times, one for each substage.
        do_do_exports = n_sub_stages * [self.do_do_export,]
        self.stage_lookup = []
        self.stage_lookup += [
            self.do_reserve_directory, # Stages 1-2.
            self.do_make_import_config,
            ]
        # Substage numbering is offset past the stages registered so far.
        Export_Cyclop.stage_num_base = len(self.stage_lookup) + 1
        self.stage_lookup += do_do_exports # A bunch more stages.
        self.stage_lookup += [
            self.do_create_archive, # The cleanup stages.
            self.do_notify_users,
            self.job_mark_complete,
            ]
        self.handler = None
        handler = None

    # *** Stage fcn. defs

    # *** STAGE 1: SECURE DIR.

    #
    def do_reserve_directory(self):
        """Reserve a unique output path under conf.shapefile_directory and
        create the companion '<guid>.usr' directory for the yml config."""
        self.stage_initialize('Reserving directory')
        fpath, rand_path = Path_Helper.path_reserve(
            basedir=conf.shapefile_directory,
            extension='', is_dir=False)
        log.debug('do_reserve_directory: rand_path: %s' % (rand_path,))
        log.verbose('do_reserve_directory: wtem: %s' % (self.wtem,))
        # Remember the path.
        g.assurt(not self.wtem.local_file_guid)
        self.wtem.local_file_guid = rand_path
        # Resave the work item job data.
        # FIXME: It's not obvious that local_file_guid is part of job_def.
        self.wtem.job_data_update()
        # Make the dummy 'usr' directory (we use it just for the yml, to appease
        # the code originally writ for import and now being used for export).
        oname = '%s.usr' % (self.wtem.local_file_guid,)
        opath = os.path.join(conf.shapefile_directory, oname)
        try:
            os.mkdir(opath)
            # 2013.05.06: Need to chmod?
            # (Python 2 octal literal: group-writable with setgid bit.)
            os.chmod(opath, 02775)
        except OSError, e:
            # NOTE(review): assurt(False) aborts before the re-raise, so
            # `raise` is effectively unreachable while asserts are enabled.
            g.assurt(False)
            raise

    # *** STAGE 2: CREATE YML.

    #
    def do_make_import_config(self):
        """Write the ccp.yml configuration into the 'usr' directory
        (no-op unless use_yaml_conf is enabled)."""
        self.stage_initialize('Creating YML')
        log.verbose('do_make_import_config: wtem: %s' % (self.wtem,))
        if Merge_Job_Base.use_yaml_conf:
            cfg_path = self.get_import_config_path('usr')
            g.assurt(self.handler.qb_src is not None)
            as_yml = Make_Ccp_Yml.produce(self.handler.qb_src, self.wtem)
            try:
                #as_yml = yaml.dump(self.cfg, default_flow_style=True)
                # Python 2 builtin file(); equivalent to open(cfg_path, 'w').
                yaml_stream = file(cfg_path, 'w')
                yaml_stream.write(as_yml)
                yaml_stream.close()
            except Exception, e:
                failure_reason = (
                    'merge_job_export cannot save yaml file: %s / %s'
                    % (cfg_path, str(e),))
                self.job_mark_failed(failure_reason)
        # else, not using ccp.yml.

    # *** STAGE 3: EXPORT CCP.

    #
    def do_do_export(self):
        # NOTE: This fcn. gets called multiple times, once for each substage.
        # Skipping: self.stage_initialize
        log.verbose('do_do_export: wtem: %s' % (self.wtem,))
        self.do_import_or_export('export_callback')

    # *** STAGE 4: CREATE ZIP.

    #
    def do_create_archive(self):
        """Copy the yml/metadata/license alongside the export, then let the
        base class zip up the 'out' directory."""
        # The import code is written to expect a 'usr' and an 'out' directory,
        # and expects the input yml to be in the 'usr' directory. Since the
        # import doesn't create an output yml file (that's our job) just copy the
        # one we already created.
        if Merge_Job_Base.use_yaml_conf:
            try:
                cfg_src = self.get_import_config_path('usr')
                cfg_cur = self.get_import_config_path('cur')
                shutil.copy(cfg_src, cfg_cur)
            except IOError, e:
                log.warning('Could not copy yaml config: %s' % (str(e),))
                raise
        # else, the handler added geometryless fields to the shapefile by
        # calling Ccp_Merge_Conf.record_as_feats().
        # 2013.05.02: Include the Shapefile metadata, and license.
        # MAGIC_NUMBERS: Hard-coding well known paths.
        oname = '%s.out' % self.wtem.local_file_guid
        # SYNC_ME: self.file_driver.CreateDataSource uses this zipname.
        opath = os.path.join(
            conf.shapefile_directory, oname, self.wtem.get_zipname())
        #
        # FIXME/BUG nnnn: This is the public basemap's metadata, so it's missing
        # any metadata about attributes specific to leafy branches. E.g., if
        # you're exporting the Metc Bikeways 2012 branch, the metadata is missing
        # the bike_facil attribute, which is specific to Metc.
        # os.curdir is '.', and its os.abspath is '/'.
        # sys.path[0] is the path to the simplejson library...
        # so we cheat and use the environment variable.
        metadata_path = os.path.abspath(
            '%s/../scripts/daily/export_docs/metadata/ccp_road_network.htm'
            % (os.path.abspath(os.environ['PYSERVER_HOME']),))
        shutil.copy(metadata_path, opath)
        #
        license_path = os.path.abspath(
            '%s/../scripts/daily/export_docs/metadata/LICENSE.txt'
            % (os.path.abspath(os.environ['PYSERVER_HOME']),))
        shutil.copy(license_path, opath)
        # FIXME_2013_06_11: Revisit this.
        # FIXME: Does route_analysis.py include the license and its metadata?
        # Call the work_item_job base class to zip up the 'out' directory
        # and copy the zip to the 'out' directory.
        Merge_Job_Base.do_create_archive(self)

    # *** STAGE 5: NOTIFY PPL.

    #
    def do_notify_users(self):
        Merge_Job_Base.do_notify_users(self)
# ***
# ***
# Nothing to do when run directly; this module is driven by the job runner.
if (__name__ == '__main__'):
    pass
|
|
#!/usr/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/tools/docco/graphdocpy.py
"""Generate documentation for reportlab.graphics classes.
Type the following for usage info:
python graphdocpy.py -h
"""
__version__ = '0.8'
import sys
sys.path.insert(0, '.')
import os, re, types, string, getopt, pickle, copy, time, pprint, traceback
from string import find, join, split, replace, expandtabs, rstrip
import reportlab
from reportlab import rl_config
from docpy import PackageSkeleton0, ModuleSkeleton0
from docpy import DocBuilder0, PdfDocBuilder0, HtmlDocBuilder0
from docpy import htmlescape, htmlrepr, defaultformat, \
getdoc, reduceDocStringLength
from docpy import makeHtmlSection, makeHtmlSubSection, \
makeHtmlInlineImage
from reportlab.lib.units import inch, cm
from reportlab.lib.pagesizes import A4
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER, TA_LEFT
from reportlab.lib.utils import getStringIO
#from StringIO import StringIO
#getStringIO=StringIO
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.pdfgen import canvas
from reportlab.platypus.flowables import Flowable, Spacer
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.flowables \
import Flowable, Preformatted,Spacer, Image, KeepTogether, PageBreak
from reportlab.platypus.xpreformatted import XPreformatted
from reportlab.platypus.frames import Frame
from reportlab.platypus.doctemplate \
import PageTemplate, BaseDocTemplate
from reportlab.platypus.tables import TableStyle, Table
from reportlab.graphics.shapes import NotImplementedError
import inspect
# Needed to draw Widget/Drawing demos.
from reportlab.graphics.widgetbase import Widget
from reportlab.graphics.shapes import Drawing
from reportlab.graphics import shapes
from reportlab.graphics import renderPDF
# Verbosity mirrors reportlab's global rl_config setting.
VERBOSE = rl_config.verbose
VERIFY = 1
# Matches the message raised when an abstract widget class is instantiated.
_abstractclasserr_re = re.compile(r'^\s*abstract\s*class\s*(\w+)\s*instantiated',re.I)
####################################################################
#
# Stuff needed for building PDF docs.
#
####################################################################
def mainPageFrame(canvas, doc):
    "The page frame used for all PDF documents."
    canvas.saveState()
    pageNumber = canvas.getPageNumber()
    # Horizontal rules at the top and bottom margins of an A4 page.
    canvas.line(2*cm, A4[1]-2*cm, A4[0]-2*cm, A4[1]-2*cm)
    canvas.line(2*cm, 2*cm, A4[0]-2*cm, 2*cm)
    # Page number and running header are skipped on the title page.
    if pageNumber > 1:
        canvas.setFont('Times-Roman', 12)
        canvas.drawString(4 * inch, cm, "%d" % pageNumber)
        if hasattr(canvas, 'headerLine'): # hackish
            # MODERNIZED: str.join replaces Python-2-only string.join(),
            # producing the identical joined header string.
            headerline = ' \xc2\x8d '.join(canvas.headerLine)
            canvas.drawString(2*cm, A4[1]-1.75*cm, headerline)
    canvas.setFont('Times-Roman', 8)
    msg = "Generated with docpy. See http://www.reportlab.com!"
    canvas.drawString(2*cm, 1.65*cm, msg)
    canvas.restoreState()
class MyTemplate(BaseDocTemplate):
    "The document template used for all PDF documents."
    _invalidInitArgs = ('pageTemplates',)

    def __init__(self, filename, **kw):
        # Single content frame per page; flowable splitting is disabled.
        frame1 = Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')
        self.allowSplitting = 0
        # Python 2 apply(); same as BaseDocTemplate.__init__(self, filename, **kw).
        apply(BaseDocTemplate.__init__, (self, filename), kw)
        self.addPageTemplates(PageTemplate('normal', [frame1], mainPageFrame))

    def afterFlowable(self, flowable):
        "Takes care of header line, TOC and outline entries."
        if flowable.__class__.__name__ == 'Paragraph':
            f = flowable
            # Build a list of heading parts.
            # So far, this is the *last* item on the *previous* page...
            if f.style.name[:8] == 'Heading0':
                # New chapter: reset the running header to just this title.
                self.canv.headerLine = [f.text] # hackish
            elif f.style.name[:8] == 'Heading1':
                # Trim deeper heading levels before appending the new H1.
                if len(self.canv.headerLine) == 2:
                    del self.canv.headerLine[-1]
                elif len(self.canv.headerLine) == 3:
                    del self.canv.headerLine[-1]
                    del self.canv.headerLine[-1]
                self.canv.headerLine.append(f.text)
            elif f.style.name[:8] == 'Heading2':
                if len(self.canv.headerLine) == 3:
                    del self.canv.headerLine[-1]
                self.canv.headerLine.append(f.text)
            if f.style.name[:7] == 'Heading':
                # Register TOC entries.
                headLevel = int(f.style.name[7:])
                self.notify('TOCEntry', (headLevel, flowable.getPlainText(), self.page))
                # Add PDF outline entries.
                c = self.canv
                title = f.text
                key = str(hash(f))
                lev = int(f.style.name[7:])
                try:
                    # Only top-level outline entries start expanded.
                    if lev == 0:
                        isClosed = 0
                    else:
                        isClosed = 1
                    c.bookmarkPage(key)
                    c.addOutlineEntry(title, key, level=lev, closed=isClosed)
                    c.showOutline()
                except:
                    if VERBOSE:
                        # AR hacking in exception handlers
                        print 'caught exception in MyTemplate.afterFlowable with heading text %s' % f.text
                        traceback.print_exc()
                    else:
                        pass
####################################################################
#
# Utility functions
#
####################################################################
def indentLevel(line, spacesPerTab=4):
    """Return the indentation depth, in columns, at the start of *line*.

    A space advances one column; a tab advances to the next tab stop,
    where tab stops sit every *spacesPerTab* columns (default 4).
    Returns the column of the first non-whitespace character.  A line
    that is empty or entirely whitespace returns the indent accumulated
    so far (the original fell off the loop and returned None).
    """
    x = 0
    for ch in line:
        if ch == ' ':
            x = x + 1
        elif ch == '\t':
            # Jump to the next multiple of spacesPerTab.  The original kept a
            # separate nextTab counter seeded with a hard-coded 4 (ignoring
            # the spacesPerTab argument) that never advanced past preceding
            # spaces, so e.g. five spaces followed by a tab yielded 4 --
            # moving *backwards*.  This formula fixes both defects.
            x = ((x // spacesPerTab) + 1) * spacesPerTab
        else:
            return x
    return x
assert indentLevel('hello') == 0, 'error in indentLevel'
assert indentLevel(' hello') == 1, 'error in indentLevel'
assert indentLevel('  hello') == 2, 'error in indentLevel'
assert indentLevel('   hello') == 3, 'error in indentLevel'
assert indentLevel('\thello') == 4, 'error in indentLevel'
assert indentLevel(' \thello') == 4, 'error in indentLevel'
assert indentLevel('\t hello') == 5, 'error in indentLevel'
####################################################################
#
# Special-purpose document builders
#
####################################################################
class GraphPdfDocBuilder0(PdfDocBuilder0):
    """A PDF document builder displaying widgets and drawings.

    This generates a PDF file where only methods named 'demo' are
    listed for any class C. If C happens to be a subclass of Widget
    and has a 'demo' method, this method is assumed to generate and
    return a sample widget instance, that is then appended graphi-
    cally to the Platypus story.

    Something similar happens for functions. If their names start
    with 'sample' they are supposed to generate and return a sample
    drawing. This is then taken and appended graphically to the
    Platypus story, as well.
    """
    fileSuffix = '.pdf'
    def begin(self, name='', typ=''):
        # Set up paragraph styles and open the story with a cover page
        # followed by a table of contents.
        styleSheet = getSampleStyleSheet()
        self.code = styleSheet['Code']
        self.bt = styleSheet['BodyText']
        self.story = []
        # Cover page
        t = time.gmtime(time.time())
        timeString = time.strftime("%Y-%m-%d %H:%M", t)
        self.story.append(Paragraph('<font size=18>Documentation for %s "%s"</font>' % (typ, name), self.bt))
        self.story.append(Paragraph('<font size=18>Generated by: graphdocpy.py version %s</font>' % __version__, self.bt))
        self.story.append(Paragraph('<font size=18>Date generated: %s</font>' % timeString, self.bt))
        self.story.append(Paragraph('<font size=18>Format: PDF</font>', self.bt))
        self.story.append(PageBreak())
        # Table of contents
        toc = TableOfContents()
        self.story.append(toc)
        self.story.append(PageBreak())
    def end(self, fileName=None):
        # Pick the output path: explicit argument wins, then package name,
        # then module name; without any of those nothing is written.
        if fileName: # overrides output path
            self.outPath = fileName
        elif self.packageName:
            self.outPath = self.packageName + self.fileSuffix
        elif self.skeleton:
            self.outPath = self.skeleton.getModuleName() + self.fileSuffix
        else:
            self.outPath = ''
        if self.outPath:
            doc = MyTemplate(self.outPath)
            doc.multiBuild(self.story)
    def beginModule(self, name, doc, imported):
        story = self.story
        bt = self.bt
        # Defer displaying the module header info to later...
        # (it is only emitted once a Widget/Drawing subclass is met, see
        # beginClass, so modules without graphics stay out of the PDF).
        self.shouldDisplayModule = (name, doc, imported)
        self.hasDisplayedModule = 0
    def endModule(self, name, doc, imported):
        # Only close the module section if its header was ever emitted.
        if self.hasDisplayedModule:
            DocBuilder0.endModule(self, name, doc, imported)
    def beginClasses(self, names):
        # Defer displaying the module header info to later...
        if self.shouldDisplayModule:
            self.shouldDisplayClasses = names
    # Skip all methods.
    def beginMethod(self, name, doc, sig):
        pass
    def endMethod(self, name, doc, sig):
        pass
    def beginClass(self, name, doc, bases):
        "Append a graphic demo of a Widget or Drawing at the end of a class."
        if VERBOSE:
            print 'GraphPdfDocBuilder.beginClass(%s...)' % name
        # Resolve the class object inside the inspected module's namespace.
        # NOTE(review): eval of a dotted name -- safe only because 'name'
        # comes from introspection of the module being documented.
        aClass = eval('self.skeleton.moduleSpace.' + name)
        if issubclass(aClass, Widget):
            # First Widget in this module: flush the deferred module header.
            if self.shouldDisplayModule:
                modName, modDoc, imported = self.shouldDisplayModule
                self.story.append(Paragraph(modName, self.makeHeadingStyle(self.indentLevel-2, 'module')))
                self.story.append(XPreformatted(modDoc, self.bt))
                self.shouldDisplayModule = 0
                self.hasDisplayedModule = 1
                if self.shouldDisplayClasses:
                    self.story.append(Paragraph('Classes', self.makeHeadingStyle(self.indentLevel-1)))
                    self.shouldDisplayClasses = 0
            PdfDocBuilder0.beginClass(self, name, doc, bases)
            self.beginAttributes(aClass)
        elif issubclass(aClass, Drawing):
            # Same deferred-header handling for Drawing subclasses.
            if self.shouldDisplayModule:
                modName, modDoc, imported = self.shouldDisplayModule
                self.story.append(Paragraph(modName, self.makeHeadingStyle(self.indentLevel-2, 'module')))
                self.story.append(XPreformatted(modDoc, self.bt))
                self.shouldDisplayModule = 0
                self.hasDisplayedModule = 1
                if self.shouldDisplayClasses:
                    self.story.append(Paragraph('Classes', self.makeHeadingStyle(self.indentLevel-1)))
                    self.shouldDisplayClasses = 0
            PdfDocBuilder0.beginClass(self, name, doc, bases)
    def beginAttributes(self, aClass):
        "Append a list of annotated attributes of a class."
        self.story.append(Paragraph(
            'Public Attributes',
            self.makeHeadingStyle(self.indentLevel+1)))
        # NOTE(review): local 'map' shadows the builtin for this method.
        map = aClass._attrMap
        if map:
            map = map.items()
            map.sort()
        else:
            map = []
        for name, typ in map:
            # Describe each attribute via its validator's .desc, falling back
            # to the validator's class name, or '<i>None</i>' if untyped.
            if typ != None:
                if hasattr(typ, 'desc'):
                    desc = typ.desc
                else:
                    desc = '<i>%s</i>' % typ.__class__.__name__
            else:
                desc = '<i>None</i>'
            self.story.append(Paragraph(
                "<b>%s</b> %s" % (name, desc), self.bt))
        self.story.append(Paragraph("", self.bt))
    def endClass(self, name, doc, bases):
        "Append a graphic demo of a Widget or Drawing at the end of a class."
        PdfDocBuilder0.endClass(self, name, doc, bases)
        aClass = eval('self.skeleton.moduleSpace.' + name)
        if hasattr(aClass, '_nodoc'):
            pass
        elif issubclass(aClass, Widget):
            try:
                widget = aClass()
            except AssertionError, err:
                # Abstract widget classes refuse instantiation; skip them.
                if _abstractclasserr_re.match(str(err)): return
                raise
            self.story.append(Spacer(0*cm, 0.5*cm))
            self._showWidgetDemoCode(widget)
            self.story.append(Spacer(0*cm, 0.5*cm))
            self._showWidgetDemo(widget)
            self.story.append(Spacer(0*cm, 0.5*cm))
            self._showWidgetProperties(widget)
            self.story.append(PageBreak())
        elif issubclass(aClass, Drawing):
            drawing = aClass()
            self.story.append(Spacer(0*cm, 0.5*cm))
            self._showDrawingCode(drawing)
            self.story.append(Spacer(0*cm, 0.5*cm))
            self._showDrawingDemo(drawing)
            self.story.append(Spacer(0*cm, 0.5*cm))
    def beginFunctions(self, names):
        # NOTE(review): 'srch' is computed but never used -- the join is
        # recomputed inline in the condition below.
        srch = string.join(names, ' ')
        if string.find(string.join(names, ' '), ' sample') > -1:
            PdfDocBuilder0.beginFunctions(self, names)
    # Skip non-sample functions.
    def beginFunction(self, name, doc, sig):
        "Skip function for 'uninteresting' names."
        if name[:6] == 'sample':
            PdfDocBuilder0.beginFunction(self, name, doc, sig)
    def endFunction(self, name, doc, sig):
        "Append a drawing to the story for special function names."
        if name[:6] != 'sample':
            return
        if VERBOSE:
            print 'GraphPdfDocBuilder.endFunction(%s...)' % name
        PdfDocBuilder0.endFunction(self, name, doc, sig)
        aFunc = eval('self.skeleton.moduleSpace.' + name)
        drawing = aFunc()
        self.story.append(Spacer(0*cm, 0.5*cm))
        self._showFunctionDemoCode(aFunc)
        self.story.append(Spacer(0*cm, 0.5*cm))
        self._showDrawingDemo(drawing)
        self.story.append(PageBreak())
    def _showFunctionDemoCode(self, function):
        """Show a demo code of the function generating the drawing."""
        # Heading
        self.story.append(Paragraph("<i>Example</i>", self.bt))
        self.story.append(Paragraph("", self.bt))
        # Sample code
        codeSample = inspect.getsource(function)
        self.story.append(Preformatted(codeSample, self.code))
    def _showDrawingCode(self, drawing):
        """Show code of the drawing class."""
        # Heading
        #className = drawing.__class__.__name__
        self.story.append(Paragraph("<i>Example</i>", self.bt))
        # Sample code
        codeSample = inspect.getsource(drawing.__class__.__init__)
        self.story.append(Preformatted(codeSample, self.code))
    def _showDrawingDemo(self, drawing):
        """Show a graphical demo of the drawing."""
        # Add the given drawing to the story.
        # Ignored if no GD rendering available
        # or the demo method does not return a drawing.
        try:
            flo = renderPDF.GraphicsFlowable(drawing)
            self.story.append(Spacer(6,6))
            self.story.append(flo)
            self.story.append(Spacer(6,6))
        except:
            # Best-effort rendering; failures only reported in verbose mode.
            if VERBOSE:
                print 'caught exception in _showDrawingDemo'
                traceback.print_exc()
            else:
                pass
    def _showWidgetDemo(self, widget):
        """Show a graphical demo of the widget."""
        # Get a demo drawing from the widget and add it to the story.
        # Ignored if no GD rendering available
        # or the demo method does not return a drawing.
        try:
            if VERIFY:
                widget.verify()
            drawing = widget.demo()
            flo = renderPDF.GraphicsFlowable(drawing)
            self.story.append(Spacer(6,6))
            self.story.append(flo)
            self.story.append(Spacer(6,6))
        except:
            if VERBOSE:
                print 'caught exception in _showWidgetDemo'
                traceback.print_exc()
            else:
                pass
    def _showWidgetDemoCode(self, widget):
        """Show a demo code of the widget."""
        # Heading
        #className = widget.__class__.__name__
        self.story.append(Paragraph("<i>Example</i>", self.bt))
        # Sample code
        codeSample = inspect.getsource(widget.__class__.demo)
        self.story.append(Preformatted(codeSample, self.code))
    def _showWidgetProperties(self, widget):
        """Dump all properties of a widget."""
        props = widget.getProperties()
        keys = props.keys()
        keys.sort()
        lines = []
        for key in keys:
            value = props[key]
            # Pretty-print the value and indent continuation lines so they
            # line up under 'key = '.
            # NOTE(review): relies on 'import pprint' and a 'getStringIO'
            # helper defined earlier in the file (outside this excerpt).
            f = getStringIO()
            pprint.pprint(value, f)
            value = f.getvalue()[:-1]
            valueLines = string.split(value, '\n')
            for i in range(1, len(valueLines)):
                valueLines[i] = ' '*(len(key)+3) + valueLines[i]
            value = string.join(valueLines, '\n')
            lines.append('%s = %s' % (key, value))
        # NOTE(review): bare 'join' is presumably string.join brought in by a
        # from-import at the top of the file -- confirm.
        text = join(lines, '\n')
        self.story.append(Paragraph("<i>Properties of Example Widget</i>", self.bt))
        self.story.append(Paragraph("", self.bt))
        self.story.append(Preformatted(text, self.code))
class GraphHtmlDocBuilder0(HtmlDocBuilder0):
    "A class to write the skeleton of a Python source."
    # HTML counterpart of GraphPdfDocBuilder0: renders widget demos and
    # sample drawings to JPEG files and inlines them into the HTML output.
    fileSuffix = '.html'
    def beginModule(self, name, doc, imported):
        # Defer displaying the module header info to later...
        # (only emitted once a Widget subclass is met, see beginClass).
        self.shouldDisplayModule = (name, doc, imported)
        self.hasDisplayedModule = 0
    def endModule(self, name, doc, imported):
        # Only close the module section if its header was ever emitted.
        if self.hasDisplayedModule:
            HtmlDocBuilder0.endModule(self, name, doc, imported)
    def beginClasses(self, names):
        # Defer displaying the module header info to later...
        if self.shouldDisplayModule:
            self.shouldDisplayClasses = names
    # Skip all methods.
    def beginMethod(self, name, doc, sig):
        pass
    def endMethod(self, name, doc, sig):
        pass
    def beginClass(self, name, doc, bases):
        "Append a graphic demo of a widget at the end of a class."
        # NOTE(review): eval of a dotted name from module introspection.
        aClass = eval('self.skeleton.moduleSpace.' + name)
        if issubclass(aClass, Widget):
            # First Widget in this module: flush the deferred module header.
            if self.shouldDisplayModule:
                modName, modDoc, imported = self.shouldDisplayModule
                self.outLines.append('<H2>%s</H2>' % modName)
                self.outLines.append('<PRE>%s</PRE>' % modDoc)
                self.shouldDisplayModule = 0
                self.hasDisplayedModule = 1
                if self.shouldDisplayClasses:
                    self.outLines.append('<H2>Classes</H2>')
                    self.shouldDisplayClasses = 0
            HtmlDocBuilder0.beginClass(self, name, doc, bases)
    def endClass(self, name, doc, bases):
        "Append a graphic demo of a widget at the end of a class."
        HtmlDocBuilder0.endClass(self, name, doc, bases)
        aClass = eval('self.skeleton.moduleSpace.' + name)
        if issubclass(aClass, Widget):
            # NOTE(review): unlike the PDF builder, no handling here for
            # abstract widgets (AssertionError) or Drawing subclasses.
            widget = aClass()
            self._showWidgetDemoCode(widget)
            self._showWidgetDemo(widget)
            self._showWidgetProperties(widget)
    def beginFunctions(self, names):
        # Only open a functions section when at least one 'sample*' exists.
        if string.find(string.join(names, ' '), ' sample') > -1:
            HtmlDocBuilder0.beginFunctions(self, names)
    # Skip non-sample functions.
    def beginFunction(self, name, doc, sig):
        "Skip function for 'uninteresting' names."
        if name[:6] == 'sample':
            HtmlDocBuilder0.beginFunction(self, name, doc, sig)
    def endFunction(self, name, doc, sig):
        "Append a drawing to the story for special function names."
        if name[:6] != 'sample':
            return
        HtmlDocBuilder0.endFunction(self, name, doc, sig)
        aFunc = eval('self.skeleton.moduleSpace.' + name)
        drawing = aFunc()
        self._showFunctionDemoCode(aFunc)
        self._showDrawingDemo(drawing, aFunc.__name__)
    def _showFunctionDemoCode(self, function):
        """Show a demo code of the function generating the drawing."""
        # Heading
        self.outLines.append('<H3>Example</H3>')
        # Sample code
        codeSample = inspect.getsource(function)
        self.outLines.append('<PRE>%s</PRE>' % codeSample)
    def _showDrawingDemo(self, drawing, name):
        """Show a graphical demo of the drawing."""
        # Add the given drawing to the story.
        # Ignored if no GD rendering available
        # or the demo method does not return a drawing.
        try:
            from reportlab.graphics import renderPM
            modName = self.skeleton.getModuleName()
            path = '%s-%s.jpg' % (modName, name)
            renderPM.drawToFile(drawing, path, fmt='JPG')
            self.outLines.append('<H3>Demo</H3>')
            self.outLines.append(makeHtmlInlineImage(path))
        except:
            # Best-effort rendering; failures only reported in verbose mode.
            if VERBOSE:
                print 'caught exception in GraphHTMLDocBuilder._showDrawingDemo'
                traceback.print_exc()
            else:
                pass
    def _showWidgetDemo(self, widget):
        """Show a graphical demo of the widget."""
        # Get a demo drawing from the widget and add it to the story.
        # Ignored if no GD rendering available
        # or the demo method does not return a drawing.
        try:
            from reportlab.graphics import renderPM
            drawing = widget.demo()
            if VERIFY:
                widget.verify()
            modName = self.skeleton.getModuleName()
            path = '%s-%s.jpg' % (modName, widget.__class__.__name__)
            renderPM.drawToFile(drawing, path, fmt='JPG')
            self.outLines.append('<H3>Demo</H3>')
            self.outLines.append(makeHtmlInlineImage(path))
        except:
            if VERBOSE:
                print 'caught exception in GraphHTMLDocBuilder._showWidgetDemo'
                traceback.print_exc()
            else:
                pass
    def _showWidgetDemoCode(self, widget):
        """Show a demo code of the widget."""
        # Heading
        #className = widget.__class__.__name__
        self.outLines.append('<H3>Example Code</H3>')
        # Sample code
        codeSample = inspect.getsource(widget.__class__.demo)
        self.outLines.append('<PRE>%s</PRE>' % codeSample)
        self.outLines.append('')
    def _showWidgetProperties(self, widget):
        """Dump all properties of a widget."""
        props = widget.getProperties()
        keys = props.keys()
        keys.sort()
        lines = []
        for key in keys:
            value = props[key]
            # Method 3
            # Pretty-print the value and indent continuation lines so they
            # line up under 'key = '.
            # NOTE(review): relies on 'import pprint' and a 'getStringIO'
            # helper defined earlier in the file (outside this excerpt).
            f = getStringIO()
            pprint.pprint(value, f)
            value = f.getvalue()[:-1]
            valueLines = string.split(value, '\n')
            for i in range(1, len(valueLines)):
                valueLines[i] = ' '*(len(key)+3) + valueLines[i]
            value = string.join(valueLines, '\n')
            lines.append('%s = %s' % (key, value))
        # NOTE(review): bare 'join' is presumably string.join from a
        # from-import outside this excerpt -- confirm.
        text = join(lines, '\n')
        self.outLines.append('<H3>Properties of Example Widget</H3>')
        self.outLines.append('<PRE>%s</PRE>' % text)
        self.outLines.append('')
# Highly experimental!
class PlatypusDocBuilder0(DocBuilder0):
    "Document the skeleton of a Python module as a Platypus story."
    fileSuffix = '.pps' # A pickled Platypus story.
    def begin(self, name='', typ=''):
        # Cache the two styles used throughout and start an empty story.
        stylesheet = getSampleStyleSheet()
        self.code = stylesheet['Code']
        self.bt = stylesheet['BodyText']
        self.story = []
    def end(self):
        # Derive the output file name from the package or module documented;
        # write nothing when no name can be derived.
        if self.packageName:
            self.outPath = self.packageName + self.fileSuffix
        elif self.skeleton:
            self.outPath = self.skeleton.getModuleName() + self.fileSuffix
        else:
            self.outPath = ''
        if not self.outPath:
            return
        out = open(self.outPath, 'w')
        pickle.dump(self.story, out)
    def beginPackage(self, name):
        DocBuilder0.beginPackage(self, name)
        self.story.append(Paragraph(name, self.bt))
    def beginModule(self, name, doc, imported):
        # Module name as body text, docstring preformatted below it.
        self.story.append(Paragraph(name, self.bt))
        self.story.append(XPreformatted(doc, self.bt))
    def beginClasses(self, names):
        self.story.append(Paragraph('Classes', self.bt))
    def beginClass(self, name, doc, bases):
        # Render 'Name(Base1, Base2)' when base classes are known.
        if bases:
            baseNames = [b.__name__ for b in bases] # hack
            headline = '%s(%s)' % (name, join(baseNames, ', '))
        else:
            headline = name
        self.story.append(Paragraph(headline, self.bt))
        self.story.append(XPreformatted(doc, self.bt))
    def beginMethod(self, name, doc, sig):
        self.story.append(Paragraph(name + sig, self.bt))
        self.story.append(XPreformatted(doc, self.bt))
    def beginFunctions(self, names):
        if names:
            self.story.append(Paragraph('Functions', self.bt))
    def beginFunction(self, name, doc, sig):
        self.story.append(Paragraph(name + sig, self.bt))
        self.story.append(XPreformatted(doc, self.bt))
####################################################################
#
# Main
#
####################################################################
def printUsage():
    # NOTE: this docstring doubles as the command-line help text -- main()
    # prints printUsage.__doc__ verbatim on -h -- so it is user-visible
    # runtime output and must not be reworded casually.
    """graphdocpy.py - Automated documentation for the RL Graphics library.
Usage: python graphdocpy.py [options]
[options]
-h Print this help message.
-f name Use the document builder indicated by 'name',
e.g. Html, Pdf.
-m module Generate document for module named 'module'.
'module' may follow any of these forms:
- docpy.py
- docpy
- c:\\test\\docpy
and can be any of these:
- standard Python modules
- modules in the Python search path
- modules in the current directory
-p package Generate document for package named 'package'
(default is 'reportlab.graphics').
'package' may follow any of these forms:
- reportlab
- reportlab.graphics.charts
- c:\\test\\reportlab
and can be any of these:
- standard Python packages (?)
- packages in the Python search path
- packages in the current directory
-s Silent mode (default is unset).
Examples:
python graphdocpy.py reportlab.graphics
python graphdocpy.py -m signsandsymbols.py -f Pdf
python graphdocpy.py -m flags.py -f Html
python graphdocpy.py -m barchart1.py
"""
# The following functions, including main(), are actually
# the same as in docpy.py (except for some defaults).
def documentModule0(pathOrName, builder, opts={}):
    """Generate documentation for one Python file in some format.

    This handles Python standard modules like string, custom modules
    on the Python search path like e.g. docpy as well as modules
    specified with their full path like C:/tmp/junk.py.

    The doc file will always be saved in the current directory with
    a basename equal to that of the module, e.g. docpy.

    Args (interface unchanged):
      pathOrName: module name, module file name, or full path to a .py file.
      builder: a DocBuilder0-style object; its write() renders the skeleton.
      opts: unused option dictionary, kept for API compatibility.
    """
    cwd = os.getcwd()
    # Append directory to Python search path if we get one.
    dirName = os.path.dirname(pathOrName)
    if dirName:
        sys.path.append(dirName)
    try:
        # Remove .py extension from module name.
        if pathOrName[-3:] == '.py':
            modname = pathOrName[:-3]
        else:
            modname = pathOrName
        # Remove directory paths from module name.
        if dirName:
            modname = os.path.basename(modname)
        # Load the module.
        try:
            module = __import__(modname)
        except:
            # Deliberately broad: any import failure just skips this module.
            print('Failed to import %s.' % modname)
            os.chdir(cwd)
            return
        # Do the real documentation work.
        s = ModuleSkeleton0()
        s.inspect(module)
        builder.write(s)
    finally:
        # BUG FIX: the original removed the appended path only on the success
        # path, permanently leaking a sys.path entry whenever the import
        # failed (the early return above skipped the cleanup).
        if dirName:
            del sys.path[-1]
    os.chdir(cwd)
def _packageWalkCallback((builder, opts), dirPath, files):
"A callback function used when waking over a package tree."
#must CD into a directory to document the module correctly
cwd = os.getcwd()
os.chdir(dirPath)
# Skip __init__ files.
files = filter(lambda f:f != '__init__.py', files)
files = filter(lambda f:f[-3:] == '.py', files)
for f in files:
path = os.path.join(dirPath, f)
## if not opts.get('isSilent', 0):
## print path
builder.indentLevel = builder.indentLevel + 1
#documentModule0(path, builder)
documentModule0(f, builder)
builder.indentLevel = builder.indentLevel - 1
#CD back out
os.chdir(cwd)
def documentPackage0(pathOrName, builder, opts={}):
    """Generate documentation for one Python package in some format.

    'pathOrName' can be either a filesystem path leading to a Python
    package or package name whose path will be resolved by importing
    the top-level module.

    The doc file will always be saved in the current directory with
    a basename equal to that of the package, e.g. reportlab.lib.
    """
    # Did we get a package path with OS-dependant seperators...?
    if os.sep in pathOrName:
        path = pathOrName
        name = os.path.splitext(os.path.basename(path))[0]
    # ... or rather a package name?
    else:
        name = pathOrName
        package = __import__(name)
        # Some special care needed for dotted names.
        if '.' in name:
            # NOTE(review): bare 'find' is presumably string.find imported at
            # the top of the file (outside this excerpt) -- confirm.  This
            # builds e.g. 'package.graphics' from 'reportlab.graphics' and
            # evaluates it to reach the subpackage object, since __import__
            # of a dotted name returns the top-level package.
            subname = 'package' + name[find(name, '.'):]
            package = eval(subname)
        path = os.path.dirname(package.__file__)
    cwd = os.getcwd()
    os.chdir(path)
    builder.beginPackage(name)
    # os.path.walk is the Python 2 directory walker; the callback documents
    # every .py file found under the package directory.
    os.path.walk(path, _packageWalkCallback, (builder, opts))
    builder.endPackage(name)
    os.chdir(cwd)
def makeGraphicsReference(outfilename):
    "Make graphics_reference.pdf"
    # Document the whole reportlab.graphics package with the PDF builder
    # and write the result to outfilename.
    pdfBuilder = GraphPdfDocBuilder0()
    pdfBuilder.begin(name='reportlab.graphics', typ='package')
    documentPackage0('reportlab.graphics', pdfBuilder, {'isSilent': 0})
    pdfBuilder.end(outfilename)
    print('made graphics reference in %s' % outfilename)
def main():
    "Handle command-line options and trigger corresponding action."
    # Recognised flags: -h help, -s silent, -f format, -m module, -p package.
    opts, args = getopt.getopt(sys.argv[1:], 'hsf:m:p:')
    # Make an options dictionary that is easier to use.
    optsDict = {}
    for k, v in opts:
        optsDict[k] = v
    hasOpt = optsDict.has_key
    # On -h print usage and exit immediately.
    if hasOpt('-h'):
        print printUsage.__doc__
        sys.exit(0)
    # On -s set silent mode.
    isSilent = hasOpt('-s')
    # On -f set the appropriate DocBuilder to use or a default one.
    # NOTE(review): an unknown -f value raises KeyError here.
    builder = { 'Pdf': GraphPdfDocBuilder0,
                'Html': GraphHtmlDocBuilder0,
                }[optsDict.get('-f', 'Pdf')]()
    # Set default module or package to document.
    if not hasOpt('-p') and not hasOpt('-m'):
        optsDict['-p'] = 'reportlab.graphics'
    # Save a few options for further use.
    options = {'isSilent':isSilent}
    # Now call the real documentation functions.
    # -m (module) takes precedence over -p (package).
    if hasOpt('-m'):
        nameOrPath = optsDict['-m']
        if not isSilent:
            print "Generating documentation for module %s..." % nameOrPath
        builder.begin(name=nameOrPath, typ='module')
        documentModule0(nameOrPath, builder, options)
    elif hasOpt('-p'):
        nameOrPath = optsDict['-p']
        if not isSilent:
            print "Generating documentation for package %s..." % nameOrPath
        builder.begin(name=nameOrPath, typ='package')
        documentPackage0(nameOrPath, builder, options)
    builder.end()
    if not isSilent:
        print "Saved %s." % builder.outPath
    #if doing the usual, put a copy in docs
    if builder.outPath == 'reportlab.graphics.pdf':
        import shutil, reportlab
        dst = os.path.join(os.path.dirname(reportlab.__file__),'docs','graphics_reference.pdf')
        shutil.copyfile('reportlab.graphics.pdf', dst)
        if not isSilent:
            print 'copied to '+dst
def makeSuite():
    "standard test harness support - run self as separate process"
    # Run this script through the reportlab test utility and verify that it
    # produces reportlab.graphics.pdf.
    from reportlab.test.utils import ScriptThatMakesFileTest
    return ScriptThatMakesFileTest(
        'tools/docco', 'graphdocpy.py', 'reportlab.graphics.pdf')
# Standard script entry point: parse CLI options and generate documentation.
if __name__ == '__main__':
    main()
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import contextlib
import errno
import itertools
import os
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from grpc.framework.alpha import exceptions
from grpc.framework.foundation import future
# Identifiers of entities we expect to find in the generated module.
SERVICER_IDENTIFIER = 'EarlyAdopterTestServiceServicer'
SERVER_IDENTIFIER = 'EarlyAdopterTestServiceServer'
STUB_IDENTIFIER = 'EarlyAdopterTestServiceStub'
SERVER_FACTORY_IDENTIFIER = 'early_adopter_create_TestService_server'
STUB_FACTORY_IDENTIFIER = 'early_adopter_create_TestService_stub'
# Timeouts and delays, in seconds.
SHORT_TIMEOUT = 0.1
NORMAL_TIMEOUT = 1
LONG_TIMEOUT = 2
DOES_NOT_MATTER_DELAY = 0
NO_DELAY = 0
LONG_DELAY = 1
# Build mode environment variable set by tools/run_tests/run_tests.py.
# NOTE(review): raises KeyError at import time if CONFIG is unset -- assumed
# to always be exported by the test runner.
_build_mode = os.environ['CONFIG']
class _ServicerMethods(object):
def __init__(self, test_pb2, delay):
self._paused = False
self._failed = False
self.test_pb2 = test_pb2
self.delay = delay
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
self._paused = True
yield
self._paused = False
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
self._failed = True
yield
self._failed = False
def _control(self): # pylint: disable=invalid-name
if self._failed:
raise ValueError()
time.sleep(self.delay)
while self._paused:
time.sleep(0)
def UnaryCall(self, request, unused_context):
response = self.test_pb2.SimpleResponse()
response.payload.payload_type = self.test_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_context):
for parameter in request.response_parameters:
response = self.test_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self.test_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_context):
response = self.test_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, unused_context):
for request in request_iter:
for parameter in request.response_parameters:
response = self.test_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self.test_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, unused_context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = self.test_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self.test_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
@contextlib.contextmanager
def _CreateService(test_pb2, delay):
    """Provides a servicer backend and a stub.

    The servicer is just the implementation of the actual servicer passed
    to the face player of the python RPC implementation; the two are
    detached.

    Non-zero delay puts a delay on each call to the servicer,
    representative of communication latency.

    Args:
      test_pb2: the test_pb2 module generated by this test.
      delay: delay in seconds per response from the servicer.

    Yields:
      A three-tuple (servicer_methods, stub, server): the delay-controlled
      servicer implementation, an activated stub connected to it, and the
      running server.  (The original docstring documented a nonexistent
      'timeout' argument and listed the tuple as (servicer_methods,
      servicer, stub); neither matched the code.)
    """
    servicer_methods = _ServicerMethods(test_pb2, delay)
    # Subclass the generated servicer base and delegate every method to the
    # controllable _ServicerMethods instance.
    class Servicer(getattr(test_pb2, SERVICER_IDENTIFIER)):
        def UnaryCall(self, request, context):
            return servicer_methods.UnaryCall(request, context)
        def StreamingOutputCall(self, request, context):
            return servicer_methods.StreamingOutputCall(request, context)
        def StreamingInputCall(self, request_iter, context):
            return servicer_methods.StreamingInputCall(request_iter, context)
        def FullDuplexCall(self, request_iter, context):
            return servicer_methods.FullDuplexCall(request_iter, context)
        def HalfDuplexCall(self, request_iter, context):
            return servicer_methods.HalfDuplexCall(request_iter, context)
    servicer = Servicer()
    # Port 0 lets the server pick a free port; no TLS credentials.
    server = getattr(
        test_pb2, SERVER_FACTORY_IDENTIFIER)(servicer, 0, None, None)
    with server:
        port = server.port()
        stub = getattr(test_pb2, STUB_FACTORY_IDENTIFIER)('localhost', port)
        with stub:
            yield servicer_methods, stub, server
def StreamingInputRequest(test_pb2):
    """Yield three identical one-byte compressable streaming-input requests."""
    for _ in (0, 1, 2):
        streaming_request = test_pb2.StreamingInputCallRequest()
        streaming_request.payload.payload_type = test_pb2.COMPRESSABLE
        streaming_request.payload.payload_compressable = 'a'
        yield streaming_request
def StreamingOutputRequest(test_pb2):
    """Build one streaming-output request asking for responses of sizes 1, 2, 3."""
    request = test_pb2.StreamingOutputCallRequest()
    for size in (1, 2, 3):
        request.response_parameters.add(size=size, interval_us=0)
    return request
def FullDuplexRequest(test_pb2):
    """Yield two requests: one asking for a single size-1 response, then one
    asking for responses of sizes 2 and 3."""
    first = test_pb2.StreamingOutputCallRequest()
    first.response_parameters.add(size=1, interval_us=0)
    yield first
    second = test_pb2.StreamingOutputCallRequest()
    second.response_parameters.add(size=2, interval_us=0)
    second.response_parameters.add(size=3, interval_us=0)
    yield second
class PythonPluginTest(unittest.TestCase):
  """Test case for the gRPC Python protoc-plugin.

  While reading these tests, remember that the futures API
  (`stub.method.async()`) only gives futures for the *non-streaming* responses,
  else it behaves like its blocking cousin.
  """
  # NOTE(review): Python 2-era code -- `stub.X.async(...)` is a syntax error on
  # Python 3 (where `async` became a keyword), `time.clock()` was removed in
  # Python 3.8, and `itertools.izip_longest` is py2-only.  Confirm the target
  # interpreter before porting.

  def setUp(self):
    # Locate the locally built protoc and grpc python plugin under ../../bins,
    # generate code from ./test.proto into a fresh temp dir, and make the
    # generated module importable.
    protoc_command = '../../bins/%s/protobuf/protoc' % _build_mode
    protoc_plugin_filename = '../../bins/%s/grpc_python_plugin' % _build_mode
    test_proto_filename = './test.proto'
    if not os.path.isfile(protoc_command):
      # Assume that if we haven't built protoc that it's on the system.
      protoc_command = 'protoc'
    # Ensure that the output directory exists.
    self.outdir = tempfile.mkdtemp()
    # Invoke protoc with the plugin.
    cmd = [
        protoc_command,
        '--plugin=protoc-gen-python-grpc=%s' % protoc_plugin_filename,
        '-I %s' % os.path.dirname(test_proto_filename),
        '--python_out=%s' % self.outdir,
        '--python-grpc_out=%s' % self.outdir,
        os.path.basename(test_proto_filename),
    ]
    # NOTE(review): shell=True with a joined command string; the arguments are
    # test-controlled constants, so keep them that way.
    subprocess.call(' '.join(cmd), shell=True)
    # Allow `import test_pb2` in the tests below to find the generated code.
    sys.path.append(self.outdir)

  def tearDown(self):
    # Best-effort removal of the generated-code directory; tolerate it being
    # already gone, re-raise anything else.
    try:
      shutil.rmtree(self.outdir)
    except OSError as exc:
      if exc.errno != errno.ENOENT:
        raise

  # TODO(atash): Figure out which of theses tests is hanging flakily with small
  # probability.

  def testImportAttributes(self):
    # check that we can access the generated module and its members.
    import test_pb2  # pylint: disable=g-import-not-at-top
    self.assertIsNotNone(getattr(test_pb2, SERVICER_IDENTIFIER, None))
    self.assertIsNotNone(getattr(test_pb2, SERVER_IDENTIFIER, None))
    self.assertIsNotNone(getattr(test_pb2, STUB_IDENTIFIER, None))
    self.assertIsNotNone(getattr(test_pb2, SERVER_FACTORY_IDENTIFIER, None))
    self.assertIsNotNone(getattr(test_pb2, STUB_FACTORY_IDENTIFIER, None))

  def testUpDown(self):
    import test_pb2
    # Exercises only service bring-up/tear-down; the request is constructed
    # merely to prove the generated message type is usable in the context.
    with _CreateService(
        test_pb2, DOES_NOT_MATTER_DELAY) as (servicer, stub, unused_server):
      request = test_pb2.SimpleRequest(response_size=13)

  def testUnaryCall(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    with _CreateService(test_pb2, NO_DELAY) as (servicer, stub, unused_server):
      request = test_pb2.SimpleRequest(response_size=13)
      response = stub.UnaryCall(request, NORMAL_TIMEOUT)
      # Invoking the servicer directly yields the reference answer.
      expected_response = servicer.UnaryCall(request, None)
      self.assertEqual(expected_response, response)

  def testUnaryCallAsync(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    request = test_pb2.SimpleRequest(response_size=13)
    with _CreateService(test_pb2, LONG_DELAY) as (
        servicer, stub, unused_server):
      start_time = time.clock()
      response_future = stub.UnaryCall.async(request, LONG_TIMEOUT)
      # Check that we didn't block on the asynchronous call.
      self.assertGreater(LONG_DELAY, time.clock() - start_time)
      response = response_future.result()
      expected_response = servicer.UnaryCall(request, None)
      self.assertEqual(expected_response, response)

  def testUnaryCallAsyncExpired(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    # set the timeout super low...
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      request = test_pb2.SimpleRequest(response_size=13)
      with servicer.pause():
        # With the servicer paused the deadline must expire before a response.
        response_future = stub.UnaryCall.async(request, SHORT_TIMEOUT)
        with self.assertRaises(exceptions.ExpirationError):
          response_future.result()

  @unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
                 'forever and fix.')
  def testUnaryCallAsyncCancelled(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    request = test_pb2.SimpleRequest(response_size=13)
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      with servicer.pause():
        response_future = stub.UnaryCall.async(request, 1)
        response_future.cancel()
        self.assertTrue(response_future.cancelled())

  def testUnaryCallAsyncFailed(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    request = test_pb2.SimpleRequest(response_size=13)
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      with servicer.fail():
        # A failing servicer surfaces as an exception on the future.
        response_future = stub.UnaryCall.async(request, NORMAL_TIMEOUT)
        self.assertIsNotNone(response_future.exception())

  def testStreamingOutputCall(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    request = StreamingOutputRequest(test_pb2)
    with _CreateService(test_pb2, NO_DELAY) as (servicer, stub, unused_server):
      responses = stub.StreamingOutputCall(request, NORMAL_TIMEOUT)
      expected_responses = servicer.StreamingOutputCall(request, None)
      # izip_longest pads the shorter stream with None, so a length mismatch
      # fails the equality assertion below.
      for check in itertools.izip_longest(expected_responses, responses):
        expected_response, response = check
        self.assertEqual(expected_response, response)

  @unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
                 'forever and fix.')
  def testStreamingOutputCallExpired(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    request = StreamingOutputRequest(test_pb2)
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      with servicer.pause():
        responses = stub.StreamingOutputCall(request, SHORT_TIMEOUT)
        # Draining the stream should trip the short deadline.
        with self.assertRaises(exceptions.ExpirationError):
          list(responses)

  @unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
                 'forever and fix.')
  def testStreamingOutputCallCancelled(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    request = StreamingOutputRequest(test_pb2)
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        unused_servicer, stub, unused_server):
      responses = stub.StreamingOutputCall(request, SHORT_TIMEOUT)
      next(responses)
      responses.cancel()
      # Further iteration after cancel() must raise.
      with self.assertRaises(future.CancelledError):
        next(responses)

  @unittest.skip('TODO(atash,nathaniel): figure out why this times out '
                 'instead of raising the proper error.')
  def testStreamingOutputCallFailed(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    request = StreamingOutputRequest(test_pb2)
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      with servicer.fail():
        responses = stub.StreamingOutputCall(request, 1)
        self.assertIsNotNone(responses)
        with self.assertRaises(exceptions.ServicerError):
          next(responses)

  @unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
                 'forever and fix.')
  def testStreamingInputCall(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    with _CreateService(test_pb2, NO_DELAY) as (servicer, stub, unused_server):
      response = stub.StreamingInputCall(StreamingInputRequest(test_pb2),
                                         NORMAL_TIMEOUT)
      expected_response = servicer.StreamingInputCall(
          StreamingInputRequest(test_pb2), None)
      self.assertEqual(expected_response, response)

  def testStreamingInputCallAsync(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    with _CreateService(test_pb2, LONG_DELAY) as (
        servicer, stub, unused_server):
      start_time = time.clock()
      response_future = stub.StreamingInputCall.async(
          StreamingInputRequest(test_pb2), LONG_TIMEOUT)
      # The async call must return well before the servicer's LONG_DELAY.
      self.assertGreater(LONG_DELAY, time.clock() - start_time)
      response = response_future.result()
      expected_response = servicer.StreamingInputCall(
          StreamingInputRequest(test_pb2), None)
      self.assertEqual(expected_response, response)

  def testStreamingInputCallAsyncExpired(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    # set the timeout super low...
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      with servicer.pause():
        response_future = stub.StreamingInputCall.async(
            StreamingInputRequest(test_pb2), SHORT_TIMEOUT)
        # Expiration is observable both via result() and via exception().
        with self.assertRaises(exceptions.ExpirationError):
          response_future.result()
        self.assertIsInstance(
            response_future.exception(), exceptions.ExpirationError)

  def testStreamingInputCallAsyncCancelled(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      with servicer.pause():
        response_future = stub.StreamingInputCall.async(
            StreamingInputRequest(test_pb2), NORMAL_TIMEOUT)
        response_future.cancel()
        self.assertTrue(response_future.cancelled())
        # result() on a cancelled future raises rather than returning.
        with self.assertRaises(future.CancelledError):
          response_future.result()

  def testStreamingInputCallAsyncFailed(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      with servicer.fail():
        response_future = stub.StreamingInputCall.async(
            StreamingInputRequest(test_pb2), SHORT_TIMEOUT)
        self.assertIsNotNone(response_future.exception())

  def testFullDuplexCall(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    with _CreateService(test_pb2, NO_DELAY) as (servicer, stub, unused_server):
      responses = stub.FullDuplexCall(FullDuplexRequest(test_pb2),
                                      NORMAL_TIMEOUT)
      expected_responses = servicer.FullDuplexCall(FullDuplexRequest(test_pb2),
                                                   None)
      for check in itertools.izip_longest(expected_responses, responses):
        expected_response, response = check
        self.assertEqual(expected_response, response)

  @unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
                 'forever and fix.')
  def testFullDuplexCallExpired(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    request = FullDuplexRequest(test_pb2)
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      with servicer.pause():
        responses = stub.FullDuplexCall(request, SHORT_TIMEOUT)
        with self.assertRaises(exceptions.ExpirationError):
          list(responses)

  @unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
                 'forever and fix.')
  def testFullDuplexCallCancelled(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    with _CreateService(test_pb2, NO_DELAY) as (servicer, stub, unused_server):
      request = FullDuplexRequest(test_pb2)
      responses = stub.FullDuplexCall(request, NORMAL_TIMEOUT)
      next(responses)
      responses.cancel()
      with self.assertRaises(future.CancelledError):
        next(responses)

  @unittest.skip('TODO(atash,nathaniel): figure out why this hangs forever '
                 'and fix.')
  def testFullDuplexCallFailed(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    request = FullDuplexRequest(test_pb2)
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      with servicer.fail():
        responses = stub.FullDuplexCall(request, NORMAL_TIMEOUT)
        self.assertIsNotNone(responses)
        with self.assertRaises(exceptions.ServicerError):
          next(responses)

  @unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
                 'forever and fix.')
  def testHalfDuplexCall(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    with _CreateService(test_pb2, DOES_NOT_MATTER_DELAY) as (
        servicer, stub, unused_server):
      # Generator producing the two-message request stream; instantiated twice
      # so stub and servicer each consume a fresh iterator.
      def HalfDuplexRequest():
        request = test_pb2.StreamingOutputCallRequest()
        request.response_parameters.add(size=1, interval_us=0)
        yield request
        request = test_pb2.StreamingOutputCallRequest()
        request.response_parameters.add(size=2, interval_us=0)
        request.response_parameters.add(size=3, interval_us=0)
        yield request
      responses = stub.HalfDuplexCall(HalfDuplexRequest(), NORMAL_TIMEOUT)
      expected_responses = servicer.HalfDuplexCall(HalfDuplexRequest(), None)
      for check in itertools.izip_longest(expected_responses, responses):
        expected_response, response = check
        self.assertEqual(expected_response, response)

  def testHalfDuplexCallWedged(self):
    import test_pb2  # pylint: disable=g-import-not-at-top
    # Mutable cell shared between the context manager and the generator.
    wait_flag = [False]

    @contextlib.contextmanager
    def wait():  # pylint: disable=invalid-name
      # Where's Python 3's 'nonlocal' statement when you need it?
      wait_flag[0] = True
      yield
      wait_flag[0] = False

    def HalfDuplexRequest():
      request = test_pb2.StreamingOutputCallRequest()
      request.response_parameters.add(size=1, interval_us=0)
      yield request
      # Hold the request stream open while wait() is active, so the server
      # never sees end-of-stream and the half-duplex call cannot respond.
      while wait_flag[0]:
        time.sleep(0.1)

    with _CreateService(test_pb2, NO_DELAY) as (servicer, stub, unused_server):
      with wait():
        responses = stub.HalfDuplexCall(HalfDuplexRequest(), NORMAL_TIMEOUT)
        # half-duplex waits for the client to send all info
        with self.assertRaises(exceptions.ExpirationError):
          next(responses)
if __name__ == '__main__':
  # Run from the script's own directory so the relative '../../bins' and
  # './test.proto' paths used in setUp resolve correctly.
  os.chdir(os.path.dirname(sys.argv[0]))
  unittest.main()
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class prefix_limit(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/prefix-limit. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Configure the maximum number of prefixes that will be
    accepted from a peer
    """

    # NOTE: generated code -- regenerate from the YANG model rather than
    # hand-editing.  The "__config"/"__state" slot names are name-mangled to
    # _prefix_limit__config/_prefix_limit__state on instances.
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "prefix-limit"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers are wrapped in YANGDynClass so that changes are
        # tracked and XPath registration can occur.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construct from an object exposing the same element set,
            # transferring only elements that have been changed.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached to a tree; otherwise return the
        # absolute schema path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "neighbors",
                "neighbor",
                "afi-safis",
                "afi-safi",
                "ipv6-unicast",
                "prefix-limit",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_unicast/prefix_limit/config (container)

        YANG Description: Configuration parameters relating to the prefix
        limit for the AFI-SAFI
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_unicast/prefix_limit/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters relating to the prefix
        limit for the AFI-SAFI
        """
        if hasattr(v, "_utype"):
            # Unwrap an already-typed value back to its user type first.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Reset the child container to a fresh, unchanged instance.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state (container)

        YANG Description: State information relating to the prefix-limit for the
        AFI-SAFI
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State information relating to the prefix-limit for the
        AFI-SAFI
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public accessors; __builtin__.property is used for py2/py3 compatibility
    # (see the six-based shim at the top of the module).
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class prefix_limit(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/prefix-limit. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Configure the maximum number of prefixes that will be
    accepted from a peer
    """

    # NOTE(review): this generated class (from openconfig-network-instance-l2)
    # rebinds the module-level name `prefix_limit`, shadowing the earlier
    # definition from openconfig-network-instance -- confirm this is intended
    # generator output.
    __slots__ = ("_path_helper", "_extmethods", "__config", "__state")

    _yang_name = "prefix-limit"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers are wrapped in YANGDynClass so that changes are
        # tracked and XPath registration can occur.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construct from an object exposing the same element set,
            # transferring only elements that have been changed.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached to a tree; otherwise return the
        # absolute schema path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "neighbors",
                "neighbor",
                "afi-safis",
                "afi-safi",
                "ipv6-unicast",
                "prefix-limit",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_unicast/prefix_limit/config (container)

        YANG Description: Configuration parameters relating to the prefix
        limit for the AFI-SAFI
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_unicast/prefix_limit/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters relating to the prefix
        limit for the AFI-SAFI
        """
        if hasattr(v, "_utype"):
            # Unwrap an already-typed value back to its user type first.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Reset the child container to a fresh, unchanged instance.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state (container)

        YANG Description: State information relating to the prefix-limit for the
        AFI-SAFI
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv6_unicast/prefix_limit/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State information relating to the prefix-limit for the
        AFI-SAFI
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public accessors; __builtin__.property is used for py2/py3 compatibility
    # (see the six-based shim at the top of the module).
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([("config", config), ("state", state)])
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import shutil
import tempfile
from anvil import cfg
from anvil import exceptions
from anvil import shell
from anvil import test
from anvil import utils
class TestYamlRefLoader(test.TestCase):
def setUp(self):
super(TestYamlRefLoader, self).setUp()
self.sample = ""
self.sample2 = ""
self.sample3 = ""
self.temp_dir = tempfile.mkdtemp()
self.loader = cfg.YamlRefLoader(self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir, ignore_errors=True)
del self.loader
super(TestYamlRefLoader, self).tearDown()
def _write_samples(self):
with open(os.path.join(self.temp_dir, 'sample.yaml'), 'w') as f:
f.write(self.sample)
with open(os.path.join(self.temp_dir, 'sample2.yaml'), 'w') as f:
f.write(self.sample2)
with open(os.path.join(self.temp_dir, 'sample3.yaml'), 'w') as f:
f.write(self.sample3)
def test_load__default(self):
self.sample = "default: default_value"
self._write_samples()
processed = self.loader.load('sample')
should_be = utils.OrderedDict({'default': 'default_value'})
self.assertEqual(processed, should_be)
def test_load__empty(self):
self.sample = ""
self._write_samples()
processed = self.loader.load('sample')
should_be = utils.OrderedDict()
self.assertEqual(processed, should_be)
def test_load__empty2(self):
self.sample = "empty: "
self._write_samples()
processed = self.loader.load('sample')
should_be = utils.OrderedDict({'empty': None})
self.assertEqual(processed, should_be)
def test_load__integer(self):
self.sample = "integer: 11"
self._write_samples()
processed = self.loader.load('sample')
should_be = utils.OrderedDict({'integer': 11})
self.assertEqual(processed, should_be)
def test_load__string(self):
self.sample = 'string: "string sample"'
self._write_samples()
processed = self.loader.load('sample')
should_be = utils.OrderedDict({'string': "string sample"})
self.assertEqual(processed, should_be)
def test_load__float(self):
self.sample = "float: 1.1234"
self._write_samples()
processed = self.loader.load('sample')
self.assertAlmostEqual(processed['float'], 1.1234)
def test_load__bool(self):
self.sample = "bool: true"
self._write_samples()
processed = self.loader.load('sample')
should_be = utils.OrderedDict({'bool': True})
self.assertEqual(processed, should_be)
    def test_load__list(self):
        """YAML sequences load as Python lists with mixed element types kept."""
        # NOTE(review): in-string YAML indentation reconstructed -- top-level
        # keys uniformly indented is valid YAML; verify against VCS history.
        self.sample = """
        list:
          - first
          - second
          - 100
        """
        self._write_samples()
        processed = self.loader.load('sample')
        should_be = utils.OrderedDict({'list': ['first', 'second', 100]})
        self.assertEqual(processed, should_be)
    def test_load__dict(self):
        """Mappings load as OrderedDicts sorted by option name."""
        self.sample = """
        dict:
          integer: 11
          default: default_value
          string: "string sample"
        """
        self._write_samples()

        # Note: dictionaries are always sorted by options names.
        processed = self.loader.load('sample')
        should_be = utils.OrderedDict([
            ('dict',
             utils.OrderedDict([
                 ('default', 'default_value'),
                 ('integer', 11),
                 ('string', 'string sample')
             ]))
        ])
        self.assertEqual(processed, should_be)
    def test_load__nested_dict(self):
        """Nested mappings load recursively; comparison is order-insensitive
        here because plain dicts are used for the expectation."""
        self.sample = """
        dict:
          dict1:
            default: default_value
            integer: 11
          dict2:
            default: default_value
            string: "string sample"
        """
        self._write_samples()

        processed = self.loader.load('sample')
        should_be = utils.OrderedDict({
            'dict': {
                'dict1': {'default': 'default_value',
                          'integer': 11},
                'dict2': {'default': 'default_value',
                          'string': 'string sample'}
            }
        })
        self.assertEqual(processed, should_be)
    def test_load__complex(self):
        """Smoke test over all scalar kinds, lists and nested dicts at once.

        Also checks YAML's boolean coercions: `false` -> False, 0 -> False-ish
        and 1 -> True-ish as asserted below.
        """
        self.sample = """
        # some comments...

        integer: 15
        bool-opt: false
        bool-opt2: 0
        bool-opt3: 1
        float: 0.15
        list:
          - 1st
          - 2nd
          - 0.1
          - 100
          - true
        dict:
          dict1:
            default: default_value 1
            integer: 11
            bool: true
          dict2:
            default: default_value 2
        """
        self._write_samples()

        processed = self.loader.load('sample')
        self.assertEqual(len(processed), 7)

        self.assertEqual(processed['integer'], 15)
        self.assertEqual(processed['bool-opt'], False)
        self.assertEqual(processed['bool-opt2'], False)
        self.assertEqual(processed['bool-opt3'], True)
        self.assertAlmostEqual(processed['float'], 0.15)
        self.assertEqual(processed['list'], ['1st', '2nd', 0.1, 100, True])
        self.assertEqual(processed['dict']['dict1']['integer'], 11)
        self.assertEqual(processed['dict']['dict1']['bool'], True)
        self.assertEqual(processed['dict']['dict1']['default'],
                         'default_value 1')
        self.assertEqual(processed['dict']['dict2']['default'],
                         'default_value 2')
def test_load__simple_reference(self):
self.sample = 'opt: $(sample2:opt)'
self.sample2 = 'opt: 10'
self._write_samples()
processed = self.loader.load('sample')
should_be = utils.OrderedDict({'opt': 10})
self.assertEqual(processed, should_be)
    def test_load__self_reference(self):
        """References within the same file resolve transitively
        (opt1 -> opt2 -> opt3 -> 10)."""
        self.sample = """
        opt1: "$(sample:opt2)"
        opt2: "$(sample:opt3)"
        opt3: 10
        """
        self._write_samples()

        processed = self.loader.load('sample')
        should_be = utils.OrderedDict([('opt1', 10), ('opt2', 10), ('opt3', 10)])
        self.assertEqual(processed, should_be)
    def test_load__auto_reference(self):
        """$(auto:...) pseudo-references resolve to host facts
        (IP, hostname, home directory) rather than file contents."""
        self.sample = """
        ip: "$(auto:ip)"
        host: "$(auto:hostname)"
        home: "$(auto:home)"
        """
        self._write_samples()

        processed = self.loader.load('sample')
        self.assertTrue(isinstance(processed, utils.OrderedDict))
        self.assertEqual(len(processed), 3)
        self.assertEqual(processed['ip'], utils.get_host_ip())
        self.assertEqual(processed['host'], shell.hostname())
        self.assertEqual(processed['home'], shell.gethomedir())
    def test_load__multi_reference(self):
        """Several references inside one string are each substituted in place,
        preserving the surrounding literal text."""
        self.sample = """
        multi_ref: "9 + $(sample2:opt) + $(sample3:opt) + $(auto:home) + 12"
        """
        self.sample2 = """opt: 10"""
        self.sample3 = """opt: 11"""
        self._write_samples()

        processed = self.loader.load('sample')
        self.assertTrue(isinstance(processed, utils.OrderedDict))
        self.assertEqual(len(processed), 1)
        self.assertEqual(processed['multi_ref'],
                         "9 + 10 + 11 + " + shell.gethomedir() + " + 12")
    def test_load__dict_reference(self):
        """References nested inside a mapping value are resolved too."""
        self.sample = """
        sample2:
          opt: "$(sample2:opt)"
        """
        self.sample2 = """opt: 10"""
        self._write_samples()

        processed = self.loader.load('sample')
        should_be = utils.OrderedDict([
            ('sample2', utils.OrderedDict([
                ('opt', 10)
            ]))
        ])
        self.assertEqual(processed, should_be)
    def test_load__wrapped_ref(self):
        """Text around a reference is kept, so 23 interpolates into strings
        as a prefix, suffix or both."""
        self.sample = """
        stable: 23
        prefixed: "1$(sample:stable)"
        suffixed: "$(sample:stable)4"
        wrapped: "1$(sample:stable)4"
        """
        self._write_samples()

        processed = self.loader.load('sample')
        self.assertEqual(processed['prefixed'], "123")
        self.assertEqual(processed['suffixed'], "234")
        self.assertEqual(processed['wrapped'], "1234")
    def test_load__complex_reference(self):
        """Cross-file reference chains resolve to their ultimate scalar,
        including chains that bounce between files and references embedded
        in lists and dicts.  Loading sample2/sample3 afterwards checks the
        same resolution from their own points of view.
        """
        self.sample = """
        stable: 9
        ref0: "$(sample:stable)"
        ref1: "$(sample2:stable)"
        ref2: "$(sample2:ref1)"
        ref3: "$(sample2:ref2)"
        ref4: "$(sample2:ref3)"
        ref5: "$(sample3:ref1)"
        sample:
          stable: "$(sample:stable)"
          ref0: "$(sample:ref0)"
          ref1: "$(sample:ref1)"
        sample2:
          stable: "$(sample2:stable)"
          ref3: "$(sample2:ref3)"
        sample3:
          stable: "$(sample3:stable)"
          ref1: "$(sample3:ref1)"
        list:
          - "$(sample:sample2)"
          - "$(sample:sample3)"
        dict:
          sample3: "$(sample:sample3)"
          sample2: "$(sample:sample2)"
        """
        self.sample2 = """
        stable: 10
        ref1: "$(sample:stable)"
        ref2: "$(sample3:stable)"
        ref3: "$(sample3:ref1)"
        ref4: "$(sample2:stable)"
        """
        self.sample3 = """
        stable: 11
        ref1: "$(sample:stable)"
        """
        self._write_samples()

        processed = self.loader.load('sample')
        self.assertTrue(isinstance(processed, utils.OrderedDict))
        #self.assertEqual(len(processed), 11)
        # Top-level chains: e.g. ref3 -> sample2:ref2 -> sample3:stable == 11.
        self.assertEqual(processed['stable'], 9)
        self.assertEqual(processed['ref0'], 9)
        self.assertEqual(processed['ref1'], 10)
        self.assertEqual(processed['ref2'], 9)
        self.assertEqual(processed['ref3'], 11)
        self.assertEqual(processed['ref4'], 9)
        self.assertEqual(processed['ref5'], 9)

        sample = utils.OrderedDict([
            ('ref0', 9),
            ('ref1', 10),
            ('stable', 9),
        ])
        self.assertEqual(processed['sample'], sample)

        sample2 = utils.OrderedDict([
            ('ref3', 9),
            ('stable', 10),
        ])
        self.assertEqual(processed['sample2'], sample2)

        sample3 = utils.OrderedDict([
            ('ref1', 9),
            ('stable', 11),
        ])
        self.assertEqual(processed['sample3'], sample3)

        self.assertEqual(processed['list'], [sample2, sample3])
        self.assertEqual(
            processed['dict'],
            utils.OrderedDict([
                ('sample2', sample2),
                ('sample3', sample3),
            ])
        )

        processed = self.loader.load('sample2')
        self.assertEqual(processed, {
            'stable': 10,
            'ref1': 9,
            'ref2': 11,
            'ref3': 9,
            'ref4': 10,
        })

        processed = self.loader.load('sample3')
        self.assertEqual(len(processed), 2)
        self.assertEqual(processed['stable'], 11)
        self.assertEqual(processed['ref1'], 9)
def test_load__magic_reference(self):
        """A nested self-reference resolves through the top-level option.

        'magic.reference' points at the top-level 'reference', which in
        turn points at 'stable'; both must resolve to the stable value
        rather than looping.
        """
        self.sample = """
            magic:
                reference: $(sample:reference)
            reference: "$(sample:stable)"
            stable: 1
        """
        self._write_samples()
        processed = self.loader.load('sample')
        self.assertEqual(processed['stable'], 1)
        self.assertEqual(processed['reference'], 1)
        self.assertEqual(processed['magic']['reference'], 1)
def test_load__more_complex_ref(self):
        """Test loading reference links via dictionaries and lists.

        Whole-mapping and whole-list references must expand to the fully
        resolved container, and those containers may themselves be
        referenced from other lists/dicts.
        """
        self.sample = """
            stable: 9
            ref_to_s1: "$(sample:stable)"
            ref_to_s2: "$(sample2:stable)"
            ref_to_s3: "$(sample3:stable)"
            sample:
                stable: "$(sample:stable)"
                ref_to_s1: "$(sample:ref_to_s1)"
                ref_to_s2: "$(sample:ref_to_s2)"
            list:
                - "$(sample:stable)"
                - "$(sample2:stable)"
                - "$(sample3:stable)"
                - "$(sample:ref_to_s1)"
                - "$(sample:ref_to_s2)"
                - "$(sample:ref_to_s3)"
                - "$(sample:sample)"
            dict:
                stable: "$(sample:stable)"
                sample: "$(sample:sample)"
                list: "$(sample:list)"
        """
        self.sample2 = """stable: 10"""
        self.sample3 = """stable: 11"""
        self._write_samples()
        processed = self.loader.load('sample')
        self.assertEqual(processed['stable'], 9)
        self.assertEqual(processed['ref_to_s1'], 9)
        self.assertEqual(processed['ref_to_s2'], 10)
        self.assertEqual(processed['ref_to_s3'], 11)
        self.assertEqual(
            processed['sample'],
            utils.OrderedDict([('ref_to_s1', 9),
                               ('ref_to_s2', 10),
                               ('stable', 9)])
        )
        # The list embeds both scalars and the resolved 'sample' mapping.
        self.assertEqual(processed['list'], [9, 10, 11, 9, 10, 11,
                                             processed['sample']])
        self.assertEqual(
            processed['dict'],
            utils.OrderedDict([
                ('list', processed['list']),
                ('sample', processed['sample']),
                ('stable', 9),
            ])
        )
def test_load__raises_no_option(self):
        """Referencing a missing option raises YamlOptionNotFoundException."""
        self.sample = "ref: $(sample2:no-such-opt)"
        self.sample2 = ""
        self._write_samples()
        load = self.loader.load
        self.assertRaises(exceptions.YamlOptionNotFoundException,
                          load, 'sample')
def test_load__raises_no_config(self):
        """Referencing a nonexistent config file raises the dedicated error."""
        self.sample = "ref: $(no-sush-conf:opt)"
        self.sample2 = ""
        self._write_samples()
        self.assertRaises(exceptions.YamlConfigNotFoundException,
                          self.loader.load, 'sample')
def test_load__raises_loop(self):
        """Two files referencing each other form a cycle and must fail."""
        self.sample = "opt: $(sample2:opt)"
        self.sample2 = "opt: $(sample:opt)"
        self._write_samples()
        self.assertRaises(exceptions.YamlLoopException,
                          self.loader.load, 'sample')
def test_load__raises_self_loop(self):
        """An option referencing itself triggers YamlLoopException.

        Checked for a direct scalar self-reference and for references
        reached through a list and a nested mapping.
        """
        for variant in (
            "opt: $(sample:opt)",
            """
            opt:
              - $(sample:opt)
            """,
            """
            opt:
              opt: $(sample:opt)
            """,
        ):
            self.sample = variant
            self._write_samples()
            self.assertRaises(exceptions.YamlLoopException,
                              self.loader.load, 'sample')
def test_update_cache(self):
        """Cached overrides take precedence over values parsed from disk.

        Overriding 'reference' in sample's own cache wins outright, while
        overriding 'stable' in sample2's cache affects any option that
        still resolves through sample2 ('reference2'); options pointing at
        untouched keys ('reference3' -> stable2) keep their file values.
        """
        self.sample = """
            stable: 9
            reference: "$(sample2:stable)"
            reference2: "$(sample2:stable)"
            reference3: "$(sample2:stable2)"
        """
        self.sample2 = """
            stable: 10
            stable2: 11
        """
        self._write_samples()
        self.loader.update_cache('sample', dict(reference=20))
        self.loader.update_cache('sample2', dict(stable=21))
        processed = self.loader.load('sample')
        self.assertEqual(processed['stable'], 9)
        self.assertEqual(processed['reference'], 20)
        self.assertEqual(processed['reference2'], 21)
        self.assertEqual(processed['reference3'], 11)
def test_update_cache__few_times(self):
        """update_cache() may be applied repeatedly; the last value wins."""
        self.sample = "stable: '$(sample2:stable)'"
        self.sample2 = "stable: 10"
        self._write_samples()
        self.assertEqual(self.loader.load('sample')['stable'], 10)
        # Each successive override must be visible on the next load.
        for value in (11, 12):
            self.loader.update_cache('sample', dict(stable=value))
            self.assertEqual(self.loader.load('sample')['stable'], value)
class TestYamlMergeLoader(test.TestCase):
    # Exercises cfg.YamlMergeLoader: merging distro options, general.yaml,
    # component.yaml and persona-specific options into one OrderedDict.
    def setUp(self):
        """Build minimal distro/persona stand-ins and a temp config dir."""
        super(TestYamlMergeLoader, self).setUp()
        # Lightweight fakes exposing only the attributes the loader reads.
        class Distro(object):
            def __init__(self):
                self.options = {
                    'unique-distro': True,
                    'redefined-in-general': 0,
                    'redefined-in-component': 0
                }
        class Persona(object):
            def __init__(self):
                self.component_options = {
                    'component': {
                        'unique-specific': True,
                        'redefined-in-specific': 1
                    }
                }
        self.general = ""
        self.component = ""
        self.distro = Distro()
        self.persona = Persona()
        self.temp_dir = tempfile.mkdtemp()
        # Point the loader's component config dir at our temp sandbox.
        with mock.patch('anvil.settings.COMPONENT_CONF_DIR', self.temp_dir):
            self.loader = cfg.YamlMergeLoader(self.temp_dir)
    def tearDown(self):
        super(TestYamlMergeLoader, self).tearDown()
        shutil.rmtree(self.temp_dir, ignore_errors=True)
    def _write_samples(self):
        # Persist the fixture YAML so the loader can read it from disk.
        with open(os.path.join(self.temp_dir, 'general.yaml'), 'w') as f:
            f.write(self.general)
        with open(os.path.join(self.temp_dir, 'component.yaml'), 'w') as f:
            f.write(self.component)
    def test_load(self):
        """Merge precedence: distro < general < component < persona-specific."""
        self.general = """
            unique-general: True
            redefined-in-general: 1
            redefined-in-component: 1
        """
        self.component = """
            unique-component: True
            redefined-in-component: 2
            redefined-in-specific: 0
        """
        self._write_samples()
        merged = self.loader.load(self.distro, 'component', self.persona)
        should_be = utils.OrderedDict([
            ('app_dir', os.path.join(self.temp_dir, 'component', 'app')),
            ('component_dir', os.path.join(self.temp_dir, 'component')),
            ('root_dir', os.path.join(self.temp_dir)),
            ('trace_dir', os.path.join(self.temp_dir, 'component', 'traces')),
            ('unique-distro', True),
            ('redefined-in-general', 1),
            ('redefined-in-component', 2),
            ('redefined-in-specific', 1),
            ('unique-general', True),
            ('unique-specific', True),
            ('unique-component', True),
        ])
        self.assertEqual(merged, should_be)
        # yet once loading with changed values.
        self.persona.component_options['component']['redefined-in-specific'] = 2
        merged = self.loader.load(self.distro, 'component', self.persona)
        self.assertEqual(merged['redefined-in-specific'], 2)
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides Guru Meditation Report
This module defines the actual OpenStack Guru Meditation
Report class.
This can be used in the OpenStack command definition files.
For example, in a nova command module (under nova/cmd):
.. code-block:: python
:emphasize-lines: 8,9,10
CONF = cfg.CONF
# maybe import some options here...
def main():
config.parse_args(sys.argv)
logging.setup('blah')
TextGuruMeditation.register_section('Some Special Section',
special_section_generator)
TextGuruMeditation.setup_autorun(version_object)
server = service.Service.create(binary='some-service',
topic=CONF.some_service_topic)
service.serve(server)
service.wait()
Then, you can do
.. code-block:: bash
$ kill -USR1 $SERVICE_PID
and get a Guru Meditation Report in the file or terminal
where stderr is logged for that given service.
"""
from __future__ import print_function
import inspect
import os
import signal
import sys
from oslo_utils import timeutils
from nova.openstack.common.report.generators import conf as cgen
from nova.openstack.common.report.generators import process as prgen
from nova.openstack.common.report.generators import threading as tgen
from nova.openstack.common.report.generators import version as pgen
from nova.openstack.common.report import report
class GuruMeditation(object):
    """A Guru Meditation Report Mixin/Base Class
    This class is a base class for Guru Meditation Reports.
    It provides facilities for registering sections and
    setting up functionality to auto-run the report on
    a certain signal.
    This class should always be used in conjunction with
    a Report class via multiple inheritance. It should
    always come first in the class list to ensure the
    MRO is correct.
    """
    # Format for the timestamp embedded in dump file names.
    timestamp_fmt = "%Y%m%d%H%M%S"
    def __init__(self, version_obj, sig_handler_tb=None, *args, **kwargs):
        # version_obj: version info rendered in the Package section.
        # sig_handler_tb: frame object handed to the signal handler, used
        # for the Threads section (may be None).
        self.version_obj = version_obj
        self.traceback = sig_handler_tb
        super(GuruMeditation, self).__init__(*args, **kwargs)
        # Remember where the per-run sections start so _readd_sections can
        # drop and rebuild them without touching sections added by the
        # Report base class constructor.
        self.start_section_index = len(self.sections)
    @classmethod
    def register_section(cls, section_title, generator):
        """Register a New Section
        This method registers a persistent section for the current
        class.
        :param str section_title: the title of the section
        :param generator: the generator for the section
        """
        try:
            cls.persistent_sections.append([section_title, generator])
        except AttributeError:
            # First registration on this class: create the list lazily.
            cls.persistent_sections = [[section_title, generator]]
    @classmethod
    def setup_autorun(cls, version, service_name=None,
                      log_dir=None, signum=None):
        """Set Up Auto-Run
        This method sets up the Guru Meditation Report to automatically
        get dumped to stderr or a file in a given dir when the given signal
        is received.
        :param version: the version object for the current product
        :param service_name: this program name used to construct logfile name
        :param log_dir: path to a log directory where to create a file
        :param signum: the signal to associate with running the report
        """
        if not signum and hasattr(signal, 'SIGUSR1'):
            # SIGUSR1 is not supported on all platforms
            signum = signal.SIGUSR1
        if signum:
            # The lambda curries version/service_name/log_dir into a
            # standard (signum, frame) signal-handler signature.
            signal.signal(signum,
                          lambda sn, tb: cls.handle_signal(
                              version, service_name, log_dir, tb))
    @classmethod
    def handle_signal(cls, version, service_name, log_dir, traceback):
        """The Signal Handler
        This method (indirectly) handles receiving a registered signal and
        dumping the Guru Meditation Report to stderr or a file in a given dir.
        If service name and log dir are not None, the report will be dumped to
        a file named $service_name_gurumeditation_$current_time in the log_dir
        directory.
        This method is designed to be curried into a proper signal handler by
        currying out the version
        parameter.
        :param version: the version object for the current product
        :param service_name: this program name used to construct logfile name
        :param log_dir: path to a log directory where to create a file
        :param traceback: the traceback provided to the signal handler
        """
        try:
            res = cls(version, traceback).run()
        except Exception:
            # Best-effort: a failing report must never crash the service.
            print("Unable to run Guru Meditation Report!",
                  file=sys.stderr)
        else:
            if log_dir:
                # Fall back to the outermost stack frame's filename, i.e.
                # the entry-point script, when no service name was given.
                service_name = service_name or os.path.basename(
                    inspect.stack()[-1][1])
                filename = "%s_gurumeditation_%s" % (
                    service_name, timeutils.strtime(fmt=cls.timestamp_fmt))
                filepath = os.path.join(log_dir, filename)
                try:
                    with open(filepath, "w") as dumpfile:
                        dumpfile.write(res)
                except Exception:
                    print("Unable to dump Guru Meditation Report to file %s" %
                          (filepath,), file=sys.stderr)
            else:
                print(res, file=sys.stderr)
    def _readd_sections(self):
        # Drop previously added per-run sections so a repeated signal does
        # not duplicate them, then rebuild the standard section list.
        del self.sections[self.start_section_index:]
        self.add_section('Package',
                         pgen.PackageReportGenerator(self.version_obj))
        self.add_section('Threads',
                         tgen.ThreadReportGenerator(self.traceback))
        self.add_section('Green Threads',
                         tgen.GreenThreadReportGenerator())
        self.add_section('Processes',
                         prgen.ProcessReportGenerator())
        self.add_section('Configuration',
                         cgen.ConfigReportGenerator())
        try:
            for section_title, generator in self.persistent_sections:
                self.add_section(section_title, generator)
        except AttributeError:
            # No persistent sections were ever registered on this class.
            pass
    def run(self):
        """Refresh the section list and render the report."""
        self._readd_sections()
        return super(GuruMeditation, self).run()
# GuruMeditation must come first to get the correct MRO
class TextGuruMeditation(GuruMeditation, report.TextReport):
    """A Text Guru Meditation Report
    This report is the basic human-readable Guru Meditation Report
    It contains the following sections by default
    (in addition to any registered persistent sections):
    - Package Information
    - Threads List
    - Green Threads List
    - Process List
    - Configuration Options
    :param version_obj: the version object for the current product
    :param traceback: an (optional) frame object providing the actual
                      traceback for the current thread
    """
    def __init__(self, version_obj, traceback=None):
        # 'Guru Meditation' becomes the report title used by TextReport.
        super(TextGuruMeditation, self).__init__(version_obj, traceback,
                                                 'Guru Meditation')
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2011, Monash e-Research Centre
# (Monash University, Australia)
# Copyright (c) 2010-2011, VeRSI Consortium
# (Victorian eResearch Strategic Initiative, Australia)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the VeRSI, the VeRSI Consortium members, nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
test_views.py
http://docs.djangoproject.com/en/dev/topics/testing/
.. moduleauthor:: Russell Sim <russell.sim@monash.edu>
.. moduleauthor:: Steve Androulakis <steve.androulakis@monash.edu>
"""
from datetime import datetime
from compare import expect
from django.core.exceptions import SuspiciousOperation
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
import pytz
from tardis.tardis_portal.models import Experiment, Dataset, DataFile, \
DataFileObject, Schema, ParameterName, DatafileParameterSet, \
DatafileParameter
from tardis.tardis_portal.ParameterSetManager import ParameterSetManager
class ParameterSetManagerTestCase(TestCase):
    """Tests for ParameterSetManager covering string, numeric, datetime
    and LINK parameters on a DatafileParameterSet fixture."""
    def setUp(self):
        from django.contrib.auth.models import User
        from tempfile import mkdtemp
        user = 'tardis_user1'
        pwd = 'secret'
        email = ''
        self.user = User.objects.create_user(user, email, pwd)
        self.test_dir = mkdtemp()
        self.exp = Experiment(title='test exp1',
                              institution_name='monash',
                              created_by=self.user)
        self.exp.save()
        self.dataset = Dataset(description="dataset description...")
        self.dataset.save()
        self.dataset.experiments.add(self.exp)
        self.dataset.save()
        self.datafile = DataFile(dataset=self.dataset,
                                 filename="testfile.txt",
                                 size="42", md5sum='bogus')
        self.datafile.save()
        self.dfo = DataFileObject(
            datafile=self.datafile,
            storage_box=self.datafile.get_default_storage_box(),
            uri="1/testfile.txt")
        self.dfo.save()
        self.schema = Schema(
            namespace="http://localhost/psmtest/df/",
            name="Parameter Set Manager", type=3)
        self.schema.save()
        self.parametername1 = ParameterName(
            schema=self.schema, name="parameter1",
            full_name="Parameter 1")
        self.parametername1.save()
        self.parametername2 = ParameterName(
            schema=self.schema, name="parameter2",
            full_name="Parameter 2",
            data_type=ParameterName.NUMERIC)
        self.parametername2.save()
        self.parametername3 = ParameterName(
            schema=self.schema, name="parameter3",
            full_name="Parameter 3",
            data_type=ParameterName.DATETIME)
        self.parametername3.save()
        self.datafileparameterset = DatafileParameterSet(
            schema=self.schema, datafile=self.datafile)
        self.datafileparameterset.save()
        self.datafileparameter1 = DatafileParameter(
            parameterset=self.datafileparameterset,
            name=self.parametername1, string_value="test1")
        self.datafileparameter1.save()
        self.datafileparameter2 = DatafileParameter(
            parameterset=self.datafileparameterset,
            name=self.parametername2, numerical_value=2)
        self.datafileparameter2.save()
        # Create a ParameterName and Parameter of type LINK to an experiment
        self.parametername_exp_link = ParameterName(
            schema=self.schema, name="exp_link",
            full_name="This parameter is a experiment LINK",
            data_type=ParameterName.LINK)
        self.parametername_exp_link.save()
        self.exp_link_param = DatafileParameter(
            parameterset=self.datafileparameterset,
            name=self.parametername_exp_link)
        exp_url = self.exp.get_absolute_url()  # /experiment/view/1/
        self.exp_link_param.set_value(exp_url)
        self.exp_link_param.save()
        # Create a ParameterName and Parameter of type LINK to a dataset
        self.parametername_dataset_link = ParameterName(
            schema=self.schema, name="dataset_link",
            full_name="This parameter is a dataset LINK",
            data_type=ParameterName.LINK)
        self.parametername_dataset_link.save()
        self.dataset_link_param = DatafileParameter(
            parameterset=self.datafileparameterset,
            name=self.parametername_dataset_link)
        dataset_url = self.dataset.get_absolute_url()  # /dataset/1/
        self.dataset_link_param.set_value(dataset_url)
        self.dataset_link_param.save()
        # Create a ParameterName type LINK to an unresolvable (non-URL)
        # free-text value
        self.parametername_unresolvable_link = ParameterName(
            schema=self.schema, name="freetext_link",
            full_name="This parameter is a non-URL LINK",
            data_type=ParameterName.LINK)
        self.parametername_unresolvable_link.save()
    def tearDown(self):
        self.exp.delete()
        self.user.delete()
        self.parametername1.delete()
        self.parametername2.delete()
        self.parametername3.delete()
        self.parametername_exp_link.delete()
        self.parametername_dataset_link.delete()
        self.parametername_unresolvable_link.delete()
        self.schema.delete()
    def test_existing_parameterset(self):
        psm = ParameterSetManager(parameterset=self.datafileparameterset)
        self.assertTrue(psm.get_schema().namespace ==
                        "http://localhost/psmtest/df/")
        self.assertTrue(psm.get_param("parameter1").string_value == "test1")
        self.assertTrue(psm.get_param("parameter2", True) == 2)
    def test_new_parameterset(self):
        psm = ParameterSetManager(parentObject=self.datafile,
                                  schema="http://localhost/psmtest/df2/")
        self.assertTrue(psm.get_schema().namespace ==
                        "http://localhost/psmtest/df2/")
        psm.set_param("newparam1", "test3", "New Parameter 1")
        self.assertTrue(psm.get_param("newparam1").string_value ==
                        "test3")
        self.assertTrue(psm.get_param("newparam1").name.full_name ==
                        "New Parameter 1")
        psm.new_param("newparam1", "test4")
        self.assertTrue(len(psm.get_params("newparam1", True)) == 2)
        psm.set_param_list("newparam2", ("a", "b", "c", "d"))
        self.assertTrue(len(psm.get_params("newparam2")) == 4)
        psm.set_params_from_dict(
            {"newparam2": "test5", "newparam3": 3})
        self.assertTrue(psm.get_param("newparam2", True) == "test5")
        # the newparam3 gets created and '3' is set to a string_value
        # since once cannot assume that an initial numeric value
        # will imply continuing numeric type for this new param
        self.assertTrue(psm.get_param("newparam3").string_value == '3')
        psm.delete_params("newparam1")
        self.assertTrue(len(psm.get_params("newparam1", True)) == 0)
    def test_link_parameter_type(self):
        """
        Test that Parameter.link_gfk (GenericForeignKey) is correctly
        assigned after using Parameter.set_value(some_url) for a LINK Parameter.
        """
        psm = ParameterSetManager(parameterset=self.datafileparameterset)
        # Check link to experiment
        exp_url = self.exp.get_absolute_url()  # /experiment/view/1/
        self.assertTrue(psm.get_param("exp_link").string_value ==
                        exp_url)
        self.assertTrue(psm.get_param("exp_link").link_id ==
                        self.exp.id)
        exp_ct = ContentType.objects.get(model__iexact="experiment")
        self.assertTrue(psm.get_param("exp_link").link_ct == exp_ct)
        self.assertTrue(psm.get_param("exp_link").link_gfk == self.exp)
        # Check link to dataset
        dataset_url = self.dataset.get_absolute_url()  # /dataset/1/
        self.assertTrue(psm.get_param("dataset_link").string_value ==
                        dataset_url)
        self.assertTrue(psm.get_param("dataset_link").link_id ==
                        self.dataset.id)
        dataset_ct = ContentType.objects.get(model__iexact="dataset")
        self.assertTrue(psm.get_param("dataset_link").link_ct == dataset_ct)
        self.assertTrue(psm.get_param("dataset_link").link_gfk == self.dataset)
    def test_link_parameter_type_extra(self):
        # make a second ParameterSet for testing some variations
        # in URL values
        self.datafileparameterset2 = DatafileParameterSet(
            schema=self.schema, datafile=self.datafile)
        self.datafileparameterset2.save()
        psm = ParameterSetManager(parameterset=self.datafileparameterset2)
        self.dataset_link_param2 = DatafileParameter(
            parameterset=self.datafileparameterset2,
            name=self.parametername_dataset_link)
        # /dataset/1 - no trailing slash
        dataset_url = self.dataset.get_absolute_url()
        self.dataset_link_param2.set_value(dataset_url)
        self.dataset_link_param2.save()
        # Check link_id/link_ct/link_gfk to dataset
        self.assertTrue(psm.get_param("dataset_link").link_id ==
                        self.dataset.id)
        dataset_ct = ContentType.objects.get(model__iexact="dataset")
        self.assertTrue(psm.get_param("dataset_link").link_ct == dataset_ct)
        self.assertTrue(psm.get_param("dataset_link").link_gfk == self.dataset)
        # Test links of the form /api/v1/experiment/<experiment_id>/
        self.exp_link_param2 = DatafileParameter(
            parameterset=self.datafileparameterset2,
            name=self.parametername_exp_link)
        exp_url = '/api/v1/experiment/%s/' % self.exp.id
        self.exp_link_param2.set_value(exp_url)
        self.exp_link_param2.save()
        # Check link_id/link_ct/link_gfk to experiment
        self.assertTrue(psm.get_param("exp_link").link_id ==
                        self.exp.id)
        exp_ct = ContentType.objects.get(model__iexact="experiment")
        self.assertTrue(psm.get_param("exp_link").link_ct == exp_ct)
        self.assertTrue(psm.get_param("exp_link").link_gfk == self.exp)
    def test_unresolvable_link_parameter(self):
        """
        Test that LINK Parameters that can't be resolved to a model (including
        non-URL values) still work.
        """
        self.datafileparameterset3 = DatafileParameterSet(
            schema=self.schema, datafile=self.datafile)
        self.datafileparameterset3.save()
        psm = ParameterSetManager(parameterset=self.datafileparameterset3)
        # Create a Parameter of type LINK to an unresolvable (non-URL)
        # free-text value
        self.freetext_link_param = DatafileParameter(
            parameterset=self.datafileparameterset3,
            name=self.parametername_unresolvable_link)
        self.assertRaises(SuspiciousOperation,
                          lambda: self.freetext_link_param.set_value(
                              "FREETEXT_ID_123"))
    def test_tz_naive_date_handling(self):
        """
        Ensure that dates are handled in a timezone-aware way.
        """
        psm = ParameterSetManager(parameterset=self.datafileparameterset)
        # NOTE: plain decimal literals (1, not 01) -- leading-zero integer
        # literals are a SyntaxError on Python 3.
        psm.new_param("parameter3", str(datetime(1970, 1, 1, 10, 0, 0)))
        expect(psm.get_param("parameter3", True))\
            .to_equal(datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc))
    def test_tz_aware_date_handling(self):
        """
        Ensure that dates are handled in a timezone-aware way.
        """
        psm = ParameterSetManager(parameterset=self.datafileparameterset)
        psm.new_param("parameter3",
                      '1970-01-01T08:00:00+08:00')
        expect(psm.get_param("parameter3", True))\
            .to_equal(datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc))
|
|
import re
from django.db.backends import BaseDatabaseIntrospection, FieldInfo
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')


def get_field_size(name):
    """Extract the size number from a "varchar(11)" type name.

    Returns None when the type name is not a (var)char with a length.
    """
    match = field_size_re.search(name)
    if match is None:
        return None
    return int(match.group(1))
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
    """Dictionary-like mapping of SQLite column types to Django field types.

    SQLite does not normalize declared column types (whatever string was
    used in CREATE TABLE is echoed back), so parameterized types such as
    "varchar(30)" cannot be resolved by a plain dict lookup; __getitem__
    handles them specially.
    """
    # Maps SQL types to Django Field types. Some of the SQL types have multiple
    # entries here because SQLite allows for anything and doesn't normalize the
    # field type; it uses whatever was given.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'blob': 'BinaryField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }

    def __getitem__(self, key):
        """Return the Django field type (or (type, kwargs) pair) for *key*.

        Raises KeyError when the SQL type is neither a known base type nor
        a sized (var)char.
        """
        key = key.lower()
        try:
            return self.base_data_types_reverse[key]
        except KeyError:
            size = get_field_size(key)
            if size is not None:
                return ('CharField', {'max_length': size})
            # Include the offending key so lookup failures are debuggable
            # (the original bare ``raise KeyError`` discarded it).
            raise KeyError(key)
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """SQLite implementation of Django's database introspection.

    SQLite exposes little metadata directly, so several methods parse the
    CREATE TABLE SQL stored in sqlite_master instead of querying a schema.
    """
    data_types_reverse = FlexibleFieldLookupDict()

    def get_table_list(self, cursor):
        "Returns a list of table names in the current database."
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND NOT name='sqlite_sequence'
            ORDER BY name""")
        return [row[0] for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        return [FieldInfo(info['name'], info['type'], None, info['size'], None, None,
                          info['null_ok']) for info in self._table_info(cursor, table_name)]

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        # Dictionary of relations to return
        relations = {}

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            # Raw string: '\(' in a non-raw literal is an invalid escape
            # (deprecated since Python 3.6).
            m = re.search(r'references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]

            # Find the referenced column's 0-based index in the other table.
            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchall()[0]
            other_table_results = result[0].strip()
            li, ri = other_table_results.index('('), other_table_results.rindex(')')
            other_table_results = other_table_results[li + 1:ri]

            for other_index, other_desc in enumerate(other_table_results.split(',')):
                other_desc = other_desc.strip()
                if other_desc.startswith('UNIQUE'):
                    continue

                name = other_desc.split(' ', 1)[0].strip('"')
                if name == column:
                    relations[field_index] = (other_index, table)
                    break

        return relations

    def get_key_columns(self, cursor, table_name):
        """
        Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
        key columns in given table.
        """
        key_columns = []

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            # Raw string for the same escape-sequence reason as above.
            m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue

            # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
            key_columns.append(tuple(s.strip('"') for s in m.groups()))

        return key_columns

    def get_indexes(self, cursor, table_name):
        """Return {column_name: {'primary_key': bool, 'unique': bool}} for
        single-column indexes on the given table."""
        indexes = {}
        for info in self._table_info(cursor, table_name):
            if info['pk'] != 0:
                indexes[info['name']] = {'primary_key': True,
                                         'unique': False}
        cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
        # seq, name, unique
        for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            info = cursor.fetchall()
            # Skip indexes across multiple fields
            if len(info) != 1:
                continue
            name = info[0][2]  # seqno, cid, name
            indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
                             'unique': unique}
        return indexes

    def get_primary_key_column(self, cursor, table_name):
        """
        Get the column name of the primary key for the given table.
        """
        # Don't use PRAGMA because that causes issues with some transactions
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        row = cursor.fetchone()
        if row is None:
            raise ValueError("Table %s does not exist" % table_name)
        results = row[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?$', field_desc)
            if m:
                return m.groups()[0]
        return None

    def _table_info(self, cursor, name):
        """Return PRAGMA table_info rows as dicts (name/type/size/null_ok/pk)."""
        cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
        # cid, name, type, notnull, dflt_value, pk
        return [{'name': field[1],
                 'type': field[2],
                 'size': get_field_size(field[2]),
                 'null_ok': not field[3],
                 'pk': field[5]  # undocumented
                 } for field in cursor.fetchall()]

    def get_constraints(self, cursor, table_name):
        """
        Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
        """
        constraints = {}
        # Get the index info
        cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
        for number, index, unique in cursor.fetchall():
            # Get the index info for that index
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            for index_rank, column_rank, column in cursor.fetchall():
                if index not in constraints:
                    constraints[index] = {
                        "columns": [],
                        "primary_key": False,
                        "unique": bool(unique),
                        "foreign_key": False,
                        "check": False,
                        "index": True,
                    }
                constraints[index]['columns'].append(column)
        # Get the PK
        pk_column = self.get_primary_key_column(cursor, table_name)
        if pk_column:
            # SQLite doesn't actually give a name to the PK constraint,
            # so we invent one. This is fine, as the SQLite backend never
            # deletes PK constraints by name, as you can't delete constraints
            # in SQLite; we remake the table with a new PK instead.
            constraints["__primary__"] = {
                "columns": [pk_column],
                "primary_key": True,
                "unique": False,  # It's not actually a unique constraint.
                "foreign_key": False,
                "check": False,
                "index": False,
            }
        return constraints
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.